-
Notifications
You must be signed in to change notification settings - Fork 1
/
dataset.py
78 lines (61 loc) · 2.58 KB
/
dataset.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
import os
import torch
from torch.utils.data import Dataset
import cv2
import numpy as np
from utils import *
from PackPathwayTransform import PackPathway
class FrameDataset(Dataset):
    """Dataset of videos stored as folders of frame images.

    Expected layout: ``main_dir/<ClassName>/<video_folder>/<frame files>``.
    Each sample is the full stack of frames for one video.
    """

    # Closed set of supported class names; a sample's label is the index
    # of its class-folder name in this list.
    classes = [ 'Basketball', 'Biking', 'Diving', 'PizzaTossing', 'RopeClimbing' ]

    def __init__(self, main_dir, transforms=None, slowfast=False):
        """
        Args:
            main_dir: root directory containing one sub-folder per class.
            transforms: optional albumentations-style callable applied per
                frame as ``transforms(image=img)['image']``.
            slowfast: if True, __getitem__ returns SlowFast-formatted
                pathways (plus idx and an empty dict) instead of a plain clip.
        """
        super().__init__()
        self.transforms = transforms
        self.main_dir = main_dir
        self.slowfast = slowfast
        self.pack_pathway = PackPathway()
        self.x = []  # x[i]: ordered list of frame file paths for video i
        self.y = []  # y[i]: integer class id for video i
        self.load()

    def load(self):
        """Index every video folder under main_dir into self.x / self.y."""
        # Sort entries: os.listdir order is filesystem-dependent, so without
        # sorting the dataset order (and worse, the temporal frame order of
        # each video) would be nondeterministic.
        for cls in sorted(os.listdir(self.main_dir)):
            class_path = os.path.join(self.main_dir, cls)
            # Skip stray files (e.g. .DS_Store); only class directories count.
            if not os.path.isdir(class_path):
                continue
            # Each sub-folder of a class directory is one video.
            folders_only = [ f for f in os.listdir(class_path)
                             if os.path.isdir( os.path.join(class_path, f) ) ]
            for item in sorted(folders_only):
                item_path = os.path.join(class_path, item)
                # Sorted so frames keep their temporal sequence.
                frames = [ os.path.join(item_path, f)
                           for f in sorted(os.listdir(item_path)) ]
                self.x.append(frames)
                self.y.append(FrameDataset.classes.index(cls))

    def __len__(self):
        """Number of videos (not frames) in the dataset."""
        return len(self.x)

    def __getitem__(self, idx):
        """Load all frames of video ``idx``.

        Returns:
            ``(frames, label)`` where frames is a (T, C-or-H, ...) tensor
            stacked from the per-frame transform output; in slowfast mode,
            ``(pathways, label, idx, dict())`` instead.

        Raises:
            FileNotFoundError: if a frame image cannot be read.
        """
        frame_paths = self.x[idx]
        frames = []
        for path in frame_paths:
            img = cv2.imread(path)
            if img is None:
                # cv2.imread signals failure by returning None; fail loudly
                # instead of crashing inside cvtColor with a cryptic error.
                raise FileNotFoundError(f"could not read frame: {path}")
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB).astype(np.float32)
            if self.transforms:
                img = self.transforms(image=img)['image']
            frames.append(img)
        labels = self.y[idx]
        frames = torch.stack(frames)
        if self.slowfast:
            # (T, C, H, W) -> (C, T, H, W), the layout PackPathway expects.
            frames = torch.permute(frames, (1, 0, 2, 3))
            frames = self.pack_pathway(frames)
            return frames, labels, idx, dict()
        return frames, labels
def main():
    """Smoke-test: build the training dataset and fetch one sample."""
    train_path = 'data/train/'
    valid_path = 'data/valid/'
    train_dataset = FrameDataset(train_path, get_transformer('valid'))
    print(len(train_dataset))
    print(len(train_dataset.x), len(train_dataset.y))
    batch = train_dataset[30]
    # imgs, lbls = batch
    # print(imgs.shape)


if __name__ == "__main__":
    main()