"""Dataloaders that pair book-cover images with sentence embeddings of book titles."""
import pickle

import nltk
import pandas as pd
import torch
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms

# Not used directly in this file; the sentence encoders presumably build on InferSent.
from InferSent.models import InferSent
from sentence_encoder import SentenceEmbedding, SentenceEmbeddingGlove


class CombinedBookDataset(Dataset):
    """
    Dataset of book covers and title embeddings (InferSent-style sentence encodings).
    Each item is ((cover_tensor, title_embedding), class_label).
    """

    def __init__(self, csv_file, datasetTransform, transform=None):
        cols = ["index", "filename", "url", "title", "author", "class", "class_name"]
        self.dataset = pd.read_csv(csv_file, header=None, names=cols, encoding="ISO-8859-1")
        self.titles = datasetTransform.transform_titles(self.dataset)
        self.transform = transform
        # Build the list of class names, ordered by class id.
        df = self.dataset.drop_duplicates(subset="class", keep="last").sort_values(by=["class"])
        self.classes = df["class_name"].tolist()

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        img_name = "dataset/covers/" + self.dataset.iloc[idx, 1]
        # Convert to RGB so grayscale/CMYK covers still yield 3-channel tensors.
        cover = Image.open(img_name).convert("RGB")
        line = self.dataset.iloc[idx]
        title = self.titles[idx]
        label = line["class"]
        if self.transform:
            cover = self.transform(cover)
        return ((cover, torch.from_numpy(title).float()), label)
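
# Illustrative usage (a sketch, not part of the pipeline; the CSV path and the
# FastText embedder mirror the defaults used further down in this file):
#
#     emb = SentenceEmbedding(["dataset/train_set_cleaned.csv"])
#     ds = CombinedBookDataset("dataset/train_set_cleaned.csv", datasetTransform=emb)
#     (cover, title_vec), label = ds[0]
#     # cover: PIL image (or tensor if a transform was given), title_vec: float tensor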


def create_combined_data_loaders(train_csv_file, val_csv_file, test_csv_file, batch_size,
                                 num_workers=1, word_emb="FastText"):
    """
    Create the train/val/test dataloaders; returns None if word_emb is not recognised.
    """
    # Standard ImageNet preprocessing: random augmentation for training,
    # deterministic resize + centre crop for evaluation.
    data_transforms = {
        'train': transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        'val': transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        'test': transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
    }
    # The sentence embedder is built from all three splits, presumably so its
    # vocabulary covers every title.
    if word_emb == "FastText":
        datasetTransform = SentenceEmbedding([train_csv_file, val_csv_file, test_csv_file])
    elif word_emb == "Glove":
        datasetTransform = SentenceEmbeddingGlove([train_csv_file, val_csv_file, test_csv_file])
    else:
        return None
    print("creating datasets")
    train_set = CombinedBookDataset(train_csv_file, datasetTransform=datasetTransform,
                                    transform=data_transforms["train"])
    val_set = CombinedBookDataset(val_csv_file, datasetTransform=datasetTransform,
                                  transform=data_transforms["val"])
    test_set = CombinedBookDataset(test_csv_file, datasetTransform=datasetTransform,
                                   transform=data_transforms["test"])
    print("creating dataloaders")
    data_loaders = {
        "train": DataLoader(train_set, batch_size=batch_size, shuffle=True,
                            num_workers=num_workers),
        "val": DataLoader(val_set, batch_size=batch_size, shuffle=True,
                          num_workers=num_workers),
        "test": DataLoader(test_set, batch_size=batch_size, shuffle=True,
                           num_workers=num_workers),
    }
    return data_loaders
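
# Batches unpack as ((covers, title_embeddings), labels) -- e.g. (a sketch, with
# hypothetical CSV paths; the title embedding width depends on the encoder used):
#
#     loaders = create_combined_data_loaders("train.csv", "val.csv", "test.csv", batch_size=4)
#     for (covers, titles), labels in loaders["train"]:
#         ...  # covers: (4, 3, 224, 224), titles: (4, embedding_dim), labels: (4,)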


def save_combined_data_loaders(pickle_file_name, batch_size, num_workers=0, word_emb="FastText"):
    """
    Save the dataloaders
    """
    train_csv_path = "dataset/train_set_cleaned.csv"
    val_csv_path = "dataset/validation_set_cleaned.csv"
    test_csv_path = "dataset/book30-listing-test_cleaned.csv"
    data_loaders = create_combined_data_loaders(train_csv_path, val_csv_path, test_csv_path,
                                                batch_size, num_workers, word_emb)
    if data_loaders:
        print("pickling dataloaders")
        with open(pickle_file_name, "wb") as fp:
            pickle.dump(data_loaders, fp)
    else:
        print("Invalid word_emb arg")


def save_combined_10_classes_data_loaders(pickle_file_name, batch_size, num_workers=0, word_emb="FastText"):
    """
    Save the dataloaders for the 10-class dataset
    """
    train_csv_path = "dataset/train_set_cleaned_10.csv"
    val_csv_path = "dataset/validation_set_cleaned_10.csv"
    test_csv_path = "dataset/book30-listing-test_cleaned_10.csv"
    data_loaders = create_combined_data_loaders(train_csv_path, val_csv_path, test_csv_path,
                                                batch_size, num_workers, word_emb)
    if data_loaders:
        print("pickling dataloaders")
        with open(pickle_file_name, "wb") as fp:
            pickle.dump(data_loaders, fp)
    else:
        print("Invalid word_emb arg")


if __name__ == "__main__":
    BATCH_SIZES = [4, 8, 16, 32, 64]
    nltk.download('punkt')  # tokenizer models, presumably used by the sentence encoders
    for batch_size in BATCH_SIZES:
        pickle_file_name = "dataloaders/combined_data_loaders_{}.pickle".format(batch_size)
        save_combined_data_loaders(pickle_file_name, batch_size, 0)
        pickle_file_name = "dataloaders/combined_data_loaders_{}_10.pickle".format(batch_size)
        save_combined_10_classes_data_loaders(pickle_file_name, batch_size)