Commit

final submission for the challenge

yashbhalgat committed Mar 31, 2019
1 parent dd069c2 commit bb2a18f
Showing 17 changed files with 890 additions and 57 deletions.
2 changes: 2 additions & 0 deletions .gitignore
@@ -1,3 +1,5 @@
*.jpg
*.tar.xz
*.pt
*.pkl
*.csv
Binary file added 428_525613_cf_Codes.zip
Binary file added 428_663089_cf_Scene_Challenge.zip
1 change: 1 addition & 0 deletions best_submissions/.~lock.all_submissions_ranked.csv#
@@ -0,0 +1 @@
,yash,yash-XPS-15-9560,31.03.2019 14:07,file:///home/yash/.config/libreoffice/4;
125 changes: 125 additions & 0 deletions code/MobileNetV2.py
@@ -0,0 +1,125 @@
import torch.nn as nn
import math


def conv_bn(inp, oup, stride):
    return nn.Sequential(
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU6(inplace=True)
    )


def conv_1x1_bn(inp, oup):
    return nn.Sequential(
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU6(inplace=True)
    )


class InvertedResidual(nn.Module):
    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]

        hidden_dim = round(inp * expand_ratio)
        self.use_res_connect = self.stride == 1 and inp == oup

        if expand_ratio == 1:
            self.conv = nn.Sequential(
                # dw
                nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU6(inplace=True),
                # pw-linear
                nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup),
            )
        else:
            self.conv = nn.Sequential(
                # pw
                nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU6(inplace=True),
                # dw
                nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU6(inplace=True),
                # pw-linear
                nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup),
            )

    def forward(self, x):
        if self.use_res_connect:
            return x + self.conv(x)
        else:
            return self.conv(x)


class MobileNetV2(nn.Module):
    def __init__(self, n_class=1000, input_size=224, width_mult=1.):
        super(MobileNetV2, self).__init__()
        block = InvertedResidual
        input_channel = 32
        last_channel = 1280
        inverted_residual_setting = [
            # t, c, n, s
            [1, 16, 1, 1],
            [6, 24, 2, 2],
            [6, 32, 3, 2],
            [6, 64, 4, 2],
            [6, 96, 3, 1],
            [6, 160, 3, 2],
            [6, 320, 1, 1],
        ]

        # building first layer
        assert input_size % 32 == 0
        input_channel = int(input_channel * width_mult)
        self.last_channel = int(last_channel * width_mult) if width_mult > 1.0 else last_channel
        self.features = [conv_bn(3, input_channel, 2)]
        # building inverted residual blocks
        for t, c, n, s in inverted_residual_setting:
            output_channel = int(c * width_mult)
            for i in range(n):
                if i == 0:
                    self.features.append(block(input_channel, output_channel, s, expand_ratio=t))
                else:
                    self.features.append(block(input_channel, output_channel, 1, expand_ratio=t))
                input_channel = output_channel
        # building last several layers
        self.features.append(conv_1x1_bn(input_channel, self.last_channel))
        # make it nn.Sequential
        self.features = nn.Sequential(*self.features)

        # building classifier
        self.classifier = nn.Sequential(
            nn.Dropout(0.2),
            nn.Linear(self.last_channel, n_class),
        )

        self._initialize_weights()

    def forward(self, x):
        x = self.features(x)
        x = x.mean(3).mean(2)  # global average pooling over H and W
        x = self.classifier(x)
        return x

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
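
A minimal usage sketch for the class above (not part of the commit): it assumes the 6-class setup used elsewhere in this repository and checks that a dummy batch flows through.

import torch

# hypothetical smoke test: 2 RGB images at 224x224 -> 6 class logits
model = MobileNetV2(n_class=6, input_size=224, width_mult=1.)
model.eval()
with torch.no_grad():
    logits = model(torch.randn(2, 3, 224, 224))
print(logits.shape)  # expected: torch.Size([2, 6])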
Binary file modified code/__pycache__/fine_tuning_config_file.cpython-36.pyc
71 changes: 71 additions & 0 deletions code/check_validation.py
@@ -0,0 +1,71 @@
import torch
import torch.hub
import pretrainedmodels
import torch.nn as nn
import torch.utils.data as data
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision.transforms as transforms
import numpy as np
from PIL import Image
import os
import pdb
import pickle


class ImageFolderWithPaths(datasets.ImageFolder):
    """Custom dataset that includes image file paths. Extends
    torchvision.datasets.ImageFolder.
    """

    # override the __getitem__ method; this is the method the dataloader calls
    def __getitem__(self, index):
        # this is what ImageFolder normally returns
        original_tuple = super(ImageFolderWithPaths, self).__getitem__(index)
        # the image file path
        path = self.imgs[index][0]
        # make a new tuple that includes the original fields plus the path
        tuple_with_path = (original_tuple + (path,))
        return tuple_with_path


inp_size = 331
data_transforms = transforms.Compose([
    transforms.Resize(inp_size),
    transforms.CenterCrop(inp_size),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

data_dir = "../imgs_resized/"

dsets = {x: ImageFolderWithPaths(os.path.join(data_dir, x), data_transforms) for x in ['val']}

dset_loaders = {x: torch.utils.data.DataLoader(dsets[x], batch_size=1, shuffle=True, num_workers=25) for x in ['val']}

dset_sizes = {x: len(dsets[x]) for x in ['val']}

#model = models.resnet18(num_classes=6)
#model.load_state_dict(torch.load('fine_tuned_best_model.pt'))

model = pretrainedmodels.xception(num_classes=1000)
num_ftrs = model.last_linear.in_features
model.last_linear = nn.Linear(num_ftrs, 6)
model.load_state_dict(torch.load('best_model_xception_cutout_aug.pt'))

#model = torch.hub.load('moskomule/senet.pytorch', 'se_resnet20', num_classes=6)
#model.load_state_dict(torch.load('best_model_senet20_aug_nofreeze.pt'))

model.cuda()
model.eval()

# print every validation image the model gets wrong
smax = nn.Softmax(dim=1)
with torch.no_grad():  # replaces the deprecated Variable(..., volatile=True) idiom
    for image, label, path in dset_loaders['val']:
        y_pred = model(image.cuda())
        smax_out = smax(y_pred)[0]
        pred = np.argmax(smax_out.cpu().numpy()).item()
        label = label.item()

        if pred != label:
            print("path:", path, "pred:", pred, "label:", label)
98 changes: 98 additions & 0 deletions code/create_features.py
@@ -0,0 +1,98 @@
### Section 1 - First, let's import everything we will be needing.

from __future__ import print_function, division
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import copy
import os
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
from fine_tuning_config_file import *
import pdb
from tqdm import tqdm

### Non-deep-learning classifier
from sklearn.svm import NuSVC
from sklearn.metrics import accuracy_score

use_gpu = GPU_MODE
if use_gpu:
    torch.cuda.set_device(CUDA_DEVICE)

count = 0

data_transforms = {
    'train': transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val': transforms.Compose([
        transforms.Resize(224),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}


data_dir = DATA_DIR
dsets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
         for x in ['train', 'val']}
dset_loaders = {x: torch.utils.data.DataLoader(dsets[x], batch_size=BATCH_SIZE,
                                               shuffle=True, num_workers=25)
                for x in ['train', 'val']}
dset_sizes = {x: len(dsets[x]) for x in ['train', 'val']}
dset_classes = dsets['train'].classes


### SECTION 3 : Extract penultimate-layer features for every image in a phase.
def create_features(model, phase="train"):
    model.eval()

    for i, data in tqdm(enumerate(dset_loaders[phase])):
        inputs, labels = data
        if use_gpu:
            inputs = Variable(inputs.float().cuda())
        else:
            print("Use a GPU!")

        features_var = model(inputs)
        # (batch, channels, 1, 1) -> (batch, channels); assumes BATCH_SIZE > 1
        features = features_var.squeeze().cpu().data.numpy()

        if i == 0:
            X = features
            Y = labels.numpy()
        else:
            X = np.concatenate((X, features), axis=0)
            Y = np.concatenate((Y, labels), axis=0)

    return X, Y


# resnet18 trunk without its final fc layer, frozen, used as a feature extractor
model_ft = models.resnet18(pretrained=False, num_classes=6)
model_ft.load_state_dict(torch.load('best_model_resnet18_aug.pt'))

my_model = nn.Sequential(*list(model_ft.children())[:-1])
for param in my_model.parameters():
    param.requires_grad = False

if use_gpu:
    model_ft.cuda()  # my_model shares these modules, so it moves to the GPU too

train_x, train_y = create_features(my_model, phase="train")
val_x, val_y = create_features(my_model, phase="val")

pdb.set_trace()

clf = NuSVC(gamma='scale', verbose=True)
clf.fit(train_x, train_y)
predictions = clf.predict(val_x)
print("Val accuracy:", accuracy_score(val_y, predictions))
46 changes: 35 additions & 11 deletions code/create_submission.py
@@ -9,40 +9,64 @@
 
 csv_map = {}
 
-pkl_file = open('../dumps/dump_xception_cutout_aug.pkl', 'rb')
+pkl_file = open('../dumps/dump_fastai_incepres2_full.pkl', 'rb')
 dump0 = pickle.load(pkl_file)
 pkl_file.close()
 
-pkl_file = open('../dumps/dump_resnet50_cutout_aug.pkl', 'rb')
+pkl_file = open('../dumps/dump_fastai_wrn_full.pkl', 'rb')
 dump1 = pickle.load(pkl_file)
 pkl_file.close()
 
-pkl_file = open('../dumps/dump_alexnet_cutout_aug.pkl', 'rb')
+pkl_file = open('../dumps/dump_nasnet_cutout_aug.pkl', 'rb')
 dump2 = pickle.load(pkl_file)
 pkl_file.close()
 
-pkl_file = open('../dumps/dump_squeeze1_1_cutout_aug.pkl', 'rb')
+pkl_file = open('../dumps/dump_resnext101_32_cutout_aug.pkl', 'rb')
 dump3 = pickle.load(pkl_file)
 pkl_file.close()
 
-pkl_file = open('../dumps/dump_nasnet_cutout_aug.pkl', 'rb')
+pkl_file = open('../dumps/dump_dense161_cutout_aug.pkl', 'rb')
 dump4 = pickle.load(pkl_file)
 pkl_file.close()
 
-#pkl_file = open('../dumps/dump_resnet34_aug_nofreeze.pkl', 'rb')
-#dump5 = pickle.load(pkl_file)
-#pkl_file.close()
+pkl_file = open('../dumps/dump_fastai_res152_v2_full.pkl', 'rb')
+dump5 = pickle.load(pkl_file)
+pkl_file.close()
+
+pkl_file = open('../dumps/dump_fastai_resnext10164_full.pkl', 'rb')
+dump6 = pickle.load(pkl_file)
+pkl_file.close()
+
+pkl_file = open('../dumps/dump_fastai_res152.pkl', 'rb')
+dump7 = pickle.load(pkl_file)
+pkl_file.close()
+
+pkl_file = open('../dumps/dump_fastai_incep4_full.pkl', 'rb')
+dump8 = pickle.load(pkl_file)
+pkl_file.close()
+
+pkl_file = open('../dumps/dump_fastai_dn161.pkl', 'rb')
+dump9 = pickle.load(pkl_file)
+pkl_file.close()
+
+pkl_file = open('../dumps/dump_fastai_res152_full.pkl', 'rb')
+dump10 = pickle.load(pkl_file)
+pkl_file.close()
 
-for fnum in dump1:
-    avg_arr = (dump0[fnum]+dump1[fnum]+dump2[fnum]+dump3[fnum]+dump4[fnum])/5.0
+for fnum in dump0:
+    #avg_arr = (dump0[fnum]+dump1[fnum]+2*dump2[fnum]+dump3[fnum]+6*dump4[fnum]+dump5[fnum]+dump6[fnum]+dump7[fnum]+dump8[fnum]+6*dump9[fnum])/13.0
+    avg_arr = (dump4[fnum]+dump5[fnum]+dump6[fnum]+dump7[fnum]+dump8[fnum]+dump9[fnum]+dump10[fnum])/7.0  # mean of the 7 dumps used
+    #avg_arr = dump0[fnum]
     y_pred = torch.from_numpy(avg_arr)
     smax = nn.Softmax()
     smax_out = smax(y_pred)
     c = np.argmax(smax_out.data).item()
     csv_map[fnum] = c
     #print(fnum, ": ", c)
 
-with open("../submissions/submission_xcep_res50_alex_squ_nas_allcut.csv", 'w') as csvfile:
+#with open("../submissions/submission_xcep_wrn_full_2_nas_resnext32_6_dense161_res152_v2_full_resnext64_full_res152_incep4_v2_full_fast_3_dn161.csv", 'w') as csvfile:
+with open("../submissions/submission_dense161_cut_res152_v2_full_resnext64_full_res152_incep4_full_fast_dn161_res152_full.csv", 'w') as csvfile:
+#with open("../submissions/submission_xcep_cutout_full.csv", 'w') as csvfile:
     fieldnames = ['image_name', 'label']
     csvfile.write('image_name,label')
     csvfile.write('\n')
… (remainder of this diff not shown)
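
The repeated open/load/close blocks and the hand-written average in the diff above generalize naturally to a loop. A sketch (the dump paths are illustrative, taken from the list already in the diff):

# sketch: soft-voting ensemble over an arbitrary list of probability dumps
import pickle
import numpy as np

dump_paths = [
    '../dumps/dump_dense161_cutout_aug.pkl',
    '../dumps/dump_fastai_res152_v2_full.pkl',
    '../dumps/dump_fastai_resnext10164_full.pkl',
    # ... remaining dumps from the list above
]

dumps = []
for p in dump_paths:
    with open(p, 'rb') as f:
        dumps.append(pickle.load(f))

csv_map = {}
for fnum in dumps[0]:
    avg_arr = sum(d[fnum] for d in dumps) / float(len(dumps))
    # softmax is monotonic, so argmax of the mean scores is enough
    csv_map[fnum] = int(np.argmax(avg_arr))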
