import torch
import torch.nn as nn

from config import config

# Dictionary holding intermediate activations captured by forward hooks
activation = {}


def get_activation(name):
    """Return a forward hook that stores the layer's output under `name`."""
    def hook(model, input, output):
        activation[name] = output.detach()
    return hook
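
# Example usage (a sketch; the hook works with any module and any key):
#   model.conv_layers[2].register_forward_hook(get_activation('first_conv_layer'))
#   _ = model(batch)                       # the forward pass fills the dict
#   fmap = activation['first_conv_layer']  # detached feature maps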


# Normalize the image from uint8 (0, 255) to float32 (-1.0, 1.0).
# This operation is included in the model instead of the data loader
# to take advantage of the GPU.
class Normalize(nn.Module):
    def forward(self, x):
        return x / 127.5 - 1.0


class NvidiaModel(nn.Module):
    """PilotNet-style CNN regressor: a 3x66x200 input yields a single scalar output."""

    def __init__(self):
        super().__init__()
        # define the convolutional layers using nn.Sequential
        self.conv_layers = nn.Sequential(
            # first convolutional layer
            nn.Conv2d(3, 24, kernel_size=5, stride=2),
            nn.BatchNorm2d(24),
            nn.ReLU(),
            # second convolutional layer
            nn.Conv2d(24, 36, kernel_size=5, stride=2),
            nn.BatchNorm2d(36),
            nn.ReLU(),
            # third convolutional layer
            nn.Conv2d(36, 48, kernel_size=5, stride=2),
            nn.BatchNorm2d(48),
            nn.ReLU(),
            # fourth convolutional layer
            nn.Conv2d(48, 64, kernel_size=3),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            # fifth convolutional layer
            nn.Conv2d(64, 64, kernel_size=3),
            nn.BatchNorm2d(64),
            nn.ReLU(),
        )

        # Optionally capture the activations after the first two ReLUs
        # (indices 2 and 5 in the Sequential) for image logging
        if config.is_image_logging_enabled:
            self.conv_layers[2].register_forward_hook(get_activation('first_conv_layer'))
            self.conv_layers[5].register_forward_hook(get_activation('second_conv_layer'))

        self.flat_layers = nn.Sequential(
            # flatten: a 3x66x200 input leaves the conv stack as 64x1x18 = 1152 features
            nn.Flatten(),
            nn.Dropout(p=0.5),
            # first fully connected layer
            nn.Linear(1152, 1164),
            nn.BatchNorm1d(1164),
            nn.ReLU(),
            # second fully connected layer
            nn.Linear(1164, 100),
            nn.BatchNorm1d(100),
            nn.ReLU(),
            # third fully connected layer
            nn.Linear(100, 50),
            nn.BatchNorm1d(50),
            nn.ReLU(),
            # fourth fully connected layer
            nn.Linear(50, 10),
            # output layer
            nn.Linear(10, 1)
        )

    def forward(self, x):
        x = self.conv_layers(x)
        x = self.flat_layers(x)
        # squeeze only the last dim so a batch of size 1 keeps its batch axis
        return x.squeeze(-1)
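
# A minimal forward-pass sketch (assumes a 66x200 RGB input, the size for which
# the 1152-unit flatten above is derived; other sizes will not match):
#   model = NvidiaModel()
#   model.eval()                               # use BatchNorm running stats
#   out = model(torch.zeros(4, 3, 66, 200))    # out.shape == (4,)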


class NvidiaModelTransferLearning(nn.Module):
    def __init__(self, resnet):
        super().__init__()
        # Use the pretrained ResNet model as the convolutional feature extractor;
        # it must emit 512 features per image (e.g. ResNet-18/34 without the fc head)
        self.conv_layers = resnet

        # Define the fully connected head as before, but starting from 512 features
        self.flat_layers = nn.Sequential(
            # flatten
            nn.Flatten(),
            nn.Dropout(p=0.5),
            # first fully connected layer
            nn.Linear(512, 1164),
            nn.BatchNorm1d(1164),
            nn.ReLU(),
            # second fully connected layer
            nn.Linear(1164, 100),
            nn.BatchNorm1d(100),
            nn.ReLU(),
            # third fully connected layer
            nn.Linear(100, 50),
            nn.BatchNorm1d(50),
            nn.ReLU(),
            # fourth fully connected layer
            nn.Linear(50, 10),
            nn.BatchNorm1d(10),
            nn.ReLU(),
            # output layer
            nn.Linear(10, 1)
        )

    def forward(self, x):
        x = self.conv_layers(x)
        x = self.flat_layers(x)
        # squeeze only the last dim so a batch of size 1 keeps its batch axis
        return x.squeeze(-1)
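

# A minimal smoke test (a sketch: it assumes the config module is importable
# and torchvision is installed; a ResNet-18 with its fc head replaced by
# Identity is one backbone that yields the 512 features the head above expects).
if __name__ == "__main__":
    from torchvision.models import resnet18

    backbone = resnet18(weights=None)
    backbone.fc = nn.Identity()  # expose the 512-d pooled features

    model = NvidiaModelTransferLearning(backbone)
    model.eval()  # BatchNorm uses running stats, so a tiny dummy batch works
    out = model(torch.zeros(2, 3, 66, 200))
    print(out.shape)  # expected: torch.Size([2])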