Model.py
# import useful libraries
import copy

import torch
import torch.nn as nn

from Encoder import EncoderLayer
from Decoder import DecoderLayer
from Embedding import Embedding
from PositionalEncoding import PositionalEncoder
from Normalization import Normalization

# Release cached GPU memory if a CUDA device is available.
if torch.cuda.is_available():
    torch.cuda.empty_cache()


# Make N independent deep copies of the given module.
def get_clones(module, N):
    return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])


class Encoder(nn.Module):
    '''Transformer encoder built from the defined sublayers:
    embedding, positional encoding, N stacked encoder layers,
    and a final normalization.'''

    def __init__(self, vocab_size, d_model, N, heads, dropout):
        super().__init__()
        self.N = N
        self.embed = Embedding(vocab_size, d_model)
        self.pe = PositionalEncoder(d_model, dropout=dropout)
        self.layers = get_clones(EncoderLayer(d_model, heads, dropout), N)
        self.norm = Normalization(d_model)

    def forward(self, src, mask):
        x = self.embed(src)
        x = self.pe(x)
        for i in range(self.N):
            x = self.layers[i](x, mask)
        return self.norm(x)
class Decoder(nn.Module):
    '''Transformer decoder built from the defined sublayers:
    embedding, positional encoding, N stacked decoder layers,
    and a final normalization.'''

    def __init__(self, vocab_size, d_model, N, heads, dropout):
        super().__init__()
        self.N = N
        self.embed = Embedding(vocab_size, d_model)
        self.pe = PositionalEncoder(d_model, dropout=dropout)
        self.layers = get_clones(DecoderLayer(d_model, heads, dropout), N)
        self.norm = Normalization(d_model)

    def forward(self, trg, e_outputs, src_mask, trg_mask):
        x = self.embed(trg)
        x = self.pe(x)
        for i in range(self.N):
            x = self.layers[i](x, e_outputs, src_mask, trg_mask)
        return self.norm(x)


class Transformer(nn.Module):
    '''Full encoder-decoder transformer with a final linear projection
    from d_model onto the target vocabulary.'''

    def __init__(self, src_vocab, trg_vocab, d_model, N, heads, dropout):
        super().__init__()
        self.encoder = Encoder(src_vocab, d_model, N, heads, dropout)
        self.decoder = Decoder(trg_vocab, d_model, N, heads, dropout)
        self.out = nn.Linear(d_model, trg_vocab)

    def forward(self, src, trg, src_mask, trg_mask):
        e_outputs = self.encoder(src, src_mask)
        d_output = self.decoder(trg, e_outputs, src_mask, trg_mask)
        output = self.out(d_output)
        return output


# Build the transformer from command-line options, load pretrained weights
# if requested, and move the model to the chosen device.
def get_model(opt, src_vocab, trg_vocab):
    assert opt.d_model % opt.heads == 0
    assert opt.dropout < 1

    model = Transformer(src_vocab, trg_vocab, opt.d_model, opt.n_layers,
                        opt.heads, opt.dropout)

    if opt.load_weights is not None:
        print("loading pretrained weights...")
        model.load_state_dict(torch.load(opt.load_weights + '/model_weights'))
    else:
        # Xavier-initialize all weight matrices; skip biases and other 1-D parameters.
        for p in model.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    if opt.device == "cuda":
        model = model.cuda()
    return model
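

# --- Usage sketch (illustrative only) ---
# A minimal example of wiring get_model and Transformer.forward together.
# The option values, vocabulary sizes, and mask construction below are
# assumptions for demonstration; the project's real training script may
# build its options and masks differently.
if __name__ == "__main__":
    from argparse import Namespace

    opt = Namespace(d_model=512, n_layers=6, heads=8, dropout=0.1,
                    load_weights=None, device="cpu")
    src_vocab, trg_vocab = 1000, 1200  # dummy vocabulary sizes
    model = get_model(opt, src_vocab, trg_vocab)

    # Dummy batch: 2 source sentences of length 10, 2 target prefixes of length 9.
    src = torch.randint(1, src_vocab, (2, 10))
    trg = torch.randint(1, trg_vocab, (2, 9))

    # Padding mask over the source and a causal ("no peek") mask over the
    # target, assuming pad index 0 and broadcastable mask shapes.
    src_mask = (src != 0).unsqueeze(-2)              # (2, 1, 10)
    nopeak = torch.tril(torch.ones(1, 9, 9)).bool()  # (1, 9, 9)
    trg_mask = (trg != 0).unsqueeze(-2) & nopeak     # (2, 9, 9)

    logits = model(src, trg, src_mask, trg_mask)
    print(logits.shape)  # expected: torch.Size([2, 9, 1200])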