"""Training file for SMP-CAIL2020-Argmine.
Author: Yixu GAO [email protected]
Usage:
python -m torch.distributed.launch train.py \
--config_file 'config/bert_config.json'
CUDA_VISIBLE_DEVICES=0 python -m torch.distributed.launch train.py \
--config_file 'config/rnn_config.json'
"""
from typing import Dict

import argparse
import json
import os
from copy import deepcopy
from types import SimpleNamespace

import torch
from torch import nn
from torch.optim import Adam
from torch.utils.data import DataLoader, RandomSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers.optimization import (
    AdamW, get_linear_schedule_with_warmup, get_constant_schedule)

from data import Data
from evaluate import evaluate, calculate_accuracy_f1, get_labels_from_file
from model import BertForClassification, RnnForSentencePairClassification
from utils import get_csv_logger, get_path
from vocab import build_vocab
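
# Map each supported config.model_type to its model class.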
MODEL_MAP = {
    'bert': BertForClassification,
    'rnn': RnnForSentencePairClassification
}


class Trainer:
    """Trainer for SMP-CAIL2020-Argmine."""

    def __init__(self,
                 model, data_loader: Dict[str, DataLoader], device, config):
        """Initialize trainer with model, data, device, and config.

        Also initializes the optimizer, scheduler, and criterion.

        Args:
            model: model to be trained
            data_loader: dict of torch.utils.data.DataLoader
            device: torch.device('cuda') or torch.device('cpu')
            config:
                config.experiment_name: experiment name
                config.model_type: 'bert' or 'rnn'
                config.lr: learning rate for the optimizer
                config.num_epoch: number of training epochs
                config.num_warmup_steps: number of warm-up steps
                config.gradient_accumulation_steps: gradient accumulation steps
                config.max_grad_norm: max gradient norm for clipping
        """
        self.model = model
        self.device = device
        self.config = config
        self.data_loader = data_loader
        # len(DataLoader) is already the number of batches per epoch; the
        # scheduler steps once per optimizer update, hence the division by
        # the gradient accumulation window.
        self.config.num_training_steps = config.num_epoch * (
            len(data_loader['train']) // config.gradient_accumulation_steps)
        self.optimizer = self._get_optimizer()
        self.scheduler = self._get_scheduler()
        self.criterion = nn.CrossEntropyLoss()

    def _get_optimizer(self):
        """Get optimizer for different models.

        Returns:
            optimizer
        """
        if self.config.model_type == 'bert':
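            # Split the parameters into two groups so that bias and
            # LayerNorm (gamma/beta) weights are exempt from weight decay,
            # the usual practice when fine-tuning BERT.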
            no_decay = ['bias', 'gamma', 'beta']
            # AdamW reads per-group decay from the 'weight_decay' key.
            optimizer_parameters = [
                {'params': [p for n, p in self.model.named_parameters()
                            if not any(nd in n for nd in no_decay)],
                 'weight_decay': 0.01},
                {'params': [p for n, p in self.model.named_parameters()
                            if any(nd in n for nd in no_decay)],
                 'weight_decay': 0.0}]
            optimizer = AdamW(
                optimizer_parameters,
                lr=self.config.lr,
                betas=(0.9, 0.999),
                correct_bias=False)
        else:  # rnn
            optimizer = Adam(self.model.parameters(), lr=self.config.lr)
        return optimizer

    def _get_scheduler(self):
        """Get scheduler for different models.

        Returns:
            scheduler
        """
        if self.config.model_type == 'bert':
            scheduler = get_linear_schedule_with_warmup(
                self.optimizer,
                num_warmup_steps=self.config.num_warmup_steps,
                num_training_steps=self.config.num_training_steps)
        else:  # rnn
            scheduler = get_constant_schedule(self.optimizer)
        return scheduler

    def _evaluate_for_train_valid(self):
        """Evaluate model on train and valid set and get acc and f1 score.

        Returns:
            train_acc, train_f1, valid_acc, valid_f1
        """
        train_predictions = evaluate(
            model=self.model, data_loader=self.data_loader['valid_train'],
            device=self.device)
        valid_predictions = evaluate(
            model=self.model, data_loader=self.data_loader['valid_valid'],
            device=self.device)
        train_answers = get_labels_from_file(self.config.train_file_path)
        valid_answers = get_labels_from_file(self.config.valid_file_path)
        train_acc, train_f1 = calculate_accuracy_f1(
            train_answers, train_predictions)
        valid_acc, valid_f1 = calculate_accuracy_f1(
            valid_answers, valid_predictions)
        return train_acc, train_f1, valid_acc, valid_f1

    def _epoch_evaluate_update_description_log(
            self, tqdm_obj, logger, epoch):
        """Evaluate model and update logs for epoch.

        Args:
            tqdm_obj: tqdm/trange object with description to be updated
            logger: logging.logger
            epoch: int

        Returns:
            train_acc, train_f1, valid_acc, valid_f1
        """
        # Evaluate model for train and valid set
        results = self._evaluate_for_train_valid()
        train_acc, train_f1, valid_acc, valid_f1 = results
        # Update tqdm description for command line
        tqdm_obj.set_description(
            'Epoch: {:d}, train_acc: {:.6f}, train_f1: {:.6f}, '
            'valid_acc: {:.6f}, valid_f1: {:.6f}'.format(
                epoch, train_acc, train_f1, valid_acc, valid_f1))
        # Logging
        logger.info(','.join([str(epoch)] + [str(s) for s in results]))
        return train_acc, train_f1, valid_acc, valid_f1

    def save_model(self, filename):
        """Save model to file.

        Args:
            filename: file name
        """
        torch.save(self.model.state_dict(), filename)

    def train(self):
        """Train model on train set and evaluate on train and valid set.

        Returns:
            state dict of the best model with highest valid f1 score
        """
        epoch_logger = get_csv_logger(
            os.path.join(self.config.log_path,
                         self.config.experiment_name + '-epoch.csv'),
            title='epoch,train_acc,train_f1,valid_acc,valid_f1')
        step_logger = get_csv_logger(
            os.path.join(self.config.log_path,
                         self.config.experiment_name + '-step.csv'),
            title='step,loss')
        trange_obj = trange(self.config.num_epoch, desc='Epoch', ncols=120)
        self._epoch_evaluate_update_description_log(
            tqdm_obj=trange_obj, logger=epoch_logger, epoch=0)
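        # The call above logs the untrained model as an epoch-0 baseline;
        # the best state dict is then tracked by validation F1.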
        best_model_state_dict, best_valid_f1, global_step = None, 0, 0
        for epoch, _ in enumerate(trange_obj):
            self.model.train()
            tqdm_obj = tqdm(self.data_loader['train'], ncols=80)
            for step, batch in enumerate(tqdm_obj):
                batch = tuple(t.to(self.device) for t in batch)
                logits = self.model(*batch[:-1])  # the last one is label
                loss = self.criterion(logits, batch[-1])
                # Scale the loss so the accumulated gradient averages over
                # the accumulation window.
                if self.config.gradient_accumulation_steps > 1:
                    loss = loss / self.config.gradient_accumulation_steps
                loss.backward()
                if (step + 1) % self.config.gradient_accumulation_steps == 0:
                    torch.nn.utils.clip_grad_norm_(
                        self.model.parameters(), self.config.max_grad_norm)
                    self.optimizer.step()
                    self.scheduler.step()
                    # Zero gradients only after an optimizer update so they
                    # accumulate across the intermediate backward passes.
                    self.optimizer.zero_grad()
                    global_step += 1
                tqdm_obj.set_description('loss: {:.6f}'.format(loss.item()))
                step_logger.info(str(global_step) + ',' + str(loss.item()))
            results = self._epoch_evaluate_update_description_log(
                tqdm_obj=trange_obj, logger=epoch_logger, epoch=epoch + 1)
            self.save_model(os.path.join(
                self.config.model_path, self.config.experiment_name,
                self.config.model_type + '-' + str(epoch + 1) + '.bin'))
            if results[-1] > best_valid_f1:
                best_model_state_dict = deepcopy(self.model.state_dict())
                best_valid_f1 = results[-1]
        return best_model_state_dict


def main(config_file='config/bert_config.json'):
    """Main method for training.

    Args:
        config_file: in config dir
    """
    # 0. Load config and mkdir
    with open(config_file) as fin:
        config = json.load(fin, object_hook=lambda d: SimpleNamespace(**d))
    get_path(os.path.join(config.model_path, config.experiment_name))
    get_path(config.log_path)
    if config.model_type == 'rnn':  # build vocab for rnn
        build_vocab(file_in=config.all_train_file_path,
                    file_out=os.path.join(config.model_path, 'vocab.txt'))
    # 1. Load data
    data = Data(vocab_file=os.path.join(config.model_path, 'vocab.txt'),
                max_seq_len=config.max_seq_len,
                model_type=config.model_type)
    datasets = data.load_train_and_valid_files(
        train_file=config.train_file_path,
        valid_file=config.valid_file_path)
    train_set, valid_set_train, valid_set_valid = datasets
    if torch.cuda.is_available():
        device = torch.device('cuda')
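        # NCCL initialization reads RANK, WORLD_SIZE, MASTER_ADDR and
        # MASTER_PORT from the environment variables set by the
        # torch.distributed.launch wrapper shown in the module docstring.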
        torch.distributed.init_process_group(backend="nccl")
        sampler_train = DistributedSampler(train_set)
    else:
        device = torch.device('cpu')
        sampler_train = RandomSampler(train_set)
    data_loader = {
        'train': DataLoader(
            train_set, sampler=sampler_train, batch_size=config.batch_size),
        'valid_train': DataLoader(
            valid_set_train, batch_size=config.batch_size, shuffle=False),
        'valid_valid': DataLoader(
            valid_set_valid, batch_size=config.batch_size, shuffle=False)}
    # 2. Build model
    model = MODEL_MAP[config.model_type](config)
    model.to(device)
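    # find_unused_parameters=True lets DDP tolerate parameters that receive
    # no gradient in a given forward pass.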
    if torch.cuda.is_available():
        model = torch.nn.parallel.DistributedDataParallel(
            model, find_unused_parameters=True)
    # 3. Train
    trainer = Trainer(model=model, data_loader=data_loader,
                      device=device, config=config)
    best_model_state_dict = trainer.train()
    # 4. Save model
    torch.save(best_model_state_dict,
               os.path.join(config.model_path, 'model.bin'))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-c', '--config_file', default='config/bert_config.json',
        help='model config file')
    parser.add_argument(
        '--local_rank', type=int, default=0,
        help='used for distributed parallel')
    args = parser.parse_args()
    main(args.config_file)