base_args.py
"""Shared command-line arguments for training scripts."""
import argparse

from src.utils.logger import Logger


class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
                      argparse.RawDescriptionHelpFormatter):
    """Help formatter that shows argument defaults and keeps raw description text."""
    pass


class CustomArgumentParser(argparse.ArgumentParser):
    """ArgumentParser that uses CustomFormatter with a wider help column."""

    def __init__(self, *args, **kwargs):
        super().__init__(
            formatter_class=lambda prog: CustomFormatter(
                prog, max_help_position=40),
            *args, **kwargs)


# Parent parser meant to be composed into script-specific parsers via
# `parents=[base_parser]`; `add_help=False` avoids a duplicate -h/--help.
base_parser = argparse.ArgumentParser(description='', add_help=False)

data_args = base_parser.add_argument_group('Data proportions',
                                           'Data proportions to use when '
                                           'training and validating')

data_args.add_argument('-tdp', '--train_data_proportion', type=float,
                       default=1.0, metavar='',
                       help='Proportion of the training data to use.')

data_args.add_argument('-ddp', '--dev_data_proportion', type=float,
                       default=1.0, metavar='',
                       help='Proportion of the validation data to use.')

training_args = base_parser.add_argument_group('Training Hyperparameters',
                                               'Hyperparameters specific to '
                                               'the training procedure, and '
                                               'unrelated to the NN '
                                               'architecture')

training_args.add_argument('--epochs', type=int, default=10, metavar='',
                           help='Number of epochs for training')

training_args.add_argument('--batch_size', type=int, default=64, metavar='',
                           help='Size of the minibatch to use for training '
                                'and validation')

optim_args = base_parser.add_argument_group('Optimizer parameters')

optim_args.add_argument('-lr', '--learning_rate', type=float, default=2e-4,
                        metavar='',
                        help='Initial learning rate')

optim_choices = ['sgd', 'adagrad', 'adadelta', 'adam', 'rmsprop']
optim_args.add_argument('--optim', type=str, default='adam',
                        choices=optim_choices,
                        help='Optimizer to use')

optim_args.add_argument('-gc', '--grad_clipping', type=float, default=5.0,
                        metavar='',
                        help='Gradients are clipped to this value each time '
                             'the optimizer takes a step')

optim_args.add_argument('-ulr', '--update_learning_rate',
                        action='store_true',
                        help='Whether to update/decay the learning rate. '
                             'If you pass this flag, make sure to also set '
                             'the learning rate decay')

optim_args.add_argument('-lrd', '--learning_rate_decay', type=float,
                        default=1.0, metavar='',
                        help='Factor by which the current learning rate is '
                             'multiplied each time it is decayed')

optim_args.add_argument('-sda', '--start_decay_at', type=int,
                        default=2, metavar='',
                        help='Epoch at which the learning rate decay should '
                             'begin')

optim_args.add_argument('-de', '--decay_every', type=int, default=2,
                        metavar='',
                        help='Frequency, in epochs, at which the learning '
                             'rate should be decayed')

misc_args = base_parser.add_argument_group('Miscellaneous')

misc_args.add_argument('--write_mode', type=str,
                       choices=Logger.WRITE_MODES, default='NONE',
                       help='Mode for saving hyperparameters and results')

misc_args.add_argument('--save_model', action='store_true',
                       help='Whether to save the best and last model '
                            'checkpoints')

misc_args.add_argument('--no_cuda', action='store_true',
                       help='Force the use of the CPU even if a GPU is '
                            'available')

misc_args.add_argument('--log_interval', type=int, default=50, metavar='',
                       help='Number of iterations between training loss '
                            'log entries')

misc_args.add_argument('--seed', type=int, default=43, metavar='',
                       help='Random seed to use for torch initializations')