train2.py (forked from andabi/deep-voice-conversion)
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import argparse

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf

from models import Model
from data_load import get_batch
import convert, eval2
import hparams as hp
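
# Stage-2 training script of the two-stage pipeline: it trains Net2 on top of a
# pretrained Net1. Net1 weights are restored from logdir1 and, judging from the
# optimizer's var_list below, only 'net/net2' variables are updated here. The
# feed_dict suggests Net2 maps MFCC frames (x_mfcc) to spectrogram (y_spec) and
# mel-spectrogram (y_mel) targets.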


def train(logdir1, logdir2, hparams):
    model = Model(mode="train2", hparams=hparams)

    # Loss
    loss_op = model.loss_net2()

    # Training Scheme
    global_step = tf.Variable(0, name='global_step', trainable=False)
    optimizer = tf.train.AdamOptimizer(learning_rate=hp.Train2.lr)
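    # Only variables under the 'net/net2' scope are handed to the optimizer, so the
    # pretrained Net1 weights restored below stay fixed throughout train2.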
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'net/net2')
        train_op = optimizer.minimize(loss_op, global_step=global_step, var_list=var_list)

    # Summary
    summ_op = summaries(loss_op)
    # session_conf = tf.ConfigProto(
    #     gpu_options=tf.GPUOptions(
    #         allow_growth=True,
    #         per_process_gpu_memory_fraction=0.6,
    #     ),
    # )
    session_conf = tf.ConfigProto()
    session_conf.gpu_options.per_process_gpu_memory_fraction = 0.9
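    # Reserves 90% of GPU memory up front; lower the fraction (or use the
    # allow_growth variant commented out above) if the GPU is shared with other jobs.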
    # Training
    with tf.Session(config=session_conf) as sess:
        # Initialize variables and restore the pretrained weights
        sess.run(tf.global_variables_initializer())
        model.load(sess, mode='train2', logdir=logdir1, logdir2=logdir2)

        writer = tf.summary.FileWriter(logdir2, sess.graph)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        for epoch in range(1, hp.Train2.num_epochs + 1):
            for step in range(model.num_batch):
                mfcc, spec, mel = get_batch(model.mode, model.batch_size)
                sess.run(train_op, feed_dict={model.x_mfcc: mfcc, model.y_spec: spec, model.y_mel: mel})

            # Write a loss summary at every epoch (uses the last batch of the epoch)
            summ, gs = sess.run([summ_op, global_step],
                                feed_dict={model.x_mfcc: mfcc, model.y_spec: spec, model.y_mel: mel})

            # Write checkpoint files and evaluate every n epochs
            if epoch % hp.Train2.save_per_epoch == 0:
                tf.train.Saver().save(sess, '{}/epoch_{}_step_{}'.format(logdir2, epoch, gs))

                # Eval every n epochs
                with tf.Graph().as_default():
                    eval2.eval(logdir2, hparams)

                # Convert every n epochs
                # with tf.Graph().as_default():
                #     convert.convert(logdir2, hparams)

            writer.add_summary(summ, global_step=gs)

        writer.close()
        coord.request_stop()
        coord.join(threads)


def summaries(loss):
    # for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'net/net2'):
    #     tf.summary.histogram(v.name, v)
    tf.summary.scalar('net2/train/loss', loss)
    return tf.summary.merge_all()


def get_arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument('-case1', type=str, default='default', help='experiment case name of train1')
    parser.add_argument('-case2', type=str, default='default', help='experiment case name of train2')
    parser.add_argument('-logdir', type=str, default='./logdir', help='tensorflow logdir, default: ./logdir')
    parser.add_argument('-data_path', type=str, default=hp.Train2.data_path,
                        help='training data path, default: {}'.format(hp.Train2.data_path))
    parser.add_argument('-batch_size', type=int, default=hp.Train2.batch_size,
                        help='batch size, default: {}'.format(hp.Train2.batch_size))
    parser.add_argument('-num_epochs', type=int, default=hp.Train2.num_epochs,
                        help='number of epochs, default: {}'.format(hp.Train2.num_epochs))
    parser.add_argument('-save_per_epoch', type=int, default=hp.Train2.save_per_epoch,
                        help='save model every n epochs, default: {}'.format(hp.Train2.save_per_epoch))
    arguments = parser.parse_args()
    return arguments


if __name__ == '__main__':
    args = get_arguments()
    logdir1 = '{}/{}/train1'.format(args.logdir, args.case1)
    logdir2 = '{}/{}/train2'.format(args.logdir, args.case2)

    hp.Train2.batch_size = args.batch_size
    hp.Train2.num_epochs = args.num_epochs
    hp.Train2.save_per_epoch = args.save_per_epoch
    hp.Train2.data_path = args.data_path

    train(logdir1=logdir1, logdir2=logdir2, hparams=hp)

    print("Done")