train.py
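"""Training entry point.

Loads two folders of wav files, converts them to normalized spectrograms,
and trains a generator/critic/siamese triple with adversarial, TraVeL, and
identity losses. See `setup_training_args` for all hyperparameters.
"""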
import argparse
import time
from typing import Any

import numpy as np
import tensorflow as tf

from training.training_loop import train_d, train_all
from utils.common import Common_helpers
from utils.training_utils import Training_helpers

class EasyDict(dict):
    """Convenience class that behaves like a dict but allows access with the attribute syntax."""

    def __getattr__(self, name: str) -> Any:
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name: str, value: Any) -> None:
        self[name] = value

    def __delattr__(self, name: str) -> None:
        del self[name]
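
# Usage sketch: attribute access and item access hit the same storage, e.g.
#   cfg = EasyDict(); cfg.lr = 1e-4; assert cfg['lr'] == cfg.lr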

def setup_training_args(args):
    parser = argparse.ArgumentParser()
    parser.add_argument("--awv_path", type=str, default='./data/cmu_us_bdl_arctic/wav',
                        help="Path to folder containing source speaker wav files")
    parser.add_argument("--bwv_path", type=str, default='./data/cmu_us_clb_arctic/wav',
                        help="Path to folder containing target speaker wav files")
    parser.add_argument("--dest_path", type=str, default='./Results/',
                        help="Destination path to save network weights")
    parser.add_argument("--model_path", type=str, default=None,
                        help="Path to previously saved network weights (resume training)")
    parser.add_argument("--id_loss_weight", type=float, default=0.5,
                        help="Weight for identity loss")
    parser.add_argument("--travel_loss_weight", type=float, default=10.,
                        help="Weight for TraVeL loss")
    parser.add_argument("--hop", type=int, default=192,
                        help="Hop size (window size = 6*hop)")
    parser.add_argument("--sr", type=int, default=16000,
                        help="Sampling rate")
    parser.add_argument("--min_level_db", type=int, default=-100,
                        help="Minimum level (dB) used to normalize data")
    parser.add_argument("--ref_level_db", type=int, default=20,
                        help="Reference level (dB) used to normalize data")
    parser.add_argument("--shape", type=int, default=24,
                        help="Length of time axis of split spectrograms fed to the generator")
    parser.add_argument("--vec_len", type=int, default=128,
                        help="Length of the vector produced by the siamese network")
    parser.add_argument("--batch_size", type=int, default=128,
                        help="Batch size")
    parser.add_argument("--delta", type=float, default=2.,
                        help="Constant for siamese loss")
    parser.add_argument("-lr", "--lr", type=float, default=0.0004,
                        help="Learning rate")
    parser.add_argument("--n_save", type=int, default=1,
                        help="How many epochs between each saving and displaying of results")
    parser.add_argument("--gupt", type=int, default=3,
                        help="How many discriminator updates per generator+siamese update")
    parser.add_argument("--epoch", type=int, default=5000,
                        help="Number of training epochs")
    parser.add_argument("--device", type=str, default='cpu',
                        help="Device")
    # Copy every parsed option onto the EasyDict so the rest of the script
    # can use attribute access (args.lr, args.hop, ...).
    args.update(vars(parser.parse_args()))
    return args

if __name__ == "__main__":
    args = EasyDict()
    args = setup_training_args(args)
    CH = Common_helpers(args)

    # MALE1 (source speaker)
    awv = CH.audio_array(args.awv_path)  # get waveform array from folder containing wav files
    aspec = CH.tospec(awv)               # get spectrogram array
    adata = CH.splitcut(aspec)           # split spectrograms to fixed length

    # FEMALE1 (target speaker)
    bwv = CH.audio_array(args.bwv_path)
    bspec = CH.tospec(bwv)
    bdata = CH.splitcut(bspec)
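
    # Each dataset below repeats the split spectrograms 50 times per epoch,
    # randomly crops every example to a fixed (hop, 3*shape, 1) window so the
    # generator always sees same-sized inputs, then shuffles and batches.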
    @tf.function
    def proc(x):
        return tf.image.random_crop(x, size=[args.hop, 3 * args.shape, 1])

    dsa = tf.data.Dataset.from_tensor_slices(adata).repeat(50).map(
        proc, num_parallel_calls=tf.data.experimental.AUTOTUNE).shuffle(
        10000).batch(args.batch_size, drop_remainder=True)
    dsb = tf.data.Dataset.from_tensor_slices(bdata).repeat(50).map(
        proc, num_parallel_calls=tf.data.experimental.AUTOTUNE).shuffle(
        10000).batch(args.batch_size, drop_remainder=True)
    # Build models and initialize optimizers
    # If load_model=True, specify the path where the models are saved
    TH = Training_helpers(args, aspec)
    if args.model_path is None:
        args.gen, args.critic, args.siam, [args.opt_gen, args.opt_disc] = TH.get_networks(load_model=False)
    else:
        args.gen, args.critic, args.siam, [args.opt_gen, args.opt_disc] = TH.get_networks(load_model=True,
                                                                                          path=args.model_path)
    TH.update_lr(args.lr)
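
    # Update schedule: every `gupt`-th batch runs a full generator + siamese +
    # discriminator step (train_all); the other batches update only the
    # discriminator (train_d). `g` counts batches since the last log line and
    # `c` counts batches in the current epoch, for averaging the loss lists.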
    df_list = []
    dr_list = []
    g_list = []
    id_list = []
    c = 0
    g = 0
    for epoch in range(args.epoch):
        bef = time.time()
        nbatch = 0
        for batchi, (a, b) in enumerate(zip(dsa, dsb)):
            if batchi % args.gupt == 0:
                dloss_t, dloss_f, gloss, idloss = train_all(a, b, args)
            else:
                dloss_t, dloss_f = train_d(a, b, args)
            df_list.append(dloss_f)
            dr_list.append(dloss_t)
            # On discriminator-only batches, gloss/idloss keep their values
            # from the most recent train_all step, so those are re-appended.
            g_list.append(gloss)
            id_list.append(idloss)
            c += 1
            g += 1
            if batchi % 600 == 0:
                print(
                    f'[Epoch {epoch}/{args.epoch}] [Batch {batchi}] [D loss f: {np.mean(df_list[-g:], axis=0)} ',
                    end='')
                print(f'r: {np.mean(dr_list[-g:], axis=0)}] ', end='')
                print(f'[G loss: {np.mean(g_list[-g:], axis=0)}] ', end='')
                print(f'[ID loss: {np.mean(id_list[-g:])}] ', end='')
                print(f'[LR: {args.lr}]')
                g = 0
            nbatch = batchi + 1  # number of batches processed so far
        print(f'Time/Batch {(time.time() - bef) / nbatch}')
        TH.save_end(epoch, np.mean(g_list[-args.n_save * c:], axis=0),
                    np.mean(df_list[-args.n_save * c:], axis=0),
                    np.mean(id_list[-args.n_save * c:], axis=0),
                    n_save=args.n_save, save_path=args.dest_path)
        print(f'Mean D loss: {np.mean(df_list[-c:], axis=0)} '
              f'Mean G loss: {np.mean(g_list[-c:], axis=0)} '
              f'Mean ID loss: {np.mean(id_list[-c:], axis=0)}')
        c = 0
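
# Example invocation (the paths shown are the script's own defaults):
#   python train.py --awv_path ./data/cmu_us_bdl_arctic/wav \
#                   --bwv_path ./data/cmu_us_clb_arctic/wav \
#                   --batch_size 128 --lr 0.0004
# To resume from previously saved weights, pass --model_path pointing at the
# weights written to --dest_path by an earlier run.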