Merge pull request #3186 from PaddlePaddle/vits_pr

[TTS] Update LR schedulers from per-iteration to per-epoch for VITS
Hui Zhang committed e3dcfa8815 (via GitHub)

@@ -179,7 +179,7 @@ generator_first: False       # whether to start updating generator first
 #                  OTHER TRAINING SETTING                #
 ##########################################################
 num_snapshots: 10            # max number of snapshots to keep while training
-train_max_steps: 350000      # Number of training steps. == total_iters / ngpus, total_iters = 1000000
-save_interval_steps: 1000    # Interval steps to save checkpoint.
-eval_interval_steps: 250     # Interval steps to evaluate the network.
+max_epoch: 1000              # Number of training epochs.
+save_interval_epochs: 1      # Interval epochs to save checkpoint.
+eval_interval_epochs: 1      # Interval epochs to evaluate the network.
 seed: 777                    # random seed number
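
The epoch-based settings are equivalent to the old step-based ones once the number of batches per epoch is fixed. A rough sanity check below uses a hypothetical steps_per_epoch; in practice it is len(train_dataloader) and depends on dataset size, batch size, and the number of GPUs:

    # Illustrative only: relate the new epoch-based budget to the old
    # step-based one. `steps_per_epoch` is a hypothetical value, not a
    # config field.
    steps_per_epoch = 350               # assumed batches per epoch
    max_epoch = 1000                    # new config value
    print(max_epoch * steps_per_epoch)  # comparable to old train_max_steps (350000)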

@@ -230,17 +230,15 @@ def train_sp(args, config):
         output_dir=output_dir)
     trainer = Trainer(
-        updater,
-        stop_trigger=(config.train_max_steps, "iteration"),
-        out=output_dir)
+        updater, stop_trigger=(config.max_epoch, 'epoch'), out=output_dir)
     if dist.get_rank() == 0:
         trainer.extend(
-            evaluator, trigger=(config.eval_interval_steps, 'iteration'))
+            evaluator, trigger=(config.eval_interval_epochs, 'epoch'))
         trainer.extend(VisualDL(output_dir), trigger=(1, 'iteration'))
         trainer.extend(
             Snapshot(max_size=config.num_snapshots),
-            trigger=(config.save_interval_steps, 'iteration'))
+            trigger=(config.save_interval_epochs, 'epoch'))
     print("Trainer Done!")
     trainer.run()
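
The Trainer uses chainer-style interval triggers, so changing the tuples from (N, 'iteration') to (N, 'epoch') changes when the stop condition, evaluator, and snapshots fire. A minimal sketch of that trigger semantics, assuming a state object exposing iteration and epoch counters (an illustration, not the PaddleSpeech implementation):

    # Hedged sketch of (period, unit) interval-trigger semantics.
    class IntervalTrigger:
        def __init__(self, period, unit):
            assert unit in ('iteration', 'epoch')
            self.period = period
            self.unit = unit

        def __call__(self, state):
            # fire every `period` completed units
            count = state.epoch if self.unit == 'epoch' else state.iteration
            return count > 0 and count % self.period == 0

With stop_trigger=(config.max_epoch, 'epoch'), training now ends after max_epoch completed epochs rather than after train_max_steps iterations; VisualDL logging stays per-iteration.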

@@ -166,7 +166,9 @@ class VITSUpdater(StandardUpdater):
         gen_loss.backward()
         self.optimizer_g.step()
-        self.scheduler_g.step()
+        # learning rate updates on each epoch.
+        if self.state.iteration % self.updates_per_epoch == 0:
+            self.scheduler_g.step()

         # reset cache
         if self.model.reuse_cache_gen or not self.model.training:
@@ -202,7 +204,9 @@ class VITSUpdater(StandardUpdater):
         dis_loss.backward()
         self.optimizer_d.step()
-        self.scheduler_d.step()
+        # learning rate updates on each epoch.
+        if self.state.iteration % self.updates_per_epoch == 0:
+            self.scheduler_d.step()

         # reset cache
         if self.model.reuse_cache_dis or not self.model.training:
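
Both the generator and discriminator branches use the same pattern: the updater still runs once per iteration, so each scheduler is stepped only on the iteration that closes an epoch. A standalone sketch of the pattern, assuming updates_per_epoch is the number of batches per epoch (e.g. len(train_dataloader)); the scheduler type and hyperparameters are illustrative, not taken from the VITS config:

    import paddle

    # Illustrative exponential-decay LR scheduler; values are assumptions.
    scheduler = paddle.optimizer.lr.ExponentialDecay(
        learning_rate=2.0e-4, gamma=0.999875)

    updates_per_epoch = 100             # hypothetical: len(train_dataloader)
    for iteration in range(1, 3 * updates_per_epoch + 1):
        # ... forward / backward / optimizer.step() run here every batch ...
        if iteration % updates_per_epoch == 0:
            scheduler.step()            # decay once per epoch, not per batch
            print(f"epoch {iteration // updates_per_epoch}: "
                  f"lr = {scheduler.get_lr():.6e}")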
