Merge pull request #1445 from yt605155624/update_train

[TTS] init for all workers in train.py when ngpu > 1
Hui Zhang committed 3 years ago (via GitHub)
commit 718c849f68
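The PR title refers to running distributed initialization on every worker process when more than one GPU is used, not only on rank 0. The hunks below show only the extension-registration parts of the various train.py files, so as a rough illustration of the pattern the title describes, here is a minimal sketch using Paddle's distributed API; the function name init_workers and the ngpu handling are assumptions for illustration, not the exact PaddleSpeech code.

# Minimal sketch (assumed, not the exact PaddleSpeech train.py): when ngpu > 1,
# every worker must initialize the parallel environment, not just rank 0.
import paddle
import paddle.distributed as dist


def init_workers(ngpu: int) -> int:
    if ngpu == 0:
        paddle.set_device("cpu")
    else:
        paddle.set_device("gpu")
    if ngpu > 1:
        # called on every rank so that all workers join the process group
        dist.init_parallel_env()
    return dist.get_rank()


if __name__ == "__main__":
    rank = init_workers(ngpu=0)  # ngpu=0 keeps the sketch runnable without a GPU
    print(f"running as rank {rank}")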

@@ -62,9 +62,3 @@ Contents
    :caption: Acknowledgement
    asr/reference

@@ -160,9 +160,8 @@ def train_sp(args, config):
     if dist.get_rank() == 0:
         trainer.extend(evaluator, trigger=(1, "epoch"))
         trainer.extend(VisualDL(output_dir), trigger=(1, "iteration"))
         trainer.extend(
             Snapshot(max_size=config.num_snapshots), trigger=(1, 'epoch'))
-        # print(trainer.extensions)
     trainer.run()
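In this hunk (and the similar ones below) the evaluator, VisualDL logger and Snapshot extensions are registered only on rank 0, so a single process writes logs and checkpoints. A minimal sketch of that gating pattern follows; the trainer, evaluator, visualdl and snapshot arguments are placeholders for the real paddlespeech.t2s training objects, which are not constructed here, and only the paddle.distributed call is the real API.

# Sketch of the rank-0 gate shown in the hunk above; extension objects are
# passed in as placeholders rather than built from the real training classes.
import paddle.distributed as dist


def register_and_run(trainer, evaluator, visualdl, snapshot) -> None:
    if dist.get_rank() == 0:
        # only rank 0 evaluates, writes VisualDL logs and saves Snapshot
        # checkpoints, so parallel workers never race on the same output files
        trainer.extend(evaluator, trigger=(1, "epoch"))
        trainer.extend(visualdl, trigger=(1, "iteration"))
        trainer.extend(snapshot, trigger=(1, "epoch"))
    # every worker still enters the training loop
    trainer.run()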

@@ -231,9 +231,9 @@ def train_sp(args, config):
         trainer.extend(
             evaluator, trigger=(config.eval_interval_steps, 'iteration'))
         trainer.extend(VisualDL(output_dir), trigger=(1, 'iteration'))
         trainer.extend(
             Snapshot(max_size=config.num_snapshots),
             trigger=(config.save_interval_steps, 'iteration'))
     print("Trainer Done!")
     trainer.run()
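The vocoder-style trainers in this and the following hunks use iteration-based triggers read from the config (eval_interval_steps, save_interval_steps, num_snapshots) instead of epoch triggers. A small sketch of how those intervals map onto the (interval, 'iteration') trigger tuples, assuming a plain dataclass in place of the real YAML-backed config object:

# Sketch only: a plain dataclass stands in for the YAML-backed config that the
# real train.py files read their eval/save intervals and snapshot limit from.
from dataclasses import dataclass


@dataclass
class TrainerIntervals:
    eval_interval_steps: int = 1000   # run the evaluator every N iterations
    save_interval_steps: int = 5000   # save a Snapshot every N iterations
    num_snapshots: int = 10           # keep at most this many checkpoints


def iteration_triggers(config: TrainerIntervals):
    # mirrors the (interval, 'iteration') tuples passed to trainer.extend(...)
    return {
        "evaluator": (config.eval_interval_steps, "iteration"),
        "snapshot": (config.save_interval_steps, "iteration"),
        "visualdl": (1, "iteration"),
    }


if __name__ == "__main__":
    print(iteration_triggers(TrainerIntervals()))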

@@ -219,9 +219,9 @@ def train_sp(args, config):
         trainer.extend(
             evaluator, trigger=(config.eval_interval_steps, 'iteration'))
         trainer.extend(VisualDL(output_dir), trigger=(1, 'iteration'))
         trainer.extend(
             Snapshot(max_size=config.num_snapshots),
             trigger=(config.save_interval_steps, 'iteration'))
     print("Trainer Done!")
     trainer.run()

@@ -194,11 +194,10 @@ def train_sp(args, config):
         trainer.extend(
             evaluator, trigger=(config.eval_interval_steps, 'iteration'))
         trainer.extend(VisualDL(output_dir), trigger=(1, 'iteration'))
         trainer.extend(
             Snapshot(max_size=config.num_snapshots),
             trigger=(config.save_interval_steps, 'iteration'))
-        # print(trainer.extensions.keys())
     print("Trainer Done!")
     trainer.run()

@@ -212,9 +212,9 @@ def train_sp(args, config):
         trainer.extend(
             evaluator, trigger=(config.eval_interval_steps, 'iteration'))
         trainer.extend(VisualDL(output_dir), trigger=(1, 'iteration'))
         trainer.extend(
             Snapshot(max_size=config.num_snapshots),
             trigger=(config.save_interval_steps, 'iteration'))
     print("Trainer Done!")
     trainer.run()

@@ -171,8 +171,8 @@ def train_sp(args, config):
     if dist.get_rank() == 0:
         trainer.extend(evaluator, trigger=(1, "epoch"))
         trainer.extend(VisualDL(output_dir), trigger=(1, "iteration"))
         trainer.extend(
             Snapshot(max_size=config.num_snapshots), trigger=(1, 'epoch'))
     trainer.run()

@@ -155,9 +155,8 @@ def train_sp(args, config):
     if dist.get_rank() == 0:
         trainer.extend(evaluator, trigger=(1, "epoch"))
         trainer.extend(VisualDL(output_dir), trigger=(1, "iteration"))
         trainer.extend(
             Snapshot(max_size=config.num_snapshots), trigger=(1, 'epoch'))
-        # print(trainer.extensions)
     trainer.run()

@@ -148,9 +148,8 @@ def train_sp(args, config):
     if dist.get_rank() == 0:
         trainer.extend(evaluator, trigger=(1, "epoch"))
         trainer.extend(VisualDL(output_dir), trigger=(1, "iteration"))
         trainer.extend(
             Snapshot(max_size=config.num_snapshots), trigger=(1, 'epoch'))
-        # print(trainer.extensions)
     trainer.run()

@@ -168,9 +168,9 @@ def train_sp(args, config):
         trainer.extend(
             evaluator, trigger=(config.eval_interval_steps, 'iteration'))
         trainer.extend(VisualDL(output_dir), trigger=(1, 'iteration'))
         trainer.extend(
             Snapshot(max_size=config.num_snapshots),
             trigger=(config.save_interval_steps, 'iteration'))
     print("Trainer Done!")
     trainer.run()

@@ -135,9 +135,8 @@ def train_sp(args, config):
     if dist.get_rank() == 0:
         trainer.extend(evaluator, trigger=(1, "epoch"))
         trainer.extend(VisualDL(output_dir), trigger=(1, "iteration"))
         trainer.extend(
             Snapshot(max_size=config.num_snapshots), trigger=(1, 'epoch'))
-        # print(trainer.extensions)
     trainer.run()
