Use no_sync if paddle supports it, else nullcontext

pull/879/head
Hui Zhang 3 years ago
parent b4e16eb815
commit 466672e1de

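The change repeated in each hunk below: `model.no_sync` only exists when the model is wrapped in paddle's `DataParallel`, so the trainers guard on `hasattr` and on the `parallel` flag, falling back to `contextlib.nullcontext` for single-GPU or CPU runs. A minimal sketch of the pattern (the `Trainer` shape and `grad_context` name here are illustrative, not the exact PaddleSpeech code):

    from contextlib import nullcontext

    class Trainer:
        def __init__(self, model, parallel):
            # `model` is wrapped in paddle.DataParallel only for multi-process
            # runs; only the wrapped model exposes the no_sync context manager.
            self.model = model
            self.parallel = parallel

        def grad_context(self):
            # no_sync requires both DDP mode and paddle support for it.
            if hasattr(self.model, "no_sync") and self.parallel:
                return self.model.no_sync
            # Single GPU / CPU: gradients need no cross-process sync.
            return nullcontext
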
@@ -87,7 +87,8 @@ class DeepSpeech2Trainer(Trainer):
             # Disable gradient synchronizations across DDP processes.
             # Within this context, gradients will be accumulated on module
             # variables, which will later be synchronized.
-            context = self.model.no_sync
+            context = self.model.no_sync if (hasattr(self.model, "no_sync") and
+                                             self.parallel) else nullcontext
         else:
             # Used for single gpu training and DDP gradient synchronization
             # processes.

@@ -106,7 +106,8 @@ class U2Trainer(Trainer):
             # Within this context, gradients will be accumulated on module
             # variables, which will later be synchronized.
             # When using cpu w/o DDP, model does not have `no_sync`
-            context = self.model.no_sync if self.parallel else nullcontext
+            context = self.model.no_sync if (hasattr(self.model, "no_sync") and
+                                             self.parallel) else nullcontext
         else:
             # Used for single gpu training and DDP gradient synchronization
             # processes.

@@ -105,7 +105,8 @@ class U2Trainer(Trainer):
             # Disable gradient synchronizations across DDP processes.
             # Within this context, gradients will be accumulated on module
             # variables, which will later be synchronized.
-            context = self.model.no_sync
+            context = self.model.no_sync if (hasattr(self.model, "no_sync") and
+                                             self.parallel) else nullcontext
         else:
             # Used for single gpu training and DDP gradient synchronization
             # processes.

@@ -110,7 +110,8 @@ class U2STTrainer(Trainer):
             # Disable gradient synchronizations across DDP processes.
             # Within this context, gradients will be accumulated on module
             # variables, which will later be synchronized.
-            context = self.model.no_sync
+            context = self.model.no_sync if (hasattr(self.model, "no_sync") and
+                                             self.parallel) else nullcontext
         else:
             # Used for single gpu training and DDP gradient synchronization
             # processes.

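For context, the comments in these hunks describe gradient accumulation: inside the chosen context, backward passes add into the parameter gradients without triggering the DDP allreduce, and synchronization happens on the step that runs outside it. A hedged sketch of how such a context is typically consumed (`train_epoch`, `batch_loss`, and `accum_grad` are illustrative names, not the actual trainer API):

    def train_epoch(model, loader, batch_loss, optimizer, context, accum_grad):
        # `context` is the callable chosen above: model.no_sync or nullcontext.
        for i, batch in enumerate(loader):
            loss = batch_loss(model, batch) / accum_grad
            if (i + 1) % accum_grad != 0:
                # Intermediate micro-batch: accumulate grads locally,
                # skipping the DDP allreduce.
                with context():
                    loss.backward()
            else:
                # Last micro-batch of the group: backward with sync,
                # then apply and clear the accumulated gradients.
                loss.backward()
                optimizer.step()
                optimizer.clear_grad()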