From b3934536ab3ae5e35381074e6f209b3988dd633d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E7=AB=A0=E5=AE=8F=E5=BD=AC?= <57510731+hopingZ@users.noreply.github.com>
Date: Tue, 31 Jan 2023 18:14:36 +0800
Subject: [PATCH] Avoid using variable "attn_loss" before assignment

---
 paddlespeech/t2s/models/tacotron2/tacotron2_updater.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/paddlespeech/t2s/models/tacotron2/tacotron2_updater.py b/paddlespeech/t2s/models/tacotron2/tacotron2_updater.py
index 09e6827d0..ee7f0a2ea 100644
--- a/paddlespeech/t2s/models/tacotron2/tacotron2_updater.py
+++ b/paddlespeech/t2s/models/tacotron2/tacotron2_updater.py
@@ -113,16 +113,18 @@ class Tacotron2Updater(StandardUpdater):
         loss.backward()
         optimizer.step()
 
+        if self.use_guided_attn_loss:
+            report("train/attn_loss", float(attn_loss))
+            losses_dict["attn_loss"] = float(attn_loss)
+
         report("train/l1_loss", float(l1_loss))
         report("train/mse_loss", float(mse_loss))
         report("train/bce_loss", float(bce_loss))
-        report("train/attn_loss", float(attn_loss))
         report("train/loss", float(loss))
 
         losses_dict["l1_loss"] = float(l1_loss)
         losses_dict["mse_loss"] = float(mse_loss)
         losses_dict["bce_loss"] = float(bce_loss)
-        losses_dict["attn_loss"] = float(attn_loss)
         losses_dict["loss"] = float(loss)
         self.msg += ', '.join('{}: {:>.6f}'.format(k, v)
                               for k, v in losses_dict.items())