From cf846f9ebce0ca2bb0c6bf29d4b7740c5e845b46 Mon Sep 17 00:00:00 2001
From: TianYuan
Date: Thu, 30 Jun 2022 12:12:59 +0000
Subject: [PATCH] rm extra log

---
 paddlespeech/cli/executor.py                       | 2 +-
 paddlespeech/cli/tts/infer.py                      | 6 ------
 paddlespeech/t2s/models/fastspeech2/fastspeech2.py | 4 ----
 3 files changed, 1 insertion(+), 11 deletions(-)

diff --git a/paddlespeech/cli/executor.py b/paddlespeech/cli/executor.py
index d390f947..d4187a51 100644
--- a/paddlespeech/cli/executor.py
+++ b/paddlespeech/cli/executor.py
@@ -217,7 +217,7 @@ class BaseExecutor(ABC):
             logging.getLogger(name) for name in logging.root.manager.loggerDict
         ]
         for l in loggers:
-            l.disabled = True
+            l.setLevel(logging.ERROR)
 
     def show_rtf(self, info: Dict[str, List[float]]):
         """
diff --git a/paddlespeech/cli/tts/infer.py b/paddlespeech/cli/tts/infer.py
index 5468f257..7c837761 100644
--- a/paddlespeech/cli/tts/infer.py
+++ b/paddlespeech/cli/tts/infer.py
@@ -267,21 +267,18 @@ class TTSExecutor(BaseExecutor):
         with open(self.phones_dict, "r") as f:
             phn_id = [line.strip().split() for line in f.readlines()]
         vocab_size = len(phn_id)
-        print("vocab_size:", vocab_size)
 
         tone_size = None
         if self.tones_dict:
             with open(self.tones_dict, "r") as f:
                 tone_id = [line.strip().split() for line in f.readlines()]
             tone_size = len(tone_id)
-            print("tone_size:", tone_size)
 
         spk_num = None
         if self.speaker_dict:
             with open(self.speaker_dict, 'rt') as f:
                 spk_id = [line.strip().split() for line in f.readlines()]
             spk_num = len(spk_id)
-            print("spk_num:", spk_num)
 
         # frontend
         if lang == 'zh':
@@ -291,7 +288,6 @@ class TTSExecutor(BaseExecutor):
 
         elif lang == 'en':
             self.frontend = English(phone_vocab_path=self.phones_dict)
-        print("frontend done!")
 
         # acoustic model
         odim = self.am_config.n_mels
@@ -324,7 +320,6 @@ class TTSExecutor(BaseExecutor):
         am_normalizer = ZScore(am_mu, am_std)
         self.am_inference = am_inference_class(am_normalizer, am)
         self.am_inference.eval()
-        print("acoustic model done!")
 
         # vocoder
         # model: {model_name}_{dataset}
@@ -347,7 +342,6 @@ class TTSExecutor(BaseExecutor):
         voc_normalizer = ZScore(voc_mu, voc_std)
         self.voc_inference = voc_inference_class(voc_normalizer, voc)
         self.voc_inference.eval()
-        print("voc done!")
 
     def preprocess(self, input: Any, *args, **kwargs):
         """
diff --git a/paddlespeech/t2s/models/fastspeech2/fastspeech2.py b/paddlespeech/t2s/models/fastspeech2/fastspeech2.py
index 48595bb2..ec5ed984 100644
--- a/paddlespeech/t2s/models/fastspeech2/fastspeech2.py
+++ b/paddlespeech/t2s/models/fastspeech2/fastspeech2.py
@@ -258,7 +258,6 @@ class FastSpeech2(nn.Layer):
                 padding_idx=self.padding_idx)
 
         if encoder_type == "transformer":
-            print("encoder_type is transformer")
             self.encoder = TransformerEncoder(
                 idim=idim,
                 attention_dim=adim,
@@ -275,7 +274,6 @@ class FastSpeech2(nn.Layer):
                 positionwise_layer_type=positionwise_layer_type,
                 positionwise_conv_kernel_size=positionwise_conv_kernel_size, )
         elif encoder_type == "conformer":
-            print("encoder_type is conformer")
             self.encoder = ConformerEncoder(
                 idim=idim,
                 attention_dim=adim,
@@ -362,7 +360,6 @@ class FastSpeech2(nn.Layer):
         # NOTE: we use encoder as decoder
         # because fastspeech's decoder is the same as encoder
         if decoder_type == "transformer":
-            print("decoder_type is transformer")
             self.decoder = TransformerEncoder(
                 idim=0,
                 attention_dim=adim,
@@ -380,7 +377,6 @@ class FastSpeech2(nn.Layer):
                 positionwise_layer_type=positionwise_layer_type,
                 positionwise_conv_kernel_size=positionwise_conv_kernel_size, )
         elif decoder_type == "conformer":
-            print("decoder_type is conformer")
             self.decoder = ConformerEncoder(
                 idim=0,
                 attention_dim=adim,
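
Note on the executor.py hunk: the old code set `l.disabled = True` on every registered logger, which swallows all records, errors included. Switching to `l.setLevel(logging.ERROR)` silences third-party INFO/DEBUG chatter while still letting errors through. A minimal standalone sketch of the difference, not part of the patch (the logger name below is hypothetical):

import logging

# Route records that pass the level check to stderr.
logging.basicConfig(format="%(name)s %(levelname)s: %(message)s")

noisy = logging.getLogger("some.noisy.lib")  # hypothetical third-party logger

# Old behavior: a disabled logger drops everything, errors included.
noisy.disabled = True
noisy.error("never shown")     # nothing is emitted

# New behavior: ERROR level drops info/debug but keeps errors visible.
noisy.disabled = False
noisy.setLevel(logging.ERROR)
noisy.info("suppressed")       # below ERROR, not emitted
noisy.error("still shown")     # prints: some.noisy.lib ERROR: still shown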