Merge pull request #2107 from yt605155624/rm_more_log

[cli]rm extra log
TianYuan, committed by GitHub 3 years ago
commit bbc442b887

@@ -217,7 +217,7 @@ class BaseExecutor(ABC):
             logging.getLogger(name) for name in logging.root.manager.loggerDict
         ]
         for l in loggers:
-            l.disabled = True
+            l.setLevel(logging.ERROR)

     def show_rtf(self, info: Dict[str, List[float]]):
         """

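The substantive change in the first hunk is that the collected loggers are no longer disabled outright but capped at ERROR, so routine info/warning chatter is silenced while genuine errors still surface. A minimal, self-contained sketch of the difference (the logger name below is hypothetical, not from PaddleSpeech):

import logging

logging.basicConfig()  # attach a default handler so messages are visible

noisy = logging.getLogger("some.noisy.dependency")  # hypothetical name

# New behaviour: raise the threshold, keep errors visible.
noisy.setLevel(logging.ERROR)
noisy.info("loading checkpoint ...")    # suppressed
noisy.error("checkpoint is corrupt")    # still printed

# Old behaviour: disabling drops everything, including errors.
noisy.disabled = True
noisy.error("checkpoint is corrupt")    # silently dropped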
@@ -267,21 +267,18 @@ class TTSExecutor(BaseExecutor):
         with open(self.phones_dict, "r") as f:
             phn_id = [line.strip().split() for line in f.readlines()]
         vocab_size = len(phn_id)
-        print("vocab_size:", vocab_size)

         tone_size = None
         if self.tones_dict:
             with open(self.tones_dict, "r") as f:
                 tone_id = [line.strip().split() for line in f.readlines()]
             tone_size = len(tone_id)
-            print("tone_size:", tone_size)

         spk_num = None
         if self.speaker_dict:
             with open(self.speaker_dict, 'rt') as f:
                 spk_id = [line.strip().split() for line in f.readlines()]
             spk_num = len(spk_id)
-            print("spk_num:", spk_num)

         # frontend
         if lang == 'zh':
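The three removed prints only reported the number of entries read from phones_dict, tones_dict and speaker_dict, which appear to be plain text files with one "<token> <id>" pair per line. If those counts are still useful for debugging, an alternative to deleting them outright is to emit them through a module-level logger at DEBUG level; count_entries below is a hypothetical helper, not part of the codebase:

import logging

logger = logging.getLogger(__name__)

def count_entries(dict_path: str) -> int:
    # Each line holds a "<token> <id>" pair; the vocab size is the line count.
    with open(dict_path, "r") as f:
        pairs = [line.strip().split() for line in f.readlines()]
    # Available on demand (with DEBUG enabled) instead of always printed.
    logger.debug("%s: %d entries", dict_path, len(pairs))
    return len(pairs)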
@@ -291,7 +288,6 @@ class TTSExecutor(BaseExecutor):

         elif lang == 'en':
             self.frontend = English(phone_vocab_path=self.phones_dict)
-        print("frontend done!")

         # acoustic model
         odim = self.am_config.n_mels
@@ -324,7 +320,6 @@ class TTSExecutor(BaseExecutor):
         am_normalizer = ZScore(am_mu, am_std)
         self.am_inference = am_inference_class(am_normalizer, am)
         self.am_inference.eval()
-        print("acoustic model done!")

         # vocoder
         # model: {model_name}_{dataset}
@@ -347,7 +342,6 @@ class TTSExecutor(BaseExecutor):
         voc_normalizer = ZScore(voc_mu, voc_std)
         self.voc_inference = voc_inference_class(voc_normalizer, voc)
         self.voc_inference.eval()
-        print("voc done!")

     def preprocess(self, input: Any, *args, **kwargs):
         """

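In the two hunks above, ZScore wraps the mean/std statistics (am_mu/am_std, voc_mu/voc_std) loaded alongside each checkpoint, and the inference wrappers pair it with the model, presumably so outputs land back on the original feature scale. A rough NumPy sketch of the underlying idea, not the actual paddle-based class:

import numpy as np

# Rough sketch only: the real ZScore works on paddle tensors inside the
# inference wrappers; this just illustrates the (de)normalization math.
class ZScoreSketch:
    def __init__(self, mu: np.ndarray, std: np.ndarray):
        self.mu = mu
        self.std = std

    def __call__(self, x: np.ndarray) -> np.ndarray:
        # normalize: zero mean, unit variance per feature dimension
        return (x - self.mu) / self.std

    def inverse(self, x: np.ndarray) -> np.ndarray:
        # denormalize: map back to the original feature scale
        return x * self.std + self.mu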
@@ -258,7 +258,6 @@ class FastSpeech2(nn.Layer):
             padding_idx=self.padding_idx)

         if encoder_type == "transformer":
-            print("encoder_type is transformer")
             self.encoder = TransformerEncoder(
                 idim=idim,
                 attention_dim=adim,
@@ -275,7 +274,6 @@ class FastSpeech2(nn.Layer):
                 positionwise_layer_type=positionwise_layer_type,
                 positionwise_conv_kernel_size=positionwise_conv_kernel_size, )
         elif encoder_type == "conformer":
-            print("encoder_type is conformer")
             self.encoder = ConformerEncoder(
                 idim=idim,
                 attention_dim=adim,
@@ -362,7 +360,6 @@ class FastSpeech2(nn.Layer):
         # NOTE: we use encoder as decoder
         # because fastspeech's decoder is the same as encoder
         if decoder_type == "transformer":
-            print("decoder_type is transformer")
             self.decoder = TransformerEncoder(
                 idim=0,
                 attention_dim=adim,
@@ -380,7 +377,6 @@ class FastSpeech2(nn.Layer):
                 positionwise_layer_type=positionwise_layer_type,
                 positionwise_conv_kernel_size=positionwise_conv_kernel_size, )
         elif decoder_type == "conformer":
-            print("decoder_type is conformer")
             self.decoder = ConformerEncoder(
                 idim=0,
                 attention_dim=adim,

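All four FastSpeech2 hunks touch the same spot in the constructor: the encoder and decoder are chosen from the encoder_type / decoder_type config strings via an if/elif chain, and the prints announcing the choice are removed. As a side note, a lookup-table variant of that selection pattern is sketched below; the builder functions are placeholders, not the real PaddleSpeech layers:

from typing import Callable, Dict

# Placeholder builders standing in for TransformerEncoder / ConformerEncoder.
def build_transformer_encoder(idim: int, attention_dim: int) -> str:
    return f"TransformerEncoder(idim={idim}, attention_dim={attention_dim})"

def build_conformer_encoder(idim: int, attention_dim: int) -> str:
    return f"ConformerEncoder(idim={idim}, attention_dim={attention_dim})"

ENCODER_BUILDERS: Dict[str, Callable[[int, int], str]] = {
    "transformer": build_transformer_encoder,
    "conformer": build_conformer_encoder,
}

def build_encoder(encoder_type: str, idim: int, attention_dim: int) -> str:
    # Reject unknown config strings instead of falling through silently.
    if encoder_type not in ENCODER_BUILDERS:
        raise ValueError(f"unsupported encoder_type: {encoder_type}")
    return ENCODER_BUILDERS[encoder_type](idim, attention_dim)

# Example: build_encoder("conformer", idim=0, attention_dim=384)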