diff --git a/paddlespeech/cli/tts/infer.py b/paddlespeech/cli/tts/infer.py
index 707518c05..5515ade26 100644
--- a/paddlespeech/cli/tts/infer.py
+++ b/paddlespeech/cli/tts/infer.py
@@ -292,19 +292,19 @@ class TTSExecutor(BaseExecutor):
         with open(self.voc_config) as f:
             self.voc_config = CfgNode(yaml.safe_load(f))
-        with open(self.phones_dict, "r") as f:
+        with open(self.phones_dict, 'rt', encoding='utf-8') as f:
             phn_id = [line.strip().split() for line in f.readlines()]
         vocab_size = len(phn_id)
         tone_size = None
         if self.tones_dict:
-            with open(self.tones_dict, "r") as f:
+            with open(self.tones_dict, 'rt', encoding='utf-8') as f:
                 tone_id = [line.strip().split() for line in f.readlines()]
             tone_size = len(tone_id)
         spk_num = None
         if self.speaker_dict:
-            with open(self.speaker_dict, 'rt') as f:
+            with open(self.speaker_dict, 'rt', encoding='utf-8') as f:
                 spk_id = [line.strip().split() for line in f.readlines()]
             spk_num = len(spk_id)
diff --git a/paddlespeech/t2s/exps/ernie_sat/synthesize_e2e.py b/paddlespeech/t2s/exps/ernie_sat/synthesize_e2e.py
index e450aa1a0..c43dafb3c 100644
--- a/paddlespeech/t2s/exps/ernie_sat/synthesize_e2e.py
+++ b/paddlespeech/t2s/exps/ernie_sat/synthesize_e2e.py
@@ -437,7 +437,7 @@ if __name__ == '__main__':
     vocab_phones = {}
-    with open(args.phones_dict, 'rt') as f:
+    with open(args.phones_dict, 'rt', encoding='utf-8') as f:
         phn_id = [line.strip().split() for line in f.readlines()]
     for phn, id in phn_id:
         vocab_phones[phn] = int(id)
diff --git a/paddlespeech/t2s/exps/ernie_sat/train.py b/paddlespeech/t2s/exps/ernie_sat/train.py
index 75a666bb1..c98d691be 100644
--- a/paddlespeech/t2s/exps/ernie_sat/train.py
+++ b/paddlespeech/t2s/exps/ernie_sat/train.py
@@ -109,7 +109,7 @@ def train_sp(args, config):
         num_workers=config.num_workers)
     print("dataloaders done!")
-    with open(args.phones_dict, "r") as f:
+    with open(args.phones_dict, 'rt', encoding='utf-8') as f:
         phn_id = [line.strip().split() for line in f.readlines()]
     vocab_size = len(phn_id)
     print("vocab_size:", vocab_size)
diff --git a/paddlespeech/t2s/exps/fastspeech2/train.py b/paddlespeech/t2s/exps/fastspeech2/train.py
index d31e62a82..97626db0b 100644
--- a/paddlespeech/t2s/exps/fastspeech2/train.py
+++ b/paddlespeech/t2s/exps/fastspeech2/train.py
@@ -67,7 +67,7 @@ def train_sp(args, config):
     if args.speaker_dict is not None:
         print("multiple speaker fastspeech2!")
         collate_fn = fastspeech2_multi_spk_batch_fn
-        with open(args.speaker_dict, 'rt') as f:
+        with open(args.speaker_dict, 'rt', encoding='utf-8') as f:
             spk_id = [line.strip().split() for line in f.readlines()]
         spk_num = len(spk_id)
         fields += ["spk_id"]
@@ -123,7 +123,7 @@ def train_sp(args, config):
         num_workers=config.num_workers)
     print("dataloaders done!")
-    with open(args.phones_dict, "r") as f:
+    with open(args.phones_dict, 'rt', encoding='utf-8') as f:
         phn_id = [line.strip().split() for line in f.readlines()]
     vocab_size = len(phn_id)
     print("vocab_size:", vocab_size)
diff --git a/paddlespeech/t2s/exps/speedyspeech/synthesize_e2e.py b/paddlespeech/t2s/exps/speedyspeech/synthesize_e2e.py
index 644ec250d..d05dfafcf 100644
--- a/paddlespeech/t2s/exps/speedyspeech/synthesize_e2e.py
+++ b/paddlespeech/t2s/exps/speedyspeech/synthesize_e2e.py
@@ -39,18 +39,18 @@ def evaluate(args, speedyspeech_config, pwg_config):
     # construct dataset for evaluation
     sentences = []
-    with open(args.text, 'rt') as f:
+    with open(args.text, 'rt', encoding='utf-8') as f:
         for line in f:
             items = line.strip().split()
             utt_id = items[0]
             sentence = "".join(items[1:])
             sentences.append((utt_id, sentence))
-    with open(args.phones_dict, "r") as f:
+    with open(args.phones_dict, 'rt', encoding='utf-8') as f:
         phn_id = [line.strip().split() for line in f.readlines()]
     vocab_size = len(phn_id)
     print("vocab_size:", vocab_size)
-    with open(args.tones_dict, "r") as f:
+    with open(args.tones_dict, 'rt', encoding='utf-8') as f:
         tone_id = [line.strip().split() for line in f.readlines()]
     tone_size = len(tone_id)
     print("tone_size:", tone_size)
diff --git a/paddlespeech/t2s/exps/speedyspeech/train.py b/paddlespeech/t2s/exps/speedyspeech/train.py
index 7b422e64f..c90090daa 100644
--- a/paddlespeech/t2s/exps/speedyspeech/train.py
+++ b/paddlespeech/t2s/exps/speedyspeech/train.py
@@ -70,7 +70,7 @@ def train_sp(args, config):
     if args.speaker_dict is not None:
         print("multiple speaker speedyspeech!")
         collate_fn = speedyspeech_multi_spk_batch_fn
-        with open(args.speaker_dict, 'rt') as f:
+        with open(args.speaker_dict, 'rt', encoding='utf-8') as f:
             spk_id = [line.strip().split() for line in f.readlines()]
         spk_num = len(spk_id)
         fields += ["spk_id"]
@@ -133,11 +133,11 @@ def train_sp(args, config):
         collate_fn=collate_fn,
         num_workers=config.num_workers)
     print("dataloaders done!")
-    with open(args.phones_dict, "r") as f:
+    with open(args.phones_dict, 'rt', encoding='utf-8') as f:
         phn_id = [line.strip().split() for line in f.readlines()]
     vocab_size = len(phn_id)
     print("vocab_size:", vocab_size)
-    with open(args.tones_dict, "r") as f:
+    with open(args.tones_dict, 'rt', encoding='utf-8') as f:
         tone_id = [line.strip().split() for line in f.readlines()]
     tone_size = len(tone_id)
     print("tone_size:", tone_size)
diff --git a/paddlespeech/t2s/exps/syn_utils.py b/paddlespeech/t2s/exps/syn_utils.py
index 6b693440c..491edda30 100644
--- a/paddlespeech/t2s/exps/syn_utils.py
+++ b/paddlespeech/t2s/exps/syn_utils.py
@@ -106,7 +106,7 @@ def get_chunks(data, block_size: int, pad_size: int):
 def get_sentences(text_file: Optional[os.PathLike], lang: str='zh'):
     # construct dataset for evaluation
     sentences = []
-    with open(text_file, 'rt') as f:
+    with open(text_file, 'rt', encoding='utf-8') as f:
         for line in f:
             if line.strip() != "":
                 items = re.split(r"\s+", line.strip(), 1)
@@ -325,17 +325,17 @@ def get_am_inference(am: str='fastspeech2_csmsc',
                      tones_dict: Optional[os.PathLike]=None,
                      speaker_dict: Optional[os.PathLike]=None,
                      return_am: bool=False):
-    with open(phones_dict, "r") as f:
+    with open(phones_dict, 'rt', encoding='utf-8') as f:
         phn_id = [line.strip().split() for line in f.readlines()]
     vocab_size = len(phn_id)
     tone_size = None
     if tones_dict is not None:
-        with open(tones_dict, "r") as f:
+        with open(tones_dict, 'rt', encoding='utf-8') as f:
            tone_id = [line.strip().split() for line in f.readlines()]
        tone_size = len(tone_id)
    spk_num = None
    if speaker_dict is not None:
-        with open(speaker_dict, 'rt') as f:
+        with open(speaker_dict, 'rt', encoding='utf-8') as f:
            spk_id = [line.strip().split() for line in f.readlines()]
        spk_num = len(spk_id)
    odim = am_config.n_mels
diff --git a/paddlespeech/t2s/exps/tacotron2/train.py b/paddlespeech/t2s/exps/tacotron2/train.py
index 69ff80e46..db88009a8 100644
--- a/paddlespeech/t2s/exps/tacotron2/train.py
+++ b/paddlespeech/t2s/exps/tacotron2/train.py
@@ -119,7 +119,7 @@ def train_sp(args, config):
         num_workers=config.num_workers)
     print("dataloaders done!")
-    with open(args.phones_dict, "r") as f:
+    with open(args.phones_dict, 'rt', encoding='utf-8') as f:
         phn_id = [line.strip().split() for line in f.readlines()]
     vocab_size = len(phn_id)
     print("vocab_size:", vocab_size)
diff --git a/paddlespeech/t2s/exps/transformer_tts/train.py b/paddlespeech/t2s/exps/transformer_tts/train.py
index da48b6b99..d49baad99 100644
--- a/paddlespeech/t2s/exps/transformer_tts/train.py
+++ b/paddlespeech/t2s/exps/transformer_tts/train.py
@@ -114,7 +114,7 @@ def train_sp(args, config):
         num_workers=config.num_workers)
     print("dataloaders done!")
-    with open(args.phones_dict, "r") as f:
+    with open(args.phones_dict, 'rt', encoding='utf-8') as f:
         phn_id = [line.strip().split() for line in f.readlines()]
     vocab_size = len(phn_id)
     print("vocab_size:", vocab_size)
diff --git a/paddlespeech/t2s/exps/vits/train.py b/paddlespeech/t2s/exps/vits/train.py
index f6a31ced2..0e74bf631 100644
--- a/paddlespeech/t2s/exps/vits/train.py
+++ b/paddlespeech/t2s/exps/vits/train.py
@@ -78,7 +78,7 @@ def train_sp(args, config):
     if args.speaker_dict is not None:
         print("multiple speaker vits!")
         collate_fn = vits_multi_spk_batch_fn
-        with open(args.speaker_dict, 'rt') as f:
+        with open(args.speaker_dict, 'rt', encoding='utf-8') as f:
             spk_id = [line.strip().split() for line in f.readlines()]
         spk_num = len(spk_id)
         fields += ["spk_id"]
@@ -132,7 +132,7 @@ def train_sp(args, config):
         num_workers=config.num_workers)
     print("dataloaders done!")
-    with open(args.phones_dict, "r") as f:
+    with open(args.phones_dict, 'rt', encoding='utf-8') as f:
         phn_id = [line.strip().split() for line in f.readlines()]
     vocab_size = len(phn_id)
     print("vocab_size:", vocab_size)
diff --git a/paddlespeech/t2s/frontend/phonectic.py b/paddlespeech/t2s/frontend/phonectic.py
index 261db80a8..af86d9b80 100644
--- a/paddlespeech/t2s/frontend/phonectic.py
+++ b/paddlespeech/t2s/frontend/phonectic.py
@@ -58,7 +58,7 @@ class English(Phonetics):
         self.punc = ":,;。?!“”‘’':,;.?!"
         self.text_normalizer = TextNormalizer()
         if phone_vocab_path:
-            with open(phone_vocab_path, 'rt') as f:
+            with open(phone_vocab_path, 'rt', encoding='utf-8') as f:
                 phn_id = [line.strip().split() for line in f.readlines()]
             for phn, id in phn_id:
                 self.vocab_phones[phn] = int(id)
diff --git a/paddlespeech/t2s/frontend/zh_frontend.py b/paddlespeech/t2s/frontend/zh_frontend.py
index efb673e36..35b97a93a 100644
--- a/paddlespeech/t2s/frontend/zh_frontend.py
+++ b/paddlespeech/t2s/frontend/zh_frontend.py
@@ -144,12 +144,12 @@ class Frontend():
         self.vocab_phones = {}
         self.vocab_tones = {}
         if phone_vocab_path:
-            with open(phone_vocab_path, 'rt') as f:
+            with open(phone_vocab_path, 'rt', encoding='utf-8') as f:
                 phn_id = [line.strip().split() for line in f.readlines()]
             for phn, id in phn_id:
                 self.vocab_phones[phn] = int(id)
         if tone_vocab_path:
-            with open(tone_vocab_path, 'rt') as f:
+            with open(tone_vocab_path, 'rt', encoding='utf-8') as f:
                 tone_id = [line.strip().split() for line in f.readlines()]
             for tone, id in tone_id:
                 self.vocab_tones[tone] = int(id)
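Every hunk in this patch applies the same one-line change: `open()` calls that read a phones/tones/speaker dictionary or an input text file now pass an explicit `encoding='utf-8'` instead of relying on the platform default text encoding. The sketch below illustrates the pattern outside the patch; the file name `phone_id_map.txt` and its contents are hypothetical examples, not taken from the repository.

# Minimal sketch of the pattern, assuming a dict file with one "<symbol> <id>"
# pair per line. The path and contents are made up for illustration only.
from pathlib import Path

dict_path = Path("phone_id_map.txt")
dict_path.write_text("sil 0\nzh 1\nang 2\n", encoding="utf-8")

# Without encoding='utf-8', open() in text mode uses locale.getpreferredencoding(),
# which is not UTF-8 on some systems (e.g. cp936 on Chinese-locale Windows) and can
# raise UnicodeDecodeError or silently mis-read non-ASCII symbols in the dict file.
with open(dict_path, 'rt', encoding='utf-8') as f:
    phn_id = [line.strip().split() for line in f.readlines()]

vocab_phones = {phn: int(id) for phn, id in phn_id}
vocab_size = len(phn_id)
print("vocab_size:", vocab_size)  # vocab_size: 3

Keeping `'rt'` while adding `encoding='utf-8'` preserves universal-newline text mode but pins the decoder, so dictionary loading behaves identically across Linux, macOS, and Windows.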