From f8f73e41f0ce7c9f85f667bd438ddaab314b88fe Mon Sep 17 00:00:00 2001 From: lym0302 Date: Tue, 16 Aug 2022 06:28:13 +0000 Subject: [PATCH 001/101] fix point bug, test=tts --- paddlespeech/t2s/frontend/mix_frontend.py | 43 +++++++++++++++++++---- 1 file changed, 36 insertions(+), 7 deletions(-) diff --git a/paddlespeech/t2s/frontend/mix_frontend.py b/paddlespeech/t2s/frontend/mix_frontend.py index 5f145098e..8f6822e17 100644 --- a/paddlespeech/t2s/frontend/mix_frontend.py +++ b/paddlespeech/t2s/frontend/mix_frontend.py @@ -62,9 +62,31 @@ class MixFrontend(): def _split(self, text: str) -> List[str]: text = re.sub(r'[《》【】<=>{}()()#&@“”^_|…\\]', '', text) + # 替换英文句子的句号 "." --> "。" 用于后续分句 + point = "." + point_indexs = [] + index = -1 + for i in range(text.count(point)): + index = text.find(".", index + 1, len(text)) + point_indexs.append(index) + + print(point_indexs) + + for point_index in point_indexs: + # 如果点在最开始或者最末尾的位置,不处理 + if point_index == 0 or point_index == len(text) - 1: + pass + else: + if ((self.is_alphabet(text[point_index - 1]) or + text[point_index - 1] == " ") and + (self.is_alphabet(text[point_index + 1]) or + text[point_index + 1] == " ")): + text = text.replace(text[point_index], "。") + text = self.SENTENCE_SPLITOR.sub(r'\1\n', text) text = text.strip() sentences = [sentence.strip() for sentence in re.split(r'\n+', text)] + return sentences def _distinguish(self, text: str) -> List[str]: @@ -77,9 +99,11 @@ class MixFrontend(): temp_seg = "" temp_lang = "" - # Determine the type of each character. type: blank, chinese, alphabet, number, unk. + # Determine the type of each character. type: blank, chinese, alphabet, number, unk and point. for ch in text: - if self.is_chinese(ch): + if ch == ".": + types.append("point") + elif self.is_chinese(ch): types.append("zh") elif self.is_alphabet(ch): types.append("en") @@ -96,21 +120,26 @@ class MixFrontend(): # find the first char of the seg if flag == 0: - if types[i] != "unk" and types[i] != "blank": + # 首个字符是中文,英文或者数字 + if types[i] == "zh" or types[i] == "en" or types[i] == "num": temp_seg += text[i] temp_lang = types[i] flag = 1 else: - if types[i] == temp_lang or types[i] == "num": + # 数字和小数点均与前面的字符合并,类型属于前面一个字符的类型 + if types[i] == temp_lang or types[i] == "num" or types[ + i] == "point": temp_seg += text[i] - elif temp_lang == "num" and types[i] != "unk": + # 数字与后面的任意字符都拼接 + elif temp_lang == "num": temp_seg += text[i] if types[i] == "zh" or types[i] == "en": temp_lang = types[i] - elif temp_lang == "en" and types[i] == "blank": + # 如果是空格则与前面字符拼接 + elif types[i] == "blank": temp_seg += text[i] elif types[i] == "unk": @@ -119,7 +148,7 @@ class MixFrontend(): else: segments.append((temp_seg, temp_lang)) - if types[i] != "unk" and types[i] != "blank": + if types[i] == "zh" or types[i] == "en": temp_seg = text[i] temp_lang = types[i] flag = 1 From b9be2bd64a52315b99ed2fe059fab685181210a4 Mon Sep 17 00:00:00 2001 From: pangchao04 Date: Wed, 17 Aug 2022 15:21:59 +0800 Subject: [PATCH 002/101] add ernie-sat sampler --- paddlespeech/t2s/datasets/sampler.py | 181 ++++++++++++++++++ paddlespeech/t2s/exps/ernie_sat/normalize.py | 2 +- paddlespeech/t2s/exps/ernie_sat/preprocess.py | 2 +- paddlespeech/t2s/exps/ernie_sat/train.py | 3 +- .../t2s/training/updaters/standard_updater.py | 5 +- 5 files changed, 188 insertions(+), 5 deletions(-) create mode 100644 paddlespeech/t2s/datasets/sampler.py diff --git a/paddlespeech/t2s/datasets/sampler.py b/paddlespeech/t2s/datasets/sampler.py new file mode 100644 index 000000000..a69bc8600 --- 
/dev/null +++ b/paddlespeech/t2s/datasets/sampler.py @@ -0,0 +1,181 @@ +import paddle +import math +import numpy as np +from paddle.io import BatchSampler + +class ErnieSATSampler(BatchSampler): + """Sampler that restricts data loading to a subset of the dataset. + In such case, each process can pass a DistributedBatchSampler instance + as a DataLoader sampler, and load a subset of the original dataset that + is exclusive to it. + .. note:: + Dataset is assumed to be of constant size. + + Args: + dataset(paddle.io.Dataset): this could be a `paddle.io.Dataset` implement + or other python object which implemented + `__len__` for BatchSampler to get sample + number of data source. + batch_size(int): sample indice number in a mini-batch indices. + num_replicas(int, optional): porcess number in distributed training. + If :attr:`num_replicas` is None, :attr:`num_replicas` will be + retrieved from :code:`paddle.distributed.ParallenEnv`. + Default None. + rank(int, optional): the rank of the current process among :attr:`num_replicas` + processes. If :attr:`rank` is None, :attr:`rank` is retrieved from + :code:`paddle.distributed.ParallenEnv`. Default None. + shuffle(bool): whther to shuffle indices order before genrating + batch indices. Default False. + drop_last(bool): whether drop the last incomplete batch dataset size + is not divisible by the batch size. Default False + Examples: + .. code-block:: python + import numpy as np + from paddle.io import Dataset, DistributedBatchSampler + # init with dataset + class RandomDataset(Dataset): + def __init__(self, num_samples): + self.num_samples = num_samples + + def __getitem__(self, idx): + image = np.random.random([784]).astype('float32') + label = np.random.randint(0, 9, (1, )).astype('int64') + return image, label + + def __len__(self): + return self.num_samples + + dataset = RandomDataset(100) + sampler = DistributedBatchSampler(dataset, batch_size=64) + for data in sampler: + # do something + break + """ + + def __init__(self, + dataset, + batch_size, + num_replicas=None, + rank=None, + shuffle=False, + drop_last=False): + self.dataset = dataset + + assert isinstance(batch_size, int) and batch_size > 0, \ + "batch_size should be a positive integer" + self.batch_size = batch_size + assert isinstance(shuffle, bool), \ + "shuffle should be a boolean value" + self.shuffle = shuffle + assert isinstance(drop_last, bool), \ + "drop_last should be a boolean number" + + from paddle.fluid.dygraph.parallel import ParallelEnv + + if num_replicas is not None: + assert isinstance(num_replicas, int) and num_replicas > 0, \ + "num_replicas should be a positive integer" + self.nranks = num_replicas + else: + self.nranks = ParallelEnv().nranks + + if rank is not None: + assert isinstance(rank, int) and rank >= 0, \ + "rank should be a non-negative integer" + self.local_rank = rank + else: + self.local_rank = ParallelEnv().local_rank + + self.drop_last = drop_last + self.epoch = 0 + self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.nranks)) + self.total_size = self.num_samples * self.nranks + + def __iter__(self): + num_samples = len(self.dataset) + indices = np.arange(num_samples).tolist() + indices += indices[:(self.total_size - len(indices))] + assert len(indices) == self.total_size + + # subsample + def _get_indices_by_batch_size(indices): + subsampled_indices = [] + last_batch_size = self.total_size % (self.batch_size * self.nranks) + assert last_batch_size % self.nranks == 0 + last_local_batch_size = last_batch_size // self.nranks + + for i in 
range(self.local_rank * self.batch_size, + len(indices) - last_batch_size, + self.batch_size * self.nranks): + subsampled_indices.extend(indices[i:i + self.batch_size]) + + indices = indices[len(indices) - last_batch_size:] + subsampled_indices.extend(indices[ + self.local_rank * last_local_batch_size:( + self.local_rank + 1) * last_local_batch_size]) + return subsampled_indices + + if self.nranks > 1: + indices = _get_indices_by_batch_size(indices) + + assert len(indices) == self.num_samples + _sample_iter = iter(indices) + + batch_indices_list = [] + batch_indices = [] + for idx in _sample_iter: + batch_indices.append(idx) + if len(batch_indices) == self.batch_size: + batch_indices_list.append(batch_indices) + batch_indices = [] + if not self.drop_last and len(batch_indices) > 0: + batch_indices_list.append(batch_indices) + + if self.shuffle: + np.random.RandomState(self.epoch).shuffle(batch_indices_list) + self.epoch += 1 + + for batch_indices in batch_indices_list: + yield batch_indices + + def __len__(self): + num_samples = self.num_samples + num_samples += int(not self.drop_last) * (self.batch_size - 1) + return num_samples // self.batch_size + + def set_epoch(self, epoch): + """ + Sets the epoch number. When :attr:`shuffle=True`, this number is used + as seeds of random numbers. By default, users may not set this, all + replicas (workers) use a different random ordering for each epoch. + If set same number at each epoch, this sampler will yield the same + ordering at all epoches. + Arguments: + epoch (int): Epoch number. + Examples: + .. code-block:: python + + import numpy as np + + from paddle.io import Dataset, DistributedBatchSampler + + # init with dataset + class RandomDataset(Dataset): + def __init__(self, num_samples): + self.num_samples = num_samples + + def __getitem__(self, idx): + image = np.random.random([784]).astype('float32') + label = np.random.randint(0, 9, (1, )).astype('int64') + return image, label + + def __len__(self): + return self.num_samples + + dataset = RandomDataset(100) + sampler = DistributedBatchSampler(dataset, batch_size=64) + + for epoch in range(10): + sampler.set_epoch(epoch) + """ + self.epoch = epoch diff --git a/paddlespeech/t2s/exps/ernie_sat/normalize.py b/paddlespeech/t2s/exps/ernie_sat/normalize.py index 74cdae2a6..fed111475 100644 --- a/paddlespeech/t2s/exps/ernie_sat/normalize.py +++ b/paddlespeech/t2s/exps/ernie_sat/normalize.py @@ -118,7 +118,7 @@ def main(): record["spk_emb"] = str(item["spk_emb"]) output_metadata.append(record) - output_metadata.sort(key=itemgetter('utt_id')) + output_metadata.sort(key=itemgetter('speech_lengths')) output_metadata_path = Path(args.dumpdir) / "metadata.jsonl" with jsonlines.open(output_metadata_path, 'w') as writer: for item in output_metadata: diff --git a/paddlespeech/t2s/exps/ernie_sat/preprocess.py b/paddlespeech/t2s/exps/ernie_sat/preprocess.py index fc9e0888b..486ed13a5 100644 --- a/paddlespeech/t2s/exps/ernie_sat/preprocess.py +++ b/paddlespeech/t2s/exps/ernie_sat/preprocess.py @@ -165,7 +165,7 @@ def process_sentences(config, if record: results.append(record) - results.sort(key=itemgetter("utt_id")) + results.sort(key=itemgetter("speech_lengths")) # replace 'w' with 'a' to write from the end of file with jsonlines.open(output_dir / "metadata.jsonl", 'a') as writer: for item in results: diff --git a/paddlespeech/t2s/exps/ernie_sat/train.py b/paddlespeech/t2s/exps/ernie_sat/train.py index ccd1245e1..af653ef89 100644 --- a/paddlespeech/t2s/exps/ernie_sat/train.py +++ 
b/paddlespeech/t2s/exps/ernie_sat/train.py @@ -31,6 +31,7 @@ from yacs.config import CfgNode from paddlespeech.t2s.datasets.am_batch_fn import build_erniesat_collate_fn from paddlespeech.t2s.datasets.data_table import DataTable +from paddlespeech.t2s.datasets.sampler import ErnieSATSampler from paddlespeech.t2s.models.ernie_sat import ErnieSAT from paddlespeech.t2s.models.ernie_sat import ErnieSATEvaluator from paddlespeech.t2s.models.ernie_sat import ErnieSATUpdater @@ -86,7 +87,7 @@ def train_sp(args, config): seg_emb=config.model['enc_input_layer'] == 'sega_mlm', text_masking=config["model"]["text_masking"]) - train_sampler = DistributedBatchSampler( + train_sampler = ErnieSATSampler( train_dataset, batch_size=config.batch_size, shuffle=True, diff --git a/paddlespeech/t2s/training/updaters/standard_updater.py b/paddlespeech/t2s/training/updaters/standard_updater.py index b1c48620e..668d2fc69 100644 --- a/paddlespeech/t2s/training/updaters/standard_updater.py +++ b/paddlespeech/t2s/training/updaters/standard_updater.py @@ -27,7 +27,7 @@ from timer import timer from paddlespeech.t2s.training.reporter import report from paddlespeech.t2s.training.updater import UpdaterBase from paddlespeech.t2s.training.updater import UpdaterState - +from paddlespeech.t2s.datasets.sampler import ErnieSATSampler class StandardUpdater(UpdaterBase): """An example of over-simplification. Things may not be that simple, but @@ -165,7 +165,8 @@ class StandardUpdater(UpdaterBase): # NOTE: all batch sampler for distributed training should # subclass DistributedBatchSampler and implement `set_epoch` method batch_sampler = self.dataloader.batch_sampler - if isinstance(batch_sampler, DistributedBatchSampler): + if isinstance(batch_sampler, DistributedBatchSampler) \ + or isinstance(batch_sampler, ErnieSATSampler): batch_sampler.set_epoch(self.state.epoch) self.train_iterator = iter(self.dataloader) From f7780658dbec74b2e1f07d87805bcad15aed222d Mon Sep 17 00:00:00 2001 From: TianYuan Date: Tue, 16 Aug 2022 04:54:19 +0000 Subject: [PATCH 003/101] fix tone sand_hi bugs for Chinese frontend --- examples/other/g2p/README.md | 6 +- paddlespeech/resource/pretrained_models.py | 4 +- paddlespeech/t2s/frontend/g2pw/onnx_api.py | 63 ++++++++++++------- paddlespeech/t2s/frontend/polyphonic.yaml | 20 +++++- paddlespeech/t2s/frontend/tone_sandhi.py | 72 ++++++++++++---------- paddlespeech/t2s/frontend/zh_frontend.py | 47 +++++++++----- 6 files changed, 135 insertions(+), 77 deletions(-) diff --git a/examples/other/g2p/README.md b/examples/other/g2p/README.md index 84f5fe234..a8f8f7340 100644 --- a/examples/other/g2p/README.md +++ b/examples/other/g2p/README.md @@ -7,18 +7,18 @@ We use `WER` as an evaluation criterion. # Start Run the command below to get the results of the test. + ```bash ./run.sh ``` -The `avg WER` of g2p is: 0.028952373312476395 +The `avg WER` of g2p is: 0.024219452438490413 ```text ,--------------------------------------------------------------------. 
| ./exp/g2p/text.g2p | |--------------------------------------------------------------------| | SPKR | # Snt # Wrd | Corr Sub Del Ins Err S.Err | - |--------+-----------------+-----------------------------------------| - | Sum/Avg| 9996 299181 | 97.2 2.8 0.0 0.1 2.9 53.3 | + | Sum/Avg| 9996 299181 | 97.6 2.4 0.0 0.0 2.4 49.2 | `--------------------------------------------------------------------' ``` diff --git a/paddlespeech/resource/pretrained_models.py b/paddlespeech/resource/pretrained_models.py index 9d9be0aca..872d564cd 100644 --- a/paddlespeech/resource/pretrained_models.py +++ b/paddlespeech/resource/pretrained_models.py @@ -1359,9 +1359,9 @@ g2pw_onnx_models = { 'G2PWModel': { '1.0': { 'url': - 'https://paddlespeech.bj.bcebos.com/Parakeet/released_models/g2p/G2PWModel.tar', + 'https://paddlespeech.bj.bcebos.com/Parakeet/released_models/g2p/G2PWModel_1.0.zip', 'md5': - '63bc0894af15a5a591e58b2130a2bcac', + '7e049a55547da840502cf99e8a64f20e', }, }, } diff --git a/paddlespeech/t2s/frontend/g2pw/onnx_api.py b/paddlespeech/t2s/frontend/g2pw/onnx_api.py index 3a406ad20..9e708ec88 100644 --- a/paddlespeech/t2s/frontend/g2pw/onnx_api.py +++ b/paddlespeech/t2s/frontend/g2pw/onnx_api.py @@ -31,8 +31,11 @@ from paddlespeech.t2s.frontend.g2pw.dataset import get_char_phoneme_labels from paddlespeech.t2s.frontend.g2pw.dataset import get_phoneme_labels from paddlespeech.t2s.frontend.g2pw.dataset import prepare_onnx_input from paddlespeech.t2s.frontend.g2pw.utils import load_config +from paddlespeech.t2s.frontend.zh_normalization.char_convert import tranditional_to_simplified from paddlespeech.utils.env import MODEL_HOME +model_version = '1.0' + def predict(session, onnx_input, labels): all_preds = [] @@ -62,34 +65,38 @@ class G2PWOnnxConverter: style='bopomofo', model_source=None, enable_non_tradional_chinese=False): - if not os.path.exists(os.path.join(model_dir, 'G2PWModel/g2pW.onnx')): - uncompress_path = download_and_decompress( - g2pw_onnx_models['G2PWModel']['1.0'], model_dir) + uncompress_path = download_and_decompress( + g2pw_onnx_models['G2PWModel'][model_version], model_dir) sess_options = onnxruntime.SessionOptions() sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL sess_options.execution_mode = onnxruntime.ExecutionMode.ORT_SEQUENTIAL sess_options.intra_op_num_threads = 2 self.session_g2pW = onnxruntime.InferenceSession( - os.path.join(model_dir, 'G2PWModel/g2pW.onnx'), + os.path.join(uncompress_path, 'g2pW.onnx'), sess_options=sess_options) self.config = load_config( - os.path.join(model_dir, 'G2PWModel/config.py'), use_default=True) + os.path.join(uncompress_path, 'config.py'), use_default=True) self.model_source = model_source if model_source else self.config.model_source self.enable_opencc = enable_non_tradional_chinese self.tokenizer = BertTokenizer.from_pretrained(self.config.model_source) - polyphonic_chars_path = os.path.join(model_dir, - 'G2PWModel/POLYPHONIC_CHARS.txt') - monophonic_chars_path = os.path.join(model_dir, - 'G2PWModel/MONOPHONIC_CHARS.txt') + polyphonic_chars_path = os.path.join(uncompress_path, + 'POLYPHONIC_CHARS.txt') + monophonic_chars_path = os.path.join(uncompress_path, + 'MONOPHONIC_CHARS.txt') self.polyphonic_chars = [ line.split('\t') for line in open(polyphonic_chars_path, encoding='utf-8').read() .strip().split('\n') ] + self.non_polyphonic = { + '一', '不', '和', '咋', '嗲', '剖', '差', '攢', '倒', '難', '奔', '勁', '拗', + '肖', '瘙', '誒', '泊' + } + self.non_monophonic = {'似', '攢'} self.monophonic_chars = [ 
line.split('\t') for line in open(monophonic_chars_path, encoding='utf-8').read() @@ -101,13 +108,27 @@ class G2PWOnnxConverter: self.polyphonic_chars) self.chars = sorted(list(self.char2phonemes.keys())) + + self.polyphonic_chars_new = set(self.chars) + for char in self.non_polyphonic: + if char in self.polyphonic_chars_new: + self.polyphonic_chars_new.remove(char) + + self.monophonic_chars_dict = { + char: phoneme + for char, phoneme in self.monophonic_chars + } + for char in self.non_monophonic: + if char in self.monophonic_chars_dict: + self.monophonic_chars_dict.pop(char) + self.pos_tags = [ 'UNK', 'A', 'C', 'D', 'I', 'N', 'P', 'T', 'V', 'DE', 'SHI' ] with open( - os.path.join(model_dir, - 'G2PWModel/bopomofo_to_pinyin_wo_tune_dict.json'), + os.path.join(uncompress_path, + 'bopomofo_to_pinyin_wo_tune_dict.json'), 'r', encoding='utf-8') as fr: self.bopomofo_convert_dict = json.load(fr) @@ -117,7 +138,7 @@ class G2PWOnnxConverter: }[style] with open( - os.path.join(model_dir, 'G2PWModel/char_bopomofo_dict.json'), + os.path.join(uncompress_path, 'char_bopomofo_dict.json'), 'r', encoding='utf-8') as fr: self.char_bopomofo_dict = json.load(fr) @@ -175,25 +196,25 @@ class G2PWOnnxConverter: return results def _prepare_data(self, sentences): - polyphonic_chars = set(self.chars) - monophonic_chars_dict = { - char: phoneme - for char, phoneme in self.monophonic_chars - } texts, query_ids, sent_ids, partial_results = [], [], [], [] for sent_id, sent in enumerate(sentences): - pypinyin_result = pinyin(sent, style=Style.TONE3) + # pypinyin works well for Simplified Chinese than Traditional Chinese + sent_s = tranditional_to_simplified(sent) + pypinyin_result = pinyin(sent_s, style=Style.TONE3) partial_result = [None] * len(sent) for i, char in enumerate(sent): - if char in polyphonic_chars: + if char in self.polyphonic_chars_new: texts.append(sent) query_ids.append(i) sent_ids.append(sent_id) - elif char in monophonic_chars_dict: + elif char in self.monophonic_chars_dict: partial_result[i] = self.style_convert_func( - monophonic_chars_dict[char]) + self.monophonic_chars_dict[char]) elif char in self.char_bopomofo_dict: partial_result[i] = pypinyin_result[i][0] # partial_result[i] = self.style_convert_func(self.char_bopomofo_dict[char][0]) + else: + partial_result[i] = pypinyin_result[i][0] + partial_results.append(partial_result) return texts, query_ids, sent_ids, partial_results diff --git a/paddlespeech/t2s/frontend/polyphonic.yaml b/paddlespeech/t2s/frontend/polyphonic.yaml index 629bcd262..2c7cf33fb 100644 --- a/paddlespeech/t2s/frontend/polyphonic.yaml +++ b/paddlespeech/t2s/frontend/polyphonic.yaml @@ -23,4 +23,22 @@ polyphonic: 鸭绿江: ['ya1','lu4','jiang1'] 撒切尔: ['sa4','qie4','er3'] 比比皆是: ['bi3','bi3','jie1','shi4'] - 身无长物: ['shen1','wu2','chang2','wu4'] \ No newline at end of file + 身无长物: ['shen1','wu2','chang2','wu4'] + 手里: ['shou2','li3'] + 关卡: ['guan1','qia3'] + 怀揣: ['huai2','chuai1'] + 挑剔: ['tiao1','ti4'] + 供称: ['gong4','cheng1'] + 作坊: ['zuo1', 'fang5'] + 中医: ['zhong1','yi1'] + 嚷嚷: ['rang1','rang5'] + 商厦: ['shang1','sha4'] + 大厦: ['da4','sha4'] + 刹车: ['sha1','che1'] + 嘚瑟: ['de4','se5'] + 朝鲜: ['chao2','xian3'] + 阿房宫: ['e1','pang2','gong1'] + 阿胶: ['e1','jiao1'] + 咖喱: ['ga1','li5'] + 时分: ['shi2','fen1'] + 蚌埠: ['beng4','bu4'] diff --git a/paddlespeech/t2s/frontend/tone_sandhi.py b/paddlespeech/t2s/frontend/tone_sandhi.py index e3102b9bc..e5ef617a9 100644 --- a/paddlespeech/t2s/frontend/tone_sandhi.py +++ b/paddlespeech/t2s/frontend/tone_sandhi.py @@ -41,30 +41,32 @@ class ToneSandhi(): '棺材', 
'棒槌', '棉花', '核桃', '栅栏', '柴火', '架势', '枕头', '枇杷', '机灵', '本事', '木头', '木匠', '朋友', '月饼', '月亮', '暖和', '明白', '时候', '新鲜', '故事', '收拾', '收成', '提防', '挖苦', '挑剔', '指甲', '指头', '拾掇', '拳头', '拨弄', '招牌', '招呼', - '抬举', '护士', '折腾', '扫帚', '打量', '打算', '打点', '打扮', '打听', '打发', '扎实', - '扁担', '戒指', '懒得', '意识', '意思', '情形', '悟性', '怪物', '思量', '怎么', '念头', - '念叨', '快活', '忙活', '志气', '心思', '得罪', '张罗', '弟兄', '开通', '应酬', '庄稼', - '干事', '帮手', '帐篷', '希罕', '师父', '师傅', '巴结', '巴掌', '差事', '工夫', '岁数', - '屁股', '尾巴', '少爷', '小气', '小伙', '将就', '对头', '对付', '寡妇', '家伙', '客气', - '实在', '官司', '学问', '学生', '字号', '嫁妆', '媳妇', '媒人', '婆家', '娘家', '委屈', - '姑娘', '姐夫', '妯娌', '妥当', '妖精', '奴才', '女婿', '头发', '太阳', '大爷', '大方', - '大意', '大夫', '多少', '多么', '外甥', '壮实', '地道', '地方', '在乎', '困难', '嘴巴', - '嘱咐', '嘟囔', '嘀咕', '喜欢', '喇嘛', '喇叭', '商量', '唾沫', '哑巴', '哈欠', '哆嗦', - '咳嗽', '和尚', '告诉', '告示', '含糊', '吓唬', '后头', '名字', '名堂', '合同', '吆喝', - '叫唤', '口袋', '厚道', '厉害', '千斤', '包袱', '包涵', '匀称', '勤快', '动静', '动弹', - '功夫', '力气', '前头', '刺猬', '刺激', '别扭', '利落', '利索', '利害', '分析', '出息', - '凑合', '凉快', '冷战', '冤枉', '冒失', '养活', '关系', '先生', '兄弟', '便宜', '使唤', - '佩服', '作坊', '体面', '位置', '似的', '伙计', '休息', '什么', '人家', '亲戚', '亲家', - '交情', '云彩', '事情', '买卖', '主意', '丫头', '丧气', '两口', '东西', '东家', '世故', - '不由', '不在', '下水', '下巴', '上头', '上司', '丈夫', '丈人', '一辈', '那个', '菩萨', - '父亲', '母亲', '咕噜', '邋遢', '费用', '冤家', '甜头', '介绍', '荒唐', '大人', '泥鳅', - '幸福', '熟悉', '计划', '扑腾', '蜡烛', '姥爷', '照顾', '喉咙', '吉他', '弄堂', '蚂蚱', - '凤凰', '拖沓', '寒碜', '糟蹋', '倒腾', '报复', '逻辑', '盘缠', '喽啰', '牢骚', '咖喱', - '扫把', '惦记' + '抬举', '护士', '折腾', '扫帚', '打量', '打算', '打扮', '打听', '打发', '扎实', '扁担', + '戒指', '懒得', '意识', '意思', '情形', '悟性', '怪物', '思量', '怎么', '念头', '念叨', + '快活', '忙活', '志气', '心思', '得罪', '张罗', '弟兄', '开通', '应酬', '庄稼', '干事', + '帮手', '帐篷', '希罕', '师父', '师傅', '巴结', '巴掌', '差事', '工夫', '岁数', '屁股', + '尾巴', '少爷', '小气', '小伙', '将就', '对头', '对付', '寡妇', '家伙', '客气', '实在', + '官司', '学问', '字号', '嫁妆', '媳妇', '媒人', '婆家', '娘家', '委屈', '姑娘', '姐夫', + '妯娌', '妥当', '妖精', '奴才', '女婿', '头发', '太阳', '大爷', '大方', '大意', '大夫', + '多少', '多么', '外甥', '壮实', '地道', '地方', '在乎', '困难', '嘴巴', '嘱咐', '嘟囔', + '嘀咕', '喜欢', '喇嘛', '喇叭', '商量', '唾沫', '哑巴', '哈欠', '哆嗦', '咳嗽', '和尚', + '告诉', '告示', '含糊', '吓唬', '后头', '名字', '名堂', '合同', '吆喝', '叫唤', '口袋', + '厚道', '厉害', '千斤', '包袱', '包涵', '匀称', '勤快', '动静', '动弹', '功夫', '力气', + '前头', '刺猬', '刺激', '别扭', '利落', '利索', '利害', '分析', '出息', '凑合', '凉快', + '冷战', '冤枉', '冒失', '养活', '关系', '先生', '兄弟', '便宜', '使唤', '佩服', '作坊', + '体面', '位置', '似的', '伙计', '休息', '什么', '人家', '亲戚', '亲家', '交情', '云彩', + '事情', '买卖', '主意', '丫头', '丧气', '两口', '东西', '东家', '世故', '不由', '下水', + '下巴', '上头', '上司', '丈夫', '丈人', '一辈', '那个', '菩萨', '父亲', '母亲', '咕噜', + '邋遢', '费用', '冤家', '甜头', '介绍', '荒唐', '大人', '泥鳅', '幸福', '熟悉', '计划', + '扑腾', '蜡烛', '姥爷', '照顾', '喉咙', '吉他', '弄堂', '蚂蚱', '凤凰', '拖沓', '寒碜', + '糟蹋', '倒腾', '报复', '逻辑', '盘缠', '喽啰', '牢骚', '咖喱', '扫把', '惦记', '戏弄', + '将军', '别人' } self.must_not_neural_tone_words = { - "男子", "女子", "分子", "原子", "量子", "莲子", "石子", "瓜子", "电子", "人人", "虎虎", - "幺幺" + '男子', '女子', '分子', '原子', '量子', '莲子', '石子', '瓜子', '电子', '人人', '虎虎', + '幺幺', '干嘛', '学子', '哈哈', '数数', '袅袅', '局地', '以下', '娃哈哈', '花花草草', '留得', + '耕地', '想想', '熙熙', '攘攘', '卵子', '死死', '冉冉', '恳恳', '佼佼', '吵吵', '打打', + '考考', '整整', '莘莘' } self.punc = ":,;。?!“”‘’':,;.?!" @@ -75,27 +77,24 @@ class ToneSandhi(): # finals: ['ia1', 'i3'] def _neural_sandhi(self, word: str, pos: str, finals: List[str]) -> List[str]: - + if word in self.must_not_neural_tone_words: + return finals # reduplication words for n. and v. e.g. 
奶奶, 试试, 旺旺 for j, item in enumerate(word): - if j - 1 >= 0 and item == word[j - 1] and pos[0] in { - "n", "v", "a" - } and word not in self.must_not_neural_tone_words: + if j - 1 >= 0 and item == word[j - 1] and pos[0] in {"n", "v", "a"}: finals[j] = finals[j][:-1] + "5" ge_idx = word.find("个") - if len(word) >= 1 and word[-1] in "吧呢哈啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶": + if len(word) >= 1 and word[-1] in "吧呢啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶": finals[-1] = finals[-1][:-1] + "5" elif len(word) >= 1 and word[-1] in "的地得": finals[-1] = finals[-1][:-1] + "5" # e.g. 走了, 看着, 去过 elif len(word) == 1 and word in "了着过" and pos in {"ul", "uz", "ug"}: finals[-1] = finals[-1][:-1] + "5" - elif len(word) > 1 and word[-1] in "们子" and pos in { - "r", "n" - } and word not in self.must_not_neural_tone_words: + elif len(word) > 1 and word[-1] in "们子" and pos in {"r", "n"}: finals[-1] = finals[-1][:-1] + "5" - # e.g. 桌上, 地下, 家里 - elif len(word) > 1 and word[-1] in "上下里" and pos in {"s", "l", "f"}: + # e.g. 桌上, 地下 + elif len(word) > 1 and word[-1] in "上下" and pos in {"s", "l", "f"}: finals[-1] = finals[-1][:-1] + "5" # e.g. 上来, 下去 elif len(word) > 1 and word[-1] in "来去" and word[-2] in "上下进出回过起开": @@ -147,7 +146,7 @@ class ToneSandhi(): for i, char in enumerate(word): if char == "一" and i + 1 < len(word): # "一" before tone4 should be yi2, e.g. 一段 - if finals[i + 1][-1] == "4": + if finals[i + 1][-1] in {'4', '5'}: finals[i] = finals[i][:-1] + "2" # "一" before non-tone4 should be yi4, e.g. 一天 else: @@ -239,7 +238,12 @@ class ToneSandhi(): for i, (word, pos) in enumerate(seg): if i - 1 >= 0 and word == "一" and i + 1 < len(seg) and seg[i - 1][ 0] == seg[i + 1][0] and seg[i - 1][1] == "v": - new_seg[i - 1][0] = new_seg[i - 1][0] + "一" + new_seg[i - 1][0] + if i - 1 < len(new_seg): + new_seg[i - + 1][0] = new_seg[i - 1][0] + "一" + new_seg[i - 1][0] + else: + new_seg.append([word, pos]) + new_seg.append([seg[i + 1][0], pos]) else: if i - 2 >= 0 and seg[i - 1][0] == "一" and seg[i - 2][ 0] == word and pos == "v": diff --git a/paddlespeech/t2s/frontend/zh_frontend.py b/paddlespeech/t2s/frontend/zh_frontend.py index 9513a459c..722eed601 100644 --- a/paddlespeech/t2s/frontend/zh_frontend.py +++ b/paddlespeech/t2s/frontend/zh_frontend.py @@ -84,6 +84,24 @@ class Frontend(): self.tone_modifier = ToneSandhi() self.text_normalizer = TextNormalizer() self.punc = ":,;。?!“”‘’':,;.?!" 
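+        # NOTE: pinyin overrides for pypinyin, loaded via load_phrases_dict() in
+        # _init_pypinyin(); mainly fixes 行 (hang2) in banking terms plus a few colloquial words.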
+ self.phrases_dict = { + '开户行': [['ka1i'], ['hu4'], ['hang2']], + '发卡行': [['fa4'], ['ka3'], ['hang2']], + '放款行': [['fa4ng'], ['kua3n'], ['hang2']], + '茧行': [['jia3n'], ['hang2']], + '行号': [['hang2'], ['ha4o']], + '各地': [['ge4'], ['di4']], + '借还款': [['jie4'], ['hua2n'], ['kua3n']], + '时间为': [['shi2'], ['jia1n'], ['we2i']], + '为准': [['we2i'], ['zhu3n']], + '色差': [['se4'], ['cha1']], + '嗲': [['dia3']], + '呗': [['bei5']], + '不': [['bu4']], + '咗': [['zuo5']], + '嘞': [['lei5']], + '掺和': [['chan1'], ['huo5']] + } # g2p_model can be pypinyin and g2pM and g2pW self.g2p_model = g2p_model if self.g2p_model == "g2pM": @@ -91,6 +109,8 @@ class Frontend(): self.pinyin2phone = generate_lexicon( with_tone=True, with_erhua=False) elif self.g2p_model == "g2pW": + # use pypinyin as backup for non polyphonic characters in g2pW + self._init_pypinyin() self.corrector = Polyphonic() self.g2pM_model = G2pM() self.g2pW_model = G2PWOnnxConverter( @@ -99,8 +119,10 @@ class Frontend(): with_tone=True, with_erhua=False) else: - self.__init__pypinyin() - self.must_erhua = {"小院儿", "胡同儿", "范儿", "老汉儿", "撒欢儿", "寻老礼儿", "妥妥儿"} + self._init_pypinyin() + self.must_erhua = { + "小院儿", "胡同儿", "范儿", "老汉儿", "撒欢儿", "寻老礼儿", "妥妥儿", "媳妇儿" + } self.not_erhua = { "虐儿", "为儿", "护儿", "瞒儿", "救儿", "替儿", "有儿", "一儿", "我儿", "俺儿", "妻儿", "拐儿", "聋儿", "乞儿", "患儿", "幼儿", "孤儿", "婴儿", "婴幼儿", "连体儿", "脑瘫儿", @@ -108,6 +130,7 @@ class Frontend(): "孙儿", "侄孙儿", "女儿", "男儿", "红孩儿", "花儿", "虫儿", "马儿", "鸟儿", "猪儿", "猫儿", "狗儿" } + self.vocab_phones = {} self.vocab_tones = {} if phone_vocab_path: @@ -121,20 +144,9 @@ class Frontend(): for tone, id in tone_id: self.vocab_tones[tone] = int(id) - def __init__pypinyin(self): + def _init_pypinyin(self): large_pinyin.load() - - load_phrases_dict({u'开户行': [[u'ka1i'], [u'hu4'], [u'hang2']]}) - load_phrases_dict({u'发卡行': [[u'fa4'], [u'ka3'], [u'hang2']]}) - load_phrases_dict({u'放款行': [[u'fa4ng'], [u'kua3n'], [u'hang2']]}) - load_phrases_dict({u'茧行': [[u'jia3n'], [u'hang2']]}) - load_phrases_dict({u'行号': [[u'hang2'], [u'ha4o']]}) - load_phrases_dict({u'各地': [[u'ge4'], [u'di4']]}) - load_phrases_dict({u'借还款': [[u'jie4'], [u'hua2n'], [u'kua3n']]}) - load_phrases_dict({u'时间为': [[u'shi2'], [u'jia1n'], [u'we2i']]}) - load_phrases_dict({u'为准': [[u'we2i'], [u'zhu3n']]}) - load_phrases_dict({u'色差': [[u'se4'], [u'cha1']]}) - + load_phrases_dict(self.phrases_dict) # 调整字的拼音顺序 load_single_dict({ord(u'地'): u'de,di4'}) @@ -258,7 +270,6 @@ class Frontend(): phones.append('sp') if v and v not in self.punc: phones.append(v) - phones_list.append(phones) if merge_sentences: merge_list = sum(phones_list, []) @@ -275,6 +286,10 @@ class Frontend(): finals: List[str], word: str, pos: str) -> List[List[str]]: + # fix er1 + for i, phn in enumerate(finals): + if i == len(finals) - 1 and word[i] == "儿" and phn == 'er1': + finals[i] = 'er2' if word not in self.must_erhua and (word in self.not_erhua or pos in {"a", "j", "nr"}): return initials, finals From 0baec4325a8322b9bd0ab5fedb35db794ffa2d04 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Thu, 18 Aug 2022 11:51:24 +0000 Subject: [PATCH 004/101] fix stats bugs --- paddlespeech/cli/base_commands.py | 2 ++ paddlespeech/server/bin/paddlespeech_server.py | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/paddlespeech/cli/base_commands.py b/paddlespeech/cli/base_commands.py index f9e2a55f8..7210091a9 100644 --- a/paddlespeech/cli/base_commands.py +++ b/paddlespeech/cli/base_commands.py @@ -125,9 +125,11 @@ class StatsCommand: "Here is the list of {} pretrained models released by 
PaddleSpeech that can be used by command line and python API" .format(self.task.upper())) self.show_support_models(pretrained_models) + return True except BaseException: print("Failed to get the list of {} pretrained models.".format( self.task.upper())) + return False # Dynamic import when running specific command diff --git a/paddlespeech/server/bin/paddlespeech_server.py b/paddlespeech/server/bin/paddlespeech_server.py index 175e8ffb6..10a91d9be 100644 --- a/paddlespeech/server/bin/paddlespeech_server.py +++ b/paddlespeech/server/bin/paddlespeech_server.py @@ -18,7 +18,6 @@ from typing import List import uvicorn from fastapi import FastAPI -from starlette.middleware.cors import CORSMiddleware from prettytable import PrettyTable from starlette.middleware.cors import CORSMiddleware @@ -46,6 +45,7 @@ app.add_middleware( allow_methods=["*"], allow_headers=["*"]) + @cli_server_register( name='paddlespeech_server.start', description='Start the service') class ServerExecutor(BaseExecutor): @@ -177,7 +177,7 @@ class ServerStatsExecutor(): logger.info( "Here is the table of {} static pretrained models supported in the service.". format(self.task.upper())) - self.show_support_models(pretrained_models) + self.show_support_models(static_pretrained_models) return True From 677e0961a88dbda4908cd1d1d7ff2d52c5bddb38 Mon Sep 17 00:00:00 2001 From: lym0302 Date: Fri, 19 Aug 2022 06:43:27 +0000 Subject: [PATCH 005/101] fix point bug, test=tts --- paddlespeech/t2s/frontend/mix_frontend.py | 87 +++++++++++++++++++---- 1 file changed, 74 insertions(+), 13 deletions(-) diff --git a/paddlespeech/t2s/frontend/mix_frontend.py b/paddlespeech/t2s/frontend/mix_frontend.py index 8f6822e17..6868d3357 100644 --- a/paddlespeech/t2s/frontend/mix_frontend.py +++ b/paddlespeech/t2s/frontend/mix_frontend.py @@ -60,9 +60,16 @@ class MixFrontend(): else: return False - def _split(self, text: str) -> List[str]: - text = re.sub(r'[《》【】<=>{}()()#&@“”^_|…\\]', '', text) - # 替换英文句子的句号 "." --> "。" 用于后续分句 + def is_end(self, before_char, after_char) -> bool: + if ((self.is_alphabet(before_char) or before_char == " ") and (self.is_alphabet(after_char) or after_char == " ")): + return True + else: + return False + + def _replace(self, text: str) -> str: + new_text = "" + + # get "." indexs point = "." point_indexs = [] index = -1 @@ -70,23 +77,77 @@ class MixFrontend(): index = text.find(".", index + 1, len(text)) point_indexs.append(index) - print(point_indexs) + # replace "." -> "。" when English sentence ending + if len(point_indexs) == 0: + new_text = text - for point_index in point_indexs: - # 如果点在最开始或者最末尾的位置,不处理 + elif len(point_indexs) == 1: + point_index = point_indexs[0] if point_index == 0 or point_index == len(text) - 1: - pass + new_text = text else: - if ((self.is_alphabet(text[point_index - 1]) or - text[point_index - 1] == " ") and - (self.is_alphabet(text[point_index + 1]) or - text[point_index + 1] == " ")): - text = text.replace(text[point_index], "。") + if not self.is_end(text[point_index - 1], text[point_index + 1]): + new_text = text + else: + new_text = text[: point_index] + "。" + text[point_index + 1:] + elif len(point_indexs) == 2: + first_index = point_indexs[0] + end_index = point_indexs[1] + + # first + if first_index != 0: + if not self.is_end(text[first_index - 1], text[first_index + 1]): + new_text += (text[:first_index] + ".") + else: + new_text += (text[:first_index] + "。") + else: + new_text += "." 
+ # last + if end_index != len(text) - 1: + if not self.is_end(text[end_index - 1], text[end_index + 1]): + new_text += text[point_indexs[-2] + 1 : ] + else: + new_text += (text[point_indexs[-2] + 1 : end_index] + "。" + text[end_index + 1 : ]) + else: + new_text += "." + + else: + first_index = point_indexs[0] + end_index = point_indexs[-1] + # first + if first_index != 0: + if not self.is_end(text[first_index - 1], text[first_index + 1]): + new_text += (text[:first_index] + ".") + else: + new_text += (text[:first_index] + "。") + else: + new_text += "." + # middle + for j in range(1, len(point_indexs) - 1): + point_index = point_indexs[j] + if not self.is_end(text[point_index - 1], text[point_index + 1]): + new_text += (text[point_indexs[j-1] + 1 : point_index] + ".") + else: + new_text += (text[point_indexs[j-1] + 1 : point_index] + "。") + # last + if end_index != len(text) - 1: + if not self.is_end(text[end_index - 1], text[end_index + 1]): + new_text += text[point_indexs[-2] + 1 : ] + else: + new_text += (text[point_indexs[-2] + 1 : end_index] + "。" + text[end_index + 1 : ]) + else: + new_text += "." + + return new_text + + def _split(self, text: str) -> List[str]: + text = re.sub(r'[《》【】<=>{}()()#&@“”^_|…\\]', '', text) + # 替换英文句子的句号 "." --> "。" 用于后续分句 + text = self._replace(text) text = self.SENTENCE_SPLITOR.sub(r'\1\n', text) text = text.strip() sentences = [sentence.strip() for sentence in re.split(r'\n+', text)] - return sentences def _distinguish(self, text: str) -> List[str]: From c3865f2ab75ee0229ca928cde52d0067925b440f Mon Sep 17 00:00:00 2001 From: Ming Date: Fri, 19 Aug 2022 15:19:00 +0800 Subject: [PATCH 006/101] update readme and add aistudio demo, test=doc (#2270) --- README.md | 185 +++++++++++++++++++++++++++++++++++++++++++-------- README_cn.md | 6 +- 2 files changed, 162 insertions(+), 29 deletions(-) diff --git a/README.md b/README.md index e35289e2b..3d1859f1f 100644 --- a/README.md +++ b/README.md @@ -180,62 +180,191 @@ Via the easy-to-use, efficient, flexible and scalable implementation, our vision ## Installation We strongly recommend our users to install PaddleSpeech in **Linux** with *python>=3.7* and *paddlepaddle>=2.3.1*. -Up to now, **Linux** supports CLI for the all our tasks, **Mac OSX** and **Windows** only supports PaddleSpeech CLI for Audio Classification, Speech-to-Text and Text-to-Speech. To install `PaddleSpeech`, please see [installation](./docs/source/install.md). + +### **Dependency Introduction** + ++ gcc >= 4.8.5 ++ paddlepaddle >= 2.3.1 ++ python >= 3.7 ++ OS support: Linux(recommend), Windows, Mac OSX + +PaddleSpeech depends on paddlepaddle. For installation, please refer to the official website of [paddlepaddle](https://www.paddlepaddle.org.cn/en) and choose according to your own machine. Here is an example of the cpu version. + +```bash +pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple +``` + +There are two quick installation methods for PaddleSpeech, one is pip installation, and the other is source code compilation (recommended). +### pip install + +```shell +pip install pytest-runner +pip install paddlespeech +``` + +### source code compilation + +```shell +git clone https://github.com/PaddlePaddle/PaddleSpeech.git +cd PaddleSpeech +pip install pytest-runner +pip install . +``` + +For more installation problems, such as conda environment, librosa-dependent, gcc problems, kaldi installation, etc., you can refer to this [installation document](./docs/source/install.md). 
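A quick way to confirm the install is usable is to import the packages and run PaddlePaddle's built-in self-check — a minimal sanity check, assuming `paddlepaddle` and `paddlespeech` installed without errors:

```python
# Verify that the Paddle backend and the PaddleSpeech package import correctly.
import paddle
import paddlespeech  # noqa: F401  (import succeeds only if installation worked)

# Built-in PaddlePaddle check: runs a small computation on the available device.
paddle.utils.run_check()
```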
If you encounter problems during installation, you can leave a message on [#2150](https://github.com/PaddlePaddle/PaddleSpeech/issues/2150) and find related problems ## Quick Start -Developers can have a try of our models with [PaddleSpeech Command Line](./paddlespeech/cli/README.md). Change `--input` to test your own audio/text. +Developers can have a try of our models with [PaddleSpeech Command Line](./paddlespeech/cli/README.md) or Python. Change `--input` to test your own audio/text and support 16k wav format audio. + +**You can also quickly experience it in AI Studio 👉🏻 [PaddleSpeech API Demo](https://aistudio.baidu.com/aistudio/projectdetail/4353348?sUid=2470186&shared=1&ts=1660876445786)** + + +Test audio sample download -**Audio Classification** ```shell -paddlespeech cls --input input.wav +wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav +wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/en.wav ``` -**Speaker Verification** +### Automatic Speech Recognition + +
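The recognizers below expect 16 kHz, mono WAV input. If your own recording uses a different format, one way to convert it is with `librosa` and `soundfile` (both are typically installed alongside PaddleSpeech) — a small sketch, where `my_audio.wav` stands in for your file:

```python
import librosa
import soundfile

# Load any audio file, downmixing to mono and resampling to 16 kHz.
samples, sample_rate = librosa.load("my_audio.wav", sr=16000, mono=True)

# Write a 16 kHz mono WAV that the examples below can consume directly.
soundfile.write("my_audio_16k.wav", samples, sample_rate)
```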
 (Click to expand)Open Source Speech Recognition + +**command line experience** + +```shell +paddlespeech asr --lang zh --input zh.wav ``` -paddlespeech vector --task spk --input input_16k.wav + +**Python API experience** + +```python +>>> from paddlespeech.cli.asr.infer import ASRExecutor +>>> asr = ASRExecutor() +>>> result = asr(audio_file="zh.wav") +>>> print(result) +我认为跑步最重要的就是给我带来了身体健康 ``` +
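The example above uses the default Chinese model. For the English sample downloaded earlier (`en.wav`), you can point the executor at an English model — a sketch that assumes the `model`, `lang` and `sample_rate` keyword arguments are available in your installed release (check `paddlespeech asr --help` if not):

```python
from paddlespeech.cli.asr.infer import ASRExecutor

asr = ASRExecutor()

# English recognition; model/lang/sample_rate are assumed keyword arguments here.
result = asr(
    audio_file="en.wav",
    model="transformer_librispeech",
    lang="en",
    sample_rate=16000)
print(result)
```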
+ +### Text-to-Speech + +
 Open Source Speech Synthesis + +Output 24k sample rate wav format audio + + +**command line experience** -**Automatic Speech Recognition** ```shell -paddlespeech asr --lang zh --input input_16k.wav +paddlespeech tts --input "你好,欢迎使用百度飞桨深度学习框架!" --output output.wav ``` -- web demo for Automatic Speech Recognition is integrated to [Huggingface Spaces](https://huggingface.co/spaces) with [Gradio](https://github.com/gradio-app/gradio). See Demo: [ASR Demo](https://huggingface.co/spaces/KPatrick/PaddleSpeechASR) -**Speech Translation** (English to Chinese) -(not support for Mac and Windows now) +**Python API experience** + +```python +>>> from paddlespeech.cli.tts.infer import TTSExecutor +>>> tts = TTSExecutor() +>>> tts(text="今天天气十分不错。", output="output.wav") +``` +- You can experience in [Huggingface Spaces](https://huggingface.co/spaces) [TTS Demo](https://huggingface.co/spaces/KPatrick/PaddleSpeechTTS) + +
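For more than one sentence (an earlier revision of this README showed a shell pipeline that piped numbered lines into `paddlespeech tts`), the Python API can simply be called in a loop so the models are loaded only once — a short sketch with example sentences:

```python
from paddlespeech.cli.tts.infer import TTSExecutor

tts = TTSExecutor()

# Example sentences; replace with your own text.
sentences = ["欢迎光临。", "谢谢惠顾。"]

for i, sentence in enumerate(sentences):
    # Each call reuses the executor, writing one WAV file per sentence.
    tts(text=sentence, output="output_{}.wav".format(i))
```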
+ +### Audio Classification + +
 An open-domain sound classification tool + +Sound classification model based on 527 categories of AudioSet dataset + +**command line experience** + ```shell -paddlespeech st --input input_16k.wav +paddlespeech cls --input zh.wav ``` -**Text-to-Speech** +**Python API experience** + +```python +>>> from paddlespeech.cli.cls.infer import CLSExecutor +>>> cls = CLSExecutor() +>>> result = cls(audio_file="zh.wav") +>>> print(result) +Speech 0.9027186632156372 +``` + +
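The call above prints only the single best label. Since the classifier covers 527 AudioSet tags, several candidates are often more informative — the sketch below assumes your installed version exposes a `topk` argument on the executor (mirroring the CLI's `--topk` flag); treat it as an assumption and check the help output if it errors:

```python
from paddlespeech.cli.cls.infer import CLSExecutor

cls = CLSExecutor()

# Ask for the five best-scoring AudioSet labels instead of just the top one.
# NOTE: the `topk` keyword is assumed here, not guaranteed for every release.
result = cls(audio_file="zh.wav", topk=5)
print(result)
```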
+ +### Voiceprint Extraction + +
 Industrial-grade voiceprint extraction tool + +**command line experience** + ```shell -paddlespeech tts --input "你好,欢迎使用飞桨深度学习框架!" --output output.wav +paddlespeech vector --task spk --input zh.wav ``` -- web demo for Text to Speech is integrated to [Huggingface Spaces](https://huggingface.co/spaces) with [Gradio](https://github.com/gradio-app/gradio). See Demo: [TTS Demo](https://huggingface.co/spaces/KPatrick/PaddleSpeechTTS) -**Text Postprocessing** -- Punctuation Restoration - ```bash - paddlespeech text --task punc --input 今天的天气真不错啊你下午有空吗我想约你一起去吃饭 - ``` +**Python API experience** -**Batch Process** +```python +>>> from paddlespeech.cli.vector import VectorExecutor +>>> vec = VectorExecutor() +>>> result = vec(audio_file="zh.wav") +>>> print(result) # 187维向量 +[ -0.19083306 9.474295 -14.122263 -2.0916545 0.04848729 + 4.9295826 1.4780062 0.3733844 10.695862 3.2697146 + -4.48199 -0.6617882 -9.170393 -11.1568775 -1.2358263 ...] ``` -echo -e "1 欢迎光临。\n2 谢谢惠顾。" | paddlespeech tts + +
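The executor returns a fixed-length speaker embedding. To check whether two recordings come from the same speaker, the usual approach is to extract one embedding per file and compare them with cosine similarity — a minimal sketch using NumPy, where the two file names are only placeholders and the decision threshold has to be tuned on your own data:

```python
import numpy as np

from paddlespeech.cli.vector import VectorExecutor

vec = VectorExecutor()

# One embedding per utterance (replace the file names with your own recordings).
emb1 = np.asarray(vec(audio_file="enroll.wav"))
emb2 = np.asarray(vec(audio_file="test.wav"))

# Cosine similarity: values closer to 1.0 mean the voices are more alike.
score = float(np.dot(emb1, emb2) / (np.linalg.norm(emb1) * np.linalg.norm(emb2)))
print(score)
```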
+ +### Punctuation Restoration + +
 Quick recovery of text punctuation, works with ASR models + +**command line experience** + +```shell +paddlespeech text --task punc --input 今天的天气真不错啊你下午有空吗我想约你一起去吃饭 ``` -**Shell Pipeline** -- ASR + Punctuation Restoration +**Python API experience** + +```python +>>> from paddlespeech.cli.text.infer import TextExecutor +>>> text_punc = TextExecutor() +>>> result = text_punc(text="今天的天气真不错啊你下午有空吗我想约你一起去吃饭") +今天的天气真不错啊!你下午有空吗?我想约你一起去吃饭。 ``` -paddlespeech asr --input ./zh.wav | paddlespeech text --task punc + +
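Punctuation restoration pairs naturally with speech recognition, since the raw transcript comes back unpunctuated (an earlier revision of this README exposed the same idea as a shell pipeline, `paddlespeech asr --input ./zh.wav | paddlespeech text --task punc`). One way to chain the two executors shown above through the Python API:

```python
from paddlespeech.cli.asr.infer import ASRExecutor
from paddlespeech.cli.text.infer import TextExecutor

asr = ASRExecutor()
text_punc = TextExecutor()

# 1) Transcribe the audio, then 2) restore punctuation on the raw transcript.
transcript = asr(audio_file="zh.wav")
print(text_punc(text=transcript))
```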
+ +### Speech Translation + +
 End-to-end English to Chinese Speech Translation Tool + +Use pre-compiled kaldi related tools, only support experience in Ubuntu system + +**command line experience** + +```shell +paddlespeech st --input en.wav ``` -For more command lines, please see: [demos](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/demos) +**Python API experience** -If you want to try more functions like training and tuning, please have a look at [Speech-to-Text Quick Start](./docs/source/asr/quick_start.md) and [Text-to-Speech Quick Start](./docs/source/tts/quick_start.md). +```python +>>> from paddlespeech.cli.st.infer import STExecutor +>>> st = STExecutor() +>>> result = st(audio_file="en.wav") +['我 在 这栋 建筑 的 古老 门上 敲门 。'] +``` + +
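The translation comes back as a list of whitespace-segmented strings, as shown above. If you want plain Chinese text, the segmentation spaces can simply be removed — a tiny post-processing step on the example output:

```python
# Example output of st(audio_file="en.wav") from above.
segments = ['我 在 这栋 建筑 的 古老 门上 敲门 。']

# Strip the word-segmentation spaces to obtain plain Chinese text.
plain = ["".join(segment.split()) for segment in segments]
print(plain[0])  # 我在这栋建筑的古老门上敲门。
```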
@@ -243,6 +372,8 @@ If you want to try more functions like training and tuning, please have a look a Developers can have a try of our speech server with [PaddleSpeech Server Command Line](./paddlespeech/server/README.md). +**You can try it quickly in AI Studio (recommend): [SpeechServer](https://aistudio.baidu.com/aistudio/projectdetail/4354592?sUid=2470186&shared=1&ts=1660877827034)** + **Start server** ```shell diff --git a/README_cn.md b/README_cn.md index 1c6a949fd..e18823151 100644 --- a/README_cn.md +++ b/README_cn.md @@ -225,7 +225,7 @@ pip install . 安装完成后,开发者可以通过命令行或者Python快速开始,命令行模式下改变 `--input` 可以尝试用自己的音频或文本测试,支持16k wav格式音频。 -你也可以在`aistudio`中快速体验 👉🏻[PaddleSpeech API Demo ](https://aistudio.baidu.com/aistudio/projectdetail/4281335?shared=1)。 +你也可以在`aistudio`中快速体验 👉🏻[一键预测,快速上手Speech开发任务](https://aistudio.baidu.com/aistudio/projectdetail/4353348?sUid=2470186&shared=1&ts=1660878142250)。 测试音频示例下载 ```shell @@ -373,7 +373,9 @@ python API 一键预测 ## 快速使用服务 -安装完成后,开发者可以通过命令行一键启动语音识别,语音合成,音频分类三种服务。 +安装完成后,开发者可以通过命令行一键启动语音识别,语音合成,音频分类等多种服务。 + +你可以在 AI Studio 中快速体验:[SpeechServer一键部署](https://aistudio.baidu.com/aistudio/projectdetail/4354592?sUid=2470186&shared=1&ts=1660878208266) **启动服务** ```shell From 99977b2f7e13c2c5002e0be365ebbfd72cb688ab Mon Sep 17 00:00:00 2001 From: Hui Zhang Date: Mon, 22 Aug 2022 13:54:15 +0800 Subject: [PATCH 007/101] Update README.md --- examples/aishell/asr0/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/aishell/asr0/README.md b/examples/aishell/asr0/README.md index 4459b1382..131de36e3 100644 --- a/examples/aishell/asr0/README.md +++ b/examples/aishell/asr0/README.md @@ -197,7 +197,7 @@ In some situations, you want to use the trained model to do the inference for th ```bash if [ ${stage} -le 6 ] && [ ${stop_stage} -ge 6 ]; then # test a single .wav file - CUDA_VISIBLE_DEVICES=0 ./local/test_wav.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} ${model_type} ${audio_file} + CUDA_VISIBLE_DEVICES=0 ./local/test_wav.sh ${conf_path} ${decode_conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} ${model_type} ${audio_file} fi ``` you can train the model by yourself, or you can download the pretrained model by the script below: @@ -211,5 +211,5 @@ wget -nc https://paddlespeech.bj.bcebos.com/datasets/single_wav/zh/demo_01_03.wa ``` You need to prepare an audio file or use the audio demo above, please confirm the sample rate of the audio is 16K. You can get the result of the audio demo by running the script below. 
```bash -CUDA_VISIBLE_DEVICES= ./local/test_wav.sh conf/deepspeech2.yaml exp/deepspeech2/checkpoints/avg_1 data/demo_01_03.wav +CUDA_VISIBLE_DEVICES= ./local/test_wav.sh conf/deepspeech2.yaml conf/tuning/decode.yaml exp/deepspeech2/checkpoints/avg_1 data/demo_01_03.wav ``` From 979f75e4831d70d9e1fc29cceef29c1b3a73b944 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Mon, 22 Aug 2022 16:09:51 +0800 Subject: [PATCH 008/101] [doc]updade readme for mix tts (#2284) * format g2pw * fix tone sand_hi bugs for Chinese frontend * fix stats bugs * fix point bug, test=tts * fix point bug, test=tts * update readme for mix tts, test=doc Co-authored-by: liangym <34430015+lym0302@users.noreply.github.com> Co-authored-by: lym0302 --- README.md | 4 +-- README_cn.md | 19 ++++++------ demos/text_to_speech/README.md | 48 +++++++++++++++++++++-------- demos/text_to_speech/README_cn.md | 51 ++++++++++++++++++++++--------- 4 files changed, 83 insertions(+), 39 deletions(-) diff --git a/README.md b/README.md index 3d1859f1f..122704d2d 100644 --- a/README.md +++ b/README.md @@ -558,9 +558,9 @@ PaddleSpeech supports a series of most popular models. They are summarized in [r FastSpeech2 - LJSpeech / VCTK / CSMSC / AISHELL-3 + LJSpeech / VCTK / CSMSC / AISHELL-3 / ZH_EN - fastspeech2-ljspeech / fastspeech2-vctk / fastspeech2-csmsc / fastspeech2-aishell3 + fastspeech2-ljspeech / fastspeech2-vctk / fastspeech2-csmsc / fastspeech2-aishell3 / fastspeech2-zh_en diff --git a/README_cn.md b/README_cn.md index e18823151..ca42e71f6 100644 --- a/README_cn.md +++ b/README_cn.md @@ -196,13 +196,13 @@ + python >= 3.7 + linux(推荐), mac, windows -PaddleSpeech依赖于paddlepaddle,安装可以参考[paddlepaddle官网](https://www.paddlepaddle.org.cn/),根据自己机器的情况进行选择。这里给出cpu版本示例,其它版本大家可以根据自己机器的情况进行安装。 +PaddleSpeech 依赖于 paddlepaddle,安装可以参考[ paddlepaddle 官网](https://www.paddlepaddle.org.cn/),根据自己机器的情况进行选择。这里给出 cpu 版本示例,其它版本大家可以根据自己机器的情况进行安装。 ```shell pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple ``` -PaddleSpeech快速安装方式有两种,一种是pip安装,一种是源码编译(推荐)。 +PaddleSpeech 快速安装方式有两种,一种是 pip 安装,一种是源码编译(推荐)。 ### pip 安装 ```shell @@ -223,9 +223,9 @@ pip install . ## 快速开始 -安装完成后,开发者可以通过命令行或者Python快速开始,命令行模式下改变 `--input` 可以尝试用自己的音频或文本测试,支持16k wav格式音频。 +安装完成后,开发者可以通过命令行或者 Python 快速开始,命令行模式下改变 `--input` 可以尝试用自己的音频或文本测试,支持 16k wav 格式音频。 -你也可以在`aistudio`中快速体验 👉🏻[一键预测,快速上手Speech开发任务](https://aistudio.baidu.com/aistudio/projectdetail/4353348?sUid=2470186&shared=1&ts=1660878142250)。 +你也可以在 `aistudio` 中快速体验 👉🏻[一键预测,快速上手 Speech 开发任务](https://aistudio.baidu.com/aistudio/projectdetail/4353348?sUid=2470186&shared=1&ts=1660878142250)。 测试音频示例下载 ```shell @@ -281,7 +281,7 @@ Python API 一键预测
 适配多场景的开放领域声音分类工具 -基于AudioSet数据集527个类别的声音分类模型 +基于 AudioSet 数据集 527 个类别的声音分类模型 命令行一键体验 @@ -350,7 +350,7 @@ Python API 一键预测
 端到端英译中语音翻译工具 -使用预编译的kaldi相关工具,只支持在Ubuntu系统中体验 +使用预编译的 kaldi 相关工具,只支持在 Ubuntu 系统中体验 命令行一键体验 @@ -370,12 +370,11 @@ python API 一键预测
- ## 快速使用服务 安装完成后,开发者可以通过命令行一键启动语音识别,语音合成,音频分类等多种服务。 -你可以在 AI Studio 中快速体验:[SpeechServer一键部署](https://aistudio.baidu.com/aistudio/projectdetail/4354592?sUid=2470186&shared=1&ts=1660878208266) +你可以在 AI Studio 中快速体验:[SpeechServer 一键部署](https://aistudio.baidu.com/aistudio/projectdetail/4354592?sUid=2470186&shared=1&ts=1660878208266) **启动服务** ```shell @@ -554,9 +553,9 @@ PaddleSpeech 的 **语音合成** 主要包含三个模块:文本前端、声 FastSpeech2 - LJSpeech / VCTK / CSMSC / AISHELL-3 + LJSpeech / VCTK / CSMSC / AISHELL-3 / ZH_EN - fastspeech2-ljspeech / fastspeech2-vctk / fastspeech2-csmsc / fastspeech2-aishell3 + fastspeech2-ljspeech / fastspeech2-vctk / fastspeech2-csmsc / fastspeech2-aishell3 / fastspeech2-zh_en diff --git a/demos/text_to_speech/README.md b/demos/text_to_speech/README.md index 389847a12..3288ecf2f 100644 --- a/demos/text_to_speech/README.md +++ b/demos/text_to_speech/README.md @@ -45,7 +45,19 @@ The input of this demo should be a text of the specific language that can be pas You can change `spk_id` here. ```bash paddlespeech tts --am fastspeech2_vctk --voc pwgan_vctk --input "hello, boys" --lang en --spk_id 0 - ``` + ``` + - Chinese English Mixed, multi-speaker + You can change `spk_id` here. + ```bash + # The `am` must be `fastspeech2_mix`! + # The `lang` must be `mix`! + # The voc must be chinese datasets' voc now! + # spk 174 is csmcc, spk 175 is ljspeech + paddlespeech tts --am fastspeech2_mix --voc hifigan_csmsc --lang mix --input "热烈欢迎您在 Discussions 中提交问题,并在 Issues 中指出发现的 bug。此外,我们非常希望您参与到 Paddle Speech 的开发中!" --spk_id 174 --output mix_spk174.wav + paddlespeech tts --am fastspeech2_mix --voc hifigan_aishell3 --lang mix --input "热烈欢迎您在 Discussions 中提交问题,并在 Issues 中指出发现的 bug。此外,我们非常希望您参与到 Paddle Speech 的开发中!" --spk_id 174 --output mix_spk174_aishell3.wav + paddlespeech tts --am fastspeech2_mix --voc pwgan_csmsc --lang mix --input "我们的声学模型使用了 Fast Speech Two, 声码器使用了 Parallel Wave GAN and Hifi GAN." --spk_id 175 --output mix_spk175_pwgan.wav + paddlespeech tts --am fastspeech2_mix --voc hifigan_csmsc --lang mix --input "我们的声学模型使用了 Fast Speech Two, 声码器使用了 Parallel Wave GAN and Hifi GAN." 
--spk_id 175 --output mix_spk175.wav + ``` Usage: ```bash @@ -110,19 +122,29 @@ The input of this demo should be a text of the specific language that can be pas Here is a list of pretrained models released by PaddleSpeech that can be used by command and python API: - Acoustic model - | Model | Language + | Model | Language | | :--- | :---: | - | speedyspeech_csmsc| zh - | fastspeech2_csmsc| zh - | fastspeech2_aishell3| zh - | fastspeech2_ljspeech| en - | fastspeech2_vctk| en + | speedyspeech_csmsc | zh | + | fastspeech2_csmsc | zh | + | fastspeech2_ljspeech | en | + | fastspeech2_aishell3 | zh | + | fastspeech2_vctk | en | + | fastspeech2_cnndecoder_csmsc | zh | + | fastspeech2_mix | mix | + | tacotron2_csmsc | zh | + | tacotron2_ljspeech | en | - Vocoder - | Model | Language + | Model | Language | | :--- | :---: | - | pwgan_csmsc| zh - | pwgan_aishell3| zh - | pwgan_ljspeech| en - | pwgan_vctk| en - | mb_melgan_csmsc| zh + | pwgan_csmsc | zh | + | pwgan_ljspeech | en | + | pwgan_aishell3 | zh | + | pwgan_vctk | en | + | mb_melgan_csmsc | zh | + | style_melgan_csmsc | zh | + | hifigan_csmsc | zh | + | hifigan_ljspeech | en | + | hifigan_aishell3 | zh | + | hifigan_vctk | en | + | wavernn_csmsc | zh | diff --git a/demos/text_to_speech/README_cn.md b/demos/text_to_speech/README_cn.md index f967d3d4d..ec5eb5ae9 100644 --- a/demos/text_to_speech/README_cn.md +++ b/demos/text_to_speech/README_cn.md @@ -34,7 +34,7 @@ ``` - 中文, 多说话人 - 你可以改变 `spk_id` 。 + 你可以改变 `spk_id`。 ```bash paddlespeech tts --am fastspeech2_aishell3 --voc pwgan_aishell3 --input "你好,欢迎使用百度飞桨深度学习框架!" --spk_id 0 ``` @@ -45,10 +45,23 @@ ``` - 英文,多说话人 - 你可以改变 `spk_id` 。 + 你可以改变 `spk_id`。 ```bash paddlespeech tts --am fastspeech2_vctk --voc pwgan_vctk --input "hello, boys" --lang en --spk_id 0 ``` + - 中英文混合,多说话人 + 你可以改变 `spk_id`。 + ```bash + # The `am` must be `fastspeech2_mix`! + # The `lang` must be `mix`! + # The voc must be chinese datasets' voc now! + # spk 174 is csmcc, spk 175 is ljspeech + paddlespeech tts --am fastspeech2_mix --voc hifigan_csmsc --lang mix --input "热烈欢迎您在 Discussions 中提交问题,并在 Issues 中指出发现的 bug。此外,我们非常希望您参与到 Paddle Speech 的开发中!" --spk_id 174 --output mix_spk174.wav + paddlespeech tts --am fastspeech2_mix --voc hifigan_aishell3 --lang mix --input "热烈欢迎您在 Discussions 中提交问题,并在 Issues 中指出发现的 bug。此外,我们非常希望您参与到 Paddle Speech 的开发中!" --spk_id 174 --output mix_spk174_aishell3.wav + paddlespeech tts --am fastspeech2_mix --voc pwgan_csmsc --lang mix --input "我们的声学模型使用了 Fast Speech Two, 声码器使用了 Parallel Wave GAN and Hifi GAN." --spk_id 175 --output mix_spk175_pwgan.wav + paddlespeech tts --am fastspeech2_mix --voc hifigan_csmsc --lang mix --input "我们的声学模型使用了 Fast Speech Two, 声码器使用了 Parallel Wave GAN and Hifi GAN." 
--spk_id 175 --output mix_spk175.wav + ``` + 使用方法: ```bash @@ -112,19 +125,29 @@ 以下是 PaddleSpeech 提供的可以被命令行和 python API 使用的预训练模型列表: - 声学模型 - | 模型 | 语言 + | 模型 | 语言 | | :--- | :---: | - | speedyspeech_csmsc| zh - | fastspeech2_csmsc| zh - | fastspeech2_aishell3| zh - | fastspeech2_ljspeech| en - | fastspeech2_vctk| en + | speedyspeech_csmsc | zh | + | fastspeech2_csmsc | zh | + | fastspeech2_ljspeech | en | + | fastspeech2_aishell3 | zh | + | fastspeech2_vctk | en | + | fastspeech2_cnndecoder_csmsc | zh | + | fastspeech2_mix | mix | + | tacotron2_csmsc | zh | + | tacotron2_ljspeech | en | - 声码器 - | 模型 | 语言 + | 模型 | 语言 | | :--- | :---: | - | pwgan_csmsc| zh - | pwgan_aishell3| zh - | pwgan_ljspeech| en - | pwgan_vctk| en - | mb_melgan_csmsc| zh + | pwgan_csmsc | zh | + | pwgan_ljspeech | en | + | pwgan_aishell3 | zh | + | pwgan_vctk | en | + | mb_melgan_csmsc | zh | + | style_melgan_csmsc | zh | + | hifigan_csmsc | zh | + | hifigan_ljspeech | en | + | hifigan_aishell3 | zh | + | hifigan_vctk | en | + | wavernn_csmsc | zh | From 3f9339edfffb5df21a127dda478ce4d30c3b0d9e Mon Sep 17 00:00:00 2001 From: TianYuan Date: Mon, 22 Aug 2022 17:48:03 +0800 Subject: [PATCH 009/101] Update polyphonic.yaml --- paddlespeech/t2s/frontend/polyphonic.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/paddlespeech/t2s/frontend/polyphonic.yaml b/paddlespeech/t2s/frontend/polyphonic.yaml index 2c7cf33fb..de60d1a39 100644 --- a/paddlespeech/t2s/frontend/polyphonic.yaml +++ b/paddlespeech/t2s/frontend/polyphonic.yaml @@ -42,3 +42,4 @@ polyphonic: 咖喱: ['ga1','li5'] 时分: ['shi2','fen1'] 蚌埠: ['beng4','bu4'] + 驯服: ['xun4','fu2'] From 5a58a274926c39d991d49b19d0d0a4fddc5992a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=8E=E5=AD=90?= <54951765+kslz@users.noreply.github.com> Date: Tue, 23 Aug 2022 12:55:18 +0800 Subject: [PATCH 010/101] =?UTF-8?q?[TTS]=E6=8C=87=E5=AE=9AG2PW=E7=9A=84?= =?UTF-8?q?=E4=BC=A0=E5=85=A5=E6=95=B0=E6=8D=AE=E7=B1=BB=E5=9E=8B=20,=20te?= =?UTF-8?q?st=3Dtts=20(#2288)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix ONNXRuntimeError Specify data type (int64),test=tts * Tactron2→Tacotron2 ,test=doc --- docs/source/released_model.md | 2 +- docs/source/tts/quick_start.md | 4 ++-- docs/source/tts/quick_start_cn.md | 4 ++-- docs/tutorial/tts/tts_tutorial.ipynb | 2 +- examples/aishell3/README.md | 4 ++-- examples/csmsc/README.md | 2 +- examples/ljspeech/README.md | 2 +- examples/vctk/README.md | 2 +- paddlespeech/t2s/frontend/g2pw/dataset.py | 10 +++++----- 9 files changed, 16 insertions(+), 16 deletions(-) diff --git a/docs/source/released_model.md b/docs/source/released_model.md index a1e3eb879..8d0ff1d47 100644 --- a/docs/source/released_model.md +++ b/docs/source/released_model.md @@ -67,7 +67,7 @@ WaveRNN | CSMSC |[WaveRNN-csmsc](https://github.com/PaddlePaddle/PaddleSpeech/tr Model Type | Dataset| Example Link | Pretrained Models :-------------:| :------------:| :-----: | :-----: | GE2E| AISHELL-3, etc. 
|[ge2e](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/other/ge2e)|[ge2e_ckpt_0.3.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ge2e/ge2e_ckpt_0.3.zip) -GE2E + Tactron2| AISHELL-3 |[ge2e-tactron2-aishell3](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/vc0)|[tacotron2_aishell3_ckpt_vc0_0.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/tacotron2/tacotron2_aishell3_ckpt_vc0_0.2.0.zip) +GE2E + Tacotron2| AISHELL-3 |[ge2e-Tacotron2-aishell3](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/vc0)|[tacotron2_aishell3_ckpt_vc0_0.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/tacotron2/tacotron2_aishell3_ckpt_vc0_0.2.0.zip) GE2E + FastSpeech2 | AISHELL-3 |[ge2e-fastspeech2-aishell3](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/vc1)|[fastspeech2_nosil_aishell3_vc1_ckpt_0.5.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_nosil_aishell3_vc1_ckpt_0.5.zip) diff --git a/docs/source/tts/quick_start.md b/docs/source/tts/quick_start.md index bddee7786..d8dbc646c 100644 --- a/docs/source/tts/quick_start.md +++ b/docs/source/tts/quick_start.md @@ -7,7 +7,7 @@ The examples in PaddleSpeech are mainly classified by datasets, the TTS datasets * VCTK (English multiple speakers) The models in PaddleSpeech TTS have the following mapping relationship: -* tts0 - Tactron2 +* tts0 - Tacotron2 * tts1 - TransformerTTS * tts2 - SpeedySpeech * tts3 - FastSpeech2 @@ -17,7 +17,7 @@ The models in PaddleSpeech TTS have the following mapping relationship: * voc3 - MultiBand MelGAN * voc4 - Style MelGAN * voc5 - HiFiGAN -* vc0 - Tactron2 Voice Clone with GE2E +* vc0 - Tacotron2 Voice Clone with GE2E * vc1 - FastSpeech2 Voice Clone with GE2E ## Quick Start diff --git a/docs/source/tts/quick_start_cn.md b/docs/source/tts/quick_start_cn.md index 37246e84e..c56d9bb45 100644 --- a/docs/source/tts/quick_start_cn.md +++ b/docs/source/tts/quick_start_cn.md @@ -9,7 +9,7 @@ PaddleSpeech 的 TTS 模型具有以下映射关系: -* tts0 - Tactron2 +* tts0 - Tacotron2 * tts1 - TransformerTTS * tts2 - SpeedySpeech * tts3 - FastSpeech2 @@ -19,7 +19,7 @@ PaddleSpeech 的 TTS 模型具有以下映射关系: * voc3 - MultiBand MelGAN * voc4 - Style MelGAN * voc5 - HiFiGAN -* vc0 - Tactron2 Voice Clone with GE2E +* vc0 - Tacotron2 Voice Clone with GE2E * vc1 - FastSpeech2 Voice Clone with GE2E ## 快速开始 diff --git a/docs/tutorial/tts/tts_tutorial.ipynb b/docs/tutorial/tts/tts_tutorial.ipynb index 81f713efa..583adb014 100644 --- a/docs/tutorial/tts/tts_tutorial.ipynb +++ b/docs/tutorial/tts/tts_tutorial.ipynb @@ -769,7 +769,7 @@ "```\n", "我们在每个数据集的 README.md 介绍了子目录和模型的对应关系, 在 TTS 中有如下对应关系:\n", "```text\n", - "tts0 - Tactron2\n", + "tts0 - Tacotron2\n", "tts1 - TransformerTTS\n", "tts2 - SpeedySpeech\n", "tts3 - FastSpeech2\n", diff --git a/examples/aishell3/README.md b/examples/aishell3/README.md index 273f488e4..191974dec 100644 --- a/examples/aishell3/README.md +++ b/examples/aishell3/README.md @@ -1,6 +1,6 @@ # Aishell3 -* tts0 - Tactron2 +* tts0 - Tacotron2 * tts1 - TransformerTTS * tts2 - SpeedySpeech * tts3 - FastSpeech2 @@ -8,5 +8,5 @@ * voc1 - Parallel WaveGAN * voc2 - MelGAN * voc3 - MultiBand MelGAN -* vc0 - Tactron2 Voice Cloning with GE2E +* vc0 - Tacotron2 Voice Cloning with GE2E * vc1 - FastSpeech2 Voice Cloning with GE2E diff --git a/examples/csmsc/README.md b/examples/csmsc/README.md index 2aad609cb..77375faa8 100644 --- a/examples/csmsc/README.md +++ 
b/examples/csmsc/README.md @@ -1,7 +1,7 @@ # CSMSC -* tts0 - Tactron2 +* tts0 - Tacotron2 * tts1 - TransformerTTS * tts2 - SpeedySpeech * tts3 - FastSpeech2 diff --git a/examples/ljspeech/README.md b/examples/ljspeech/README.md index 67b1bf473..ccafdb141 100644 --- a/examples/ljspeech/README.md +++ b/examples/ljspeech/README.md @@ -1,7 +1,7 @@ # LJSpeech -* tts0 - Tactron2 +* tts0 - Tacotron2 * tts1 - TransformerTTS * tts2 - SpeedySpeech * tts3 - FastSpeech2 diff --git a/examples/vctk/README.md b/examples/vctk/README.md index 4007c0319..ac5fd24f8 100644 --- a/examples/vctk/README.md +++ b/examples/vctk/README.md @@ -1,7 +1,7 @@ # VCTK -* tts0 - Tactron2 +* tts0 - Tacotron2 * tts1 - TransformerTTS * tts2 - SpeedySpeech * tts3 - FastSpeech2 diff --git a/paddlespeech/t2s/frontend/g2pw/dataset.py b/paddlespeech/t2s/frontend/g2pw/dataset.py index ab715dc36..98af5f463 100644 --- a/paddlespeech/t2s/frontend/g2pw/dataset.py +++ b/paddlespeech/t2s/frontend/g2pw/dataset.py @@ -81,12 +81,12 @@ def prepare_onnx_input(tokenizer, position_ids.append(position_id) outputs = { - 'input_ids': np.array(input_ids), - 'token_type_ids': np.array(token_type_ids), - 'attention_masks': np.array(attention_masks), + 'input_ids': np.array(input_ids).astype(np.int64), + 'token_type_ids': np.array(token_type_ids).astype(np.int64), + 'attention_masks': np.array(attention_masks).astype(np.int64), 'phoneme_masks': np.array(phoneme_masks).astype(np.float32), - 'char_ids': np.array(char_ids), - 'position_ids': np.array(position_ids), + 'char_ids': np.array(char_ids).astype(np.int64), + 'position_ids': np.array(position_ids).astype(np.int64), } return outputs From 2aef6958de17ce0c121c92bc1d9d63ba2d79edff Mon Sep 17 00:00:00 2001 From: Zhao Yuting <91456992+THUzyt21@users.noreply.github.com> Date: Wed, 24 Aug 2022 11:23:27 +0800 Subject: [PATCH 011/101] Create preprocess.py If there are no spaces between sentences in your text file, use this file to generate a new file, which adds spaces between each token. 
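A note on the G2PW change in PATCH 010 above: onnxruntime validates the dtype of every feed tensor against the type declared in the exported graph, and the G2PW graph declares its id/mask inputs as int64. If the numpy arrays come out as int32 (the default on some platforms, notably Windows) the session rejects them with an invalid-argument error, so the patch pins every integer feed to `int64` and `phoneme_masks` to `float32`. A minimal sketch of the idea — the model path and the toy ids below are placeholders, not the exact PaddleSpeech call site:

```python
import numpy as np
import onnxruntime as ort

# Placeholder model path; inspect what the exported graph actually declares.
sess = ort.InferenceSession("g2pW.onnx")
for node in sess.get_inputs():
    print(node.name, node.type)   # the id/mask inputs report "tensor(int64)"

# Cast explicitly, as prepare_onnx_input() now does, so the feed always matches.
input_ids = np.array([[101, 672, 102]]).astype(np.int64)
token_type_ids = np.zeros((1, 3), dtype=np.int64)
phoneme_masks = np.ones((1, 8)).astype(np.float32)
```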
--- examples/iwslt2012/punc0/preprocess.py | 27 ++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 examples/iwslt2012/punc0/preprocess.py diff --git a/examples/iwslt2012/punc0/preprocess.py b/examples/iwslt2012/punc0/preprocess.py new file mode 100644 index 000000000..c6645bdb5 --- /dev/null +++ b/examples/iwslt2012/punc0/preprocess.py @@ -0,0 +1,27 @@ +import argparse +import os + +def process_sentence(line): + if line == '': return '' + res = line[0] + for i in range(1, len(line)): + res += (' ' + line[i]) + return res + +if __name__ == "__main__": + paser = argparse.ArgumentParser(description = "Input filename") + paser.add_argument('-input_file') + paser.add_argument('-output_file') + sentence_cnt = 0 + args = paser.parse_args() + with open(args.input_file, 'r') as f: + with open(args.output_file, 'w') as write_f: + while True: + line = f.readline() + if line: + sentence_cnt += 1 + write_f.write(process_sentence(line)) + else: + break + print('preprocess over') + print('total sentences number:', sentence_cnt) From d2f7362aa718cab9378f18b31f0fcb5e924a232a Mon Sep 17 00:00:00 2001 From: Zhao Yuting <91456992+THUzyt21@users.noreply.github.com> Date: Wed, 24 Aug 2022 14:40:39 +0800 Subject: [PATCH 012/101] Delete preprocess.py --- examples/iwslt2012/punc0/preprocess.py | 27 -------------------------- 1 file changed, 27 deletions(-) delete mode 100644 examples/iwslt2012/punc0/preprocess.py diff --git a/examples/iwslt2012/punc0/preprocess.py b/examples/iwslt2012/punc0/preprocess.py deleted file mode 100644 index c6645bdb5..000000000 --- a/examples/iwslt2012/punc0/preprocess.py +++ /dev/null @@ -1,27 +0,0 @@ -import argparse -import os - -def process_sentence(line): - if line == '': return '' - res = line[0] - for i in range(1, len(line)): - res += (' ' + line[i]) - return res - -if __name__ == "__main__": - paser = argparse.ArgumentParser(description = "Input filename") - paser.add_argument('-input_file') - paser.add_argument('-output_file') - sentence_cnt = 0 - args = paser.parse_args() - with open(args.input_file, 'r') as f: - with open(args.output_file, 'w') as write_f: - while True: - line = f.readline() - if line: - sentence_cnt += 1 - write_f.write(process_sentence(line)) - else: - break - print('preprocess over') - print('total sentences number:', sentence_cnt) From 9473b8468c2c979f69017be4eff9df80edc16c31 Mon Sep 17 00:00:00 2001 From: Zhao Yuting <91456992+THUzyt21@users.noreply.github.com> Date: Wed, 24 Aug 2022 14:42:31 +0800 Subject: [PATCH 013/101] Create preprocess.py If there are no spaces between sentences in your text file, use this file to generate a new file, which adds spaces between each token. 
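The helper recreated under `local/` in this patch rewrites each input line as single-character tokens separated by spaces, presumably the token-per-space format the `iwslt2012/punc0` data prep expects. For any line, `process_sentence()` is equivalent to a plain `" ".join(...)`; a small sketch of the transform, with hypothetical file names for the invocation:

```python
# Same transform as process_sentence() in the script above.
line = "今天天气很好"
print(" ".join(line))   # -> 今 天 天 气 很 好

# Hypothetical invocation (note the single-dash flags defined via argparse):
#   python local/preprocess.py -input_file raw.txt -output_file spaced.txt
```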
--- examples/iwslt2012/punc0/local/preprocess.py | 27 ++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 examples/iwslt2012/punc0/local/preprocess.py diff --git a/examples/iwslt2012/punc0/local/preprocess.py b/examples/iwslt2012/punc0/local/preprocess.py new file mode 100644 index 000000000..03b27e89f --- /dev/null +++ b/examples/iwslt2012/punc0/local/preprocess.py @@ -0,0 +1,27 @@ +import argparse +import os + +def process_sentence(line): + if line == '': return '' + res = line[0] + for i in range(1, len(line)): + res += (' ' + line[i]) + return res + +if __name__ == "__main__": + paser = argparse.ArgumentParser(description = "Input filename") + paser.add_argument('-input_file') + paser.add_argument('-output_file') + sentence_cnt = 0 + args = paser.parse_args() + with open(args.input_file, 'r') as f: + with open(args.output_file, 'w') as write_f: + while True: + line = f.readline() + if line: + sentence_cnt += 1 + write_f.write(process_sentence(line)) + else: + break + print('preprocess over') + print('total sentences number:', sentence_cnt) From c1d45510553105d3eef3652cf30d69385ecb9e8b Mon Sep 17 00:00:00 2001 From: TianYuan Date: Wed, 24 Aug 2022 18:34:11 +0800 Subject: [PATCH 014/101] add ernie sat synthesize_e2e, test=tts (#2287) --- examples/aishell3/README.md | 1 + examples/aishell3/ernie_sat/README.md | 152 ++++++- examples/aishell3/ernie_sat/conf/default.yaml | 9 +- .../aishell3/ernie_sat/local/synthesize.sh | 23 +- .../ernie_sat/local/synthesize_e2e.sh | 52 +++ examples/aishell3/ernie_sat/local/train.sh | 4 +- examples/aishell3/ernie_sat/run.sh | 6 +- examples/aishell3_vctk/README.md | 1 + examples/aishell3_vctk/ernie_sat/README.md | 162 +++++++- .../aishell3_vctk/ernie_sat/conf/default.yaml | 9 +- .../ernie_sat/local/synthesize.sh | 23 +- .../ernie_sat/local/synthesize_e2e.sh | 53 +++ .../aishell3_vctk/ernie_sat/local/train.sh | 2 +- examples/aishell3_vctk/ernie_sat/run.sh | 6 +- examples/vctk/README.md | 1 + examples/vctk/ernie_sat/README.md | 153 ++++++- examples/vctk/ernie_sat/conf/default.yaml | 7 +- examples/vctk/ernie_sat/local/synthesize.sh | 26 +- .../vctk/ernie_sat/local/synthesize_e2e.sh | 52 +++ examples/vctk/ernie_sat/local/train.sh | 2 +- examples/vctk/ernie_sat/run.sh | 6 +- paddlespeech/t2s/exps/ernie_sat/align.py | 15 +- .../t2s/exps/ernie_sat/synthesize_e2e.py | 379 ++++++++++++------ paddlespeech/t2s/exps/syn_utils.py | 4 + .../t2s/models/ernie_sat/ernie_sat.py | 4 +- 25 files changed, 937 insertions(+), 215 deletions(-) create mode 100755 examples/aishell3/ernie_sat/local/synthesize_e2e.sh create mode 100755 examples/aishell3_vctk/ernie_sat/local/synthesize_e2e.sh create mode 100755 examples/vctk/ernie_sat/local/synthesize_e2e.sh diff --git a/examples/aishell3/README.md b/examples/aishell3/README.md index 191974dec..e022cef42 100644 --- a/examples/aishell3/README.md +++ b/examples/aishell3/README.md @@ -10,3 +10,4 @@ * voc3 - MultiBand MelGAN * vc0 - Tacotron2 Voice Cloning with GE2E * vc1 - FastSpeech2 Voice Cloning with GE2E +* ernie_sat - ERNIE-SAT diff --git a/examples/aishell3/ernie_sat/README.md b/examples/aishell3/ernie_sat/README.md index 8086d007c..707ee1381 100644 --- a/examples/aishell3/ernie_sat/README.md +++ b/examples/aishell3/ernie_sat/README.md @@ -1 +1,151 @@ -# ERNIE SAT with AISHELL3 dataset +# ERNIE-SAT with AISHELL3 dataset + +ERNIE-SAT 是可以同时处理中英文的跨语言的语音-语言跨模态大模型,其在语音编辑、个性化语音合成以及跨语言的语音合成等多个任务取得了领先效果。可以应用于语音编辑、个性化合成、语音克隆、同传翻译等一系列场景,该项目供研究使用。 + +## 模型框架 +ERNIE-SAT 中我们提出了两项创新: +- 在预训练过程中将中英双语对应的音素作为输入,实现了跨语言、个性化的软音素映射 +- 
采用语言和语音的联合掩码学习实现了语言和语音的对齐 + +
+ +## Dataset +### Download and Extract +Download AISHELL-3 from it's [Official Website](http://www.aishelltech.com/aishell_3) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/data_aishell3`. + +### Get MFA Result and Extract +We use [MFA2.x](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) to get durations for aishell3_fastspeech2. +You can download from here [aishell3_alignment_tone.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/AISHELL-3/with_tone/aishell3_alignment_tone.tar.gz), or train your MFA model reference to [mfa example](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/other/mfa) (use MFA1.x now) of our repo. + +## Get Started +Assume the path to the dataset is `~/datasets/data_aishell3`. +Assume the path to the MFA result of AISHELL-3 is `./aishell3_alignment_tone`. +Run the command below to +1. **source path**. +2. preprocess the dataset. +3. train the model. +4. synthesize wavs. + - synthesize waveform from `metadata.jsonl`. + - synthesize waveform from text file. + +```bash +./run.sh +``` +You can choose a range of stages you want to run, or set `stage` equal to `stop-stage` to use only one stage, for example, running the following command will only preprocess the dataset. +```bash +./run.sh --stage 0 --stop-stage 0 +``` +### Data Preprocessing +```bash +./local/preprocess.sh ${conf_path} +``` +When it is done. A `dump` folder is created in the current directory. The structure of the dump folder is listed below. + +```text +dump +├── dev +│ ├── norm +│ └── raw +├── phone_id_map.txt +├── speaker_id_map.txt +├── test +│ ├── norm +│ └── raw +└── train + ├── norm + ├── raw + └── speech_stats.npy +``` +The dataset is split into 3 parts, namely `train`, `dev`, and` test`, each of which contains a `norm` and `raw` subfolder. The raw folder contains speech features of each utterance, while the norm folder contains normalized ones. The statistics used to normalize features are computed from the training set, which is located in `dump/train/*_stats.npy`. + +Also, there is a `metadata.jsonl` in each subfolder. It is a table-like file that contains phones, text_lengths, speech_lengths, durations, the path of speech features, speaker, and id of each utterance. + +### Model Training +```bash +CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path} +``` +`./local/train.sh` calls `${BIN_DIR}/train.py`. + +### Synthesizing +We use [HiFiGAN](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/voc5) as the neural vocoder. + +Download pretrained HiFiGAN model from [hifigan_aishell3_ckpt_0.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/hifigan/hifigan_aishell3_ckpt_0.2.0.zip) and unzip it. +```bash +unzip hifigan_aishell3_ckpt_0.2.0.zip +``` +HiFiGAN checkpoint contains files listed below. +```text +hifigan_aishell3_ckpt_0.2.0 +├── default.yaml # default config used to train HiFiGAN +├── feats_stats.npy # statistics used to normalize spectrogram when training HiFiGAN +└── snapshot_iter_2500000.pdz # generator parameters of HiFiGAN +``` +`./local/synthesize.sh` calls `${BIN_DIR}/../synthesize.py`, which can synthesize waveform from `metadata.jsonl`. 
+```bash +CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name} +``` +## Speech Synthesis and Speech Editing +### Prepare +**prepare aligner** +```bash +mkdir -p tools/aligner +cd tools +# download MFA +wget https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner/releases/download/v1.0.1/montreal-forced-aligner_linux.tar.gz +# extract MFA +tar xvf montreal-forced-aligner_linux.tar.gz +# fix .so of MFA +cd montreal-forced-aligner/lib +ln -snf libpython3.6m.so.1.0 libpython3.6m.so +cd - +# download align models and dicts +cd aligner +wget https://paddlespeech.bj.bcebos.com/MFA/ernie_sat/aishell3_model.zip +wget https://paddlespeech.bj.bcebos.com/MFA/AISHELL-3/with_tone/simple.lexicon +wget https://paddlespeech.bj.bcebos.com/MFA/ernie_sat/vctk_model.zip +wget https://paddlespeech.bj.bcebos.com/MFA/LJSpeech-1.1/cmudict-0.7b +cd ../../ +``` +**prepare pretrained FastSpeech2 models** + +ERNIE-SAT use FastSpeech2 as phoneme duration predictor: +```bash +mkdir download +cd download +wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_conformer_baker_ckpt_0.5.zip +wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_nosil_ljspeech_ckpt_0.5.zip +unzip fastspeech2_conformer_baker_ckpt_0.5.zip +unzip fastspeech2_nosil_ljspeech_ckpt_0.5.zip +cd ../ +``` +**prepare source data** +```bash +mkdir source +cd source +wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/SSB03540307.wav +wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/SSB03540428.wav +wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/LJ050-0278.wav +wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/p243_313.wav +wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/p299_096.wav +wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/this_was_not_the_show_for_me.wav +wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/README.md +cd ../ +``` + +You can check the text of downloaded wavs in `source/README.md`. +### Speech Synthesis and Speech Editing +```bash +./run.sh --stage 3 --stop-stage 3 --gpus 0 +``` +`stage 3` of `run.sh` calls `local/synthesize_e2e.sh`, `stage 0` of it is **Speech Synthesis** and `stage 1` of it is **Speech Editing**. + +You can modify `--wav_path`、`--old_str` and `--new_str` yourself, `--old_str` should be the text corresponding to the audio of `--wav_path`, `--new_str` should be designed according to `--task_name`, both `--source_lang` and `--target_lang` should be `zh` for model trained with AISHELL3 dataset. +## Pretrained Model +Pretrained ErnieSAT model: +- [erniesat_aishell3_ckpt_1.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/erniesat_aishell3_ckpt_1.2.0.zip) + +Model | Step | eval/mlm_loss | eval/loss +:-------------:| :------------:| :-----: | :-----: +default| 8(gpu) x 289500|51.723782|51.723782 diff --git a/examples/aishell3/ernie_sat/conf/default.yaml b/examples/aishell3/ernie_sat/conf/default.yaml index fdc767fb0..dbd5c467e 100644 --- a/examples/aishell3/ernie_sat/conf/default.yaml +++ b/examples/aishell3/ernie_sat/conf/default.yaml @@ -1,3 +1,6 @@ +# This configuration tested on 8 GPUs (A100) with 80GB GPU memory. 
+# It takes around 3 days to finish the training,You can adjust +# batch_size、num_workers here and ngpu in local/train.sh for your machine ########################################################### # FEATURE EXTRACTION SETTING # ########################################################### @@ -21,8 +24,8 @@ mlm_prob: 0.8 ########################################################### # DATA SETTING # ########################################################### -batch_size: 20 -num_workers: 2 +batch_size: 40 +num_workers: 8 ########################################################### # MODEL SETTING # @@ -280,4 +283,4 @@ token_list: - o3 - iang5 - ei5 -- \ No newline at end of file +- diff --git a/examples/aishell3/ernie_sat/local/synthesize.sh b/examples/aishell3/ernie_sat/local/synthesize.sh index 3e907427c..8b4178f13 100755 --- a/examples/aishell3/ernie_sat/local/synthesize.sh +++ b/examples/aishell3/ernie_sat/local/synthesize.sh @@ -4,28 +4,11 @@ config_path=$1 train_output_path=$2 ckpt_name=$3 -stage=1 -stop_stage=1 - -# pwgan -if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then - FLAGS_allocator_strategy=naive_best_fit \ - FLAGS_fraction_of_gpu_memory_to_use=0.01 \ - python3 ${BIN_DIR}/synthesize.py \ - --erniesat_config=${config_path} \ - --erniesat_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ - --erniesat_stat=dump/train/speech_stats.npy \ - --voc=pwgan_aishell3 \ - --voc_config=pwg_aishell3_ckpt_0.5/default.yaml \ - --voc_ckpt=pwg_aishell3_ckpt_0.5/snapshot_iter_1000000.pdz \ - --voc_stat=pwg_aishell3_ckpt_0.5/feats_stats.npy \ - --test_metadata=dump/test/norm/metadata.jsonl \ - --output_dir=${train_output_path}/test \ - --phones_dict=dump/phone_id_map.txt -fi +stage=0 +stop_stage=0 # hifigan -if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then FLAGS_allocator_strategy=naive_best_fit \ FLAGS_fraction_of_gpu_memory_to_use=0.01 \ python3 ${BIN_DIR}/synthesize.py \ diff --git a/examples/aishell3/ernie_sat/local/synthesize_e2e.sh b/examples/aishell3/ernie_sat/local/synthesize_e2e.sh new file mode 100755 index 000000000..b33e8ca09 --- /dev/null +++ b/examples/aishell3/ernie_sat/local/synthesize_e2e.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +config_path=$1 +train_output_path=$2 +ckpt_name=$3 + +stage=0 +stop_stage=1 + +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + echo 'speech synthesize !' + FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/synthesize_e2e.py \ + --task_name=synthesize \ + --wav_path=source/SSB03540307.wav\ + --old_str='请播放歌曲小苹果。' \ + --new_str='歌曲真好听。' \ + --source_lang=zh \ + --target_lang=zh \ + --erniesat_config=${config_path} \ + --phones_dict=dump/phone_id_map.txt \ + --erniesat_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --erniesat_stat=dump/train/speech_stats.npy \ + --voc=hifigan_aishell3 \ + --voc_config=hifigan_aishell3_ckpt_0.2.0/default.yaml \ + --voc_ckpt=hifigan_aishell3_ckpt_0.2.0/snapshot_iter_2500000.pdz \ + --voc_stat=hifigan_aishell3_ckpt_0.2.0/feats_stats.npy \ + --output_name=exp/pred_gen.wav +fi + +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + echo 'speech edit !' 
+ FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/synthesize_e2e.py \ + --task_name=edit \ + --wav_path=source/SSB03540428.wav \ + --old_str='今天天气很好' \ + --new_str='今天心情很好' \ + --source_lang=zh \ + --target_lang=zh \ + --erniesat_config=${config_path} \ + --phones_dict=dump/phone_id_map.txt \ + --erniesat_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --erniesat_stat=dump/train/speech_stats.npy \ + --voc=hifigan_aishell3 \ + --voc_config=hifigan_aishell3_ckpt_0.2.0/default.yaml \ + --voc_ckpt=hifigan_aishell3_ckpt_0.2.0/snapshot_iter_2500000.pdz \ + --voc_stat=hifigan_aishell3_ckpt_0.2.0/feats_stats.npy \ + --output_name=exp/pred_edit.wav +fi diff --git a/examples/aishell3/ernie_sat/local/train.sh b/examples/aishell3/ernie_sat/local/train.sh index 30720e8f5..829310832 100755 --- a/examples/aishell3/ernie_sat/local/train.sh +++ b/examples/aishell3/ernie_sat/local/train.sh @@ -8,5 +8,5 @@ python3 ${BIN_DIR}/train.py \ --dev-metadata=dump/dev/norm/metadata.jsonl \ --config=${config_path} \ --output-dir=${train_output_path} \ - --ngpu=2 \ - --phones-dict=dump/phone_id_map.txt \ No newline at end of file + --ngpu=8 \ + --phones-dict=dump/phone_id_map.txt diff --git a/examples/aishell3/ernie_sat/run.sh b/examples/aishell3/ernie_sat/run.sh index d75a19f23..cb354de41 100755 --- a/examples/aishell3/ernie_sat/run.sh +++ b/examples/aishell3/ernie_sat/run.sh @@ -9,7 +9,7 @@ stop_stage=100 conf_path=conf/default.yaml train_output_path=exp/default -ckpt_name=snapshot_iter_153.pdz +ckpt_name=snapshot_iter_289500.pdz # with the following command, you can choose the stage range you want to run # such as `./run.sh --stage 0 --stop-stage 0` @@ -30,3 +30,7 @@ if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then # synthesize, vocoder is pwgan CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1 fi + +if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then + CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1 +fi diff --git a/examples/aishell3_vctk/README.md b/examples/aishell3_vctk/README.md index 330b25934..12213da2a 100644 --- a/examples/aishell3_vctk/README.md +++ b/examples/aishell3_vctk/README.md @@ -1 +1,2 @@ # Mixed Chinese and English TTS with AISHELL3 and VCTK datasets +* ernie_sat - ERNIE-SAT diff --git a/examples/aishell3_vctk/ernie_sat/README.md b/examples/aishell3_vctk/ernie_sat/README.md index 1c6bbe230..777bea326 100644 --- a/examples/aishell3_vctk/ernie_sat/README.md +++ b/examples/aishell3_vctk/ernie_sat/README.md @@ -1 +1,161 @@ -# ERNIE SAT with AISHELL3 and VCTK dataset +# ERNIE-SAT with AISHELL3 and VCTK dataset + +ERNIE-SAT 是可以同时处理中英文的跨语言的语音-语言跨模态大模型,其在语音编辑、个性化语音合成以及跨语言的语音合成等多个任务取得了领先效果。可以应用于语音编辑、个性化合成、语音克隆、同传翻译等一系列场景,该项目供研究使用。 + +## 模型框架 +ERNIE-SAT 中我们提出了两项创新: +- 在预训练过程中将中英双语对应的音素作为输入,实现了跨语言、个性化的软音素映射 +- 采用语言和语音的联合掩码学习实现了语言和语音的对齐 + +
+ +## Dataset +### Download and Extract +Download all datasets and extract it to `~/datasets`: +- The aishell3 dataset is in the directory `~/datasets/data_aishell3` +- The vctk dataset is in the directory `~/datasets/VCTK-Corpus-0.92` + +### Get MFA Result and Extract +We use [MFA](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) to get durations for the fastspeech2 training. +You can download from here: +- [aishell3_alignment_tone.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/AISHELL-3/with_tone/aishell3_alignment_tone.tar.gz) +- [vctk_alignment.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/VCTK-Corpus-0.92/vctk_alignment.tar.gz) + +Or train your MFA model reference to [mfa example](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/other/mfa) (use MFA1.x now) of our repo. + +## Get Started +Assume the paths to the datasets are: +- `~/datasets/data_aishell3` +- `~/datasets/VCTK-Corpus-0.92` +Assume the path to the MFA results of the datasets are: +- `./aishell3_alignment_tone` +- `./vctk_alignment` +Run the command below to +1. **source path**. +2. preprocess the dataset. +3. train the model. +4. synthesize wavs. + - synthesize waveform from `metadata.jsonl`. + - synthesize waveform from text file. + +```bash +./run.sh +``` +You can choose a range of stages you want to run, or set `stage` equal to `stop-stage` to use only one stage, for example, running the following command will only preprocess the dataset. +```bash +./run.sh --stage 0 --stop-stage 0 +``` +### Data Preprocessing +```bash +./local/preprocess.sh ${conf_path} +``` +When it is done. A `dump` folder is created in the current directory. The structure of the dump folder is listed below. + +```text +dump +├── dev +│ ├── norm +│ └── raw +├── phone_id_map.txt +├── speaker_id_map.txt +├── test +│ ├── norm +│ └── raw +└── train + ├── norm + ├── raw + └── speech_stats.npy +``` +The dataset is split into 3 parts, namely `train`, `dev`, and` test`, each of which contains a `norm` and `raw` subfolder. The raw folder contains speech features of each utterance, while the norm folder contains normalized ones. The statistics used to normalize features are computed from the training set, which is located in `dump/train/*_stats.npy`. + +Also, there is a `metadata.jsonl` in each subfolder. It is a table-like file that contains phones, text_lengths, speech_lengths, durations, the path of speech features, speaker, and id of each utterance. + +### Model Training +```bash +CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path} +``` +`./local/train.sh` calls `${BIN_DIR}/train.py`. + +### Synthesizing +We use [HiFiGAN](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/voc5) as the neural vocoder. + +Download pretrained HiFiGAN model from [hifigan_aishell3_ckpt_0.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/hifigan/hifigan_aishell3_ckpt_0.2.0.zip) and unzip it. +```bash +unzip hifigan_aishell3_ckpt_0.2.0.zip +``` +HiFiGAN checkpoint contains files listed below. +```text +hifigan_aishell3_ckpt_0.2.0 +├── default.yaml # default config used to train HiFiGAN +├── feats_stats.npy # statistics used to normalize spectrogram when training HiFiGAN +└── snapshot_iter_2500000.pdz # generator parameters of HiFiGAN +``` +`./local/synthesize.sh` calls `${BIN_DIR}/../synthesize.py`, which can synthesize waveform from `metadata.jsonl`. 
+```bash +CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name} +``` +## Speech Synthesis and Speech Editing +### Prepare + +**prepare aligner** +```bash +mkdir -p tools/aligner +cd tools +# download MFA +wget https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner/releases/download/v1.0.1/montreal-forced-aligner_linux.tar.gz +# extract MFA +tar xvf montreal-forced-aligner_linux.tar.gz +# fix .so of MFA +cd montreal-forced-aligner/lib +ln -snf libpython3.6m.so.1.0 libpython3.6m.so +cd - +# download align models and dicts +cd aligner +wget https://paddlespeech.bj.bcebos.com/MFA/ernie_sat/aishell3_model.zip +wget https://paddlespeech.bj.bcebos.com/MFA/AISHELL-3/with_tone/simple.lexicon +wget https://paddlespeech.bj.bcebos.com/MFA/ernie_sat/vctk_model.zip +wget https://paddlespeech.bj.bcebos.com/MFA/LJSpeech-1.1/cmudict-0.7b +cd ../../ +``` +**prepare pretrained FastSpeech2 models** + +ERNIE-SAT use FastSpeech2 as phoneme duration predictor: +```bash +mkdir download +cd download +wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_conformer_baker_ckpt_0.5.zip +wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_nosil_ljspeech_ckpt_0.5.zip +unzip fastspeech2_conformer_baker_ckpt_0.5.zip +unzip fastspeech2_nosil_ljspeech_ckpt_0.5.zip +cd ../ +``` +**prepare source data** +```bash +mkdir source +cd source +wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/SSB03540307.wav +wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/SSB03540428.wav +wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/LJ050-0278.wav +wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/p243_313.wav +wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/p299_096.wav +wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/this_was_not_the_show_for_me.wav +wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/README.md +cd ../ +``` +You can check the text of downloaded wavs in `source/README.md`. +### Cross Language Voice Cloning +```bash +./run.sh --stage 3 --stop-stage 3 --gpus 0 +``` +`stage 3` of `run.sh` calls `local/synthesize_e2e.sh`. + +You can modify `--wav_path`、`--old_str` and `--new_str` yourself, `--old_str` should be the text corresponding to the audio of `--wav_path`, `--new_str` should be designed according to `--task_name`, `--source_lang` and `--target_lang` should be different in this example. +## Pretrained Model +Pretrained ErnieSAT model: +- [erniesat_aishell3_vctk_ckpt_1.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/erniesat_aishell3_vctk_ckpt_1.2.0.zip) + +Model | Step | eval/text_mlm_loss | eval/mlm_loss | eval/loss +:-------------:| :------------:| :-----: | :-----:| :-----: +default| 8(gpu) x 489000|0.000001|52.477642 |52.477642 diff --git a/examples/aishell3_vctk/ernie_sat/conf/default.yaml b/examples/aishell3_vctk/ernie_sat/conf/default.yaml index abb69fcc0..efbdd456d 100644 --- a/examples/aishell3_vctk/ernie_sat/conf/default.yaml +++ b/examples/aishell3_vctk/ernie_sat/conf/default.yaml @@ -1,3 +1,6 @@ +# This configuration tested on 8 GPUs (A100) with 80GB GPU memory. 
+# It takes around 4 days to finish the training,You can adjust +# batch_size、num_workers here and ngpu in local/train.sh for your machine ########################################################### # FEATURE EXTRACTION SETTING # ########################################################### @@ -21,8 +24,8 @@ mlm_prob: 0.8 ########################################################### # DATA SETTING # ########################################################### -batch_size: 20 -num_workers: 2 +batch_size: 40 +num_workers: 8 ########################################################### # MODEL SETTING # @@ -79,7 +82,7 @@ grad_clip: 1.0 ########################################################### # TRAINING SETTING # ########################################################### -max_epoch: 700 +max_epoch: 1500 num_snapshots: 50 ########################################################### diff --git a/examples/aishell3_vctk/ernie_sat/local/synthesize.sh b/examples/aishell3_vctk/ernie_sat/local/synthesize.sh index 3e907427c..8b4178f13 100755 --- a/examples/aishell3_vctk/ernie_sat/local/synthesize.sh +++ b/examples/aishell3_vctk/ernie_sat/local/synthesize.sh @@ -4,28 +4,11 @@ config_path=$1 train_output_path=$2 ckpt_name=$3 -stage=1 -stop_stage=1 - -# pwgan -if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then - FLAGS_allocator_strategy=naive_best_fit \ - FLAGS_fraction_of_gpu_memory_to_use=0.01 \ - python3 ${BIN_DIR}/synthesize.py \ - --erniesat_config=${config_path} \ - --erniesat_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ - --erniesat_stat=dump/train/speech_stats.npy \ - --voc=pwgan_aishell3 \ - --voc_config=pwg_aishell3_ckpt_0.5/default.yaml \ - --voc_ckpt=pwg_aishell3_ckpt_0.5/snapshot_iter_1000000.pdz \ - --voc_stat=pwg_aishell3_ckpt_0.5/feats_stats.npy \ - --test_metadata=dump/test/norm/metadata.jsonl \ - --output_dir=${train_output_path}/test \ - --phones_dict=dump/phone_id_map.txt -fi +stage=0 +stop_stage=0 # hifigan -if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then FLAGS_allocator_strategy=naive_best_fit \ FLAGS_fraction_of_gpu_memory_to_use=0.01 \ python3 ${BIN_DIR}/synthesize.py \ diff --git a/examples/aishell3_vctk/ernie_sat/local/synthesize_e2e.sh b/examples/aishell3_vctk/ernie_sat/local/synthesize_e2e.sh new file mode 100755 index 000000000..c30af6e85 --- /dev/null +++ b/examples/aishell3_vctk/ernie_sat/local/synthesize_e2e.sh @@ -0,0 +1,53 @@ +# not ready yet +#!/bin/bash + +config_path=$1 +train_output_path=$2 +ckpt_name=$3 + +stage=0 +stop_stage=1 + +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + echo 'speech cross language from en to zh !' + FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/synthesize_e2e.py \ + --task_name=synthesize \ + --wav_path=source/p243_313.wav \ + --old_str='For that reason cover should not be given.' \ + --new_str='今天天气很好' \ + --source_lang=en \ + --target_lang=zh \ + --erniesat_config=${config_path} \ + --phones_dict=dump/phone_id_map.txt \ + --erniesat_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --erniesat_stat=dump/train/speech_stats.npy \ + --voc=hifigan_aishell3 \ + --voc_config=hifigan_aishell3_ckpt_0.2.0/default.yaml \ + --voc_ckpt=hifigan_aishell3_ckpt_0.2.0/snapshot_iter_2500000.pdz \ + --voc_stat=hifigan_aishell3_ckpt_0.2.0/feats_stats.npy \ + --output_name=exp/pred_clone_en_zh.wav +fi +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + echo 'speech cross language from zh to en !' 
+ FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/synthesize_e2e.py \ + --task_name=synthesize \ + --wav_path=source/SSB03540307.wav \ + --old_str='请播放歌曲小苹果。' \ + --new_str="Thank you!" \ + --source_lang=zh \ + --target_lang=en \ + --erniesat_config=${config_path} \ + --phones_dict=dump/phone_id_map.txt \ + --erniesat_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --erniesat_stat=dump/train/speech_stats.npy \ + --voc=hifigan_aishell3 \ + --voc_config=hifigan_aishell3_ckpt_0.2.0/default.yaml \ + --voc_ckpt=hifigan_aishell3_ckpt_0.2.0/snapshot_iter_2500000.pdz \ + --voc_stat=hifigan_aishell3_ckpt_0.2.0/feats_stats.npy \ + --output_name=exp/pred_clone_zh_en.wav +fi + diff --git a/examples/aishell3_vctk/ernie_sat/local/train.sh b/examples/aishell3_vctk/ernie_sat/local/train.sh index 30720e8f5..526aac435 100755 --- a/examples/aishell3_vctk/ernie_sat/local/train.sh +++ b/examples/aishell3_vctk/ernie_sat/local/train.sh @@ -8,5 +8,5 @@ python3 ${BIN_DIR}/train.py \ --dev-metadata=dump/dev/norm/metadata.jsonl \ --config=${config_path} \ --output-dir=${train_output_path} \ - --ngpu=2 \ + --ngpu=8 \ --phones-dict=dump/phone_id_map.txt \ No newline at end of file diff --git a/examples/aishell3_vctk/ernie_sat/run.sh b/examples/aishell3_vctk/ernie_sat/run.sh index d75a19f23..5509fc4ad 100755 --- a/examples/aishell3_vctk/ernie_sat/run.sh +++ b/examples/aishell3_vctk/ernie_sat/run.sh @@ -9,7 +9,7 @@ stop_stage=100 conf_path=conf/default.yaml train_output_path=exp/default -ckpt_name=snapshot_iter_153.pdz +ckpt_name=snapshot_iter_489000.pdz # with the following command, you can choose the stage range you want to run # such as `./run.sh --stage 0 --stop-stage 0` @@ -30,3 +30,7 @@ if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then # synthesize, vocoder is pwgan CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1 fi + +if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then + CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1 +fi diff --git a/examples/vctk/README.md b/examples/vctk/README.md index ac5fd24f8..41163dbe7 100644 --- a/examples/vctk/README.md +++ b/examples/vctk/README.md @@ -9,3 +9,4 @@ * voc1 - Parallel WaveGAN * voc2 - MelGAN * voc3 - MultiBand MelGAN +* ernie_sat - ERNIE-SAT diff --git a/examples/vctk/ernie_sat/README.md b/examples/vctk/ernie_sat/README.md index 055e7903d..0a2f9359e 100644 --- a/examples/vctk/ernie_sat/README.md +++ b/examples/vctk/ernie_sat/README.md @@ -1 +1,152 @@ -# ERNIE SAT with VCTK dataset +# ERNIE-SAT with VCTK dataset + +ERNIE-SAT 是可以同时处理中英文的跨语言的语音-语言跨模态大模型,其在语音编辑、个性化语音合成以及跨语言的语音合成等多个任务取得了领先效果。可以应用于语音编辑、个性化合成、语音克隆、同传翻译等一系列场景,该项目供研究使用。 + +## 模型框架 +ERNIE-SAT 中我们提出了两项创新: +- 在预训练过程中将中英双语对应的音素作为输入,实现了跨语言、个性化的软音素映射 +- 采用语言和语音的联合掩码学习实现了语言和语音的对齐 + +
+ +## Dataset +### Download and Extract the dataset +Download VCTK-0.92 from it's [Official Website](https://datashare.ed.ac.uk/handle/10283/3443) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/VCTK-Corpus-0.92`. + +### Get MFA Result and Extract +We use [MFA](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) to get durations for fastspeech2. +You can download from here [vctk_alignment.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/VCTK-Corpus-0.92/vctk_alignment.tar.gz), or train your MFA model reference to [mfa example](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/other/mfa) of our repo. +ps: we remove three speakers in VCTK-0.92 (see [reorganize_vctk.py](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/examples/other/mfa/local/reorganize_vctk.py)): +1. `p315`, because of no text for it. +2. `p280` and `p362`, because no *_mic2.flac (which is better than *_mic1.flac) for them. + +## Get Started +Assume the path to the dataset is `~/datasets/VCTK-Corpus-0.92`. +Assume the path to the MFA result of VCTK is `./vctk_alignment`. +Run the command below to +1. **source path**. +2. preprocess the dataset. +3. train the model. +4. synthesize wavs. + - synthesize waveform from `metadata.jsonl`. + - synthesize waveform from text file. +```bash +./run.sh +``` +You can choose a range of stages you want to run, or set `stage` equal to `stop-stage` to use only one stage, for example, running the following command will only preprocess the dataset. +```bash +./run.sh --stage 0 --stop-stage 0 +``` +### Data Preprocessing +```bash +./local/preprocess.sh ${conf_path} +``` +When it is done. A `dump` folder is created in the current directory. The structure of the dump folder is listed below. + +```text +dump +├── dev +│ ├── norm +│ └── raw +├── phone_id_map.txt +├── speaker_id_map.txt +├── test +│ ├── norm +│ └── raw +└── train + ├── norm + ├── raw + └── speech_stats.npy +``` +The dataset is split into 3 parts, namely `train`, `dev`, and` test`, each of which contains a `norm` and `raw` subfolder. The raw folder contains speech features of each utterance, while the norm folder contains normalized ones. The statistics used to normalize features are computed from the training set, which is located in `dump/train/*_stats.npy`. + +Also, there is a `metadata.jsonl` in each subfolder. It is a table-like file that contains phones, text_lengths, speech_lengths, durations, the path of speech features, speaker, and id of each utterance. + +### Model Training +```bash +CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path} +``` +`./local/train.sh` calls `${BIN_DIR}/train.py`. + +### Synthesizing +We use [HiFiGAN](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/vctk/voc5) as the neural vocoder. + +Download pretrained HiFiGAN model from [hifigan_vctk_ckpt_0.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/hifigan/hifigan_vctk_ckpt_0.2.0.zip) and unzip it. +```bash +unzip hifigan_vctk_ckpt_0.2.0.zip +``` +HiFiGAN checkpoint contains files listed below. +```text +hifigan_vctk_ckpt_0.2.0 +├── default.yaml # default config used to train HiFiGAN +├── feats_stats.npy # statistics used to normalize spectrogram when training HiFiGAN +└── snapshot_iter_2500000.pdz # generator parameters of HiFiGAN +``` +`./local/synthesize.sh` calls `${BIN_DIR}/../synthesize.py`, which can synthesize waveform from `metadata.jsonl`. 
+```bash +CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name} +``` +## Speech Synthesis and Speech Editing +### Prepare +**prepare aligner** +```bash +mkdir -p tools/aligner +cd tools +# download MFA +wget https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner/releases/download/v1.0.1/montreal-forced-aligner_linux.tar.gz +# extract MFA +tar xvf montreal-forced-aligner_linux.tar.gz +# fix .so of MFA +cd montreal-forced-aligner/lib +ln -snf libpython3.6m.so.1.0 libpython3.6m.so +cd - +# download align models and dicts +cd aligner +wget https://paddlespeech.bj.bcebos.com/MFA/ernie_sat/aishell3_model.zip +wget https://paddlespeech.bj.bcebos.com/MFA/AISHELL-3/with_tone/simple.lexicon +wget https://paddlespeech.bj.bcebos.com/MFA/ernie_sat/vctk_model.zip +wget https://paddlespeech.bj.bcebos.com/MFA/LJSpeech-1.1/cmudict-0.7b +cd ../../ +``` +**prepare pretrained FastSpeech2 models** + +ERNIE-SAT use FastSpeech2 as phoneme duration predictor: +```bash +mkdir download +cd download +wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_conformer_baker_ckpt_0.5.zip +wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_nosil_ljspeech_ckpt_0.5.zip +unzip fastspeech2_conformer_baker_ckpt_0.5.zip +unzip fastspeech2_nosil_ljspeech_ckpt_0.5.zip +cd ../ +``` +**prepare source data** +```bash +mkdir source +cd source +wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/SSB03540307.wav +wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/SSB03540428.wav +wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/LJ050-0278.wav +wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/p243_313.wav +wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/p299_096.wav +wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/this_was_not_the_show_for_me.wav +wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/README.md +cd ../ +``` +You can check the text of downloaded wavs in `source/README.md`. +### Speech Synthesis and Speech Editing +```bash +./run.sh --stage 3 --stop-stage 3 --gpus 0 +``` +`stage 3` of `run.sh` calls `local/synthesize_e2e.sh`, `stage 0` of it is **Speech Synthesis** and `stage 1` of it is **Speech Editing**. + +You can modify `--wav_path`、`--old_str` and `--new_str` yourself, `--old_str` should be the text corresponding to the audio of `--wav_path`, `--new_str` should be designed according to `--task_name`, both `--source_lang` and `--target_lang` should be `en` for model trained with VCTK dataset. +## Pretrained Model +Pretrained ErnieSAT model: +- [erniesat_vctk_ckpt_1.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/erniesat_vctk_ckpt_1.2.0.zip) + +Model | Step | eval/mlm_loss | eval/loss +:-------------:| :------------:| :-----: | :-----: +default| 8(gpu) x 199500|57.622215|57.622215 diff --git a/examples/vctk/ernie_sat/conf/default.yaml b/examples/vctk/ernie_sat/conf/default.yaml index 672f937ef..88b3d376d 100644 --- a/examples/vctk/ernie_sat/conf/default.yaml +++ b/examples/vctk/ernie_sat/conf/default.yaml @@ -1,3 +1,6 @@ +# This configuration tested on 8 GPUs (A100) with 80GB GPU memory. 
+# It takes around 2 days to finish the training,You can adjust +# batch_size、num_workers here and ngpu in local/train.sh for your machine ########################################################### # FEATURE EXTRACTION SETTING # ########################################################### @@ -21,8 +24,8 @@ mlm_prob: 0.8 ########################################################### # DATA SETTING # ########################################################### -batch_size: 20 -num_workers: 2 +batch_size: 40 +num_workers: 8 ########################################################### # MODEL SETTING # diff --git a/examples/vctk/ernie_sat/local/synthesize.sh b/examples/vctk/ernie_sat/local/synthesize.sh index b24db018a..5667f30f8 100755 --- a/examples/vctk/ernie_sat/local/synthesize.sh +++ b/examples/vctk/ernie_sat/local/synthesize.sh @@ -4,31 +4,11 @@ config_path=$1 train_output_path=$2 ckpt_name=$3 -stage=1 -stop_stage=1 - -# use am to predict duration here -# 增加 am_phones_dict am_tones_dict 等,也可以用新的方式构造 am, 不需要这么多参数了就 - -# pwgan -if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then - FLAGS_allocator_strategy=naive_best_fit \ - FLAGS_fraction_of_gpu_memory_to_use=0.01 \ - python3 ${BIN_DIR}/synthesize.py \ - --erniesat_config=${config_path} \ - --erniesat_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ - --erniesat_stat=dump/train/speech_stats.npy \ - --voc=pwgan_vctk \ - --voc_config=pwg_vctk_ckpt_0.1.1/default.yaml \ - --voc_ckpt=pwg_vctk_ckpt_0.1.1/snapshot_iter_1500000.pdz \ - --voc_stat=pwg_vctk_ckpt_0.1.1/feats_stats.npy \ - --test_metadata=dump/test/norm/metadata.jsonl \ - --output_dir=${train_output_path}/test \ - --phones_dict=dump/phone_id_map.txt -fi +stage=0 +stop_stage=0 # hifigan -if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then FLAGS_allocator_strategy=naive_best_fit \ FLAGS_fraction_of_gpu_memory_to_use=0.01 \ python3 ${BIN_DIR}/synthesize.py \ diff --git a/examples/vctk/ernie_sat/local/synthesize_e2e.sh b/examples/vctk/ernie_sat/local/synthesize_e2e.sh new file mode 100755 index 000000000..fee540169 --- /dev/null +++ b/examples/vctk/ernie_sat/local/synthesize_e2e.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +config_path=$1 +train_output_path=$2 +ckpt_name=$3 + +stage=0 +stop_stage=1 + +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + echo 'speech synthesize !' + FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/synthesize_e2e.py \ + --task_name=synthesize \ + --wav_path=source/p243_313.wav \ + --old_str='For that reason cover should not be given.' \ + --new_str='I love you very much do you love me' \ + --source_lang=en \ + --target_lang=en \ + --erniesat_config=${config_path} \ + --phones_dict=dump/phone_id_map.txt \ + --erniesat_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --erniesat_stat=dump/train/speech_stats.npy \ + --voc=hifigan_vctk \ + --voc_config=hifigan_vctk_ckpt_0.2.0/default.yaml \ + --voc_ckpt=hifigan_vctk_ckpt_0.2.0/snapshot_iter_2500000.pdz \ + --voc_stat=hifigan_vctk_ckpt_0.2.0/feats_stats.npy \ + --output_name=exp/pred_gen.wav +fi + +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + echo 'speech edit !' + FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/synthesize_e2e.py \ + --task_name=edit \ + --wav_path=source/p243_313.wav \ + --old_str='For that reason cover should not be given.' \ + --new_str='For that reason cover is not impossible to be given.' 
\ + --source_lang=en \ + --target_lang=en \ + --erniesat_config=${config_path} \ + --phones_dict=dump/phone_id_map.txt \ + --erniesat_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --erniesat_stat=dump/train/speech_stats.npy \ + --voc=hifigan_vctk \ + --voc_config=hifigan_vctk_ckpt_0.2.0/default.yaml \ + --voc_ckpt=hifigan_vctk_ckpt_0.2.0/snapshot_iter_2500000.pdz \ + --voc_stat=hifigan_vctk_ckpt_0.2.0/feats_stats.npy \ + --output_name=exp/pred_edit.wav +fi diff --git a/examples/vctk/ernie_sat/local/train.sh b/examples/vctk/ernie_sat/local/train.sh index 30720e8f5..526aac435 100755 --- a/examples/vctk/ernie_sat/local/train.sh +++ b/examples/vctk/ernie_sat/local/train.sh @@ -8,5 +8,5 @@ python3 ${BIN_DIR}/train.py \ --dev-metadata=dump/dev/norm/metadata.jsonl \ --config=${config_path} \ --output-dir=${train_output_path} \ - --ngpu=2 \ + --ngpu=8 \ --phones-dict=dump/phone_id_map.txt \ No newline at end of file diff --git a/examples/vctk/ernie_sat/run.sh b/examples/vctk/ernie_sat/run.sh index d75a19f23..94d130d41 100755 --- a/examples/vctk/ernie_sat/run.sh +++ b/examples/vctk/ernie_sat/run.sh @@ -9,7 +9,7 @@ stop_stage=100 conf_path=conf/default.yaml train_output_path=exp/default -ckpt_name=snapshot_iter_153.pdz +ckpt_name=snapshot_iter_199500.pdz # with the following command, you can choose the stage range you want to run # such as `./run.sh --stage 0 --stop-stage 0` @@ -30,3 +30,7 @@ if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then # synthesize, vocoder is pwgan CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1 fi + +if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then + CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1 +fi diff --git a/paddlespeech/t2s/exps/ernie_sat/align.py b/paddlespeech/t2s/exps/ernie_sat/align.py index 529a8221c..464f51a3b 100755 --- a/paddlespeech/t2s/exps/ernie_sat/align.py +++ b/paddlespeech/t2s/exps/ernie_sat/align.py @@ -19,9 +19,9 @@ import librosa import numpy as np import pypinyin from praatio import textgrid -from paddlespeech.t2s.exps.ernie_sat.utils import get_tmp_name -from paddlespeech.t2s.exps.ernie_sat.utils import get_dict +from paddlespeech.t2s.exps.ernie_sat.utils import get_dict +from paddlespeech.t2s.exps.ernie_sat.utils import get_tmp_name DICT_EN = 'tools/aligner/cmudict-0.7b' DICT_ZH = 'tools/aligner/simple.lexicon' @@ -30,6 +30,7 @@ MODEL_DIR_ZH = 'tools/aligner/aishell3_model.zip' MFA_PATH = 'tools/montreal-forced-aligner/bin' os.environ['PATH'] = MFA_PATH + '/:' + os.environ['PATH'] + def _get_max_idx(dic): return sorted([int(key.split('_')[0]) for key in dic.keys()])[-1] @@ -106,11 +107,11 @@ def alignment(wav_path: str, wav_name = os.path.basename(wav_path) utt = wav_name.split('.')[0] # prepare data for MFA - tmp_name = get_tmp_name(text=text) + tmp_name = get_tmp_name(text=text) tmpbase = './tmp_dir/' + tmp_name tmpbase = Path(tmpbase) tmpbase.mkdir(parents=True, exist_ok=True) - print("tmp_name in alignment:",tmp_name) + print("tmp_name in alignment:", tmp_name) shutil.copyfile(wav_path, tmpbase / wav_name) txt_name = utt + '.txt' @@ -340,7 +341,7 @@ def get_phns_spans(wav_path: str, if __name__ == '__main__': text = "For that reason cover should not be given." 
- phn, dur, word2phns = alignment("exp/p243_313.wav", text, lang='en') + phn, dur, word2phns = alignment("source/p243_313.wav", text, lang='en') print(phn, dur) print(word2phns) print("---------------------------------") @@ -352,7 +353,7 @@ if __name__ == '__main__': style=pypinyin.Style.TONE3, tone_sandhi=True) text_zh = " ".join(text_zh) - phn, dur, word2phns = alignment("exp/000001.wav", text_zh, lang='zh') + phn, dur, word2phns = alignment("source/000001.wav", text_zh, lang='zh') print(phn, dur) print(word2phns) print("---------------------------------") @@ -367,7 +368,7 @@ if __name__ == '__main__': print("---------------------------------") outs = get_phns_spans( - wav_path="exp/p243_313.wav", + wav_path="source/p243_313.wav", old_str="For that reason cover should not be given.", new_str="for that reason cover is impossible to be given.") diff --git a/paddlespeech/t2s/exps/ernie_sat/synthesize_e2e.py b/paddlespeech/t2s/exps/ernie_sat/synthesize_e2e.py index 95b07367c..21c9ae044 100644 --- a/paddlespeech/t2s/exps/ernie_sat/synthesize_e2e.py +++ b/paddlespeech/t2s/exps/ernie_sat/synthesize_e2e.py @@ -11,35 +11,41 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import argparse +import os +from pathlib import Path +from typing import List + import librosa import numpy as np +import paddle +import pypinyin import soundfile as sf +import yaml +from pypinyin_dict.phrase_pinyin_data import large_pinyin +from yacs.config import CfgNode +from paddlespeech.t2s.datasets.am_batch_fn import build_erniesat_collate_fn +from paddlespeech.t2s.datasets.get_feats import LogMelFBank from paddlespeech.t2s.exps.ernie_sat.align import get_phns_spans from paddlespeech.t2s.exps.ernie_sat.utils import eval_durs from paddlespeech.t2s.exps.ernie_sat.utils import get_dur_adj_factor from paddlespeech.t2s.exps.ernie_sat.utils import get_span_bdy -from paddlespeech.t2s.datasets.am_batch_fn import build_erniesat_collate_fn -from paddlespeech.t2s.exps.syn_utils import get_frontend -from paddlespeech.t2s.datasets.get_feats import LogMelFBank -from paddlespeech.t2s.exps.syn_utils import norm from paddlespeech.t2s.exps.ernie_sat.utils import get_tmp_name +from paddlespeech.t2s.exps.syn_utils import get_am_inference +from paddlespeech.t2s.exps.syn_utils import get_voc_inference +from paddlespeech.t2s.exps.syn_utils import norm +from paddlespeech.t2s.utils import str2bool +large_pinyin.load() - - - - -def _p2id(self, phonemes: List[str]) -> np.ndarray: +def _p2id(phonemes: List[str]) -> np.ndarray: # replace unk phone with sp - phonemes = [ - phn if phn in vocab_phones else "sp" for phn in phonemes - ] + phonemes = [phn if phn in vocab_phones else "sp" for phn in phonemes] phone_ids = [vocab_phones[item] for item in phonemes] return np.array(phone_ids, np.int64) - def prep_feats_with_dur(wav_path: str, old_str: str='', new_str: str='', @@ -67,12 +73,12 @@ def prep_feats_with_dur(wav_path: str, fs=fs, n_shift=n_shift) - mfa_start = phns_spans_outs["mfa_start"] - mfa_end = phns_spans_outs["mfa_end"] - old_phns = phns_spans_outs["old_phns"] - new_phns = phns_spans_outs["new_phns"] - span_to_repl = phns_spans_outs["span_to_repl"] - span_to_add = phns_spans_outs["span_to_add"] + mfa_start = phns_spans_outs['mfa_start'] + mfa_end = phns_spans_outs['mfa_end'] + old_phns = phns_spans_outs['old_phns'] + new_phns = phns_spans_outs['new_phns'] + span_to_repl = phns_spans_outs['span_to_repl'] + 
span_to_add = phns_spans_outs['span_to_add'] # 中文的 phns 不一定都在 fastspeech2 的字典里, 用 sp 代替 if target_lang in {'en', 'zh'}: @@ -132,7 +138,7 @@ def prep_feats_with_dur(wav_path: str, [wav_org[:wav_left_idx], blank_wav, wav_org[wav_right_idx:]]) # 音频是正常遮住了 - sf.write(str("new_wav.wav"), new_wav, samplerate=fs) + sf.write(str("mask_wav.wav"), new_wav, samplerate=fs) # 4. get old and new mel span to be mask old_span_bdy = get_span_bdy( @@ -152,8 +158,6 @@ def prep_feats_with_dur(wav_path: str, return outs - - def prep_feats(wav_path: str, old_str: str='', new_str: str='', @@ -163,7 +167,7 @@ def prep_feats(wav_path: str, fs: int=24000, n_shift: int=300): - outs = prep_feats_with_dur( + with_dur_outs = prep_feats_with_dur( wav_path=wav_path, old_str=old_str, new_str=new_str, @@ -176,138 +180,240 @@ def prep_feats(wav_path: str, wav_name = os.path.basename(wav_path) utt_id = wav_name.split('.')[0] - wav = outs['new_wav'] - phns = outs['new_phns'] - mfa_start = outs['new_mfa_start'] - mfa_end = outs['new_mfa_end'] - old_span_bdy = outs['old_span_bdy'] - new_span_bdy = outs['new_span_bdy'] + wav = with_dur_outs['new_wav'] + phns = with_dur_outs['new_phns'] + mfa_start = with_dur_outs['new_mfa_start'] + mfa_end = with_dur_outs['new_mfa_end'] + old_span_bdy = with_dur_outs['old_span_bdy'] + new_span_bdy = with_dur_outs['new_span_bdy'] span_bdy = np.array(new_span_bdy) - text = _p2id(phns) mel = mel_extractor.get_log_mel_fbank(wav) erniesat_mean, erniesat_std = np.load(erniesat_stat) normed_mel = norm(mel, erniesat_mean, erniesat_std) - tmp_name = get_tmp_name(text=old_str) + tmp_name = get_tmp_name(text=old_str) tmpbase = './tmp_dir/' + tmp_name tmpbase = Path(tmpbase) tmpbase.mkdir(parents=True, exist_ok=True) - print("tmp_name in synthesize_e2e:",tmp_name) mel_path = tmpbase / 'mel.npy' - print("mel_path:",mel_path) - np.save(mel_path, logmel) + np.save(mel_path, normed_mel) durations = [e - s for e, s in zip(mfa_end, mfa_start)] + text = _p2id(phns) - datum={ - "utt_id": utt_id, - "spk_id": 0, - "text": text, - "text_lengths": len(text), - "speech_lengths": 115, - "durations": durations, - "speech": mel_path, - "align_start": mfa_start, + datum = { + "utt_id": utt_id, + "spk_id": 0, + "text": text, + "text_lengths": len(text), + "speech_lengths": len(normed_mel), + "durations": durations, + "speech": np.load(mel_path), + "align_start": mfa_start, "align_end": mfa_end, "span_bdy": span_bdy } batch = collate_fn([datum]) - print("batch:",batch) - - return batch, old_span_bdy, new_span_bdy - - -def decode_with_model(mlm_model: nn.Layer, - collate_fn, - wav_path: str, - old_str: str='', - new_str: str='', - source_lang: str='en', - target_lang: str='en', - use_teacher_forcing: bool=False, - duration_adjust: bool=True, - fs: int=24000, - n_shift: int=300, - token_list: List[str]=[]): - batch, old_span_bdy, new_span_bdy = prep_feats( - source_lang=source_lang, - target_lang=target_lang, + outs = dict() + outs['batch'] = batch + outs['old_span_bdy'] = old_span_bdy + outs['new_span_bdy'] = new_span_bdy + return outs + + +def get_mlm_output(wav_path: str, + old_str: str='', + new_str: str='', + source_lang: str='en', + target_lang: str='en', + duration_adjust: bool=True, + fs: int=24000, + n_shift: int=300): + + prep_feats_outs = prep_feats( wav_path=wav_path, old_str=old_str, new_str=new_str, + source_lang=source_lang, + target_lang=target_lang, duration_adjust=duration_adjust, fs=fs, - n_shift=n_shift, - token_list=token_list) - - - - feats = collate_fn(batch)[1] + n_shift=n_shift) - if 'text_masked_pos' in 
feats.keys(): - feats.pop('text_masked_pos') + batch = prep_feats_outs['batch'] + new_span_bdy = prep_feats_outs['new_span_bdy'] + old_span_bdy = prep_feats_outs['old_span_bdy'] - output = mlm_model.inference( - text=feats['text'], - speech=feats['speech'], - masked_pos=feats['masked_pos'], - speech_mask=feats['speech_mask'], - text_mask=feats['text_mask'], - speech_seg_pos=feats['speech_seg_pos'], - text_seg_pos=feats['text_seg_pos'], - span_bdy=new_span_bdy, - use_teacher_forcing=use_teacher_forcing) + out_mels = erniesat_inference( + speech=batch['speech'], + text=batch['text'], + masked_pos=batch['masked_pos'], + speech_mask=batch['speech_mask'], + text_mask=batch['text_mask'], + speech_seg_pos=batch['speech_seg_pos'], + text_seg_pos=batch['text_seg_pos'], + span_bdy=new_span_bdy) # 拼接音频 - output_feat = paddle.concat(x=output, axis=0) + output_feat = paddle.concat(x=out_mels, axis=0) wav_org, _ = librosa.load(wav_path, sr=fs) - return wav_org, output_feat, old_span_bdy, new_span_bdy, fs, hop_length + outs = dict() + outs['wav_org'] = wav_org + outs['output_feat'] = output_feat + outs['old_span_bdy'] = old_span_bdy + outs['new_span_bdy'] = new_span_bdy + return outs -if __name__ == '__main__': - fs = 24000 - n_shift = 300 - wav_path = "exp/p243_313.wav" - old_str = "For that reason cover should not be given." - # for edit - # new_str = "for that reason cover is impossible to be given." - # for synthesize - append_str = "do you love me i love you so much" - new_str = old_str + append_str - ''' - outs = prep_feats_with_dur( +def get_wav(wav_path: str, + source_lang: str='en', + target_lang: str='en', + old_str: str='', + new_str: str='', + duration_adjust: bool=True, + fs: int=24000, + n_shift: int=300): + + outs = get_mlm_output( wav_path=wav_path, old_str=old_str, new_str=new_str, + source_lang=source_lang, + target_lang=target_lang, + duration_adjust=duration_adjust, fs=fs, n_shift=n_shift) - new_wav = outs['new_wav'] - new_phns = outs['new_phns'] - new_mfa_start = outs['new_mfa_start'] - new_mfa_end = outs['new_mfa_end'] + wav_org = outs['wav_org'] + output_feat = outs['output_feat'] old_span_bdy = outs['old_span_bdy'] new_span_bdy = outs['new_span_bdy'] - print("---------------------------------") + masked_feat = output_feat[new_span_bdy[0]:new_span_bdy[1]] + + with paddle.no_grad(): + alt_wav = voc_inference(masked_feat) + alt_wav = np.squeeze(alt_wav) + + old_time_bdy = [n_shift * x for x in old_span_bdy] + wav_replaced = np.concatenate( + [wav_org[:old_time_bdy[0]], alt_wav, wav_org[old_time_bdy[1]:]]) + + wav_dict = {"origin": wav_org, "output": wav_replaced} + return wav_dict + + +def parse_args(): + # parse args and config + parser = argparse.ArgumentParser( + description="Synthesize with acoustic model & vocoder") + # ernie sat + + parser.add_argument( + '--erniesat_config', + type=str, + default=None, + help='Config of acoustic model.') + parser.add_argument( + '--erniesat_ckpt', + type=str, + default=None, + help='Checkpoint file of acoustic model.') + parser.add_argument( + "--erniesat_stat", + type=str, + default=None, + help="mean and standard deviation used to normalize spectrogram when training acoustic model." 
+ ) + parser.add_argument( + "--phones_dict", type=str, default=None, help="phone vocabulary file.") + # vocoder + parser.add_argument( + '--voc', + type=str, + default='pwgan_csmsc', + choices=[ + 'pwgan_aishell3', + 'pwgan_vctk', + 'hifigan_aishell3', + 'hifigan_vctk', + ], + help='Choose vocoder type of tts task.') + parser.add_argument( + '--voc_config', type=str, default=None, help='Config of voc.') + parser.add_argument( + '--voc_ckpt', type=str, default=None, help='Checkpoint file of voc.') + parser.add_argument( + "--voc_stat", + type=str, + default=None, + help="mean and standard deviation used to normalize spectrogram when training voc." + ) + # other + parser.add_argument( + "--ngpu", type=int, default=1, help="if ngpu == 0, use cpu.") + + # ernie sat related + parser.add_argument("--task_name", type=str, help="task name") + parser.add_argument("--wav_path", type=str, help="path of old wav") + parser.add_argument("--old_str", type=str, help="old string") + parser.add_argument("--new_str", type=str, help="new string") + parser.add_argument( + "--source_lang", type=str, default="en", help="source language") + parser.add_argument( + "--target_lang", type=str, default="en", help="target language") + parser.add_argument( + "--duration_adjust", + type=str2bool, + default=True, + help="whether to adjust duration.") + parser.add_argument("--output_name", type=str, default="output.wav") + + args = parser.parse_args() + return args - print("new_wav:", new_wav) - print("new_phns:", new_phns) - print("new_mfa_start:", new_mfa_start) - print("new_mfa_end:", new_mfa_end) - print("old_span_bdy:", old_span_bdy) - print("new_span_bdy:", new_span_bdy) - print("---------------------------------") - ''' - erniesat_config = "/home/yuantian01/PaddleSpeech_ERNIE_SAT/PaddleSpeech/examples/vctk/ernie_sat/local/default.yaml" +if __name__ == '__main__': + args = parse_args() + + if args.ngpu == 0: + paddle.set_device("cpu") + elif args.ngpu > 0: + paddle.set_device("gpu") + else: + print("ngpu should >= 0 !") - with open(erniesat_config) as f: + # evaluate(args) + with open(args.erniesat_config) as f: erniesat_config = CfgNode(yaml.safe_load(f)) - - erniesat_stat = "/home/yuantian01/PaddleSpeech_ERNIE_SAT/PaddleSpeech/examples/vctk/ernie_sat/dump/train/speech_stats.npy" + old_str = args.old_str + new_str = args.new_str + + # convert Chinese characters to pinyin + if args.source_lang == 'zh': + old_str = pypinyin.lazy_pinyin( + old_str, + neutral_tone_with_five=True, + style=pypinyin.Style.TONE3, + tone_sandhi=True) + old_str = ' '.join(old_str) + if args.target_lang == 'zh': + new_str = pypinyin.lazy_pinyin( + new_str, + neutral_tone_with_five=True, + style=pypinyin.Style.TONE3, + tone_sandhi=True) + new_str = ' '.join(new_str) + + if args.task_name == 'edit': + new_str = new_str + elif args.task_name == 'synthesize': + new_str = old_str + new_str + else: + new_str = old_str + new_str + print("new_str:", new_str) # Extractor mel_extractor = LogMelFBank( @@ -319,28 +425,51 @@ if __name__ == '__main__': n_mels=erniesat_config.n_mels, fmin=erniesat_config.fmin, fmax=erniesat_config.fmax) - - collate_fn = build_erniesat_collate_fn( mlm_prob=erniesat_config.mlm_prob, mean_phn_span=erniesat_config.mean_phn_span, seg_emb=erniesat_config.model['enc_input_layer'] == 'sega_mlm', text_masking=False) - - phones_dict='/home/yuantian01/PaddleSpeech_ERNIE_SAT/PaddleSpeech/examples/vctk/ernie_sat/dump/phone_id_map.txt' + vocab_phones = {} - with open(phones_dict, 'rt') as f: + with open(args.phones_dict, 'rt') as f: 
phn_id = [line.strip().split() for line in f.readlines()] for phn, id in phn_id: vocab_phones[phn] = int(id) - prep_feats(wav_path=wav_path, - old_str=old_str, - new_str=new_str, - fs=fs, - n_shift=n_shift) - - - + # ernie sat model + erniesat_inference = get_am_inference( + am='erniesat_dataset', + am_config=erniesat_config, + am_ckpt=args.erniesat_ckpt, + am_stat=args.erniesat_stat, + phones_dict=args.phones_dict) + + with open(args.voc_config) as f: + voc_config = CfgNode(yaml.safe_load(f)) + + # vocoder + voc_inference = get_voc_inference( + voc=args.voc, + voc_config=voc_config, + voc_ckpt=args.voc_ckpt, + voc_stat=args.voc_stat) + + erniesat_stat = args.erniesat_stat + + wav_dict = get_wav( + wav_path=args.wav_path, + source_lang=args.source_lang, + target_lang=args.target_lang, + old_str=old_str, + new_str=new_str, + duration_adjust=args.duration_adjust, + fs=erniesat_config.fs, + n_shift=erniesat_config.n_shift) + + sf.write( + args.output_name, wav_dict['output'], samplerate=erniesat_config.fs) + print( + f"\033[1;32;m Generated audio saved into {args.output_name} ! \033[0m") diff --git a/paddlespeech/t2s/exps/syn_utils.py b/paddlespeech/t2s/exps/syn_utils.py index 127e1a3ba..c8eb1c64a 100644 --- a/paddlespeech/t2s/exps/syn_utils.py +++ b/paddlespeech/t2s/exps/syn_utils.py @@ -82,6 +82,10 @@ def denorm(data, mean, std): return data * std + mean +def norm(data, mean, std): + return (data - mean) / std + + def get_chunks(data, block_size: int, pad_size: int): data_len = data.shape[1] chunks = [] diff --git a/paddlespeech/t2s/models/ernie_sat/ernie_sat.py b/paddlespeech/t2s/models/ernie_sat/ernie_sat.py index 54f5d542d..08c43dc5f 100644 --- a/paddlespeech/t2s/models/ernie_sat/ernie_sat.py +++ b/paddlespeech/t2s/models/ernie_sat/ernie_sat.py @@ -389,7 +389,7 @@ class MLM(nn.Layer): speech_seg_pos: paddle.Tensor, text_seg_pos: paddle.Tensor, span_bdy: List[int], - use_teacher_forcing: bool=False, ) -> List[paddle.Tensor]: + use_teacher_forcing: bool=True, ) -> List[paddle.Tensor]: ''' Args: speech (paddle.Tensor): input speech (1, Tmax, D). 
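The `use_teacher_forcing` default flips from `False` to `True` in the hunk above, and the same flip is applied to the `ErnieSAT` wrapper in the hunk that follows. The removed `decode_with_model` used to pass the flag explicitly, while the new `get_mlm_output` calls `erniesat_inference` without it, so the call site now relies on whatever default the `inference` methods carry, which is presumably why both defaults change together. A minimal, self-contained sketch of that pattern; the class names below are placeholders, not the real paddle modules:

```python
class InnerMLM:
    """Stands in for MLM: the object that actually runs inference."""

    def inference(self, mel, use_teacher_forcing: bool = True):
        mode = "teacher-forced" if use_teacher_forcing else "free-running"
        return mode, mel


class Wrapper:
    """Stands in for ErnieSAT: it only forwards, so its default must stay in sync."""

    def __init__(self):
        self.model = InnerMLM()

    def inference(self, mel, use_teacher_forcing: bool = True):
        return self.model.inference(mel, use_teacher_forcing=use_teacher_forcing)


# A call site that omits the flag (like the new get_mlm_output) picks up the default.
print(Wrapper().inference("mel"))  # ('teacher-forced', 'mel')
# A call site that passes the flag explicitly is unaffected by the default change.
print(Wrapper().inference("mel", use_teacher_forcing=False))  # ('free-running', 'mel')
```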
@@ -657,7 +657,7 @@ class ErnieSAT(nn.Layer): speech_seg_pos: paddle.Tensor, text_seg_pos: paddle.Tensor, span_bdy: List[int], - use_teacher_forcing: bool=False, ) -> Dict[str, paddle.Tensor]: + use_teacher_forcing: bool=True, ) -> Dict[str, paddle.Tensor]: return self.model.inference( speech=speech, text=text, From 25b96405df6b45a9bfab33361b84fb86f62ccb09 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20An=20=EF=BC=88An=20Hongliang=EF=BC=89?= Date: Thu, 25 Aug 2022 18:10:48 +0800 Subject: [PATCH 015/101] add chinese words correct phonic,test=tts (#2300) --- paddlespeech/t2s/frontend/polyphonic.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/paddlespeech/t2s/frontend/polyphonic.yaml b/paddlespeech/t2s/frontend/polyphonic.yaml index de60d1a39..51b76f23f 100644 --- a/paddlespeech/t2s/frontend/polyphonic.yaml +++ b/paddlespeech/t2s/frontend/polyphonic.yaml @@ -43,3 +43,7 @@ polyphonic: 时分: ['shi2','fen1'] 蚌埠: ['beng4','bu4'] 驯服: ['xun4','fu2'] + 幸免于难: ['xing4','mian3','yu2','nan4'] + 恶行: ['e4','xing2'] + 唉: ['ai4'] + From 043b21d3b412e7e1f290178a7f8f0d34390a2aee Mon Sep 17 00:00:00 2001 From: liangym <34430015+lym0302@users.noreply.github.com> Date: Thu, 25 Aug 2022 18:50:47 +0800 Subject: [PATCH 016/101] fix mix frontend, test=tts (#2299) --- paddlespeech/t2s/frontend/mix_frontend.py | 70 +++++++++++++---------- 1 file changed, 40 insertions(+), 30 deletions(-) diff --git a/paddlespeech/t2s/frontend/mix_frontend.py b/paddlespeech/t2s/frontend/mix_frontend.py index 6868d3357..a681445c7 100644 --- a/paddlespeech/t2s/frontend/mix_frontend.py +++ b/paddlespeech/t2s/frontend/mix_frontend.py @@ -61,7 +61,8 @@ class MixFrontend(): return False def is_end(self, before_char, after_char) -> bool: - if ((self.is_alphabet(before_char) or before_char == " ") and (self.is_alphabet(after_char) or after_char == " ")): + if ((self.is_alphabet(before_char) or before_char == " ") and + (self.is_alphabet(after_char) or after_char == " ")): return True else: return False @@ -86,10 +87,11 @@ class MixFrontend(): if point_index == 0 or point_index == len(text) - 1: new_text = text else: - if not self.is_end(text[point_index - 1], text[point_index + 1]): + if not self.is_end(text[point_index - 1], text[point_index + + 1]): new_text = text else: - new_text = text[: point_index] + "。" + text[point_index + 1:] + new_text = text[:point_index] + "。" + text[point_index + 1:] elif len(point_indexs) == 2: first_index = point_indexs[0] @@ -97,7 +99,8 @@ class MixFrontend(): # first if first_index != 0: - if not self.is_end(text[first_index - 1], text[first_index + 1]): + if not self.is_end(text[first_index - 1], text[first_index + + 1]): new_text += (text[:first_index] + ".") else: new_text += (text[:first_index] + "。") @@ -106,18 +109,20 @@ class MixFrontend(): # last if end_index != len(text) - 1: if not self.is_end(text[end_index - 1], text[end_index + 1]): - new_text += text[point_indexs[-2] + 1 : ] + new_text += text[point_indexs[-2] + 1:] else: - new_text += (text[point_indexs[-2] + 1 : end_index] + "。" + text[end_index + 1 : ]) + new_text += (text[point_indexs[-2] + 1:end_index] + "。" + + text[end_index + 1:]) else: - new_text += "." + new_text += "." 
else: first_index = point_indexs[0] end_index = point_indexs[-1] # first if first_index != 0: - if not self.is_end(text[first_index - 1], text[first_index + 1]): + if not self.is_end(text[first_index - 1], text[first_index + + 1]): new_text += (text[:first_index] + ".") else: new_text += (text[:first_index] + "。") @@ -126,16 +131,20 @@ class MixFrontend(): # middle for j in range(1, len(point_indexs) - 1): point_index = point_indexs[j] - if not self.is_end(text[point_index - 1], text[point_index + 1]): - new_text += (text[point_indexs[j-1] + 1 : point_index] + ".") + if not self.is_end(text[point_index - 1], text[point_index + + 1]): + new_text += ( + text[point_indexs[j - 1] + 1:point_index] + ".") else: - new_text += (text[point_indexs[j-1] + 1 : point_index] + "。") + new_text += ( + text[point_indexs[j - 1] + 1:point_index] + "。") # last if end_index != len(text) - 1: if not self.is_end(text[end_index - 1], text[end_index + 1]): - new_text += text[point_indexs[-2] + 1 : ] + new_text += text[point_indexs[-2] + 1:] else: - new_text += (text[point_indexs[-2] + 1 : end_index] + "。" + text[end_index + 1 : ]) + new_text += (text[point_indexs[-2] + 1:end_index] + "。" + + text[end_index + 1:]) else: new_text += "." @@ -224,7 +233,7 @@ class MixFrontend(): def get_input_ids(self, sentence: str, - merge_sentences: bool=True, + merge_sentences: bool=False, get_tone_ids: bool=False, add_sp: bool=True, to_tensor: bool=True) -> Dict[str, List[paddle.Tensor]]: @@ -232,28 +241,29 @@ class MixFrontend(): sentences = self._split(sentence) phones_list = [] result = {} - for text in sentences: phones_seg = [] segments = self._distinguish(text) for seg in segments: content = seg[0] lang = seg[1] - if lang == "zh": - input_ids = self.zh_frontend.get_input_ids( - content, - merge_sentences=True, - get_tone_ids=get_tone_ids, - to_tensor=to_tensor) - - elif lang == "en": - input_ids = self.en_frontend.get_input_ids( - content, merge_sentences=True, to_tensor=to_tensor) - - phones_seg.append(input_ids["phone_ids"][0]) - if add_sp: - phones_seg.append(self.sp_id_tensor) - + if content != '': + if lang == "en": + input_ids = self.en_frontend.get_input_ids( + content, merge_sentences=True, to_tensor=to_tensor) + else: + input_ids = self.zh_frontend.get_input_ids( + content, + merge_sentences=True, + get_tone_ids=get_tone_ids, + to_tensor=to_tensor) + + phones_seg.append(input_ids["phone_ids"][0]) + if add_sp: + phones_seg.append(self.sp_id_tensor) + + if phones_seg == []: + phones_seg.append(self.sp_id_tensor) phones = paddle.concat(phones_seg) phones_list.append(phones) From 1f100b15731b9ff66fc1da17c9d89401d80b21ce Mon Sep 17 00:00:00 2001 From: liangym <34430015+lym0302@users.noreply.github.com> Date: Thu, 25 Aug 2022 18:59:17 +0800 Subject: [PATCH 017/101] [tts] add tts finetune example (#2297) * add tts finetune example, test=tts * fix finetune Co-authored-by: TianYuan --- examples/other/tts_finetune/tts3/README.md | 214 +++++++++++++ examples/other/tts_finetune/tts3/finetune.py | 192 ++++++++++++ .../tts_finetune/tts3/local/check_oov.py | 125 ++++++++ .../other/tts_finetune/tts3/local/extract.py | 287 ++++++++++++++++++ .../tts_finetune/tts3/local/label_process.py | 63 ++++ .../tts_finetune/tts3/local/prepare_env.py | 35 +++ examples/other/tts_finetune/tts3/path.sh | 13 + examples/other/tts_finetune/tts3/run.sh | 61 ++++ 8 files changed, 990 insertions(+) create mode 100644 examples/other/tts_finetune/tts3/README.md create mode 100644 examples/other/tts_finetune/tts3/finetune.py create mode 100644 
examples/other/tts_finetune/tts3/local/check_oov.py create mode 100644 examples/other/tts_finetune/tts3/local/extract.py create mode 100644 examples/other/tts_finetune/tts3/local/label_process.py create mode 100644 examples/other/tts_finetune/tts3/local/prepare_env.py create mode 100755 examples/other/tts_finetune/tts3/path.sh create mode 100755 examples/other/tts_finetune/tts3/run.sh diff --git a/examples/other/tts_finetune/tts3/README.md b/examples/other/tts_finetune/tts3/README.md new file mode 100644 index 000000000..dbb7a32db --- /dev/null +++ b/examples/other/tts_finetune/tts3/README.md @@ -0,0 +1,214 @@ +# Finetune your own AM based on FastSpeech2 with AISHELL-3. +This example shows how to finetune your own AM based on FastSpeech2 with AISHELL-3. We use part of csmsc's data (top 200) as finetune data in this example. The example is implemented according to this [discussion](https://github.com/PaddlePaddle/PaddleSpeech/discussions/1842). Thanks to the developer for the idea. + +We use AISHELL-3 to train a multi-speaker fastspeech2 model here. You can refer [examples/aishell3/tts3](https://github.com/lym0302/PaddleSpeech/tree/develop/examples/aishell3/tts3) to train multi-speaker fastspeech2 from scratch. + +## Prepare +### Download Pretrained Fastspeech2 model +Assume the path to the model is `./pretrained_models`. Download pretrained fastspeech2 model with aishell3: [fastspeech2_aishell3_ckpt_1.1.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_aishell3_ckpt_1.1.0.zip). + +```bash +mkdir -p pretrained_models && cd pretrained_models +wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_aishell3_ckpt_1.1.0.zip +unzip fastspeech2_aishell3_ckpt_1.1.0.zip +cd ../ +``` +### Download MFA tools and pretrained model +Assume the path to the MFA tool is `./tools`. Download [MFA](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner/releases/download/v1.0.1/montreal-forced-aligner_linux.tar.gz) and pretrained MFA models with aishell3: [aishell3_model.zip](https://paddlespeech.bj.bcebos.com/MFA/ernie_sat/aishell3_model.zip). + +```bash +mkdir -p tools && cd tools +# mfa tool +wget https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner/releases/download/v1.0.1/montreal-forced-aligner_linux.tar.gz +tar xvf montreal-forced-aligner_linux.tar.gz +cp montreal-forced-aligner/lib/libpython3.6m.so.1.0 montreal-forced-aligner/lib/libpython3.6m.so +# pretrained mfa model +mkdir -p aligner && cd aligner +wget https://paddlespeech.bj.bcebos.com/MFA/ernie_sat/aishell3_model.zip +unzip aishell3_model.zip +wget https://paddlespeech.bj.bcebos.com/MFA/AISHELL-3/with_tone/simple.lexicon +cd ../../ +``` + +### Prepare your data +Assume the path to the dataset is `./input`. This directory contains audio files (*.wav) and label file (labels.txt). The audio file is in wav format. The format of the label file is: utt_id|pinyin. Here is an example of the first 200 data of csmsc. + +```bash +mkdir -p input && cd input +wget https://paddlespeech.bj.bcebos.com/datasets/csmsc_mini.zip +unzip csmsc_mini.zip +cd ../ +``` + +When "Prepare" done. The structure of the current directory is listed below. +```text +├── input +│ ├── csmsc_mini +│ │ ├── 000001.wav +│ │ ├── 000002.wav +│ │ ├── 000003.wav +│ │ ├── ... 
+│ │ ├── 000200.wav +│ │ ├── labels.txt +│ └── csmsc_mini.zip +├── pretrained_models +│ ├── fastspeech2_aishell3_ckpt_1.1.0 +│ │ ├── default.yaml +│ │ ├── energy_stats.npy +│ │ ├── phone_id_map.txt +│ │ ├── pitch_stats.npy +│ │ ├── snapshot_iter_96400.pdz +│ │ ├── speaker_id_map.txt +│ │ └── speech_stats.npy +│ └── fastspeech2_aishell3_ckpt_1.1.0.zip +└── tools + ├── aligner + │ ├── aishell3_model + │ ├── aishell3_model.zip + │ └── simple.lexicon + ├── montreal-forced-aligner + │ ├── bin + │ ├── lib + │ └── pretrained_models + └── montreal-forced-aligner_linux.tar.gz + ... + +``` + + +## Get Started +Run the command below to +1. **source path**. +2. finetune the model. +3. synthesize wavs. + - synthesize waveform from text file. + +```bash +./run.sh +``` +You can choose a range of stages you want to run, or set `stage` equal to `stop-stage` to run only one stage. + +### Model Finetune + +Finetune a FastSpeech2 model. + +```bash +./run.sh --stage 0 --stop-stage 0 +``` +`stage 0` of `run.sh` calls `finetune.py`, here's the complete help message. + +```text +usage: finetune.py [-h] [--input_dir INPUT_DIR] [--pretrained_model_dir PRETRAINED_MODEL_DIR] + [--mfa_dir MFA_DIR] [--dump_dir DUMP_DIR] + [--output_dir OUTPUT_DIR] [--lang LANG] + [--ngpu NGPU] + +optional arguments: + -h, --help show this help message and exit + --input_dir INPUT_DIR + directory containing audio and label file + --pretrained_model_dir PRETRAINED_MODEL_DIR + Path to pretrained model + --mfa_dir MFA_DIR directory to save aligned files + --dump_dir DUMP_DIR + directory to save feature files and metadata + --output_dir OUTPUT_DIR + directory to save finetune model + --lang LANG Choose input audio language, zh or en + --ngpu NGPU if ngpu=0, use cpu + --epoch EPOCH the epoch of finetune + --batch_size BATCH_SIZE + the batch size of finetune, default -1 means same as pretrained model + +``` +1. `--input_dir` is the directory containing audio and label file. +2. `--pretrained_model_dir` is the directory incluing pretrained fastspeech2_aishell3 model. +3. `--mfa_dir` is the directory to save the results of aligning from pretrained MFA_aishell3 model. +4. `--dump_dir` is the directory including audio feature and metadata. +5. `--output_dir` is the directory to save finetune model. +6. `--lang` is the language of input audio, zh or en. +7. `--ngpu` is the number of gpu. +8. `--epoch` is the epoch of finetune. +9. `--batch_size` is the batch size of finetune. + +### Synthesizing +We use [HiFiGAN](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/voc5) as the neural vocoder. +Assume the path to the hifigan model is `./pretrained_models`. Download the pretrained HiFiGAN model from [hifigan_aishell3_ckpt_0.2.0](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/hifigan/hifigan_aishell3_ckpt_0.2.0.zip) and unzip it. + +```bash +cd pretrained_models +wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/hifigan/hifigan_aishell3_ckpt_0.2.0.zip +unzip hifigan_aishell3_ckpt_0.2.0.zip +cd ../ +``` + +HiFiGAN checkpoint contains files listed below. +```text +hifigan_aishell3_ckpt_0.2.0 +├── default.yaml # default config used to train HiFiGAN +├── feats_stats.npy # statistics used to normalize spectrogram when training HiFiGAN +└── snapshot_iter_2500000.pdz # generator parameters of HiFiGAN +``` +Modify `ckpt` in `run.sh` to the final model in `exp/default/checkpoints`. 
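The exact iteration number of the final checkpoint depends on how much finetune data you use and how many epochs you run, so list the checkpoint directory first. A small sketch of one way to find the newest checkpoint; the path assumes the default `--output_dir` of this example, and the file name shown is only illustrative:

```bash
# newest finetuned checkpoint under the default output dir
ls -t exp/default/checkpoints/*.pdz | head -n 1
# e.g. exp/default/checkpoints/snapshot_iter_96600.pdz -> set ckpt=snapshot_iter_96600 in run.sh
```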
+```bash +./run.sh --stage 1 --stop-stage 1 +``` +`stage 1` of `run.sh` calls `${BIN_DIR}/../synthesize_e2e.py`, which can synthesize waveform from text file. + +```text +usage: synthesize_e2e.py [-h] + [--am {speedyspeech_csmsc,speedyspeech_aishell3,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech}] + [--am_config AM_CONFIG] [--am_ckpt AM_CKPT] + [--am_stat AM_STAT] [--phones_dict PHONES_DICT] + [--tones_dict TONES_DICT] + [--speaker_dict SPEAKER_DICT] [--spk_id SPK_ID] + [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,style_melgan_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,wavernn_csmsc}] + [--voc_config VOC_CONFIG] [--voc_ckpt VOC_CKPT] + [--voc_stat VOC_STAT] [--lang LANG] + [--inference_dir INFERENCE_DIR] [--ngpu NGPU] + [--text TEXT] [--output_dir OUTPUT_DIR] + +Synthesize with acoustic model & vocoder + +optional arguments: + -h, --help show this help message and exit + --am {speedyspeech_csmsc,speedyspeech_aishell3,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech} + Choose acoustic model type of tts task. + --am_config AM_CONFIG + Config of acoustic model. + --am_ckpt AM_CKPT Checkpoint file of acoustic model. + --am_stat AM_STAT mean and standard deviation used to normalize + spectrogram when training acoustic model. + --phones_dict PHONES_DICT + phone vocabulary file. + --tones_dict TONES_DICT + tone vocabulary file. + --speaker_dict SPEAKER_DICT + speaker id map file. + --spk_id SPK_ID spk id for multi speaker acoustic model + --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,style_melgan_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,wavernn_csmsc} + Choose vocoder type of tts task. + --voc_config VOC_CONFIG + Config of voc. + --voc_ckpt VOC_CKPT Checkpoint file of voc. + --voc_stat VOC_STAT mean and standard deviation used to normalize + spectrogram when training voc. + --lang LANG Choose model language. zh or en + --inference_dir INFERENCE_DIR + dir to save inference models + --ngpu NGPU if ngpu == 0, use cpu. + --text TEXT text to synthesize, a 'utt_id sentence' pair per line. + --output_dir OUTPUT_DIR + output dir. +``` +1. `--am` is acoustic model type with the format {model_name}_{dataset} +2. `--am_config`, `--am_ckpt`, `--am_stat`, `--phones_dict` `--speaker_dict` are arguments for acoustic model, which correspond to the 5 files in the fastspeech2 pretrained model. +3. `--voc` is vocoder type with the format {model_name}_{dataset} +4. `--voc_config`, `--voc_ckpt`, `--voc_stat` are arguments for vocoder, which correspond to the 3 files in the parallel wavegan pretrained model. +5. `--lang` is the model language, which can be `zh` or `en`. +6. `--text` is the text file, which contains sentences to synthesize. +7. `--output_dir` is the directory to save synthesized audio files. +8. `--ngpu` is the number of gpus to use, if ngpu == 0, use cpu. + +### Tips +If you want to get better audio quality, you can use more audios to finetune. \ No newline at end of file diff --git a/examples/other/tts_finetune/tts3/finetune.py b/examples/other/tts_finetune/tts3/finetune.py new file mode 100644 index 000000000..f05ba9435 --- /dev/null +++ b/examples/other/tts_finetune/tts3/finetune.py @@ -0,0 +1,192 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse +import os +from pathlib import Path +from typing import Union + +import yaml +from paddle import distributed as dist +from yacs.config import CfgNode + +from paddlespeech.t2s.exps.fastspeech2.train import train_sp + +from local.check_oov import get_check_result +from local.extract import extract_feature +from local.label_process import get_single_label +from local.prepare_env import generate_finetune_env +from utils.gen_duration_from_textgrid import gen_duration_from_textgrid + +DICT_EN = 'tools/aligner/cmudict-0.7b' +DICT_ZH = 'tools/aligner/simple.lexicon' +MODEL_DIR_EN = 'tools/aligner/vctk_model.zip' +MODEL_DIR_ZH = 'tools/aligner/aishell3_model.zip' +MFA_PHONE_EN = 'tools/aligner/vctk_model/meta.yaml' +MFA_PHONE_ZH = 'tools/aligner/aishell3_model/meta.yaml' +MFA_PATH = 'tools/montreal-forced-aligner/bin' +os.environ['PATH'] = MFA_PATH + '/:' + os.environ['PATH'] + + +class TrainArgs(): + def __init__(self, ngpu, config_file, dump_dir: Path, output_dir: Path): + self.config = str(config_file) + self.train_metadata = str(dump_dir / "train/norm/metadata.jsonl") + self.dev_metadata = str(dump_dir / "dev/norm/metadata.jsonl") + self.output_dir = str(output_dir) + self.ngpu = ngpu + self.phones_dict = str(dump_dir / "phone_id_map.txt") + self.speaker_dict = str(dump_dir / "speaker_id_map.txt") + self.voice_cloning = False + + +def get_mfa_result( + input_dir: Union[str, Path], + mfa_dir: Union[str, Path], + lang: str='en', ): + """get mfa result + + Args: + input_dir (Union[str, Path]): input dir including wav file and label + mfa_dir (Union[str, Path]): mfa result dir + lang (str, optional): input audio language. Defaults to 'en'. + """ + # MFA + if lang == 'en': + DICT = DICT_EN + MODEL_DIR = MODEL_DIR_EN + + elif lang == 'zh': + DICT = DICT_ZH + MODEL_DIR = MODEL_DIR_ZH + else: + print('please input right lang!!') + + CMD = 'mfa_align' + ' ' + str( + input_dir) + ' ' + DICT + ' ' + MODEL_DIR + ' ' + str(mfa_dir) + os.system(CMD) + + +if __name__ == '__main__': + # parse config and args + parser = argparse.ArgumentParser( + description="Preprocess audio and then extract features.") + + parser.add_argument( + "--input_dir", + type=str, + default="./input/baker_mini", + help="directory containing audio and label file") + + parser.add_argument( + "--pretrained_model_dir", + type=str, + default="./pretrained_models/fastspeech2_aishell3_ckpt_1.1.0", + help="Path to pretrained model") + + parser.add_argument( + "--mfa_dir", + type=str, + default="./mfa_result", + help="directory to save aligned files") + + parser.add_argument( + "--dump_dir", + type=str, + default="./dump", + help="directory to save feature files and metadata.") + + parser.add_argument( + "--output_dir", + type=str, + default="./exp/default/", + help="directory to save finetune model.") + + parser.add_argument( + '--lang', + type=str, + default='zh', + choices=['zh', 'en'], + help='Choose input audio language. 
zh or en') + + parser.add_argument( + "--ngpu", type=int, default=2, help="if ngpu=0, use cpu.") + + parser.add_argument("--epoch", type=int, default=100, help="finetune epoch") + + parser.add_argument( + "--batch_size", + type=int, + default=-1, + help="batch size, default -1 means same as pretrained model") + + args = parser.parse_args() + + fs = 24000 + n_shift = 300 + input_dir = Path(args.input_dir).expanduser() + mfa_dir = Path(args.mfa_dir).expanduser() + mfa_dir.mkdir(parents=True, exist_ok=True) + dump_dir = Path(args.dump_dir).expanduser() + dump_dir.mkdir(parents=True, exist_ok=True) + output_dir = Path(args.output_dir).expanduser() + output_dir.mkdir(parents=True, exist_ok=True) + pretrained_model_dir = Path(args.pretrained_model_dir).expanduser() + + # read config + config_file = pretrained_model_dir / "default.yaml" + with open(config_file) as f: + config = CfgNode(yaml.safe_load(f)) + config.max_epoch = config.max_epoch + args.epoch + if args.batch_size > 0: + config.batch_size = args.batch_size + + if args.lang == 'en': + lexicon_file = DICT_EN + mfa_phone_file = MFA_PHONE_EN + elif args.lang == 'zh': + lexicon_file = DICT_ZH + mfa_phone_file = MFA_PHONE_ZH + else: + print('please input right lang!!') + am_phone_file = pretrained_model_dir / "phone_id_map.txt" + label_file = input_dir / "labels.txt" + + #check phone for mfa and am finetune + oov_words, oov_files, oov_file_words = get_check_result( + label_file, lexicon_file, mfa_phone_file, am_phone_file) + input_dir = get_single_label(label_file, oov_files, input_dir) + + # get mfa result + get_mfa_result(input_dir, mfa_dir, args.lang) + + # # generate durations.txt + duration_file = "./durations.txt" + gen_duration_from_textgrid(mfa_dir, duration_file, fs, n_shift) + + # generate phone and speaker map files + extract_feature(duration_file, config, input_dir, dump_dir, + pretrained_model_dir) + + # create finetune env + generate_finetune_env(output_dir, pretrained_model_dir) + + # create a new args for training + train_args = TrainArgs(args.ngpu, config_file, dump_dir, output_dir) + + # finetune models + # dispatch + if args.ngpu > 1: + dist.spawn(train_sp, (train_args, config), nprocs=args.ngpu) + else: + train_sp(train_args, config) diff --git a/examples/other/tts_finetune/tts3/local/check_oov.py b/examples/other/tts_finetune/tts3/local/check_oov.py new file mode 100644 index 000000000..4d6854826 --- /dev/null +++ b/examples/other/tts_finetune/tts3/local/check_oov.py @@ -0,0 +1,125 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from pathlib import Path +from typing import Dict +from typing import List +from typing import Union + + +def check_phone(label_file: Union[str, Path], + pinyin_phones: Dict[str, str], + mfa_phones: List[str], + am_phones: List[str], + oov_record: str="./oov_info.txt"): + """Check whether the phoneme corresponding to the audio text content + is in the phoneme list of the pretrained mfa model to ensure that the alignment is normal. 
+ Check whether the phoneme corresponding to the audio text content + is in the phoneme list of the pretrained am model to ensure finetune (normalize) is normal. + + Args: + label_file (Union[str, Path]): label file, format: utt_id|phone seq + pinyin_phones (dict): pinyin to phones map dict + mfa_phones (list): the phone list of pretrained mfa model + am_phones (list): the phone list of pretrained mfa model + + Returns: + oov_words (list): oov words + oov_files (list): utt id list that exist oov + oov_file_words (dict): the oov file and oov phone in this file + """ + oov_words = [] + oov_files = [] + oov_file_words = {} + + with open(label_file, "r") as f: + for line in f.readlines(): + utt_id = line.split("|")[0] + transcription = line.strip().split("|")[1] + flag = 0 + temp_oov_words = [] + for word in transcription.split(" "): + if word not in pinyin_phones.keys(): + temp_oov_words.append(word) + flag = 1 + if word not in oov_words: + oov_words.append(word) + else: + for p in pinyin_phones[word]: + if p not in mfa_phones or p not in am_phones: + temp_oov_words.append(word) + flag = 1 + if word not in oov_words: + oov_words.append(word) + if flag == 1: + oov_files.append(utt_id) + oov_file_words[utt_id] = temp_oov_words + + if oov_record is not None: + with open(oov_record, "w") as fw: + fw.write("oov_words: " + str(oov_words) + "\n") + fw.write("oov_files: " + str(oov_files) + "\n") + fw.write("oov_file_words: " + str(oov_file_words) + "\n") + + return oov_words, oov_files, oov_file_words + + +def get_pinyin_phones(lexicon_file: Union[str, Path]): + # pinyin to phones + pinyin_phones = {} + with open(lexicon_file, "r") as f2: + for line in f2.readlines(): + line_list = line.strip().split(" ") + pinyin = line_list[0] + if line_list[1] == '': + phones = line_list[2:] + else: + phones = line_list[1:] + pinyin_phones[pinyin] = phones + + return pinyin_phones + + +def get_mfa_phone(mfa_phone_file: Union[str, Path]): + # get phones from pretrained mfa model (meta.yaml) + mfa_phones = [] + with open(mfa_phone_file, "r") as f: + for line in f.readlines(): + if line.startswith("-"): + phone = line.strip().split(" ")[-1] + mfa_phones.append(phone) + + return mfa_phones + + +def get_am_phone(am_phone_file: Union[str, Path]): + # get phones from pretrained am model (phone_id_map.txt) + am_phones = [] + with open(am_phone_file, "r") as f: + for line in f.readlines(): + phone = line.strip().split(" ")[0] + am_phones.append(phone) + + return am_phones + + +def get_check_result(label_file: Union[str, Path], + lexicon_file: Union[str, Path], + mfa_phone_file: Union[str, Path], + am_phone_file: Union[str, Path]): + pinyin_phones = get_pinyin_phones(lexicon_file) + mfa_phones = get_mfa_phone(mfa_phone_file) + am_phones = get_am_phone(am_phone_file) + oov_words, oov_files, oov_file_words = check_phone( + label_file, pinyin_phones, mfa_phones, am_phones) + return oov_words, oov_files, oov_file_words diff --git a/examples/other/tts_finetune/tts3/local/extract.py b/examples/other/tts_finetune/tts3/local/extract.py new file mode 100644 index 000000000..edd92420b --- /dev/null +++ b/examples/other/tts_finetune/tts3/local/extract.py @@ -0,0 +1,287 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import logging +import math +import os +from operator import itemgetter +from pathlib import Path +from typing import Dict +from typing import Union + +import jsonlines +import numpy as np +from sklearn.preprocessing import StandardScaler +from tqdm import tqdm + +from paddlespeech.t2s.datasets.data_table import DataTable +from paddlespeech.t2s.datasets.get_feats import Energy +from paddlespeech.t2s.datasets.get_feats import LogMelFBank +from paddlespeech.t2s.datasets.get_feats import Pitch +from paddlespeech.t2s.datasets.preprocess_utils import get_phn_dur +from paddlespeech.t2s.datasets.preprocess_utils import merge_silence +from paddlespeech.t2s.exps.fastspeech2.preprocess import process_sentences + + +def read_stats(stats_file: Union[str, Path]): + scaler = StandardScaler() + scaler.mean_ = np.load(stats_file)[0] + scaler.scale_ = np.load(stats_file)[1] + scaler.n_features_in_ = scaler.mean_.shape[0] + return scaler + + +def get_stats(pretrained_model_dir: Path): + speech_stats_file = pretrained_model_dir / "speech_stats.npy" + pitch_stats_file = pretrained_model_dir / "pitch_stats.npy" + energy_stats_file = pretrained_model_dir / "energy_stats.npy" + speech_scaler = read_stats(speech_stats_file) + pitch_scaler = read_stats(pitch_stats_file) + energy_scaler = read_stats(energy_stats_file) + + return speech_scaler, pitch_scaler, energy_scaler + + +def get_map(duration_file: Union[str, Path], + dump_dir: Path, + pretrained_model_dir: Path): + """get phone map and speaker map, save on dump_dir + + Args: + duration_file (str): durantions.txt + dump_dir (Path): dump dir + pretrained_model_dir (Path): pretrained model dir + """ + # copy phone map file from pretrained model path + phones_dict = dump_dir / "phone_id_map.txt" + os.system("cp %s %s" % + (pretrained_model_dir / "phone_id_map.txt", phones_dict)) + + # create a new speaker map file, replace the previous speakers. 
+ sentences, speaker_set = get_phn_dur(duration_file) + merge_silence(sentences) + speakers = sorted(list(speaker_set)) + num = len(speakers) + speaker_dict = dump_dir / "speaker_id_map.txt" + with open(speaker_dict, 'w') as f, open(pretrained_model_dir / + "speaker_id_map.txt", 'r') as fr: + for i, spk in enumerate(speakers): + f.write(spk + ' ' + str(i) + '\n') + for line in fr.readlines(): + spk_id = line.strip().split(" ")[-1] + if int(spk_id) >= num: + f.write(line) + + vocab_phones = {} + with open(phones_dict, 'rt') as f: + phn_id = [line.strip().split() for line in f.readlines()] + for phn, id in phn_id: + vocab_phones[phn] = int(id) + + vocab_speaker = {} + with open(speaker_dict, 'rt') as f: + spk_id = [line.strip().split() for line in f.readlines()] + for spk, id in spk_id: + vocab_speaker[spk] = int(id) + + return sentences, vocab_phones, vocab_speaker + + +def get_extractor(config): + # Extractor + mel_extractor = LogMelFBank( + sr=config.fs, + n_fft=config.n_fft, + hop_length=config.n_shift, + win_length=config.win_length, + window=config.window, + n_mels=config.n_mels, + fmin=config.fmin, + fmax=config.fmax) + pitch_extractor = Pitch( + sr=config.fs, + hop_length=config.n_shift, + f0min=config.f0min, + f0max=config.f0max) + energy_extractor = Energy( + n_fft=config.n_fft, + hop_length=config.n_shift, + win_length=config.win_length, + window=config.window) + + return mel_extractor, pitch_extractor, energy_extractor + + +def normalize(speech_scaler, + pitch_scaler, + energy_scaler, + vocab_phones: Dict, + vocab_speaker: Dict, + raw_dump_dir: Path, + type: str): + + dumpdir = raw_dump_dir / type / "norm" + dumpdir = Path(dumpdir).expanduser() + dumpdir.mkdir(parents=True, exist_ok=True) + + # get dataset + metadata_file = raw_dump_dir / type / "raw" / "metadata.jsonl" + with jsonlines.open(metadata_file, 'r') as reader: + metadata = list(reader) + dataset = DataTable( + metadata, + converters={ + "speech": np.load, + "pitch": np.load, + "energy": np.load, + }) + logging.info(f"The number of files = {len(dataset)}.") + + # process each file + output_metadata = [] + + for item in tqdm(dataset): + utt_id = item['utt_id'] + speech = item['speech'] + pitch = item['pitch'] + energy = item['energy'] + # normalize + speech = speech_scaler.transform(speech) + speech_dir = dumpdir / "data_speech" + speech_dir.mkdir(parents=True, exist_ok=True) + speech_path = speech_dir / f"{utt_id}_speech.npy" + np.save(speech_path, speech.astype(np.float32), allow_pickle=False) + + pitch = pitch_scaler.transform(pitch) + pitch_dir = dumpdir / "data_pitch" + pitch_dir.mkdir(parents=True, exist_ok=True) + pitch_path = pitch_dir / f"{utt_id}_pitch.npy" + np.save(pitch_path, pitch.astype(np.float32), allow_pickle=False) + + energy = energy_scaler.transform(energy) + energy_dir = dumpdir / "data_energy" + energy_dir.mkdir(parents=True, exist_ok=True) + energy_path = energy_dir / f"{utt_id}_energy.npy" + np.save(energy_path, energy.astype(np.float32), allow_pickle=False) + + phone_ids = [vocab_phones[p] for p in item['phones']] + spk_id = vocab_speaker[item["speaker"]] + record = { + "utt_id": item['utt_id'], + "spk_id": spk_id, + "text": phone_ids, + "text_lengths": item['text_lengths'], + "speech_lengths": item['speech_lengths'], + "durations": item['durations'], + "speech": str(speech_path), + "pitch": str(pitch_path), + "energy": str(energy_path) + } + # add spk_emb for voice cloning + if "spk_emb" in item: + record["spk_emb"] = str(item["spk_emb"]) + + output_metadata.append(record) + 
output_metadata.sort(key=itemgetter('utt_id')) + output_metadata_path = Path(dumpdir) / "metadata.jsonl" + with jsonlines.open(output_metadata_path, 'w') as writer: + for item in output_metadata: + writer.write(item) + logging.info(f"metadata dumped into {output_metadata_path}") + + +def extract_feature(duration_file: str, + config, + input_dir: Path, + dump_dir: Path, + pretrained_model_dir: Path): + + sentences, vocab_phones, vocab_speaker = get_map(duration_file, dump_dir, + pretrained_model_dir) + mel_extractor, pitch_extractor, energy_extractor = get_extractor(config) + + wav_files = sorted(list((input_dir).rglob("*.wav"))) + # split data into 3 sections, train: 80%, dev: 10%, test: 10% + num_train = math.ceil(len(wav_files) * 0.8) + num_dev = math.ceil(len(wav_files) * 0.1) + print(num_train, num_dev) + + train_wav_files = wav_files[:num_train] + dev_wav_files = wav_files[num_train:num_train + num_dev] + test_wav_files = wav_files[num_train + num_dev:] + + train_dump_dir = dump_dir / "train" / "raw" + train_dump_dir.mkdir(parents=True, exist_ok=True) + dev_dump_dir = dump_dir / "dev" / "raw" + dev_dump_dir.mkdir(parents=True, exist_ok=True) + test_dump_dir = dump_dir / "test" / "raw" + test_dump_dir.mkdir(parents=True, exist_ok=True) + + # process for the 3 sections + num_cpu = 4 + cut_sil = True + spk_emb_dir = None + write_metadata_method = "w" + speech_scaler, pitch_scaler, energy_scaler = get_stats(pretrained_model_dir) + + if train_wav_files: + process_sentences( + config=config, + fps=train_wav_files, + sentences=sentences, + output_dir=train_dump_dir, + mel_extractor=mel_extractor, + pitch_extractor=pitch_extractor, + energy_extractor=energy_extractor, + nprocs=num_cpu, + cut_sil=cut_sil, + spk_emb_dir=spk_emb_dir, + write_metadata_method=write_metadata_method) + # norm + normalize(speech_scaler, pitch_scaler, energy_scaler, vocab_phones, + vocab_speaker, dump_dir, "train") + + if dev_wav_files: + process_sentences( + config=config, + fps=dev_wav_files, + sentences=sentences, + output_dir=dev_dump_dir, + mel_extractor=mel_extractor, + pitch_extractor=pitch_extractor, + energy_extractor=energy_extractor, + nprocs=num_cpu, + cut_sil=cut_sil, + spk_emb_dir=spk_emb_dir, + write_metadata_method=write_metadata_method) + # norm + normalize(speech_scaler, pitch_scaler, energy_scaler, vocab_phones, + vocab_speaker, dump_dir, "dev") + + if test_wav_files: + process_sentences( + config=config, + fps=test_wav_files, + sentences=sentences, + output_dir=test_dump_dir, + mel_extractor=mel_extractor, + pitch_extractor=pitch_extractor, + energy_extractor=energy_extractor, + nprocs=num_cpu, + cut_sil=cut_sil, + spk_emb_dir=spk_emb_dir, + write_metadata_method=write_metadata_method) + + # norm + normalize(speech_scaler, pitch_scaler, energy_scaler, vocab_phones, + vocab_speaker, dump_dir, "test") diff --git a/examples/other/tts_finetune/tts3/local/label_process.py b/examples/other/tts_finetune/tts3/local/label_process.py new file mode 100644 index 000000000..711dde4b6 --- /dev/null +++ b/examples/other/tts_finetune/tts3/local/label_process.py @@ -0,0 +1,63 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os +from pathlib import Path +from typing import List +from typing import Union + + +def change_baker_label(baker_label_file: Union[str, Path], + out_label_file: Union[str, Path]): + """change baker label file to regular label file + + Args: + baker_label_file (Union[str, Path]): Original baker label file + out_label_file (Union[str, Path]): regular label file + """ + with open(baker_label_file) as f: + lines = f.readlines() + + with open(out_label_file, "w") as fw: + for i in range(0, len(lines), 2): + utt_id = lines[i].split()[0] + transcription = lines[i + 1].strip() + fw.write(utt_id + "|" + transcription + "\n") + + +def get_single_label(label_file: Union[str, Path], + oov_files: List[Union[str, Path]], + input_dir: Union[str, Path]): + """Divide the label file into individual files according to label_file + + Args: + label_file (str or Path): label file, format: utt_id|phones id + input_dir (Path): input dir including audios + """ + input_dir = Path(input_dir).expanduser() + new_dir = input_dir / "newdir" + new_dir.mkdir(parents=True, exist_ok=True) + + with open(label_file, "r") as f: + for line in f.readlines(): + utt_id = line.split("|")[0] + if utt_id not in oov_files: + transcription = line.split("|")[1].strip() + wav_file = str(input_dir) + "/" + utt_id + ".wav" + new_wav_file = str(new_dir) + "/" + utt_id + ".wav" + os.system("cp %s %s" % (wav_file, new_wav_file)) + single_file = str(new_dir) + "/" + utt_id + ".txt" + with open(single_file, "w") as fw: + fw.write(transcription) + + return new_dir diff --git a/examples/other/tts_finetune/tts3/local/prepare_env.py b/examples/other/tts_finetune/tts3/local/prepare_env.py new file mode 100644 index 000000000..f2166ff1b --- /dev/null +++ b/examples/other/tts_finetune/tts3/local/prepare_env.py @@ -0,0 +1,35 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import os +from pathlib import Path + + +def generate_finetune_env(output_dir: Path, pretrained_model_dir: Path): + + output_dir = output_dir / "checkpoints/" + output_dir = output_dir.resolve() + output_dir.mkdir(parents=True, exist_ok=True) + + model_path = sorted(list((pretrained_model_dir).rglob("*.pdz")))[0] + model_path = model_path.resolve() + iter = int(str(model_path).split("_")[-1].split(".")[0]) + model_file = str(model_path).split("/")[-1] + + os.system("cp %s %s" % (model_path, output_dir)) + + records_file = output_dir / "records.jsonl" + with open(records_file, "w") as f: + line = "\"time\": \"2022-08-06 07:51:53.463650\", \"path\": \"%s\", \"iteration\": %d" % ( + str(output_dir / model_file), iter) + f.write("{" + line + "}" + "\n") diff --git a/examples/other/tts_finetune/tts3/path.sh b/examples/other/tts_finetune/tts3/path.sh new file mode 100755 index 000000000..9e4bf23c3 --- /dev/null +++ b/examples/other/tts_finetune/tts3/path.sh @@ -0,0 +1,13 @@ +#!/bin/bash +export MAIN_ROOT=`realpath ${PWD}/../../../../` + +export PATH=${MAIN_ROOT}:${MAIN_ROOT}/utils:${PATH} +export LC_ALL=C + +export PYTHONDONTWRITEBYTECODE=1 +# Use UTF-8 in Python to avoid UnicodeDecodeError when LC_ALL=C +export PYTHONIOENCODING=UTF-8 +export PYTHONPATH=${MAIN_ROOT}:${PYTHONPATH} + +MODEL=fastspeech2 +export BIN_DIR=${MAIN_ROOT}/paddlespeech/t2s/exps/${MODEL} diff --git a/examples/other/tts_finetune/tts3/run.sh b/examples/other/tts_finetune/tts3/run.sh new file mode 100755 index 000000000..9bb7ec6f0 --- /dev/null +++ b/examples/other/tts_finetune/tts3/run.sh @@ -0,0 +1,61 @@ +#!/bin/bash + +set -e +source path.sh + + +input_dir=./input/csmsc_mini +pretrained_model_dir=./pretrained_models/fastspeech2_aishell3_ckpt_1.1.0 +mfa_dir=./mfa_result +dump_dir=./dump +output_dir=./exp/default +lang=zh +ngpu=2 + +ckpt=snapshot_iter_96600 + +gpus=0,1 +CUDA_VISIBLE_DEVICES=${gpus} +stage=0 +stop_stage=100 + + +# with the following command, you can choose the stage range you want to run +# such as `./run.sh --stage 0 --stop-stage 0` +# this can not be mixed use with `$1`, `$2` ... 
+source ${MAIN_ROOT}/utils/parse_options.sh || exit 1 + +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + # finetune + python3 finetune.py \ + --input_dir=${input_dir} \ + --pretrained_model_dir=${pretrained_model_dir} \ + --mfa_dir=${mfa_dir} \ + --dump_dir=${dump_dir} \ + --output_dir=${output_dir} \ + --lang=${lang} \ + --ngpu=${ngpu} \ + --epoch=100 +fi + + +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + echo "in hifigan syn_e2e" + FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/../synthesize_e2e.py \ + --am=fastspeech2_aishell3 \ + --am_config=${pretrained_model_dir}/default.yaml \ + --am_ckpt=${output_dir}/checkpoints/${ckpt}.pdz \ + --am_stat=${pretrained_model_dir}/speech_stats.npy \ + --voc=hifigan_aishell3 \ + --voc_config=pretrained_models/hifigan_aishell3_ckpt_0.2.0/default.yaml \ + --voc_ckpt=pretrained_models/hifigan_aishell3_ckpt_0.2.0/snapshot_iter_2500000.pdz \ + --voc_stat=pretrained_models/hifigan_aishell3_ckpt_0.2.0/feats_stats.npy \ + --lang=zh \ + --text=${BIN_DIR}/../sentences.txt \ + --output_dir=./test_e2e \ + --phones_dict=${dump_dir}/phone_id_map.txt \ + --speaker_dict=${dump_dir}/speaker_id_map.txt \ + --spk_id=0 +fi From d1c70a780985e226c00a1d4c6e22626c4f668444 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Thu, 25 Aug 2022 19:51:04 +0800 Subject: [PATCH 018/101] fix g2pw model (#2304) --- paddlespeech/resource/pretrained_models.py | 6 ++++++ paddlespeech/t2s/frontend/g2pw/onnx_api.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/paddlespeech/resource/pretrained_models.py b/paddlespeech/resource/pretrained_models.py index 872d564cd..f049879a3 100644 --- a/paddlespeech/resource/pretrained_models.py +++ b/paddlespeech/resource/pretrained_models.py @@ -1363,5 +1363,11 @@ g2pw_onnx_models = { 'md5': '7e049a55547da840502cf99e8a64f20e', }, + '1.1': { + 'url': + 'https://paddlespeech.bj.bcebos.com/Parakeet/released_models/g2p/G2PWModel_1.1.zip', + 'md5': + 'f8b60501770bff92ed6ce90860a610e6', + }, }, } diff --git a/paddlespeech/t2s/frontend/g2pw/onnx_api.py b/paddlespeech/t2s/frontend/g2pw/onnx_api.py index 9e708ec88..180e8ae15 100644 --- a/paddlespeech/t2s/frontend/g2pw/onnx_api.py +++ b/paddlespeech/t2s/frontend/g2pw/onnx_api.py @@ -34,7 +34,7 @@ from paddlespeech.t2s.frontend.g2pw.utils import load_config from paddlespeech.t2s.frontend.zh_normalization.char_convert import tranditional_to_simplified from paddlespeech.utils.env import MODEL_HOME -model_version = '1.0' +model_version = '1.1' def predict(session, onnx_input, labels): From 7cc1d66863a48b50c2430059c8b84060d84b11a3 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Thu, 25 Aug 2022 21:29:49 +0800 Subject: [PATCH 019/101] Update README.md --- examples/other/tts_finetune/tts3/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/other/tts_finetune/tts3/README.md b/examples/other/tts_finetune/tts3/README.md index dbb7a32db..1ad30328b 100644 --- a/examples/other/tts_finetune/tts3/README.md +++ b/examples/other/tts_finetune/tts3/README.md @@ -1,7 +1,7 @@ # Finetune your own AM based on FastSpeech2 with AISHELL-3. This example shows how to finetune your own AM based on FastSpeech2 with AISHELL-3. We use part of csmsc's data (top 200) as finetune data in this example. The example is implemented according to this [discussion](https://github.com/PaddlePaddle/PaddleSpeech/discussions/1842). Thanks to the developer for the idea. 
-We use AISHELL-3 to train a multi-speaker fastspeech2 model here. You can refer [examples/aishell3/tts3](https://github.com/lym0302/PaddleSpeech/tree/develop/examples/aishell3/tts3) to train multi-speaker fastspeech2 from scratch. +We use AISHELL-3 to train a multi-speaker fastspeech2 model. You can refer [examples/aishell3/tts3](https://github.com/lym0302/PaddleSpeech/tree/develop/examples/aishell3/tts3) to train multi-speaker fastspeech2 from scratch. ## Prepare ### Download Pretrained Fastspeech2 model @@ -211,4 +211,4 @@ optional arguments: 8. `--ngpu` is the number of gpus to use, if ngpu == 0, use cpu. ### Tips -If you want to get better audio quality, you can use more audios to finetune. \ No newline at end of file +If you want to get better audio quality, you can use more audios to finetune. From f795d6f03464041766e7be465c08efb6869e0658 Mon Sep 17 00:00:00 2001 From: sneaxiy <32832641+sneaxiy@users.noreply.github.com> Date: Fri, 26 Aug 2022 12:52:19 +0800 Subject: [PATCH 020/101] add barrier (#2309) --- tests/benchmark/pwgan/run_benchmark.sh | 1 + tests/test_tipc/barrier.sh | 10 ++++++++++ 2 files changed, 11 insertions(+) create mode 100644 tests/test_tipc/barrier.sh diff --git a/tests/benchmark/pwgan/run_benchmark.sh b/tests/benchmark/pwgan/run_benchmark.sh index b9cc154fe..9cc070fa1 100755 --- a/tests/benchmark/pwgan/run_benchmark.sh +++ b/tests/benchmark/pwgan/run_benchmark.sh @@ -43,6 +43,7 @@ function _train(){ log_parse_file="mylog/workerlog.0" ;; *) echo "choose run_mode(sp or mp)"; exit 1; esac + bash tests/test_tipc/barrier.sh # 以下不用修改 timeout 15m ${train_cmd} > ${log_file} 2>&1 if [ $? -ne 0 ];then diff --git a/tests/test_tipc/barrier.sh b/tests/test_tipc/barrier.sh new file mode 100644 index 000000000..d29634cc4 --- /dev/null +++ b/tests/test_tipc/barrier.sh @@ -0,0 +1,10 @@ +set -ex +NNODES=${PADDLE_TRAINERS_NUM:-"1"} +PYTHON=${PYTHON:-"python"} +TIMEOUT=${1:-"10m"} + +if [[ "$NNODES" -gt 1 ]]; then + while ! timeout "$TIMEOUT" "$PYTHON" -m paddle.distributed.launch run_check; do + echo "Retry barrier ......" 
+ done +fi From c9de22eaa84c625915fa04199aa277340eb699ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BD=AD=E9=9C=87=E4=B8=9C?= <275331498@qq.com> Date: Fri, 26 Aug 2022 12:57:28 +0800 Subject: [PATCH 021/101] [TN] Update quantifiers (#2308) --- paddlespeech/t2s/frontend/zh_normalization/num.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddlespeech/t2s/frontend/zh_normalization/num.py b/paddlespeech/t2s/frontend/zh_normalization/num.py index ec1367736..0002ed504 100644 --- a/paddlespeech/t2s/frontend/zh_normalization/num.py +++ b/paddlespeech/t2s/frontend/zh_normalization/num.py @@ -28,7 +28,7 @@ UNITS = OrderedDict({ 8: '亿', }) -COM_QUANTIFIERS = '(所|朵|匹|张|座|回|场|尾|条|个|首|阙|阵|网|炮|顶|丘|棵|只|支|袭|辆|挑|担|颗|壳|窠|曲|墙|群|腔|砣|座|客|贯|扎|捆|刀|令|打|手|罗|坡|山|岭|江|溪|钟|队|单|双|对|出|口|头|脚|板|跳|枝|件|贴|针|线|管|名|位|身|堂|课|本|页|家|户|层|丝|毫|厘|分|钱|两|斤|担|铢|石|钧|锱|忽|(千|毫|微)克|毫|厘|(公)分|分|寸|尺|丈|里|寻|常|铺|程|(千|分|厘|毫|微)米|米|撮|勺|合|升|斗|石|盘|碗|碟|叠|桶|笼|盆|盒|杯|钟|斛|锅|簋|篮|盘|桶|罐|瓶|壶|卮|盏|箩|箱|煲|啖|袋|钵|年|月|日|季|刻|时|周|天|秒|分|小时|旬|纪|岁|世|更|夜|春|夏|秋|冬|代|伏|辈|丸|泡|粒|颗|幢|堆|条|根|支|道|面|片|张|颗|块|元|(亿|千万|百万|万|千|百)|(亿|千万|百万|万|千|百|美|)元|(亿|千万|百万|万|千|百|)块|角|毛|分)' +COM_QUANTIFIERS = '(人|所|朵|匹|张|座|回|场|尾|条|个|首|阙|阵|网|炮|顶|丘|棵|只|支|袭|辆|挑|担|颗|壳|窠|曲|墙|群|腔|砣|座|客|贯|扎|捆|刀|令|打|手|罗|坡|山|岭|江|溪|钟|队|单|双|对|出|口|头|脚|板|跳|枝|件|贴|针|线|管|名|位|身|堂|课|本|页|家|户|层|丝|毫|厘|分|钱|两|斤|担|铢|石|钧|锱|忽|(千|毫|微)克|毫|厘|(公)分|分|寸|尺|丈|里|寻|常|铺|程|(千|分|厘|毫|微)米|米|撮|勺|合|升|斗|石|盘|碗|碟|叠|桶|笼|盆|盒|杯|钟|斛|锅|簋|篮|盘|桶|罐|瓶|壶|卮|盏|箩|箱|煲|啖|袋|钵|年|月|日|季|刻|时|周|天|秒|分|小时|旬|纪|岁|世|更|夜|春|夏|秋|冬|代|伏|辈|丸|泡|粒|颗|幢|堆|条|根|支|道|面|片|张|颗|块|元|(亿|千万|百万|万|千|百)|(亿|千万|百万|万|千|百|美|)元|(亿|千万|百万|万|千|百|)块|角|毛|分)' # 分数表达式 RE_FRAC = re.compile(r'(-?)(\d+)/(\d+)') From 1c2a6b8e30dd69c36ee59851db70febcddcf99c2 Mon Sep 17 00:00:00 2001 From: liangym <34430015+lym0302@users.noreply.github.com> Date: Fri, 26 Aug 2022 15:23:53 +0800 Subject: [PATCH 022/101] updata readme, test=doc (#2313) --- examples/aishell3/tts3/README.md | 2 +- examples/zh_en_tts/tts3/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/aishell3/tts3/README.md b/examples/aishell3/tts3/README.md index 21bad51ec..6ef2870c2 100644 --- a/examples/aishell3/tts3/README.md +++ b/examples/aishell3/tts3/README.md @@ -217,7 +217,7 @@ optional arguments: ## Pretrained Model Pretrained FastSpeech2 model with no silence in the edge of audios: -- [fastspeech2_nosil_aishell3_ckpt_0.4.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_nosil_aishell3_ckpt_0.4.zip) +- [fastspeech2_aishell3_ckpt_1.1.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_aishell3_ckpt_1.1.0.zip) - [fastspeech2_conformer_aishell3_ckpt_0.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_conformer_aishell3_ckpt_0.2.0.zip) (Thanks for [@awmmmm](https://github.com/awmmmm)'s contribution) The static model can be downloaded here: diff --git a/examples/zh_en_tts/tts3/README.md b/examples/zh_en_tts/tts3/README.md index 131d7f2c4..e7365baa2 100644 --- a/examples/zh_en_tts/tts3/README.md +++ b/examples/zh_en_tts/tts3/README.md @@ -251,7 +251,7 @@ optional arguments: ## Pretrained Model Pretrained FastSpeech2 model with no silence in the edge of audios: -- [fastspeech2_mix_ckpt_0.2.0.zip](https://paddlespeech.bj.bcebos.com/t2s/chinse_english_mixed/models/fastspeech2_mix_ckpt_0.2.0.zip) +- [fastspeech2_mix_ckpt_1.2.0.zip](https://paddlespeech.bj.bcebos.com/t2s/chinse_english_mixed/models/fastspeech2_mix_ckpt_1.2.0.zip) The static model can be downloaded here: - 
[fastspeech2_mix_static_0.2.0.zip](https://paddlespeech.bj.bcebos.com/t2s/chinse_english_mixed/models/fastspeech2_mix_static_0.2.0.zip) From 984886fb8c3f8b0e0a75b4423c544926cda91bf1 Mon Sep 17 00:00:00 2001 From: sneaxiy <32832641+sneaxiy@users.noreply.github.com> Date: Fri, 26 Aug 2022 15:30:03 +0800 Subject: [PATCH 023/101] add barrier (#2311) --- tests/test_tipc/benchmark_train.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_tipc/benchmark_train.sh b/tests/test_tipc/benchmark_train.sh index 4b7677c72..7f0382ac5 100644 --- a/tests/test_tipc/benchmark_train.sh +++ b/tests/test_tipc/benchmark_train.sh @@ -154,6 +154,7 @@ else device_num_list=($device_num) fi +PYTHON="${python}" bash test_tipc/barrier.sh IFS="|" for batch_size in ${batch_size_list[*]}; do for precision in ${fp_items_list[*]}; do From c7163abffa643342a294d24883416e788dfbf3af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20An=20=EF=BC=88An=20Hongliang=EF=BC=89?= Date: Fri, 26 Aug 2022 15:43:13 +0800 Subject: [PATCH 024/101] add thanks into readme, append data for chinese unit (#2312) * add chinese words correct phonic,test=tts * added thanks into readme. add data of unit, test=tts * added thanks into readme. add data of unit, test=tts * modify data of unit, test=tts * modify thanks, test=tts --- README.md | 3 ++- README_cn.md | 3 ++- paddlespeech/t2s/frontend/zh_normalization/num.py | 2 +- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 122704d2d..7f10fc02e 100644 --- a/README.md +++ b/README.md @@ -793,6 +793,7 @@ You are warmly welcome to submit questions in [discussions](https://github.com/P ### Contributors

+ @@ -829,7 +830,7 @@ You are warmly welcome to submit questions in [discussions](https://github.com/P

## Acknowledgement - +- Many thanks to [david-95](https://github.com/david-95) improved TTS, fixed multi-punctuation bug, and contributed to multiple program and data. - Many thanks to [BarryKCL](https://github.com/BarryKCL) improved TTS Chinses frontend based on [G2PW](https://github.com/GitYCC/g2pW) - Many thanks to [yeyupiaoling](https://github.com/yeyupiaoling)/[PPASR](https://github.com/yeyupiaoling/PPASR)/[PaddlePaddle-DeepSpeech](https://github.com/yeyupiaoling/PaddlePaddle-DeepSpeech)/[VoiceprintRecognition-PaddlePaddle](https://github.com/yeyupiaoling/VoiceprintRecognition-PaddlePaddle)/[AudioClassification-PaddlePaddle](https://github.com/yeyupiaoling/AudioClassification-PaddlePaddle) for years of attention, constructive advice and great help. - Many thanks to [mymagicpower](https://github.com/mymagicpower) for the Java implementation of ASR upon [short](https://github.com/mymagicpower/AIAS/tree/main/3_audio_sdks/asr_sdk) and [long](https://github.com/mymagicpower/AIAS/tree/main/3_audio_sdks/asr_long_audio_sdk) audio files. diff --git a/README_cn.md b/README_cn.md index ca42e71f6..b4bd53f36 100644 --- a/README_cn.md +++ b/README_cn.md @@ -797,6 +797,7 @@ PaddleSpeech 的 **语音合成** 主要包含三个模块:文本前端、声 ### 贡献者

+ @@ -833,7 +834,7 @@ PaddleSpeech 的 **语音合成** 主要包含三个模块:文本前端、声

## 致谢 - +- 非常感谢 [david-95](https://github.com/david-95)修复句尾多标点符号出错的问题,补充frontend语音polyphonic 数据,贡献补充多条程序和数据 - 非常感谢 [BarryKCL](https://github.com/BarryKCL)基于[G2PW](https://github.com/GitYCC/g2pW)对TTS中文文本前端的优化。 - 非常感谢 [yeyupiaoling](https://github.com/yeyupiaoling)/[PPASR](https://github.com/yeyupiaoling/PPASR)/[PaddlePaddle-DeepSpeech](https://github.com/yeyupiaoling/PaddlePaddle-DeepSpeech)/[VoiceprintRecognition-PaddlePaddle](https://github.com/yeyupiaoling/VoiceprintRecognition-PaddlePaddle)/[AudioClassification-PaddlePaddle](https://github.com/yeyupiaoling/AudioClassification-PaddlePaddle) 多年来的关注和建议,以及在诸多问题上的帮助。 - 非常感谢 [mymagicpower](https://github.com/mymagicpower) 采用PaddleSpeech 对 ASR 的[短语音](https://github.com/mymagicpower/AIAS/tree/main/3_audio_sdks/asr_sdk)及[长语音](https://github.com/mymagicpower/AIAS/tree/main/3_audio_sdks/asr_long_audio_sdk)进行 Java 实现。 diff --git a/paddlespeech/t2s/frontend/zh_normalization/num.py b/paddlespeech/t2s/frontend/zh_normalization/num.py index 0002ed504..8a54d3e63 100644 --- a/paddlespeech/t2s/frontend/zh_normalization/num.py +++ b/paddlespeech/t2s/frontend/zh_normalization/num.py @@ -28,7 +28,7 @@ UNITS = OrderedDict({ 8: '亿', }) -COM_QUANTIFIERS = '(人|所|朵|匹|张|座|回|场|尾|条|个|首|阙|阵|网|炮|顶|丘|棵|只|支|袭|辆|挑|担|颗|壳|窠|曲|墙|群|腔|砣|座|客|贯|扎|捆|刀|令|打|手|罗|坡|山|岭|江|溪|钟|队|单|双|对|出|口|头|脚|板|跳|枝|件|贴|针|线|管|名|位|身|堂|课|本|页|家|户|层|丝|毫|厘|分|钱|两|斤|担|铢|石|钧|锱|忽|(千|毫|微)克|毫|厘|(公)分|分|寸|尺|丈|里|寻|常|铺|程|(千|分|厘|毫|微)米|米|撮|勺|合|升|斗|石|盘|碗|碟|叠|桶|笼|盆|盒|杯|钟|斛|锅|簋|篮|盘|桶|罐|瓶|壶|卮|盏|箩|箱|煲|啖|袋|钵|年|月|日|季|刻|时|周|天|秒|分|小时|旬|纪|岁|世|更|夜|春|夏|秋|冬|代|伏|辈|丸|泡|粒|颗|幢|堆|条|根|支|道|面|片|张|颗|块|元|(亿|千万|百万|万|千|百)|(亿|千万|百万|万|千|百|美|)元|(亿|千万|百万|万|千|百|)块|角|毛|分)' +COM_QUANTIFIERS = '(封|艘|把|目|套|段|人|所|朵|匹|张|座|回|场|尾|条|个|首|阙|阵|网|炮|顶|丘|棵|只|支|袭|辆|挑|担|颗|壳|窠|曲|墙|群|腔|砣|座|客|贯|扎|捆|刀|令|打|手|罗|坡|山|岭|江|溪|钟|队|单|双|对|出|口|头|脚|板|跳|枝|件|贴|针|线|管|名|位|身|堂|课|本|页|家|户|层|丝|毫|厘|分|钱|两|斤|担|铢|石|钧|锱|忽|(千|毫|微)克|毫|厘|(公)分|分|寸|尺|丈|里|寻|常|铺|程|(千|分|厘|毫|微)米|米|撮|勺|合|升|斗|石|盘|碗|碟|叠|桶|笼|盆|盒|杯|钟|斛|锅|簋|篮|盘|桶|罐|瓶|壶|卮|盏|箩|箱|煲|啖|袋|钵|年|月|日|季|刻|时|周|天|秒|分|小时|旬|纪|岁|世|更|夜|春|夏|秋|冬|代|伏|辈|丸|泡|粒|颗|幢|堆|条|根|支|道|面|片|张|颗|块|元|(亿|千万|百万|万|千|百)|(亿|千万|百万|万|千|百|美|)元|(亿|千万|百万|万|千|百|十|)吨|(亿|千万|百万|万|千|百|)块|角|毛|分)' # 分数表达式 RE_FRAC = re.compile(r'(-?)(\d+)/(\d+)') From d21e03c03e4fb29cbd6ce3b708de19a6d542a04a Mon Sep 17 00:00:00 2001 From: TianYuan Date: Fri, 26 Aug 2022 18:06:12 +0800 Subject: [PATCH 025/101] update tts3 readme, test=doc (#2315) --- docs/source/released_model.md | 6 ++++-- examples/aishell3/tts3/README.md | 15 ++++++++------- examples/aishell3/tts3/local/synthesize_e2e.sh | 6 +++--- examples/other/g2p/README.md | 2 +- examples/vctk/tts3/README.md | 16 +++++++++------- examples/zh_en_tts/tts3/README.md | 14 ++++++++------ 6 files changed, 33 insertions(+), 26 deletions(-) diff --git a/docs/source/released_model.md b/docs/source/released_model.md index 8d0ff1d47..d6691812e 100644 --- a/docs/source/released_model.md +++ b/docs/source/released_model.md @@ -42,9 +42,11 @@ SpeedySpeech| CSMSC | [speedyspeech-csmsc](https://github.com/PaddlePaddle/Paddl FastSpeech2| CSMSC |[fastspeech2-csmsc](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/csmsc/tts3)|[fastspeech2_nosil_baker_ckpt_0.4.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_nosil_baker_ckpt_0.4.zip)|[fastspeech2_csmsc_static_0.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_csmsc_static_0.2.0.zip)
[fastspeech2_csmsc_onnx_0.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_csmsc_onnx_0.2.0.zip)|157MB| FastSpeech2-Conformer| CSMSC |[fastspeech2-csmsc](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/csmsc/tts3)|[fastspeech2_conformer_baker_ckpt_0.5.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_conformer_baker_ckpt_0.5.zip)||| FastSpeech2-CNNDecoder| CSMSC| [fastspeech2-csmsc](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/csmsc/tts3)| [fastspeech2_cnndecoder_csmsc_ckpt_1.0.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_cnndecoder_csmsc_ckpt_1.0.0.zip) | [fastspeech2_cnndecoder_csmsc_static_1.0.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_cnndecoder_csmsc_static_1.0.0.zip)
[fastspeech2_cnndecoder_csmsc_streaming_static_1.0.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_cnndecoder_csmsc_streaming_static_1.0.0.zip)
[fastspeech2_cnndecoder_csmsc_onnx_1.0.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_cnndecoder_csmsc_onnx_1.0.0.zip)
[fastspeech2_cnndecoder_csmsc_streaming_onnx_1.0.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_cnndecoder_csmsc_streaming_onnx_1.0.0.zip) | 84MB| -FastSpeech2| AISHELL-3 |[fastspeech2-aishell3](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/tts3)|[fastspeech2_nosil_aishell3_ckpt_0.4.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_nosil_aishell3_ckpt_0.4.zip)|[fastspeech2_aishell3_static_1.1.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_aishell3_static_1.1.0.zip)
[fastspeech2_aishell3_onnx_1.1.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_aishell3_onnx_1.1.0.zip)|147MB| +FastSpeech2| AISHELL-3 |[fastspeech2-aishell3](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/tts3)|[fastspeech2_aishell3_ckpt_1.1.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_aishell3_ckpt_1.1.0.zip)|[fastspeech2_aishell3_static_1.1.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_aishell3_static_1.1.0.zip)
[fastspeech2_aishell3_onnx_1.1.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_aishell3_onnx_1.1.0.zip)|147MB| FastSpeech2| LJSpeech |[fastspeech2-ljspeech](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/ljspeech/tts3)|[fastspeech2_nosil_ljspeech_ckpt_0.5.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_nosil_ljspeech_ckpt_0.5.zip)|[fastspeech2_ljspeech_static_1.1.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_ljspeech_static_1.1.0.zip)
[fastspeech2_ljspeech_onnx_1.1.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_ljspeech_onnx_1.1.0.zip)|145MB| -FastSpeech2| VCTK |[fastspeech2-vctk](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/vctk/tts3)|[fastspeech2_nosil_vctk_ckpt_0.5.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_nosil_vctk_ckpt_0.5.zip)|[fastspeech2_vctk_static_1.1.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_vctk_static_1.1.0.zip)
[fastspeech2_vctk_onnx_1.1.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_vctk_onnx_1.1.0.zip) | 145MB| +FastSpeech2| VCTK |[fastspeech2-vctk](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/vctk/tts3)|[fastspeech2_vctk_ckpt_1.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_vctk_ckpt_1.2.0.zip)|[fastspeech2_vctk_static_1.1.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_vctk_static_1.1.0.zip)
[fastspeech2_vctk_onnx_1.1.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_vctk_onnx_1.1.0.zip) | 145MB| +FastSpeech2| ZH_EN |[fastspeech2-zh_en](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/zh_en_tts/tts3)|[fastspeech2_mix_ckpt_1.2.0.zip](https://paddlespeech.bj.bcebos.com/t2s/chinse_english_mixed/models/fastspeech2_mix_ckpt_1.2.0.zip)|[fastspeech2_mix_static_0.2.0.zip](https://paddlespeech.bj.bcebos.com/t2s/chinse_english_mixed/models/fastspeech2_mix_static_0.2.0.zip)
[fastspeech2_mix_onnx_0.2.0.zip](https://paddlespeech.bj.bcebos.com/t2s/chinse_english_mixed/models/fastspeech2_mix_onnx_0.2.0.zip) | 145MB| + ### Vocoders Model Type | Dataset| Example Link | Pretrained Models| Static/ONNX Models|Size (static) diff --git a/examples/aishell3/tts3/README.md b/examples/aishell3/tts3/README.md index 6ef2870c2..3e1dee2fb 100644 --- a/examples/aishell3/tts3/README.md +++ b/examples/aishell3/tts3/README.md @@ -229,9 +229,11 @@ The ONNX model can be downloaded here: FastSpeech2 checkpoint contains files listed below. ```text -fastspeech2_nosil_aishell3_ckpt_0.4 +fastspeech2_aishell3_ckpt_1.1.0 ├── default.yaml # default config used to train fastspeech2 +├── energy_stats.npy # statistics used to normalize energy when training fastspeech2 ├── phone_id_map.txt # phone vocabulary file when training fastspeech2 +├── pitch_stats.npy # statistics used to normalize pitch when training fastspeech2 ├── snapshot_iter_96400.pdz # model parameters and optimizer states ├── speaker_id_map.txt # speaker id map file when training a multi-speaker fastspeech2 └── speech_stats.npy # statistics used to normalize spectrogram when training fastspeech2 @@ -244,9 +246,9 @@ FLAGS_allocator_strategy=naive_best_fit \ FLAGS_fraction_of_gpu_memory_to_use=0.01 \ python3 ${BIN_DIR}/../synthesize_e2e.py \ --am=fastspeech2_aishell3 \ - --am_config=fastspeech2_nosil_aishell3_ckpt_0.4/default.yaml \ - --am_ckpt=fastspeech2_nosil_aishell3_ckpt_0.4/snapshot_iter_96400.pdz \ - --am_stat=fastspeech2_nosil_aishell3_ckpt_0.4/speech_stats.npy \ + --am_config=fastspeech2_aishell3_ckpt_1.1.0/default.yaml \ + --am_ckpt=fastspeech2_aishell3_ckpt_1.1.0/snapshot_iter_96400.pdz \ + --am_stat=fastspeech2_aishell3_ckpt_1.1.0/speech_stats.npy \ --voc=pwgan_aishell3 \ --voc_config=pwg_aishell3_ckpt_0.5/default.yaml \ --voc_ckpt=pwg_aishell3_ckpt_0.5/snapshot_iter_1000000.pdz \ @@ -254,9 +256,8 @@ python3 ${BIN_DIR}/../synthesize_e2e.py \ --lang=zh \ --text=${BIN_DIR}/../sentences.txt \ --output_dir=exp/default/test_e2e \ - --phones_dict=fastspeech2_nosil_aishell3_ckpt_0.4/phone_id_map.txt \ - --speaker_dict=fastspeech2_nosil_aishell3_ckpt_0.4/speaker_id_map.txt \ + --phones_dict=fastspeech2_aishell3_ckpt_1.1.0/phone_id_map.txt \ + --speaker_dict=fastspeech2_aishell3_ckpt_1.1.0/speaker_id_map.txt \ --spk_id=0 \ --inference_dir=exp/default/inference - ``` diff --git a/examples/aishell3/tts3/local/synthesize_e2e.sh b/examples/aishell3/tts3/local/synthesize_e2e.sh index ff3608be7..158350ae4 100755 --- a/examples/aishell3/tts3/local/synthesize_e2e.sh +++ b/examples/aishell3/tts3/local/synthesize_e2e.sh @@ -38,7 +38,7 @@ if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then --am=fastspeech2_aishell3 \ --am_config=${config_path} \ --am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ - --am_stat=fastspeech2_nosil_aishell3_ckpt_0.4/speech_stats.npy \ + --am_stat=dump/train/speech_stats.npy \ --voc=hifigan_aishell3 \ --voc_config=hifigan_aishell3_ckpt_0.2.0/default.yaml \ --voc_ckpt=hifigan_aishell3_ckpt_0.2.0/snapshot_iter_2500000.pdz \ @@ -46,8 +46,8 @@ if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then --lang=zh \ --text=${BIN_DIR}/../sentences.txt \ --output_dir=${train_output_path}/test_e2e \ - --phones_dict=fastspeech2_nosil_aishell3_ckpt_0.4/phone_id_map.txt \ - --speaker_dict=fastspeech2_nosil_aishell3_ckpt_0.4/speaker_id_map.txt \ + --phones_dict=dump/phone_id_map.txt \ + --speaker_dict=dump/speaker_id_map.txt \ --spk_id=0 \ --inference_dir=${train_output_path}/inference fi diff --git 
a/examples/other/g2p/README.md b/examples/other/g2p/README.md index a8f8f7340..882943504 100644 --- a/examples/other/g2p/README.md +++ b/examples/other/g2p/README.md @@ -12,7 +12,7 @@ Run the command below to get the results of the test. ./run.sh ``` -The `avg WER` of g2p is: 0.024219452438490413 +The `avg WER` of g2p is: 0.024169315564825305 ```text ,--------------------------------------------------------------------. diff --git a/examples/vctk/tts3/README.md b/examples/vctk/tts3/README.md index 9c0d75616..2a2f27fd4 100644 --- a/examples/vctk/tts3/README.md +++ b/examples/vctk/tts3/README.md @@ -216,7 +216,7 @@ optional arguments: ## Pretrained Model Pretrained FastSpeech2 model with no silence in the edge of audios: -- [fastspeech2_nosil_vctk_ckpt_0.5.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_nosil_vctk_ckpt_0.5.zip) +- [fastspeech2_vctk_ckpt_1.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_vctk_ckpt_1.2.0.zip) The static model can be downloaded here: - [fastspeech2_vctk_static_1.1.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_vctk_static_1.1.0.zip) @@ -226,9 +226,11 @@ The ONNX model can be downloaded here: FastSpeech2 checkpoint contains files listed below. ```text -fastspeech2_nosil_vctk_ckpt_0.5 +fastspeech2_vctk_ckpt_1.2.0 ├── default.yaml # default config used to train fastspeech2 +├── energy_stats.npy # statistics used to normalize energy when training fastspeech2 ├── phone_id_map.txt # phone vocabulary file when training fastspeech2 +├── pitch_stats.npy # statistics used to normalize pitch when training fastspeech2 ├── snapshot_iter_66200.pdz # model parameters and optimizer states ├── speaker_id_map.txt # speaker id map file when training a multi-speaker fastspeech2 └── speech_stats.npy # statistics used to normalize spectrogram when training fastspeech2 @@ -241,9 +243,9 @@ FLAGS_allocator_strategy=naive_best_fit \ FLAGS_fraction_of_gpu_memory_to_use=0.01 \ python3 ${BIN_DIR}/../synthesize_e2e.py \ --am=fastspeech2_vctk \ - --am_config=fastspeech2_nosil_vctk_ckpt_0.5/default.yaml \ - --am_ckpt=fastspeech2_nosil_vctk_ckpt_0.5/snapshot_iter_66200.pdz \ - --am_stat=fastspeech2_nosil_vctk_ckpt_0.5/speech_stats.npy \ + --am_config=fastspeech2_vctk_ckpt_1.2.0/default.yaml \ + --am_ckpt=fastspeech2_vctk_ckpt_1.2.0/snapshot_iter_66200.pdz \ + --am_stat=fastspeech2_vctk_ckpt_1.2.0/speech_stats.npy \ --voc=pwgan_vctk \ --voc_config=pwg_vctk_ckpt_0.1.1/default.yaml \ --voc_ckpt=pwg_vctk_ckpt_0.1.1/snapshot_iter_1500000.pdz \ @@ -251,8 +253,8 @@ python3 ${BIN_DIR}/../synthesize_e2e.py \ --lang=en \ --text=${BIN_DIR}/../sentences_en.txt \ --output_dir=exp/default/test_e2e \ - --phones_dict=fastspeech2_nosil_vctk_ckpt_0.5/phone_id_map.txt \ - --speaker_dict=fastspeech2_nosil_vctk_ckpt_0.5/speaker_id_map.txt \ + --phones_dict=fastspeech2_vctk_ckpt_1.2.0/phone_id_map.txt \ + --speaker_dict=fastspeech2_vctk_ckpt_1.2.0/speaker_id_map.txt \ --spk_id=0 \ --inference_dir=exp/default/inference ``` diff --git a/examples/zh_en_tts/tts3/README.md b/examples/zh_en_tts/tts3/README.md index e7365baa2..b4b683089 100644 --- a/examples/zh_en_tts/tts3/README.md +++ b/examples/zh_en_tts/tts3/README.md @@ -262,9 +262,11 @@ The ONNX model can be downloaded here: FastSpeech2 checkpoint contains files listed below. 
```text -fastspeech2_mix_ckpt_0.2.0 +fastspeech2_mix_ckpt_1.2.0 ├── default.yaml # default config used to train fastspeech2 +├── energy_stats.npy # statistics used to energy spectrogram when training fastspeech2 ├── phone_id_map.txt # phone vocabulary file when training fastspeech2 +├── pitch_stats.npy # statistics used to normalize pitch when training fastspeech2 ├── snapshot_iter_99200.pdz # model parameters and optimizer states ├── speaker_id_map.txt # speaker id map file when training a multi-speaker fastspeech2 └── speech_stats.npy # statistics used to normalize spectrogram when training fastspeech2 @@ -281,9 +283,9 @@ FLAGS_allocator_strategy=naive_best_fit \ FLAGS_fraction_of_gpu_memory_to_use=0.01 \ python3 ${BIN_DIR}/../synthesize_e2e.py \ --am=fastspeech2_mix \ - --am_config=fastspeech2_mix_ckpt_0.2.0/default.yaml \ - --am_ckpt=fastspeech2_mix_ckpt_0.2.0/snapshot_iter_99200.pdz \ - --am_stat=fastspeech2_mix_ckpt_0.2.0/speech_stats.npy \ + --am_config=fastspeech2_mix_ckpt_1.2.0/default.yaml \ + --am_ckpt=fastspeech2_mix_ckpt_1.2.0/snapshot_iter_99200.pdz \ + --am_stat=fastspeech2_mix_ckpt_1.2.0/speech_stats.npy \ --voc=pwgan_aishell3 \ --voc_config=pwg_aishell3_ckpt_0.5/default.yaml \ --voc_ckpt=pwg_aishell3_ckpt_0.5/snapshot_iter_1000000.pdz \ @@ -291,8 +293,8 @@ python3 ${BIN_DIR}/../synthesize_e2e.py \ --lang=mix \ --text=${BIN_DIR}/../sentences_mix.txt \ --output_dir=exp/default/test_e2e \ - --phones_dict=fastspeech2_mix_ckpt_0.2.0/phone_id_map.txt \ - --speaker_dict=fastspeech2_mix_ckpt_0.2.0/speaker_id_map.txt \ + --phones_dict=fastspeech2_mix_ckpt_1.2.0/phone_id_map.txt \ + --speaker_dict=fastspeech2_mix_ckpt_1.2.0/speaker_id_map.txt \ --spk_id=174 \ --inference_dir=exp/default/inference ``` From 7b864e8f38646d702d2993b456e5868c011aa902 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Fri, 26 Aug 2022 22:48:42 +0800 Subject: [PATCH 026/101] clean old ernie sat inference scripts (#2316) --- README.md | 10 +- README_cn.md | 10 +- examples/ernie_sat/.meta/framework.png | Bin 143263 -> 0 bytes examples/ernie_sat/README.md | 137 ---- examples/ernie_sat/local/align.py | 454 ------------- examples/ernie_sat/local/inference.py | 609 ----------------- examples/ernie_sat/local/inference_new.py | 622 ------------------ examples/ernie_sat/local/sedit_arg_parser.py | 97 --- examples/ernie_sat/local/utils.py | 175 ----- examples/ernie_sat/path.sh | 13 - examples/ernie_sat/prompt/dev/text | 3 - examples/ernie_sat/prompt/dev/wav.scp | 3 - examples/ernie_sat/run_clone_en_to_zh.sh | 27 - examples/ernie_sat/run_clone_en_to_zh_new.sh | 27 - examples/ernie_sat/run_gen_en.sh | 26 - examples/ernie_sat/run_gen_en_new.sh | 26 - examples/ernie_sat/run_sedit_en.sh | 27 - examples/ernie_sat/run_sedit_en_new.sh | 27 - examples/ernie_sat/test_run.sh | 6 - examples/ernie_sat/test_run_new.sh | 6 - examples/ernie_sat/tools/.gitkeep | 0 paddlespeech/t2s/datasets/am_batch_fn.py | 186 ------ paddlespeech/t2s/models/ernie_sat/__init__.py | 1 - paddlespeech/t2s/models/ernie_sat/mlm.py | 579 ---------------- 24 files changed, 16 insertions(+), 3055 deletions(-) delete mode 100644 examples/ernie_sat/.meta/framework.png delete mode 100644 examples/ernie_sat/README.md delete mode 100755 examples/ernie_sat/local/align.py delete mode 100644 examples/ernie_sat/local/inference.py delete mode 100644 examples/ernie_sat/local/inference_new.py delete mode 100644 examples/ernie_sat/local/sedit_arg_parser.py delete mode 100644 examples/ernie_sat/local/utils.py delete mode 100755 examples/ernie_sat/path.sh delete mode 100644 
examples/ernie_sat/prompt/dev/text delete mode 100644 examples/ernie_sat/prompt/dev/wav.scp delete mode 100755 examples/ernie_sat/run_clone_en_to_zh.sh delete mode 100755 examples/ernie_sat/run_clone_en_to_zh_new.sh delete mode 100755 examples/ernie_sat/run_gen_en.sh delete mode 100755 examples/ernie_sat/run_gen_en_new.sh delete mode 100755 examples/ernie_sat/run_sedit_en.sh delete mode 100755 examples/ernie_sat/run_sedit_en_new.sh delete mode 100755 examples/ernie_sat/test_run.sh delete mode 100755 examples/ernie_sat/test_run_new.sh delete mode 100644 examples/ernie_sat/tools/.gitkeep delete mode 100644 paddlespeech/t2s/models/ernie_sat/mlm.py diff --git a/README.md b/README.md index 7f10fc02e..1d3666f53 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,3 @@ - ([简体中文](./README_cn.md)|English)

@@ -535,7 +534,7 @@ PaddleSpeech supports a series of most popular models. They are summarized in [r - Acoustic Model + Acoustic Model Tacotron2 LJSpeech / CSMSC @@ -563,6 +562,13 @@ PaddleSpeech supports a series of most popular models. They are summarized in [r fastspeech2-ljspeech / fastspeech2-vctk / fastspeech2-csmsc / fastspeech2-aishell3 / fastspeech2-zh_en + + ERNIE-SAT + VCTK / AISHELL-3 / ZH_EN + + ERNIE-SAT-vctk / ERNIE-SAT-aishell3 / ERNIE-SAT-zh_en + + Vocoder WaveFlow diff --git a/README_cn.md b/README_cn.md index b4bd53f36..e9cbc888a 100644 --- a/README_cn.md +++ b/README_cn.md @@ -1,4 +1,3 @@ - (简体中文|[English](./README.md))

@@ -530,7 +529,7 @@ PaddleSpeech 的 **语音合成** 主要包含三个模块:文本前端、声 - 声学模型 + 声学模型 Tacotron2 LJSpeech / CSMSC @@ -558,6 +557,13 @@ PaddleSpeech 的 **语音合成** 主要包含三个模块:文本前端、声 fastspeech2-ljspeech / fastspeech2-vctk / fastspeech2-csmsc / fastspeech2-aishell3 / fastspeech2-zh_en + + ERNIE-SAT + VCTK / AISHELL-3 / ZH_EN + + ERNIE-SAT-vctk / ERNIE-SAT-aishell3 / ERNIE-SAT-zh_en + + 声码器 WaveFlow diff --git a/examples/ernie_sat/.meta/framework.png b/examples/ernie_sat/.meta/framework.png deleted file mode 100644 index c68f62467952aaca91f290e5dead723078343d64..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 143263 zcmeFZcR*9Ynm2rC(m|xxC`CX7q<4Y^5Ks^lkRnk*0qFt)5{QC;bU^_@rB^8tX;LFy zL5%d?drzo=wD)-1-rc?5-F@%g@7?`l2L?_~nVDzinWz3v$Rp$#;H06RfgV6X0RUIQ zAAmdyT+{V;eh2{9uLI`*0H6aXBAfwg@CdvG-~%fFP-Rj8RN#M#-{&$Z|9q9|c_!6g zj?I4GNZtfgZ#a4Qcz8Q`c%D{}xd5nMGPq9tyEU-<;aKbshbIRFV@BhF&(y2W=-+PO zE(Mdn0&H|tW>n9qD1-q@HVP^>3UV6&0sEw(_(S=_W8fbON-Aoa6SQ>n42<9nRVM*T z3Mwi}YAPC<-`!9If!6_QHkwmso-ZsDQ{ELvU76t@(T)!K7OvKtg5d0Qd`&3+ScCD`R#jGU;n`1(D2CU z*zDZ=!s62M%IX?+XLoP^;1G9o{F^RtKL1D-`1g;L{TsU2K)NWYsi~-Gf73-l>Gzv( zHfowPawkq*G^M@g#V#!WoQ~sCLRNV*y@-MthV%a8UIs2v#aS`zZ_@ri*?*3(kbj7> zzYz8>bd3X-0m?rVDoSvbsHnh|q6P~MEzR#jOGo>MLieY_@Q1?qyDF}k?aQ&=F%~w@;za0%RphM?DiB%w%CE@)4I@UT814$4JvB7rRy(m{GasL`^q~yV ze5rvZTaJ0w&CXN(P4Wmr1=%%CGtog3t(~AGL^vVD_ubs?Cn#MlF=p18@^Cpk37#(f@CM`>A`%f z)h*{h!vsVik+rG?>*{=0NC6_w4?RmV5IOK}g<)!v=#v$^JL~r`r+;&S{d{Dg%Ip{+v!fEqvuZ$Isz8uWCAapy1xG^LfQ^LQga9!Y> zX|#_BWz+q1DhkcM+veQgyV8bHKbM!$Z9jakjgOiZHpVpZ7fx^QLa$XqagvbMMi0wS z0~7l73yRwnKN4NsinqMZy7a*>;@$iqI&o^Ezp5k&KhterF$KFzVeqYI+%!!bCCq6Y zG@qJs5yWZ=m$NxZn-)+_B^1%s+BQ{v62 zK=IPa!yV|AZPZK-l=VE_E?m0gWMHHl3b$wu!%6D5EPaqB1G9WXHCH{gUEq%EOO^|p zu@(M_4-GDhy1crewpSzzKkKu4CW*>0T6jU|%W>2p;*)&gchkbOjA9LE?7*tQJ-Mn< z>v1vu0QZvM=26o06lH@|8kG6beAidC#Oe1jWB_g0F@TuKCIhUMD-FY#{Lf@S?oP2- zwf{EhGfJsJOTdkj^zAB@hj^kYXSaV;?>l2yQh;BlRxMJN@y*PGs3hL@QKjL!+MOf6 zNVSp*u?VEnci%qAwX$PKWgYis#4NL)VW>sK#X*rcUnY*DYkGkXZbua%NuwmDU#Hp8 zvqWn%W97m~W3AwaoJTpckW*8uQ+zcclk}7qXf;_roa>BI8gDc*w6PLS82Qwe9`WT6 z*>f+VN94V_@A(z)Dwo`&;PusY@1MxIGFK$sE!*#8;M`WwxGaG~AS}OMk8^lWUja4d>n-O~tf?Z9rCnt-3#5^PUw%D!qu3APbmi6bl?Mlg- z!ir`-!#`KeyL64;NhcZ1=_#=H80TGCc67+D!IuPA*^q%|J#C>0yJ)%DJ9kfAoqAZ+ z2{L5eRj3D)68%7I4d-!-k)LyXX?$V6e;L`ye2$;l+OqMg6`NpucS8G9t_=4KOk9^e zZMUUoQ{y^QzgV;B;(>F4PX>C)zD@h|63$A(G~;~AxkRtH*r{V;=PD!E<5g|Jc8$xj zVM+;=>s*w%>)boYV^3Y72)WPo}jz8gpLIO~uMwCfh`#yp9N!HYHD`xIA7ef0Vc%qAlb zGZkRNf2XfVV@z^0Z~6%0Z>r0QSstU|FxC0*sI-dSx*QbbU;hPZ+(rh*U7T?HACu3~ zj+C4TBm+g(#81d}=(vksLj!KNXhJXUzOpi~$t!S)cGhm2T4ebv#2 zCGYT5L1vG2)1c6_nN6Vx9sYvX;Zu@t|D!PzaJ?vwR#fx9qnP4jEo%Hikg zKWCixTZ30g_Wb08j8+^;L1cAL)(oH7snvL{gqjyLzL+<^#KbSsmp`ykI`iK6Sqk+% zR0LMIcGFJH{fwHpyZ`d=ui-V!0@Cb*>u6XMg3%>kg{Un#zsFB3@G+Z0x?#w9BhMeRdW`l0EdENJ z{-t?WjO^)bPfZ3rs!p1da^7Bu3+l4Pd_`yqC8f@dzuKXWjih<{`a8Adue*=e-e3>Y zCdG%dYrKmkyW|urp8BkJjEN7I7Zk^t)OTsr^^Bo!W{SM!c+sjCgvBh& z*gxt?oh4!r?O0whkZ|ifqr_J-@Tj4?y?XPYKuABiRpSc}TB~@_2gYr!D_L{(L<_(? 
zej#JTAzA#C}kwC+6i+$|jG=J*!@i<|dA4 zo%$79*KnPC36$mkaeq;(lJ_Q)=D+qQ7(^;6{KGZ;Z;v-CI&8L`2VvY4Wg}k;!UJ*g zlFTg!MwlYv!jEOTN{h&`;SrT|vv*e|pB<))I3=F#ebTNDIgtP$i8Wm*geiP$9Yw1S zAy-pf&Jm_Dxv)$gJ%tu`Yb+UVH~&i8(KU{3)dZ^vbLG%&j~P zc9!nJnNrgpDEH)-5;*pry^Ty8eD3}<50!B-bfAmh_l&S{)`dDQIRTXE&*Z?A14$%W zWlsUE%8%WS)^vr^;%lcR&cANsm15pL39{lF`>=f?gk)H9=1_!_$f@J;fbMp1tsFeY zKs+O1{+vOxFS#ssCEY`}Xhhgiy|JZ5sop}@0I(CP6}F>DX~YhEVv;W~90bRA;*~A- zY733WHCoVoJ}s_*&~=&Jv`?>lt;;d(s9(%e)+<8zeNmQqWfqbs)L&A>dQybEH;_{q z{3_(KN5vPj^1Ck_1qStJ`8*eCC==9rK)-_60h%cqV2697rRWTmj`CC6zYT$IYyH<=AgQPv3m>`f`-@+gTxxpnjD{p+?7j z^w5Vw@$W|N(aHNYL1}H2MSJ0gGM6enF#%HkmXnQ|(fHHrvz$8)uE_$9S<5fN6d;<* z*}FZY_$kz|``k93WRy#a$B+BiEAtJ_a}ejxDsrq2d5)%>!%Az+?3aHH}@ANkgrHS(1zZ5-_G>P(6du>uY-AHo^WL$Sslo3X(R@VK4p%D z(zw>ZiK6h$_;U-#4RIW*=S~|ja~UX3uflQ>%%h~G?OBK6u152CeD{amNq5)N&oBMN z7V}ZRd-8RfWJq}Cpk-y+IXWS$b8$3O-+7E2Nh1@k(NE=`uS7MkTy9k*!U7}dsR>oa2{{ckoY-CR>bQ$EdVe! zG1Iw1^?w$_&MZZrH2^tJ9-=jM*rF?3%kvdI{41NkxH9w2?{C@21ZrQB0;NJqGzda_ ztyU1QR?;K$j7e$fgp!ibFMRpq5Lk@^;z6w$oFgI-zH8fgfV}f|HDiIX@t(15l_CyXWyzN zo=mYxH&YB^`tGLqmKY21xzSUY`D|Gta;Vgfpg?8PA_i^yr(@H zBKD~CknU<{=Eh4!CW=2Lp;iEHPl`#i<3OH8aCH!d7Q1F;mvd)ov>IJlFNj-)g|3bH`k^_I=xtnV&xAh9z1-d zUh`47 z>ARiR->$m6yfJMgmV&HTb>>D4<2U6Y4NAxd{nX25wq>R#Yq|Ax2EwzeA0s@;UHyWA!+1uIK-u;T84p~>f3k-+aO7`iHB|ccuZ-f&+u2( zGl8>m;Zg5Yj*g6Y^qXK`LV4<^6vlld$VZ7zmdcd{XItJSOARD+c%((#EHAGxOF5CL zk}A&rr0S^hOF`OAoj}qX$Z&kUz=T0e^Dz3P9m^8O*d-)=Gye9eM2%|oGr7kGS8do& zWwjf(cbyRsULvxS`GcwkdD9mAsXi zXoh<|g0GC(US5AryU7zsQmBWP)_%z)t=%B_g&^2S@eM4bxjPm8aBijPQhkk_a|+1f zHiwzpuf}fmT%h!PGkR-;_}-9>0R5~I-R5yrEkBVP6F}v(d~SNgwbLUPq3{B1wVkR)Hcx>jF2)ygxkj zt3P4O(L3!x_!$n{o50TDsM=tF9-5n4irL))Ra9i1V)k}H5y@o#4hhqSc*F7e5# zY?jP*M|^VJ}((Aj9r*&)@}7H8K3fwL27+EhX2j@m8#lp^UzX zg`I7r**D^y^eb;L=hzqq_5wF0%v%>9cov0h zm2OkG#$#t8`Y9`J4!LDzyGg%kfO6L{LJ;*mc-;t437iy)Cn8&5AC#{UY76lANzO-} z`nr_`CUHYXER=MCpx5WbZ;VDL;J9W~!T68fgYh5gksLd$)Lj?>lL!!+Mr6RJXv6O7 zgWP?fA7~z6*BwXspuCK(GxTgR~{8> zltp$)z_k8p0jSH@h#8cFIW3n(SO3trR^OZa^X2>P4gLEg&9;)l6sBRtkUD-he;~pv zKJz|j*lod)^;22m`sX7pt6)qd9~Ng6Bb$LfXmYdp-C!Wfe8yB}FNk*{ae`OdrPmME z+>_lbbZ-=5-{AnB=iB?d3+IfmZvN`w`3*zNHp&mKbxn=<(O_xyl)1%?BC3l zc;Y4t^_uIFyKmFB(hcZ(z31&0ZeqyS|C*+-F2TcRY<}yjyh}+*O(m>IF#*XUEaG-= z+ojBGIBlN`T?`GRvXWnsM5dU143mUQdtSnI(`y1@+SZZ_v=Tz}RtCrG?B`F|!#POr z3GsOK1|$Pnrkr@|Y%rDMr?68@b?CtJ=(oiIPp)OPo2arRu!i4ry9YS@09g)(+vrV$ zIx=``V5EsTY$*`*(_p(GXtf2!f!u7WCvtk6zzkB}|I3bq^D0SAL$_OiaaX6b`~7<} zl#lzfs41SyV!KUutL6;z?(^FI+>^ovwk_zwk|7v}%e=j6Ae(7H#&Yi4XbM$*^eyiC zm|3&)%i-i$_FGEgd50L#_v~VbE}>+R31g8}xSqRkO6A+o%Yxk%A7Ap$Fy!Y#0W9NN zO*#^nhy~3y0;piulHL75$hwtx0=re+$(YJ9{A z`L``uCVokj0_1JSDDUmgbamyw|b z{)dN|+tzUKK^Ga-59|7#jR_HvS_-n)(!=h*V3%Q^zrDP2^|QvAe8H29-AdUD#?V2RQ{ubu*6! 
zWeZyToMGzl4SxB^mxiE}J&XG536_Kj$i=BOIv1 zKm8TD{<9{#Z2d8H@<4f=ZSJvb?)~hmmhId24#U%Kq)@>JS?%b;ns~GA-BWOPqI$h9 z3Uahsi-6K6dMrrS@|9|tIEbuRnml?CE%)Z@Lh)Qjs}d^L33*(ZpL~||gs{{IJGoqa z-(i9<7Gsmv>MT-~8p)q($NBbwc9!wAJLj3|oJ&JozFoS_{p%{K)9XrutcmK?iws|w zQju3%DX(A~O8`O+RDh>?|O@ zE!Dj~-GmEawzk`<*@G2q&$wO71X-1D$k&kt`RKy@QB%zy-?;)+K|R%1CG3@_&_Fgm zk)NenZ%U;-2Z_H}xp;_rW{bWip&PO#e-bOQVlV0duuQGzwlq(ciBJKZQeqs5|AJsa z{&0h?bz;f4T}4-fJP6S3NRnzcoOeLc=30oBL88_cX{7=J-zcy(VOiK)c?%dfA`H>-;|+bfr>Z< zSLY@hejwJNDk?lJ2yv{CT8;vNNlJ*5KU4Di?R}m=26DCf3d{xfW5y(+<)`xl``j{M zxJaQ+LgPP^N3hheX`U$R-Up!R1nDFFM`P$!a)EwFp zWk~YFU$0}?)>ieFpmdwo{l|>!cM10@8Ox7#MC`#l5W$OZB=O0_ z9IE=*q$sV0D|-1A$xpMk$ErMZOx@LRkHPC(R$Flta~LySsGScbsbtL?RI1FFaWasrCu|IfGr z_4fy})cflc*354%hX?c_L0yswievlN!AtFf{*do9A0cUbka~M zoc=P@c>K`Bz?Q4b@+csZ76Qti19sB`SB)FuUA}jqGmW7#&uQJ%wyDBq9eIf7596q+vq7%LcLd zHfw&kl#^St?~cA0OQh#L5fH_B+r$Oypr4BjjqClQM$n$+JcaDF-=|7VLFkNbk&dJ+ zRYKlXbLX}>Ome^7Y2vWH*DvzywF~^gH{Pq^KEd|q9(b$2(Up=3t-VBI#Ycx6NU}6> zziqwI8{{ADF?h>%ok!)uOk%!-4;@a7Fg4X$KOR&xVhWcbzKqA14`swk)l?Pg1SGr^ z)6BeRo?vx_t))Dk2fxB5)iKsJ?%8F~^l6Uyd1kgtQQ52~Skr$!=H)*hGYf|J5f>eA zBeJ#j-htQq$S5zRkhbIv{i~B6I2m&k3OcO~BA}&l0Mj$gkvTJ&W#}WrIapk|g-B^6 zN?%Sh?tN`&f2{_Vxjh@}E+rz5yZ1mW=+l!=SLB3d1z%X#N|?tqLCu)Sr zwoCXs82Enxnt;zr9`Z*d=;XfxXdIw(C2H?T{gB;FK2Zkf1k-{TK*ckAjXW5A*Ztu) zjcEbw;A5A*Wn?{-pOkx0JFDz&G}|RV!h2}sNLCTEuL`qo?E3UI6dpw@+pV|X>E=G- zj{EzsU9Yigu{=YV%}jkBx1DYG}8leHx+Of3G zCjmnhBQZBhkV4JWYfiNt2icrj=d)MuXJ0a0u8ya2l!ES+ioqO}ei%Hu6Renn9I1ub zqvQkJ)TgBmbhEie?eCg>=l1*8D#ZVLan~yx6t6&= ze<9@C2ty=N&k6ow*)8T3114eavQK1`M4Nz%Lg34tyy$8>6H2l&ovVv^WTX5Vimdb` zOZ7sanMujf58>Zluh(*BmGOGpHDok%E>x++AwU^0c2YHYpNy$1e8l1 zI#g}g-S2-zXO zC*Hyxsvk>w?}CR0@?0Xs1XkM#9;U8kl*8<4(b_l$vUG!H4ib9_@Z>Aklqo95IdL)Z3DV#G_R=!c@}XHvEX z7=|kGb!f5DerU~mwaY4ri=*Fy5#pz3Tr1e}HM7#TI~TqCsrEAXEGy&=-B9BF$D1+k zRhA#MSlUzb`AV`}th3#*u{poyd^|9xE+NW1(n|=G+m--&ofi4)ttjPCHb!Lft{N{Z zlSE+A76=P4*Cx;ZkIQAqX}fJp_8%Cy>erkQ`$sHsdZ&)h80G!ALr0aG%-+A_db>~l zdF{og50CQd@||*{!^kpYxGggbCk6e}Av&XOC~t&}bZ} z4S;-c54*AA336%XW0V(};0VJ3*GaPm{6I>z0H}EDvLGj_4|GG3jq0wtRIelu+0_Go zHo4R(sg1lvl#XXF!jGy9KgdtW4_)jsNa7HDTZ&V8FcEy7=Y*Z3HqrUP@`5OqN~0|F zH8D|Y2Q;UZV(MX>iV9PSw%QCjg+Q7irOYl_G`HGEEcaNxhXG2|Z3VW?f!x#McOJoYOqQpoUF}WH%y>3Hsa5~}*fq&1 zTQefP*xun6URlN*&Q2mL3Mx9Ly0(i=th`}9Wivjw-Eac;cH+b7Rpv@%H@#xC>5eYx z`5GhWpR+c>K)D|1wGO7lHBw&vXGL58$Lg;BuePp0oFH$>G?j z$*$ho!#f=un+)mNZl!i7=b(f*R{?5c0kDQxyCTs^o5|NKpC_WoUDXUM-7Nkg9CVmj z&jK)n1BW&||MNTG(*N%NmvlNN9Qgw614J-ExT*BBmW6cUk)Jy8GWHR35XLI@lAY&a z`i(p~TVu%(q2$YbZS_h;Fbe=&O@wknCla7VrA34?%N{B#iC3F29^BK>^~2fDgg?+# z>YTaO%ex^LX%zk0+RoDpV5DZEkwuslfyq(zNldV!WAfmz9YT%-&2ZH-a29gAHAKll znAa7G9XtQQVGIF_=g!_r&mJ8$`!&?z;c}^&?r2XN@A<3AhdZ10Xc#3Akz|N6b+t!DT4@|P;c&jH+C-a2FZocc$cW|BjpLzD-VX*#6=H2zFxS7EH3J}yb~?`l)sJd z{JSk&0D>0L3Gza5n{ke_{Zf84B_3wTQ_tn2#^R-BQgw)u;<`_TMHDp`J4#jRo^T6| z)$)+#2*SSxU>Auge& z#1~90QvF(>Y;dv3L|*4j#@PjT8~>3zgASvGE3#^h(y|P}lqXqaUSbVyrQkE%)^~sv zuf*G1GDXE8~J3j z+6^1eqlT{h)a5`f4W7+IyH6)Ye??BB0&0&fRa!;ME4X6rkZSVCH-0#c8X}=u{Z9LB zj9PXnjFj;qUcNUsRr~VZh+V@z9Y^tq0nH|}8_4%q=34G09$vz-yjtu@ANOZ{q8S{~ zWz2IH)hN2RUmm~tYAj8u=r*O8SCJwo6qq@oUZT#!oQk)3KJE1`^mLwf?oF{ry-|I5 zG^$+q4sDI|c9|h=;HTwadt|-5aJUvLgStAGq2Ybi!zXXjc?;6w(P!buNFh07Ey|~A ztw+|h#$<6b{G^PGSoRI;p5zi;5sl8ls5`q7@47V`De=nOh+YUCRnRrN&J^Y6bPnqq zw-)5Qb_T_I`7>g4W-;|UTjAj9aXt;rY>8_l^!3mym>*e!K*;6qMN*Hnf68hJ(tc;9 znEs}zgoa_Dq>`ymwOnK|qHYtTffco`Cl3>T9&c&o*<<0|y)I8%32jwT(2R^v} z#Ae4mW4VfWwX0dH(@8z->edyypLao9Zc+*%KM9OeZ<$#1Z))A4=^J)-BMZ z$fqY@+x{eNNDDSlS(Ye_kS8YS+nT=Ej#)X9=q+)*c$n$^enIi+EVX|8D>b_X*e{n! 
zTp;x_{A!=1gFlCe+a!|CPR5d_?;5Btnn4E%Pa<@5U^c(ZNP(x_+LTfIe2eVv=;aPV zyT=u_21J`S`Lek@%s zsbvN=N?js^^8P>}s@DfO4kN+lZ5knKXut;3H)>Qq^P5IL3;FBGeGu&VVJfomXeMN4 zKXO?t6S<&;`}rES56CyC$+2rh_5C>TiK3HUt4v8pK6l`P%VSGO7KEUNm@xt3DmJ6d z<=FzG-3jG!r||e7zi(Xn=T4<&1s`rb56RD#F$je&FD_1*Nxl=`Np*rs>`-Z zJ)3$_PxJMx1?od%*QvOVQ=L^1CS<)V`Szs488}ma36jHBMm{}5G_V9J?~!S0EqtH; z;W5^CToXp)CA%P8r~3i0(+t^wk};fxoIhnW@98|dAZ&lDD$yiMi$m|)oWLY> z_qeMTwMB?|wtwd~Ty#?O&ua=}_G;jM5(mI4TITFqp8ZYp#xjSLyie5}55+Fkps4qd$$ziBL%k((+5{ct1;@>@ej$GUnOkzt>r3sh0!kk7|aJJ=(|D?%ECGG8fX z4lo4lgj8)nhDlR5OZ4?a5%`ZYPt+cT$tiz+>emg;X9!g0Bc<<5e(!F!8(f8}G6$R% z`eqdae_JH{$dWTk|^tnHDP(%}nPbst;e%B!n^FV`SUi*k=JR_6BMH zH_emp-5XMzZoRHo3-tO;*8u3!TjGnXBDe9^#axU9SRBL$BW~g`>aFia)Cy5*Fz6Ns zlfbET=^0osbEDrhY9g0O0Qd_6$Y=2>||+5N_YV*g% zplmKj_UXzsZ^$|?5X)$k$E@dRR(2m;y8i1Dhzj&1$IH$}A4XC9>404PY>7XVpRGX3* z`<(;^K9}}imN7c}v^8dSLZfA5xt{Y-R9|>iBnT;sGgkC$2+^}RJhpLEsoI1HwbbK@ zNgm4lXcl{uQE8wtN^i&8`Jwo*zK8+zDQ2+Dlw{|U4L2uMAf(0vl||{b?=R;^C~#U` zq;?!?k*zZlT7RS>(4L}<84%dA(PDYZa86Ab`F(w@KM7qH*QjZ*oZO!RfzAT9en0w| z;b~->4uOO6ArHEFGk39GclXK|fIR1ibW{DMx*NyWuV-wKV|`Vr05&qWE3uMAa!?0m zm(qgWzCMbazXE$>)aVW+hww=KrYZ0Rnet7|{cD$h)4YlS5Oy|PV#(paV z1iOeI5FY0?Wf9xB^!J2p)v{11^RMT7fj5ReOar#X?(O!n&gnHSnybObar11hIRCs_ zZnEP~z+uk5OEgcOZMkE=g<3w>YoBRcsd@fl*x{eiRztrQQ!(yjfF7}QN}XZW8rsG= z!Ff&>FKDzwJTVSZvvB==IZHbzBi0VZM(yI^BuecBk&p3&P;xn|%lDtmmwBnRu*Zin zip=D5(iIZR&r=g|(}20j*u8%NF&HOMk70r#UyzI@eS%|N;)u5|XV?oPpAT2guvvW6 zc&VejBxIGh7Kb&>#>i^)2T!gh=wlHvb}5zej#i0xy5uv?tc|A)t-c&YnVEtF`)}cv^UhOG7htCqSR%9yV$v|aiX!W-t9SgS0sDCR3rUMw zBL$&E#`^obc@XWg}Kt-SzzcJ3tT$M^w#DQ!L1 z_f*ZYAZjUOPbW$QVb%m)zn-}cgkzGBz%=xMIN^FnbLGj{{o5cDx-m;&4*hZ#<)eId zp7Cwv-9hOxjyFfgoiY<-#mzGxx@R3xd}TxZl|)Ox)&XYlF;I}b-YY_uaAxp-*i-T@ zT}=dEQ5+M&7eJ$S`NF&_7iZTUIwvHvCXnxAdO|$i{ooRpq8{GgI-j{UA(J~kF?MOa zZVt7I)~o_%z@MaK)_^@x%X4#MoP=s&4`QO-un*=8dgvI^^Da#2XPTU_1(U{p=rNsW zoAE7$1syFmwFtzNcW+u8ZVRgMIQ&__W!y;2^EL^cDkZTY&E*kJGl{ z2)8>@v2;eS1vzmvn<5lToFfNRVksWKX_l8@(09LS+z(WwQX1-!)d1Fan%rFk7yWPL zF-r^{xy!zZ*;@z2Fp%=6;kwN0`ToM0Wylq;dt#ZGLC z|GT=iG{qbk115TwmrbT^P?(iTn9roLX6T7(4LtfVtTD}QC@)bdDr9qcKDtLonoCA4 zXCjMz=my{Uv%4}4z$>AacL>81FNToK77S|r&O-LrCYNAD?lEw3KL54Gmp{^0cqi)I zVMTiZ+6xhm+pAd2bI`&RC2LF``?~D({o0UORaLCcY0gLYyE;8-AopF*hr?ptjFL6L zN$yR0w_*lv&^Zeu zjC_HicEYFj#S%Azz`_^fBnc3DGB+c)W-^eMG3n2K(|~|t6+Hu!fCK*$;WvrhTkpk2 z8udybvyeNw>zKYm^v+cn9u~g$%!oRtMHX6DK1u?C$faQD)&_{RQsSznB4n3_m&}

BV~O_~uin2Ni56C`UqcDYbe`~(!9&iGk5e!JA9$BlEdQU;OB6btrMmxyl^WVe}~~6IrY_$-$5Kyj3qo zhqZXS@?{D&bcXfy!yLZwl%)jj3|Y=CHq9tO!!(mxr)U&jL#l9opXa0PgG`kY`P_}v z#H2PM`@wCfzR+P%G(-JaH2eJb8d}grz`e^@+LMUk1z?AR5A(lig0bsba4q0xP9rsU zfj9Nnz3z za1g4hV5N}uut3Wz#HP56WgNj5ZrPYTx|Af*H`NxT)3b8XjoRMjC&b^?Y-n!fXF$E& zI*L8Bg=Hf-;=4gtm)0gX$JMt(yFBJ@fx$KF%47b0f;0K8vTMOcQ{!IMTA;D0-k=K^ zBn3^N{I?*dNL@I?3Zg(D#yC5_-1qi~nt1;37hw+BE^SvX<+CfepT+%=Hg@BdFpeDCX)2f9_{t``F*Wh(=Akg;C~!&FV6 z3{b}pQl<}>EcAh#R6F_vb~+z9au`MtG~LUkPI;a{1c0`Ex&9_a_lLOd&RGOHlTZfS z_q3(%9tj*IK*1Q3rwY90y69hpv8~!IvhD^{p%Xclg$$rRzW?*~tAxjf{H^5B zB+&HAUqINdkzmMl%f);1|f)&4fWbWBpdqy}CRk zq1z-Bm=;&3%xmN++9dDU75|p^BCM@4B;|4Hd*4Ha82}hPd4Rw#l3C#;^xh|Q`#RKK zTd0ijA`3Y(46dmAuJoE#_O6&+AJRn(9#T1?GXJGLnEuS64KlinVMW7%5MxgHV%A`J z0$oQvL$vSqD>)`RSEFsZeiY-A5l?UR{Nk0_3;e%U9~ujqGi@tChFGU^%bE%Zj#KGA z_-sK%j|tBwO_n_IjlCls{9lTF;tE};lS7_ zLS7lZEr;6moxjm`fO;dAZ?uHYQ!~u5SMtzq$G$Aw9jHWFgHbX@_?;$Cvqm1SKE#PZ zTjumpshZ|)GN0a{rEO%BgTJtdmsx=Ha=pThZ(JivYm+-G<5NM*0hUsPvIfVAcncx? z<|#^UwTYWE+iI2Tk7F@qy(y)#ch&0IW;n%(y@9Y}i289Zbp2VN@aYd@d$Gm*!IVBV z2BgYE&w{p<)b?awj%|tLT^k+Wv2=V|8XVrs$+l=IL5Zvtnn$vcwTQgiE<|cPUI=!U zl4S3AX8`nb;ti_H0v>Tl$6kHxcvkT08^$izIvUP0WGP1zI?%nf5lMsSLY(ixHBqGq z$)t`4BGW(YqCYCEd=u$hdh%7-61w7hz(4oHoG-y#UR32yhnl8xt%b;zA$A zw3=o#9U-AoNhtN+xE>`LlTg7QEOx>X!r_U*TpIO;b<@whl%EjOGl@twN+#Jkn~dnK z7RXv0K+=>9qw}9*?Wr9VB^p7Wt|`uG6q>z}3yBf(PUW#1bz&Y;n7So1POHpBK1Rs+ z@B&o!rAc-i{wj+O9^BE9(}J9+j%W=lpELGHUG8+=6!miKh5VYD@(WgUo9y<7uv2;G zE636>WQBf3h8IH!pKD3^{JSHO-)cRwKR#~uUKvdi3*3VGjTgSnp1=l-Ggf=b_832WGsBcapbYS80pek%@k z_?f<6tD)-}Ge~KLCmj4*^dfbT8dkd!mlyF9}NI0%{6*kBY^rZQ4+Z0u*~vx zbWm+5UTwuwGLtXqY0fDLuG8ZD>X7TSQghIfwFV0#ytOx6n~-Gz)3gG8zANTSZMjP{ z3wzSJ|Oz}Z^AgJoe&0HFGZFd*ZbTu#zYkaH;Iv?p5pJ+R!OS#lpJNJb`vMcz_1F_+29je2T!GNr=E$AtmWKhEw1ta{*r;%KXgrDu+wl41 zAjTLlu#KifDaWK?Brz0VV5d-7V8Pf;;X%M@Y=Kg*R*7+0+M6mCd9PC0Tt`T_YZA3X zuc~rjO-d@duKh8Kx$vV0yFXioeH)X+y6JFz1`Om&ST5weNg;^SgTcrh2qEx*r~c(x zHy9l#b}&>c7ak-IDAx&62s`q3yJ@!&)6i6W_6BU$(Go;=VXYsq%a8M^_An=-79;_K z5L&Dp@VS=u#dHV=d*fHM&xnt3dIZ%`T zpJM=;l))?uvwGwY1tc30kQ(Voi}a>+iGUD7DAE&32#{o-<$m^l-go>b#u;av^CJTjxw-GGHP>8oUURPN8owYE>dkagIy2;LeJnveT*rD^l1Q5me=yyc zZc>-#;*@gtnR_DdbMIC^kFVR`y1nxwGsNmlLzur$FLXbWJhVCldouT^@}I3_r*F6V z6+&_v^O?Fwa|AOeIgNb11$okVqMlZyf<_MDM&;vf;iD_m%k@0R1p@@n3*UyyIQJ^2 zPOQMphyQJMz{3A9J4?jhU6EC#w`_Nwf{L2eg!^{d*Uk;WjQajjd*4!3U)+A^i~-4j z7U2tu?Kx4|AN5z1m&&-adTi(yX}ebi=A}H%Y@T zDcOH)!PlU=WyeE|#lpK-$idH`56X_1xjfXE2$Gccl4&r$9B>^<8=iKO?Szi5vr<#d zVfHM3xsS1rCf4ks|6z>N1aDT~%lgJIBI2rR*B1RLpJRnA>gNyL9}g;-pM8cXe8>*NHYE|dski&ln$g{bZ6xdoUtF-?}*0q@qCWv3muJ#nJSz!#-tfyT$ zLYJc6h;5q`jS+g5ZenugaH7HaQUI@x^&PYGv05?E_Q976Q3gF=%B>gue6^pwi%V#azMRW&q(LHz;~ol><;?t zV`pqKaT2(8F@re7@+|<6^jkE}pyTEBAn@x$aHDCN1_jVHT=7-acI{Tenp0er-o9Bs zuNTUFI@rPaSH?9NVmU?*6A=A3w_I;4mL#9@dt`@>po`Iaphx9G6dkFLXn>ZTIgF}b z+DdHS4Nc3rG# zKp{%eRZ+(I!RLf%}r~ld9wEBPxc*CdE8&<(+MDIhtj8 z>S?(&wV;4MgLkKJB{1xtJjxjX31`O}Ly;*9;&tNEJ8F0BZVVw)7HoylzhJ5E@ay?h zT}nf7+kxcXCoY~xYpY(**i*iR$6WVmU-duO_!V7*r}8|I7Z@rkTE$Hv^E)5PsI#RdTC__`VF?Uk%$D3Z2oT+(E)V5~)w~c@V_YWg+yo7jpyHq2` z>bXZzEn2y|&%4M<>{!9oxUZ?#=f9JnqZ=&{fqKa)-a)!1B_7vG7gOIpRy65+X}#1KTWycg#Ol|EnsOdZfEML7Hqe{w>^d!l2?% zckbs#L^!%!av{jZgIkIA-734me19oPJ4^9&Qe=G+IwiA-}0?-ye|OX`Ol5> z-ySTx86$A6k1F<87*n)3Bk*i_LiKG`oLu9*>)x`>kL!73##Au5f!4o5*GW_!Vpa9j z8l(OFkQ3p@8{+2{tuH3rxl-2MTXR?Zk?Io9uEjSR&jXNTpFxQNt>1rR*Q$YhF?wvS zcJnb?5UzzxvDC|2sY!F^QwEF~BUI1zJPoo^r=G@fBl?%TerE_(19GpX@Oy48HP6kY zeetK9-Pc9~ASYfE>0(^b$@h(Hw=td%@{OTJ)x|N%c&avff}-3JB1pK5ePA6F za0^I+pS3J|`sFD1h1Q;j)GfNR) zAYkPt5K)UIGCSE6@Xw7rq}~yV(=-Q?%0&{gsi}{>N|gJI)q+3Nl2xi8uu8B#Z)G1h 
z#+F)x@i?O12HDj5j+kNttAh)KB!BvXqQ*|_!6w~r5dm5lUz)$CotnUGnx z_Y@)EGR)%;OVeOuHyZr)`2d&{0ZupN5`}k*Cq@b-3cU*I&Bc+&^pR5g*pD*yTNfwH z_!6&A7#FL2t-$!s1b&n^v1hg&em`Vt4;n--nXeoG4bg*3bc`%?GgoVUpx+u4)Q{E+ z>-gaVs2Uz)J-vwn7rUQyzq+9@MGWAXf&HA~Av%`{lFC>OLSZ-fo@8;{!$)aWjoV93 z4>qI0u{*ff>Bj(E}k_z@@~2AcMUcLSga*rRu?7KftC& zc9v^#&g5J_1z^9mz@|1IA$UcA9y%tfy;VlP?N%|&Er?#?V z?3DGHcR|;mmhc^7KQo--tpq^#h-;c(|{TX0Mm~4rX0*@nNspyNr2_Dk^S= zCxjK?s24~uT7V0#t8n48ZEUgD znnmMp$lgXQ(|OB>#2mRn!#!CMzDv)fOdcib9sC6r-yI*qI2St`R_?LCp5B?NW?#)G zYuF#WBpYy7GQaRY)*p#7$=_zxYF2btNDb$R5t|`ps7Dt%`Z}E+t=iq%fgFUNr$qxU z@}Bl;+|hdF693pwfs;j$Ju^>MTyUI%oq6Nn2`fevg*o^kzIi6BFFA^2qTrwALJH2|DGk&%v^PI``3~CkD;_&XYGANds&bNv8G?h z)PL2BDoeiJ^K$}c-Tnq~4?^*mKoLID{!g28?M)@BTUL?S6G~h&Pp8oy#kSVP^H#y(F+k{r?DE?u^dF*|&z56NQxekZd1Q&*u8?7e^JDuDC=-0x%uy}=zo zsTSCwMktYni>VhVq&#jS#1pPm`8&B79TF(*I?jb`ZhTlxxk@S34I#}>cj$xE@fAr{Y|4>XqF z^3Cg!wb|g5e|!IR>O+|t=FUzrU43wttFTcW9Alf0M6A8WP|~1yYbr2z6pkIx3MeB; zsvBqUaBn}Wy}pbas&QtEI0TicOYaa>Y@qQVTy02CfD*F4bft7~5>Ke9U#HIPT`ZPj zn|-Tr;Dcg=uG@$d*eCzz8>lQk9Jys-=2PzZxgbL`J|koH9u7s_x7OmhBLmtvekw>A zA8RZ?)Zi|_bm6KLPr?_b_C$YF8&>d6J}r}MSfzJU(}kz$#4vkD#;$e$XQg-S(~UUU zmfi?nZdZ>4vaN^cBgOt4-2o!{?`U{iqnJN|96Au9PElx{+)wmvES%CcvRtZdefC<; z$nV(A=%WU+zggYL_LO8AY($n)G)b{W;YAfo6U@ugwr_NBny6!jeO-s34c7lLYU{}q z1TRTd=5$-`M9JQko@3R882{+6S7S{Za6a>WW6(B~kdDyQX}yiCN6uC?O$;10w=tQ}stO||LRc9cS8V7=I3pwf8EAv6Y&hiZQrKFTR5%VAW*cKzFB{_a>MFE zrLCb*5_7~B&uD0;mt|>btEv8_?XsXi5r1JMm$`u8x{>%A=$sT=Ts-}@@!Q_6Gy9_0C6q$QI`sgd*QKNJ4%f;C6!E?I7?Zm)x7?K@clZJ#b2^% z@?fdEa#JAh>)%G}Jl%rjdxcKNLZ+qmolIJLj=ieS0D`JNUhrFccSF8<(R(4xTjs(P zogcMxnA}A*E7Kf@iIVf*jm2J6JnWWW4V~vFV0gAT!r?CXHD0@myHtpHuden*r$FMAQ~$q#C3NqjBtN_*yCRg3i|u~+sSmaC74kDg$>@#3AK zcJOJYrI^!iLy!D?6$U?&Et;#lD|;et=D6<(?kcEpB@99zhLnTdK{Tz!CU#mfR)=53 zUgkZ$2S1+kRH*#8!s{oOWG|^b7mWzjhK~A?V+NrvxQHRR!N8-~H@uS~b{ocOSa0z} zzH(`nIi*WNeoSMHap)#biUe*sDW8)4sn)qIwl7LgnYi)8QK1$}MB1B^gVl&OU0nC> zJE$w7R=fJd`eSGdzwP^zMS&gBmPQ3fRvOXM0n;vo!3Xl_O!4E&#YN^)SIikc++Zbr z35Z;tCzYE51efCnjISb2Em#8Uz;{sFB-Q8?a(SbRvFC)ew^L__(-KUF4gj#SFMaLy zy5`fY5Kg!Q9=UAJ>S$jQm-W5KN7-myN3Cj_WnfC%xj;YsaZhrQ37^-3 zjITx=g*)9LuTOqge>OH{kS&sQ<1FWIci1X1BccHw_nF9ve?IoU7@2vIspxqW`3&v3FAU<}F+Dv2Hzr{tn4iIYvGoGMXPbgHz@Vp(8x!$Lkw#kDN5n5E>eB2XX-ZcxVd|WmlCm9aZ@S(xRSH#jB;3 zC9a0$*zePKtg=idpk!TdkXc@_F?k(FL& zG>|y$?{v(@L{QNh$rjeaRT~O*I{g>@NH<*rZh&|OIRuB1^kg{mtHU>Ai?7(e)=AK; zIzP@Dsx~oMqp^v$M1Mw)my=b9=+-r6>N!f6bHH>DhEE?qs@Iae;8)i0rj`6~CFYEi zpytai`lI!SX%pJtoB?38}Xb`32;FTpGIwl#dP{FzHMtYX?XjEsa*Tpe=#6(lO zRvKpSVo3Moep+^Kg8K7H1gA?}R`I85LD9G5ljcyN`u>6o2^H?Enj&99Zqq+uK7=3( zegsCwhnzw*d*a`Cwlu2NxNCKrq`#3DDzuk~FqBA~G#vh1rg$Rb;gr)zDAe~aYL~yp zc&;9ZMjWC@56Z0ZyV-?AyX$0LzUHca)vkU0X4O|0+jH)}Ww1Y#!S{`3lZOTyPg6Rk z8zV`4t$MtJ59+0+lipFh4Oa3c`ns3_sXY6^PvV0uV&+3iN_&3TW=<6E~HUAXkbSN#|_wylQ3=es5lrawODPa zEuDIFn{Vg|?Pd5VYl!}cE#qN&wcHXQkh#Z`JgjxJ`_oZ_<6&m+e~Xauw>V_&dMS%L z@Bly4I*;5Ro}0&ReI09FnYO!Y`E+tpJ;nCUiL1vHZC^)E9k^no_q6D%{kV+jr9n5A zA1OgWQIk?mUdjG57Dey;4j3_yg+dh@73erR<3OV>{3H?7VsX)#+_glta=(yF#oG9} zv1#r5PEoH|Qw5;TtW>6i8qy%F0NZ@Oe&$>3-C=e~8% zO{+H6^lIKxCZ?wYo#N9$!87rK5Z`g9VPV=2s|5HcWN=+x0R zZmc&8@gPY*IFx4}>*pVjz4F8G zlsR)h*5QWV6Fmq;29J!qMMSpHc>B%{6i*tREd3C5VV_Q(&x`C(n{~I+e{C^Oy}I7M z2))E{;Vk48P)u6E;W;9!9hnE|N2wQypSK4oBR$3>%Y8vXU2-Z}Qt_mmYe@{x^I2(@ zF?)S@Y&#?z#pk}}LFe~J8<8$+`E-bUPc!}QXTsFYa`NNzH#wo=&LuKakg*~1A)+VJ zgUVS57o`N?f1nAZ;*|X9{7*51CdE=YN{)wF66OQKqwg$^PB-pQ+=$M2Kxp8$1PyQG z9nw)am?!Q%(UCh|#G>7LBwNEV=iKot%((-9kzIA5Dsd|pvf<1WZ32Xoa>0Q}>xiR5 z+3M`?W3gAeWQ=6{^R26lES&c8Sr`<(!Uh{#(4P_Gd1L{KJLv)*iv&R}^QQ|ol#hi5 zE^;~F)qP|)%!OZ^Z#6j8Y|megGB=_Du2=*Jr!!GCmoHrLSDs7Oo3fJ#&tYF`C}jeV>Z_Mq^SK$42MZAVU&@&8 
zj5-Hnnu@HgnBSc%E7h4)Uvw{Oe2vTowAUe#nUW+>+4`L!a{fA{uxPQ$&%(ie(pd7f z-F5qjQQhY8C$sTqudaLv4%^eFcOwpjFq{RAzLA;7Vq0p-`8i`_u4Or2TE9qqjW;-d zD$NqoxS#r)&&pZI)t^jcKVLouD&WY}@BRPFp!BaGm8l)>Nn&aPl4VoG)bK|p!B#FR zq>vR=>?j%iYT$*EVjw$hiI2{OdXCzZ_=cG3M}*sqfRbKK3sC0hqfe;%Rqj-Ckshln zbm*ua;1iN8@Waj_P1v4=8^Z^6mVRD{`RhUmF@p*s=PWgF?ZGgr%)os#3eZf1zy6&; z_9xYAmk4BZv%XIY6k6v&q4jVTgrWlIWvG%+0q76Y4`9H41KGo--RZww#7quWQ0~Rx zHUB6>x<}I4m?;2QI^5T8zRUFhO1uwkZs3i9PM^4k==sSUj@ITN_vi*xKwnB=+=>Ci7rE ziSC;H87~wFr5oyH8I$XD_Woh$^8dd#2m|-C=_h%esUP;hw2J?4&7~v91$_wVXgPE? z5*(k{IZp4(nG3-FoN>f|%{UayZ5Jpp|7&io-(r)NFrs>g{hh2n_QQ<`YitXF$w@nY zynboL6YQ0Zj1jf0-b#Bw%Wif0>#& zF|rL6jyvjkZwSsZy~5S1{&*l@<6JJR$xqzN<8VWY?x=-aK!|71}m&M;EmGaVV<+);m&{m^E5+-)znybD1-!W@2@?PB3K!o)I=$=)so zt*j$v1JCR?f#pr^OI-ZS@eh`o_kTL*cFG3Si?V$?x3+<`LCY5cGJXG8FR;JX3-ae3 z5&eHI4W51qtX=OIsXcQS6XsjU_PAX$b7J+;641c2&BhZH1a69$U(JX~Z*7z&_Jyeg zz|T$7?0PN^PTet5P(3_2HX&$r@d$Bye+T#cGq{IYYQ zSyCKwZ3a4p3EpAIk;V{8KXiK34Y6vESh%dZCa6w*3fj4rCLuINw@p#5Ubi`aBkNZ2 zwB(AFJ9~}usMu^lpV{aEOPJlk83FY`-FqeeCp! zD6ueG1hJM1ssR@Xt+1Csw*m@k!HGEp)b=KhCSkm(MaAh79+)wS=*UX7y&ty@$=93D zzHmVBg~fvn6vZI~K$vg6rWLyXkWcyB+Xlgyb?EfZvf=zN0u`N7r5qun9Bg5=xAaXh{*O1d-L`=VcwNIawara)F$(5?X zM|;H(MZ5)TQk6 zPrQGyP~4lLb0KG2d?-GUB18`W05L@hEGxqGWa0-7iWOZDG{!y1LK%YMxw4di;1f@I}w?Thy5zP%_ves#7<<0u=O`A7I6J54b{>raX7% z3qdHJy9SO4TVH+h11hbkD}$n+)HmNDVu%Wq0|;k>op>0X5B&r)pNR%N#pxKjsvR9d z`8c)4<@V4&$joiH;iHjwwETm-D%R3{pU)+83p3hjh=#4B)fx!|gD_w)9oImaoh@(& zBG{+ce&NW1_vG&6p2QwMwn_S-&*fd-`f@u;8<0J-t?O4{qo$_F=4*}H^gVpX7 za;y6T`WiZus>Vo3BCIOY(}8b)S86>x|1rIX=-qaD!*0yIuC(WTYDHR@76-+#@gQJ# z3dGQlcz}WW3dk|wY=94K3qj?c042GCMMYia*-OkhAqdc>buy1ifl>IVO#Q${HiR)$ z1)%QQrwaq^OX>)!EArRB=F1PhS2oiik`m1Wod*TFPnaoY@*~8nnOw>akv=AoIlpJY zj}^)^Ra)TmJ}YqVIb+wV+qYtm+x1m3tNSkdoa}t|{8$MSyIw?NAf=y~DosPsYg)$X znJ3ZS#CdAyqqW`5YH(J?4d&irJ5zpHrLbGLD!CeS5gY`H zI(-l4mA+Y--_t{RY02h)^)nDsSOQ%?zuHMRT!mp#Ppyx}x{cbEm)_FhFy1K|kIxHh zw`?wuTCW4gZ;0##8ax_OFdvW+|6&#(&SJrS!fo_fgW${*2WVtUvYGlrEN=!{zxnS& zXukXdh$sH1q2*f>(4o&u^(Dy}dXGw*i`CM7j(H|OJuOQL7r4zVQ48ldw`2VpZpTFD zJlzYOlga@*#*OQvsiaMkMuESdzWlx9wTbak9>pVPyc;{pfePpq|7$IQk(X zXco&hZhIF(%BBnMwIDVXa0uKFM0?}mx1s<18&E?Xu07-XA!;ODWQ5!!w>lTkHwkwW zZAPtKQ)Z<4TzC1d53i`Wt1Ve!Zl{cR>HPEn7X@xJ%n)VPm!>4C$!HD)HUPSL4f|ue z6}fX%M;}Nr_n>LK!;BsJq@_>Fmy*uc76D?`k~D{L8K%zTQBlXqCqcmV%S04_rXU6o z-mtWw$VbXoerLFxQpd_+Os(Ub7BTj{YyM462*&4`uxOxQD*4$P%>Y(ErFaPU(wQHEw;D`FZwDmE7tK|%CJ?+1T3+$W{2ByI=(F9|3A~2fqzbCnzk@I@`|9v zvix_3YEy@mhjut>DnDY`_IHNy+!8W|ozS-(?_~*DWZU0fCu5Hwj?0j*?FP6t=Pw}s|j@hAJz=&7!b)5Fh zs9r?eauBik%EtXPhxr)aq?G$@pC1qERwy0Nm?(EGS!USCJJ|@75j zLrx|mW9ko_Ax@TZ|5lW}9OhmT&mmrM;yTjrQlBQecWpEk4_V0~`B;$*R(y%*HZ8tq z@{K1K`3lTR-N~_$CqvTFt@@+l;xMn*j)XtX0;-&pfS&_|;j7UGw>ZH)+cz8UGIGc1UI7e6{r%#o+&$nmI{;o%A z<=RI~b#jh;%&usj0+qX*tP=~SZJ#p~xcg z6G!Hg!_i|peP#GprJyNc&-qO|Sk4*Nvuar&#s#jLXzYG+-o_+v=n*J>N$=W!-)H-% zw80Wd*6UwCy^?sAz%vxl`N8IOM}nD@>A4Nn;?wre_TgcqVFiir}?>YRbA3N4#dvN3jACjVZB}*RJ9D`r_E?6E7!W zKhC=E%`fVd+Y8!Wa|@?1F^#<50|(8pIwqi7^>+p#j5A8;el4tRZNERv+#bc@I&`jT z`*Pw$x1v$gkvmrY^qZWKq-@sA8wbswB}-rTGA(gd9Sy3goOqKwkzOvJ6EN|92Vz1& zl2>S`3d9q22t;MV(ZR%U@8WK795Ufl!`58RzL~KirSQ9A$LpUxxTqcdgChy9Li?!4 zMjwP7-omjgrd8$RahzRKEB4p0@u|)}0z-0H(bOF|d0XT95AC0F56ia1tQW%KLLfj2 zb}I9C27c-#9m!mOr&cZ92((*?t;-0hIDfd+t9+u_QuN*pUQO9U$IDsIY(gWns-H$u zxgL*c+ZUfvmBB;=79GhKXYr9O9s3>-#dp%`?wB8|@Xe&bT1VUPY-8g`?+Te&-X5#Jaf-_-s;ZnGmy{cG(}vMZ&b5HUMswA?ESJpe8@=N zYNXb6a>eDhA75VtJ+?t z7hRx+R&uFYF0Z;}|GIB7&anBZ;ish=ZCENdmaJceG_Nr~2K#;v9<^?IF!>Q_G+H28 zziTuKH`_aDBgHI?6(Jx-*T^musXa}qdp$+uKyd>-Z_2?=dw{*jm-+tf@K^9Sm7KE= zycsqD--^i;=Ex+XNm70~)wmquPJCSb{<~6>V14!Djqmk7+6TT5N<}NS&Kv+rH;;-W 
z*y}^eqmqp#y~4+h7OVZ{#&h3EebKWSb@BOu<^f^TCCnYnAxaE{!+}z{))K{<7V-M7 z?i`AFjK_9=Drf%Xm76y5#*ZEx)rS*6=y{x!7omdYr1v6@maGvxIqWDUa<*$(wB>tU z@YmGg(Km}uI}zXKE1f8wO>`b8(Xen7S|%ITgJ=0-VZcjT3;MCjU)-WL(SEh!R#zvJ zh)i>c7%kG80@>r#pVp6<>vp&570Rjq)~~}NeZqD>>V#<+vmgIbo8Rn5k7M!Ds;BdV z$}&X8{3?WM2keI|q%G6qQ1;539e9J4y**{>9a39$3+AvJ_KKVJO!2e8H_u0|PAWZL z37=s!NqAH6?Ow;ey1sZ^W%Z}E(ZR(I6H0P%7jI$yiInlnAHasP^>#n<%-blDD8u8* zFZeh6REd&Q?$>m|t^nfW9z2%p1UEM4by>>p$lifIu;(|MZoQYTTwm|deG61M@h`_ptWJorZ*CA!}cy>?)2B2 z_4mbSV+l&(vLW~GN#?5#Ae!Wm&9Si2#~N>gPwAL?>jZL{ z?%3~LUmg6l&syr!)$q&4a~XWnM%vn4YOCKLTa`T?eveIe)mB1Nxg^^i`G~0H^l5|; z1$8ydxPMvaYT@LK$qDsT?kg+N)n{~j&oX!$G3+z-cU~FV^9||42qT=$IpOM*AVh(~ zqF-PH;3XegNE#nAtE%?ukr;J2%dao)(H>;|MT#B7pF}ejQ2X9#Id2}z>WeG)Aec|x ztwJ!o$g5guAi;ivR9{3LPNg4S9YL^&4+>p~u2&ngb6H{B$K}KPnMd#D4AW*6Fs;4Z(${$rgGKo`&26D&=qou!@*zuc%zp97ieUc398ekN83|P(IrW zR+@8*e51a$e*VO*qa@`(Wzx4yA8a@L5a ztowXp1&RBi{r9e8^`-hc+Y*+SZWS*%w- zKv>Uq^0x&u5yJ?4p&?on363ZznSm-HBl88ovr-e3Hqo#oHA799*A21%0hr1f~xq#_y! z0I7OmilPO9BFArX{?PU(;4ypb0e{js*!6Cq{Rc2JT+X1q9aM0Cu)xYFz5w)DL=h0X zm(BUk&`)lvfP(nmMkrBEh;E!3AGrrQdXr8C@&wF*R{K*X9niG}%7mt}uu1)*h7a_XM1J|p51V+pUQ{IQ++p51E02E8a3J_i z$yiVDTUTC1AQ|c6b&RRwi?~_gZUXEL=7V{3$O(U@PXPE!*YP5Dv<1p%Wpz0$b;?VWSR%3RIEN2mK3?aZXUsJgq~b#chSS~hN4eowxb)K8N@ zt5X{KTA-W=EQ&sCycx5Bh{dg6rh**U5wUkwCLI9wA=z{d`u*CUv>aK>lYF4z_`lSa zQG~S}&iV@=V=lzej%{WAjpDrl6kvnSUr_v%jcLSlIyZGcQ4cc} z1ml>PocQjN2G5_?y{eLa)l;kUMu$pbv*=CB2hY#QDu;SG_5kb<2{}j?aU5TO9EH36 zMnPEA#7iB>S)pF-iRL~!&f;+-NSWc#yy_0{83zF3l%zwkN9_typ#YWiSeo%fimDNF z8%R!soM$5;97LI#P%=}%O^-FUpV~gkwq4?|t)7$1}5K2Y}8jmSE@ zVPq?Brf|7toUYHK>jY?q z#E?wXJAwM}22v^+Ls2I}+XWH89dLcgUkni+mZOi-QnOsv7yYi-Jx@{@Xi1E%G-2G1 ziTgJGCHMmDrKt8$$SF7zt!`@#=}z2mq&&fU3i`XUN%wlP+ZC^TN&fD(kN=I$d!&zj zLgK@#O-o^+v>!A z{7u{FM!VTi*z+)0p{nKxdnTWN7I!0$`^2`6j}t`9q%Qr=u+5vX-eQZ?=QzmQ4ebxcGUT|!R{aLZ(FfQ#B3^I>nitf_H@fY%}C8J*ia zoh|^kC;7#MNWd=>F)`D`iOle+@luiSUQ}V5Z{W@SYr*FkmD4X>7icL8rVvoP6qdb* z3(yMY6_IK5Gf~a~ESTFXb|>O-X{ouh!ot0 zRt#q(T1}7PtVZguIeESh&YfSrZF;&->SOf1u{PFMmJe>el+~VEr1uIMba?Kk*F`Cumq@pRoLQw*M27MfP}{+4E4 z+3>BdGNiiNv%&tg^_xNa8?;HZ5~UJs7>S5)*Gz!f)f5i7eQuOPbml;4;VKxKOiZKG zaWMHQ5ULk-01KmdZ)&5T!`4l7jDSt4D3sa6eP%SZhkHBE72j0fgo_qXvg&1svhw9GOIugmE8bJ=+^prV|d$$^u! z7wz7sShnHIqNO{iy3RGq$Z8Ua%l2#0mJCG5&0YK|#cs`TsDA4FPPvM44mqsRB0JT? zxI`JNn@GJ(v=r>4o|rd|F=vTWg{$}`_-Jc_fw3i{%bmK#9g-wg>Z&$Y&WgJDPA$1Z zTgZWpc_lVuj*C{XF}1)4vFho%`7hc9?i{+!-e$zPI`QNi$!ZJ8CPzH2T^T_K94JnS z)f&y|iELXTyfyq_FQKk|0RFLkm|-Snyz(nm(|Jm0g~OVp8eiukk|ZMkY21XoCt`US zN|?OB-%C`Oeb!qhsH0IGv;Y=gvly(5{Z`bQEQX2n^<|fxi+=mO%hNklq@-8q zPLoeUWxQ7CEh-aOK9%^j-4#iCOdw5^k_6_NISR8Z03w99V;GH)eZo{b;x^3>{D;17 z2oi1xCYq@gMrH+@;eh*DVe^(gpuC~^bN7P+X2Ne+5#k2@q5?bOn{*$zM9y~*nku9x zQ1$fj`yOG|DfSsMdhP+@U!;4xc8Glt{QwmRk0YVkQHW$iM8t4YVy{M;Yq_~lFhkD< zbrAO(A`z?ZL-*$488T8Gs$akPm!j+M7W(V_7|W?$uu4h2V3~=#m+nU{n`-k^Bg~nw z_f3`@oMX$*khyGUHy62rTBA zg%Q)>AR>9|Re+dssHP13axuJP_e}<(57iExdqnd_B9>Nltzy>E6jQyEV4gI-LW%O@ zKYb-o^;4j6#a;WIVc!a-9l52g0S+`kpCf?Ig#l)3TeMI=z?v$7;zaz&a79u0-03Qa zW)~74J-Qt{?vuwjg2BJ<1G$ln+ucK6M=Mh>L{u|Qh|2h$szNHqNkkX&hm6$v_Sl(9 zUpUz*rgU1;Nhz-9xgx)+J)O%u3bSdZYEESVp|l_ruR@9aS#h-lrK>tmQnA#-OkXqC z94%AT0>Aj*8T>Ql!>k@G!da$J>{NB4N8FUcq4D6POZ~E0AIhFp&Dq>?$o3JnOA@?`)(U1-icLG*mK`Ejjcjn`dpa7mr*z0mN*GX;j? 
zJCFC@R^;~GwMP6hqmaKp-v}r#=ol=ApnK%azIRXTZA*^h(O-5U5>P4^GuaSvRK#tM z#nN8chn$^VlCLqO?)#P)^!Z{t9r2Y(3B1f1DD^8>DF@rBtgV;|j?glJkCxnCs`g)~%8XA01JGah)#U%Gzp`&r#S z)y?X^kIgS5XL~_x+EBAbO5U9|i$>S0!&p}kopX=ON7JX~kVNBgav9q+4HHwAw5F^U^FA+Zctoq7G|jR1rhQM8q>%69OFoy9RY_98CHS$tGdbWRw zmIhdSHf$*g)8o4CmBoRC6?L>*LDzm$iGGZ<^o#5x8t9x~7j5=0i@N7sUNf&V-Ao{r7T*;1pu#UC|K6iP!eb!d?q@|KJ&2 zU?-=wdbgY;3j({fi!*5l1S$7dc3%OSc5F*5tIM|nO_sf*jd{f@XG_D4(4Wz?yB~O(|YNro8)G9y0){s zB;5`&teM0-JJ4(avV9>cZ-OU(KP z8*{n)+c(GcAwT?mx*5<|c?`^&cTcPn+xP9({?X)Tz%S>5@HqhDsP66i=? z`QZ&^`JLe|Xq3bqZsc=C9Hc^XjNbd3+oVkgEs5NQRm;xJUcX}{pFMlxphl-oSf<;& zt~vd1z}~6FYi3SEs^dZ~@03_%OkMHrPU5T0m-dLiZ3<|hOEAA|Ilo%(qcRVPJWb2o zcNvu->tA;kC9RJPnr~^kp+@uY!B`}mnRL>|6A&^r3N3^g_P=uY5bkazc8JRWIa$@R z9WA_Jhhl4F1k4!M2rxl#GP#-4(V&n*Cn;2>)O>V#>yq%Mf9*YeNE4QL#SFXA5EiGEK-WqD?i$%ENB977fFo*P%;P)xacSGpPlqafaG8O&i z+{ubG?`n&t2c6wnP6-I{HEqjb?wlD>;RtWdrm*8lN59+mDau=gZTyXp3 z>X_gP>z?!(pQdAc?YWbV|T+SH{I-c`jc| zZc>RVfH&F`qe5u~^@pcCW1$u7$b1(lCmODPi?>uQH9#$0hQ6kEqyK4VgHQE+X`8Si zrX}<<2y=)i{0Pys8f3H)b`}eE`sEcleidbX+M%K@@4W4dAosg;Lm3zt-Xrj;e~T!7 zonF!zO^%~hk!8v1T|w_RWE8|d);`iY0ps9%v_-E+UVmF(_=`9D)!U(=G_zjld>;X_ zp|U$G3bOO>{~PfcWbv^V&=x`PgQn2n|2N~a=n!Sl;wntPALrEZTusTvapbU#vaX~Za%xy)j+%Ajr#`#@%ueM<8oHlVm-y)?Ftf7+? zbEzc*vKM1ERaDOPq{%ErTBpq0^U-G6GS2C9``71l8MYxYr_moY`t||HG2j0OqB<93 z*Gkq|)(5~8lZG=Dzfw)`=A7d$39dM2r3-SYV}}$kl_FoEJicC|A2olD+tjiF4PeD? z8|=rYzCYH6T;sWPbvC)$V^_RRG#4UZ#yt#B4gs_}a4K8`bsBpLlp4o-5Bnc$ zbe7~UHS2LV4!2hZy6R}&|O_iKw|F{vD=!10e7xZOS0 za^#KT8B4R!L~n%Pz~6?98q4=-^v&gqi_Km}RolU^tfdP=M*0%*-EvG9LhTPdS~6@) zR~}!fj)tdriYDnPe%@|mM|RaO*q+eoWFhkh(Im- zJM7`$pQ&0iWQzbYjvoh8|NCUQu0_y={6}!@wVttnzDdaaipz)Z7lf~j(lS5~o|J_* zyN09=wzB0rB>octAA>$!Q@A`qb}Nk+<1>5o{dQ0#d^dNGY*=zU-5 zg8oGRYC94~c)gK(ZcC8~cNwtD%uj8{+V!01Yp#3jVm+<|B3JW(+w1Y*3YVP>f7h5d z`ck6dXC*5GFx2BH0nCp_rJ$^82Zs2NWV`3Ch%AC=Rk?$rUijRzleLXm*M+VealC6D z2|cI>p-K(cn-}_@Y&X$H@w<8!xqja0QcPPn6ByP!!AM`6vd+h*gN@@bDV-i^0-^F0 zLx@k|_do;r;7@GV&DuKD$1ZU`-$HJunttA9*W}`B7!X!Vqy>S_y`OyJ)-@DA`dKYM zS|F$Z{HR1TU_-b;M~OtT*IQS*Vp&S>59#jM+-I^p$sM2HadyY-DqOB`W78VxdZk$% zp%Kd%YJBp?|+f>^~oYBU`ojrUW^%>k9(3|Vd(_>>A zEb2c|%WZ`o9OVJr}*mz&aG zJQF46_T<=MTV{PF_sO%Hl040b|K)f&I#VxDhjs{H7F2d=@;}{yLM&`uO7c(eD(7#@ zBKtjDmVzn~=M7woeX#lrp>8Qq5u#0a2HFDb=}#(uPtNWQzYB9 zJfmkv@Uij!#*Zf((MnQNw3DM=y86eDy05r-b`|m^b7GM0_^M#6qF9lc-DrI0 zb;yMd)7#ceUY0k>R+WS%h+=bWP&$j;94okcvIX8SF+cJq*ldrV~jvF$`CiqU?fi_{#ReijQw28fFk0MgXr@8HN8 z@zC~ko`~i7<IM9El_ctLNsygRl)n`S|8}v+#-l}Cn+cg@C5e)WgDE9z)RwX z3E~-F4N0}-<*n8R7Sd+&2G=IP|9Zf*M(=V{W0~<-SOws6`03B6bGDMQ9qT^_ZuYl> z(aaxjUm^^(R%NN+MiR66?vnx%OOA_{lnjWfr{*f%pOWLnbzUZ3Uw z2>>gs-T~85fnr=s3Cw#^?BnWTtM`8Bg9}f*=zFAB;uWq-w&X|pzelWCRX(?u*4(YPK=@r;x3!B3An5A;inqgeCU#2;q3m)+5T8 z*`^tQPlPC(xn`gl>3E%R#XxHdfO34v_P)*RF$3S=IOFDQg(>c@3LlXjOTFpgMmOmR|M&bu##`!~fK1kY|k4 z8_lO(@T?fE(A8y0Q*4dFSVu?1Yp6UH7pHj&Z)(HQolwi$kj`k5q=5Qnk6r~s0?OuW zo&(L4!~97jpX#-jBIY*Kbc&lAO16Vl#M+7g0eduQTHpOyOTnpz;eUyHw zs`QPS%>WozT2Bu6%Czr*h%T_YM1%hR4cG>-SUbOhuTtd!uDB$es3-bg&OJ^l8X zBmU_-vX{KE&KYbr4rMC}4I2a32%&QVKXM_jB5y0Gfbe^uN2ER2?mySYPPHIL*0N%K zZ5{vZ;+QE-j@n_n{(Pndn##eIe~oTRq5q@s|1nAbzt)O>r#k&>g!|umhuj#^=iy)R zv?3*E*1&d!yE?eUC#lYXKo-bkBp_4uv>Cm${t9aBz4NAIKWD0Dwr zuOel6jT~Yny(GAwbDZP1ZIKuMfs{?Wc@lxVMp(hD!~9Rmh{iyw*<6fJBQJo-E)F7bI`9Wr6F{}@M!RhoM=@66;|80p$f{E3bfb5`@~G^D-`^uy}p6Hu`XwC)oliGn5@7fSHIf z^vdO?bdryEm=Rjq9ye>uD2F~*WQNZ}CfHhzoFkniCuhr6{Sx0hRo$~I{rJki{fXXS zLR?o88LX#e8JYQ4e6HVcy67@cwul#TFjt&sWooXy_I%%ETXes@JY`t?s6t`!^>seB z#pmEt*|Ba6lU|xVr$A@AfvnmhpSYz*bNg02geP4afi@K& z-=W-B>ECk>LRDzC0ugTbCB=JIh{m}z=3i`e53*1yJ?ou*cQo32+mt$m5IhWQRQ?7X;*7o 
zrJUw=*}TMa$}P{Ou`6D4-77e2f&f?l++S3-A0R(UrKQRf$9}QMp|ev_-~)~QZzyU% zj)d9$#SvviJ2-_XbU(1PEWzwup6-dk(g@E$d22=5mI$`nBia7YzWGMo&WHC_n-Q2m zURxOer^Firr(QFoSuW19XObpaIYOkDXS9ag-Cxv%Kpv@pdRJZ`uVl~@xc3!@>On>3 zKAJ7cGNX!j{rxctk_@v{yPh|oc6@5P)e(%U2~ma+=H3NIFs8K+h;>Cn>4|m=ePJ22 zfn3I@S41}F+~TecuMHk|b)PEgyN)AseQ#ZU*<#aXd9>e_2+RcGB-^>31_x5Bz|SPk zIcx_f$$zS;EsI7VxRAr5TTgZ!jWtTEr3bGOH|xxJjK27wN$00a%n1I?f#L)UJiien znv*WYKN7^^;KY}3M>o`tbM(Mo&h~!C2Fum1tY?m$&&k00<&*i_<;P~-sdhJV)2}SW zPZ;ECSqzH-ZhhVBv^OjwDv^{jlEGNE%ZEBACwCcd$xTDzH_8*7Q0Z7MoeReI6uFv=Wp z6;IO_Dy0iQKQ%eNVKDwJ#j|zlshN+{gfxA9ST>V7jEVfy-kJ)i$GO=-57Gj@qznq| zn2{PRA@VMu2&O5?(TWz{`EOec%(Hnn)oW~&Z9OPxX?O1DZlcRcqZt%%djvI6odC*% zsS$=@cj3i^iee9{${_>MP5+)oQbM_g;IV`6hI0iYX#5%P~4FL$K$zb~opu7}Bq(l(9TP&G? z1im9~ww2syfIG}Q?Mac9*el&o+c8}Q9-X2d0{;ztTk=iF_|QDiMGg%OygV$4hz?W+ z(^oh|?kZIrR+vGqjkQEvohYa|97zRZjru+Qwo#^(bXUK8>Nl3~-)~F*3a0oYqC>tc z8V21l5r%I9wfvLG`S#%7y%gH^fuHpHmF@mOG7Sg}1L}wN`^bKS7yIp)mA@`6FP-XL zxbDu|kQQ}mYD({Fljdasi}CPP#`_7-chE0J*ntw53t%=Oq3I$KqeI%mts^Tz(^qe5 z^DxDKd28`5HU)bjxnoiM^QxFd2Jv#%f&xsMK*t{>`EEkkrrjO>^lI#O&dWzzvq7&} zT$wq%Gt{7OXD*h;W1P#eK~PGNRIxFTg;A`fiYlEF&O~WmDoJw1@t2E5x50>hH!MAS##JEsKByu|Yno7cnR*uKMM&*EOlHpzF_tx{(Zo;{|<$=doGyyzql#6Esn& zpc@p7b;@&iRqlfRz+3Ldq<)qDvdZZsLyCK(f#{Eimc=AyfXzU$QpX@x{NO&6bjG=B zWj)P0+FxCM9@l|) z@#En@m4Gwi98o^C#V=?1g#&Kgq{Q(_HT>E-W@6^qR5$ImaIO_6riCGW4>L;#Om^M= z8Xgi=?+eGvnBnMNdI&#W(C#mvrMr#OYCEm$3#*FtqA!$CkYM5wFl5PNZ_J%bcvCWH z>Tz-|$@2aE#o@;vQ~mawCcOKBoLv~)X*(7SV8GdIhopn>GR+T4Ep!d>)bi#T&Oz9V zzT%Tp+D%*L9C(ECJH$t2Dj$iq3_23X#7DDzhCjJYzW5bdKq|b^B(FlewCa)*%S@c( z1G#5$2dVi4HPVHumDrc*G}4o|XL0W)p3w|>kBjWDOv?5;#uiEiC8}ptP1z(3yej>i zC!`Hs{1!g~p3D2V45~mi&vN%bWNZ&Bo!CmZt7H)ab#U+|J|K_Nt7TXE`)s*Wd*$ZB z1Pbgi4IruB@f1=mSOu!RMmzfGYsB=IS`^SdlgE7xWwHkdK0>N9-T`BSUHU|1?N`3G zkxtWZgYzt&-my!lIww4k!-OvtYn6;?KKO8!Tvo2c)CcjnY(a8b>+?LSFIOl^ zZM-O!G|qV3=&aMY>14W=r?)5OYZ|>ImqU@zkX|*Zwl5w*oFJUwg%)DJLW99H%I69G z(AX9b1JN~KJ5wE_`|7y##vIklFCr9me8J*fW6f1PD*Vq0IXL)!$w`_bl_$QtX_nO! 
z!o1E(VfTp;q&YGqAJF=RQ6h|SJfxL>3mYC`olo;ZKi8^j`ona*v!m$Y7rUhbZJE0w zPlvtHIr{^C8AXZs4D4NeUpx4mq65xb75Acy2`PgI1B6%mpA?s+C$UBcnl58w?`l>m zV0b)YKNW~qLIIbBg2YLrMJh9oA|)oNx_cqnDFNlT`=>M+9=*MIO~)J2S3yR>)PY0A zLd)8UaS9m3SGhg9FeD%T>j=!|Su_*M4SjL4mwNx%jta}$ywvKm?6|B1^Rb@BZ!~xG zyU>Bf0Az5bSU#vo%DdNg@(bLn5C?W$MYL`MRqm|YNI}xzj4;7ufi65{rJ3#&fnB&< zcfQV{)7g`4Z-ON|{+pnQkx2W>)n7iH0tzYzz)$I-W~9=8w4XFNY%|&;#RnC|K3J|J z8S#cE#l6K-N!_%~Yz-~0UDnZy4-Cd18g@Lcb1io_b;`SDYEd}tR9|)YR+m4Y)pDTW zSz6J4pg+_ic4R^bZvzxPSsd#J4$hwX?#yNmvl^`zKhj%j@w=dYW2a)H)y32y`j-IG zvkVEzyO}c)rBMie@RwTkoY9Qzabu`O=O^TpMHpwgS4QtD^PS6Uz8X21J0=1MyhZZqV_O01%!!ZUld4 zFa4Da^|2wgR3pDPi^0pXhY;8~;Ae^XtedA4d4u=C;uJi`L zF?}I3=kc*2QqJeskEGK_O|m_3|8}H;G}J0st6tYbTK+ z!1KyD$O+{Q`1WdvDg0in)MBb*l~+sw$}Bau8hu^cL}F(_^&|@DISSf$MWbxCSsN;X z(XZ-nd7{WRpwG<0a#XB2Z-)ivyfBuaH_;g4hrB_T!j$kf#LIW3Ky*G303rtWc?c`A zIX^oC7Wek#%GzdDQtN?&#Id3*Q=Ma*0 z@t-d_AlXK8>Ce=dGw?jKWPjQ0miStJ>j_tWA&5r#3~2zw2=Fb_FzmlN$;}#4R00Lf z*_PAM+KC@FTDt()N5#m8WOgj69>L?!*#%a%;XjMW?}R7+Zj<}$gq{DyI+~%yN=|sQ zq5{#i?T8W$0qePMw&31ain%xy?ELbRLc3lx$`?~$7^YMU~ZjWIfs zyvoIs`-;P~hM47X9UTDmzA^@&lch9x(#*HiaF>*N~=BI;}$j&vAbJ*?gS4kfw&W@XYvu*L^@$DD8fImwiA^4z?47~~_^3R+HQa($uCno@y z#KuD66OQL~|n?;1$Uo)3kA( zW60hZ9G_}mYA@8I?!IwXt-iyGhU-*_hFq_&bU3hbqD~TFo|sQ4n`v%!JU|T$`P5mU znrH6~wMl97o#cTRSGnAQWoL=YKi8s3IRUx?|G)-ZPow6;7uxvGtRHRZv^?JTC@D&; zu_*V3zUziDAqjQ)KD09j3n3YKc7fMZmX~tN2H$Ht($u^@yl_P^^{r-fyDQHgA{Fe6 zdI`RSlqZVeRzi`?#*^!;>B$%^DLJc}nurmKi)QA0-a1|9XV!1hx&#z%{P8r_UhktZ zh|H8v@hg1)J&7;P)ADlKP(#rs?3yCb0dq!dN&>g>wYsB^c$+zobQbNI1-n8>1YtYf z@soEyS3#(z2;wxnB?ik9Tqq{|jj*dz))PDrPL)QT>J6~_ovMNUrh^Zzr?ctXqsx%R z9*1o5ZzGj;q-!0tkSK{I8ihIU7znlu6B{L;7{%Y1(jF8NmV6o`AvAR}*n*nt@cARN zWo}R!YQRdrUnzbv;w+Kkb^yYDgwv2u|cZd8%t@NX6 zD;}dY#2?9uj>qTX8L4Mm(?b^C*QM}wZe58Z@nw+QSqTgz3Pq+YL+S^eB{E|m^yYTy zY=rK)VhxL@UrkU)r@NQ8%v`U%^IGB#3*`;jJFMnGC;6;2&*JU?nn~U?i_3%AeDJYp zl;_O0sz7^g=w|XHKWKarP%7Ng?y#_|HQ?>Rt15K_Y^Z;-Nk0vgLB1xH4ABg#Sui@? 
zl+MjakzCr4>M-pGwE)je0pM$*n*j*5IUqVPk+FFUcg_RuZPSi3c7HcabRMjT?;7Kh z{IUg4Js;}1z!l4b2P$j&CZP+67P??RL`&=`g7dAR`tuk7e%3!t)X0QWE2?tYORU>n zblTYa`Ui@HSe+B;Ka^vE{LseEHG^(Fh6nu~rLrBoe%|)`jtYg_hh;#%1_S+ym=KzW zV9%GpU?(ZhzGqyT@cBkz0aqF2H4aTr)%JL58&2K9p`*oO{Fy7Ly@04N1T-$hvWyWh z(C8GSP(|>gPXxDJy#Yxjj)6rPG}Vh6LtAgKlW&@J8=qkpxYtpCXqgHmaRrWT#R;S6 z2gBiptvVOPbG+h>mc09Mv18ry@TfrAz&=Y(LcqeLanW9}vcp08m8ZI|4AYmj87^t) zyw5uA;tMrz^R zaqeeI7ZTU6-%p$zTdEMIo3~5|aL9wM*4}cTt{lGigpM;frtb~ISN;YkV6%t9U0oqv zoEG|(j4Tr{XF^NGmfQD+rOu01KqasYY3+WAvYVIbZ&Ip89K?ny3n6R5C57z;Uwx{s zN*j3YLB3VWy?v_xfb%5qo*?EErqm$QKrx>PUL?*E$v4Yl=#m&H+0_NzB^#EG5WMJZ zc|f#gekWSsc_@K_6#TavKzu}gCAhx^jD5fy^qnWPBlp?O3`_yw2 zHF_|;{Z}&T3p*}yR?V2g$Lt6!t5@ZyQc<(C@q1os@#uyW`-7Q%V8h>F)%vg8_Wy6r z^_P&r-|#ER!vW+ghZ#14F9uFeFdf1keS4Q>&zusF+e8YOZm5q};~eOAj=TTR&HV*5 zF5}D7a6AOy_3;pmo}k1E7f<=D)95ImE)(PzHBZv)SS=p=Gk>1PL}#9{N_ccTF6uGr zVZ9>;Oi&|oln}}`@2x|}Y8_`X_Z1%1`p1508a%uUfI@nG+R{QaL873SP)`vY#JdDf zyu-d+{jdijhBYt3W6aZnLHl!Uc|7Ku&7@y#T1}W)h-ypBd1`UvjCwM#C5ay((kMeU zA}*-{eqXqy+~9Y;r><-P5k@vimy;DZU0G4_(J`!gS(&R2x%fUTISw+92$Ae#A*{63 zO2>V4ut(%t_IZUUELzX7&OWlbIi}im`lfmy|GLqHXtO9#Z`lWkxGgAbWj+xd<*89u zSv?FBc0^2Um+Lcs5t~&-K7QM+e()AOKD}#A zM??BG^?dh3gbJ~36i;oxvNehIshsL$)EL@dJBddWdG>0|V!VPE2|h1;kx!$?#4hAg zJfU%82{XJI+6gfKghS6?#nElPm`@F`GQw?#<>Hgu()cZqkR6Q5GObw3-4<8o`^da(C~FABvg!Uf{gzy8OykNAOJ0i^j7xkPuKs z>?RhU+ET?yN3dHc7Bw4nY(|Y?ytJRjC_4L4M5Mck_}U)OHUtNPVX8QzhzdmLY~?`^ zl6gXh1{Gc2y(Wp3(~%heWld>ao!#f#YGNc8Nd9HDl294Yg$aH>dr?`Cco{jQEM>8g zE`S@^D>u~)`QAq{Y3D8Nz{6?STD-Ymi9d%O@#YE6enLrMy{iJsPD#f09rLZRMw#ko z;)OIqwsa3YM#IWeGpk?p=<_uNu93g2o3&5qkLy*#2klRvq$AG}wwp8Ph>jRDTj#yi ziFK35uENMP>qmBv)0Y;e%gyNvFFZGCraXS1@hv0Nk`7k^usj{?$Bk@BrV5HK5Qa=8RxRh^DYT!|UstiXUNVaj;OrVZX3gG#WOmsPyzYgMqpoRo{1wn@Dm zEgbK4vBP+d>>VYUYkxl_wl7psEz74$We9d}zB?N^SmAH%{H!GFTW9eWy`r0litH1v zs265ZEdZ%!RS-aO!mWkRYd%h_$8F_NzRS~p-B7E2T0-MO`!(+COSDs;7ARaJu%F21 z#wtWXgdlVLiqt79ICJp5w=PYz?xORBGsCFTfCGR)fE`_NA9^bEI=EMbn2BR3j zHo8D4Bef|36_;v^V@50QN<5=;BJ=g%ZzgP_Itc82vTX*f`e}0G{NwPro`6UU-3i^% z$_j{ZcV9FN{2jcJ@g6=8RbJ1ijNLrQBC5bcnQMtC=a55>&TWHHqam&atE!IpO*&z7 znJ&mQWj_1DdhpdZ;;*|@xME&$W~8}|e3Q`8mXEhTwh-NsvtJ%hlT6c=3Mhvzx`Q7R zf`Gds)f~2y7+2&oB9OesK7&I|;R||?7HVu?yzR(L?xFlZ&i=Teww|&DzGEzjI>5r? 
zN{CsqZ4dIL47|&&_sz0U*LzDn?e*@bVVQteBgj@fB`Ng8eK(dUaLn8SIs)zWeFNi4 zK1S@IMyZ=gfq_)SSkYr|px_h?K10$J{pmS1$B@bWs#L1t$`T%!izONI}Az83i~c@B_OdUmY;| zFcNigCt+)WLtkh>`EL&<)D3L*;wMFcEvRF#|Fqw`T-T_a&4%|_AN2;N&F{O4lgzr~ z{&EwzNI&86?vww1QZ3Y7`ic4kXIIQw+DRz8;!+lcG zD3$#{ENKKZh3eSNN?WG0L)F0Jvi^9`JTC*~kVPz^e|jW_le$;{ui{VR(Mp73oWqq_ zM?8w7uAgP5mSSo1>r_~+dyo44)Pa7Mtzjb?x|Z3H1J*-aF)E_m#Pjte zZF+5RP0f_-TH_!i#R z*9eaTPgVn*B!|GA);7dx~M5z|*BkH^o*&W&U5GpZ480xsJ43I;H zc>V^6h3v@a?GJp9*kZa38B$2%mKRU}zRi5%4mqJPDvs>Mzizx!$Z|ZmjEpo>lIP47 zm2PdO{3Y9_s0Q(I^@P^@9|@NbgDKFNQZHK>jiJ#W(qldl8=pw683ZXWLu)Skcg%8I z1+(zg=VUykQ@;lYpl_sNdjlf`coyA%bk_e?WQFp5GBAd*(O5-3q|_*1lvx1*%kEZsU3%nSfD z3pOwv{lh68tjo3?>}b4uxlDqctUz3w{{xvdI$ac~#R{gzNa2&78>fAs(Vyf=h_dS0 zNEhmj@qecabPw>7G}G8OZYDR&9Ek47{E?|hPs%%a*aKc!nL~AC{8c-JGYJ_O{o(~0 z-Dc4Ri^sB?u<5cJ5hlb+Ll%mMHs1E%LBH4WUAnVCBd-yjau6ut(9pEpw#;>WZzV7i zW%sSKP4r2fWoTZ?@>s2*6|4IfSICnQ_J1V88HKRh1osaPVgz?{yhe21dKvwtu65Fk zP>689@3Z-ugI2G(kUL$f6xQ@^=al5wS$8e{-{yGV-B<+FHr@LzulJ89R$OU0ij9Ry zWOWyIFQoa+Keo1rzzZtG$j8N8htgU49JH`XRhBZxRE#}X6uVf~(&|-esdWW=n*eln zUVDNDc!GbdBJT55#Bf&VaXfsr9ihEE*0;G7wx52WRB2|q@?qB;I?Nad_fMIY1~?Xf zKO#R{uullZ49sj(w_SE+4L&63@_?8&cYw~giHZJ&*VY|eI8}tS+}0D(5JzJ2At%he zzR64vP|j%7AD;0#DHJ12-m-6V7q^^aS6~`y+q3VU)A9(-VlhxAqM@PTZYG%yf6H(4 zL*)6>twv+=fazT#{bW^9gsX*wTnL*_tSY)W%wJ@6p8%I=!H9`4=6OEdw;0%SynGjG9}xC`)JXLg zUatS?Z~g*!{}b9#iG6z`l3_^7P6`Xw7{XQvR{18oOsH`*Zy!(Q+^2P)74Gv}v;9Ep zZjK8LuD^}z#Bf||zUKft)YutiOW^dwif1_z#h_$?1K=;{7UzpZpD%>zIWQeTqPysGFQ>2u)DpUXGoc6>OZ&3 z994S6HIo6JZoA~Eo!HHw1^F&WJ^^Ky%E9nPmb@II?PtsG{Qr{=U}ec8-X0? zY%~VXTav8Qu(eWNA#N7V;jQTNDDI_vuMlBQPQ0G?tUe%UFe_R`tDcjn05-yqxv6*q3OqYBhr{v~a;6OK_;TPjEE;do<18*bI;w@&f z9?eHPLq5_PItPFk=8Z;gDd9NpY;6izv5k#G-bAaF6}8tsj!O#@&LpXB z`NB};4&{d{g+D0g&ig<~zaWJHjJ{YPzuDhvjPE_!N}S7}Ur(`5t}7e#q>A+9xLsf>-?BtJEOO7Lb3-ae&dQOpkh=_DXJ)VscTT4l9bR@@| ziB4{eHZykA(US!Dbc-8`%Fbt(h1^MneVJBvtXVvu{0`R~d?v|pt^Jq5Of!FB`3W{% z+dGe}0C>p(6%I1P(-l4V%Xv(hB2IR4o&ySY8zhZ)(SB}`cM`YxMQ>sA5QnT%bIL#5 zyv7_LZSP2o_CN8V3j;>tvmJbU0`M;(S4qMUoK{gRp#t2 ze@|zT{rgh^-=;F$p>~!+O4+N=lWcRyXtUouq+EgCAhph1#B07(J~Oe(DLPnE9-u81 z{q$)?LsY9kjQ_ZGbY*%^(K)3?ga%`3qfV;7juNV2*Cvy-cWz0qAq6PHMDDJyn+hOg zcl#(Hhr;nE;QcY92J}l{Yrr7wz+j>dp5 z`cSr*Qoa(sr#HGMEWSS2w4>pv>mR;gZ9>KMl+D|Q>k`0Q(gpNwo+r{0q>dHVz;v76 zCxuXvRUca=&59K~Qug+%pJ*g4SsU>0RBbQ4EV)TG4b6r#E2?7psEFctzJ@tHz5w@@ z2T7MWL?M+?y*JSMw_Gt|-4w#hH@6BLc2nn)3X5UVsU(KjE5vg^P1JU{X~a7z$fb2A z`aXdqwWNq1ZE4{-XzdF|{TVv;dtE2l57*UK(>7D|Pznt}H&&vQvMs}BIjRi5eISQ(^H=DN4&&uS$G)$59t+?e)x@na+H>B)_Q9ArqzPdWnjPuT+vW z*M;6LlD+0QKha!9ZnV*wRGhAuo)-)=Y$x*kU~wt|*5@2S%Z-_OAq;{T_1Cw~Tdo7G zv2Wou^9{VfH#m{tlRmmN(Vj)3cjksAFV3=NewKC}K9-W6_sG@G;nSuaO?E=2s(|73 zmJg3L&T-8JLE~o2t6m@}fov-DBavai<18fnbO2Z5LilF(m#P?&oq>t-(-G^Y=Yfy! 
zNL;2kEnb5drHe3yMtLGaEw2rdIM+ermVDJi%NEynxqOWtIz=spkh`+Esgip+%t69{ zXp9Wem>}qj0~r%UF@iAnBD;Y#=85R<^wG3}AuR#wPTg@Mw+ml1bIP_k2)tR~bS+%ufb+L7X#yM)4lik()J@?)g^CwHR*r!B_1pf4H`P{lwpR`;hq& z3cKC2FE+b%#rfEOm&Ml`u#6Bm{JPnsnZ~Z3WDz5GjMR*O6BV zLl_V_%(pfP)7My;LH}Ket}%(`ZR=8LE!ckKi5eP98_e}h?9vMR?hza>@zvD36?Do0 zPFLYySSTdyHBv0XU*?if^i0(;N|k+qlIta)8Ha>%pzgI#7dQJ-GWLwmY-m zB;DBU(VgZT0qZ@^G< zB+I;Y8phE1G)^d;kH21ji4;|XrG@gSf(;7w#J?~%>S4EpfT0~*AwAiCaTk8V=!;^q z!>W&F75J_Q#2W*|Qh_-t<%IzRt| z9!uI+rX5ai;bSyc6{DzXMV&BhvMRGYArP1C%OB4aA$8$HX}9dE(#9_Ld~PWY4)CG( zk@w;=_C0C_zuQy*bv7`^d}A}cWy|i^?~!4yD=o2CJ8Trf8YkahIME{@=vuT%91i)o zl;sI@YqvdA$G>{enAK<8tlKlr1niUfn(4}!7Yi(iS0sMoFNE1+6{2wRoFeF4y(+LB zfa>ZBJKrD4j1gvPyq3hDjuDDp30z)5gU%0x%Kb4liAzoaY*R>kGq6 zsGdJR@6N^LBG>p$x&P!{AoF_0S<+b_$ox#z%?yK~8{IRnCkI4CtnISH*=jZVK68e$ zyP7`TiTW^28cmtmKqW&&tB0iG-VB8)@}lP1e&kb=295ISwal6gnp?#L%`$-iYYG zKtQ)zQsRxG;f~<*K9TnaMPpwnQq2Pzt#A+($vo z;QoYL6XJZHoRS(GhcIy(|~kRDDXQn-lq4V62Eo8D5}w`;IW z$5+p_c->rspgBls0(=e;&U)LFF6O(Lus0-Roox(@_{eapifP5e;EbY^T8C2pGvcYa z25_5SXo~^|Qy1N(^~zEc%k_MEW^bl`daDR+`7c+=$t%bL&jEPNYw*&lh&n_(XBswIfoheq3KZ7&SV?B{t0GT}9a2CW z62dw1_KTfcz-#l9_es;JywqJAxfQ9Sk&i?P0(^d&A83bx(%Ds}IH7#q;RU9e&(hVg zBlA5weQc+{Nak&CQ7Uab$K`FJjui=Ptu8(bjkB~wg^wGfeLSz%nWQhOy0Yoym}d#F zc?;3Ij`dUS9^;yu0@?5@U;7x>Kun76xaP(gSx0Y!YW&$uOrh>uqN7!Tnuqo7#~PD? zvHZKGk2K6L%BCYUXXj>)*brL|1&>Y zYTU zS4>E4;^Z9q9sp^HkBl~5?m82^{nS7!-m#kc@UeT;uEieNWq_F;MxN=ws-ICrC}BIE z^tWSckqVT7o+f%oAT40~2-@`ko|>weDDfPi0ucy0^q!J5G@{m#Xyl^w zMXGWwNnI@Z!^#cXvkK_&0gGx8_Pmfc*}2n0X5R8Rb1;NXP+PAqw{k0>21uvMgaD{h zLQjBY_aYF3y#IF(qp9u+FSv|aSvZdCB;^B(bw=!#@)S^Jl?sp^GJXM;Xkb%9C;-p% z1C)qoo}qBpz@+O)?vJs1za$fha5uU={U1ly{S_k5%+JpT?EKFEKy?1wr48{Mm;Z9D z*YE<5AJ>d^)`RGQuJ8;8qAAD4NT?pDz^`OkFAjFA5FCE!N+3}FyOGya_p*|8O6QK> zbx67nc11_duX`SfO4mHjsw^ z?{3_K@UFlv8F>x&ebmSW!Y)D`d%0jeW9?kvz(GE@ zj5(xIYifYPRpimK!*?3_mlyG>|mW^&(6L-RL=n@Nbl zBM@Z>23AnXWL&7n>&Q`C=MBCC@SO7fB(t#uZQ}2gD!inmTsPGhQ|=X`PE)DM&vF_W zh#IYUC33Byem?BX6L5%_C4>%_rigy0VGtBu9&ZuACWk^(7fuKW^v)J6{YoYouu!s| zVvS<*m?V_U`QO(f*Z(3I^#~;QZ0_MPy8Q2?H_+O2-LH}0oDc1EbrI$Eh1HnErjU7% zQ?8}#L>9x_EUmTAmbq+NJ-E)>zOoIqu9rL7%ft@u2QE)envwXYmSubBzBJXw+vzZy zIj#(S2V0^3`v(O5#cFglP+8)6V!cG2a&CO8JBv}7bKfhWG60~rQjK&K0t~ip?pzE` zAa8JcL=@Vuc%p4YFx++)$`DOnsqqqJP*l91e7!CZTkP=NW;6DmD>ot1e*wgCQoK1g z;(4i~1@?vEeISPrADo%^OI3L9OwFgZuTiJ2t_FT=&?1QP15B_&Kq`&y&BOIiD2J!m zCpmjylX~6)yCeW8#&dFfYc6KyuKQBY&h@s#xO2@6LGN9A{f++Kd@dP)N&6#Eou8QmIB+JMF5GI4_RTm0p8PuPATK9I!vF>6y|I1g-kzQzX zY>7d-h~ePPY4|y3%XFru8CNHwi5|I06&5M_e1=fVeTllF~Z-M+0m%Y znbK)U`b92#0EjmfMtjXnSQyMNNN?&S@kr1D$`Z)fdUt<=zSveC>z(w~qq@5J)l(oK!u=bLDlzxh@mZm3 z%sbj&>uWL|cFq->S=Qmr%x0l_Qq`9p%RDxE(at&I-{u!n=LmPJG;(iw9{p1KD@cG; zzVcP)x|~%pI%R*W*lM(8r6%A;dYXy5o6biIQE#e)ilV>_U=;O!TIE2v1j0`%7c`>E z2+pKDI0$z%Z+9H=>Y6Mi7G67OwUn;74WhTFI5=6`uqY?8?`ND91jnsCvVCHl%&2y; zXVyW%Z|nrv9Q?!e1jT`5@F{FbCh<@e+b4Thmc9{YAB;c8pK^3WwQ+3|uM*#QGJ5~-_G4~LrGI`+7hoVs%MJE?JE1r_wMvmvE5*_;pcUXUoQzr{x7!1aDYf# zSJ40jb|zEhveNlVhrY)1(Pqccp5t+G*wt*ux@TP}aTHn7%2_QK+vv032N2%9&4xt- zB`hcUd|hwYsMWf&b+w9@slGIl<*|v1Q821&BFgC-J7dk2G%%R&4t=CkC@TC@THp9) zgB)c$|0-DJ=(i(@y+HuE?%~Pn-@1R3AI#SQjQ-#jT*)>Nf2{ z$@7|wqjT=g>AMy(5sDxT_^gH5xKC1V(Q<8-*;~V~hXY`>L>_C-bL(`bSpd_L8D1lg zDSG}`&>6woY5bAps^$Z$f&1P(FI294H_GxUoEL2oJ+n4DF>`H@=Vb>yLS9}cE=l%O zmxN>I0*{qc?=;~av=q0L94S()V`mywOjFUecTH38YxIY$D-(V(!>7ItSa^pLsW6Dk zcIXLY^y4R`L#i@H;##kpuTWJlEk3#TqO(zUd)VmOIqsN%cD0{S-tMZR@Q+9=DbbI0@YgBrx1=?X5x6%FbO2tKymn=1@ zuOGXdZ(KVv>RrKw80zCu4deTpvFSZuQr>E}C2SlpRwx)h!f9kt3)ftbqi`KbjyTNI zF`mPdh1jvGa$OLIP!6RW4YeeBy(ah_O;GzPL#GVZ>5P)To(hq#1Nq;;xs)d8OxE? 
zO0R=rZ^`+jTU?u~uDwQRtg}=3yT0k`UGn}XLen7+6SY)xhVE^AHrLOJ%G7rai8lc1lg-&l?3=cdJgzLgoY z#~#{n@r-(XxeBHKF0fIxSQ+q{lJ zfY3e`Qkp)YCRK8KE-fp%tsZ4Ou$ftzVlpOi+O=~Jk&V_GQh!;AF>aIVQ;XQ4f$I-d z>Rx{9w64ACZi<@;z!vH-`|p?TOv$85H(+6imr}@<~m{(u2Jz4K~VJx zpV#eF^NhNJq-fiKl(&@wq>&Rrh`;ZH{a3#JE2PF{0Q&hA@h<+L3I4o) zBamXaMDo<9jiaZDMGpI)4|i9O3@5)#S!h(tdXkZi&Aq1nBrIYG3a z+{3_|CezL??=Nmhul&OWj&2!0nun+ zWdwkbnazzYEcbePM5XKhc2U{0=c;iz+iUK*OETmUL|du-(%YB-SA zKI1~Yb^ylgPO?yz-BI5--wx9dRv@ZmmcQ#1m&7MtO)^liGQ|ntUq3;F`<(!pToydH zI&uVIy;EGw`JgJvm+n^7?a~mdegQfD=jThZFe_LE9QT~zNqQpwUK^7<kkVjF2;+T9_rp|u)o*9#0ffVgh$jTOV_TnDbHc*HJ0zznCYE}&)OLoa7WbW% zl;MA-A)9`BLa_SfN~g2D-1&1^UbNfw=gCfzM}Vd|P(O)P6(4Dc_eyY{tNWCdLSt8Q zf8zF+v`^fR;(xsEodqNEm;@$80e)rt>^^9?T-x&AYJH9^{WbZ$AcGj9}t~KX;=JPyb z`jt_w!umsgPfK??@(41D@Agx*7Nk#!28()9@zw3|M6f;O))eG@qoOk7 zOTy=;v&qeYcLZ>`pVBWAoHH#p(;57(Ky@)5W#94f4yim^&#qbUsoflI7p4$>`L%)f z&2&r;dH=^_iFOE(%c_XnX2Mb}s)e83l}*QJdxI~{-kV;pe;B3F@-oV-(_$MVb%(3t znv?Y)YX+SIkP)3flfmz!7YVAmOw)GN@m4z8dQ@>y-FfC4VR~~EA(?tx&H2KQ{AqO* z5UN<4ZQU~^n98`KdM5Ni*BzZqPqa{W?YM0Ja)-l3QD)7s@=L+r)b5&$g?#LEz#%Y^ zo!}4i4Df|?;x+tJpyO?Bzx|Zck{UuksbVA67{9{tU3A%GNs>o8&1!+Buid&y+jsHI z6~a!m&^h{&u z@M|2m8F_S0UGOV5eLfeFFEa|HrEgU_uONECQ&bq#))0_>o2Nl?_;q``d8RXpR-H3w zQ;G?yCv|SNVa+sh7iRii1U(GO2)=xDF}xj@w4fwzWU3MCgygSXz~+|uSDSJq?*{~+0v{P}X7ulmu^d30ql`6C?vYI~}XiG#}2;LCo# zzOuUTN%XV6=mPm0p#|?%VRZ#CQEWD-dUb1mA^e3HLDLHD{CNUcf3;~sb0VRk7fL(a zOP#{XR#eY^y`6W?zT?j`-ur#RhaLsuZ=%5$*FX%4DvLByBdtWiG%80yBdMK=`gpX- zPtlMo<^{4^&&7v1E*d9H`_0amYtKa%ZblIv4J-A3(}=bZN!0VO)8XB_ZreT*2zG0a zpes>8bj@FKP-4N^Z0Vm#Lq+>9=gUUo5;RK3-@JOj({YqMU1!7=H}PVg`a{S&*=knT zkh2DJtmkJCkunv%Hm_D@JA$_8@5!tV`Zmb5_^|xabd7iCqkzu)DQ=ke;QM~81S7l; zF1Zy*$fDzG91*rPv^cT;GO%b_ilS2()6hhH`Ntny4nGU`&4?6dc9#SSvKzC=&)v<3j{(0vgj7Fz6 zN0toyb~1-`FhsO%kuh0Tjccl$GY2aOnYufUiL@G>R=08ickHua!o04xqhfZ+DuS_>R`1 z>rrAqLrw-4;GbUNJli?=k%o7iqVn{Sb5a3!-%;jo2gEwL{$k%yViL}4(G7i+MPh13 zLjGcjeWOvZ_yJabVbo8ce&ojJp)-d?&|+CV{HAFOeSjII+3V(KVb2rDhTlsQd@xiJ zkzg8kXC^xjjF@iAR8Hrw*hK!}8T2dO*!_nOaGeHcfG~nhzp)kzP#90%uzD~KQ_Ta+ zOHcG#m(WGlp-9s18}BzbVLRpKo1rVQvK;R>ouX%SXcT+cJhNpd+74_ zS92{ox}=4Jhmjw+1(Ms9M1EFYxwMo9stwR^eBhYmAd)e7En1RS^dqI3$`5%3%==p~ zyKg`l9F~A&!5I>>r#$kj6_kct_?L4gzHIU|I^T3Gq5Rll)TzRX$RNmI7xnO|SoP*4 z{uKYYDXvEwn4^X0uNidmcU8^?G)dJYV`H0(Qe_k!K+TcNzX;Dm+)B$dIzP*1OZpx~ z1ha~ImKS}vFL@FqDrMl3C>PSGzA*S6PDfBdhgMfL)r^(YDt1UQ*Unxm9Exs;$ohW$ zy)9V)z6{5rJ1VA+hPUCT+FIG~p6Fkt)ciiv{_AzDycO$P9gc(-wP7S8ZN#`kod&?1 zXbC<)^KcXIuWN%l#x-cGGMC5{$P#(B+7FF6!Lx7f!7BLPsq#J$mVT$!VZq5{Z7qNO z<6rQq}L|(Eww6`+(+Vix!67jos%SMS& zB&=kyM5cF_>zO;4bop!)K&Ew!nnyO_UMC;;Xj6P9CWiydwC_(y`6!mI5G*H!sQardTFwFyH+?`gxmPOX|+toG7syrKKqT-e=+ zbQQ}7x$9*c-rqVrI;<#Jr5Kj{nAT5W0T7beit=j3j+4tIZM5ViB3)wQvoeqhA?3_> zj}xD09y9GLXj7<#d%LBJXcHm`#Bj*>g*Y+3Qrv_^N$-T|EBX!HOO5T9-(05dy60lP z#eTac#IbnABihuQYf?d8=H14IM%6_9}9e%+_Au6T~Ox@-p|6NZ>bT6L=zoIyqYYAt%@ zt~*j$Ip%n{ot-cL@^%~J)k_gibCf5^R}48&!jWCE%D7Leq4xPxR@L#~b4rv{`^v8n zKrlp1K|0IHk*j*QYA_<7GEQ*20H&SYu%0^|IaU&+icSvjdR zpYn7}ckH^FYTiSSYP$~tyKR*yN(*35A!d(nX>tG|!RBDr$c=zqKyd1^R33TK)YclSuS=4n%1KL{=i3$)i;27 zy130r)_qX_5&N+1`avtBiTWiylfa6U!=)yo&8(IXLW$)wdzFk~GAe;r$vd}RPNC?S89T>+1DB*q{TQ}P#1fe3omhV-;~-mD}Jk zBF+0M`FhyIEEucvH8XUve`nws-eaHd9eBdb6A|o`<(nBKzuU##i3D7TzekrSgv;5O zL>=H&K18#!f;NjcId_bE9UP8KPA{v6yi1kecL!<*C0*O+K)4i|p7}o+9{u^Oe}zlq z=lf0Nm7|UVrKo_rp>{vJMD@OPb7TSOtq(>3h9hJzotq#zg7CYFpJ?X3Fe1Gz0H?S0 z&IzM$x#;1svavU=L7%<%gxcvv{FDw@_z-A=U4c(JFIXU2;2+Z{u!BL;;XGjGHE&UN zzpU@iROr2|l^?uZcopj1g-=SJqJ=g`oIZcpgadtDfP=uaoYI=Q^~dd8)Owab1G3+%@*G4#WDF!ee! 
z`y-tPkE&%$rviC1UoZ&hj@Jtdk%dH}QSW&IR_4Go>l9pD`S*In)88=LSvjulk#cjz z)lpAg-%}_VA!FUvQYCR^SNA9IW_rL*DQlkRl&0@IAU4px2EoYWuieqBS# z+~jA9Fvie@u2%RZD9EfKw4HtTvs-j->g#7;`V7A|bWukj?Ul2)ReziqyfQ{?AX%|1 zb)Gi9Me82M6L)+P#RTfAL+q}*hE+Q9RNRo~%)eX%;Gm7@$e^-QQzCbL>9j+xID;#B zgGDn8v>I0j4Jkf|d+n3V?wS^7t_>DR1hIGi+OVUjHfy8g_&{+t?4D1-)2`?Vq*=9iQ3{DK(KSvg9p(e9mH7StQW&=Q^UnW6 zAF6uo$)#y7W7A3N5ID`2{hP`(AGG!?oq${vBR zv-8S(wqYC}@9qD&vC99ohSUANn8f)-FNbfHvA{u!*%wbnhEvE7J5p?IWEed z@Q{Bp7h3S9%u^-d6-QSt`F!-hvZli6FJev5#+=AVk1yf4OQuqu24ywL#rS5vycqSN zWto(3TY+=@-Zj#)B7AIz62I+Wa8*X;o$A321h;iC{7If+6&dncEW-gLA)aE&K7h@pqYBd?!cJzPgzu3Z2uw$8V zOdeuAHhXy9S2LA5b3CD6z3wCq>5Erek9S%nD^^kckGlL{wfVpPq*TsoKwC9?GJ8s8 zd9xjg$meex<~a=ZV_f=uJ;TDhaf6Tcb(x^G+pwdIy`Pd z+H!2t+r>#cez$`M%iNaxg*`%HJ)7<$r|O?VHMLtzc!hiJN2l!FT`=~P_Ec7-dc^vQ zlv67}@F#9A(eZT+d%KXrtSkGDvnS{IJ%@FN!2qJ5oP#L4Z$SEmxYwgx@A;u80h+6* z3#FJjh2FiSFE1kUSru-|^z&6$PmJwW>iel@g~UrpF2%FQCJ2vrH$Vo>92?43J;rN( zLK!yL93L~3*J&hr2|s%ty=w`1k-MSmQ66Naf9xtQ>-QQNK7L?OUYY(lRNvxZ{bipo zCtvQmKW#p%>VoRF8k@#vJKiYMesa^QR8mh_=~IM5YpY8DbA*Zht(2IsgnLsiLx|nl zY2uvSaoHx$yIns5;{QoP;b})pLN^i;>^g$DiQf)_{SA7bb84vK6mPmG^j z9js+`@z>=8EgvE>57Q|hBL%bs%HqX#GP7cL#qXWIY}{>UXEfW~;}N+YqI}sq%y@jM z(F};ZuB~@lK3SQ)r=*=O_i3Q;Nod=+d9}9XEWBhDfbsrZE&sXWlxda|d(pY_0bdX8 ztw*j=4ifF-39;opt44c!4`kZ;7}PValZceyT`)|6YIXM~#$&QrJ7$x}t$A&iy#|-{ z@Qa2?7GkdgO~!1#-0>HGv6qmtM?2JwXp6T=d=P5inImJT)1$4q?zF*RbQtrSgwB|0 z?P-2|!Eo~GSXB-Jt{`}!Uj93|^rd*lRys@H8qVUF(O>rC?apf)E>19wbviK|9rlZ7$O$l+ZH@xlbaSUBI&I`#2&Z{ZU1+tZ^$>z58{%fT@WvR8i zANNTI4evE=_G^bPnFequ9QrBgGbt-9rMOroT*7^EWrqfv&cwvP1qi=MxEdg?tEGpV%foMDv~?8c{jwTqti@2l zj@Hmbrs4Y(tVEFIp>D@pO^NUwdZvhz1y06@Ady+|Mp5Z|YOQYW&wH9by?OuWpvzEc zY`H#$baC+p=`Zv928=CdAIYa0q$2pTzSg~F^|kr|Kn8tB%i=OyizmXf3uTMRGn$}k&)(fch&jv2u5;JjMeUQtkb;&!~J2R2YlATPkF3zeXHck-cs0mxvq0^fVcF$^_6hP-@Ze0GG6X>8-R#S<4dflyalLOs;uUx4>owJMOrBoAaKW zL!DUUuO6XqC{b|6{r?w4Hh)q>{CWMqPjs_4-36kCdATl%d2DcTTU|R1pD8PYCcM+U zU0zKE>{z@Z%W$+IK^Z_!0l3T~5K#g0n(hkJrUSyH_-^jTNCrxF*zOynkl=HlcV?`- z`4{S)@doKM@HNZw-y}%0zYtX6{ZN3jsY91aFC{s=%BVVDiD=^}dTo`7Z}21VY}ntP ztxga|1%VC#r9)!dZ;~#NT?xDa{O=C~I8FrHAg3%vs9gZ9fGHdVNI1ej0H_>20=RGF z?2hjcfY}vB{QVheb=aC9(ZjEe_gr7&Hg12RZ#JQl{Y}#F<7MMn(m5k=*i*+&5I=89 zoGqj91t*amNCxa|LYE^)n@18%(k`~?Ys{F+-Y*T>t+3VfVs8a!_7^S{DK|%AmP6yK zCt-aB&vPy&+k63P_yu_2O_YHik}WXdT_oNReigh%BXNEn_?slccn*U7{#Vz)&n5s4 z2CMlGM4GT}0O~-(1U@hJ7a%PaNM&=70m`U3cz7b>$n7j^VIFS?`P)MPTo)w*Lw-&Q z;HXfg0HUntte40H`rCj2_@mF7fBQLL4DWOJ>L`kCpKfdYSiJ>>EkIs}{{&hL&**-W za00^#X{S3S(*hU}U;HLnj5|&s2!sFjQh$A8EE;i6H4ZuMg#!q-cD3f;B!l^WpcT;M z$WG({8V#oBdW(11(ml_;tc^UF4R1KznLhT~1g0KaK>XWxAxuMCmDs_RigPS?+yphB zcI4810S_^q+JK>}2-8!mKW6r4pZo~b$0OLm(g`NJ*fCxESSru|%dSmD*`G5k1pH=W z92xQbFaV+b1;;P@0{9@x$iKgJYxZB_;r9;{&iCdCh5#-FM_&rw-+%<3BmqRny7fu` zi0iZA-=6{ca=X zT2AK9@S+afs&UGG`k26ncSt|^PvigD;ZnjJz#YM3uxuY(RUPj!p)d42=casK8P?{X zi{SsvA@bZ*x`v|*mlMJ<6f;@;hB+-BpPC6hQ#*bo8?U z@7-6S_9>>gTbP0&hU)DyH?El_aN@XTI(<;on=F*xX5o7M zR%9bYB?kbD`QN+DUxV`d68-l2`KC-f3rXfN%R|$ZjWtHrKe0aIIoyVN+FpC80rJ&| zoCUKpUu#8vy1?9#ym~{)_4zj;ntU@9)B^kLk_~Lj0g|Ssa)2}7dNxDVi9O1(1FIZ4 z-XC0USyW4P;%_rXr|N|FL2|rc z``*SNy7pylpuxzC7q|R1JllUL-wo9O>X%Yv>}!f9SZ9nM;@UeZiK=>y9Mt0QO;($d zhs{0{-=XBK+-##nS!h<<@u?f*`uNNJAJuPRwS>BthSLMki;c4w=RPRAQwRRp%)LTw#)n{U;dhmks|be{+o3lgC9c#-1GqpPY8 zWS_mBq8VP)T5QBr>p*`nEqg#NDe>WBA%X4n9-k(>@AVcIZMr&hNabEzQ0!@2(Nnm9 z+{6!Ik6TR1+s(@3qmXmU|9<%(M|g3!$>Ls& zbI_^jh&b#4O5?>6%}bV4%gAxVRJ&>dP`~7L-LnIDuOcqBK&Ws(KmJ7#+qJoo{%aRa z-i{z8=rxQHEHZ)3*+esYra9I(ckWF?FO@LnrbQs7SEmR-WBX4pSL95Aces!3>Z}!$ zrV=W8k&FcVC>pc%jiQikRAG`%IW{$^}ob0|Ha{k=u5xmH!@!Z4?@|2g5VdGE<)v6>N{%Q 
zW6IJ@s(Pmj?*_;0v8IsdsjxW?D4!^ku^~q63CbVQc^v@b7a02l6d4buK=RKJ|Hdf-PoK_t7CXew*2LNo3E-XN}lUR*yd8dbbni9uzejJ_{3$#+Zc`7W;zqZLl)-J zvApP#bPU(K)4w!kn8J^P6g7^Oj0)Yod>qS7^bA1tH!hFziZ;D7OFD3r?OZ(4KF>$; z;X!Er@nTo>F&^x`sNrUz^`v$*i#S;pV_A+>1xZapG-gu^(Uew^{txO{3HOw!BGu=Cf?A8?V70-@`)5 z)WhQC$)ZFU2A=>nWuG<((kraZHcR?AmaA)I}2tJ3?lJ{Vo;yIu>NR&#HAbGPY9a;_7u! z^|qq0!R_zj(=4Kw&3?LeALTH%zLrIg{>YHjZhK3Pyn{iJiuuv56Q$3AldLT$!n7+G zrlqKP)u~*o`kqC`2@(zfPLlll5dX6sDvTaUPi7bRVe^z3~$!(h~a&UQU>PNph0JLg92V_d?c@0Y`==JoN z@+*qM-PKtUOw&hG5!5cGn*;3hl%Fz z)lp18Ct;R&92#*Ic595_@O@owN}^qXC-h8dc%;YYgd3dux3Z{dWPh{kgzJ3AOnRhZe#ckcV#qCcYunxI%t4`w0v4*V8fWFT zOk8>qBtF-cG=><^4MS4k=zxAITTnpKP7Nk>)`JVuK3y!jqx`HA|EZzfh4j4|?d`BA zkprjv+bgxn0i`Fe>BvIbjE@Q$`2~jU1acRx-O}R6s*qD49w4zHs15%4n`E6~V#08b z54`7AvG}h`_d|1iBnz>z?NEu4stWHJ<>wbmOX8E z46!zR#d`Jtp{}fAGvJ!?irToR#%c?16M(8hmg(hK>!h-}KGIfj7HZ*VDs0SdVw#UW zK4;p4Y&m0S;O*(u5(t=j{;d7P(@o>Ip`mrAHbSA?B(DeAy2<8{m6>-7Q()TBx&r+o z%kBa>!g|Y&wyE_Su!J+f`hJ`&M4s-XpPJD4IwYNl906&M==~PZ;UxH=oc(n45RpFs zUo!b{kWTyp>!8Doo=`ly0||jC6mPHx#Aa=H)YQ~J_7~ZN^3#b`h7Y}Z+7r8LHg+;C z*yVxAW|V=39!U*Qd->0AhiT(S>cFI+c_0TU7CH-b&6PO&UO+tuWr8!K|RX zPdbLzGca*+GX-kw^Lb5v@w02b$K*lM9L1J!x@mjan2q@F{Av9HYPG?%BsO7OJxVXS zxjGysR#>(s^4GjswiycmdbxrLGvtJup@d(#Q^cl0eLP!EfI#zX#UwI#^4bGZpZNPQi+! z*JWAml;pW^<)o%a>AJYwz8E6(O>bbQi`3A7L38cWYbd0{j}2EH=!kJ9x3Bh|i}q%n zo`3eZhI#+u%P_r%{AScw%Rk@!qW<@L_j^JXKyTSgCIZGPU$32jI%bF?>{ou1 zX2} z+Lq zs(w7{5bTrPkbS>F(6vLirlCU!8%zl`qLy0HlG{%cP@D@f&*pYD9W2c*FuU{?1?7CJ zd2lXw6ntJtAjO5EFQ?;@4Y6aZpZz2zDYyr<%Vo=LS2X07bhfmlUqnwPhH#cPcRKW@ zQ(@43xtJtmf~QAFY;|htVU1&dyfB^BLE0*2x3y50Iq$s_;YH>HjL8+MY|VVLVw_;c z-s9vwU&(a)gGWR!umk7=HyK_F>+NXVT+cbD#C?5&XS_-x9OMwNVp&8qw1v?2~WAJNFRC=#wW((1jEUZnfYLM9@IE4)=_+*9i~ z^Xm-(1#>Gu^p)jKEOdD^%f0T*yY@XFu8FE8WLTil151GfrXlDD*rYe3M{ao4# zHoDTw%q#j#i1+*Xhx0@w;SDf{9~o2v6Hsx{Xey#1H@l{+Ip+n7WQFR@Fot$sDF#wO z5=`CgH!;stw|V&%-5N(+Rz~X$>t>TA{CZaM?6f5{GZ71+(qG60@|o2Ub~ zxWqGMc(>#zc(WKq3}ec}suMMx{xZE>fLvhM*bTLm5_t}w`=&L2lYEU#*zm4mI-xkz z21s?;V01XS&Lrkw9AnEIO)0jcG*)fsc^RC3)3kBzLyrZ2ZIlW5YOw4ZbH+_w-RX~u zHjzrq1gf8YAd|If2m?}L#?ovM?nQ-6wxUf}jOxCl(wI8@X69NvRn>bo5%V&1ig|u( zf&K8|h`LCVn1i4XqntHyd8uE`V9ez+CFTg?+H77j82cEI}jiiYUi&{5fV>j-2MpW7^ z4igGRGRibp#oB_tMHQCnDYsmFwX6+`#(~g?4=9_+aQFpj2rX0xeg$u%j$;Z2F*B=p z3I-dwF>G5`iE;DWOvjtQS@PReSZ~E{y`B}WI8JJGEU=$IRy$Y4tZcsI5BAB)1eg_5 zf5E$5s{jfGC-}M0ql3q*QBZ_pDKI0eKJ+ouZ_U}q-k;!A9vn%(hQ2(de`9^aXaJ?khnzyZXYiw zx0UzZL5}?kk`oouxpRDW-Go3EVboWCZz913-8W*7IFZOn2V>twnBo_Sn!oe zW@h-0E%lG5xWmTx6^+Kn84GzIiv0`9CdV%1o} zahg&1Z&clum^rLE{U=h!n6mId&~?uJQA*q(sb4d^!nrN|njNP*F*EK5225#&Dd)dbQ)xj|+GfyQ9s#5u2S1%c+R|BJH!DVVjN6cRk)ww+PGnw^ zF%IG8AKmmUrWa|Io9|yZ%EJd9Ipd#VBB`KME%lwLF5(H;@qHZ>cFVz2N~ZDD+)}$N zdaUg+ll+Ztn$e|+27tg!0A_F=j3j|UaP24tMa`TIW^Uew4`K-C18u#{M=?j&IPTB* zCOu?;W!&U*x=xxVBGC>ig>P*lY`HnHTc(zUS<`r3sharCo`%WnY_Mpb zCf`KnUmOJ|Xn=M=7^vrLmc;!g$?k08X^eat+673!04HcOO>E9Um35I!tg5u})mHLJ z^skw_ul9WJ8lv!8N&@hqyGWpwe-kJgpsr*5*!s#$QcdyR#-UZI6B;>VX`LL5kFI@r zb$Gkc9dM$9?b=>2Z48H5~agir!NQt0A%=h88mkvEo za*MA`+uuCU+^U4QY{&qpp>TkpWXX;0blr-O)Py233x?b~tdfMO%VbrX3_F{54Uxq|qOjG5)V!&5(Awq+pO5kw+S_S|f^_NDdrqJPGwNZEf1J z_r=&PNPuvf{;Uu5(`>z7kv&F-F>wOO6VT53sfJSe~q?>?!=wcp~O9j(3< z7@=Vk@@_%o((}hY_ZKc*^frbAqUA=&8z0d{G{wCncvn%qPStX`X;|i!HtWE+bV0o9 z#8d@0Jjz-XTj)M<3OjNu1xWYORW~&cTGtf#P{unu@BSwq) zxg+C1=<3e!KqwKGc;Vtn0;+cy8HErBka-mtn{&p7T>@=2P(}X!=Z$=E z>H&uGEPNNC|MreRyz^J1KAg7CqsYWbX~eP*(I&$0CwNZ@%1dm>BgQ{~n64n@@WswJ zx^|=>c05b&qYQ^^Y22gEq8c7Oao*TVn4YdK%uPRv=ww}}Or-Q93ODv5N)T?t53`>X- z12p8g{kB-YFeRm{OW8+J>4)O;@&duH+n$Y}inn$rL9NdW!JKesK(r5R11Jx`)e zVcg+X@$%1!=@$(|o8}rYRy$nN?e6YpQ{zr9zg{$VR(^G!+$Ur&^=GZe0(*LEsV!3E 
z@*8AKVli+nf+Ro?)k5SLCJ1IcIP#TKtEefh4CSv@ab)je?Ouv1kkZvRH~u%1WwQ8= z`$k6&`@v3pLyP^fQ@_PYlR70H>cDc6CW9R$>M(YK04&Bi7me>58dxgYnTBA1-EH@E z=+c9V??4Zqx>Qd(IgVxR&w}D5K8Al?pKJQ~$FD6R@}N_C8G}Bp4c9*jnRSY9s8Ey@ z`0{czUB8XMa>i#zq{R+)!f8}7b~~|)Oml_q(*3W4rtHS9Lmzz$1+OUJu4IFo$JLc zlwWp|wKutj%Qt_Gy}Y$)09ZZm(R&~`u=)<4FoU17ifAna5@Ke_iFMjQESFt!8^c3* zg6sP11XE!7;4H@O8UCr*(`mu$4M)avuR_0z$r$*@M!`J16NGebRwm|7qS4+gnX`xA zXZ+bOgpRpPBK)POVEOTRo=i#ga@BG53dAIDyAF{P`IVq`Mvjl!#mKcgGvPs)xcLBS zc!-wJ#FnvoRa1R?b6^Bje!$w|OC686H=M35`RPQ8{j3&jB>MP+8-#P?j&TT)b$%9t z_vwg_cTrbV+j*UciJ58O?W&E$405$tgJ1lyI{qAzz#|(*)0+<*;DV~X2w$!$^NguaV6$7)HpX~Evn;VQ zr(D^06vvI`Xax=(jUhqjNT%CR=-ezW}+ za{hep{|8Xq-#ER$g$VsGjig%VU`q-xeIs2Vl+VE%5lCBjCGbrDVT$Pn^DlSDpN|Zy z<$A*wu-bsQ>f!<@3?_4C4O2W%74=hq832tn&VlYUmU}x8dda2;AGNwGWw*lAt_B(3 zh>jGz=<=R!)>~wMvklIPdOi4SxkQN)dbUVggK>NV;W+C>(D%9&&Dr&OL>H*Lx;i>p zRey54WOSc$mv4b{V#LL72t>}056&WpaPxkEsbj-(cf=Qh^O-$yAPuP97v|$Lot$8#SMQ9<#*g&z)J1)c7!sgAp>yR;{l*_v zdL#6?2z}O>9B~XlOQGzFFGBFNM-TF+CZ{dzliaG0>8nD`BY35{8B}kQ`&+#>KagK} zuax|sLk0|8(YC(v@PXC?c#%bm)tjPg{jS0fSjB!azsnM=k;$@@#LesDpXTCOh!Ucf z@W2v@>p`7*#xySb(y;pvaFu7qO5!zTS;94sdJD@>kU@u1=HwGD2c;<$35qe5y!~=S z)>(@qW6L}UpusDFUOlIt0u!mlfXI1S+Zo5$?5W@7;D`|uFv)d~kRLvsO`MIm@STA{ zQ;>vK6Y&ETaR;%L=$gIu7A8<;=FmCr7?C9uvQ4?Z+0YMadPxgqmaY53) zSX;1riB{*hX|Pi1SIlj@H!1GEEv<)4UP}vHSH1RQNz=;Y$+G!n(1U&@tc7e0cq8Te>$P^ZeT@ z9C@7^<<=bgmwll>vX-Ly^6$ZLFXPPLu2`%IcSnyQ%7G z(jh23nmTH_;*ccgp{wSH@uz!`WO{-)GQ%3yd?Ipj{HpA(43=q#^%&o?&ojOslF1Wu z*Iz?;jkFnq{DLTLdC712Nkm7nxNsUWCWz<74qWfGT|S9>LqnN1d@D9mCZ?%oSPI|@ z)s#N2eX7bd3jNQLySZQlKn4rOTU}0c$B{wGe!d)cMh|bXA3eQV+nBCou9>DKtMsvs zX?6nRyshPe(IxjAF$H&^B70fF-W<$dHC;zh_;J`V3Z*}ZF^crda|^uAYF@3_w%EP)g(x%AhV5G^XcVp_u#>j=D?C1VcH0}TwYa(_N zLtn>DhEt2gNR(}zsUH~^ z(;l;~7+rYn+kcIUrrj2s&&SLWn1})rIVkYL7Z-K%_W6jY4%!cIg1_4}tD^0a79Xl4 z%9qC_PA-62wLrxre$zvLQ!6kAV^B;`cr%e>6i|6-_;s)iQm=~NJKZtg3bCRVjrM=vxNWaTN@=FKTpA|K%W^WB zHYrVd3bMk%(Llui!EHXc44V8nuyv}n^T@P#w$H<(frQ(5~06ISP#752M(3lrC4Nh>~|IVhiF;+KUWGr$AYwf_{@!#}NzB z?O0S>WFVN_T|;k(XPl$~Ctl`oD~7E-c1DBdd-MZ3(Y=Ced)t*{r#HSuo0vSfd67O! z{XUJ@KiXFQ@Aj4dFaC25^&LN|pUvBU`%!SE=d-)~j*{4V8Vzo@8SW3Rplhb0aW%((@vv87 zosh9z8j;bZwYxQ=qkXtlTJV+OS=SEc&1;^>8jn8W_RRW`TqI|L5$^F2*HC3piNR_9 z^@Vd`7wSjE=c+VeP9!$mi5Yb5bf09CRMWRy0j_6bBsMHby;Fi}{Z9WtgPoMjY$xjN z#z1{>~dV?i_gBXXBP<$c$G!?r!+isRBYLf0_ zX;P=ZgXHS~e!3aFQ zNeRs%uQn5A? 
z{snG*&;2aq5b~Z|_NVlEWbarIvlGxakJ#R*OaDGUc zn4Dt%m<6@btp3>uwx&(bo6Lxy@^Pc@47o>gxS^}(r8tpFcmjCNW6WVwIkdC6X)G3S zre+HMz*)M%zNc}^Y2%&3kCqmY40QmK>-=5Vm%_XI5RLA;cH=YE$CgN+^g@iHdhfoF zVl0=A8>Uv>sw#CtuEV$b#rPh&}~L3PiBHvR?hp8*)08QD0Tl zR2@ICBPn`2OPxnf_{Tya*_KL&*Vo1@3d`FoIox)F2;ruAi^!tx+UYyJ8$)h5ci&Vw zmLCP0YTa|=O~u30AW^@8bmm48U-a%Jm}rHa&DcmQvv+d$Rl19|sCwJkFEQmJv?^S< ziICI!#s_HUtwZd{!V#jtA&s&kiKE!yjYpc*tpKiR!uN_EfgP8DxA!jj__Ey-HB=O} zTNLZ79B~k%H+Ko^k@RXZ*U=o@%G;H@E3hmuQ>i^^F)7l77zKvsU@-bW6)Fi@=iikm zO!2pm2zXwc6a1=uQpaw1qoaMHosW=@qttk}71gEEPD834m<^fd#;B{7kuGx|}6C;R@&A0n; zV#Pj>mrgi)%g{WN&2ixzI6HpmL{;Qt7WXTQJbF-OZpyC@=oPA*4_`eB7_Pihr0Bq( za^yN^S{5Mk!S(&VHYJ+-U6%7#H4Mdot~BYc!M{2T|FSEl6Cj?r1jbe;gIlykFyUQc znOI~FcF{P{a;R8>*JSI^QB4*iFqzTf#n0!rOH!~L8eYeHP>uA%51vU9Zs5<0@w(>t z%$)85)JKHnQRifybyZ@DD_5UlcG(W)uj>rP7vn^;$#DBeG+2O-c}fq=4HZ24hMgVA z!AY1;On1pW*@&{R!)e(lQgJdUuMNHAGYk9`p*$^gv+k|>i!&a-E>s01(ocrqI|S2S z=(SMS!P3jbPgL-WJiTMF;G=1}H-Hwol;7o1P4H|kTPR^EC1J^QKg z0nFKB5u5Kx4-aUCjr#aAy)Qk@K&WzGiB8YZ|P@yDiIkb9*^$ zPsQ?rPmt92t?%!Uj>g!Mt=TSWSUt<)cuHc({hpfA_kR$X`0r4U|3WVQdzQohE!Ek- OhY|k=ekS`p`F{ZiZ(~0I diff --git a/examples/ernie_sat/README.md b/examples/ernie_sat/README.md deleted file mode 100644 index d3bd13372..000000000 --- a/examples/ernie_sat/README.md +++ /dev/null @@ -1,137 +0,0 @@ -ERNIE-SAT 是可以同时处理中英文的跨语言的语音-语言跨模态大模型,其在语音编辑、个性化语音合成以及跨语言的语音合成等多个任务取得了领先效果。可以应用于语音编辑、个性化合成、语音克隆、同传翻译等一系列场景,该项目供研究使用。 - -## 模型框架 -ERNIE-SAT 中我们提出了两项创新: -- 在预训练过程中将中英双语对应的音素作为输入,实现了跨语言、个性化的软音素映射 -- 采用语言和语音的联合掩码学习实现了语言和语音的对齐 - -[外链图片转存失败,源站可能有防盗链机制,建议将图片保存下来直接上传(img-3lOXKJXE-1655380879339)(.meta/framework.png)] - -## 使用说明 - -### 1.安装飞桨与环境依赖 - -- 本项目的代码基于 Paddle(version>=2.0) -- 本项目开放提供加载 torch 版本的 vocoder 的功能 - - torch version>=1.8 - -- 安装 htk: 在[官方地址](https://htk.eng.cam.ac.uk/)注册完成后,即可进行下载较新版本的 htk (例如 3.4.1)。同时提供[历史版本 htk 下载地址](https://htk.eng.cam.ac.uk/ftp/software/) - - - 1.注册账号,下载 htk - - 2.解压 htk 文件,**放入项目根目录的 tools 文件夹中, 以 htk 文件夹名称放入** - - 3.**注意**: 如果您下载的是 3.4.1 或者更高版本, 需要进入 HTKLib/HRec.c 文件中, **修改 1626 行和 1650 行**, 即把**以下两行的 dur<=0 都修改为 dur<0**,如下所示: - ```bash - 以htk3.4.1版本举例: - (1)第1626行: if (dur<=0 && labid != splabid) HError(8522,"LatFromPaths: Align have dur<=0"); - 修改为: if (dur<0 && labid != splabid) HError(8522,"LatFromPaths: Align have dur<0"); - - (2)1650行: if (dur<=0 && labid != splabid) HError(8522,"LatFromPaths: Align have dur<=0 "); - 修改为: if (dur<0 && labid != splabid) HError(8522,"LatFromPaths: Align have dur<0 "); - ``` - - 4.**编译**: 详情参见解压后的 htk 中的 README 文件(如果未编译, 则无法正常运行) - - - -- 安装 ParallelWaveGAN: 参见[官方地址](https://github.com/kan-bayashi/ParallelWaveGAN):按照该官方链接的安装流程,直接在**项目的根目录下** git clone ParallelWaveGAN 项目并且安装相关依赖即可。 - - -- 安装其他依赖: **sox, libsndfile**等 - -### 2.预训练模型 -预训练模型 ERNIE-SAT 的模型如下所示: -- [ERNIE-SAT_ZH](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/old/model-ernie-sat-base-zh.tar.gz) -- [ERNIE-SAT_EN](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/old/model-ernie-sat-base-en.tar.gz) -- [ERNIE-SAT_ZH_and_EN](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/old/model-ernie-sat-base-en_zh.tar.gz) - - -创建 pretrained_model 文件夹,下载上述 ERNIE-SAT 预训练模型并将其解压: -```bash -mkdir pretrained_model -cd pretrained_model -tar -zxvf model-ernie-sat-base-en.tar.gz -tar -zxvf model-ernie-sat-base-zh.tar.gz -tar -zxvf model-ernie-sat-base-en_zh.tar.gz -``` - -### 3.下载 - -1. 
本项目使用 parallel wavegan 作为声码器(vocoder): - - [pwg_aishell3_ckpt_0.5.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/pwgan/pwg_aishell3_ckpt_0.5.zip) - - 创建 download 文件夹,下载上述预训练的声码器(vocoder)模型并将其解压: - - ```bash - mkdir download - cd download - unzip pwg_aishell3_ckpt_0.5.zip - ``` - -2. 本项目使用 [FastSpeech2](https://arxiv.org/abs/2006.04558) 作为音素(phoneme)的持续时间预测器: - - [fastspeech2_conformer_baker_ckpt_0.5.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_conformer_baker_ckpt_0.5.zip) 中文场景下使用 - - [fastspeech2_nosil_ljspeech_ckpt_0.5.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_nosil_ljspeech_ckpt_0.5.zip) 英文场景下使用 - - 下载上述预训练的 fastspeech2 模型并将其解压: - - ```bash - cd download - unzip fastspeech2_conformer_baker_ckpt_0.5.zip - unzip fastspeech2_nosil_ljspeech_ckpt_0.5.zip - ``` - -3. 本项目使用 HTK 获取输入音频和文本的对齐信息: - - - [aligner.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/old/aligner.zip) - - 下载上述文件到 tools 文件夹并将其解压: - ```bash - cd tools - unzip aligner.zip - ``` - - -### 4.推理 - -本项目当前开源了语音编辑、个性化语音合成、跨语言语音合成的推理代码,后续会逐步开源。 -注:当前英文场下的合成语音采用的声码器默认为 vctk_parallel_wavegan.v1.long, 可在[该链接](https://github.com/kan-bayashi/ParallelWaveGAN)中找到; 若 use_pt_vocoder 参数设置为 False,则英文场景下使用 paddle 版本的声码器。 - -我们提供特定音频文件, 以及其对应的文本、音素相关文件: -- prompt_wav: 提供的音频文件 -- prompt/dev: 基于上述特定音频对应的文本、音素相关文件 - - -```text -prompt_wav -├── p299_096.wav # 样例语音文件1 -├── p243_313.wav # 样例语音文件2 -└── ... -``` - -```text -prompt/dev -├── text # 样例语音对应文本 -├── wav.scp # 样例语音路径 -├── mfa_text # 样例语音对应音素 -├── mfa_start # 样例语音中各个音素的开始时间 -└── mfa_end # 样例语音中各个音素的结束时间 -``` -1. `--am` 声学模型格式符合 {model_name}_{dataset} -2. `--am_config`, `--am_checkpoint`, `--am_stat` 和 `--phones_dict` 是声学模型的参数,对应于 fastspeech2 预训练模型中的 4 个文件。 -3. `--voc` 声码器(vocoder)格式是否符合 {model_name}_{dataset} -4. `--voc_config`, `--voc_checkpoint`, `--voc_stat` 是声码器的参数,对应于 parallel wavegan 预训练模型中的 3 个文件。 -5. `--lang` 对应模型的语言可以是 `zh` 或 `en` 。 -6. `--ngpu` 要使用的 GPU 数,如果 ngpu==0,则使用 cpu。 -7. `--model_name` 模型名称 -8. `--uid` 特定提示(prompt)语音的 id -9. `--new_str` 输入的文本(本次开源暂时先设置特定的文本) -10. `--prefix` 特定音频对应的文本、音素相关文件的地址 -11. `--source_lang` , 源语言 -12. `--target_lang` , 目标语言 -13. `--output_name` , 合成语音名称 -14. `--task_name` , 任务名称, 包括:语音编辑任务、个性化语音合成任务、跨语言语音合成任务 - -运行以下脚本即可进行实验 -```shell -./run_sedit_en.sh # 语音编辑任务(英文) -./run_gen_en.sh # 个性化语音合成任务(英文) -./run_clone_en_to_zh.sh # 跨语言语音合成任务(英文到中文的语音克隆) -``` diff --git a/examples/ernie_sat/local/align.py b/examples/ernie_sat/local/align.py deleted file mode 100755 index ff47cac5b..000000000 --- a/examples/ernie_sat/local/align.py +++ /dev/null @@ -1,454 +0,0 @@ -# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
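The prompt/dev directory described above is just a set of plain-text, two-column files keyed by utterance id: the first token of each line is the uid, and the rest of the line is the value (a transcript, a wav path, or a space-separated list of phone times). A minimal reading sketch is given below; the helper name is illustrative, and the deleted local/utils.py later in this patch provides an equivalent read_2col_text.

```python
from pathlib import Path


def load_two_col(path: str) -> dict:
    """Parse lines like "p299_096 ../../prompt_wav/p299_096.wav" into {uid: value}."""
    data = {}
    for line in Path(path).read_text(encoding="utf-8").splitlines():
        if not line.strip():
            continue
        uid, _, value = line.partition(" ")
        data[uid] = value.strip()
    return data


# Illustrative usage (paths are placeholders for the files listed above):
# text      = load_two_col("prompt/dev/text")       # uid -> transcript
# wav_scp   = load_two_col("prompt/dev/wav.scp")    # uid -> wav path
# mfa_start = load_two_col("prompt/dev/mfa_start")  # uid -> space-separated phone start times
```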
-""" Usage: - align.py wavfile trsfile outwordfile outphonefile -""" -import os -import sys - -PHONEME = 'tools/aligner/english_envir/english2phoneme/phoneme' -MODEL_DIR_EN = 'tools/aligner/english' -MODEL_DIR_ZH = 'tools/aligner/mandarin' -HVITE = 'tools/htk/HTKTools/HVite' -HCOPY = 'tools/htk/HTKTools/HCopy' - - -def get_unk_phns(word_str: str): - tmpbase = '/tmp/tp.' - f = open(tmpbase + 'temp.words', 'w') - f.write(word_str) - f.close() - os.system(PHONEME + ' ' + tmpbase + 'temp.words' + ' ' + tmpbase + - 'temp.phons') - f = open(tmpbase + 'temp.phons', 'r') - lines2 = f.readline().strip().split() - f.close() - phns = [] - for phn in lines2: - phons = phn.replace('\n', '').replace(' ', '') - seq = [] - j = 0 - while (j < len(phons)): - if (phons[j] > 'Z'): - if (phons[j] == 'j'): - seq.append('JH') - elif (phons[j] == 'h'): - seq.append('HH') - else: - seq.append(phons[j].upper()) - j += 1 - else: - p = phons[j:j + 2] - if (p == 'WH'): - seq.append('W') - elif (p in ['TH', 'SH', 'HH', 'DH', 'CH', 'ZH', 'NG']): - seq.append(p) - elif (p == 'AX'): - seq.append('AH0') - else: - seq.append(p + '1') - j += 2 - phns.extend(seq) - return phns - - -def words2phns(line: str): - ''' - Args: - line (str): input text. - eg: for that reason cover is impossible to be given. - Returns: - List[str]: phones of input text. - eg: - ['F', 'AO1', 'R', 'DH', 'AE1', 'T', 'R', 'IY1', 'Z', 'AH0', 'N', 'K', 'AH1', 'V', 'ER0', - 'IH1', 'Z', 'IH2', 'M', 'P', 'AA1', 'S', 'AH0', 'B', 'AH0', 'L', 'T', 'UW1', 'B', 'IY1', - 'G', 'IH1', 'V', 'AH0', 'N'] - - Dict(str, str): key - idx_word - value - phones - eg: - {'0_FOR': ['F', 'AO1', 'R'], '1_THAT': ['DH', 'AE1', 'T'], '2_REASON': ['R', 'IY1', 'Z', 'AH0', 'N'], - '3_COVER': ['K', 'AH1', 'V', 'ER0'], '4_IS': ['IH1', 'Z'], '5_IMPOSSIBLE': ['IH2', 'M', 'P', 'AA1', 'S', 'AH0', 'B', 'AH0', 'L'], - '6_TO': ['T', 'UW1'], '7_BE': ['B', 'IY1'], '8_GIVEN': ['G', 'IH1', 'V', 'AH0', 'N']} - ''' - dictfile = MODEL_DIR_EN + '/dict' - line = line.strip() - words = [] - for pun in [',', '.', ':', ';', '!', '?', '"', '(', ')', '--', '---']: - line = line.replace(pun, ' ') - for wrd in line.split(): - if (wrd[-1] == '-'): - wrd = wrd[:-1] - if (wrd[0] == "'"): - wrd = wrd[1:] - if wrd: - words.append(wrd) - ds = set([]) - word2phns_dict = {} - with open(dictfile, 'r') as fid: - for line in fid: - word = line.split()[0] - ds.add(word) - if word not in word2phns_dict.keys(): - word2phns_dict[word] = " ".join(line.split()[1:]) - - phns = [] - wrd2phns = {} - for index, wrd in enumerate(words): - if wrd == '[MASK]': - wrd2phns[str(index) + "_" + wrd] = [wrd] - phns.append(wrd) - elif (wrd.upper() not in ds): - wrd2phns[str(index) + "_" + wrd.upper()] = get_unk_phns(wrd) - phns.extend(get_unk_phns(wrd)) - else: - wrd2phns[str(index) + - "_" + wrd.upper()] = word2phns_dict[wrd.upper()].split() - phns.extend(word2phns_dict[wrd.upper()].split()) - return phns, wrd2phns - - -def words2phns_zh(line: str): - dictfile = MODEL_DIR_ZH + '/dict' - line = line.strip() - words = [] - for pun in [ - ',', '.', ':', ';', '!', '?', '"', '(', ')', '--', '---', u',', - u'。', u':', u';', u'!', u'?', u'(', u')' - ]: - line = line.replace(pun, ' ') - for wrd in line.split(): - if (wrd[-1] == '-'): - wrd = wrd[:-1] - if (wrd[0] == "'"): - wrd = wrd[1:] - if wrd: - words.append(wrd) - - ds = set([]) - word2phns_dict = {} - with open(dictfile, 'r') as fid: - for line in fid: - word = line.split()[0] - ds.add(word) - if word not in word2phns_dict.keys(): - word2phns_dict[word] = " ".join(line.split()[1:]) - - phns 
= [] - wrd2phns = {} - for index, wrd in enumerate(words): - if wrd == '[MASK]': - wrd2phns[str(index) + "_" + wrd] = [wrd] - phns.append(wrd) - elif (wrd.upper() not in ds): - print("出现非法词错误,请输入正确的文本...") - else: - wrd2phns[str(index) + "_" + wrd] = word2phns_dict[wrd].split() - phns.extend(word2phns_dict[wrd].split()) - - return phns, wrd2phns - - -def prep_txt_zh(line: str, tmpbase: str, dictfile: str): - - words = [] - line = line.strip() - for pun in [ - ',', '.', ':', ';', '!', '?', '"', '(', ')', '--', '---', u',', - u'。', u':', u';', u'!', u'?', u'(', u')' - ]: - line = line.replace(pun, ' ') - for wrd in line.split(): - if (wrd[-1] == '-'): - wrd = wrd[:-1] - if (wrd[0] == "'"): - wrd = wrd[1:] - if wrd: - words.append(wrd) - - ds = set([]) - with open(dictfile, 'r') as fid: - for line in fid: - ds.add(line.split()[0]) - - unk_words = set([]) - with open(tmpbase + '.txt', 'w') as fwid: - for wrd in words: - if (wrd not in ds): - unk_words.add(wrd) - fwid.write(wrd + ' ') - fwid.write('\n') - return unk_words - - -def prep_txt_en(line: str, tmpbase, dictfile): - - words = [] - - line = line.strip() - for pun in [',', '.', ':', ';', '!', '?', '"', '(', ')', '--', '---']: - line = line.replace(pun, ' ') - for wrd in line.split(): - if (wrd[-1] == '-'): - wrd = wrd[:-1] - if (wrd[0] == "'"): - wrd = wrd[1:] - if wrd: - words.append(wrd) - - ds = set([]) - with open(dictfile, 'r') as fid: - for line in fid: - ds.add(line.split()[0]) - - unk_words = set([]) - with open(tmpbase + '.txt', 'w') as fwid: - for wrd in words: - if (wrd.upper() not in ds): - unk_words.add(wrd.upper()) - fwid.write(wrd + ' ') - fwid.write('\n') - - #generate pronounciations for unknows words using 'letter to sound' - with open(tmpbase + '_unk.words', 'w') as fwid: - for unk in unk_words: - fwid.write(unk + '\n') - try: - os.system(PHONEME + ' ' + tmpbase + '_unk.words' + ' ' + tmpbase + - '_unk.phons') - except Exception: - print('english2phoneme error!') - sys.exit(1) - - #add unknown words to the standard dictionary, generate a tmp dictionary for alignment - fw = open(tmpbase + '.dict', 'w') - with open(dictfile, 'r') as fid: - for line in fid: - fw.write(line) - f = open(tmpbase + '_unk.words', 'r') - lines1 = f.readlines() - f.close() - f = open(tmpbase + '_unk.phons', 'r') - lines2 = f.readlines() - f.close() - for i in range(len(lines1)): - wrd = lines1[i].replace('\n', '') - phons = lines2[i].replace('\n', '').replace(' ', '') - seq = [] - j = 0 - while (j < len(phons)): - if (phons[j] > 'Z'): - if (phons[j] == 'j'): - seq.append('JH') - elif (phons[j] == 'h'): - seq.append('HH') - else: - seq.append(phons[j].upper()) - j += 1 - else: - p = phons[j:j + 2] - if (p == 'WH'): - seq.append('W') - elif (p in ['TH', 'SH', 'HH', 'DH', 'CH', 'ZH', 'NG']): - seq.append(p) - elif (p == 'AX'): - seq.append('AH0') - else: - seq.append(p + '1') - j += 2 - - fw.write(wrd + ' ') - for s in seq: - fw.write(' ' + s) - fw.write('\n') - fw.close() - - -def prep_mlf(txt: str, tmpbase: str): - - with open(tmpbase + '.mlf', 'w') as fwid: - fwid.write('#!MLF!#\n') - fwid.write('"' + tmpbase + '.lab"\n') - fwid.write('sp\n') - wrds = txt.split() - for wrd in wrds: - fwid.write(wrd.upper() + '\n') - fwid.write('sp\n') - fwid.write('.\n') - - -def _get_user(): - return os.path.expanduser('~').split("/")[-1] - - -def alignment(wav_path: str, text: str): - ''' - intervals: List[phn, start, end] - ''' - tmpbase = '/tmp/' + _get_user() + '_' + str(os.getpid()) - - #prepare wav and trs files - try: - os.system('sox ' + wav_path + ' -r 
16000 ' + tmpbase + '.wav remix -') - except Exception: - print('sox error!') - return None - - #prepare clean_transcript file - try: - prep_txt_en(line=text, tmpbase=tmpbase, dictfile=MODEL_DIR_EN + '/dict') - except Exception: - print('prep_txt error!') - return None - - #prepare mlf file - try: - with open(tmpbase + '.txt', 'r') as fid: - txt = fid.readline() - prep_mlf(txt, tmpbase) - except Exception: - print('prep_mlf error!') - return None - - #prepare scp - try: - os.system(HCOPY + ' -C ' + MODEL_DIR_EN + '/16000/config ' + tmpbase + - '.wav' + ' ' + tmpbase + '.plp') - except Exception: - print('HCopy error!') - return None - - #run alignment - try: - os.system(HVITE + ' -a -m -t 10000.0 10000.0 100000.0 -I ' + tmpbase + - '.mlf -H ' + MODEL_DIR_EN + '/16000/macros -H ' + MODEL_DIR_EN - + '/16000/hmmdefs -i ' + tmpbase + '.aligned ' + tmpbase + - '.dict ' + MODEL_DIR_EN + '/monophones ' + tmpbase + - '.plp 2>&1 > /dev/null') - except Exception: - print('HVite error!') - return None - - with open(tmpbase + '.txt', 'r') as fid: - words = fid.readline().strip().split() - words = txt.strip().split() - words.reverse() - - with open(tmpbase + '.aligned', 'r') as fid: - lines = fid.readlines() - i = 2 - intervals = [] - word2phns = {} - current_word = '' - index = 0 - while (i < len(lines)): - splited_line = lines[i].strip().split() - if (len(splited_line) >= 4) and (splited_line[0] != splited_line[1]): - phn = splited_line[2] - pst = (int(splited_line[0]) / 1000 + 125) / 10000 - pen = (int(splited_line[1]) / 1000 + 125) / 10000 - intervals.append([phn, pst, pen]) - # splited_line[-1]!='sp' - if len(splited_line) == 5: - current_word = str(index) + '_' + splited_line[-1] - word2phns[current_word] = phn - index += 1 - elif len(splited_line) == 4: - word2phns[current_word] += ' ' + phn - i += 1 - return intervals, word2phns - - -def alignment_zh(wav_path: str, text: str): - tmpbase = '/tmp/' + _get_user() + '_' + str(os.getpid()) - - #prepare wav and trs files - try: - os.system('sox ' + wav_path + ' -r 16000 -b 16 ' + tmpbase + - '.wav remix -') - - except Exception: - print('sox error!') - return None - - #prepare clean_transcript file - try: - unk_words = prep_txt_zh( - line=text, tmpbase=tmpbase, dictfile=MODEL_DIR_ZH + '/dict') - if unk_words: - print('Error! 
Please add the following words to dictionary:') - for unk in unk_words: - print("非法words: ", unk) - except Exception: - print('prep_txt error!') - return None - - #prepare mlf file - try: - with open(tmpbase + '.txt', 'r') as fid: - txt = fid.readline() - prep_mlf(txt, tmpbase) - except Exception: - print('prep_mlf error!') - return None - - #prepare scp - try: - os.system(HCOPY + ' -C ' + MODEL_DIR_ZH + '/16000/config ' + tmpbase + - '.wav' + ' ' + tmpbase + '.plp') - except Exception: - print('HCopy error!') - return None - - #run alignment - try: - os.system(HVITE + ' -a -m -t 10000.0 10000.0 100000.0 -I ' + tmpbase + - '.mlf -H ' + MODEL_DIR_ZH + '/16000/macros -H ' + MODEL_DIR_ZH - + '/16000/hmmdefs -i ' + tmpbase + '.aligned ' + MODEL_DIR_ZH - + '/dict ' + MODEL_DIR_ZH + '/monophones ' + tmpbase + - '.plp 2>&1 > /dev/null') - - except Exception: - print('HVite error!') - return None - - with open(tmpbase + '.txt', 'r') as fid: - words = fid.readline().strip().split() - words = txt.strip().split() - words.reverse() - - with open(tmpbase + '.aligned', 'r') as fid: - lines = fid.readlines() - - i = 2 - intervals = [] - word2phns = {} - current_word = '' - index = 0 - while (i < len(lines)): - splited_line = lines[i].strip().split() - if (len(splited_line) >= 4) and (splited_line[0] != splited_line[1]): - phn = splited_line[2] - pst = (int(splited_line[0]) / 1000 + 125) / 10000 - pen = (int(splited_line[1]) / 1000 + 125) / 10000 - intervals.append([phn, pst, pen]) - # splited_line[-1]!='sp' - if len(splited_line) == 5: - current_word = str(index) + '_' + splited_line[-1] - word2phns[current_word] = phn - index += 1 - elif len(splited_line) == 4: - word2phns[current_word] += ' ' + phn - i += 1 - return intervals, word2phns diff --git a/examples/ernie_sat/local/inference.py b/examples/ernie_sat/local/inference.py deleted file mode 100644 index e6a0788fd..000000000 --- a/examples/ernie_sat/local/inference.py +++ /dev/null @@ -1,609 +0,0 @@ -# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
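The English G2P path in the align.py removed above is a dictionary lookup with a letter-to-sound fallback (get_unk_phns), while alignment()/alignment_zh() wrap HTK's HCopy/HVite to produce phone-level time intervals. A hedged usage sketch, assuming sox, the HTK binaries and the tools/aligner models referenced by MODEL_DIR_EN are installed as described in the README:

```python
from align import alignment, words2phns  # helpers from the deleted local/align.py

text = "for that reason cover is impossible to be given."

# Text-only G2P: a flat phone list plus a per-word mapping, e.g.
# phns     -> ['F', 'AO1', 'R', 'DH', 'AE1', 'T', 'R', 'IY1', 'Z', ...]
# wrd2phns -> {'0_FOR': ['F', 'AO1', 'R'], '1_THAT': ['DH', 'AE1', 'T'], ...}
phns, wrd2phns = words2phns(text)

# Forced alignment additionally needs the prompt waveform; it returns phone
# intervals ([phn, start, end]) plus a word-to-phone mapping, and None on failure.
# intervals, word2phns = alignment("../../prompt_wav/p243_313.wav", text)
```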
-import os -import random -from typing import Dict -from typing import List - -import librosa -import numpy as np -import paddle -import soundfile as sf -from align import alignment -from align import alignment_zh -from align import words2phns -from align import words2phns_zh -from paddle import nn -from sedit_arg_parser import parse_args -from utils import eval_durs -from utils import get_voc_out -from utils import is_chinese -from utils import load_num_sequence_text -from utils import read_2col_text - -from paddlespeech.t2s.datasets.am_batch_fn import build_mlm_collate_fn -from paddlespeech.t2s.models.ernie_sat.mlm import build_model_from_file - -random.seed(0) -np.random.seed(0) - - -def get_wav(wav_path: str, - source_lang: str='english', - target_lang: str='english', - model_name: str="paddle_checkpoint_en", - old_str: str="", - new_str: str="", - non_autoreg: bool=True): - wav_org, output_feat, old_span_bdy, new_span_bdy, fs, hop_length = get_mlm_output( - source_lang=source_lang, - target_lang=target_lang, - model_name=model_name, - wav_path=wav_path, - old_str=old_str, - new_str=new_str, - use_teacher_forcing=non_autoreg) - - masked_feat = output_feat[new_span_bdy[0]:new_span_bdy[1]] - - alt_wav = get_voc_out(masked_feat) - - old_time_bdy = [hop_length * x for x in old_span_bdy] - - wav_replaced = np.concatenate( - [wav_org[:old_time_bdy[0]], alt_wav, wav_org[old_time_bdy[1]:]]) - - data_dict = {"origin": wav_org, "output": wav_replaced} - - return data_dict - - -def load_model(model_name: str="paddle_checkpoint_en"): - config_path = './pretrained_model/{}/config.yaml'.format(model_name) - model_path = './pretrained_model/{}/model.pdparams'.format(model_name) - mlm_model, conf = build_model_from_file( - config_file=config_path, model_file=model_path) - return mlm_model, conf - - -def read_data(uid: str, prefix: os.PathLike): - # 获取 uid 对应的文本 - mfa_text = read_2col_text(prefix + '/text')[uid] - # 获取 uid 对应的音频路径 - mfa_wav_path = read_2col_text(prefix + '/wav.scp')[uid] - if not os.path.isabs(mfa_wav_path): - mfa_wav_path = prefix + mfa_wav_path - return mfa_text, mfa_wav_path - - -def get_align_data(uid: str, prefix: os.PathLike): - mfa_path = prefix + "mfa_" - mfa_text = read_2col_text(mfa_path + 'text')[uid] - mfa_start = load_num_sequence_text( - mfa_path + 'start', loader_type='text_float')[uid] - mfa_end = load_num_sequence_text( - mfa_path + 'end', loader_type='text_float')[uid] - mfa_wav_path = read_2col_text(mfa_path + 'wav.scp')[uid] - return mfa_text, mfa_start, mfa_end, mfa_wav_path - - -# 获取需要被 mask 的 mel 帧的范围 -def get_masked_mel_bdy(mfa_start: List[float], - mfa_end: List[float], - fs: int, - hop_length: int, - span_to_repl: List[List[int]]): - align_start = np.array(mfa_start) - align_end = np.array(mfa_end) - align_start = np.floor(fs * align_start / hop_length).astype('int') - align_end = np.floor(fs * align_end / hop_length).astype('int') - if span_to_repl[0] >= len(mfa_start): - span_bdy = [align_end[-1], align_end[-1]] - else: - span_bdy = [ - align_start[span_to_repl[0]], align_end[span_to_repl[1] - 1] - ] - return span_bdy, align_start, align_end - - -def recover_dict(word2phns: Dict[str, str], tp_word2phns: Dict[str, str]): - dic = {} - keys_to_del = [] - exist_idx = [] - sp_count = 0 - add_sp_count = 0 - for key in word2phns.keys(): - idx, wrd = key.split('_') - if wrd == 'sp': - sp_count += 1 - exist_idx.append(int(idx)) - else: - keys_to_del.append(key) - - for key in keys_to_del: - del word2phns[key] - - cur_id = 0 - for key in tp_word2phns.keys(): - if 
cur_id in exist_idx: - dic[str(cur_id) + "_sp"] = 'sp' - cur_id += 1 - add_sp_count += 1 - idx, wrd = key.split('_') - dic[str(cur_id) + "_" + wrd] = tp_word2phns[key] - cur_id += 1 - - if add_sp_count + 1 == sp_count: - dic[str(cur_id) + "_sp"] = 'sp' - add_sp_count += 1 - - assert add_sp_count == sp_count, "sp are not added in dic" - return dic - - -def get_max_idx(dic): - return sorted([int(key.split('_')[0]) for key in dic.keys()])[-1] - - -def get_phns_and_spans(wav_path: str, - old_str: str="", - new_str: str="", - source_lang: str="english", - target_lang: str="english"): - is_append = (old_str == new_str[:len(old_str)]) - old_phns, mfa_start, mfa_end = [], [], [] - # source - if source_lang == "english": - intervals, word2phns = alignment(wav_path, old_str) - elif source_lang == "chinese": - intervals, word2phns = alignment_zh(wav_path, old_str) - _, tp_word2phns = words2phns_zh(old_str) - - for key, value in tp_word2phns.items(): - idx, wrd = key.split('_') - cur_val = " ".join(value) - tp_word2phns[key] = cur_val - - word2phns = recover_dict(word2phns, tp_word2phns) - else: - assert source_lang == "chinese" or source_lang == "english", \ - "source_lang is wrong..." - - for item in intervals: - old_phns.append(item[0]) - mfa_start.append(float(item[1])) - mfa_end.append(float(item[2])) - # target - if is_append and (source_lang != target_lang): - cross_lingual_clone = True - else: - cross_lingual_clone = False - - if cross_lingual_clone: - str_origin = new_str[:len(old_str)] - str_append = new_str[len(old_str):] - - if target_lang == "chinese": - phns_origin, origin_word2phns = words2phns(str_origin) - phns_append, append_word2phns_tmp = words2phns_zh(str_append) - - elif target_lang == "english": - # 原始句子 - phns_origin, origin_word2phns = words2phns_zh(str_origin) - # clone 句子 - phns_append, append_word2phns_tmp = words2phns(str_append) - else: - assert target_lang == "chinese" or target_lang == "english", \ - "cloning is not support for this language, please check it." - - new_phns = phns_origin + phns_append - - append_word2phns = {} - length = len(origin_word2phns) - for key, value in append_word2phns_tmp.items(): - idx, wrd = key.split('_') - append_word2phns[str(int(idx) + length) + '_' + wrd] = value - new_word2phns = origin_word2phns.copy() - new_word2phns.update(append_word2phns) - - else: - if source_lang == target_lang and target_lang == "english": - new_phns, new_word2phns = words2phns(new_str) - elif source_lang == target_lang and target_lang == "chinese": - new_phns, new_word2phns = words2phns_zh(new_str) - else: - assert source_lang == target_lang, \ - "source language is not same with target language..." 
- - span_to_repl = [0, len(old_phns) - 1] - span_to_add = [0, len(new_phns) - 1] - left_idx = 0 - new_phns_left = [] - sp_count = 0 - # find the left different index - for key in word2phns.keys(): - idx, wrd = key.split('_') - if wrd == 'sp': - sp_count += 1 - new_phns_left.append('sp') - else: - idx = str(int(idx) - sp_count) - if idx + '_' + wrd in new_word2phns: - left_idx += len(new_word2phns[idx + '_' + wrd]) - new_phns_left.extend(word2phns[key].split()) - else: - span_to_repl[0] = len(new_phns_left) - span_to_add[0] = len(new_phns_left) - break - - # reverse word2phns and new_word2phns - right_idx = 0 - new_phns_right = [] - sp_count = 0 - word2phns_max_idx = get_max_idx(word2phns) - new_word2phns_max_idx = get_max_idx(new_word2phns) - new_phns_mid = [] - if is_append: - new_phns_right = [] - new_phns_mid = new_phns[left_idx:] - span_to_repl[0] = len(new_phns_left) - span_to_add[0] = len(new_phns_left) - span_to_add[1] = len(new_phns_left) + len(new_phns_mid) - span_to_repl[1] = len(old_phns) - len(new_phns_right) - # speech edit - else: - for key in list(word2phns.keys())[::-1]: - idx, wrd = key.split('_') - if wrd == 'sp': - sp_count += 1 - new_phns_right = ['sp'] + new_phns_right - else: - idx = str(new_word2phns_max_idx - (word2phns_max_idx - int(idx) - - sp_count)) - if idx + '_' + wrd in new_word2phns: - right_idx -= len(new_word2phns[idx + '_' + wrd]) - new_phns_right = word2phns[key].split() + new_phns_right - else: - span_to_repl[1] = len(old_phns) - len(new_phns_right) - new_phns_mid = new_phns[left_idx:right_idx] - span_to_add[1] = len(new_phns_left) + len(new_phns_mid) - if len(new_phns_mid) == 0: - span_to_add[1] = min(span_to_add[1] + 1, len(new_phns)) - span_to_add[0] = max(0, span_to_add[0] - 1) - span_to_repl[0] = max(0, span_to_repl[0] - 1) - span_to_repl[1] = min(span_to_repl[1] + 1, - len(old_phns)) - break - new_phns = new_phns_left + new_phns_mid + new_phns_right - ''' - For that reason cover should not be given. - For that reason cover is impossible to be given. 
- span_to_repl: [17, 23] "should not" - span_to_add: [17, 30] "is impossible to" - ''' - return mfa_start, mfa_end, old_phns, new_phns, span_to_repl, span_to_add - - -# mfa 获得的 duration 和 fs2 的 duration_predictor 获取的 duration 可能不同 -# 此处获得一个缩放比例, 用于预测值和真实值之间的缩放 -def get_dur_adj_factor(orig_dur: List[int], - pred_dur: List[int], - phns: List[str]): - length = 0 - factor_list = [] - for orig, pred, phn in zip(orig_dur, pred_dur, phns): - if pred == 0 or phn == 'sp': - continue - else: - factor_list.append(orig / pred) - factor_list = np.array(factor_list) - factor_list.sort() - if len(factor_list) < 5: - return 1 - length = 2 - avg = np.average(factor_list[length:-length]) - return avg - - -def prep_feats_with_dur(wav_path: str, - source_lang: str="English", - target_lang: str="English", - old_str: str="", - new_str: str="", - mask_reconstruct: bool=False, - duration_adjust: bool=True, - start_end_sp: bool=False, - fs: int=24000, - hop_length: int=300): - ''' - Returns: - np.ndarray: new wav, replace the part to be edited in original wav with 0 - List[str]: new phones - List[float]: mfa start of new wav - List[float]: mfa end of new wav - List[int]: masked mel boundary of original wav - List[int]: masked mel boundary of new wav - ''' - wav_org, _ = librosa.load(wav_path, sr=fs) - - mfa_start, mfa_end, old_phns, new_phns, span_to_repl, span_to_add = get_phns_and_spans( - wav_path=wav_path, - old_str=old_str, - new_str=new_str, - source_lang=source_lang, - target_lang=target_lang) - - if start_end_sp: - if new_phns[-1] != 'sp': - new_phns = new_phns + ['sp'] - # 中文的 phns 不一定都在 fastspeech2 的字典里, 用 sp 代替 - if target_lang == "english" or target_lang == "chinese": - old_durs = eval_durs(old_phns, target_lang=source_lang) - else: - assert target_lang == "chinese" or target_lang == "english", \ - "calculate duration_predict is not support for this language..." - - orig_old_durs = [e - s for e, s in zip(mfa_end, mfa_start)] - if '[MASK]' in new_str: - new_phns = old_phns - span_to_add = span_to_repl - d_factor_left = get_dur_adj_factor( - orig_dur=orig_old_durs[:span_to_repl[0]], - pred_dur=old_durs[:span_to_repl[0]], - phns=old_phns[:span_to_repl[0]]) - d_factor_right = get_dur_adj_factor( - orig_dur=orig_old_durs[span_to_repl[1]:], - pred_dur=old_durs[span_to_repl[1]:], - phns=old_phns[span_to_repl[1]:]) - d_factor = (d_factor_left + d_factor_right) / 2 - new_durs_adjusted = [d_factor * i for i in old_durs] - else: - if duration_adjust: - d_factor = get_dur_adj_factor( - orig_dur=orig_old_durs, pred_dur=old_durs, phns=old_phns) - d_factor = d_factor * 1.25 - else: - d_factor = 1 - - if target_lang == "english" or target_lang == "chinese": - new_durs = eval_durs(new_phns, target_lang=target_lang) - else: - assert target_lang == "chinese" or target_lang == "english", \ - "calculate duration_predict is not support for this language..." 
- - new_durs_adjusted = [d_factor * i for i in new_durs] - - new_span_dur_sum = sum(new_durs_adjusted[span_to_add[0]:span_to_add[1]]) - old_span_dur_sum = sum(orig_old_durs[span_to_repl[0]:span_to_repl[1]]) - dur_offset = new_span_dur_sum - old_span_dur_sum - new_mfa_start = mfa_start[:span_to_repl[0]] - new_mfa_end = mfa_end[:span_to_repl[0]] - for i in new_durs_adjusted[span_to_add[0]:span_to_add[1]]: - if len(new_mfa_end) == 0: - new_mfa_start.append(0) - new_mfa_end.append(i) - else: - new_mfa_start.append(new_mfa_end[-1]) - new_mfa_end.append(new_mfa_end[-1] + i) - new_mfa_start += [i + dur_offset for i in mfa_start[span_to_repl[1]:]] - new_mfa_end += [i + dur_offset for i in mfa_end[span_to_repl[1]:]] - - # 3. get new wav - # 在原始句子后拼接 - if span_to_repl[0] >= len(mfa_start): - left_idx = len(wav_org) - right_idx = left_idx - # 在原始句子中间替换 - else: - left_idx = int(np.floor(mfa_start[span_to_repl[0]] * fs)) - right_idx = int(np.ceil(mfa_end[span_to_repl[1] - 1] * fs)) - blank_wav = np.zeros( - (int(np.ceil(new_span_dur_sum * fs)), ), dtype=wav_org.dtype) - # 原始音频,需要编辑的部分替换成空音频,空音频的时间由 fs2 的 duration_predictor 决定 - new_wav = np.concatenate( - [wav_org[:left_idx], blank_wav, wav_org[right_idx:]]) - - # 4. get old and new mel span to be mask - # [92, 92] - - old_span_bdy, mfa_start, mfa_end = get_masked_mel_bdy( - mfa_start=mfa_start, - mfa_end=mfa_end, - fs=fs, - hop_length=hop_length, - span_to_repl=span_to_repl) - # [92, 174] - # new_mfa_start, new_mfa_end 时间级别的开始和结束时间 -> 帧级别 - new_span_bdy, new_mfa_start, new_mfa_end = get_masked_mel_bdy( - mfa_start=new_mfa_start, - mfa_end=new_mfa_end, - fs=fs, - hop_length=hop_length, - span_to_repl=span_to_add) - - # old_span_bdy, new_span_bdy 是帧级别的范围 - return new_wav, new_phns, new_mfa_start, new_mfa_end, old_span_bdy, new_span_bdy - - -def prep_feats(wav_path: str, - source_lang: str="english", - target_lang: str="english", - old_str: str="", - new_str: str="", - duration_adjust: bool=True, - start_end_sp: bool=False, - mask_reconstruct: bool=False, - fs: int=24000, - hop_length: int=300, - token_list: List[str]=[]): - wav, phns, mfa_start, mfa_end, old_span_bdy, new_span_bdy = prep_feats_with_dur( - source_lang=source_lang, - target_lang=target_lang, - old_str=old_str, - new_str=new_str, - wav_path=wav_path, - duration_adjust=duration_adjust, - start_end_sp=start_end_sp, - mask_reconstruct=mask_reconstruct, - fs=fs, - hop_length=hop_length) - - token_to_id = {item: i for i, item in enumerate(token_list)} - text = np.array( - list(map(lambda x: token_to_id.get(x, token_to_id['']), phns))) - span_bdy = np.array(new_span_bdy) - - batch = [('1', { - "speech": wav, - "align_start": mfa_start, - "align_end": mfa_end, - "text": text, - "span_bdy": span_bdy - })] - - return batch, old_span_bdy, new_span_bdy - - -def decode_with_model(mlm_model: nn.Layer, - collate_fn, - wav_path: str, - source_lang: str="english", - target_lang: str="english", - old_str: str="", - new_str: str="", - use_teacher_forcing: bool=False, - duration_adjust: bool=True, - start_end_sp: bool=False, - fs: int=24000, - hop_length: int=300, - token_list: List[str]=[]): - batch, old_span_bdy, new_span_bdy = prep_feats( - source_lang=source_lang, - target_lang=target_lang, - wav_path=wav_path, - old_str=old_str, - new_str=new_str, - duration_adjust=duration_adjust, - start_end_sp=start_end_sp, - fs=fs, - hop_length=hop_length, - token_list=token_list) - - feats = collate_fn(batch)[1] - - if 'text_masked_pos' in feats.keys(): - feats.pop('text_masked_pos') - - output = 
mlm_model.inference( - text=feats['text'], - speech=feats['speech'], - masked_pos=feats['masked_pos'], - speech_mask=feats['speech_mask'], - text_mask=feats['text_mask'], - speech_seg_pos=feats['speech_seg_pos'], - text_seg_pos=feats['text_seg_pos'], - span_bdy=new_span_bdy, - use_teacher_forcing=use_teacher_forcing) - - # 拼接音频 - output_feat = paddle.concat(x=output, axis=0) - wav_org, _ = librosa.load(wav_path, sr=fs) - return wav_org, output_feat, old_span_bdy, new_span_bdy, fs, hop_length - - -def get_mlm_output(wav_path: str, - model_name: str="paddle_checkpoint_en", - source_lang: str="english", - target_lang: str="english", - old_str: str="", - new_str: str="", - use_teacher_forcing: bool=False, - duration_adjust: bool=True, - start_end_sp: bool=False): - mlm_model, train_conf = load_model(model_name) - mlm_model.eval() - - collate_fn = build_mlm_collate_fn( - sr=train_conf.feats_extract_conf['fs'], - n_fft=train_conf.feats_extract_conf['n_fft'], - hop_length=train_conf.feats_extract_conf['hop_length'], - win_length=train_conf.feats_extract_conf['win_length'], - n_mels=train_conf.feats_extract_conf['n_mels'], - fmin=train_conf.feats_extract_conf['fmin'], - fmax=train_conf.feats_extract_conf['fmax'], - mlm_prob=train_conf['mlm_prob'], - mean_phn_span=train_conf['mean_phn_span'], - seg_emb=train_conf.encoder_conf['input_layer'] == 'sega_mlm') - - return decode_with_model( - source_lang=source_lang, - target_lang=target_lang, - mlm_model=mlm_model, - collate_fn=collate_fn, - wav_path=wav_path, - old_str=old_str, - new_str=new_str, - use_teacher_forcing=use_teacher_forcing, - duration_adjust=duration_adjust, - start_end_sp=start_end_sp, - fs=train_conf.feats_extract_conf['fs'], - hop_length=train_conf.feats_extract_conf['hop_length'], - token_list=train_conf.token_list) - - -def evaluate(uid: str, - source_lang: str="english", - target_lang: str="english", - prefix: os.PathLike="./prompt/dev/", - model_name: str="paddle_checkpoint_en", - new_str: str="", - prompt_decoding: bool=False, - task_name: str=None): - - # get origin text and path of origin wav - old_str, wav_path = read_data(uid=uid, prefix=prefix) - - if task_name == 'edit': - new_str = new_str - elif task_name == 'synthesize': - new_str = old_str + new_str - else: - new_str = old_str + ' '.join([ch for ch in new_str if is_chinese(ch)]) - - print('new_str is ', new_str) - - results_dict = get_wav( - source_lang=source_lang, - target_lang=target_lang, - model_name=model_name, - wav_path=wav_path, - old_str=old_str, - new_str=new_str) - return results_dict - - -if __name__ == "__main__": - # parse config and args - args = parse_args() - - data_dict = evaluate( - uid=args.uid, - source_lang=args.source_lang, - target_lang=args.target_lang, - prefix=args.prefix, - model_name=args.model_name, - new_str=args.new_str, - task_name=args.task_name) - sf.write(args.output_name, data_dict['output'], samplerate=24000) - print("finished...") diff --git a/examples/ernie_sat/local/inference_new.py b/examples/ernie_sat/local/inference_new.py deleted file mode 100644 index 525967eb1..000000000 --- a/examples/ernie_sat/local/inference_new.py +++ /dev/null @@ -1,622 +0,0 @@ -# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
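Taken together, the inference.py removed above implements the editing pipeline end to end: read the prompt text and wav, align and diff the phone sequences, plan new durations, mask the edited mel span, run the masked-LM acoustic model, vocode the predicted frames and splice them back into the original waveform. The sketch below only illustrates the call shape; the vocoder and duration-predictor checkpoints are still picked up from the command-line flags defined in sedit_arg_parser.py, so the run_*.sh scripts later in this patch remain the intended entry point.

```python
import soundfile as sf
from inference import evaluate  # from the deleted local/inference.py

# Sample values taken from run_gen_en.sh: synthesize new English content in the
# voice of prompt utterance p299_096.
results = evaluate(
    uid="p299_096",
    source_lang="english",
    target_lang="english",
    prefix="./prompt/dev/",
    model_name="paddle_checkpoint_en",
    new_str="I enjoy my life, do you?",
    task_name="synthesize")  # 'edit' keeps new_str as-is; 'synthesize' appends it to the prompt text

# results holds the untouched prompt waveform ('origin') and the edited waveform ('output').
sf.write("pred_gen.wav", results["output"], samplerate=24000)
```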
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os -import random -from typing import Dict -from typing import List - -import librosa -import numpy as np -import paddle -import soundfile as sf -import yaml -from align import alignment -from align import alignment_zh -from align import words2phns -from align import words2phns_zh -from paddle import nn -from sedit_arg_parser import parse_args -from utils import eval_durs -from utils import get_voc_out -from utils import is_chinese -from utils import load_num_sequence_text -from utils import read_2col_text -from yacs.config import CfgNode - -from paddlespeech.t2s.datasets.am_batch_fn import build_mlm_collate_fn -from paddlespeech.t2s.models.ernie_sat.ernie_sat import ErnieSAT - -random.seed(0) -np.random.seed(0) - - -def get_wav(wav_path: str, - source_lang: str='english', - target_lang: str='english', - model_name: str="paddle_checkpoint_en", - old_str: str="", - new_str: str="", - non_autoreg: bool=True): - wav_org, output_feat, old_span_bdy, new_span_bdy, fs, hop_length = get_mlm_output( - source_lang=source_lang, - target_lang=target_lang, - model_name=model_name, - wav_path=wav_path, - old_str=old_str, - new_str=new_str, - use_teacher_forcing=non_autoreg) - - masked_feat = output_feat[new_span_bdy[0]:new_span_bdy[1]] - - alt_wav = get_voc_out(masked_feat) - - old_time_bdy = [hop_length * x for x in old_span_bdy] - - wav_replaced = np.concatenate( - [wav_org[:old_time_bdy[0]], alt_wav, wav_org[old_time_bdy[1]:]]) - - data_dict = {"origin": wav_org, "output": wav_replaced} - - return data_dict - - -def load_model(model_name: str="paddle_checkpoint_en"): - config_path = './pretrained_model/{}/default.yaml'.format(model_name) - model_path = './pretrained_model/{}/model.pdparams'.format(model_name) - with open(config_path) as f: - conf = CfgNode(yaml.safe_load(f)) - token_list = list(conf.token_list) - vocab_size = len(token_list) - odim = conf.n_mels - mlm_model = ErnieSAT(idim=vocab_size, odim=odim, **conf["model"]) - state_dict = paddle.load(model_path) - new_state_dict = {} - for key, value in state_dict.items(): - new_key = "model." 
+ key - new_state_dict[new_key] = value - mlm_model.set_state_dict(new_state_dict) - mlm_model.eval() - - return mlm_model, conf - - -def read_data(uid: str, prefix: os.PathLike): - # 获取 uid 对应的文本 - mfa_text = read_2col_text(prefix + '/text')[uid] - # 获取 uid 对应的音频路径 - mfa_wav_path = read_2col_text(prefix + '/wav.scp')[uid] - if not os.path.isabs(mfa_wav_path): - mfa_wav_path = prefix + mfa_wav_path - return mfa_text, mfa_wav_path - - -def get_align_data(uid: str, prefix: os.PathLike): - mfa_path = prefix + "mfa_" - mfa_text = read_2col_text(mfa_path + 'text')[uid] - mfa_start = load_num_sequence_text( - mfa_path + 'start', loader_type='text_float')[uid] - mfa_end = load_num_sequence_text( - mfa_path + 'end', loader_type='text_float')[uid] - mfa_wav_path = read_2col_text(mfa_path + 'wav.scp')[uid] - return mfa_text, mfa_start, mfa_end, mfa_wav_path - - -# 获取需要被 mask 的 mel 帧的范围 -def get_masked_mel_bdy(mfa_start: List[float], - mfa_end: List[float], - fs: int, - hop_length: int, - span_to_repl: List[List[int]]): - align_start = np.array(mfa_start) - align_end = np.array(mfa_end) - align_start = np.floor(fs * align_start / hop_length).astype('int') - align_end = np.floor(fs * align_end / hop_length).astype('int') - if span_to_repl[0] >= len(mfa_start): - span_bdy = [align_end[-1], align_end[-1]] - else: - span_bdy = [ - align_start[span_to_repl[0]], align_end[span_to_repl[1] - 1] - ] - return span_bdy, align_start, align_end - - -def recover_dict(word2phns: Dict[str, str], tp_word2phns: Dict[str, str]): - dic = {} - keys_to_del = [] - exist_idx = [] - sp_count = 0 - add_sp_count = 0 - for key in word2phns.keys(): - idx, wrd = key.split('_') - if wrd == 'sp': - sp_count += 1 - exist_idx.append(int(idx)) - else: - keys_to_del.append(key) - - for key in keys_to_del: - del word2phns[key] - - cur_id = 0 - for key in tp_word2phns.keys(): - if cur_id in exist_idx: - dic[str(cur_id) + "_sp"] = 'sp' - cur_id += 1 - add_sp_count += 1 - idx, wrd = key.split('_') - dic[str(cur_id) + "_" + wrd] = tp_word2phns[key] - cur_id += 1 - - if add_sp_count + 1 == sp_count: - dic[str(cur_id) + "_sp"] = 'sp' - add_sp_count += 1 - - assert add_sp_count == sp_count, "sp are not added in dic" - return dic - - -def get_max_idx(dic): - return sorted([int(key.split('_')[0]) for key in dic.keys()])[-1] - - -def get_phns_and_spans(wav_path: str, - old_str: str="", - new_str: str="", - source_lang: str="english", - target_lang: str="english"): - is_append = (old_str == new_str[:len(old_str)]) - old_phns, mfa_start, mfa_end = [], [], [] - # source - if source_lang == "english": - intervals, word2phns = alignment(wav_path, old_str) - elif source_lang == "chinese": - intervals, word2phns = alignment_zh(wav_path, old_str) - _, tp_word2phns = words2phns_zh(old_str) - - for key, value in tp_word2phns.items(): - idx, wrd = key.split('_') - cur_val = " ".join(value) - tp_word2phns[key] = cur_val - - word2phns = recover_dict(word2phns, tp_word2phns) - else: - assert source_lang == "chinese" or source_lang == "english", \ - "source_lang is wrong..." 
- - for item in intervals: - old_phns.append(item[0]) - mfa_start.append(float(item[1])) - mfa_end.append(float(item[2])) - # target - if is_append and (source_lang != target_lang): - cross_lingual_clone = True - else: - cross_lingual_clone = False - - if cross_lingual_clone: - str_origin = new_str[:len(old_str)] - str_append = new_str[len(old_str):] - - if target_lang == "chinese": - phns_origin, origin_word2phns = words2phns(str_origin) - phns_append, append_word2phns_tmp = words2phns_zh(str_append) - - elif target_lang == "english": - # 原始句子 - phns_origin, origin_word2phns = words2phns_zh(str_origin) - # clone 句子 - phns_append, append_word2phns_tmp = words2phns(str_append) - else: - assert target_lang == "chinese" or target_lang == "english", \ - "cloning is not support for this language, please check it." - - new_phns = phns_origin + phns_append - - append_word2phns = {} - length = len(origin_word2phns) - for key, value in append_word2phns_tmp.items(): - idx, wrd = key.split('_') - append_word2phns[str(int(idx) + length) + '_' + wrd] = value - new_word2phns = origin_word2phns.copy() - new_word2phns.update(append_word2phns) - - else: - if source_lang == target_lang and target_lang == "english": - new_phns, new_word2phns = words2phns(new_str) - elif source_lang == target_lang and target_lang == "chinese": - new_phns, new_word2phns = words2phns_zh(new_str) - else: - assert source_lang == target_lang, \ - "source language is not same with target language..." - - span_to_repl = [0, len(old_phns) - 1] - span_to_add = [0, len(new_phns) - 1] - left_idx = 0 - new_phns_left = [] - sp_count = 0 - # find the left different index - for key in word2phns.keys(): - idx, wrd = key.split('_') - if wrd == 'sp': - sp_count += 1 - new_phns_left.append('sp') - else: - idx = str(int(idx) - sp_count) - if idx + '_' + wrd in new_word2phns: - left_idx += len(new_word2phns[idx + '_' + wrd]) - new_phns_left.extend(word2phns[key].split()) - else: - span_to_repl[0] = len(new_phns_left) - span_to_add[0] = len(new_phns_left) - break - - # reverse word2phns and new_word2phns - right_idx = 0 - new_phns_right = [] - sp_count = 0 - word2phns_max_idx = get_max_idx(word2phns) - new_word2phns_max_idx = get_max_idx(new_word2phns) - new_phns_mid = [] - if is_append: - new_phns_right = [] - new_phns_mid = new_phns[left_idx:] - span_to_repl[0] = len(new_phns_left) - span_to_add[0] = len(new_phns_left) - span_to_add[1] = len(new_phns_left) + len(new_phns_mid) - span_to_repl[1] = len(old_phns) - len(new_phns_right) - # speech edit - else: - for key in list(word2phns.keys())[::-1]: - idx, wrd = key.split('_') - if wrd == 'sp': - sp_count += 1 - new_phns_right = ['sp'] + new_phns_right - else: - idx = str(new_word2phns_max_idx - (word2phns_max_idx - int(idx) - - sp_count)) - if idx + '_' + wrd in new_word2phns: - right_idx -= len(new_word2phns[idx + '_' + wrd]) - new_phns_right = word2phns[key].split() + new_phns_right - else: - span_to_repl[1] = len(old_phns) - len(new_phns_right) - new_phns_mid = new_phns[left_idx:right_idx] - span_to_add[1] = len(new_phns_left) + len(new_phns_mid) - if len(new_phns_mid) == 0: - span_to_add[1] = min(span_to_add[1] + 1, len(new_phns)) - span_to_add[0] = max(0, span_to_add[0] - 1) - span_to_repl[0] = max(0, span_to_repl[0] - 1) - span_to_repl[1] = min(span_to_repl[1] + 1, - len(old_phns)) - break - new_phns = new_phns_left + new_phns_mid + new_phns_right - ''' - For that reason cover should not be given. - For that reason cover is impossible to be given. 
- span_to_repl: [17, 23] "should not" - span_to_add: [17, 30] "is impossible to" - ''' - return mfa_start, mfa_end, old_phns, new_phns, span_to_repl, span_to_add - - -# mfa 获得的 duration 和 fs2 的 duration_predictor 获取的 duration 可能不同 -# 此处获得一个缩放比例, 用于预测值和真实值之间的缩放 -def get_dur_adj_factor(orig_dur: List[int], - pred_dur: List[int], - phns: List[str]): - length = 0 - factor_list = [] - for orig, pred, phn in zip(orig_dur, pred_dur, phns): - if pred == 0 or phn == 'sp': - continue - else: - factor_list.append(orig / pred) - factor_list = np.array(factor_list) - factor_list.sort() - if len(factor_list) < 5: - return 1 - length = 2 - avg = np.average(factor_list[length:-length]) - return avg - - -def prep_feats_with_dur(wav_path: str, - source_lang: str="English", - target_lang: str="English", - old_str: str="", - new_str: str="", - mask_reconstruct: bool=False, - duration_adjust: bool=True, - start_end_sp: bool=False, - fs: int=24000, - hop_length: int=300): - ''' - Returns: - np.ndarray: new wav, replace the part to be edited in original wav with 0 - List[str]: new phones - List[float]: mfa start of new wav - List[float]: mfa end of new wav - List[int]: masked mel boundary of original wav - List[int]: masked mel boundary of new wav - ''' - wav_org, _ = librosa.load(wav_path, sr=fs) - - mfa_start, mfa_end, old_phns, new_phns, span_to_repl, span_to_add = get_phns_and_spans( - wav_path=wav_path, - old_str=old_str, - new_str=new_str, - source_lang=source_lang, - target_lang=target_lang) - - if start_end_sp: - if new_phns[-1] != 'sp': - new_phns = new_phns + ['sp'] - # 中文的 phns 不一定都在 fastspeech2 的字典里, 用 sp 代替 - if target_lang == "english" or target_lang == "chinese": - old_durs = eval_durs(old_phns, target_lang=source_lang) - else: - assert target_lang == "chinese" or target_lang == "english", \ - "calculate duration_predict is not support for this language..." - - orig_old_durs = [e - s for e, s in zip(mfa_end, mfa_start)] - if '[MASK]' in new_str: - new_phns = old_phns - span_to_add = span_to_repl - d_factor_left = get_dur_adj_factor( - orig_dur=orig_old_durs[:span_to_repl[0]], - pred_dur=old_durs[:span_to_repl[0]], - phns=old_phns[:span_to_repl[0]]) - d_factor_right = get_dur_adj_factor( - orig_dur=orig_old_durs[span_to_repl[1]:], - pred_dur=old_durs[span_to_repl[1]:], - phns=old_phns[span_to_repl[1]:]) - d_factor = (d_factor_left + d_factor_right) / 2 - new_durs_adjusted = [d_factor * i for i in old_durs] - else: - if duration_adjust: - d_factor = get_dur_adj_factor( - orig_dur=orig_old_durs, pred_dur=old_durs, phns=old_phns) - d_factor = d_factor * 1.25 - else: - d_factor = 1 - - if target_lang == "english" or target_lang == "chinese": - new_durs = eval_durs(new_phns, target_lang=target_lang) - else: - assert target_lang == "chinese" or target_lang == "english", \ - "calculate duration_predict is not support for this language..." 
- - new_durs_adjusted = [d_factor * i for i in new_durs] - - new_span_dur_sum = sum(new_durs_adjusted[span_to_add[0]:span_to_add[1]]) - old_span_dur_sum = sum(orig_old_durs[span_to_repl[0]:span_to_repl[1]]) - dur_offset = new_span_dur_sum - old_span_dur_sum - new_mfa_start = mfa_start[:span_to_repl[0]] - new_mfa_end = mfa_end[:span_to_repl[0]] - for i in new_durs_adjusted[span_to_add[0]:span_to_add[1]]: - if len(new_mfa_end) == 0: - new_mfa_start.append(0) - new_mfa_end.append(i) - else: - new_mfa_start.append(new_mfa_end[-1]) - new_mfa_end.append(new_mfa_end[-1] + i) - new_mfa_start += [i + dur_offset for i in mfa_start[span_to_repl[1]:]] - new_mfa_end += [i + dur_offset for i in mfa_end[span_to_repl[1]:]] - - # 3. get new wav - # 在原始句子后拼接 - if span_to_repl[0] >= len(mfa_start): - left_idx = len(wav_org) - right_idx = left_idx - # 在原始句子中间替换 - else: - left_idx = int(np.floor(mfa_start[span_to_repl[0]] * fs)) - right_idx = int(np.ceil(mfa_end[span_to_repl[1] - 1] * fs)) - blank_wav = np.zeros( - (int(np.ceil(new_span_dur_sum * fs)), ), dtype=wav_org.dtype) - # 原始音频,需要编辑的部分替换成空音频,空音频的时间由 fs2 的 duration_predictor 决定 - new_wav = np.concatenate( - [wav_org[:left_idx], blank_wav, wav_org[right_idx:]]) - - # 4. get old and new mel span to be mask - # [92, 92] - - old_span_bdy, mfa_start, mfa_end = get_masked_mel_bdy( - mfa_start=mfa_start, - mfa_end=mfa_end, - fs=fs, - hop_length=hop_length, - span_to_repl=span_to_repl) - # [92, 174] - # new_mfa_start, new_mfa_end 时间级别的开始和结束时间 -> 帧级别 - new_span_bdy, new_mfa_start, new_mfa_end = get_masked_mel_bdy( - mfa_start=new_mfa_start, - mfa_end=new_mfa_end, - fs=fs, - hop_length=hop_length, - span_to_repl=span_to_add) - - # old_span_bdy, new_span_bdy 是帧级别的范围 - return new_wav, new_phns, new_mfa_start, new_mfa_end, old_span_bdy, new_span_bdy - - -def prep_feats(wav_path: str, - source_lang: str="english", - target_lang: str="english", - old_str: str="", - new_str: str="", - duration_adjust: bool=True, - start_end_sp: bool=False, - mask_reconstruct: bool=False, - fs: int=24000, - hop_length: int=300, - token_list: List[str]=[]): - wav, phns, mfa_start, mfa_end, old_span_bdy, new_span_bdy = prep_feats_with_dur( - source_lang=source_lang, - target_lang=target_lang, - old_str=old_str, - new_str=new_str, - wav_path=wav_path, - duration_adjust=duration_adjust, - start_end_sp=start_end_sp, - mask_reconstruct=mask_reconstruct, - fs=fs, - hop_length=hop_length) - - token_to_id = {item: i for i, item in enumerate(token_list)} - text = np.array( - list(map(lambda x: token_to_id.get(x, token_to_id['']), phns))) - span_bdy = np.array(new_span_bdy) - - batch = [('1', { - "speech": wav, - "align_start": mfa_start, - "align_end": mfa_end, - "text": text, - "span_bdy": span_bdy - })] - - return batch, old_span_bdy, new_span_bdy - - -def decode_with_model(mlm_model: nn.Layer, - collate_fn, - wav_path: str, - source_lang: str="english", - target_lang: str="english", - old_str: str="", - new_str: str="", - use_teacher_forcing: bool=False, - duration_adjust: bool=True, - start_end_sp: bool=False, - fs: int=24000, - hop_length: int=300, - token_list: List[str]=[]): - batch, old_span_bdy, new_span_bdy = prep_feats( - source_lang=source_lang, - target_lang=target_lang, - wav_path=wav_path, - old_str=old_str, - new_str=new_str, - duration_adjust=duration_adjust, - start_end_sp=start_end_sp, - fs=fs, - hop_length=hop_length, - token_list=token_list) - - feats = collate_fn(batch)[1] - - if 'text_masked_pos' in feats.keys(): - feats.pop('text_masked_pos') - - output = 
mlm_model.inference( - text=feats['text'], - speech=feats['speech'], - masked_pos=feats['masked_pos'], - speech_mask=feats['speech_mask'], - text_mask=feats['text_mask'], - speech_seg_pos=feats['speech_seg_pos'], - text_seg_pos=feats['text_seg_pos'], - span_bdy=new_span_bdy, - use_teacher_forcing=use_teacher_forcing) - - # 拼接音频 - output_feat = paddle.concat(x=output, axis=0) - wav_org, _ = librosa.load(wav_path, sr=fs) - return wav_org, output_feat, old_span_bdy, new_span_bdy, fs, hop_length - - -def get_mlm_output(wav_path: str, - model_name: str="paddle_checkpoint_en", - source_lang: str="english", - target_lang: str="english", - old_str: str="", - new_str: str="", - use_teacher_forcing: bool=False, - duration_adjust: bool=True, - start_end_sp: bool=False): - mlm_model, train_conf = load_model(model_name) - - collate_fn = build_mlm_collate_fn( - sr=train_conf.fs, - n_fft=train_conf.n_fft, - hop_length=train_conf.n_shift, - win_length=train_conf.win_length, - n_mels=train_conf.n_mels, - fmin=train_conf.fmin, - fmax=train_conf.fmax, - mlm_prob=train_conf.mlm_prob, - mean_phn_span=train_conf.mean_phn_span, - seg_emb=train_conf.model['enc_input_layer'] == 'sega_mlm') - - return decode_with_model( - source_lang=source_lang, - target_lang=target_lang, - mlm_model=mlm_model, - collate_fn=collate_fn, - wav_path=wav_path, - old_str=old_str, - new_str=new_str, - use_teacher_forcing=use_teacher_forcing, - duration_adjust=duration_adjust, - start_end_sp=start_end_sp, - fs=train_conf.fs, - hop_length=train_conf.n_shift, - token_list=train_conf.token_list) - - -def evaluate(uid: str, - source_lang: str="english", - target_lang: str="english", - prefix: os.PathLike="./prompt/dev/", - model_name: str="paddle_checkpoint_en", - new_str: str="", - prompt_decoding: bool=False, - task_name: str=None): - - # get origin text and path of origin wav - old_str, wav_path = read_data(uid=uid, prefix=prefix) - - if task_name == 'edit': - new_str = new_str - elif task_name == 'synthesize': - new_str = old_str + new_str - else: - new_str = old_str + ' '.join([ch for ch in new_str if is_chinese(ch)]) - - print('new_str is ', new_str) - - results_dict = get_wav( - source_lang=source_lang, - target_lang=target_lang, - model_name=model_name, - wav_path=wav_path, - old_str=old_str, - new_str=new_str) - return results_dict - - -if __name__ == "__main__": - # parse config and args - args = parse_args() - - data_dict = evaluate( - uid=args.uid, - source_lang=args.source_lang, - target_lang=args.target_lang, - prefix=args.prefix, - model_name=args.model_name, - new_str=args.new_str, - task_name=args.task_name) - sf.write(args.output_name, data_dict['output'], samplerate=24000) - print("finished...") diff --git a/examples/ernie_sat/local/sedit_arg_parser.py b/examples/ernie_sat/local/sedit_arg_parser.py deleted file mode 100644 index ad7e57191..000000000 --- a/examples/ernie_sat/local/sedit_arg_parser.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
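The inference_new.py removed above mirrors inference.py; the visible differences are the checkpoint-loading path (ErnieSAT is built directly from default.yaml instead of going through build_model_from_file) and the flatter config fields read in get_mlm_output. The released weights are stored without the wrapping "model." namespace, so load_model() prefixes every parameter key before set_state_dict. A condensed restatement of that loading step, with the config fields assumed to match the released checkpoints:

```python
import paddle
import yaml
from yacs.config import CfgNode

from paddlespeech.t2s.models.ernie_sat.ernie_sat import ErnieSAT

model_dir = "./pretrained_model/paddle_checkpoint_en"  # placeholder checkpoint directory
with open(f"{model_dir}/default.yaml") as f:
    conf = CfgNode(yaml.safe_load(f))

# idim is the phone-vocabulary size, odim the number of mel bins.
mlm_model = ErnieSAT(idim=len(conf.token_list), odim=conf.n_mels, **conf["model"])

state_dict = paddle.load(f"{model_dir}/model.pdparams")
mlm_model.set_state_dict({"model." + key: value for key, value in state_dict.items()})
mlm_model.eval()
```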
-# See the License for the specific language governing permissions and -# limitations under the License. -import argparse - - -def parse_args(): - # parse args and config and redirect to train_sp - parser = argparse.ArgumentParser( - description="Synthesize with acoustic model & vocoder") - # acoustic model - parser.add_argument( - '--am', - type=str, - default='fastspeech2_csmsc', - choices=[ - 'speedyspeech_csmsc', 'fastspeech2_csmsc', 'fastspeech2_ljspeech', - 'fastspeech2_aishell3', 'fastspeech2_vctk', 'tacotron2_csmsc', - 'tacotron2_ljspeech', 'tacotron2_aishell3' - ], - help='Choose acoustic model type of tts task.') - parser.add_argument( - '--am_config', - type=str, - default=None, - help='Config of acoustic model. Use deault config when it is None.') - parser.add_argument( - '--am_ckpt', - type=str, - default=None, - help='Checkpoint file of acoustic model.') - parser.add_argument( - "--am_stat", - type=str, - default=None, - help="mean and standard deviation used to normalize spectrogram when training acoustic model." - ) - parser.add_argument( - "--phones_dict", type=str, default=None, help="phone vocabulary file.") - parser.add_argument( - "--tones_dict", type=str, default=None, help="tone vocabulary file.") - parser.add_argument( - "--speaker_dict", type=str, default=None, help="speaker id map file.") - - # vocoder - parser.add_argument( - '--voc', - type=str, - default='pwgan_aishell3', - choices=[ - 'pwgan_csmsc', 'pwgan_ljspeech', 'pwgan_aishell3', 'pwgan_vctk', - 'mb_melgan_csmsc', 'wavernn_csmsc', 'hifigan_csmsc', - 'hifigan_ljspeech', 'hifigan_aishell3', 'hifigan_vctk', - 'style_melgan_csmsc' - ], - help='Choose vocoder type of tts task.') - parser.add_argument( - '--voc_config', - type=str, - default=None, - help='Config of voc. Use deault config when it is None.') - parser.add_argument( - '--voc_ckpt', type=str, default=None, help='Checkpoint file of voc.') - parser.add_argument( - "--voc_stat", - type=str, - default=None, - help="mean and standard deviation used to normalize spectrogram when training voc." - ) - # other - parser.add_argument( - "--ngpu", type=int, default=1, help="if ngpu == 0, use cpu.") - - parser.add_argument("--model_name", type=str, help="model name") - parser.add_argument("--uid", type=str, help="uid") - parser.add_argument("--new_str", type=str, help="new string") - parser.add_argument("--prefix", type=str, help="prefix") - parser.add_argument( - "--source_lang", type=str, default="english", help="source language") - parser.add_argument( - "--target_lang", type=str, default="english", help="target language") - parser.add_argument("--output_name", type=str, help="output name") - parser.add_argument("--task_name", type=str, help="task name") - - # pre - args = parser.parse_args() - return args diff --git a/examples/ernie_sat/local/utils.py b/examples/ernie_sat/local/utils.py deleted file mode 100644 index f2dce504a..000000000 --- a/examples/ernie_sat/local/utils.py +++ /dev/null @@ -1,175 +0,0 @@ -# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
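All of the local scripts above share this one flag surface, so the argument handling can be smoke-tested by feeding parse_args() an argv that mirrors a run script. The values below follow run_gen_en.sh later in this patch; checkpoint paths are omitted here and would normally be supplied via the --am_*/--voc_* flags.

```python
import sys
from sedit_arg_parser import parse_args  # from the deleted local/sedit_arg_parser.py

# Emulate (a subset of) the flags passed by run_gen_en.sh.
sys.argv = [
    "inference.py",
    "--task_name=synthesize",
    "--model_name=paddle_checkpoint_en",
    "--uid=p299_096",
    "--new_str=I enjoy my life, do you?",
    "--prefix=./prompt/dev/",
    "--source_lang=english",
    "--target_lang=english",
    "--output_name=pred_gen.wav",
    "--am=fastspeech2_ljspeech",
    "--voc=pwgan_aishell3",
]
args = parse_args()
assert args.am == "fastspeech2_ljspeech"
assert args.ngpu == 1  # default: one GPU; pass --ngpu=0 to run on CPU
```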
-# See the License for the specific language governing permissions and -# limitations under the License. -from pathlib import Path -from typing import Dict -from typing import List -from typing import Union - -import numpy as np -import paddle -import yaml -from sedit_arg_parser import parse_args -from yacs.config import CfgNode - -from paddlespeech.t2s.exps.syn_utils import get_am_inference -from paddlespeech.t2s.exps.syn_utils import get_voc_inference - - -def read_2col_text(path: Union[Path, str]) -> Dict[str, str]: - """Read a text file having 2 column as dict object. - - Examples: - wav.scp: - key1 /some/path/a.wav - key2 /some/path/b.wav - - >>> read_2col_text('wav.scp') - {'key1': '/some/path/a.wav', 'key2': '/some/path/b.wav'} - - """ - - data = {} - with Path(path).open("r", encoding="utf-8") as f: - for linenum, line in enumerate(f, 1): - sps = line.rstrip().split(maxsplit=1) - if len(sps) == 1: - k, v = sps[0], "" - else: - k, v = sps - if k in data: - raise RuntimeError(f"{k} is duplicated ({path}:{linenum})") - data[k] = v - return data - - -def load_num_sequence_text(path: Union[Path, str], loader_type: str="csv_int" - ) -> Dict[str, List[Union[float, int]]]: - """Read a text file indicating sequences of number - - Examples: - key1 1 2 3 - key2 34 5 6 - - >>> d = load_num_sequence_text('text') - >>> np.testing.assert_array_equal(d["key1"], np.array([1, 2, 3])) - """ - if loader_type == "text_int": - delimiter = " " - dtype = int - elif loader_type == "text_float": - delimiter = " " - dtype = float - elif loader_type == "csv_int": - delimiter = "," - dtype = int - elif loader_type == "csv_float": - delimiter = "," - dtype = float - else: - raise ValueError(f"Not supported loader_type={loader_type}") - - # path looks like: - # utta 1,0 - # uttb 3,4,5 - # -> return {'utta': np.ndarray([1, 0]), - # 'uttb': np.ndarray([3, 4, 5])} - d = read_2column_text(path) - # Using for-loop instead of dict-comprehension for debuggability - retval = {} - for k, v in d.items(): - try: - retval[k] = [dtype(i) for i in v.split(delimiter)] - except TypeError: - print(f'Error happened with path="{path}", id="{k}", value="{v}"') - raise - return retval - - -def is_chinese(ch): - if u'\u4e00' <= ch <= u'\u9fff': - return True - else: - return False - - -def get_voc_out(mel): - # vocoder - args = parse_args() - with open(args.voc_config) as f: - voc_config = CfgNode(yaml.safe_load(f)) - voc_inference = get_voc_inference( - voc=args.voc, - voc_config=voc_config, - voc_ckpt=args.voc_ckpt, - voc_stat=args.voc_stat) - - with paddle.no_grad(): - wav = voc_inference(mel) - return np.squeeze(wav) - - -def eval_durs(phns, target_lang="chinese", fs=24000, hop_length=300): - args = parse_args() - - if target_lang == 'english': - args.am = "fastspeech2_ljspeech" - args.am_config = "download/fastspeech2_nosil_ljspeech_ckpt_0.5/default.yaml" - args.am_ckpt = "download/fastspeech2_nosil_ljspeech_ckpt_0.5/snapshot_iter_100000.pdz" - args.am_stat = "download/fastspeech2_nosil_ljspeech_ckpt_0.5/speech_stats.npy" - args.phones_dict = "download/fastspeech2_nosil_ljspeech_ckpt_0.5/phone_id_map.txt" - - elif target_lang == 'chinese': - args.am = "fastspeech2_csmsc" - args.am_config = "download/fastspeech2_conformer_baker_ckpt_0.5/conformer.yaml" - args.am_ckpt = "download/fastspeech2_conformer_baker_ckpt_0.5/snapshot_iter_76000.pdz" - args.am_stat = "download/fastspeech2_conformer_baker_ckpt_0.5/speech_stats.npy" - args.phones_dict = "download/fastspeech2_conformer_baker_ckpt_0.5/phone_id_map.txt" - - if args.ngpu == 0: - 
paddle.set_device("cpu") - elif args.ngpu > 0: - paddle.set_device("gpu") - else: - print("ngpu should >= 0 !") - - # Init body. - with open(args.am_config) as f: - am_config = CfgNode(yaml.safe_load(f)) - - am_inference, am = get_am_inference( - am=args.am, - am_config=am_config, - am_ckpt=args.am_ckpt, - am_stat=args.am_stat, - phones_dict=args.phones_dict, - tones_dict=args.tones_dict, - speaker_dict=args.speaker_dict, - return_am=True) - - vocab_phones = {} - with open(args.phones_dict, "r") as f: - phn_id = [line.strip().split() for line in f.readlines()] - for tone, id in phn_id: - vocab_phones[tone] = int(id) - vocab_size = len(vocab_phones) - phonemes = [phn if phn in vocab_phones else "sp" for phn in phns] - - phone_ids = [vocab_phones[item] for item in phonemes] - phone_ids.append(vocab_size - 1) - phone_ids = paddle.to_tensor(np.array(phone_ids, np.int64)) - _, d_outs, _, _ = am.inference(phone_ids, spk_id=None, spk_emb=None) - pre_d_outs = d_outs - phu_durs_new = pre_d_outs * hop_length / fs - phu_durs_new = phu_durs_new.tolist()[:-1] - return phu_durs_new diff --git a/examples/ernie_sat/path.sh b/examples/ernie_sat/path.sh deleted file mode 100755 index d46d2f612..000000000 --- a/examples/ernie_sat/path.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash -export MAIN_ROOT=`realpath ${PWD}/../../` - -export PATH=${MAIN_ROOT}:${MAIN_ROOT}/utils:${PATH} -export LC_ALL=C - -export PYTHONDONTWRITEBYTECODE=1 -# Use UTF-8 in Python to avoid UnicodeDecodeError when LC_ALL=C -export PYTHONIOENCODING=UTF-8 -export PYTHONPATH=${MAIN_ROOT}:${PYTHONPATH} - -MODEL=ernie_sat -export BIN_DIR=${MAIN_ROOT}/paddlespeech/t2s/exps/${MODEL} \ No newline at end of file diff --git a/examples/ernie_sat/prompt/dev/text b/examples/ernie_sat/prompt/dev/text deleted file mode 100644 index f79cdcb42..000000000 --- a/examples/ernie_sat/prompt/dev/text +++ /dev/null @@ -1,3 +0,0 @@ -p243_new For that reason cover should not be given. -Prompt_003_new This was not the show for me. -p299_096 We are trying to establish a date. diff --git a/examples/ernie_sat/prompt/dev/wav.scp b/examples/ernie_sat/prompt/dev/wav.scp deleted file mode 100644 index eb0e8e48d..000000000 --- a/examples/ernie_sat/prompt/dev/wav.scp +++ /dev/null @@ -1,3 +0,0 @@ -p243_new ../../prompt_wav/p243_313.wav -Prompt_003_new ../../prompt_wav/this_was_not_the_show_for_me.wav -p299_096 ../../prompt_wav/p299_096.wav diff --git a/examples/ernie_sat/run_clone_en_to_zh.sh b/examples/ernie_sat/run_clone_en_to_zh.sh deleted file mode 100755 index 68b1c7544..000000000 --- a/examples/ernie_sat/run_clone_en_to_zh.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -set -e -source path.sh - -# en --> zh 的 语音合成 -# 根据 Prompt_003_new 作为提示语音: This was not the show for me. 来合成: '今天天气很好' -# 注: 输入的 new_str 需为中文汉字, 否则会通过预处理只保留中文汉字, 即合成预处理后的中文语音。 - -python local/inference.py \ - --task_name=cross-lingual_clone \ - --model_name=paddle_checkpoint_dual_mask_enzh \ - --uid=Prompt_003_new \ - --new_str='今天天气很好.' 
\ - --prefix='./prompt/dev/' \ - --source_lang=english \ - --target_lang=chinese \ - --output_name=pred_clone.wav \ - --voc=pwgan_aishell3 \ - --voc_config=download/pwg_aishell3_ckpt_0.5/default.yaml \ - --voc_ckpt=download/pwg_aishell3_ckpt_0.5/snapshot_iter_1000000.pdz \ - --voc_stat=download/pwg_aishell3_ckpt_0.5/feats_stats.npy \ - --am=fastspeech2_csmsc \ - --am_config=download/fastspeech2_conformer_baker_ckpt_0.5/conformer.yaml \ - --am_ckpt=download/fastspeech2_conformer_baker_ckpt_0.5/snapshot_iter_76000.pdz \ - --am_stat=download/fastspeech2_conformer_baker_ckpt_0.5/speech_stats.npy \ - --phones_dict=download/fastspeech2_conformer_baker_ckpt_0.5/phone_id_map.txt diff --git a/examples/ernie_sat/run_clone_en_to_zh_new.sh b/examples/ernie_sat/run_clone_en_to_zh_new.sh deleted file mode 100755 index 12fdf23f1..000000000 --- a/examples/ernie_sat/run_clone_en_to_zh_new.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -set -e -source path.sh - -# en --> zh 的 语音合成 -# 根据 Prompt_003_new 作为提示语音: This was not the show for me. 来合成: '今天天气很好' -# 注: 输入的 new_str 需为中文汉字, 否则会通过预处理只保留中文汉字, 即合成预处理后的中文语音。 - -python local/inference_new.py \ - --task_name=cross-lingual_clone \ - --model_name=paddle_checkpoint_dual_mask_enzh \ - --uid=Prompt_003_new \ - --new_str='今天天气很好.' \ - --prefix='./prompt/dev/' \ - --source_lang=english \ - --target_lang=chinese \ - --output_name=pred_clone.wav \ - --voc=pwgan_aishell3 \ - --voc_config=download/pwg_aishell3_ckpt_0.5/default.yaml \ - --voc_ckpt=download/pwg_aishell3_ckpt_0.5/snapshot_iter_1000000.pdz \ - --voc_stat=download/pwg_aishell3_ckpt_0.5/feats_stats.npy \ - --am=fastspeech2_csmsc \ - --am_config=download/fastspeech2_conformer_baker_ckpt_0.5/conformer.yaml \ - --am_ckpt=download/fastspeech2_conformer_baker_ckpt_0.5/snapshot_iter_76000.pdz \ - --am_stat=download/fastspeech2_conformer_baker_ckpt_0.5/speech_stats.npy \ - --phones_dict=download/fastspeech2_conformer_baker_ckpt_0.5/phone_id_map.txt diff --git a/examples/ernie_sat/run_gen_en.sh b/examples/ernie_sat/run_gen_en.sh deleted file mode 100755 index a0641bc7f..000000000 --- a/examples/ernie_sat/run_gen_en.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -set -e -source path.sh - -# 纯英文的语音合成 -# 样例为根据 p299_096 对应的语音作为提示语音: This was not the show for me. 来合成: 'I enjoy my life.' - -python local/inference.py \ - --task_name=synthesize \ - --model_name=paddle_checkpoint_en \ - --uid=p299_096 \ - --new_str='I enjoy my life, do you?' \ - --prefix='./prompt/dev/' \ - --source_lang=english \ - --target_lang=english \ - --output_name=pred_gen.wav \ - --voc=pwgan_aishell3 \ - --voc_config=download/pwg_aishell3_ckpt_0.5/default.yaml \ - --voc_ckpt=download/pwg_aishell3_ckpt_0.5/snapshot_iter_1000000.pdz \ - --voc_stat=download/pwg_aishell3_ckpt_0.5/feats_stats.npy \ - --am=fastspeech2_ljspeech \ - --am_config=download/fastspeech2_nosil_ljspeech_ckpt_0.5/default.yaml \ - --am_ckpt=download/fastspeech2_nosil_ljspeech_ckpt_0.5/snapshot_iter_100000.pdz \ - --am_stat=download/fastspeech2_nosil_ljspeech_ckpt_0.5/speech_stats.npy \ - --phones_dict=download/fastspeech2_nosil_ljspeech_ckpt_0.5/phone_id_map.txt diff --git a/examples/ernie_sat/run_gen_en_new.sh b/examples/ernie_sat/run_gen_en_new.sh deleted file mode 100755 index d76b00430..000000000 --- a/examples/ernie_sat/run_gen_en_new.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -set -e -source path.sh - -# 纯英文的语音合成 -# 样例为根据 p299_096 对应的语音作为提示语音: This was not the show for me. 来合成: 'I enjoy my life.' 
- -python local/inference_new.py \ - --task_name=synthesize \ - --model_name=paddle_checkpoint_en \ - --uid=p299_096 \ - --new_str='I enjoy my life, do you?' \ - --prefix='./prompt/dev/' \ - --source_lang=english \ - --target_lang=english \ - --output_name=pred_gen.wav \ - --voc=pwgan_aishell3 \ - --voc_config=download/pwg_aishell3_ckpt_0.5/default.yaml \ - --voc_ckpt=download/pwg_aishell3_ckpt_0.5/snapshot_iter_1000000.pdz \ - --voc_stat=download/pwg_aishell3_ckpt_0.5/feats_stats.npy \ - --am=fastspeech2_ljspeech \ - --am_config=download/fastspeech2_nosil_ljspeech_ckpt_0.5/default.yaml \ - --am_ckpt=download/fastspeech2_nosil_ljspeech_ckpt_0.5/snapshot_iter_100000.pdz \ - --am_stat=download/fastspeech2_nosil_ljspeech_ckpt_0.5/speech_stats.npy \ - --phones_dict=download/fastspeech2_nosil_ljspeech_ckpt_0.5/phone_id_map.txt diff --git a/examples/ernie_sat/run_sedit_en.sh b/examples/ernie_sat/run_sedit_en.sh deleted file mode 100755 index eec7d6402..000000000 --- a/examples/ernie_sat/run_sedit_en.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -set -e -source path.sh - -# 纯英文的语音编辑 -# 样例为把 p243_new 对应的原始语音: For that reason cover should not be given.编辑成 'for that reason cover is impossible to be given.' 对应的语音 -# NOTE: 语音编辑任务暂支持句子中 1 个位置的替换或者插入文本操作 - -python local/inference.py \ - --task_name=edit \ - --model_name=paddle_checkpoint_en \ - --uid=p243_new \ - --new_str='for that reason cover is impossible to be given.' \ - --prefix='./prompt/dev/' \ - --source_lang=english \ - --target_lang=english \ - --output_name=pred_edit.wav \ - --voc=pwgan_aishell3 \ - --voc_config=download/pwg_aishell3_ckpt_0.5/default.yaml \ - --voc_ckpt=download/pwg_aishell3_ckpt_0.5/snapshot_iter_1000000.pdz \ - --voc_stat=download/pwg_aishell3_ckpt_0.5/feats_stats.npy \ - --am=fastspeech2_ljspeech \ - --am_config=download/fastspeech2_nosil_ljspeech_ckpt_0.5/default.yaml \ - --am_ckpt=download/fastspeech2_nosil_ljspeech_ckpt_0.5/snapshot_iter_100000.pdz \ - --am_stat=download/fastspeech2_nosil_ljspeech_ckpt_0.5/speech_stats.npy \ - --phones_dict=download/fastspeech2_nosil_ljspeech_ckpt_0.5/phone_id_map.txt diff --git a/examples/ernie_sat/run_sedit_en_new.sh b/examples/ernie_sat/run_sedit_en_new.sh deleted file mode 100755 index 0952d280c..000000000 --- a/examples/ernie_sat/run_sedit_en_new.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -set -e -source path.sh - -# 纯英文的语音编辑 -# 样例为把 p243_new 对应的原始语音: For that reason cover should not be given.编辑成 'for that reason cover is impossible to be given.' 对应的语音 -# NOTE: 语音编辑任务暂支持句子中 1 个位置的替换或者插入文本操作 - -python local/inference_new.py \ - --task_name=edit \ - --model_name=paddle_checkpoint_en \ - --uid=p243_new \ - --new_str='for that reason cover is impossible to be given.' 
\ - --prefix='./prompt/dev/' \ - --source_lang=english \ - --target_lang=english \ - --output_name=pred_edit.wav \ - --voc=pwgan_aishell3 \ - --voc_config=download/pwg_aishell3_ckpt_0.5/default.yaml \ - --voc_ckpt=download/pwg_aishell3_ckpt_0.5/snapshot_iter_1000000.pdz \ - --voc_stat=download/pwg_aishell3_ckpt_0.5/feats_stats.npy \ - --am=fastspeech2_ljspeech \ - --am_config=download/fastspeech2_nosil_ljspeech_ckpt_0.5/default.yaml \ - --am_ckpt=download/fastspeech2_nosil_ljspeech_ckpt_0.5/snapshot_iter_100000.pdz \ - --am_stat=download/fastspeech2_nosil_ljspeech_ckpt_0.5/speech_stats.npy \ - --phones_dict=download/fastspeech2_nosil_ljspeech_ckpt_0.5/phone_id_map.txt diff --git a/examples/ernie_sat/test_run.sh b/examples/ernie_sat/test_run.sh deleted file mode 100755 index 75b6a5691..000000000 --- a/examples/ernie_sat/test_run.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -rm -rf *.wav -./run_sedit_en.sh # 语音编辑任务(英文) -./run_gen_en.sh # 个性化语音合成任务(英文) -./run_clone_en_to_zh.sh # 跨语言语音合成任务(英文到中文的语音克隆) \ No newline at end of file diff --git a/examples/ernie_sat/test_run_new.sh b/examples/ernie_sat/test_run_new.sh deleted file mode 100755 index bf8a4e02d..000000000 --- a/examples/ernie_sat/test_run_new.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -rm -rf *.wav -./run_sedit_en_new.sh # 语音编辑任务(英文) -./run_gen_en_new.sh # 个性化语音合成任务(英文) -./run_clone_en_to_zh_new.sh # 跨语言语音合成任务(英文到中文的语音克隆) \ No newline at end of file diff --git a/examples/ernie_sat/tools/.gitkeep b/examples/ernie_sat/tools/.gitkeep deleted file mode 100644 index e69de29bb..000000000 diff --git a/paddlespeech/t2s/datasets/am_batch_fn.py b/paddlespeech/t2s/datasets/am_batch_fn.py index 2cb7a11a2..c4c9e5d73 100644 --- a/paddlespeech/t2s/datasets/am_batch_fn.py +++ b/paddlespeech/t2s/datasets/am_batch_fn.py @@ -11,19 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Collection -from typing import Dict -from typing import List -from typing import Tuple - import numpy as np import paddle from paddlespeech.t2s.datasets.batch import batch_sequences -from paddlespeech.t2s.datasets.get_feats import LogMelFBank from paddlespeech.t2s.modules.nets_utils import get_seg_pos from paddlespeech.t2s.modules.nets_utils import make_non_pad_mask -from paddlespeech.t2s.modules.nets_utils import pad_list from paddlespeech.t2s.modules.nets_utils import phones_masking from paddlespeech.t2s.modules.nets_utils import phones_text_masking @@ -490,182 +483,3 @@ def vits_single_spk_batch_fn(examples): "speech": speech } return batch - - -# for ERNIE SAT -class MLMCollateFn: - """Functor class of common_collate_fn()""" - - def __init__( - self, - feats_extract, - mlm_prob: float=0.8, - mean_phn_span: int=8, - seg_emb: bool=False, - text_masking: bool=False, - attention_window: int=0, - not_sequence: Collection[str]=(), ): - self.mlm_prob = mlm_prob - self.mean_phn_span = mean_phn_span - self.feats_extract = feats_extract - self.not_sequence = set(not_sequence) - self.attention_window = attention_window - self.seg_emb = seg_emb - self.text_masking = text_masking - - def __call__(self, data: Collection[Tuple[str, Dict[str, np.ndarray]]] - ) -> Tuple[List[str], Dict[str, paddle.Tensor]]: - return mlm_collate_fn( - data, - feats_extract=self.feats_extract, - mlm_prob=self.mlm_prob, - mean_phn_span=self.mean_phn_span, - seg_emb=self.seg_emb, - text_masking=self.text_masking, - not_sequence=self.not_sequence) - - -def mlm_collate_fn( - data: Collection[Tuple[str, Dict[str, np.ndarray]]], - feats_extract=None, - mlm_prob: float=0.8, - mean_phn_span: int=8, - seg_emb: bool=False, - text_masking: bool=False, - pad_value: int=0, - not_sequence: Collection[str]=(), -) -> Tuple[List[str], Dict[str, paddle.Tensor]]: - uttids = [u for u, _ in data] - data = [d for _, d in data] - - assert all(set(data[0]) == set(d) for d in data), "dict-keys mismatching" - assert all(not k.endswith("_lens") - for k in data[0]), f"*_lens is reserved: {list(data[0])}" - - output = {} - for key in data[0]: - - array_list = [d[key] for d in data] - - # Assume the first axis is length: - # tensor_list: Batch x (Length, ...) - tensor_list = [paddle.to_tensor(a) for a in array_list] - # tensor: (Batch, Length, ...) 
- tensor = pad_list(tensor_list, pad_value) - output[key] = tensor - - # lens: (Batch,) - if key not in not_sequence: - lens = paddle.to_tensor( - [d[key].shape[0] for d in data], dtype=paddle.int64) - output[key + "_lens"] = lens - - feats = feats_extract.get_log_mel_fbank(np.array(output["speech"][0])) - feats = paddle.to_tensor(feats) - print("feats.shape:", feats.shape) - feats_lens = paddle.shape(feats)[0] - feats = paddle.unsqueeze(feats, 0) - - text = output["text"] - text_lens = output["text_lens"] - align_start = output["align_start"] - align_start_lens = output["align_start_lens"] - align_end = output["align_end"] - - max_tlen = max(text_lens) - max_slen = max(feats_lens) - - speech_pad = feats[:, :max_slen] - - text_pad = text - text_mask = make_non_pad_mask( - text_lens, text_pad, length_dim=1).unsqueeze(-2) - speech_mask = make_non_pad_mask( - feats_lens, speech_pad[:, :, 0], length_dim=1).unsqueeze(-2) - - span_bdy = None - if 'span_bdy' in output.keys(): - span_bdy = output['span_bdy'] - - # dual_mask 的是混合中英时候同时 mask 语音和文本 - # ernie sat 在实现跨语言的时候都 mask 了 - if text_masking: - masked_pos, text_masked_pos = phones_text_masking( - xs_pad=speech_pad, - src_mask=speech_mask, - text_pad=text_pad, - text_mask=text_mask, - align_start=align_start, - align_end=align_end, - align_start_lens=align_start_lens, - mlm_prob=mlm_prob, - mean_phn_span=mean_phn_span, - span_bdy=span_bdy) - # 训练纯中文和纯英文的 -> a3t 没有对 phoneme 做 mask, 只对语音 mask 了 - # a3t 和 ernie sat 的区别主要在于做 mask 的时候 - else: - masked_pos = phones_masking( - xs_pad=speech_pad, - src_mask=speech_mask, - align_start=align_start, - align_end=align_end, - align_start_lens=align_start_lens, - mlm_prob=mlm_prob, - mean_phn_span=mean_phn_span, - span_bdy=span_bdy) - text_masked_pos = paddle.zeros(paddle.shape(text_pad)) - - output_dict = {} - - speech_seg_pos, text_seg_pos = get_seg_pos( - speech_pad=speech_pad, - text_pad=text_pad, - align_start=align_start, - align_end=align_end, - align_start_lens=align_start_lens, - seg_emb=seg_emb) - output_dict['speech'] = speech_pad - output_dict['text'] = text_pad - output_dict['masked_pos'] = masked_pos - output_dict['text_masked_pos'] = text_masked_pos - output_dict['speech_mask'] = speech_mask - output_dict['text_mask'] = text_mask - output_dict['speech_seg_pos'] = speech_seg_pos - output_dict['text_seg_pos'] = text_seg_pos - output = (uttids, output_dict) - return output - - -def build_mlm_collate_fn( - sr: int=24000, - n_fft: int=2048, - hop_length: int=300, - win_length: int=None, - n_mels: int=80, - fmin: int=80, - fmax: int=7600, - mlm_prob: float=0.8, - mean_phn_span: int=8, - seg_emb: bool=False, - epoch: int=-1, ): - feats_extract_class = LogMelFBank - - feats_extract = feats_extract_class( - sr=sr, - n_fft=n_fft, - hop_length=hop_length, - win_length=win_length, - n_mels=n_mels, - fmin=fmin, - fmax=fmax) - - if epoch == -1: - mlm_prob_factor = 1 - else: - mlm_prob_factor = 0.8 - - return MLMCollateFn( - feats_extract=feats_extract, - mlm_prob=mlm_prob * mlm_prob_factor, - mean_phn_span=mean_phn_span, - seg_emb=seg_emb) diff --git a/paddlespeech/t2s/models/ernie_sat/__init__.py b/paddlespeech/t2s/models/ernie_sat/__init__.py index 7e795370e..87e7afe85 100644 --- a/paddlespeech/t2s/models/ernie_sat/__init__.py +++ b/paddlespeech/t2s/models/ernie_sat/__init__.py @@ -13,4 +13,3 @@ # limitations under the License. 
from .ernie_sat import * from .ernie_sat_updater import * -from .mlm import * diff --git a/paddlespeech/t2s/models/ernie_sat/mlm.py b/paddlespeech/t2s/models/ernie_sat/mlm.py deleted file mode 100644 index 647fdd9b4..000000000 --- a/paddlespeech/t2s/models/ernie_sat/mlm.py +++ /dev/null @@ -1,579 +0,0 @@ -# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import argparse -from typing import Dict -from typing import List -from typing import Optional - -import paddle -import yaml -from paddle import nn -from yacs.config import CfgNode - -from paddlespeech.t2s.modules.activation import get_activation -from paddlespeech.t2s.modules.conformer.convolution import ConvolutionModule -from paddlespeech.t2s.modules.conformer.encoder_layer import EncoderLayer -from paddlespeech.t2s.modules.layer_norm import LayerNorm -from paddlespeech.t2s.modules.masked_fill import masked_fill -from paddlespeech.t2s.modules.nets_utils import initialize -from paddlespeech.t2s.modules.tacotron2.decoder import Postnet -from paddlespeech.t2s.modules.transformer.attention import LegacyRelPositionMultiHeadedAttention -from paddlespeech.t2s.modules.transformer.attention import MultiHeadedAttention -from paddlespeech.t2s.modules.transformer.attention import RelPositionMultiHeadedAttention -from paddlespeech.t2s.modules.transformer.embedding import LegacyRelPositionalEncoding -from paddlespeech.t2s.modules.transformer.embedding import PositionalEncoding -from paddlespeech.t2s.modules.transformer.embedding import RelPositionalEncoding -from paddlespeech.t2s.modules.transformer.embedding import ScaledPositionalEncoding -from paddlespeech.t2s.modules.transformer.multi_layer_conv import Conv1dLinear -from paddlespeech.t2s.modules.transformer.multi_layer_conv import MultiLayeredConv1d -from paddlespeech.t2s.modules.transformer.positionwise_feed_forward import PositionwiseFeedForward -from paddlespeech.t2s.modules.transformer.repeat import repeat -from paddlespeech.t2s.modules.transformer.subsampling import Conv2dSubsampling - - -# MLM -> Mask Language Model -class mySequential(nn.Sequential): - def forward(self, *inputs): - for module in self._sub_layers.values(): - if type(inputs) == tuple: - inputs = module(*inputs) - else: - inputs = module(inputs) - return inputs - - -class MaskInputLayer(nn.Layer): - def __init__(self, out_features: int) -> None: - super().__init__() - self.mask_feature = paddle.create_parameter( - shape=(1, 1, out_features), - dtype=paddle.float32, - default_initializer=paddle.nn.initializer.Assign( - paddle.normal(shape=(1, 1, out_features)))) - - def forward(self, input: paddle.Tensor, - masked_pos: paddle.Tensor=None) -> paddle.Tensor: - masked_pos = paddle.expand_as(paddle.unsqueeze(masked_pos, -1), input) - masked_input = masked_fill(input, masked_pos, 0) + masked_fill( - paddle.expand_as(self.mask_feature, input), ~masked_pos, 0) - return masked_input - - -class MLMEncoder(nn.Layer): - """Conformer encoder module. 
- - Args: - idim (int): Input dimension. - attention_dim (int): Dimension of attention. - attention_heads (int): The number of heads of multi head attention. - linear_units (int): The number of units of position-wise feed forward. - num_blocks (int): The number of decoder blocks. - dropout_rate (float): Dropout rate. - positional_dropout_rate (float): Dropout rate after adding positional encoding. - attention_dropout_rate (float): Dropout rate in attention. - input_layer (Union[str, paddle.nn.Layer]): Input layer type. - normalize_before (bool): Whether to use layer_norm before the first block. - concat_after (bool): Whether to concat attention layer's input and output. - if True, additional linear will be applied. - i.e. x -> x + linear(concat(x, att(x))) - if False, no additional linear will be applied. i.e. x -> x + att(x) - positionwise_layer_type (str): "linear", "conv1d", or "conv1d-linear". - positionwise_conv_kernel_size (int): Kernel size of positionwise conv1d layer. - macaron_style (bool): Whether to use macaron style for positionwise layer. - pos_enc_layer_type (str): Encoder positional encoding layer type. - selfattention_layer_type (str): Encoder attention layer type. - activation_type (str): Encoder activation function type. - use_cnn_module (bool): Whether to use convolution module. - zero_triu (bool): Whether to zero the upper triangular part of attention matrix. - cnn_module_kernel (int): Kernerl size of convolution module. - padding_idx (int): Padding idx for input_layer=embed. - stochastic_depth_rate (float): Maximum probability to skip the encoder layer. - - """ - - def __init__(self, - idim: int, - vocab_size: int=0, - pre_speech_layer: int=0, - attention_dim: int=256, - attention_heads: int=4, - linear_units: int=2048, - num_blocks: int=6, - dropout_rate: float=0.1, - positional_dropout_rate: float=0.1, - attention_dropout_rate: float=0.0, - input_layer: str="conv2d", - normalize_before: bool=True, - concat_after: bool=False, - positionwise_layer_type: str="linear", - positionwise_conv_kernel_size: int=1, - macaron_style: bool=False, - pos_enc_layer_type: str="abs_pos", - selfattention_layer_type: str="selfattn", - activation_type: str="swish", - use_cnn_module: bool=False, - zero_triu: bool=False, - cnn_module_kernel: int=31, - padding_idx: int=-1, - stochastic_depth_rate: float=0.0, - text_masking: bool=False): - """Construct an Encoder object.""" - super().__init__() - self._output_size = attention_dim - self.text_masking = text_masking - if self.text_masking: - self.text_masking_layer = MaskInputLayer(attention_dim) - activation = get_activation(activation_type) - if pos_enc_layer_type == "abs_pos": - pos_enc_class = PositionalEncoding - elif pos_enc_layer_type == "scaled_abs_pos": - pos_enc_class = ScaledPositionalEncoding - elif pos_enc_layer_type == "rel_pos": - assert selfattention_layer_type == "rel_selfattn" - pos_enc_class = RelPositionalEncoding - elif pos_enc_layer_type == "legacy_rel_pos": - pos_enc_class = LegacyRelPositionalEncoding - assert selfattention_layer_type == "legacy_rel_selfattn" - else: - raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type) - - self.conv_subsampling_factor = 1 - if input_layer == "linear": - self.embed = nn.Sequential( - nn.Linear(idim, attention_dim), - nn.LayerNorm(attention_dim), - nn.Dropout(dropout_rate), - nn.ReLU(), - pos_enc_class(attention_dim, positional_dropout_rate), ) - elif input_layer == "conv2d": - self.embed = Conv2dSubsampling( - idim, - attention_dim, - dropout_rate, - 
pos_enc_class(attention_dim, positional_dropout_rate), ) - self.conv_subsampling_factor = 4 - elif input_layer == "embed": - self.embed = nn.Sequential( - nn.Embedding(idim, attention_dim, padding_idx=padding_idx), - pos_enc_class(attention_dim, positional_dropout_rate), ) - elif input_layer == "mlm": - self.segment_emb = None - self.speech_embed = mySequential( - MaskInputLayer(idim), - nn.Linear(idim, attention_dim), - nn.LayerNorm(attention_dim), - nn.ReLU(), - pos_enc_class(attention_dim, positional_dropout_rate)) - self.text_embed = nn.Sequential( - nn.Embedding( - vocab_size, attention_dim, padding_idx=padding_idx), - pos_enc_class(attention_dim, positional_dropout_rate), ) - elif input_layer == "sega_mlm": - self.segment_emb = nn.Embedding( - 500, attention_dim, padding_idx=padding_idx) - self.speech_embed = mySequential( - MaskInputLayer(idim), - nn.Linear(idim, attention_dim), - nn.LayerNorm(attention_dim), - nn.ReLU(), - pos_enc_class(attention_dim, positional_dropout_rate)) - self.text_embed = nn.Sequential( - nn.Embedding( - vocab_size, attention_dim, padding_idx=padding_idx), - pos_enc_class(attention_dim, positional_dropout_rate), ) - elif isinstance(input_layer, nn.Layer): - self.embed = nn.Sequential( - input_layer, - pos_enc_class(attention_dim, positional_dropout_rate), ) - elif input_layer is None: - self.embed = nn.Sequential( - pos_enc_class(attention_dim, positional_dropout_rate)) - else: - raise ValueError("unknown input_layer: " + input_layer) - self.normalize_before = normalize_before - - # self-attention module definition - if selfattention_layer_type == "selfattn": - encoder_selfattn_layer = MultiHeadedAttention - encoder_selfattn_layer_args = (attention_heads, attention_dim, - attention_dropout_rate, ) - elif selfattention_layer_type == "legacy_rel_selfattn": - assert pos_enc_layer_type == "legacy_rel_pos" - encoder_selfattn_layer = LegacyRelPositionMultiHeadedAttention - encoder_selfattn_layer_args = (attention_heads, attention_dim, - attention_dropout_rate, ) - elif selfattention_layer_type == "rel_selfattn": - assert pos_enc_layer_type == "rel_pos" - encoder_selfattn_layer = RelPositionMultiHeadedAttention - encoder_selfattn_layer_args = (attention_heads, attention_dim, - attention_dropout_rate, zero_triu, ) - else: - raise ValueError("unknown encoder_attn_layer: " + - selfattention_layer_type) - - # feed-forward module definition - if positionwise_layer_type == "linear": - positionwise_layer = PositionwiseFeedForward - positionwise_layer_args = (attention_dim, linear_units, - dropout_rate, activation, ) - elif positionwise_layer_type == "conv1d": - positionwise_layer = MultiLayeredConv1d - positionwise_layer_args = (attention_dim, linear_units, - positionwise_conv_kernel_size, - dropout_rate, ) - elif positionwise_layer_type == "conv1d-linear": - positionwise_layer = Conv1dLinear - positionwise_layer_args = (attention_dim, linear_units, - positionwise_conv_kernel_size, - dropout_rate, ) - else: - raise NotImplementedError("Support only linear or conv1d.") - - # convolution module definition - convolution_layer = ConvolutionModule - convolution_layer_args = (attention_dim, cnn_module_kernel, activation) - - self.encoders = repeat( - num_blocks, - lambda lnum: EncoderLayer( - attention_dim, - encoder_selfattn_layer(*encoder_selfattn_layer_args), - positionwise_layer(*positionwise_layer_args), - positionwise_layer(*positionwise_layer_args) if macaron_style else None, - convolution_layer(*convolution_layer_args) if use_cnn_module else None, - dropout_rate, - 
normalize_before, - concat_after, - stochastic_depth_rate * float(1 + lnum) / num_blocks, ), ) - self.pre_speech_layer = pre_speech_layer - self.pre_speech_encoders = repeat( - self.pre_speech_layer, - lambda lnum: EncoderLayer( - attention_dim, - encoder_selfattn_layer(*encoder_selfattn_layer_args), - positionwise_layer(*positionwise_layer_args), - positionwise_layer(*positionwise_layer_args) if macaron_style else None, - convolution_layer(*convolution_layer_args) if use_cnn_module else None, - dropout_rate, - normalize_before, - concat_after, - stochastic_depth_rate * float(1 + lnum) / self.pre_speech_layer, ), - ) - if self.normalize_before: - self.after_norm = LayerNorm(attention_dim) - - def forward(self, - speech: paddle.Tensor, - text: paddle.Tensor, - masked_pos: paddle.Tensor, - speech_mask: paddle.Tensor=None, - text_mask: paddle.Tensor=None, - speech_seg_pos: paddle.Tensor=None, - text_seg_pos: paddle.Tensor=None): - """Encode input sequence. - - """ - if masked_pos is not None: - speech = self.speech_embed(speech, masked_pos) - else: - speech = self.speech_embed(speech) - if text is not None: - text = self.text_embed(text) - if speech_seg_pos is not None and text_seg_pos is not None and self.segment_emb: - speech_seg_emb = self.segment_emb(speech_seg_pos) - text_seg_emb = self.segment_emb(text_seg_pos) - text = (text[0] + text_seg_emb, text[1]) - speech = (speech[0] + speech_seg_emb, speech[1]) - if self.pre_speech_encoders: - speech, _ = self.pre_speech_encoders(speech, speech_mask) - - if text is not None: - xs = paddle.concat([speech[0], text[0]], axis=1) - xs_pos_emb = paddle.concat([speech[1], text[1]], axis=1) - masks = paddle.concat([speech_mask, text_mask], axis=-1) - else: - xs = speech[0] - xs_pos_emb = speech[1] - masks = speech_mask - - xs, masks = self.encoders((xs, xs_pos_emb), masks) - - if isinstance(xs, tuple): - xs = xs[0] - if self.normalize_before: - xs = self.after_norm(xs) - - return xs, masks - - -class MLMDecoder(MLMEncoder): - def forward(self, xs: paddle.Tensor, masks: paddle.Tensor): - """Encode input sequence. - - Args: - xs (paddle.Tensor): Input tensor (#batch, time, idim). - masks (paddle.Tensor): Mask tensor (#batch, time). - - Returns: - paddle.Tensor: Output tensor (#batch, time, attention_dim). - paddle.Tensor: Mask tensor (#batch, time). 
- - """ - xs = self.embed(xs) - xs, masks = self.encoders(xs, masks) - - if isinstance(xs, tuple): - xs = xs[0] - if self.normalize_before: - xs = self.after_norm(xs) - - return xs, masks - - -# encoder and decoder is nn.Layer, not str -class MLM(nn.Layer): - def __init__(self, - odim: int, - encoder: nn.Layer, - decoder: Optional[nn.Layer], - postnet_layers: int=0, - postnet_chans: int=0, - postnet_filts: int=0, - text_masking: bool=False): - - super().__init__() - self.odim = odim - self.encoder = encoder - self.decoder = decoder - self.vocab_size = encoder.text_embed[0]._num_embeddings - - if self.decoder is None or not (hasattr(self.decoder, - 'output_layer') and - self.decoder.output_layer is not None): - self.sfc = nn.Linear(self.encoder._output_size, odim) - else: - self.sfc = None - if text_masking: - self.text_sfc = nn.Linear( - self.encoder.text_embed[0]._embedding_dim, - self.vocab_size, - weight_attr=self.encoder.text_embed[0]._weight_attr) - else: - self.text_sfc = None - - self.postnet = (None if postnet_layers == 0 else Postnet( - idim=self.encoder._output_size, - odim=odim, - n_layers=postnet_layers, - n_chans=postnet_chans, - n_filts=postnet_filts, - use_batch_norm=True, - dropout_rate=0.5, )) - - def inference( - self, - speech: paddle.Tensor, - text: paddle.Tensor, - masked_pos: paddle.Tensor, - speech_mask: paddle.Tensor, - text_mask: paddle.Tensor, - speech_seg_pos: paddle.Tensor, - text_seg_pos: paddle.Tensor, - span_bdy: List[int], - use_teacher_forcing: bool=False, ) -> Dict[str, paddle.Tensor]: - ''' - Args: - speech (paddle.Tensor): input speech (1, Tmax, D). - text (paddle.Tensor): input text (1, Tmax2). - masked_pos (paddle.Tensor): masked position of input speech (1, Tmax) - speech_mask (paddle.Tensor): mask of speech (1, 1, Tmax). - text_mask (paddle.Tensor): mask of text (1, 1, Tmax2). - speech_seg_pos (paddle.Tensor): n-th phone of each mel, 0<=n<=Tmax2 (1, Tmax). - text_seg_pos (paddle.Tensor): n-th phone of each phone, 0<=n<=Tmax2 (1, Tmax2). 
- span_bdy (List[int]): masked mel boundary of input speech (2,) - use_teacher_forcing (bool): whether to use teacher forcing - Returns: - List[Tensor]: - eg: - [Tensor(shape=[1, 181, 80]), Tensor(shape=[80, 80]), Tensor(shape=[1, 67, 80])] - ''' - - z_cache = None - if use_teacher_forcing: - before_outs, zs, *_ = self.forward( - speech=speech, - text=text, - masked_pos=masked_pos, - speech_mask=speech_mask, - text_mask=text_mask, - speech_seg_pos=speech_seg_pos, - text_seg_pos=text_seg_pos) - if zs is None: - zs = before_outs - - speech = speech.squeeze(0) - outs = [speech[:span_bdy[0]]] - outs += [zs[0][span_bdy[0]:span_bdy[1]]] - outs += [speech[span_bdy[1]:]] - return outs - return None - - -class MLMEncAsDecoder(MLM): - def forward(self, - speech: paddle.Tensor, - text: paddle.Tensor, - masked_pos: paddle.Tensor, - speech_mask: paddle.Tensor, - text_mask: paddle.Tensor, - speech_seg_pos: paddle.Tensor, - text_seg_pos: paddle.Tensor): - # feats: (Batch, Length, Dim) - # -> encoder_out: (Batch, Length2, Dim2) - encoder_out, h_masks = self.encoder( - speech=speech, - text=text, - masked_pos=masked_pos, - speech_mask=speech_mask, - text_mask=text_mask, - speech_seg_pos=speech_seg_pos, - text_seg_pos=text_seg_pos) - if self.decoder is not None: - zs, _ = self.decoder(encoder_out, h_masks) - else: - zs = encoder_out - speech_hidden_states = zs[:, :paddle.shape(speech)[1], :] - if self.sfc is not None: - before_outs = paddle.reshape( - self.sfc(speech_hidden_states), - (paddle.shape(speech_hidden_states)[0], -1, self.odim)) - else: - before_outs = speech_hidden_states - if self.postnet is not None: - after_outs = before_outs + paddle.transpose( - self.postnet(paddle.transpose(before_outs, [0, 2, 1])), - [0, 2, 1]) - else: - after_outs = None - return before_outs, after_outs, None - - -class MLMDualMaksing(MLM): - def forward(self, - speech: paddle.Tensor, - text: paddle.Tensor, - masked_pos: paddle.Tensor, - speech_mask: paddle.Tensor, - text_mask: paddle.Tensor, - speech_seg_pos: paddle.Tensor, - text_seg_pos: paddle.Tensor): - # feats: (Batch, Length, Dim) - # -> encoder_out: (Batch, Length2, Dim2) - encoder_out, h_masks = self.encoder( - speech=speech, - text=text, - masked_pos=masked_pos, - speech_mask=speech_mask, - text_mask=text_mask, - speech_seg_pos=speech_seg_pos, - text_seg_pos=text_seg_pos) - if self.decoder is not None: - zs, _ = self.decoder(encoder_out, h_masks) - else: - zs = encoder_out - speech_hidden_states = zs[:, :paddle.shape(speech)[1], :] - if self.text_sfc: - text_hiddent_states = zs[:, paddle.shape(speech)[1]:, :] - text_outs = paddle.reshape( - self.text_sfc(text_hiddent_states), - (paddle.shape(text_hiddent_states)[0], -1, self.vocab_size)) - if self.sfc is not None: - before_outs = paddle.reshape( - self.sfc(speech_hidden_states), - (paddle.shape(speech_hidden_states)[0], -1, self.odim)) - else: - before_outs = speech_hidden_states - if self.postnet is not None: - after_outs = before_outs + paddle.transpose( - self.postnet(paddle.transpose(before_outs, [0, 2, 1])), - [0, 2, 1]) - else: - after_outs = None - return before_outs, after_outs, text_outs - - -def build_model_from_file(config_file, model_file): - - state_dict = paddle.load(model_file) - model_class = MLMDualMaksing if 'conformer_combine_vctk_aishell3_dual_masking' in config_file \ - else MLMEncAsDecoder - - # 构建模型 - with open(config_file) as f: - conf = CfgNode(yaml.safe_load(f)) - model = build_model(conf, model_class) - model.set_state_dict(state_dict) - return model, conf - - -# select encoder and 
decoder here -def build_model(args: argparse.Namespace, model_class=MLMEncAsDecoder) -> MLM: - if isinstance(args.token_list, str): - with open(args.token_list, encoding="utf-8") as f: - token_list = [line.rstrip() for line in f] - - # Overwriting token_list to keep it as "portable". - args.token_list = list(token_list) - elif isinstance(args.token_list, (tuple, list)): - token_list = list(args.token_list) - else: - raise RuntimeError("token_list must be str or list") - - vocab_size = len(token_list) - odim = 80 - - # Encoder - encoder_class = MLMEncoder - - if 'text_masking' in args.model_conf.keys() and args.model_conf[ - 'text_masking']: - args.encoder_conf['text_masking'] = True - else: - args.encoder_conf['text_masking'] = False - - encoder = encoder_class( - args.input_size, vocab_size=vocab_size, **args.encoder_conf) - - # Decoder - if args.decoder != 'no_decoder': - decoder_class = MLMDecoder - decoder = decoder_class( - idim=0, - input_layer=None, - **args.decoder_conf, ) - else: - decoder = None - - # Build model - model = model_class( - odim=odim, - encoder=encoder, - decoder=decoder, - **args.model_conf, ) - - # Initialize - if args.init is not None: - initialize(model, args.init) - - return model From b81cb5d71e685a9c655dd1ca0e7f6ed5398a2071 Mon Sep 17 00:00:00 2001 From: Ming Date: Fri, 26 Aug 2022 23:09:18 +0800 Subject: [PATCH 027/101] Update README.md (#2314) * Update README.md add Contributors * Update README_cn.md --- README.md | 101 ++++++++++++++++++++++++++++++++++----------------- README_cn.md | 101 ++++++++++++++++++++++++++++++++++----------------- 2 files changed, 134 insertions(+), 68 deletions(-) diff --git a/README.md b/README.md index 1d3666f53..bca758688 100644 --- a/README.md +++ b/README.md @@ -799,40 +799,73 @@ You are warmly welcome to submit questions in [discussions](https://github.com/P ### Contributors

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

## Acknowledgement diff --git a/README_cn.md b/README_cn.md index e9cbc888a..996c546ac 100644 --- a/README_cn.md +++ b/README_cn.md @@ -803,40 +803,73 @@ PaddleSpeech 的 **语音合成** 主要包含三个模块:文本前端、声 ### 贡献者

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

## 致谢 From 090117e30d004cf571deedf07ba972c7809bc5e5 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Sat, 27 Aug 2022 20:42:37 +0800 Subject: [PATCH 028/101] add github issue template (#2317) * Create feature-request.md * Create question.md * Update bug_report.md --- .github/ISSUE_TEMPLATE/bug_report.md | 2 +- .github/ISSUE_TEMPLATE/feature-request.md | 16 ++++++++++++++++ .github/ISSUE_TEMPLATE/question.md | 16 ++++++++++++++++ 3 files changed, 33 insertions(+), 1 deletion(-) create mode 100644 .github/ISSUE_TEMPLATE/feature-request.md create mode 100644 .github/ISSUE_TEMPLATE/question.md diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index b31d98631..136160240 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -2,7 +2,7 @@ name: Bug report about: Create a report to help us improve title: '' -labels: '' +labels: 'Bug' assignees: '' --- diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md new file mode 100644 index 000000000..3c9dd487c --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature-request.md @@ -0,0 +1,16 @@ +--- +name: "\U0001F680 Feature Request" +about: As a user, I want to request a New Feature on the product. +labels: "feature request" +--- + +## Feature Request + +**Is your feature request related to a problem? Please describe:** + + +**Describe the feature you'd like:** + + +**Describe alternatives you've considered:** + diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md new file mode 100644 index 000000000..423ed34fd --- /dev/null +++ b/.github/ISSUE_TEMPLATE/question.md @@ -0,0 +1,16 @@ +--- +name: "\U0001F914 Ask a Question" +about: I want to ask a question. +labels: Question +--- + +## General Question + + From c40e29f847ee343e56ffe0f9a9bc238ba1c46beb Mon Sep 17 00:00:00 2001 From: TianYuan Date: Sat, 27 Aug 2022 21:01:01 +0800 Subject: [PATCH 029/101] Update issue templates (#2318) * Update issue templates * Delete bug_report.md * Delete --tts-bug-report.md * Rename ---asr-bug-report.md to bug-report-asr.md * Rename ---tts-bug-report.md to bug-report-tts.md --- .../{bug_report.md => bug-report-asr.md} | 8 ++-- .github/ISSUE_TEMPLATE/bug-report-tts.md | 42 +++++++++++++++++++ .github/ISSUE_TEMPLATE/feature-request.md | 5 ++- .github/ISSUE_TEMPLATE/question.md | 3 ++ 4 files changed, 53 insertions(+), 5 deletions(-) rename .github/ISSUE_TEMPLATE/{bug_report.md => bug-report-asr.md} (87%) create mode 100644 .github/ISSUE_TEMPLATE/bug-report-tts.md diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug-report-asr.md similarity index 87% rename from .github/ISSUE_TEMPLATE/bug_report.md rename to .github/ISSUE_TEMPLATE/bug-report-asr.md index 136160240..287234351 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug-report-asr.md @@ -1,9 +1,9 @@ --- -name: Bug report +name: "\U0001F41B ASR Bug Report" about: Create a report to help us improve title: '' -labels: 'Bug' -assignees: '' +labels: Bug, T2S +assignees: zh794390558 --- @@ -27,7 +27,7 @@ A clear and concise description of what you expected to happen. **Screenshots** If applicable, add screenshots to help explain your problem. -** Environment (please complete the following information):** +**Environment (please complete the following information):** - OS: [e.g. Ubuntu] - GCC/G++ Version [e.g. 8.3] - Python Version [e.g. 
3.7] diff --git a/.github/ISSUE_TEMPLATE/bug-report-tts.md b/.github/ISSUE_TEMPLATE/bug-report-tts.md new file mode 100644 index 000000000..d8f7afa82 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug-report-tts.md @@ -0,0 +1,42 @@ +--- +name: "\U0001F41B TTS Bug Report" +about: Create a report to help us improve +title: '' +labels: Bug, T2S +assignees: yt605155624 + +--- + +For support and discussions, please use our [Discourse forums](https://github.com/PaddlePaddle/DeepSpeech/discussions). + +If you've found a bug then please create an issue with the following information: + +**Describe the bug** +A clear and concise description of what the bug is. + +**To Reproduce** +Steps to reproduce the behavior: +1. Go to '...' +2. Click on '....' +3. Scroll down to '....' +4. See error + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Screenshots** +If applicable, add screenshots to help explain your problem. + +**Environment (please complete the following information):** + - OS: [e.g. Ubuntu] + - GCC/G++ Version [e.g. 8.3] + - Python Version [e.g. 3.7] + - PaddlePaddle Version [e.g. 2.0.0] + - Model Version [e.g. 2.0.0] + - GPU/DRIVER Informationo [e.g. Tesla V100-SXM2-32GB/440.64.00] + - CUDA/CUDNN Version [e.g. cuda-10.2] + - MKL Version +- TensorRT Version + +**Additional context** +Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md index 3c9dd487c..8f7e094da 100644 --- a/.github/ISSUE_TEMPLATE/feature-request.md +++ b/.github/ISSUE_TEMPLATE/feature-request.md @@ -1,7 +1,10 @@ --- name: "\U0001F680 Feature Request" about: As a user, I want to request a New Feature on the product. -labels: "feature request" +title: '' +labels: feature request +assignees: '' + --- ## Feature Request diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md index 423ed34fd..445905c61 100644 --- a/.github/ISSUE_TEMPLATE/question.md +++ b/.github/ISSUE_TEMPLATE/question.md @@ -1,7 +1,10 @@ --- name: "\U0001F914 Ask a Question" about: I want to ask a question. +title: '' labels: Question +assignees: '' + --- ## General Question From 2c4c254f718c2b7a4d68fae98a0e5fc3861313b4 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Sat, 27 Aug 2022 21:02:07 +0800 Subject: [PATCH 030/101] Update bug-report-asr.md --- .github/ISSUE_TEMPLATE/bug-report-asr.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/bug-report-asr.md b/.github/ISSUE_TEMPLATE/bug-report-asr.md index 287234351..44f3c1401 100644 --- a/.github/ISSUE_TEMPLATE/bug-report-asr.md +++ b/.github/ISSUE_TEMPLATE/bug-report-asr.md @@ -2,7 +2,7 @@ name: "\U0001F41B ASR Bug Report" about: Create a report to help us improve title: '' -labels: Bug, T2S +labels: Bug, S2T assignees: zh794390558 --- From c28064fec2e1800cd141ee22aab5976565a50226 Mon Sep 17 00:00:00 2001 From: Zhao Yuting <91456992+THUzyt21@users.noreply.github.com> Date: Mon, 29 Aug 2022 10:47:15 +0800 Subject: [PATCH 031/101] Update asr_engine.py (#2302) * Update asr_engine.py * Update asr_engine.py * Update application.yaml must add parameter "num_decoding_left_chunks" so as to modify this in other scenarios. 
* Update asr_engine.py * Update application.yaml * Update application.yaml * Update asr_engine.py --- paddlespeech/server/conf/application.yaml | 2 ++ paddlespeech/server/engine/asr/python/asr_engine.py | 9 +++++---- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/paddlespeech/server/conf/application.yaml b/paddlespeech/server/conf/application.yaml index 8650154e9..55f241ec7 100644 --- a/paddlespeech/server/conf/application.yaml +++ b/paddlespeech/server/conf/application.yaml @@ -25,6 +25,7 @@ asr_python: cfg_path: # [optional] ckpt_path: # [optional] decode_method: 'attention_rescoring' + num_decoding_left_chunks: -1 force_yes: True device: # set 'gpu:id' or 'cpu' @@ -38,6 +39,7 @@ asr_inference: lang: 'zh' sample_rate: 16000 cfg_path: + num_decoding_left_chunks: -1 decode_method: force_yes: True diff --git a/paddlespeech/server/engine/asr/python/asr_engine.py b/paddlespeech/server/engine/asr/python/asr_engine.py index 02c40fd12..9ce05d97a 100644 --- a/paddlespeech/server/engine/asr/python/asr_engine.py +++ b/paddlespeech/server/engine/asr/python/asr_engine.py @@ -66,11 +66,12 @@ class ASREngine(BaseEngine): ) logger.error(e) return False - + self.executor._init_from_path( - self.config.model, self.config.lang, self.config.sample_rate, - self.config.cfg_path, self.config.decode_method, - self.config.ckpt_path) + model_type = self.config.model, lang = self.config.lang, sample_rate = self.config.sample_rate, + cfg_path = self.config.cfg_path, decode_method = self.config.decode_method, + ckpt_path = self.config.ckpt_path) + logger.info("Initialize ASR server engine successfully on device: %s." % (self.device)) From 3d4dce8b73e0de4b0aa8c0954d070bc3e99062ce Mon Sep 17 00:00:00 2001 From: TianYuan Date: Mon, 29 Aug 2022 11:31:31 +0800 Subject: [PATCH 032/101] update readme, add get_contributors (#2321) --- README.md | 4 +- README_cn.md | 4 +- tools/get_contributors.ipynb | 146 +++++++++++++++++++++++++++++++++++ 3 files changed, 150 insertions(+), 4 deletions(-) create mode 100644 tools/get_contributors.ipynb diff --git a/README.md b/README.md index bca758688..acbe12309 100644 --- a/README.md +++ b/README.md @@ -557,9 +557,9 @@ PaddleSpeech supports a series of most popular models. 
They are summarized in [r FastSpeech2 - LJSpeech / VCTK / CSMSC / AISHELL-3 / ZH_EN + LJSpeech / VCTK / CSMSC / AISHELL-3 / ZH_EN / finetune - fastspeech2-ljspeech / fastspeech2-vctk / fastspeech2-csmsc / fastspeech2-aishell3 / fastspeech2-zh_en + fastspeech2-ljspeech / fastspeech2-vctk / fastspeech2-csmsc / fastspeech2-aishell3 / fastspeech2-zh_en / fastspeech2-finetune diff --git a/README_cn.md b/README_cn.md index 996c546ac..dbbc13ac0 100644 --- a/README_cn.md +++ b/README_cn.md @@ -552,9 +552,9 @@ PaddleSpeech 的 **语音合成** 主要包含三个模块:文本前端、声 FastSpeech2 - LJSpeech / VCTK / CSMSC / AISHELL-3 / ZH_EN + LJSpeech / VCTK / CSMSC / AISHELL-3 / ZH_EN / finetune - fastspeech2-ljspeech / fastspeech2-vctk / fastspeech2-csmsc / fastspeech2-aishell3 / fastspeech2-zh_en + fastspeech2-ljspeech / fastspeech2-vctk / fastspeech2-csmsc / fastspeech2-aishell3 / fastspeech2-zh_en / fastspeech2-finetune diff --git a/tools/get_contributors.ipynb b/tools/get_contributors.ipynb new file mode 100644 index 000000000..a8ad99efa --- /dev/null +++ b/tools/get_contributors.ipynb @@ -0,0 +1,146 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "automotive-trailer", + "metadata": {}, + "outputs": [], + "source": [ + "from selenium import webdriver\n", + "chromeOptions = webdriver.ChromeOptions()\n", + "driver = webdriver.Chrome('./chromedriver', chrome_options=chromeOptions)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "physical-croatia", + "metadata": {}, + "outputs": [], + "source": [ + "driver.get(\"https://github.com/PaddlePaddle/PaddleSpeech/graphs/contributors\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "seventh-latitude", + "metadata": {}, + "outputs": [], + "source": [ + "

\n", + " \n", + " \"zh794390558\"\n", + " \n", + " #1\n", + " zh794390558\n", + " \n", + " \n", + "
\n", + " 655 commits\n", + "   \n", + " 3,671,956 ++\n", + "   \n", + " 1,966,288 --\n", + "
\n", + "
\n", + "
\n", + "

" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "modified-argument", + "metadata": {}, + "outputs": [], + "source": [ + "from selenium.webdriver.common.by import By" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "demonstrated-aging", + "metadata": {}, + "outputs": [], + "source": [ + "elements = driver.find_elements(By.CLASS_NAME, 'lh-condensed')\n", + "for element in elements:\n", + " zhuye = element.find_elements(By.CLASS_NAME, 'd-inline-block')[0].get_attribute(\"href\")\n", + " img = element.find_elements(By.CLASS_NAME, 'avatar')[0].get_attribute(\"src\")\n", + " mkdown = f\"\"\"\"\"\"\n", + " print(mkdown)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "general-torture", + "metadata": {}, + "outputs": [], + "source": [ + "element.find_elements(By.CLASS_NAME, 'd-inline-block')[0].get_attribute(\"href\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "downtown-institute", + "metadata": {}, + "outputs": [], + "source": [ + "element.find_elements(By.CLASS_NAME, 'avatar')[0].get_attribute(\"src\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "worthy-planet", + "metadata": {}, + "outputs": [], + "source": [ + "len(elements)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.0" + }, + "toc": { + "base_numbering": 1, + "nav_menu": {}, + "number_sections": true, + "sideBar": true, + "skip_h1_title": false, + "title_cell": "Table of Contents", + "title_sidebar": "Contents", + "toc_cell": false, + "toc_position": {}, + "toc_section_display": true, + "toc_window_display": false + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From a3bc5a2923af4729c4a5a07790437eacded1ba85 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Mon, 29 Aug 2022 17:11:43 +0800 Subject: [PATCH 033/101] Update issue templates --- .github/ISSUE_TEMPLATE/others.md | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/others.md diff --git a/.github/ISSUE_TEMPLATE/others.md b/.github/ISSUE_TEMPLATE/others.md new file mode 100644 index 000000000..edfe14eb5 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/others.md @@ -0,0 +1,32 @@ +--- +name: Others +about: Report any other non-support related issues. +title: '' +labels: '' +assignees: '' + +--- + +name: 🧩 其他 Others +description: 提出其他问题。 Report any other non-support related issues. +labels: + +body: +- type: markdown + attributes: + value: > + #### 你可以在这里提出任何前面几类模板不适用的问题,包括但不限于:优化性建议、框架使用体验反馈、版本兼容性问题、报错信息不清楚等。 + + #### You can report any issues that are not applicable to the previous types of templates, including but not limited to: enhancement suggestions, feedback on the use of the framework, version compatibility issues, unclear error information, etc. + +- type: textarea + id: others + attributes: + label: 问题描述 Please describe your issue + validations: + required: true + +- type: markdown + attributes: + value: > + 感谢你的贡献 🎉! Thanks for your contribution 🎉! 
From bf46c0340e82fbaf7bd40ecce1b12b6bfb287762 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Mon, 29 Aug 2022 17:13:14 +0800 Subject: [PATCH 034/101] Rename others.md to others.yaml --- .github/ISSUE_TEMPLATE/{others.md => others.yaml} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .github/ISSUE_TEMPLATE/{others.md => others.yaml} (100%) diff --git a/.github/ISSUE_TEMPLATE/others.md b/.github/ISSUE_TEMPLATE/others.yaml similarity index 100% rename from .github/ISSUE_TEMPLATE/others.md rename to .github/ISSUE_TEMPLATE/others.yaml From 49cc4145d240bfd422921c0639224cdd19dbded5 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Mon, 29 Aug 2022 17:14:22 +0800 Subject: [PATCH 035/101] Rename others.yaml to others.md --- .github/ISSUE_TEMPLATE/{others.yaml => others.md} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .github/ISSUE_TEMPLATE/{others.yaml => others.md} (100%) diff --git a/.github/ISSUE_TEMPLATE/others.yaml b/.github/ISSUE_TEMPLATE/others.md similarity index 100% rename from .github/ISSUE_TEMPLATE/others.yaml rename to .github/ISSUE_TEMPLATE/others.md From 937b07c917d47791e14e18e10da0f1e91551de7f Mon Sep 17 00:00:00 2001 From: TianYuan Date: Mon, 29 Aug 2022 17:16:08 +0800 Subject: [PATCH 036/101] Update issue templates --- .github/ISSUE_TEMPLATE/---others.md | 15 +++++++++++++++ .github/ISSUE_TEMPLATE/bug-report-asr.md | 2 +- 2 files changed, 16 insertions(+), 1 deletion(-) create mode 100644 .github/ISSUE_TEMPLATE/---others.md diff --git a/.github/ISSUE_TEMPLATE/---others.md b/.github/ISSUE_TEMPLATE/---others.md new file mode 100644 index 000000000..e135a2689 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/---others.md @@ -0,0 +1,15 @@ +--- +name: "\U0001F9E9 Others" +about: Report any other non-support related issues. +title: '' +labels: '' +assignees: '' + +--- + +## Others + + diff --git a/.github/ISSUE_TEMPLATE/bug-report-asr.md b/.github/ISSUE_TEMPLATE/bug-report-asr.md index 44f3c1401..731cc4b63 100644 --- a/.github/ISSUE_TEMPLATE/bug-report-asr.md +++ b/.github/ISSUE_TEMPLATE/bug-report-asr.md @@ -2,7 +2,7 @@ name: "\U0001F41B ASR Bug Report" about: Create a report to help us improve title: '' -labels: Bug, S2T +labels: Bug, Question, S2T assignees: zh794390558 --- From 02e8ab10a73c58ce94230b72c9142764f102661a Mon Sep 17 00:00:00 2001 From: TianYuan Date: Mon, 29 Aug 2022 17:16:45 +0800 Subject: [PATCH 037/101] Delete others.md --- .github/ISSUE_TEMPLATE/others.md | 32 -------------------------------- 1 file changed, 32 deletions(-) delete mode 100644 .github/ISSUE_TEMPLATE/others.md diff --git a/.github/ISSUE_TEMPLATE/others.md b/.github/ISSUE_TEMPLATE/others.md deleted file mode 100644 index edfe14eb5..000000000 --- a/.github/ISSUE_TEMPLATE/others.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -name: Others -about: Report any other non-support related issues. -title: '' -labels: '' -assignees: '' - ---- - -name: 🧩 其他 Others -description: 提出其他问题。 Report any other non-support related issues. -labels: - -body: -- type: markdown - attributes: - value: > - #### 你可以在这里提出任何前面几类模板不适用的问题,包括但不限于:优化性建议、框架使用体验反馈、版本兼容性问题、报错信息不清楚等。 - - #### You can report any issues that are not applicable to the previous types of templates, including but not limited to: enhancement suggestions, feedback on the use of the framework, version compatibility issues, unclear error information, etc. - -- type: textarea - id: others - attributes: - label: 问题描述 Please describe your issue - validations: - required: true - -- type: markdown - attributes: - value: > - 感谢你的贡献 🎉! Thanks for your contribution 🎉! 
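Aside on the template churn above: GitHub's classic Markdown issue templates keep a small YAML front-matter block (name, about, title, labels, assignees) at the top of each .md file, whereas the structured issue-form syntax (description:, body: with typed fields) is a different schema that GitHub only reads from .yml/.yaml files. The deleted others.md mixed the two, which is what the renames and the final plain-Markdown "Others" template sort out. The sketch below is illustrative only and is not part of these patches; it assumes PyYAML (already imported elsewhere in the repo) and simply lints the front matter of the .md templates locally, including unquoted titles such as [S2T]XXXX, which do not parse as YAML and get quoted in a later patch.

    # Illustrative sketch (not from the PaddleSpeech repo): sanity-check the
    # YAML front matter of the Markdown issue templates edited in these patches.
    from pathlib import Path

    import yaml  # assumes PyYAML is installed; the repo already uses it elsewhere

    REQUIRED_KEYS = {"name", "about"}


    def check_templates(root: str = ".github/ISSUE_TEMPLATE") -> None:
        for path in sorted(Path(root).glob("*.md")):
            parts = path.read_text(encoding="utf-8").split("---", 2)
            if len(parts) < 3:
                print(f"{path}: missing '---' front-matter block")
                continue
            try:
                meta = yaml.safe_load(parts[1]) or {}
            except yaml.YAMLError as exc:
                # e.g. an unquoted `title: [S2T]XXXX` does not parse as YAML
                print(f"{path}: front matter is not valid YAML: {exc}")
                continue
            missing = REQUIRED_KEYS - set(meta)
            if missing:
                print(f"{path}: front matter is missing {sorted(missing)}")
            # crude heuristic: issue-form fields pasted into the Markdown body
            if "body:" in parts[2] and "- type:" in parts[2]:
                print(f"{path}: body looks like issue-form YAML, move it to a .yml file")


    if __name__ == "__main__":
        check_templates()

Run from the repository root it prints one line per suspicious template and nothing when they are clean; the two heuristics roughly mirror the problems the surrounding commits fix by hand (a form-style body inside a .md template, and front matter that fails to parse).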
From 4f996611a74c7ba60d4c8cb665c3b2f872d40ce8 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Mon, 29 Aug 2022 17:17:24 +0800 Subject: [PATCH 038/101] Rename ---others.md to others.md --- .github/ISSUE_TEMPLATE/{---others.md => others.md} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .github/ISSUE_TEMPLATE/{---others.md => others.md} (100%) diff --git a/.github/ISSUE_TEMPLATE/---others.md b/.github/ISSUE_TEMPLATE/others.md similarity index 100% rename from .github/ISSUE_TEMPLATE/---others.md rename to .github/ISSUE_TEMPLATE/others.md From 0d25c92c285f18f31609c17f96a2fd2dec963f83 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Mon, 29 Aug 2022 17:19:54 +0800 Subject: [PATCH 039/101] Update and rename bug-report-asr.md to bug-report-s2t.md --- .../ISSUE_TEMPLATE/{bug-report-asr.md => bug-report-s2t.md} | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) rename .github/ISSUE_TEMPLATE/{bug-report-asr.md => bug-report-s2t.md} (95%) diff --git a/.github/ISSUE_TEMPLATE/bug-report-asr.md b/.github/ISSUE_TEMPLATE/bug-report-s2t.md similarity index 95% rename from .github/ISSUE_TEMPLATE/bug-report-asr.md rename to .github/ISSUE_TEMPLATE/bug-report-s2t.md index 731cc4b63..597e9a521 100644 --- a/.github/ISSUE_TEMPLATE/bug-report-asr.md +++ b/.github/ISSUE_TEMPLATE/bug-report-s2t.md @@ -1,7 +1,7 @@ --- -name: "\U0001F41B ASR Bug Report" +name: "\U0001F41B S2T Bug Report" about: Create a report to help us improve -title: '' +title: [S2T]XXXX labels: Bug, Question, S2T assignees: zh794390558 From 3acbdd0dd7c0a6f41b9f348cb77e079f1b9cad35 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Mon, 29 Aug 2022 17:20:39 +0800 Subject: [PATCH 040/101] Update bug-report-s2t.md --- .github/ISSUE_TEMPLATE/bug-report-s2t.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/bug-report-s2t.md b/.github/ISSUE_TEMPLATE/bug-report-s2t.md index 597e9a521..d3c808ea7 100644 --- a/.github/ISSUE_TEMPLATE/bug-report-s2t.md +++ b/.github/ISSUE_TEMPLATE/bug-report-s2t.md @@ -1,7 +1,7 @@ --- name: "\U0001F41B S2T Bug Report" about: Create a report to help us improve -title: [S2T]XXXX +title: "[S2T]XXXX" labels: Bug, Question, S2T assignees: zh794390558 From 2e4fa5cba885a300da15d3cc3c4f008883244df5 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Mon, 29 Aug 2022 17:21:27 +0800 Subject: [PATCH 041/101] Update bug-report-tts.md --- .github/ISSUE_TEMPLATE/bug-report-tts.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/bug-report-tts.md b/.github/ISSUE_TEMPLATE/bug-report-tts.md index d8f7afa82..aec202c96 100644 --- a/.github/ISSUE_TEMPLATE/bug-report-tts.md +++ b/.github/ISSUE_TEMPLATE/bug-report-tts.md @@ -1,7 +1,7 @@ --- name: "\U0001F41B TTS Bug Report" about: Create a report to help us improve -title: '' +title: '[TTS]XXXX' labels: Bug, T2S assignees: yt605155624 From a08ad4d1a6285d8dd3908b78e1bb0098d7595d44 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Mon, 29 Aug 2022 17:24:10 +0800 Subject: [PATCH 042/101] Update issue templates --- .github/ISSUE_TEMPLATE/bug-report-tts.md | 2 +- .github/ISSUE_TEMPLATE/feature-request.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug-report-tts.md b/.github/ISSUE_TEMPLATE/bug-report-tts.md index aec202c96..64b33c32e 100644 --- a/.github/ISSUE_TEMPLATE/bug-report-tts.md +++ b/.github/ISSUE_TEMPLATE/bug-report-tts.md @@ -1,7 +1,7 @@ --- name: "\U0001F41B TTS Bug Report" about: Create a report to help us improve -title: '[TTS]XXXX' +title: "[TTS]XXXX" labels: 
Bug, T2S assignees: yt605155624 diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md index 8f7e094da..10b0f3f02 100644 --- a/.github/ISSUE_TEMPLATE/feature-request.md +++ b/.github/ISSUE_TEMPLATE/feature-request.md @@ -3,7 +3,7 @@ name: "\U0001F680 Feature Request" about: As a user, I want to request a New Feature on the product. title: '' labels: feature request -assignees: '' +assignees: D-DanielYang, iftaken --- From ed16f96a9cb027d250bdcf1d3d33ff7151e2d301 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Mon, 29 Aug 2022 17:25:08 +0800 Subject: [PATCH 043/101] Update bug-report-s2t.md --- .github/ISSUE_TEMPLATE/bug-report-s2t.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/bug-report-s2t.md b/.github/ISSUE_TEMPLATE/bug-report-s2t.md index d3c808ea7..512cdbb01 100644 --- a/.github/ISSUE_TEMPLATE/bug-report-s2t.md +++ b/.github/ISSUE_TEMPLATE/bug-report-s2t.md @@ -2,7 +2,7 @@ name: "\U0001F41B S2T Bug Report" about: Create a report to help us improve title: "[S2T]XXXX" -labels: Bug, Question, S2T +labels: Bug, S2T assignees: zh794390558 --- From f5367f5efb32ea8a811146bb9088ab303291cf5a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20An=20=EF=BC=88An=20Hongliang=EF=BC=89?= Date: Tue, 30 Aug 2022 13:54:00 +0800 Subject: [PATCH 044/101] [TTS]fix bug of tone modify (#2323) * add special tone modifed case Co-authored-by: TianYuan --- paddlespeech/t2s/frontend/tone_sandhi.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/paddlespeech/t2s/frontend/tone_sandhi.py b/paddlespeech/t2s/frontend/tone_sandhi.py index e5ef617a9..ee3aa84ad 100644 --- a/paddlespeech/t2s/frontend/tone_sandhi.py +++ b/paddlespeech/t2s/frontend/tone_sandhi.py @@ -42,7 +42,7 @@ class ToneSandhi(): '木头', '木匠', '朋友', '月饼', '月亮', '暖和', '明白', '时候', '新鲜', '故事', '收拾', '收成', '提防', '挖苦', '挑剔', '指甲', '指头', '拾掇', '拳头', '拨弄', '招牌', '招呼', '抬举', '护士', '折腾', '扫帚', '打量', '打算', '打扮', '打听', '打发', '扎实', '扁担', - '戒指', '懒得', '意识', '意思', '情形', '悟性', '怪物', '思量', '怎么', '念头', '念叨', + '戒指', '懒得', '意识', '意思', '悟性', '怪物', '思量', '怎么', '念头', '念叨', '别人', '快活', '忙活', '志气', '心思', '得罪', '张罗', '弟兄', '开通', '应酬', '庄稼', '干事', '帮手', '帐篷', '希罕', '师父', '师傅', '巴结', '巴掌', '差事', '工夫', '岁数', '屁股', '尾巴', '少爷', '小气', '小伙', '将就', '对头', '对付', '寡妇', '家伙', '客气', '实在', @@ -60,7 +60,7 @@ class ToneSandhi(): '邋遢', '费用', '冤家', '甜头', '介绍', '荒唐', '大人', '泥鳅', '幸福', '熟悉', '计划', '扑腾', '蜡烛', '姥爷', '照顾', '喉咙', '吉他', '弄堂', '蚂蚱', '凤凰', '拖沓', '寒碜', '糟蹋', '倒腾', '报复', '逻辑', '盘缠', '喽啰', '牢骚', '咖喱', '扫把', '惦记', '戏弄', - '将军', '别人' + '将军' } self.must_not_neural_tone_words = { '男子', '女子', '分子', '原子', '量子', '莲子', '石子', '瓜子', '电子', '人人', '虎虎', @@ -84,7 +84,9 @@ class ToneSandhi(): if j - 1 >= 0 and item == word[j - 1] and pos[0] in {"n", "v", "a"}: finals[j] = finals[j][:-1] + "5" ge_idx = word.find("个") - if len(word) >= 1 and word[-1] in "吧呢啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶": + if (len(word) > 1 and word[-1] in "吧呢啊呐噻嘛吖嗨呐哦哒滴哩哟喽啰耶喔诶") or ( + len(word) > 1 and word[-2] in '好是帅酷棒衰烂臭狗糗' and + word[-1] == '额') or (len(word) == 1 and word[-1] in "额嗯"): finals[-1] = finals[-1][:-1] + "5" elif len(word) >= 1 and word[-1] in "的地得": finals[-1] = finals[-1][:-1] + "5" @@ -169,6 +171,7 @@ class ToneSandhi(): return new_word_list def _three_sandhi(self, word: str, finals: List[str]) -> List[str]: + if len(word) == 2 and self._all_tone_three(finals): finals[0] = finals[0][:-1] + "2" elif len(word) == 3: @@ -346,6 +349,7 @@ class ToneSandhi(): def modified_tone(self, word: str, pos: str, finals: List[str]) 
-> List[str]: + finals = self._bu_sandhi(word, finals) finals = self._yi_sandhi(word, finals) finals = self._neural_sandhi(word, pos, finals) From 733ec7f2bc82c62be5c2959230bc43092be02435 Mon Sep 17 00:00:00 2001 From: tianhao zhang <15600919271@163.com> Date: Tue, 30 Aug 2022 07:59:55 +0000 Subject: [PATCH 045/101] fix conformer multi-gpu training test=asr --- paddlespeech/s2t/models/u2/u2.py | 4 +-- paddlespeech/s2t/modules/attention.py | 25 ++++++------- .../s2t/modules/conformer_convolution.py | 12 +++---- paddlespeech/s2t/modules/decoder_layer.py | 17 ++++++--- paddlespeech/s2t/modules/encoder.py | 36 ++++++++++--------- paddlespeech/s2t/modules/encoder_layer.py | 15 ++++---- 6 files changed, 58 insertions(+), 51 deletions(-) diff --git a/paddlespeech/s2t/models/u2/u2.py b/paddlespeech/s2t/models/u2/u2.py index e19f411cf..a812abcbd 100644 --- a/paddlespeech/s2t/models/u2/u2.py +++ b/paddlespeech/s2t/models/u2/u2.py @@ -605,8 +605,8 @@ class U2BaseModel(ASRInterface, nn.Layer): xs: paddle.Tensor, offset: int, required_cache_size: int, - att_cache: paddle.Tensor=paddle.zeros([0, 0, 0, 0]), - cnn_cache: paddle.Tensor=paddle.zeros([0, 0, 0, 0]), + att_cache: paddle.Tensor, + cnn_cache: paddle.Tensor, ) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor]: """ Export interface for c++ call, give input chunk xs, and return output from time 0 to current chunk. diff --git a/paddlespeech/s2t/modules/attention.py b/paddlespeech/s2t/modules/attention.py index b6d615867..cbcaccc26 100644 --- a/paddlespeech/s2t/modules/attention.py +++ b/paddlespeech/s2t/modules/attention.py @@ -15,7 +15,6 @@ # Modified from wenet(https://github.com/wenet-e2e/wenet) """Multi-Head Attention layer definition.""" import math -from typing import Optional from typing import Tuple import paddle @@ -83,11 +82,11 @@ class MultiHeadedAttention(nn.Layer): return q, k, v - def forward_attention(self, - value: paddle.Tensor, + def forward_attention( + self, + value: paddle.Tensor, scores: paddle.Tensor, - mask: paddle.Tensor = paddle.ones([0, 0, 0], dtype=paddle.bool), - ) -> paddle.Tensor: + mask: paddle.Tensor, ) -> paddle.Tensor: """Compute attention context vector. Args: value (paddle.Tensor): Transformed value, size @@ -108,7 +107,7 @@ class MultiHeadedAttention(nn.Layer): # When will `if mask.size(2) > 0` be False? # 1. onnx(16/-1, -1/-1, 16/0) # 2. jit (16/-1, -1/-1, 16/0, 16/4) - if paddle.shape(mask)[2] > 0: # time2 > 0 + if paddle.shape(mask)[2] > 0: # time2 > 0 mask = mask.unsqueeze(1).equal(0) # (batch, 1, *, time2) # for last chunk, time2 might be larger than scores.size(-1) mask = mask[:, :, :, :paddle.shape(scores)[-1]] @@ -131,10 +130,9 @@ class MultiHeadedAttention(nn.Layer): query: paddle.Tensor, key: paddle.Tensor, value: paddle.Tensor, - mask: paddle.Tensor = paddle.ones([0,0,0], dtype=paddle.bool), - pos_emb: paddle.Tensor = paddle.empty([0]), - cache: paddle.Tensor = paddle.zeros([0,0,0,0]) - ) -> Tuple[paddle.Tensor, paddle.Tensor]: + mask: paddle.Tensor, + pos_emb: paddle.Tensor, + cache: paddle.Tensor) -> Tuple[paddle.Tensor, paddle.Tensor]: """Compute scaled dot product attention. Args: query (paddle.Tensor): Query tensor (#batch, time1, size). 
@@ -247,10 +245,9 @@ class RelPositionMultiHeadedAttention(MultiHeadedAttention): query: paddle.Tensor, key: paddle.Tensor, value: paddle.Tensor, - mask: paddle.Tensor = paddle.ones([0,0,0], dtype=paddle.bool), - pos_emb: paddle.Tensor = paddle.empty([0]), - cache: paddle.Tensor = paddle.zeros([0,0,0,0]) - ) -> Tuple[paddle.Tensor, paddle.Tensor]: + mask: paddle.Tensor, + pos_emb: paddle.Tensor, + cache: paddle.Tensor) -> Tuple[paddle.Tensor, paddle.Tensor]: """Compute 'Scaled Dot Product Attention' with rel. positional encoding. Args: query (paddle.Tensor): Query tensor (#batch, time1, size). diff --git a/paddlespeech/s2t/modules/conformer_convolution.py b/paddlespeech/s2t/modules/conformer_convolution.py index c384b9c78..23aecd7f1 100644 --- a/paddlespeech/s2t/modules/conformer_convolution.py +++ b/paddlespeech/s2t/modules/conformer_convolution.py @@ -14,7 +14,6 @@ # limitations under the License. # Modified from wenet(https://github.com/wenet-e2e/wenet) """ConvolutionModule definition.""" -from typing import Optional from typing import Tuple import paddle @@ -108,9 +107,8 @@ class ConvolutionModule(nn.Layer): def forward(self, x: paddle.Tensor, - mask_pad: paddle.Tensor= paddle.ones([0,0,0], dtype=paddle.bool), - cache: paddle.Tensor= paddle.zeros([0,0,0]), - ) -> Tuple[paddle.Tensor, paddle.Tensor]: + mask_pad: paddle.Tensor, + cache: paddle.Tensor) -> Tuple[paddle.Tensor, paddle.Tensor]: """Compute convolution module. Args: x (paddle.Tensor): Input tensor (#batch, time, channels). @@ -127,11 +125,11 @@ class ConvolutionModule(nn.Layer): x = x.transpose([0, 2, 1]) # [B, C, T] # mask batch padding - if paddle.shape(mask_pad)[2] > 0: # time > 0 + if paddle.shape(mask_pad)[2] > 0: # time > 0 x = x.masked_fill(mask_pad, 0.0) if self.lorder > 0: - if paddle.shape(cache)[2] == 0: # cache_t == 0 + if paddle.shape(cache)[2] == 0: # cache_t == 0 x = nn.functional.pad( x, [self.lorder, 0], 'constant', 0.0, data_format='NCL') else: @@ -161,7 +159,7 @@ class ConvolutionModule(nn.Layer): x = self.pointwise_conv2(x) # mask batch padding - if paddle.shape(mask_pad)[2] > 0: # time > 0 + if paddle.shape(mask_pad)[2] > 0: # time > 0 x = x.masked_fill(mask_pad, 0.0) x = x.transpose([0, 2, 1]) # [B, T, C] diff --git a/paddlespeech/s2t/modules/decoder_layer.py b/paddlespeech/s2t/modules/decoder_layer.py index 37b124e84..c8843b723 100644 --- a/paddlespeech/s2t/modules/decoder_layer.py +++ b/paddlespeech/s2t/modules/decoder_layer.py @@ -121,11 +121,16 @@ class DecoderLayer(nn.Layer): if self.concat_after: tgt_concat = paddle.cat( - (tgt_q, self.self_attn(tgt_q, tgt, tgt, tgt_q_mask)[0]), dim=-1) + (tgt_q, self.self_attn(tgt_q, tgt, tgt, tgt_q_mask, + paddle.empty([0]), + paddle.zeros([0, 0, 0, 0]))[0]), + dim=-1) x = residual + self.concat_linear1(tgt_concat) else: x = residual + self.dropout( - self.self_attn(tgt_q, tgt, tgt, tgt_q_mask)[0]) + self.self_attn(tgt_q, tgt, tgt, tgt_q_mask, + paddle.empty([0]), paddle.zeros([0, 0, 0, 0]))[ + 0]) if not self.normalize_before: x = self.norm1(x) @@ -134,11 +139,15 @@ class DecoderLayer(nn.Layer): x = self.norm2(x) if self.concat_after: x_concat = paddle.cat( - (x, self.src_attn(x, memory, memory, memory_mask)[0]), dim=-1) + (x, self.src_attn(x, memory, memory, memory_mask, + paddle.empty([0]), + paddle.zeros([0, 0, 0, 0]))[0]), + dim=-1) x = residual + self.concat_linear2(x_concat) else: x = residual + self.dropout( - self.src_attn(x, memory, memory, memory_mask)[0]) + self.src_attn(x, memory, memory, memory_mask, + paddle.empty([0]), paddle.zeros([0, 0, 0, 
0]))[0]) if not self.normalize_before: x = self.norm2(x) diff --git a/paddlespeech/s2t/modules/encoder.py b/paddlespeech/s2t/modules/encoder.py index bff2d69bb..6001afd4b 100644 --- a/paddlespeech/s2t/modules/encoder.py +++ b/paddlespeech/s2t/modules/encoder.py @@ -14,8 +14,6 @@ # limitations under the License. # Modified from wenet(https://github.com/wenet-e2e/wenet) """Encoder definition.""" -from typing import List -from typing import Optional from typing import Tuple import paddle @@ -177,7 +175,9 @@ class BaseEncoder(nn.Layer): decoding_chunk_size, self.static_chunk_size, num_decoding_left_chunks) for layer in self.encoders: - xs, chunk_masks, _, _ = layer(xs, chunk_masks, pos_emb, mask_pad) + xs, chunk_masks, _, _ = layer(xs, chunk_masks, pos_emb, mask_pad, + paddle.zeros([0, 0, 0, 0]), + paddle.zeros([0, 0, 0, 0])) if self.normalize_before: xs = self.after_norm(xs) # Here we assume the mask is not changed in encoder layers, so just @@ -190,9 +190,9 @@ class BaseEncoder(nn.Layer): xs: paddle.Tensor, offset: int, required_cache_size: int, - att_cache: paddle.Tensor = paddle.zeros([0,0,0,0]), - cnn_cache: paddle.Tensor = paddle.zeros([0,0,0,0]), - att_mask: paddle.Tensor = paddle.ones([0,0,0], dtype=paddle.bool), + att_cache: paddle.Tensor, + cnn_cache: paddle.Tensor, + att_mask: paddle.Tensor, ) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor]: """ Forward just one chunk Args: @@ -227,7 +227,7 @@ class BaseEncoder(nn.Layer): xs = self.global_cmvn(xs) # before embed, xs=(B, T, D1), pos_emb=(B=1, T, D) - xs, pos_emb, _ = self.embed(xs, tmp_masks, offset=offset) + xs, pos_emb, _ = self.embed(xs, tmp_masks, offset=offset) # after embed, xs=(B=1, chunk_size, hidden-dim) elayers = paddle.shape(att_cache)[0] @@ -252,14 +252,16 @@ class BaseEncoder(nn.Layer): # att_cache[i:i+1] = (1, head, cache_t1, d_k*2) # cnn_cache[i:i+1] = (1, B=1, hidden-dim, cache_t2) xs, _, new_att_cache, new_cnn_cache = layer( - xs, att_mask, pos_emb, - att_cache=att_cache[i:i+1] if elayers > 0 else att_cache, - cnn_cache=cnn_cache[i:i+1] if paddle.shape(cnn_cache)[0] > 0 else cnn_cache, - ) + xs, + att_mask, + pos_emb, + att_cache=att_cache[i:i + 1] if elayers > 0 else att_cache, + cnn_cache=cnn_cache[i:i + 1] + if paddle.shape(cnn_cache)[0] > 0 else cnn_cache, ) # new_att_cache = (1, head, attention_key_size, d_k*2) # new_cnn_cache = (B=1, hidden-dim, cache_t2) - r_att_cache.append(new_att_cache[:,:, next_cache_start:, :]) - r_cnn_cache.append(new_cnn_cache.unsqueeze(0)) # add elayer dim + r_att_cache.append(new_att_cache[:, :, next_cache_start:, :]) + r_cnn_cache.append(new_cnn_cache.unsqueeze(0)) # add elayer dim if self.normalize_before: xs = self.after_norm(xs) @@ -270,7 +272,6 @@ class BaseEncoder(nn.Layer): r_cnn_cache = paddle.concat(r_cnn_cache, axis=0) return xs, r_att_cache, r_cnn_cache - def forward_chunk_by_chunk( self, xs: paddle.Tensor, @@ -315,8 +316,8 @@ class BaseEncoder(nn.Layer): num_frames = xs.shape[1] required_cache_size = decoding_chunk_size * num_decoding_left_chunks - att_cache: paddle.Tensor = paddle.zeros([0,0,0,0]) - cnn_cache: paddle.Tensor = paddle.zeros([0,0,0,0]) + att_cache: paddle.Tensor = paddle.zeros([0, 0, 0, 0]) + cnn_cache: paddle.Tensor = paddle.zeros([0, 0, 0, 0]) outputs = [] offset = 0 @@ -326,7 +327,8 @@ class BaseEncoder(nn.Layer): chunk_xs = xs[:, cur:end, :] (y, att_cache, cnn_cache) = self.forward_chunk( - chunk_xs, offset, required_cache_size, att_cache, cnn_cache) + chunk_xs, offset, required_cache_size, att_cache, cnn_cache, + paddle.ones([0, 0, 0], 
dtype=paddle.bool)) outputs.append(y) offset += y.shape[1] diff --git a/paddlespeech/s2t/modules/encoder_layer.py b/paddlespeech/s2t/modules/encoder_layer.py index 5f810dfde..8fd991ec6 100644 --- a/paddlespeech/s2t/modules/encoder_layer.py +++ b/paddlespeech/s2t/modules/encoder_layer.py @@ -76,9 +76,9 @@ class TransformerEncoderLayer(nn.Layer): x: paddle.Tensor, mask: paddle.Tensor, pos_emb: paddle.Tensor, - mask_pad: paddle.Tensor=paddle.ones([0, 0, 0], dtype=paddle.bool), - att_cache: paddle.Tensor=paddle.zeros([0, 0, 0, 0]), - cnn_cache: paddle.Tensor=paddle.zeros([0, 0, 0, 0]), + mask_pad: paddle.Tensor, + att_cache: paddle.Tensor, + cnn_cache: paddle.Tensor, ) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor, paddle.Tensor]: """Compute encoded features. Args: @@ -105,7 +105,8 @@ class TransformerEncoderLayer(nn.Layer): if self.normalize_before: x = self.norm1(x) - x_att, new_att_cache = self.self_attn(x, x, x, mask, cache=att_cache) + x_att, new_att_cache = self.self_attn( + x, x, x, mask, paddle.empty([0]), cache=att_cache) if self.concat_after: x_concat = paddle.concat((x, x_att), axis=-1) @@ -193,9 +194,9 @@ class ConformerEncoderLayer(nn.Layer): x: paddle.Tensor, mask: paddle.Tensor, pos_emb: paddle.Tensor, - mask_pad: paddle.Tensor=paddle.ones([0, 0, 0], dtype=paddle.bool), - att_cache: paddle.Tensor=paddle.zeros([0, 0, 0, 0]), - cnn_cache: paddle.Tensor=paddle.zeros([0, 0, 0, 0]), + mask_pad: paddle.Tensor, + att_cache: paddle.Tensor, + cnn_cache: paddle.Tensor, ) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor, paddle.Tensor]: """Compute encoded features. Args: From 4ceea2c78da2e3a7fb47184ed4c47daf85a0462c Mon Sep 17 00:00:00 2001 From: TianYuan Date: Tue, 30 Aug 2022 20:29:08 +0800 Subject: [PATCH 046/101] Update README.md --- demos/speaker_verification/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/demos/speaker_verification/README.md b/demos/speaker_verification/README.md index 900b5ae40..55f9a7360 100644 --- a/demos/speaker_verification/README.md +++ b/demos/speaker_verification/README.md @@ -19,6 +19,7 @@ The input of this cli demo should be a WAV file(`.wav`), and the sample rate mus Here are sample files for this demo that can be downloaded: ```bash wget -c https://paddlespeech.bj.bcebos.com/vector/audio/85236145389.wav +wget -c https://paddlespeech.bj.bcebos.com/vector/audio/123456789.wav ``` ### 3. Usage From e147b96cf08df04f079105377d2348933dec5f0b Mon Sep 17 00:00:00 2001 From: TianYuan Date: Tue, 30 Aug 2022 20:30:15 +0800 Subject: [PATCH 047/101] Update README_cn.md --- demos/speaker_verification/README_cn.md | 1 + 1 file changed, 1 insertion(+) diff --git a/demos/speaker_verification/README_cn.md b/demos/speaker_verification/README_cn.md index f6afa86ac..85224699c 100644 --- a/demos/speaker_verification/README_cn.md +++ b/demos/speaker_verification/README_cn.md @@ -19,6 +19,7 @@ ```bash # 该音频的内容是数字串 85236145389 wget -c https://paddlespeech.bj.bcebos.com/vector/audio/85236145389.wav +wget -c https://paddlespeech.bj.bcebos.com/vector/audio/123456789.wav ``` ### 3. 
使用方法 - 命令行 (推荐使用) From ed80b0e2c3a01382effb8f0f85a4a135679ca980 Mon Sep 17 00:00:00 2001 From: tianhao zhang <15600919271@163.com> Date: Tue, 30 Aug 2022 12:41:59 +0000 Subject: [PATCH 048/101] fix multigpu training test=asr --- paddlespeech/s2t/models/u2/u2.py | 4 +-- paddlespeech/s2t/modules/attention.py | 35 +++++++++++-------- .../s2t/modules/conformer_convolution.py | 10 +++--- paddlespeech/s2t/modules/encoder.py | 6 ++-- paddlespeech/s2t/modules/encoder_layer.py | 14 ++++---- 5 files changed, 39 insertions(+), 30 deletions(-) diff --git a/paddlespeech/s2t/models/u2/u2.py b/paddlespeech/s2t/models/u2/u2.py index a812abcbd..813e1e529 100644 --- a/paddlespeech/s2t/models/u2/u2.py +++ b/paddlespeech/s2t/models/u2/u2.py @@ -605,8 +605,8 @@ class U2BaseModel(ASRInterface, nn.Layer): xs: paddle.Tensor, offset: int, required_cache_size: int, - att_cache: paddle.Tensor, - cnn_cache: paddle.Tensor, + att_cache: paddle.Tensor, # paddle.zeros([0, 0, 0, 0]) + cnn_cache: paddle.Tensor, # paddle.zeros([0, 0, 0, 0]) ) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor]: """ Export interface for c++ call, give input chunk xs, and return output from time 0 to current chunk. diff --git a/paddlespeech/s2t/modules/attention.py b/paddlespeech/s2t/modules/attention.py index cbcaccc26..92990048d 100644 --- a/paddlespeech/s2t/modules/attention.py +++ b/paddlespeech/s2t/modules/attention.py @@ -86,7 +86,8 @@ class MultiHeadedAttention(nn.Layer): self, value: paddle.Tensor, scores: paddle.Tensor, - mask: paddle.Tensor, ) -> paddle.Tensor: + mask: paddle.Tensor, # paddle.ones([0, 0, 0], dtype=paddle.bool) + ) -> paddle.Tensor: """Compute attention context vector. Args: value (paddle.Tensor): Transformed value, size @@ -126,13 +127,15 @@ class MultiHeadedAttention(nn.Layer): return self.linear_out(x) # (batch, time1, d_model) - def forward(self, - query: paddle.Tensor, - key: paddle.Tensor, - value: paddle.Tensor, - mask: paddle.Tensor, - pos_emb: paddle.Tensor, - cache: paddle.Tensor) -> Tuple[paddle.Tensor, paddle.Tensor]: + def forward( + self, + query: paddle.Tensor, + key: paddle.Tensor, + value: paddle.Tensor, + mask: paddle.Tensor, # paddle.ones([0,0,0], dtype=paddle.bool) + pos_emb: paddle.Tensor, # paddle.empty([0]) + cache: paddle.Tensor # paddle.zeros([0,0,0,0]) + ) -> Tuple[paddle.Tensor, paddle.Tensor]: """Compute scaled dot product attention. Args: query (paddle.Tensor): Query tensor (#batch, time1, size). @@ -241,13 +244,15 @@ class RelPositionMultiHeadedAttention(MultiHeadedAttention): return x - def forward(self, - query: paddle.Tensor, - key: paddle.Tensor, - value: paddle.Tensor, - mask: paddle.Tensor, - pos_emb: paddle.Tensor, - cache: paddle.Tensor) -> Tuple[paddle.Tensor, paddle.Tensor]: + def forward( + self, + query: paddle.Tensor, + key: paddle.Tensor, + value: paddle.Tensor, + mask: paddle.Tensor, # paddle.ones([0,0,0], dtype=paddle.bool) + pos_emb: paddle.Tensor, # paddle.empty([0]) + cache: paddle.Tensor # paddle.zeros([0,0,0,0]) + ) -> Tuple[paddle.Tensor, paddle.Tensor]: """Compute 'Scaled Dot Product Attention' with rel. positional encoding. Args: query (paddle.Tensor): Query tensor (#batch, time1, size). 
diff --git a/paddlespeech/s2t/modules/conformer_convolution.py b/paddlespeech/s2t/modules/conformer_convolution.py index 23aecd7f1..b35fea5b9 100644 --- a/paddlespeech/s2t/modules/conformer_convolution.py +++ b/paddlespeech/s2t/modules/conformer_convolution.py @@ -105,10 +105,12 @@ class ConvolutionModule(nn.Layer): ) self.activation = activation - def forward(self, - x: paddle.Tensor, - mask_pad: paddle.Tensor, - cache: paddle.Tensor) -> Tuple[paddle.Tensor, paddle.Tensor]: + def forward( + self, + x: paddle.Tensor, + mask_pad: paddle.Tensor, # paddle.ones([0,0,0], dtype=paddle.bool) + cache: paddle.Tensor # paddle.zeros([0,0,0,0]) + ) -> Tuple[paddle.Tensor, paddle.Tensor]: """Compute convolution module. Args: x (paddle.Tensor): Input tensor (#batch, time, channels). diff --git a/paddlespeech/s2t/modules/encoder.py b/paddlespeech/s2t/modules/encoder.py index 6001afd4b..abdaf5ea7 100644 --- a/paddlespeech/s2t/modules/encoder.py +++ b/paddlespeech/s2t/modules/encoder.py @@ -190,9 +190,9 @@ class BaseEncoder(nn.Layer): xs: paddle.Tensor, offset: int, required_cache_size: int, - att_cache: paddle.Tensor, - cnn_cache: paddle.Tensor, - att_mask: paddle.Tensor, + att_cache: paddle.Tensor, # paddle.zeros([0,0,0,0]) + cnn_cache: paddle.Tensor, # paddle.zeros([0,0,0,0]), + att_mask: paddle.Tensor, # paddle.ones([0,0,0], dtype=paddle.bool) ) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor]: """ Forward just one chunk Args: diff --git a/paddlespeech/s2t/modules/encoder_layer.py b/paddlespeech/s2t/modules/encoder_layer.py index 8fd991ec6..3972ff90a 100644 --- a/paddlespeech/s2t/modules/encoder_layer.py +++ b/paddlespeech/s2t/modules/encoder_layer.py @@ -76,9 +76,10 @@ class TransformerEncoderLayer(nn.Layer): x: paddle.Tensor, mask: paddle.Tensor, pos_emb: paddle.Tensor, - mask_pad: paddle.Tensor, - att_cache: paddle.Tensor, - cnn_cache: paddle.Tensor, + mask_pad: paddle. + Tensor, # paddle.ones([0, 0, 0], dtype=paddle.bool) + att_cache: paddle.Tensor, # paddle.zeros([0, 0, 0, 0]) + cnn_cache: paddle.Tensor, # paddle.zeros([0, 0, 0, 0]) ) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor, paddle.Tensor]: """Compute encoded features. Args: @@ -194,9 +195,10 @@ class ConformerEncoderLayer(nn.Layer): x: paddle.Tensor, mask: paddle.Tensor, pos_emb: paddle.Tensor, - mask_pad: paddle.Tensor, - att_cache: paddle.Tensor, - cnn_cache: paddle.Tensor, + mask_pad: paddle. + Tensor, # paddle.ones([0, 0, 0], dtype=paddle.bool) + att_cache: paddle.Tensor, # paddle.zeros([0, 0, 0, 0]) + cnn_cache: paddle.Tensor, # paddle.zeros([0, 0, 0, 0]) ) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor, paddle.Tensor]: """Compute encoded features. 
Args: From 1dfca4ef736493a99e2ac35f4d985b20472aa197 Mon Sep 17 00:00:00 2001 From: tianhao zhang <15600919271@163.com> Date: Wed, 31 Aug 2022 02:43:54 +0000 Subject: [PATCH 049/101] fix multigpu training --- .../server/engine/asr/online/python/asr_engine.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/paddlespeech/server/engine/asr/online/python/asr_engine.py b/paddlespeech/server/engine/asr/online/python/asr_engine.py index 4df38f09d..96d4823e2 100644 --- a/paddlespeech/server/engine/asr/online/python/asr_engine.py +++ b/paddlespeech/server/engine/asr/online/python/asr_engine.py @@ -21,10 +21,10 @@ import paddle from numpy import float32 from yacs.config import CfgNode +from paddlespeech.audio.transform.transformation import Transformation from paddlespeech.cli.asr.infer import ASRExecutor from paddlespeech.cli.log import logger from paddlespeech.resource import CommonTaskResource -from paddlespeech.audio.transform.transformation import Transformation from paddlespeech.s2t.frontend.featurizer.text_featurizer import TextFeaturizer from paddlespeech.s2t.modules.ctc import CTCDecoder from paddlespeech.s2t.utils.tensor_utils import add_sos_eos @@ -130,8 +130,8 @@ class PaddleASRConnectionHanddler: ## conformer # cache for conformer online - self.att_cache = paddle.zeros([0,0,0,0]) - self.cnn_cache = paddle.zeros([0,0,0,0]) + self.att_cache = paddle.zeros([0, 0, 0, 0]) + self.cnn_cache = paddle.zeros([0, 0, 0, 0]) self.encoder_out = None # conformer decoding state @@ -474,9 +474,10 @@ class PaddleASRConnectionHanddler: # cur chunk chunk_xs = self.cached_feat[:, cur:end, :] # forward chunk - (y, self.att_cache, self.cnn_cache) = self.model.encoder.forward_chunk( - chunk_xs, self.offset, required_cache_size, - self.att_cache, self.cnn_cache) + (y, self.att_cache, + self.cnn_cache) = self.model.encoder.forward_chunk( + chunk_xs, self.offset, required_cache_size, self.att_cache, + self.cnn_cache, paddle.ones([0, 0, 0], dtype=paddle.bool)) outputs.append(y) # update the global offset, in decoding frame unit From e0081b7e504fbe4a9cb82e88e2e3aa9595066b95 Mon Sep 17 00:00:00 2001 From: Hui Zhang Date: Wed, 31 Aug 2022 11:12:45 +0800 Subject: [PATCH 050/101] [vec][spk] add speechbrain ecapa-tdnn result --- examples/voxceleb/sv0/RESULT.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/examples/voxceleb/sv0/RESULT.md b/examples/voxceleb/sv0/RESULT.md index 56ee887c6..aa55584d2 100644 --- a/examples/voxceleb/sv0/RESULT.md +++ b/examples/voxceleb/sv0/RESULT.md @@ -5,3 +5,7 @@ | Model | Number of Params | Release | Config | dim | Test set | Cosine | Cosine + S-Norm | | --- | --- | --- | --- | --- | --- | --- | ---- | | ECAPA-TDNN | 85M | 0.2.1 | conf/ecapa_tdnn.yaml | 192 | test | 0.8188 | 0.7815| + +> [SpeechBrain result](https://github.com/speechbrain/speechbrain/tree/develop/recipes/VoxCeleb/SpeakerRec#speaker-verification-using-ecapa-tdnn-embeddings): +> EER = 0.90% (voxceleb1 + voxceleb2) without s-norm +> EER = 0.80% (voxceleb1 + voxceleb2) with s-norm. 
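
The common thread in PATCH 045, 048, 049 and 053 above is that the Paddle tensor default arguments (for example `att_cache: paddle.Tensor = paddle.zeros([0, 0, 0, 0])`) are removed from `forward_chunk()` and from the layer-level `forward()` signatures as part of the multi-GPU training fix, so callers now pass the attention cache, the CNN cache and the mask explicitly, as `forward_chunk_by_chunk()` and the online `asr_engine.py` do in the diffs. Below is a minimal sketch of that calling convention. It is illustrative and not taken from the patches: the `encoder` object is assumed to expose the new six-argument `forward_chunk()`, and `decoding_window` / `stride` are hypothetical frame counts (in the real code they are derived inside `forward_chunk_by_chunk()` from the embedding layer's subsampling rate and right context).

```python
import paddle


def stream_encode(encoder, feats, decoding_window, stride,
                  decoding_chunk_size=16, num_decoding_left_chunks=-1):
    """Illustrative driver for the refactored forward_chunk() interface.

    Assumptions: `encoder` exposes forward_chunk(xs, offset,
    required_cache_size, att_cache, cnn_cache, att_mask) returning
    (ys, new_att_cache, new_cnn_cache); `feats` is a (batch, time, dim)
    feature tensor; `decoding_window` and `stride` are hypothetical
    frame counts chosen by the caller.
    """
    required_cache_size = decoding_chunk_size * num_decoding_left_chunks

    # Caches start empty. Since the patches removed the tensor default
    # arguments, every argument is now passed explicitly by the caller.
    att_cache = paddle.zeros([0, 0, 0, 0])
    cnn_cache = paddle.zeros([0, 0, 0, 0])
    att_mask = paddle.ones([0, 0, 0], dtype=paddle.bool)

    outputs = []
    offset = 0
    num_frames = feats.shape[1]
    for cur in range(0, num_frames, stride):
        end = min(cur + decoding_window, num_frames)
        chunk_xs = feats[:, cur:end, :]
        # Feed one chunk and carry the returned caches into the next call.
        y, att_cache, cnn_cache = encoder.forward_chunk(
            chunk_xs, offset, required_cache_size, att_cache, cnn_cache,
            att_mask)
        outputs.append(y)
        offset += y.shape[1]
    return paddle.concat(outputs, axis=1)
```

The streaming server follows the same pattern in PATCH 049 and 052: the connection handler keeps `self.att_cache` and `self.cnn_cache` between calls so that encoder state carries over from one audio chunk to the next.
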
From b4bb785b17087555d588195cb8326b89390e8758 Mon Sep 17 00:00:00 2001 From: Hui Zhang Date: Wed, 31 Aug 2022 11:19:09 +0800 Subject: [PATCH 051/101] Update README.md --- demos/audio_searching/README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/demos/audio_searching/README.md b/demos/audio_searching/README.md index db38d14ed..0fc901432 100644 --- a/demos/audio_searching/README.md +++ b/demos/audio_searching/README.md @@ -226,6 +226,12 @@ recall and elapsed time statistics are shown in the following figure: The retrieval framework based on Milvus takes about 2.9 milliseconds to retrieve on the premise of 90% recall rate, and it takes about 500 milliseconds for feature extraction (testing audio takes about 5 seconds), that is, a single audio test takes about 503 milliseconds in total, which can meet most application scenarios. +* compute embeding takes 500 ms +* retrieval with cosine takes 2.9 ms +* total takes 503 ms + +> test audio is 5 sec + ### 6.Pretrained Models Here is a list of pretrained models released by PaddleSpeech : From ed2819d7afe1784eb0baa3e11111bc51b1a04dde Mon Sep 17 00:00:00 2001 From: tianhao zhang <15600919271@163.com> Date: Wed, 31 Aug 2022 06:20:24 +0000 Subject: [PATCH 052/101] fix format test=asr --- paddlespeech/s2t/modules/encoder_layer.py | 3 +-- .../server/engine/asr/online/python/asr_engine.py | 8 ++++++-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/paddlespeech/s2t/modules/encoder_layer.py b/paddlespeech/s2t/modules/encoder_layer.py index 3972ff90a..4555b535f 100644 --- a/paddlespeech/s2t/modules/encoder_layer.py +++ b/paddlespeech/s2t/modules/encoder_layer.py @@ -195,8 +195,7 @@ class ConformerEncoderLayer(nn.Layer): x: paddle.Tensor, mask: paddle.Tensor, pos_emb: paddle.Tensor, - mask_pad: paddle. 
- Tensor, # paddle.ones([0, 0, 0], dtype=paddle.bool) + mask_pad: paddle.Tensor, #paddle.ones([0, 0, 0],dtype=paddle.bool) att_cache: paddle.Tensor, # paddle.zeros([0, 0, 0, 0]) cnn_cache: paddle.Tensor, # paddle.zeros([0, 0, 0, 0]) ) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor, paddle.Tensor]: diff --git a/paddlespeech/server/engine/asr/online/python/asr_engine.py b/paddlespeech/server/engine/asr/online/python/asr_engine.py index 96d4823e2..87d88ee60 100644 --- a/paddlespeech/server/engine/asr/online/python/asr_engine.py +++ b/paddlespeech/server/engine/asr/online/python/asr_engine.py @@ -476,8 +476,12 @@ class PaddleASRConnectionHanddler: # forward chunk (y, self.att_cache, self.cnn_cache) = self.model.encoder.forward_chunk( - chunk_xs, self.offset, required_cache_size, self.att_cache, - self.cnn_cache, paddle.ones([0, 0, 0], dtype=paddle.bool)) + chunk_xs, + self.offset, + required_cache_size, + att_cache=self.att_cache, + cnn_cache=self.cnn_cache, + att_mask=paddle.ones([0, 0, 0], dtype=paddle.bool)) outputs.append(y) # update the global offset, in decoding frame unit From cdcb1a531659e46ccae84d02388da3f72057a3c3 Mon Sep 17 00:00:00 2001 From: tianhao zhang <15600919271@163.com> Date: Wed, 31 Aug 2022 06:55:49 +0000 Subject: [PATCH 053/101] s2t: fix encoder.py --- paddlespeech/s2t/modules/encoder.py | 1 + 1 file changed, 1 insertion(+) diff --git a/paddlespeech/s2t/modules/encoder.py b/paddlespeech/s2t/modules/encoder.py index abdaf5ea7..cf4e32fa4 100644 --- a/paddlespeech/s2t/modules/encoder.py +++ b/paddlespeech/s2t/modules/encoder.py @@ -255,6 +255,7 @@ class BaseEncoder(nn.Layer): xs, att_mask, pos_emb, + mask_pad=paddle.ones([0, 0, 0], dtype=paddle.bool), att_cache=att_cache[i:i + 1] if elayers > 0 else att_cache, cnn_cache=cnn_cache[i:i + 1] if paddle.shape(cnn_cache)[0] > 0 else cnn_cache, ) From bdd7da98cc95108513e00a2b6f2e83e6b2954535 Mon Sep 17 00:00:00 2001 From: Zhao Yuting <91456992+THUzyt21@users.noreply.github.com> Date: Wed, 31 Aug 2022 14:58:32 +0800 Subject: [PATCH 054/101] Update README.md --- examples/iwslt2012/punc0/README.md | 44 +++++++++++++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) diff --git a/examples/iwslt2012/punc0/README.md b/examples/iwslt2012/punc0/README.md index 6caa9710b..288b2c1fb 100644 --- a/examples/iwslt2012/punc0/README.md +++ b/examples/iwslt2012/punc0/README.md @@ -19,11 +19,53 @@ ``` ## Pretrained Model The pretrained model can be downloaded here [ernie_linear_p3_iwslt2012_zh_ckpt_0.1.1.zip](https://paddlespeech.bj.bcebos.com/text/ernie_linear_p3_iwslt2012_zh_ckpt_0.1.1.zip). 
+[ernie-3.0-base.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-3.0-base.tar.gz) +[ernie-3.0-medium.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-3.0-medium.tar.gz) +[ernie-3.0-micro.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-3.0-micro.tar.gz) +[ernie-mini.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-mini.tar.gz) +[ernie-nano.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-nano.tar.gz) +[ernie-tiny.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-tiny.tar.gz) ### Test Result -- Ernie +- Ernie 1.0 | |COMMA | PERIOD | QUESTION | OVERALL| |:-----:|:-----:|:-----:|:-----:|:-----:| |Precision |0.510955 |0.526462 |0.820755 |0.619391| |Recall |0.517433 |0.564179 |0.861386 |0.647666| |F1 |0.514173 |0.544669 |0.840580 |0.633141| +- Ernie-tiny + | |COMMA | PERIOD | QUESTION | OVERALL| + |:-----:|:-----:|:-----:|:-----:|:-----:| + |Precision |0.733177 |0.721448 |0.754717 |0.736447| + |Recall |0.380740 |0.524646 |0.733945 |0.546443| + |F1 |0.501204 |0.607506 |0.744186 |0.617632| +- Ernie-3.0-base-zh + | |COMMA | PERIOD | QUESTION | OVERALL| + |:-----:|:-----:|:-----:|:-----:|:-----:| + |Precision |0.805947 |0.764160 |0.858491 |0.809532| + |Recall |0.399070 |0.567978 |0.850467 |0.605838| + |F1 |0.533817 |0.651623 |0.854460 |0.679967| +- Ernie-3.0-medium-zh + | |COMMA | PERIOD | QUESTION | OVERALL| + |:-----:|:-----:|:-----:|:-----:|:-----:| + |Precision |0.730829 |0.699164 |0.707547 |0.712514| + |Recall |0.388196 |0.533286 |0.797872 |0.573118| + |F1 |0.507058 |0.605062 |0.750000 |0.620707| +- Ernie-3.0-mini-zh + | |COMMA | PERIOD | QUESTION | OVERALL| + |:-----:|:-----:|:-----:|:-----:|:-----:| + |Precision |0.757433 |0.708449 |0.707547 |0.724477| + |Recall |0.355752 |0.506977 |0.735294 |0.532674| + |F1 |0.484121 |0.591015 |0.721154 |0.598763| +- Ernie-3.0-micro-zh + | |COMMA | PERIOD | QUESTION | OVERALL| + |:-----:|:-----:|:-----:|:-----:|:-----:| + |Precision |0.733959 |0.679666 |0.726415 |0.713347| + |Recall |0.332742 |0.483487 |0.712963 |0.509731| + |F1 |0.457896 |0.565033 |0.719626 |0.580852| +- Ernie-3.0-nano-zh + | |COMMA | PERIOD | QUESTION | OVERALL| + |:-----:|:-----:|:-----:|:-----:|:-----:| + |Precision |0.693271 |0.682451 |0.754717 |0.710146| + |Recall |0.327784 |0.491968 |0.666667 |0.495473| + |F1 |0.445114 |0.571762 |0.707965 |0.574947| From adfc035dc496134bd1b5df3c0bb1b0d95e8b2394 Mon Sep 17 00:00:00 2001 From: Zhao Yuting <91456992+THUzyt21@users.noreply.github.com> Date: Wed, 31 Aug 2022 14:59:16 +0800 Subject: [PATCH 055/101] Update README.md --- examples/iwslt2012/punc0/README.md | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/examples/iwslt2012/punc0/README.md b/examples/iwslt2012/punc0/README.md index 288b2c1fb..21f736ee3 100644 --- a/examples/iwslt2012/punc0/README.md +++ b/examples/iwslt2012/punc0/README.md @@ -18,12 +18,19 @@ ./run.sh --stage 3 --stop-stage 3 ``` ## Pretrained Model -The pretrained model can be downloaded here [ernie_linear_p3_iwslt2012_zh_ckpt_0.1.1.zip](https://paddlespeech.bj.bcebos.com/text/ernie_linear_p3_iwslt2012_zh_ckpt_0.1.1.zip). +The pretrained model can be downloaded here: +[ernie_linear_p3_iwslt2012_zh_ckpt_0.1.1.zip](https://paddlespeech.bj.bcebos.com/text/ernie_linear_p3_iwslt2012_zh_ckpt_0.1.1.zip). 
+ [ernie-3.0-base.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-3.0-base.tar.gz) + [ernie-3.0-medium.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-3.0-medium.tar.gz) + [ernie-3.0-micro.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-3.0-micro.tar.gz) + [ernie-mini.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-mini.tar.gz) + [ernie-nano.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-nano.tar.gz) + [ernie-tiny.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-tiny.tar.gz) ### Test Result From c9bf219e0bd75edb02806c4d8c58bb41c5622b21 Mon Sep 17 00:00:00 2001 From: Zhao Yuting <91456992+THUzyt21@users.noreply.github.com> Date: Wed, 31 Aug 2022 14:59:43 +0800 Subject: [PATCH 056/101] Update README.md --- examples/iwslt2012/punc0/README.md | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/examples/iwslt2012/punc0/README.md b/examples/iwslt2012/punc0/README.md index 21f736ee3..6ff86e05f 100644 --- a/examples/iwslt2012/punc0/README.md +++ b/examples/iwslt2012/punc0/README.md @@ -19,19 +19,20 @@ ``` ## Pretrained Model The pretrained model can be downloaded here: -[ernie_linear_p3_iwslt2012_zh_ckpt_0.1.1.zip](https://paddlespeech.bj.bcebos.com/text/ernie_linear_p3_iwslt2012_zh_ckpt_0.1.1.zip). -[ernie-3.0-base.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-3.0-base.tar.gz) + [ernie_linear_p3_iwslt2012_zh_ckpt_0.1.1.zip](https://paddlespeech.bj.bcebos.com/text/ernie_linear_p3_iwslt2012_zh_ckpt_0.1.1.zip). -[ernie-3.0-medium.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-3.0-medium.tar.gz) + [ernie-3.0-base.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-3.0-base.tar.gz) -[ernie-3.0-micro.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-3.0-micro.tar.gz) + [ernie-3.0-medium.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-3.0-medium.tar.gz) -[ernie-mini.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-mini.tar.gz) + [ernie-3.0-micro.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-3.0-micro.tar.gz) -[ernie-nano.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-nano.tar.gz) + [ernie-mini.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-mini.tar.gz) -[ernie-tiny.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-tiny.tar.gz) + [ernie-nano.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-nano.tar.gz) + + [ernie-tiny.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-tiny.tar.gz) ### Test Result - Ernie 1.0 From b8713a6faa2bf3fcbfb8b3f6710122985631399b Mon Sep 17 00:00:00 2001 From: Zhao Yuting <91456992+THUzyt21@users.noreply.github.com> Date: Wed, 31 Aug 2022 15:00:13 +0800 Subject: [PATCH 057/101] Update README.md --- examples/iwslt2012/punc0/README.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/examples/iwslt2012/punc0/README.md b/examples/iwslt2012/punc0/README.md index 6ff86e05f..b37a25caa 100644 --- a/examples/iwslt2012/punc0/README.md +++ b/examples/iwslt2012/punc0/README.md @@ -20,19 +20,19 @@ ## Pretrained Model The pretrained model can be downloaded here: - [ernie_linear_p3_iwslt2012_zh_ckpt_0.1.1.zip](https://paddlespeech.bj.bcebos.com/text/ernie_linear_p3_iwslt2012_zh_ckpt_0.1.1.zip). +[ernie_linear_p3_iwslt2012_zh_ckpt_0.1.1.zip](https://paddlespeech.bj.bcebos.com/text/ernie_linear_p3_iwslt2012_zh_ckpt_0.1.1.zip). 
- [ernie-3.0-base.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-3.0-base.tar.gz) +[ernie-3.0-base.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-3.0-base.tar.gz) - [ernie-3.0-medium.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-3.0-medium.tar.gz) +[ernie-3.0-medium.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-3.0-medium.tar.gz) - [ernie-3.0-micro.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-3.0-micro.tar.gz) +[ernie-3.0-micro.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-3.0-micro.tar.gz) - [ernie-mini.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-mini.tar.gz) +[ernie-mini.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-mini.tar.gz) - [ernie-nano.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-nano.tar.gz) +[ernie-nano.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-nano.tar.gz) - [ernie-tiny.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-tiny.tar.gz) +[ernie-tiny.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-tiny.tar.gz) ### Test Result - Ernie 1.0 From d1f81e8dbb66b106c165cbf2743c93e4c3716125 Mon Sep 17 00:00:00 2001 From: Zhao Yuting <91456992+THUzyt21@users.noreply.github.com> Date: Wed, 31 Aug 2022 15:00:37 +0800 Subject: [PATCH 058/101] Update README.md --- examples/iwslt2012/punc0/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/iwslt2012/punc0/README.md b/examples/iwslt2012/punc0/README.md index b37a25caa..9b9f3a914 100644 --- a/examples/iwslt2012/punc0/README.md +++ b/examples/iwslt2012/punc0/README.md @@ -20,7 +20,7 @@ ## Pretrained Model The pretrained model can be downloaded here: -[ernie_linear_p3_iwslt2012_zh_ckpt_0.1.1.zip](https://paddlespeech.bj.bcebos.com/text/ernie_linear_p3_iwslt2012_zh_ckpt_0.1.1.zip). 
+[ernie_linear_p3_iwslt2012_zh_ckpt_0.1.1.zip](https://paddlespeech.bj.bcebos.com/text/ernie_linear_p3_iwslt2012_zh_ckpt_0.1.1.zip) [ernie-3.0-base.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-3.0-base.tar.gz) From 3e7e247ad0a98697e9a3269583dc9d330a051d94 Mon Sep 17 00:00:00 2001 From: Zhao Yuting <91456992+THUzyt21@users.noreply.github.com> Date: Wed, 31 Aug 2022 15:35:11 +0800 Subject: [PATCH 059/101] Update application.yaml --- demos/streaming_asr_server/conf/application.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/demos/streaming_asr_server/conf/application.yaml b/demos/streaming_asr_server/conf/application.yaml index 683d86f03..a89d312ab 100644 --- a/demos/streaming_asr_server/conf/application.yaml +++ b/demos/streaming_asr_server/conf/application.yaml @@ -28,6 +28,7 @@ asr_online: sample_rate: 16000 cfg_path: decode_method: + num_decoding_left_chunks: -1 force_yes: True device: 'cpu' # cpu or gpu:id decode_method: "attention_rescoring" From cb74803957d822229a13b4af214723d2a80a6252 Mon Sep 17 00:00:00 2001 From: Zhao Yuting <91456992+THUzyt21@users.noreply.github.com> Date: Wed, 31 Aug 2022 20:03:44 +0800 Subject: [PATCH 060/101] fix the bug "can't start ASR streaming_server" (#2337) * Update application.yaml --- .../conf/application.yaml | 1 + examples/iwslt2012/punc0/README.md | 54 ++++++++++++++++++- 2 files changed, 53 insertions(+), 2 deletions(-) diff --git a/demos/streaming_asr_server/conf/application.yaml b/demos/streaming_asr_server/conf/application.yaml index 683d86f03..a89d312ab 100644 --- a/demos/streaming_asr_server/conf/application.yaml +++ b/demos/streaming_asr_server/conf/application.yaml @@ -28,6 +28,7 @@ asr_online: sample_rate: 16000 cfg_path: decode_method: + num_decoding_left_chunks: -1 force_yes: True device: 'cpu' # cpu or gpu:id decode_method: "attention_rescoring" diff --git a/examples/iwslt2012/punc0/README.md b/examples/iwslt2012/punc0/README.md index 6caa9710b..9b9f3a914 100644 --- a/examples/iwslt2012/punc0/README.md +++ b/examples/iwslt2012/punc0/README.md @@ -18,12 +18,62 @@ ./run.sh --stage 3 --stop-stage 3 ``` ## Pretrained Model -The pretrained model can be downloaded here [ernie_linear_p3_iwslt2012_zh_ckpt_0.1.1.zip](https://paddlespeech.bj.bcebos.com/text/ernie_linear_p3_iwslt2012_zh_ckpt_0.1.1.zip). 
+The pretrained model can be downloaded here: + +[ernie_linear_p3_iwslt2012_zh_ckpt_0.1.1.zip](https://paddlespeech.bj.bcebos.com/text/ernie_linear_p3_iwslt2012_zh_ckpt_0.1.1.zip) + +[ernie-3.0-base.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-3.0-base.tar.gz) + +[ernie-3.0-medium.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-3.0-medium.tar.gz) + +[ernie-3.0-micro.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-3.0-micro.tar.gz) + +[ernie-mini.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-mini.tar.gz) + +[ernie-nano.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-nano.tar.gz) + +[ernie-tiny.tar.gz](https://paddlespeech.bj.bcebos.com/punc_restore/ernie-tiny.tar.gz) ### Test Result -- Ernie +- Ernie 1.0 | |COMMA | PERIOD | QUESTION | OVERALL| |:-----:|:-----:|:-----:|:-----:|:-----:| |Precision |0.510955 |0.526462 |0.820755 |0.619391| |Recall |0.517433 |0.564179 |0.861386 |0.647666| |F1 |0.514173 |0.544669 |0.840580 |0.633141| +- Ernie-tiny + | |COMMA | PERIOD | QUESTION | OVERALL| + |:-----:|:-----:|:-----:|:-----:|:-----:| + |Precision |0.733177 |0.721448 |0.754717 |0.736447| + |Recall |0.380740 |0.524646 |0.733945 |0.546443| + |F1 |0.501204 |0.607506 |0.744186 |0.617632| +- Ernie-3.0-base-zh + | |COMMA | PERIOD | QUESTION | OVERALL| + |:-----:|:-----:|:-----:|:-----:|:-----:| + |Precision |0.805947 |0.764160 |0.858491 |0.809532| + |Recall |0.399070 |0.567978 |0.850467 |0.605838| + |F1 |0.533817 |0.651623 |0.854460 |0.679967| +- Ernie-3.0-medium-zh + | |COMMA | PERIOD | QUESTION | OVERALL| + |:-----:|:-----:|:-----:|:-----:|:-----:| + |Precision |0.730829 |0.699164 |0.707547 |0.712514| + |Recall |0.388196 |0.533286 |0.797872 |0.573118| + |F1 |0.507058 |0.605062 |0.750000 |0.620707| +- Ernie-3.0-mini-zh + | |COMMA | PERIOD | QUESTION | OVERALL| + |:-----:|:-----:|:-----:|:-----:|:-----:| + |Precision |0.757433 |0.708449 |0.707547 |0.724477| + |Recall |0.355752 |0.506977 |0.735294 |0.532674| + |F1 |0.484121 |0.591015 |0.721154 |0.598763| +- Ernie-3.0-micro-zh + | |COMMA | PERIOD | QUESTION | OVERALL| + |:-----:|:-----:|:-----:|:-----:|:-----:| + |Precision |0.733959 |0.679666 |0.726415 |0.713347| + |Recall |0.332742 |0.483487 |0.712963 |0.509731| + |F1 |0.457896 |0.565033 |0.719626 |0.580852| +- Ernie-3.0-nano-zh + | |COMMA | PERIOD | QUESTION | OVERALL| + |:-----:|:-----:|:-----:|:-----:|:-----:| + |Precision |0.693271 |0.682451 |0.754717 |0.710146| + |Recall |0.327784 |0.491968 |0.666667 |0.495473| + |F1 |0.445114 |0.571762 |0.707965 |0.574947| From 5d5888af8ec0e327f7cad41ddf5887d0eaf678dc Mon Sep 17 00:00:00 2001 From: TianYuan Date: Wed, 31 Aug 2022 20:23:17 +0800 Subject: [PATCH 061/101] fix tone, update readme (#2335) --- examples/other/g2p/README.md | 4 ++-- paddlespeech/t2s/frontend/tone_sandhi.py | 4 +--- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/examples/other/g2p/README.md b/examples/other/g2p/README.md index 882943504..85c9535d1 100644 --- a/examples/other/g2p/README.md +++ b/examples/other/g2p/README.md @@ -12,13 +12,13 @@ Run the command below to get the results of the test. ./run.sh ``` -The `avg WER` of g2p is: 0.024169315564825305 +The `avg WER` of g2p is: 0.024075726733983775 ```text ,--------------------------------------------------------------------. 
| ./exp/g2p/text.g2p | |--------------------------------------------------------------------| | SPKR | # Snt # Wrd | Corr Sub Del Ins Err S.Err | - | Sum/Avg| 9996 299181 | 97.6 2.4 0.0 0.0 2.4 49.2 | + | Sum/Avg| 9996 299181 | 97.6 2.4 0.0 0.0 2.4 49.0 | `--------------------------------------------------------------------' ``` diff --git a/paddlespeech/t2s/frontend/tone_sandhi.py b/paddlespeech/t2s/frontend/tone_sandhi.py index ee3aa84ad..9fff4272c 100644 --- a/paddlespeech/t2s/frontend/tone_sandhi.py +++ b/paddlespeech/t2s/frontend/tone_sandhi.py @@ -84,9 +84,7 @@ class ToneSandhi(): if j - 1 >= 0 and item == word[j - 1] and pos[0] in {"n", "v", "a"}: finals[j] = finals[j][:-1] + "5" ge_idx = word.find("个") - if (len(word) > 1 and word[-1] in "吧呢啊呐噻嘛吖嗨呐哦哒滴哩哟喽啰耶喔诶") or ( - len(word) > 1 and word[-2] in '好是帅酷棒衰烂臭狗糗' and - word[-1] == '额') or (len(word) == 1 and word[-1] in "额嗯"): + if len(word) >= 1 and word[-1] in "吧呢啊呐噻嘛吖嗨呐哦哒滴哩哟喽啰耶喔诶": finals[-1] = finals[-1][:-1] + "5" elif len(word) >= 1 and word[-1] in "的地得": finals[-1] = finals[-1][:-1] + "5" From 795eb7bd1012f0c6110357726d18c0fab1582708 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Thu, 1 Sep 2022 10:49:06 +0800 Subject: [PATCH 062/101] format paddlespeech with pre-commit (#2331) --- demos/audio_searching/src/operations/load.py | 5 +- demos/speech_web/API.md | 2 +- demos/speech_web/speech_server/main.py | 160 +++++------ .../speech_web/speech_server/requirements.txt | 11 +- .../speech_server/src/AudioManeger.py | 87 +++--- .../speech_server/src/SpeechBase/asr.py | 18 +- .../speech_server/src/SpeechBase/nlp.py | 18 +- .../src/SpeechBase/sql_helper.py | 56 ++-- .../speech_server/src/SpeechBase/tts.py | 92 +++---- .../speech_server/src/SpeechBase/vpr.py | 54 ++-- .../src/SpeechBase/vpr_encode.py | 9 +- .../speech_server/src/WebsocketManeger.py | 3 +- demos/speech_web/speech_server/src/robot.py | 44 +-- demos/speech_web/speech_server/src/util.py | 17 +- .../local/rtf_from_log.py | 2 +- docs/requirements.txt | 35 ++- docs/source/conf.py | 5 +- examples/iwslt2012/punc0/local/preprocess.py | 46 ++-- examples/other/tts_finetune/tts3/finetune.py | 9 +- paddlespeech/__init__.py | 2 - paddlespeech/audio/__init__.py | 6 +- paddlespeech/audio/streamdata/__init__.py | 125 +++++---- paddlespeech/audio/streamdata/autodecode.py | 19 +- paddlespeech/audio/streamdata/cache.py | 63 ++--- paddlespeech/audio/streamdata/compat.py | 68 +++-- .../audio/streamdata/extradatasets.py | 13 +- paddlespeech/audio/streamdata/filters.py | 256 +++++++++++------- paddlespeech/audio/streamdata/gopen.py | 62 ++--- paddlespeech/audio/streamdata/handlers.py | 5 +- paddlespeech/audio/streamdata/mix.py | 9 +- paddlespeech/audio/streamdata/paddle_utils.py | 14 +- paddlespeech/audio/streamdata/pipeline.py | 14 +- paddlespeech/audio/streamdata/shardlists.py | 77 +++--- paddlespeech/audio/streamdata/tariterators.py | 81 +++--- paddlespeech/audio/streamdata/utils.py | 32 ++- paddlespeech/audio/streamdata/writer.py | 77 +++--- paddlespeech/audio/text/text_featurizer.py | 2 +- paddlespeech/audio/transform/perturb.py | 11 +- paddlespeech/audio/transform/spec_augment.py | 1 + paddlespeech/cli/executor.py | 2 +- paddlespeech/s2t/__init__.py | 1 + paddlespeech/s2t/exps/u2/model.py | 23 +- paddlespeech/s2t/exps/u2_kaldi/model.py | 26 +- paddlespeech/s2t/exps/u2_st/model.py | 19 +- paddlespeech/s2t/io/dataloader.py | 145 +++++----- paddlespeech/s2t/models/u2_st/u2_st.py | 13 +- paddlespeech/s2t/modules/align.py | 39 ++- paddlespeech/s2t/modules/initializer.py | 2 +- 
.../server/engine/asr/online/ctc_endpoint.py | 6 +- .../engine/asr/online/onnx/asr_engine.py | 2 +- .../asr/online/paddleinference/asr_engine.py | 2 +- .../server/engine/asr/python/asr_engine.py | 12 +- paddlespeech/t2s/datasets/sampler.py | 7 +- paddlespeech/t2s/exps/ernie_sat/train.py | 1 - paddlespeech/t2s/exps/ernie_sat/utils.py | 11 +- paddlespeech/t2s/exps/syn_utils.py | 8 +- paddlespeech/t2s/frontend/g2pw/__init__.py | 1 - paddlespeech/t2s/frontend/mix_frontend.py | 7 +- .../t2s/training/updaters/standard_updater.py | 3 +- setup.py | 9 +- .../ds2_ol/onnx/local/onnx_infer_shape.py | 43 ++- 61 files changed, 1052 insertions(+), 940 deletions(-) diff --git a/demos/audio_searching/src/operations/load.py b/demos/audio_searching/src/operations/load.py index 0d9edb784..d1ea00576 100644 --- a/demos/audio_searching/src/operations/load.py +++ b/demos/audio_searching/src/operations/load.py @@ -26,8 +26,9 @@ def get_audios(path): """ supported_formats = [".wav", ".mp3", ".ogg", ".flac", ".m4a"] return [ - item for sublist in [[os.path.join(dir, file) for file in files] - for dir, _, files in list(os.walk(path))] + item + for sublist in [[os.path.join(dir, file) for file in files] + for dir, _, files in list(os.walk(path))] for item in sublist if os.path.splitext(item)[1] in supported_formats ] diff --git a/demos/speech_web/API.md b/demos/speech_web/API.md index c51446749..f66ec138e 100644 --- a/demos/speech_web/API.md +++ b/demos/speech_web/API.md @@ -401,4 +401,4 @@ curl -X 'GET' \ "code": 0, "result":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "message": "ok" -``` \ No newline at end of file +``` diff --git a/demos/speech_web/speech_server/main.py b/demos/speech_web/speech_server/main.py index b10176670..d4750d598 100644 --- a/demos/speech_web/speech_server/main.py +++ b/demos/speech_web/speech_server/main.py @@ -3,48 +3,48 @@ # 2. 接收录音音频,返回识别结果 # 3. 接收ASR识别结果,返回NLP对话结果 # 4. 
接收NLP对话结果,返回TTS音频 - +import argparse import base64 -import yaml -import os -import json import datetime +import json +import os +from typing import List + +import aiofiles import librosa import soundfile as sf -import numpy as np -import argparse import uvicorn -import aiofiles -from typing import Optional, List -from pydantic import BaseModel -from fastapi import FastAPI, Header, File, UploadFile, Form, Cookie, WebSocket, WebSocketDisconnect +from fastapi import FastAPI +from fastapi import File +from fastapi import Form +from fastapi import UploadFile +from fastapi import WebSocket +from fastapi import WebSocketDisconnect from fastapi.responses import StreamingResponse -from starlette.responses import FileResponse -from starlette.middleware.cors import CORSMiddleware -from starlette.requests import Request -from starlette.websockets import WebSocketState as WebSocketState - +from pydantic import BaseModel from src.AudioManeger import AudioMannger -from src.util import * from src.robot import Robot -from src.WebsocketManeger import ConnectionManager from src.SpeechBase.vpr import VPR +from src.util import * +from src.WebsocketManeger import ConnectionManager +from starlette.middleware.cors import CORSMiddleware +from starlette.requests import Request +from starlette.responses import FileResponse +from starlette.websockets import WebSocketState as WebSocketState from paddlespeech.server.engine.asr.online.python.asr_engine import PaddleASRConnectionHanddler from paddlespeech.server.utils.audio_process import float2pcm - # 解析配置 -parser = argparse.ArgumentParser( - prog='PaddleSpeechDemo', add_help=True) +parser = argparse.ArgumentParser(prog='PaddleSpeechDemo', add_help=True) parser.add_argument( - "--port", - action="store", - type=int, - help="port of the app", - default=8010, - required=False) + "--port", + action="store", + type=int, + help="port of the app", + default=8010, + required=False) args = parser.parse_args() port = args.port @@ -60,39 +60,41 @@ ie_model_path = "source/model" UPLOAD_PATH = "source/vpr" WAV_PATH = "source/wav" - -base_sources = [ - UPLOAD_PATH, WAV_PATH -] +base_sources = [UPLOAD_PATH, WAV_PATH] for path in base_sources: os.makedirs(path, exist_ok=True) - # 初始化 app = FastAPI() -chatbot = Robot(asr_config, tts_config, asr_init_path, ie_model_path=ie_model_path) +chatbot = Robot( + asr_config, tts_config, asr_init_path, ie_model_path=ie_model_path) manager = ConnectionManager() aumanager = AudioMannger(chatbot) aumanager.init() -vpr = VPR(db_path, dim = 192, top_k = 5) +vpr = VPR(db_path, dim=192, top_k=5) + # 服务配置 class NlpBase(BaseModel): chat: str + class TtsBase(BaseModel): - text: str + text: str + class Audios: def __init__(self) -> None: self.audios = b"" + audios = Audios() ###################################################################### ########################### ASR 服务 ################################# ##################################################################### + # 接收文件,返回ASR结果 # 上传文件 @app.post("/asr/offline") @@ -101,7 +103,8 @@ async def speech2textOffline(files: List[UploadFile]): asr_res = "" for file in files[:1]: # 生成时间戳 - now_name = "asr_offline_" + datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav" + now_name = "asr_offline_" + datetime.datetime.strftime( + datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav" out_file_path = os.path.join(WAV_PATH, now_name) async with aiofiles.open(out_file_path, 'wb') as out_file: content = await file.read() # async read @@ -110,10 +113,9 @@ async 
def speech2textOffline(files: List[UploadFile]): # 返回ASR识别结果 asr_res = chatbot.speech2text(out_file_path) return SuccessRequest(result=asr_res) - # else: - # return ErrorRequest(message="文件不是.wav格式") return ErrorRequest(message="上传文件为空") + # 接收文件,同时将wav强制转成16k, int16类型 @app.post("/asr/offlinefile") async def speech2textOfflineFile(files: List[UploadFile]): @@ -121,7 +123,8 @@ async def speech2textOfflineFile(files: List[UploadFile]): asr_res = "" for file in files[:1]: # 生成时间戳 - now_name = "asr_offline_" + datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav" + now_name = "asr_offline_" + datetime.datetime.strftime( + datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav" out_file_path = os.path.join(WAV_PATH, now_name) async with aiofiles.open(out_file_path, 'wb') as out_file: content = await file.read() # async read @@ -132,22 +135,18 @@ async def speech2textOfflineFile(files: List[UploadFile]): wav = float2pcm(wav) # float32 to int16 wav_bytes = wav.tobytes() # to bytes wav_base64 = base64.b64encode(wav_bytes).decode('utf8') - + # 将文件重新写入 now_name = now_name[:-4] + "_16k" + ".wav" out_file_path = os.path.join(WAV_PATH, now_name) - sf.write(out_file_path,wav,16000) + sf.write(out_file_path, wav, 16000) # 返回ASR识别结果 asr_res = chatbot.speech2text(out_file_path) - response_res = { - "asr_result": asr_res, - "wav_base64": wav_base64 - } + response_res = {"asr_result": asr_res, "wav_base64": wav_base64} return SuccessRequest(result=response_res) - - return ErrorRequest(message="上传文件为空") + return ErrorRequest(message="上传文件为空") # 流式接收测试 @@ -161,15 +160,17 @@ async def speech2textOnlineRecive(files: List[UploadFile]): print(f"audios长度变化: {len(audios.audios)}") return SuccessRequest(message="接收成功") + # 采集环境噪音大小 @app.post("/asr/collectEnv") async def collectEnv(files: List[UploadFile]): - for file in files[:1]: + for file in files[:1]: content = await file.read() # async read # 初始化, wav 前44字节是头部信息 aumanager.compute_env_volume(content[44:]) vad_ = aumanager.vad_threshold - return SuccessRequest(result=vad_,message="采集环境噪音成功") + return SuccessRequest(result=vad_, message="采集环境噪音成功") + # 停止录音 @app.get("/asr/stopRecord") @@ -179,6 +180,7 @@ async def stopRecord(): print("Online录音暂停") return SuccessRequest(message="停止成功") + # 恢复录音 @app.get("/asr/resumeRecord") async def resumeRecord(): @@ -187,7 +189,7 @@ async def resumeRecord(): return SuccessRequest(message="Online录音恢复") -# 聊天用的ASR +# 聊天用的 ASR @app.websocket("/ws/asr/offlineStream") async def websocket_endpoint(websocket: WebSocket): await manager.connect(websocket) @@ -210,9 +212,9 @@ async def websocket_endpoint(websocket: WebSocket): # print(f"用户-{user}-离开") -# Online识别的ASR + # 流式识别的 ASR @app.websocket('/ws/asr/onlineStream') -async def websocket_endpoint(websocket: WebSocket): +async def websocket_endpoint_online(websocket: WebSocket): """PaddleSpeech Online ASR Server api Args: @@ -298,12 +300,14 @@ async def websocket_endpoint(websocket: WebSocket): except WebSocketDisconnect: pass + ###################################################################### ########################### NLP 服务 ################################# ##################################################################### + @app.post("/nlp/chat") -async def chatOffline(nlp_base:NlpBase): +async def chatOffline(nlp_base: NlpBase): chat = nlp_base.chat if not chat: return ErrorRequest(message="传入文本为空") @@ -311,8 +315,9 @@ async def chatOffline(nlp_base:NlpBase): res = chatbot.chat(chat) return SuccessRequest(result=res) + 
@app.post("/nlp/ie") -async def ieOffline(nlp_base:NlpBase): +async def ieOffline(nlp_base: NlpBase): nlp_text = nlp_base.chat if not nlp_text: return ErrorRequest(message="传入文本为空") @@ -320,17 +325,20 @@ async def ieOffline(nlp_base:NlpBase): res = chatbot.ie(nlp_text) return SuccessRequest(result=res) + ###################################################################### ########################### TTS 服务 ################################# ##################################################################### + @app.post("/tts/offline") -async def text2speechOffline(tts_base:TtsBase): +async def text2speechOffline(tts_base: TtsBase): text = tts_base.text if not text: return ErrorRequest(message="文本为空") else: - now_name = "tts_"+ datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav" + now_name = "tts_" + datetime.datetime.strftime( + datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav" out_file_path = os.path.join(WAV_PATH, now_name) # 保存为文件,再转成base64传输 chatbot.text2speech(text, outpath=out_file_path) @@ -339,12 +347,14 @@ async def text2speechOffline(tts_base:TtsBase): base_str = base64.b64encode(data_bin) return SuccessRequest(result=base_str) + # http流式TTS @app.post("/tts/online") async def stream_tts(request_body: TtsBase): text = request_body.text return StreamingResponse(chatbot.text2speechStreamBytes(text=text)) + # ws流式TTS @app.websocket("/ws/tts/online") async def stream_ttsWS(websocket: WebSocket): @@ -356,17 +366,11 @@ async def stream_ttsWS(websocket: WebSocket): if text: for sub_wav in chatbot.text2speechStream(text=text): # print("发送sub wav: ", len(sub_wav)) - res = { - "wav": sub_wav, - "done": False - } + res = {"wav": sub_wav, "done": False} await websocket.send_json(res) - + # 输送结束 - res = { - "wav": sub_wav, - "done": True - } + res = {"wav": sub_wav, "done": True} await websocket.send_json(res) # manager.disconnect(websocket) @@ -396,8 +400,9 @@ async def vpr_enroll(table_name: str=None, return {'status': False, 'msg': "spk_id can not be None"} # Save the upload data to server. content = await audio.read() - now_name = "vpr_enroll_" + datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav" - audio_path = os.path.join(UPLOAD_PATH, now_name) + now_name = "vpr_enroll_" + datetime.datetime.strftime( + datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav" + audio_path = os.path.join(UPLOAD_PATH, now_name) with open(audio_path, "wb+") as f: f.write(content) @@ -413,20 +418,19 @@ async def vpr_recog(request: Request, audio: UploadFile=File(...)): # Voice print recognition online # try: - # Save the upload data to server. + # Save the upload data to server. 
content = await audio.read() - now_name = "vpr_query_" + datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav" - query_audio_path = os.path.join(UPLOAD_PATH, now_name) + now_name = "vpr_query_" + datetime.datetime.strftime( + datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav" + query_audio_path = os.path.join(UPLOAD_PATH, now_name) with open(query_audio_path, "wb+") as f: - f.write(content) + f.write(content) spk_ids, paths, scores = vpr.do_search_vpr(query_audio_path) res = dict(zip(spk_ids, zip(paths, scores))) # Sort results by distance metric, closest distances first res = sorted(res.items(), key=lambda item: item[1][1], reverse=True) return res - # except Exception as e: - # return {'status': False, 'msg': e}, 400 @app.post('/vpr/del') @@ -460,17 +464,18 @@ async def vpr_database64(vprId: int): return {'status': False, 'msg': "vpr_id can not be None"} audio_path = vpr.do_get_wav(vprId) # 返回base64 - + # 将文件转成16k, 16bit类型的wav文件 wav, sr = librosa.load(audio_path, sr=16000) wav = float2pcm(wav) # float32 to int16 wav_bytes = wav.tobytes() # to bytes wav_base64 = base64.b64encode(wav_bytes).decode('utf8') - + return SuccessRequest(result=wav_base64) except Exception as e: return {'status': False, 'msg': e}, 400 + @app.get('/vpr/data') async def vpr_data(vprId: int): # Get the audio file from path by spk_id in MySQL @@ -482,11 +487,6 @@ async def vpr_data(vprId: int): except Exception as e: return {'status': False, 'msg': e}, 400 + if __name__ == '__main__': uvicorn.run(app=app, host='0.0.0.0', port=port) - - - - - - diff --git a/demos/speech_web/speech_server/requirements.txt b/demos/speech_web/speech_server/requirements.txt index 7e7bd1680..607f0d4d0 100644 --- a/demos/speech_web/speech_server/requirements.txt +++ b/demos/speech_web/speech_server/requirements.txt @@ -1,14 +1,13 @@ aiofiles +faiss-cpu fastapi librosa numpy +paddlenlp +paddlepaddle +paddlespeech pydantic -scikit_learn +python-multipartscikit_learn SoundFile starlette uvicorn -paddlepaddle -paddlespeech -paddlenlp -faiss-cpu -python-multipart \ No newline at end of file diff --git a/demos/speech_web/speech_server/src/AudioManeger.py b/demos/speech_web/speech_server/src/AudioManeger.py index 0deb03699..8fe512cfd 100644 --- a/demos/speech_web/speech_server/src/AudioManeger.py +++ b/demos/speech_web/speech_server/src/AudioManeger.py @@ -1,15 +1,19 @@ -import imp -from queue import Queue -import numpy as np +import datetime import os import wave -import random -import datetime + +import numpy as np + from .util import randName class AudioMannger: - def __init__(self, robot, frame_length=160, frame=10, data_width=2, vad_default = 300): + def __init__(self, + robot, + frame_length=160, + frame=10, + data_width=2, + vad_default=300): # 二进制 pcm 流 self.audios = b'' self.asr_result = "" @@ -20,8 +24,9 @@ class AudioMannger: os.makedirs(self.file_dir, exist_ok=True) self.vad_deafult = vad_default self.vad_threshold = vad_default - self.vad_threshold_path = os.path.join(self.file_dir, "vad_threshold.npy") - + self.vad_threshold_path = os.path.join(self.file_dir, + "vad_threshold.npy") + # 10ms 一帧 self.frame_length = frame_length # 10帧,检测一次 vad @@ -30,67 +35,64 @@ class AudioMannger: self.data_width = data_width # window self.window_length = frame_length * frame * data_width - + # 是否开始录音 self.on_asr = False - self.silence_cnt = 0 + self.silence_cnt = 0 self.max_silence_cnt = 4 self.is_pause = False # 录音暂停与恢复 - - - + def init(self): if os.path.exists(self.vad_threshold_path): # 平均响度文件存在 
self.vad_threshold = np.load(self.vad_threshold_path) - - + def clear_audio(self): # 清空 pcm 累积片段与 asr 识别结果 self.audios = b'' - + def clear_asr(self): self.asr_result = "" - - + def compute_chunk_volume(self, start_index, pcm_bins): # 根据帧长计算能量平均值 - pcm_bin = pcm_bins[start_index: start_index + self.window_length] + pcm_bin = pcm_bins[start_index:start_index + self.window_length] # 转成 numpy pcm_np = np.frombuffer(pcm_bin, np.int16) # 归一化 + 计算响度 x = pcm_np.astype(np.float32) x = np.abs(x) - return np.mean(x) - - + return np.mean(x) + def is_speech(self, start_index, pcm_bins): # 检查是否没 if start_index > len(pcm_bins): return False # 检查从这个 start 开始是否为静音帧 - energy = self.compute_chunk_volume(start_index=start_index, pcm_bins=pcm_bins) + energy = self.compute_chunk_volume( + start_index=start_index, pcm_bins=pcm_bins) # print(energy) if energy > self.vad_threshold: return True else: return False - + def compute_env_volume(self, pcm_bins): max_energy = 0 start = 0 while start < len(pcm_bins): - energy = self.compute_chunk_volume(start_index=start, pcm_bins=pcm_bins) + energy = self.compute_chunk_volume( + start_index=start, pcm_bins=pcm_bins) if energy > max_energy: max_energy = energy start += self.window_length self.vad_threshold = max_energy + 100 if max_energy > self.vad_deafult else self.vad_deafult - + # 保存成文件 np.save(self.vad_threshold_path, self.vad_threshold) print(f"vad 阈值大小: {self.vad_threshold}") print(f"环境采样保存: {os.path.realpath(self.vad_threshold_path)}") - + def stream_asr(self, pcm_bin): # 先把 pcm_bin 送进去做端点检测 start = 0 @@ -99,7 +101,7 @@ class AudioMannger: self.on_asr = True self.silence_cnt = 0 print("录音中") - self.audios += pcm_bin[ start : start + self.window_length] + self.audios += pcm_bin[start:start + self.window_length] else: if self.on_asr: self.silence_cnt += 1 @@ -110,41 +112,42 @@ class AudioMannger: print("录音停止") # audios 保存为 wav, 送入 ASR if len(self.audios) > 2 * 16000: - file_path = os.path.join(self.file_dir, "asr_" + datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav") + file_path = os.path.join( + self.file_dir, + "asr_" + datetime.datetime.strftime( + datetime.datetime.now(), + '%Y%m%d%H%M%S') + randName() + ".wav") self.save_audio(file_path=file_path) self.asr_result = self.robot.speech2text(file_path) self.clear_audio() - return self.asr_result + return self.asr_result else: # 正常接收 print("录音中 静音") - self.audios += pcm_bin[ start : start + self.window_length] + self.audios += pcm_bin[start:start + self.window_length] start += self.window_length return "" - + def save_audio(self, file_path): print("保存音频") - wf = wave.open(file_path, 'wb') # 创建一个音频文件,名字为“01.wav" - wf.setnchannels(1) # 设置声道数为2 - wf.setsampwidth(2) # 设置采样深度为 - wf.setframerate(16000) # 设置采样率为16000 + wf = wave.open(file_path, 'wb') # 创建一个音频文件,名字为“01.wav" + wf.setnchannels(1) # 设置声道数为2 + wf.setsampwidth(2) # 设置采样深度为 + wf.setframerate(16000) # 设置采样率为16000 # 将数据写入创建的音频文件 wf.writeframes(self.audios) # 写完后将文件关闭 wf.close() - + def end(self): # audios 保存为 wav, 送入 ASR file_path = os.path.join(self.file_dir, "asr.wav") self.save_audio(file_path=file_path) return self.robot.speech2text(file_path) - + def stop(self): self.is_pause = True self.audios = b'' - + def resume(self): self.is_pause = False - - - \ No newline at end of file diff --git a/demos/speech_web/speech_server/src/SpeechBase/asr.py b/demos/speech_web/speech_server/src/SpeechBase/asr.py index 8d4c0cffc..5213ea787 100644 --- a/demos/speech_web/speech_server/src/SpeechBase/asr.py +++ 
b/demos/speech_web/speech_server/src/SpeechBase/asr.py @@ -1,13 +1,10 @@ -from re import sub import numpy as np -import paddle -import librosa -import soundfile from paddlespeech.server.engine.asr.online.python.asr_engine import ASREngine from paddlespeech.server.engine.asr.online.python.asr_engine import PaddleASRConnectionHanddler from paddlespeech.server.utils.config import get_config + def readWave(samples): x_len = len(samples) @@ -31,20 +28,23 @@ def readWave(samples): class ASR: - def __init__(self, config_path, ) -> None: + def __init__( + self, + config_path, ) -> None: self.config = get_config(config_path)['asr_online'] self.engine = ASREngine() self.engine.init(self.config) self.connection_handler = PaddleASRConnectionHanddler(self.engine) - + def offlineASR(self, samples, sample_rate=16000): - x_chunk, x_chunk_lens = self.engine.preprocess(samples=samples, sample_rate=sample_rate) + x_chunk, x_chunk_lens = self.engine.preprocess( + samples=samples, sample_rate=sample_rate) self.engine.run(x_chunk, x_chunk_lens) result = self.engine.postprocess() self.engine.reset() return result - def onlineASR(self, samples:bytes=None, is_finished=False): + def onlineASR(self, samples: bytes=None, is_finished=False): if not is_finished: # 流式开始 self.connection_handler.extract_feat(samples) @@ -58,5 +58,3 @@ class ASR: asr_results = self.connection_handler.get_result() self.connection_handler.reset() return asr_results - - \ No newline at end of file diff --git a/demos/speech_web/speech_server/src/SpeechBase/nlp.py b/demos/speech_web/speech_server/src/SpeechBase/nlp.py index 4ece63256..b642a51d6 100644 --- a/demos/speech_web/speech_server/src/SpeechBase/nlp.py +++ b/demos/speech_web/speech_server/src/SpeechBase/nlp.py @@ -1,23 +1,23 @@ from paddlenlp import Taskflow + class NLP: def __init__(self, ie_model_path=None): schema = ["时间", "出发地", "目的地", "费用"] if ie_model_path: - self.ie_model = Taskflow("information_extraction", - schema=schema, task_path=ie_model_path) + self.ie_model = Taskflow( + "information_extraction", + schema=schema, + task_path=ie_model_path) else: - self.ie_model = Taskflow("information_extraction", - schema=schema) - + self.ie_model = Taskflow("information_extraction", schema=schema) + self.dialogue_model = Taskflow("dialogue") - + def chat(self, text): result = self.dialogue_model([text]) return result[0] - + def ie(self, text): result = self.ie_model(text) return result - - \ No newline at end of file diff --git a/demos/speech_web/speech_server/src/SpeechBase/sql_helper.py b/demos/speech_web/speech_server/src/SpeechBase/sql_helper.py index 6937def58..bd8d58970 100644 --- a/demos/speech_web/speech_server/src/SpeechBase/sql_helper.py +++ b/demos/speech_web/speech_server/src/SpeechBase/sql_helper.py @@ -1,18 +1,19 @@ import base64 -import sqlite3 import os +import sqlite3 + import numpy as np -from pkg_resources import resource_stream -def dict_factory(cursor, row): - d = {} - for idx, col in enumerate(cursor.description): - d[col[0]] = row[idx] - return d +def dict_factory(cursor, row): + d = {} + for idx, col in enumerate(cursor.description): + d[col[0]] = row[idx] + return d + class DataBase(object): - def __init__(self, db_path:str): + def __init__(self, db_path: str): db_path = os.path.realpath(db_path) if os.path.exists(db_path): @@ -21,12 +22,12 @@ class DataBase(object): db_path_dir = os.path.dirname(db_path) os.makedirs(db_path_dir, exist_ok=True) self.db_path = db_path - + self.conn = sqlite3.connect(self.db_path) self.conn.row_factory = dict_factory self.cursor = 
self.conn.cursor() self.init_database() - + def init_database(self): """ 初始化数据库, 若表不存在则创建 @@ -41,20 +42,21 @@ class DataBase(object): """ self.cursor.execute(sql) self.conn.commit() - + def execute_base(self, sql, data_dict): self.cursor.execute(sql, data_dict) self.conn.commit() - - def insert_one(self, username, vector_base64:str, wav_path): + + def insert_one(self, username, vector_base64: str, wav_path): if not os.path.exists(wav_path): return None, "wav not exists" else: - sql = f""" + sql = """ insert into vprtable (username, vector, wavpath) values (?, ?, ?) """ + try: self.cursor.execute(sql, (username, vector_base64, wav_path)) self.conn.commit() @@ -63,25 +65,27 @@ class DataBase(object): except Exception as e: print(e) return None, e - + def select_all(self): sql = """ SELECT * from vprtable """ result = self.cursor.execute(sql).fetchall() return result - + def select_by_id(self, vpr_id): sql = f""" SELECT * from vprtable WHERE `id` = {vpr_id} """ + result = self.cursor.execute(sql).fetchall() return result - + def select_by_username(self, username): sql = f""" SELECT * from vprtable WHERE `username` = '{username}' """ + result = self.cursor.execute(sql).fetchall() return result @@ -89,28 +93,30 @@ class DataBase(object): sql = f""" DELETE from vprtable WHERE `username`='{username}' """ + self.cursor.execute(sql) self.conn.commit() - + def drop_all(self): - sql = f""" + sql = """ DELETE from vprtable """ + self.cursor.execute(sql) self.conn.commit() - + def drop_table(self): - sql = f""" + sql = """ DROP TABLE vprtable """ + self.cursor.execute(sql) self.conn.commit() - - def encode_vector(self, vector:np.ndarray): + + def encode_vector(self, vector: np.ndarray): return base64.b64encode(vector).decode('utf8') - + def decode_vector(self, vector_base64, dtype=np.float32): b = base64.b64decode(vector_base64) vc = np.frombuffer(b, dtype=dtype) return vc - \ No newline at end of file diff --git a/demos/speech_web/speech_server/src/SpeechBase/tts.py b/demos/speech_web/speech_server/src/SpeechBase/tts.py index d5ba0c802..eb32bca0e 100644 --- a/demos/speech_web/speech_server/src/SpeechBase/tts.py +++ b/demos/speech_web/speech_server/src/SpeechBase/tts.py @@ -5,18 +5,19 @@ # 2. 加载模型 # 3. 端到端推理 # 4. 
流式推理 - import base64 -import math import logging +import math + import numpy as np -from paddlespeech.server.utils.onnx_infer import get_sess -from paddlespeech.t2s.frontend.zh_frontend import Frontend -from paddlespeech.server.utils.util import denorm, get_chunks + +from paddlespeech.server.engine.tts.online.onnx.tts_engine import TTSEngine from paddlespeech.server.utils.audio_process import float2pcm from paddlespeech.server.utils.config import get_config +from paddlespeech.server.utils.util import denorm +from paddlespeech.server.utils.util import get_chunks +from paddlespeech.t2s.frontend.zh_frontend import Frontend -from paddlespeech.server.engine.tts.online.onnx.tts_engine import TTSEngine class TTS: def __init__(self, config_path): @@ -26,12 +27,12 @@ class TTS: self.engine.init(self.config) self.executor = self.engine.executor #self.engine.warm_up() - + # 前端初始化 self.frontend = Frontend( - phone_vocab_path=self.engine.executor.phones_dict, - tone_vocab_path=None) - + phone_vocab_path=self.engine.executor.phones_dict, + tone_vocab_path=None) + def depadding(self, data, chunk_num, chunk_id, block, pad, upsample): """ Streaming inference removes the result of pad inference @@ -48,39 +49,37 @@ class TTS: data = data[front_pad * upsample:(front_pad + block) * upsample] return data - + def offlineTTS(self, text): get_tone_ids = False merge_sentences = False - + input_ids = self.frontend.get_input_ids( - text, - merge_sentences=merge_sentences, - get_tone_ids=get_tone_ids) + text, merge_sentences=merge_sentences, get_tone_ids=get_tone_ids) phone_ids = input_ids["phone_ids"] wav_list = [] for i in range(len(phone_ids)): orig_hs = self.engine.executor.am_encoder_infer_sess.run( - None, input_feed={'text': phone_ids[i].numpy()} - ) + None, input_feed={'text': phone_ids[i].numpy()}) hs = orig_hs[0] am_decoder_output = self.engine.executor.am_decoder_sess.run( - None, input_feed={'xs': hs}) + None, input_feed={'xs': hs}) am_postnet_output = self.engine.executor.am_postnet_sess.run( - None, - input_feed={ - 'xs': np.transpose(am_decoder_output[0], (0, 2, 1)) - }) + None, + input_feed={ + 'xs': np.transpose(am_decoder_output[0], (0, 2, 1)) + }) am_output_data = am_decoder_output + np.transpose( am_postnet_output[0], (0, 2, 1)) normalized_mel = am_output_data[0][0] - mel = denorm(normalized_mel, self.engine.executor.am_mu, self.engine.executor.am_std) + mel = denorm(normalized_mel, self.engine.executor.am_mu, + self.engine.executor.am_std) wav = self.engine.executor.voc_sess.run( - output_names=None, input_feed={'logmel': mel})[0] + output_names=None, input_feed={'logmel': mel})[0] wav_list.append(wav) wavs = np.concatenate(wav_list) return wavs - + def streamTTS(self, text): get_tone_ids = False @@ -88,9 +87,7 @@ class TTS: # front input_ids = self.frontend.get_input_ids( - text, - merge_sentences=merge_sentences, - get_tone_ids=get_tone_ids) + text, merge_sentences=merge_sentences, get_tone_ids=get_tone_ids) phone_ids = input_ids["phone_ids"] for i in range(len(phone_ids)): @@ -105,14 +102,15 @@ class TTS: mel = mel[0] # voc streaming - mel_chunks = get_chunks(mel, self.config.voc_block, self.config.voc_pad, "voc") + mel_chunks = get_chunks(mel, self.config.voc_block, + self.config.voc_pad, "voc") voc_chunk_num = len(mel_chunks) for i, mel_chunk in enumerate(mel_chunks): sub_wav = self.executor.voc_sess.run( output_names=None, input_feed={'logmel': mel_chunk}) - sub_wav = self.depadding(sub_wav[0], voc_chunk_num, i, - self.config.voc_block, self.config.voc_pad, - self.config.voc_upsample) + 
sub_wav = self.depadding( + sub_wav[0], voc_chunk_num, i, self.config.voc_block, + self.config.voc_pad, self.config.voc_upsample) yield self.after_process(sub_wav) @@ -130,7 +128,8 @@ class TTS: end = min(self.config.voc_block + self.config.voc_pad, mel_len) # streaming am - hss = get_chunks(orig_hs, self.config.am_block, self.config.am_pad, "am") + hss = get_chunks(orig_hs, self.config.am_block, + self.config.am_pad, "am") am_chunk_num = len(hss) for i, hs in enumerate(hss): am_decoder_output = self.executor.am_decoder_sess.run( @@ -147,7 +146,8 @@ class TTS: sub_mel = denorm(normalized_mel, self.executor.am_mu, self.executor.am_std) sub_mel = self.depadding(sub_mel, am_chunk_num, i, - self.config.am_block, self.config.am_pad, 1) + self.config.am_block, + self.config.am_pad, 1) if i == 0: mel_streaming = sub_mel @@ -165,23 +165,22 @@ class TTS: output_names=None, input_feed={'logmel': voc_chunk}) sub_wav = self.depadding( sub_wav[0], voc_chunk_num, voc_chunk_id, - self.config.voc_block, self.config.voc_pad, self.config.voc_upsample) + self.config.voc_block, self.config.voc_pad, + self.config.voc_upsample) yield self.after_process(sub_wav) voc_chunk_id += 1 - start = max( - 0, voc_chunk_id * self.config.voc_block - self.config.voc_pad) - end = min( - (voc_chunk_id + 1) * self.config.voc_block + self.config.voc_pad, - mel_len) + start = max(0, voc_chunk_id * self.config.voc_block - + self.config.voc_pad) + end = min((voc_chunk_id + 1) * self.config.voc_block + + self.config.voc_pad, mel_len) else: logging.error( "Only support fastspeech2_csmsc or fastspeech2_cnndecoder_csmsc on streaming tts." - ) + ) - def streamTTSBytes(self, text): for wav in self.engine.executor.infer( text=text, @@ -191,19 +190,14 @@ class TTS: wav = float2pcm(wav) # float32 to int16 wav_bytes = wav.tobytes() # to bytes yield wav_bytes - - + def after_process(self, wav): # for tvm wav = float2pcm(wav) # float32 to int16 wav_bytes = wav.tobytes() # to bytes wav_base64 = base64.b64encode(wav_bytes).decode('utf8') # to base64 return wav_base64 - + def streamTTS_TVM(self, text): # 用 TVM 优化 pass - - - - \ No newline at end of file diff --git a/demos/speech_web/speech_server/src/SpeechBase/vpr.py b/demos/speech_web/speech_server/src/SpeechBase/vpr.py index 29ee986e3..cf3367991 100644 --- a/demos/speech_web/speech_server/src/SpeechBase/vpr.py +++ b/demos/speech_web/speech_server/src/SpeechBase/vpr.py @@ -1,11 +1,13 @@ # vpr Demo 没有使用 mysql 与 muilvs, 仅用于docker演示 import logging + import faiss -from matplotlib import use import numpy as np + from .sql_helper import DataBase from .vpr_encode import get_audio_embedding + class VPR: def __init__(self, db_path, dim, top_k) -> None: # 初始化 @@ -14,15 +16,15 @@ class VPR: self.top_k = top_k self.dtype = np.float32 self.vpr_idx = 0 - + # db 初始化 self.db = DataBase(db_path) - + # faiss 初始化 index_ip = faiss.IndexFlatIP(dim) self.index_ip = faiss.IndexIDMap(index_ip) self.init() - + def init(self): # demo 初始化,把 mysql中的向量注册到 faiss 中 sql_dbs = self.db.select_all() @@ -34,12 +36,13 @@ class VPR: if len(vc.shape) == 1: vc = np.expand_dims(vc, axis=0) # 构建数据库 - self.index_ip.add_with_ids(vc, np.array((idx,)).astype('int64')) + self.index_ip.add_with_ids(vc, np.array( + (idx, )).astype('int64')) logging.info("faiss 构建完毕") - + def faiss_enroll(self, idx, vc): - self.index_ip.add_with_ids(vc, np.array((idx,)).astype('int64')) - + self.index_ip.add_with_ids(vc, np.array((idx, )).astype('int64')) + def vpr_enroll(self, username, wav_path): # 注册声纹 emb = get_audio_embedding(wav_path) @@ -53,21 +56,22 @@ 
class VPR: else: last_idx, mess = None return last_idx - + def vpr_recog(self, wav_path): # 识别声纹 emb_search = get_audio_embedding(wav_path) - + if emb_search is not None: emb_search = np.expand_dims(emb_search, axis=0) D, I = self.index_ip.search(emb_search, self.top_k) D = D.tolist()[0] - I = I.tolist()[0] - return [(round(D[i] * 100, 2 ), I[i]) for i in range(len(D)) if I[i] != -1] + I = I.tolist()[0] + return [(round(D[i] * 100, 2), I[i]) for i in range(len(D)) + if I[i] != -1] else: logging.error("识别失败") return None - + def do_search_vpr(self, wav_path): spk_ids, paths, scores = [], [], [] recog_result = self.vpr_recog(wav_path) @@ -78,41 +82,39 @@ class VPR: scores.append(score) paths.append("") return spk_ids, paths, scores - + def vpr_del(self, username): # 根据用户username, 删除声纹 # 查用户ID,删除对应向量 res = self.db.select_by_username(username) for r in res: idx = r['id'] - self.index_ip.remove_ids(np.array((idx,)).astype('int64')) - + self.index_ip.remove_ids(np.array((idx, )).astype('int64')) + self.db.drop_by_username(username) - + def vpr_list(self): # 获取数据列表 return self.db.select_all() - + def do_list(self): spk_ids, vpr_ids = [], [] for res in self.db.select_all(): spk_ids.append(res['username']) vpr_ids.append(res['id']) - return spk_ids, vpr_ids - + return spk_ids, vpr_ids + def do_get_wav(self, vpr_idx): - res = self.db.select_by_id(vpr_idx) - return res[0]['wavpath'] - - + res = self.db.select_by_id(vpr_idx) + return res[0]['wavpath'] + def vpr_data(self, idx): # 获取对应ID的数据 res = self.db.select_by_id(idx) return res - + def vpr_droptable(self): # 删除表 self.db.drop_table() # 清空 faiss self.index_ip.reset() - diff --git a/demos/speech_web/speech_server/src/SpeechBase/vpr_encode.py b/demos/speech_web/speech_server/src/SpeechBase/vpr_encode.py index a6a00e4d0..9d052fd98 100644 --- a/demos/speech_web/speech_server/src/SpeechBase/vpr_encode.py +++ b/demos/speech_web/speech_server/src/SpeechBase/vpr_encode.py @@ -1,9 +1,12 @@ -from paddlespeech.cli.vector import VectorExecutor -import numpy as np import logging +import numpy as np + +from paddlespeech.cli.vector import VectorExecutor + vector_executor = VectorExecutor() + def get_audio_embedding(path): """ Use vpr_inference to generate embedding of audio @@ -16,5 +19,3 @@ def get_audio_embedding(path): except Exception as e: logging.error(f"Error with embedding:{e}") return None - - \ No newline at end of file diff --git a/demos/speech_web/speech_server/src/WebsocketManeger.py b/demos/speech_web/speech_server/src/WebsocketManeger.py index 5edde8430..954d849a5 100644 --- a/demos/speech_web/speech_server/src/WebsocketManeger.py +++ b/demos/speech_web/speech_server/src/WebsocketManeger.py @@ -2,6 +2,7 @@ from typing import List from fastapi import WebSocket + class ConnectionManager: def __init__(self): # 存放激活的ws连接对象 @@ -28,4 +29,4 @@ class ConnectionManager: await connection.send_text(message) -manager = ConnectionManager() \ No newline at end of file +manager = ConnectionManager() diff --git a/demos/speech_web/speech_server/src/robot.py b/demos/speech_web/speech_server/src/robot.py index b971c57b5..dd8c56e0c 100644 --- a/demos/speech_web/speech_server/src/robot.py +++ b/demos/speech_web/speech_server/src/robot.py @@ -1,60 +1,64 @@ -from paddlespeech.cli.asr.infer import ASRExecutor -import soundfile as sf import os -import librosa +import soundfile as sf from src.SpeechBase.asr import ASR -from src.SpeechBase.tts import TTS from src.SpeechBase.nlp import NLP +from src.SpeechBase.tts import TTS + +from paddlespeech.cli.asr.infer import 
ASRExecutor class Robot: - def __init__(self, asr_config, tts_config,asr_init_path, + def __init__(self, + asr_config, + tts_config, + asr_init_path, ie_model_path=None) -> None: self.nlp = NLP(ie_model_path=ie_model_path) self.asr = ASR(config_path=asr_config) self.tts = TTS(config_path=tts_config) self.tts_sample_rate = 24000 self.asr_sample_rate = 16000 - + # 流式识别效果不如端到端的模型,这里流式模型与端到端模型分开 self.asr_model = ASRExecutor() self.asr_name = "conformer_wenetspeech" self.warm_up_asrmodel(asr_init_path) - - def warm_up_asrmodel(self, asr_init_path): + def warm_up_asrmodel(self, asr_init_path): if not os.path.exists(asr_init_path): path_dir = os.path.dirname(asr_init_path) if not os.path.exists(path_dir): os.makedirs(path_dir, exist_ok=True) - + # TTS生成,采样率24000 text = "生成初始音频" self.text2speech(text, asr_init_path) - + # asr model初始化 - self.asr_model(asr_init_path, model=self.asr_name,lang='zh', - sample_rate=16000, force_yes=True) - - + self.asr_model( + asr_init_path, + model=self.asr_name, + lang='zh', + sample_rate=16000, + force_yes=True) + def speech2text(self, audio_file): self.asr_model.preprocess(self.asr_name, audio_file) self.asr_model.infer(self.asr_name) res = self.asr_model.postprocess() return res - + def text2speech(self, text, outpath): wav = self.tts.offlineTTS(text) - sf.write( - outpath, wav, samplerate=self.tts_sample_rate) + sf.write(outpath, wav, samplerate=self.tts_sample_rate) res = wav return res - + def text2speechStream(self, text): for sub_wav_base64 in self.tts.streamTTS(text=text): yield sub_wav_base64 - + def text2speechStreamBytes(self, text): for wav_bytes in self.tts.streamTTSBytes(text=text): yield wav_bytes @@ -66,5 +70,3 @@ class Robot: def ie(self, text): result = self.nlp.ie(text) return result - - \ No newline at end of file diff --git a/demos/speech_web/speech_server/src/util.py b/demos/speech_web/speech_server/src/util.py index 34005d919..4a566b6ee 100644 --- a/demos/speech_web/speech_server/src/util.py +++ b/demos/speech_web/speech_server/src/util.py @@ -1,18 +1,13 @@ import random + def randName(n=5): - return "".join(random.sample('zyxwvutsrqponmlkjihgfedcba',n)) + return "".join(random.sample('zyxwvutsrqponmlkjihgfedcba', n)) + def SuccessRequest(result=None, message="ok"): - return { - "code": 0, - "result":result, - "message": message - } + return {"code": 0, "result": result, "message": message} + def ErrorRequest(result=None, message="error"): - return { - "code": -1, - "result":result, - "message": message - } \ No newline at end of file + return {"code": -1, "result": result, "message": message} diff --git a/demos/streaming_asr_server/local/rtf_from_log.py b/demos/streaming_asr_server/local/rtf_from_log.py index 4b89b48fd..09a9c9750 100755 --- a/demos/streaming_asr_server/local/rtf_from_log.py +++ b/demos/streaming_asr_server/local/rtf_from_log.py @@ -34,7 +34,7 @@ if __name__ == '__main__': n = 0 for m in rtfs: # not accurate, may have duplicate log - n += 1 + n += 1 T += m['T'] P += m['P'] diff --git a/docs/requirements.txt b/docs/requirements.txt index ee116a9b6..11e94f48d 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,12 +1,6 @@ -myst-parser -numpydoc -recommonmark>=0.5.0 -sphinx -sphinx-autobuild -sphinx-markdown-tables -sphinx_rtd_theme -paddlepaddle>=2.2.2 +braceexpandcolorlog editdistance +fastapi g2p_en g2pM h5py @@ -14,40 +8,45 @@ inflect jieba jsonlines kaldiio +keyboard librosa==0.8.1 loguru matplotlib +myst-parser nara_wpe +numpydoc onnxruntime==1.10.0 opencc -pandas paddlenlp +paddlepaddle>=2.2.2 
paddlespeech_feat +pandas +pathos == 0.2.8 +pattern_singleton Pillow>=9.0.0 praatio==5.0.0 +prettytable pypinyin pypinyin-dict python-dateutil pyworld==0.2.12 +recommonmark>=0.5.0 resampy==0.2.2 sacrebleu scipy sentencepiece~=0.1.96 soundfile~=0.10 +sphinx +sphinx-autobuild +sphinx-markdown-tables +sphinx_rtd_theme textgrid timer tqdm typeguard +uvicorn visualdl webrtcvad +websockets yacs~=0.1.8 -prettytable zhon -colorlog -pathos == 0.2.8 -fastapi -websockets -keyboard -uvicorn -pattern_singleton -braceexpand \ No newline at end of file diff --git a/docs/source/conf.py b/docs/source/conf.py index c94cf0b86..cd9b1807b 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -20,10 +20,11 @@ # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. +import os +import sys + import recommonmark.parser import sphinx_rtd_theme -import sys -import os sys.path.insert(0, os.path.abspath('../..')) autodoc_mock_imports = ["soundfile", "librosa"] diff --git a/examples/iwslt2012/punc0/local/preprocess.py b/examples/iwslt2012/punc0/local/preprocess.py index 03b27e89f..3df07c72a 100644 --- a/examples/iwslt2012/punc0/local/preprocess.py +++ b/examples/iwslt2012/punc0/local/preprocess.py @@ -1,27 +1,29 @@ import argparse -import os + def process_sentence(line): - if line == '': return '' - res = line[0] - for i in range(1, len(line)): - res += (' ' + line[i]) - return res + if line == '': + return '' + res = line[0] + for i in range(1, len(line)): + res += (' ' + line[i]) + return res + if __name__ == "__main__": - paser = argparse.ArgumentParser(description = "Input filename") - paser.add_argument('-input_file') - paser.add_argument('-output_file') - sentence_cnt = 0 - args = paser.parse_args() - with open(args.input_file, 'r') as f: - with open(args.output_file, 'w') as write_f: - while True: - line = f.readline() - if line: - sentence_cnt += 1 - write_f.write(process_sentence(line)) - else: - break - print('preprocess over') - print('total sentences number:', sentence_cnt) + paser = argparse.ArgumentParser(description="Input filename") + paser.add_argument('-input_file') + paser.add_argument('-output_file') + sentence_cnt = 0 + args = paser.parse_args() + with open(args.input_file, 'r') as f: + with open(args.output_file, 'w') as write_f: + while True: + line = f.readline() + if line: + sentence_cnt += 1 + write_f.write(process_sentence(line)) + else: + break + print('preprocess over') + print('total sentences number:', sentence_cnt) diff --git a/examples/other/tts_finetune/tts3/finetune.py b/examples/other/tts_finetune/tts3/finetune.py index f05ba9435..0f060b44d 100644 --- a/examples/other/tts_finetune/tts3/finetune.py +++ b/examples/other/tts_finetune/tts3/finetune.py @@ -17,15 +17,14 @@ from pathlib import Path from typing import Union import yaml -from paddle import distributed as dist -from yacs.config import CfgNode - -from paddlespeech.t2s.exps.fastspeech2.train import train_sp - from local.check_oov import get_check_result from local.extract import extract_feature from local.label_process import get_single_label from local.prepare_env import generate_finetune_env +from paddle import distributed as dist +from yacs.config import CfgNode + +from paddlespeech.t2s.exps.fastspeech2.train import train_sp from utils.gen_duration_from_textgrid import gen_duration_from_textgrid DICT_EN = 'tools/aligner/cmudict-0.7b' diff 
--git a/paddlespeech/__init__.py b/paddlespeech/__init__.py index 4b1c0ef3d..b781c4a8e 100644 --- a/paddlespeech/__init__.py +++ b/paddlespeech/__init__.py @@ -14,5 +14,3 @@ import _locale _locale._getdefaultlocale = (lambda *args: ['en_US', 'utf8']) - - diff --git a/paddlespeech/audio/__init__.py b/paddlespeech/audio/__init__.py index 83be8e32e..a91958105 100644 --- a/paddlespeech/audio/__init__.py +++ b/paddlespeech/audio/__init__.py @@ -14,12 +14,12 @@ from . import compliance from . import datasets from . import features -from . import text -from . import transform -from . import streamdata from . import functional from . import io from . import metric from . import sox_effects +from . import streamdata +from . import text +from . import transform from .backends import load from .backends import save diff --git a/paddlespeech/audio/streamdata/__init__.py b/paddlespeech/audio/streamdata/__init__.py index 753fcc11b..47a2e79b3 100644 --- a/paddlespeech/audio/streamdata/__init__.py +++ b/paddlespeech/audio/streamdata/__init__.py @@ -4,67 +4,66 @@ # Modified from https://github.com/webdataset/webdataset # # flake8: noqa - -from .cache import ( - cached_tarfile_samples, - cached_tarfile_to_samples, - lru_cleanup, - pipe_cleaner, -) -from .compat import WebDataset, WebLoader, FluidWrapper -from .extradatasets import MockDataset, with_epoch, with_length -from .filters import ( - associate, - batched, - decode, - detshuffle, - extract_keys, - getfirst, - info, - map, - map_dict, - map_tuple, - pipelinefilter, - rename, - rename_keys, - audio_resample, - select, - shuffle, - slice, - to_tuple, - transform_with, - unbatched, - xdecode, - audio_data_filter, - audio_tokenize, - audio_resample, - audio_compute_fbank, - audio_spec_aug, - sort, - audio_padding, - audio_cmvn, - placeholder, -) -from .handlers import ( - ignore_and_continue, - ignore_and_stop, - reraise_exception, - warn_and_continue, - warn_and_stop, -) +from .cache import cached_tarfile_samples +from .cache import cached_tarfile_to_samples +from .cache import lru_cleanup +from .cache import pipe_cleaner +from .compat import FluidWrapper +from .compat import WebDataset +from .compat import WebLoader +from .extradatasets import MockDataset +from .extradatasets import with_epoch +from .extradatasets import with_length +from .filters import associate +from .filters import audio_cmvn +from .filters import audio_compute_fbank +from .filters import audio_data_filter +from .filters import audio_padding +from .filters import audio_resample +from .filters import audio_spec_aug +from .filters import audio_tokenize +from .filters import batched +from .filters import decode +from .filters import detshuffle +from .filters import extract_keys +from .filters import getfirst +from .filters import info +from .filters import map +from .filters import map_dict +from .filters import map_tuple +from .filters import pipelinefilter +from .filters import placeholder +from .filters import rename +from .filters import rename_keys +from .filters import select +from .filters import shuffle +from .filters import slice +from .filters import sort +from .filters import to_tuple +from .filters import transform_with +from .filters import unbatched +from .filters import xdecode +from .handlers import ignore_and_continue +from .handlers import ignore_and_stop +from .handlers import reraise_exception +from .handlers import warn_and_continue +from .handlers import warn_and_stop +from .mix import RandomMix +from .mix import RoundRobin from .pipeline import DataPipeline 
-from .shardlists import ( - MultiShardSample, - ResampledShards, - SimpleShardList, - non_empty, - resampled, - shardspec, - single_node_only, - split_by_node, - split_by_worker, -) -from .tariterators import tarfile_samples, tarfile_to_samples -from .utils import PipelineStage, repeatedly -from .writer import ShardWriter, TarWriter, numpy_dumps -from .mix import RandomMix, RoundRobin +from .shardlists import MultiShardSample +from .shardlists import non_empty +from .shardlists import resampled +from .shardlists import ResampledShards +from .shardlists import shardspec +from .shardlists import SimpleShardList +from .shardlists import single_node_only +from .shardlists import split_by_node +from .shardlists import split_by_worker +from .tariterators import tarfile_samples +from .tariterators import tarfile_to_samples +from .utils import PipelineStage +from .utils import repeatedly +from .writer import numpy_dumps +from .writer import ShardWriter +from .writer import TarWriter diff --git a/paddlespeech/audio/streamdata/autodecode.py b/paddlespeech/audio/streamdata/autodecode.py index ca0e2ea2f..d7f7937bd 100644 --- a/paddlespeech/audio/streamdata/autodecode.py +++ b/paddlespeech/audio/streamdata/autodecode.py @@ -5,18 +5,19 @@ # See the LICENSE file for licensing terms (BSD-style). # Modified from https://github.com/webdataset/webdataset # - """Automatically decode webdataset samples.""" - -import io, json, os, pickle, re, tempfile +import io +import json +import os +import pickle +import re +import tempfile from functools import partial import numpy as np - """Extensions passed on to the image decoder.""" image_extensions = "jpg jpeg png ppm pgm pbm pnm".split() - ################################################################ # handle basic datatypes ################################################################ @@ -128,7 +129,7 @@ def call_extension_handler(key, data, f, extensions): target = target.split(".") if len(target) > len(extension): continue - if extension[-len(target) :] == target: + if extension[-len(target):] == target: return f(data) return None @@ -268,7 +269,6 @@ def imagehandler(imagespec, extensions=image_extensions): ################################################################ # torch video ################################################################ - ''' def torch_video(key, data): """Decode video using the torchvideo library. 
@@ -289,7 +289,6 @@ def torch_video(key, data): return torchvision.io.read_video(fname, pts_unit="sec") ''' - ################################################################ # paddlespeech.audio ################################################################ @@ -359,7 +358,6 @@ def gzfilter(key, data): # decode entire training amples ################################################################ - default_pre_handlers = [gzfilter] default_post_handlers = [basichandlers] @@ -387,7 +385,8 @@ class Decoder: pre = default_pre_handlers if post is None: post = default_post_handlers - assert all(callable(h) for h in handlers), f"one of {handlers} not callable" + assert all(callable(h) + for h in handlers), f"one of {handlers} not callable" assert all(callable(h) for h in pre), f"one of {pre} not callable" assert all(callable(h) for h in post), f"one of {post} not callable" self.handlers = pre + handlers + post diff --git a/paddlespeech/audio/streamdata/cache.py b/paddlespeech/audio/streamdata/cache.py index e7bbffa1b..faa196398 100644 --- a/paddlespeech/audio/streamdata/cache.py +++ b/paddlespeech/audio/streamdata/cache.py @@ -2,7 +2,10 @@ # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # See the LICENSE file for licensing terms (BSD-style). # Modified from https://github.com/webdataset/webdataset -import itertools, os, random, re, sys +import os +import random +import re +import sys from urllib.parse import urlparse from . import filters @@ -40,7 +43,7 @@ def lru_cleanup(cache_dir, cache_size, keyfn=os.path.getctime, verbose=False): os.remove(fname) -def download(url, dest, chunk_size=1024 ** 2, verbose=False): +def download(url, dest, chunk_size=1024**2, verbose=False): """Download a file from `url` to `dest`.""" temp = dest + f".temp{os.getpid()}" with gopen.gopen(url) as stream: @@ -65,12 +68,11 @@ def pipe_cleaner(spec): def get_file_cached( - spec, - cache_size=-1, - cache_dir=None, - url_to_name=pipe_cleaner, - verbose=False, -): + spec, + cache_size=-1, + cache_dir=None, + url_to_name=pipe_cleaner, + verbose=False, ): if cache_size == -1: cache_size = default_cache_size if cache_dir is None: @@ -107,15 +109,14 @@ verbose_cache = int(os.environ.get("WDS_VERBOSE_CACHE", "0")) def cached_url_opener( - data, - handler=reraise_exception, - cache_size=-1, - cache_dir=None, - url_to_name=pipe_cleaner, - validator=check_tar_format, - verbose=False, - always=False, -): + data, + handler=reraise_exception, + cache_size=-1, + cache_dir=None, + url_to_name=pipe_cleaner, + validator=check_tar_format, + verbose=False, + always=False, ): """Given a stream of url names (packaged in `dict(url=url)`), yield opened streams.""" verbose = verbose or verbose_cache for sample in data: @@ -132,8 +133,7 @@ def cached_url_opener( cache_size=cache_size, cache_dir=cache_dir, url_to_name=url_to_name, - verbose=verbose, - ) + verbose=verbose, ) if verbose: print("# opening %s" % dest, file=sys.stderr) assert os.path.exists(dest) @@ -143,9 +143,8 @@ def cached_url_opener( data = f.read(200) os.remove(dest) raise ValueError( - "%s (%s) is not a tar archive, but a %s, contains %s" - % (dest, url, ftype, repr(data)) - ) + "%s (%s) is not a tar archive, but a %s, contains %s" % + (dest, url, ftype, repr(data))) try: stream = open(dest, "rb") sample.update(stream=stream) @@ -158,7 +157,7 @@ def cached_url_opener( continue raise exn except Exception as exn: - exn.args = exn.args + (url,) + exn.args = exn.args + (url, ) if handler(exn): continue else: @@ -166,14 +165,13 @@ def cached_url_opener( def 
cached_tarfile_samples( - src, - handler=reraise_exception, - cache_size=-1, - cache_dir=None, - verbose=False, - url_to_name=pipe_cleaner, - always=False, -): + src, + handler=reraise_exception, + cache_size=-1, + cache_dir=None, + verbose=False, + url_to_name=pipe_cleaner, + always=False, ): streams = cached_url_opener( src, handler=handler, @@ -181,8 +179,7 @@ def cached_tarfile_samples( cache_dir=cache_dir, verbose=verbose, url_to_name=url_to_name, - always=always, - ) + always=always, ) samples = tar_file_and_group_expander(streams, handler=handler) return samples diff --git a/paddlespeech/audio/streamdata/compat.py b/paddlespeech/audio/streamdata/compat.py index deda53384..9012eeb10 100644 --- a/paddlespeech/audio/streamdata/compat.py +++ b/paddlespeech/audio/streamdata/compat.py @@ -2,17 +2,17 @@ # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # See the LICENSE file for licensing terms (BSD-style). # Modified from https://github.com/webdataset/webdataset -from dataclasses import dataclass -from itertools import islice -from typing import List - -import braceexpand, yaml +import yaml from . import autodecode -from . import cache, filters, shardlists, tariterators +from . import cache +from . import filters +from . import shardlists +from . import tariterators from .filters import reraise_exception +from .paddle_utils import DataLoader +from .paddle_utils import IterableDataset from .pipeline import DataPipeline -from .paddle_utils import DataLoader, IterableDataset class FluidInterface: @@ -26,7 +26,8 @@ class FluidInterface: return self.compose(filters.unbatched()) def listed(self, batchsize, partial=True): - return self.compose(filters.batched(), batchsize=batchsize, collation_fn=None) + return self.compose( + filters.batched(), batchsize=batchsize, collation_fn=None) def unlisted(self): return self.compose(filters.unlisted()) @@ -43,9 +44,19 @@ class FluidInterface: def map(self, f, handler=reraise_exception): return self.compose(filters.map(f, handler=handler)) - def decode(self, *args, pre=None, post=None, only=None, partial=False, handler=reraise_exception): - handlers = [autodecode.ImageHandler(x) if isinstance(x, str) else x for x in args] - decoder = autodecode.Decoder(handlers, pre=pre, post=post, only=only, partial=partial) + def decode(self, + *args, + pre=None, + post=None, + only=None, + partial=False, + handler=reraise_exception): + handlers = [ + autodecode.ImageHandler(x) if isinstance(x, str) else x + for x in args + ] + decoder = autodecode.Decoder( + handlers, pre=pre, post=post, only=only, partial=partial) return self.map(decoder, handler=handler) def map_dict(self, handler=reraise_exception, **kw): @@ -80,12 +91,12 @@ class FluidInterface: def audio_data_filter(self, *args, **kw): return self.compose(filters.audio_data_filter(*args, **kw)) - + def audio_tokenize(self, *args, **kw): return self.compose(filters.audio_tokenize(*args, **kw)) def resample(self, *args, **kw): - return self.compose(filters.resample(*args, **kw)) + return self.compose(filters.resample(*args, **kw)) def audio_compute_fbank(self, *args, **kw): return self.compose(filters.audio_compute_fbank(*args, **kw)) @@ -102,27 +113,28 @@ class FluidInterface: def audio_cmvn(self, cmvn_file): return self.compose(filters.audio_cmvn(cmvn_file)) + class WebDataset(DataPipeline, FluidInterface): """Small fluid-interface wrapper for DataPipeline.""" def __init__( - self, - urls, - handler=reraise_exception, - resampled=False, - repeat=False, - shardshuffle=None, - cache_size=0, - 
cache_dir=None, - detshuffle=False, - nodesplitter=shardlists.single_node_only, - verbose=False, - ): + self, + urls, + handler=reraise_exception, + resampled=False, + repeat=False, + shardshuffle=None, + cache_size=0, + cache_dir=None, + detshuffle=False, + nodesplitter=shardlists.single_node_only, + verbose=False, ): super().__init__() if isinstance(urls, IterableDataset): assert not resampled self.append(urls) - elif isinstance(urls, str) and (urls.endswith(".yaml") or urls.endswith(".yml")): + elif isinstance(urls, str) and (urls.endswith(".yaml") or + urls.endswith(".yml")): with (open(urls)) as stream: spec = yaml.safe_load(stream) assert "datasets" in spec @@ -152,9 +164,7 @@ class WebDataset(DataPipeline, FluidInterface): handler=handler, verbose=verbose, cache_size=cache_size, - cache_dir=cache_dir, - ) - ) + cache_dir=cache_dir, )) class FluidWrapper(DataPipeline, FluidInterface): diff --git a/paddlespeech/audio/streamdata/extradatasets.py b/paddlespeech/audio/streamdata/extradatasets.py index e6d617724..76361c24a 100644 --- a/paddlespeech/audio/streamdata/extradatasets.py +++ b/paddlespeech/audio/streamdata/extradatasets.py @@ -5,20 +5,10 @@ # See the LICENSE file for licensing terms (BSD-style). # Modified from https://github.com/webdataset/webdataset # - - """Train PyTorch models directly from POSIX tar archive. Code works locally or over HTTP connections. """ - -import itertools as itt -import os -import random -import sys - -import braceexpand - from . import utils from .paddle_utils import IterableDataset from .utils import PipelineStage @@ -63,8 +53,7 @@ class repeatedly(IterableDataset, PipelineStage): return utils.repeatedly( source, nepochs=self.nepochs, - nbatches=self.nbatches, - ) + nbatches=self.nbatches, ) class with_epoch(IterableDataset): diff --git a/paddlespeech/audio/streamdata/filters.py b/paddlespeech/audio/streamdata/filters.py index 82b9c6bab..68d6830bb 100644 --- a/paddlespeech/audio/streamdata/filters.py +++ b/paddlespeech/audio/streamdata/filters.py @@ -3,7 +3,6 @@ # This file is part of the WebDataset library. # See the LICENSE file for licensing terms (BSD-style). # - # Modified from https://github.com/webdataset/webdataset # Modified from wenet(https://github.com/wenet-e2e/wenet) """A collection of iterators for data transformations. @@ -12,28 +11,29 @@ These functions are plain iterator functions. You can find curried versions in webdataset.filters, and you can find IterableDataset wrappers in webdataset.processing. """ - import io -from fnmatch import fnmatch +import itertools +import os +import random import re -import itertools, os, random, sys, time -from functools import reduce, wraps +import sys +import time +from fnmatch import fnmatch +from functools import reduce -import numpy as np +import paddle from . import autodecode -from . import utils -from .paddle_utils import PaddleTensor -from .utils import PipelineStage - +from . import utils from .. import backends from ..compliance import kaldi -import paddle from ..transform.cmvn import GlobalCMVN -from ..utils.tensor_utils import pad_sequence -from ..transform.spec_augment import time_warp -from ..transform.spec_augment import time_mask from ..transform.spec_augment import freq_mask +from ..transform.spec_augment import time_mask +from ..transform.spec_augment import time_warp +from ..utils.tensor_utils import pad_sequence +from .utils import PipelineStage + class FilterFunction(object): """Helper class for currying pipeline stages. 
@@ -159,10 +159,12 @@ def transform_with(sample, transformers): result[i] = f(sample[i]) return result + ### # Iterators ### + def _info(data, fmt=None, n=3, every=-1, width=50, stream=sys.stderr, name=""): """Print information about the samples that are passing through. @@ -278,10 +280,16 @@ def _log_keys(data, logfile=None): log_keys = pipelinefilter(_log_keys) +def _minedecode(x): + if isinstance(x, str): + return autodecode.imagehandler(x) + else: + return x + + def _decode(data, *args, handler=reraise_exception, **kw): """Decode data based on the decoding functions given as arguments.""" - - decoder = lambda x: autodecode.imagehandler(x) if isinstance(x, str) else x + decoder = _minedecode handlers = [decoder(x) for x in args] f = autodecode.Decoder(handlers, **kw) @@ -325,15 +333,24 @@ def _rename(data, handler=reraise_exception, keep=True, **kw): for sample in data: try: if not keep: - yield {k: getfirst(sample, v, missing_is_error=True) for k, v in kw.items()} + yield { + k: getfirst(sample, v, missing_is_error=True) + for k, v in kw.items() + } else: def listify(v): return v.split(";") if isinstance(v, str) else v to_be_replaced = {x for v in kw.values() for x in listify(v)} - result = {k: v for k, v in sample.items() if k not in to_be_replaced} - result.update({k: getfirst(sample, v, missing_is_error=True) for k, v in kw.items()}) + result = { + k: v + for k, v in sample.items() if k not in to_be_replaced + } + result.update({ + k: getfirst(sample, v, missing_is_error=True) + for k, v in kw.items() + }) yield result except Exception as exn: if handler(exn): @@ -381,7 +398,11 @@ def _map_dict(data, handler=reraise_exception, **kw): map_dict = pipelinefilter(_map_dict) -def _to_tuple(data, *args, handler=reraise_exception, missing_is_error=True, none_is_error=None): +def _to_tuple(data, + *args, + handler=reraise_exception, + missing_is_error=True, + none_is_error=None): """Convert dict samples to tuples.""" if none_is_error is None: none_is_error = missing_is_error @@ -390,7 +411,10 @@ def _to_tuple(data, *args, handler=reraise_exception, missing_is_error=True, non for sample in data: try: - result = tuple([getfirst(sample, f, missing_is_error=missing_is_error) for f in args]) + result = tuple([ + getfirst(sample, f, missing_is_error=missing_is_error) + for f in args + ]) if none_is_error and any(x is None for x in result): raise ValueError(f"to_tuple {args} got {sample.keys()}") yield result @@ -463,19 +487,28 @@ rsample = pipelinefilter(_rsample) slice = pipelinefilter(itertools.islice) -def _extract_keys(source, *patterns, duplicate_is_error=True, ignore_missing=False): +def _extract_keys(source, + *patterns, + duplicate_is_error=True, + ignore_missing=False): for sample in source: result = [] for pattern in patterns: - pattern = pattern.split(";") if isinstance(pattern, str) else pattern - matches = [x for x in sample.keys() if any(fnmatch("." + x, p) for p in pattern)] + pattern = pattern.split(";") if isinstance(pattern, + str) else pattern + matches = [ + x for x in sample.keys() + if any(fnmatch("." 
+ x, p) for p in pattern) + ] if len(matches) == 0: if ignore_missing: continue else: - raise ValueError(f"Cannot find {pattern} in sample keys {sample.keys()}.") + raise ValueError( + f"Cannot find {pattern} in sample keys {sample.keys()}.") if len(matches) > 1 and duplicate_is_error: - raise ValueError(f"Multiple sample keys {sample.keys()} match {pattern}.") + raise ValueError( + f"Multiple sample keys {sample.keys()} match {pattern}.") value = sample[matches[0]] result.append(value) yield tuple(result) @@ -484,7 +517,12 @@ def _extract_keys(source, *patterns, duplicate_is_error=True, ignore_missing=Fal extract_keys = pipelinefilter(_extract_keys) -def _rename_keys(source, *args, keep_unselected=False, must_match=True, duplicate_is_error=True, **kw): +def _rename_keys(source, + *args, + keep_unselected=False, + must_match=True, + duplicate_is_error=True, + **kw): renamings = [(pattern, output) for output, pattern in args] renamings += [(pattern, output) for output, pattern in kw.items()] for sample in source: @@ -504,11 +542,15 @@ def _rename_keys(source, *args, keep_unselected=False, must_match=True, duplicat continue if new_name in new_sample: if duplicate_is_error: - raise ValueError(f"Duplicate value in sample {sample.keys()} after rename.") + raise ValueError( + f"Duplicate value in sample {sample.keys()} after rename." + ) continue new_sample[new_name] = value if must_match and not all(matched.values()): - raise ValueError(f"Not all patterns ({matched}) matched sample keys ({sample.keys()}).") + raise ValueError( + f"Not all patterns ({matched}) matched sample keys ({sample.keys()})." + ) yield new_sample @@ -541,18 +583,18 @@ def find_decoder(decoders, path): if fname.startswith("__"): return lambda x: x for pattern, fun in decoders[::-1]: - if fnmatch(fname.lower(), pattern) or fnmatch("." + fname.lower(), pattern): + if fnmatch(fname.lower(), pattern) or fnmatch("." + fname.lower(), + pattern): return fun return None def _xdecode( - source, - *args, - must_decode=True, - defaults=default_decoders, - **kw, -): + source, + *args, + must_decode=True, + defaults=default_decoders, + **kw, ): decoders = list(defaults) + list(args) decoders += [("*." + k, v) for k, v in kw.items()] for sample in source: @@ -575,18 +617,18 @@ def _xdecode( new_sample[path] = value yield new_sample -xdecode = pipelinefilter(_xdecode) +xdecode = pipelinefilter(_xdecode) def _audio_data_filter(source, - frame_shift=10, - max_length=10240, - min_length=10, - token_max_length=200, - token_min_length=1, - min_output_input_ratio=0.0005, - max_output_input_ratio=1): + frame_shift=10, + max_length=10240, + min_length=10, + token_max_length=200, + token_min_length=1, + min_output_input_ratio=0.0005, + max_output_input_ratio=1): """ Filter sample according to feature and label length Inplace operation. 
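For example, with the default frame_shift of 10 ms there are 100 frames per second, so a 2-second 16 kHz clip corresponds to 200 frames and passes the default min_length=10 / max_length=10240 bounds, while a 50 ms clip (5 frames) is dropped. A standalone sketch of the same rule (hypothetical helper name and inputs; defaults copied from the signature above):

def passes_length_filter(num_samples, sample_rate,
                         frame_shift=10, min_length=10, max_length=10240):
    # same rule as _audio_data_filter: frames = samples / sample_rate * (1000 / frame_shift)
    num_frames = num_samples / sample_rate * (1000 / frame_shift)
    return min_length <= num_frames <= max_length

print(passes_length_filter(32000, 16000))  # 2 s at 16 kHz -> 200 frames -> True
print(passes_length_filter(800, 16000))    # 50 ms -> 5 frames -> False (too short)
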
@@ -613,7 +655,8 @@ def _audio_data_filter(source, assert 'wav' in sample assert 'label' in sample # sample['wav'] is paddle.Tensor, we have 100 frames every second (default) - num_frames = sample['wav'].shape[1] / sample['sample_rate'] * (1000 / frame_shift) + num_frames = sample['wav'].shape[1] / sample['sample_rate'] * ( + 1000 / frame_shift) if num_frames < min_length: continue if num_frames > max_length: @@ -629,13 +672,15 @@ def _audio_data_filter(source, continue yield sample + audio_data_filter = pipelinefilter(_audio_data_filter) + def _audio_tokenize(source, - symbol_table, - bpe_model=None, - non_lang_syms=None, - split_with_space=False): + symbol_table, + bpe_model=None, + non_lang_syms=None, + split_with_space=False): """ Decode text to chars or BPE Inplace operation @@ -693,8 +738,10 @@ def _audio_tokenize(source, sample['label'] = label yield sample + audio_tokenize = pipelinefilter(_audio_tokenize) + def _audio_resample(source, resample_rate=16000): """ Resample data. Inplace operation. @@ -713,18 +760,22 @@ def _audio_resample(source, resample_rate=16000): waveform = sample['wav'] if sample_rate != resample_rate: sample['sample_rate'] = resample_rate - sample['wav'] = paddle.to_tensor(backends.soundfile_backend.resample( - waveform.numpy(), src_sr = sample_rate, target_sr = resample_rate - )) + sample['wav'] = paddle.to_tensor( + backends.soundfile_backend.resample( + waveform.numpy(), + src_sr=sample_rate, + target_sr=resample_rate)) yield sample + audio_resample = pipelinefilter(_audio_resample) + def _audio_compute_fbank(source, - num_mel_bins=80, - frame_length=25, - frame_shift=10, - dither=0.0): + num_mel_bins=80, + frame_length=25, + frame_shift=10, + dither=0.0): """ Extract fbank Args: @@ -746,30 +797,33 @@ def _audio_compute_fbank(source, waveform = sample['wav'] waveform = waveform * (1 << 15) # Only keep fname, feat, label - mat = kaldi.fbank(waveform, - n_mels=num_mel_bins, - frame_length=frame_length, - frame_shift=frame_shift, - dither=dither, - energy_floor=0.0, - sr=sample_rate) + mat = kaldi.fbank( + waveform, + n_mels=num_mel_bins, + frame_length=frame_length, + frame_shift=frame_shift, + dither=dither, + energy_floor=0.0, + sr=sample_rate) yield dict(fname=sample['fname'], label=sample['label'], feat=mat) audio_compute_fbank = pipelinefilter(_audio_compute_fbank) -def _audio_spec_aug(source, - max_w=5, - w_inplace=True, - w_mode="PIL", - max_f=30, - num_f_mask=2, - f_inplace=True, - f_replace_with_zero=False, - max_t=40, - num_t_mask=2, - t_inplace=True, - t_replace_with_zero=False,): + +def _audio_spec_aug( + source, + max_w=5, + w_inplace=True, + w_mode="PIL", + max_f=30, + num_f_mask=2, + f_inplace=True, + f_replace_with_zero=False, + max_t=40, + num_t_mask=2, + t_inplace=True, + t_replace_with_zero=False, ): """ Do spec augmentation Inplace operation @@ -793,12 +847,23 @@ def _audio_spec_aug(source, for sample in source: x = sample['feat'] x = x.numpy() - x = time_warp(x, max_time_warp=max_w, inplace = w_inplace, mode= w_mode) - x = freq_mask(x, F = max_f, n_mask = num_f_mask, inplace = f_inplace, replace_with_zero = f_replace_with_zero) - x = time_mask(x, T = max_t, n_mask = num_t_mask, inplace = t_inplace, replace_with_zero = t_replace_with_zero) + x = time_warp(x, max_time_warp=max_w, inplace=w_inplace, mode=w_mode) + x = freq_mask( + x, + F=max_f, + n_mask=num_f_mask, + inplace=f_inplace, + replace_with_zero=f_replace_with_zero) + x = time_mask( + x, + T=max_t, + n_mask=num_t_mask, + inplace=t_inplace, + replace_with_zero=t_replace_with_zero) 
sample['feat'] = paddle.to_tensor(x, dtype=paddle.float32) yield sample + audio_spec_aug = pipelinefilter(_audio_spec_aug) @@ -829,8 +894,10 @@ def _sort(source, sort_size=500): for x in buf: yield x + sort = pipelinefilter(_sort) + def _batched(source, batch_size=16): """ Static batch the data by `batch_size` @@ -850,8 +917,10 @@ def _batched(source, batch_size=16): if len(buf) > 0: yield buf + batched = pipelinefilter(_batched) + def dynamic_batched(source, max_frames_in_batch=12000): """ Dynamic batch the data until the total frames in batch reach `max_frames_in_batch` @@ -892,8 +961,8 @@ def _audio_padding(source): """ for sample in source: assert isinstance(sample, list) - feats_length = paddle.to_tensor([x['feat'].shape[0] for x in sample], - dtype="int64") + feats_length = paddle.to_tensor( + [x['feat'].shape[0] for x in sample], dtype="int64") order = paddle.argsort(feats_length, descending=True) feats_lengths = paddle.to_tensor( [sample[i]['feat'].shape[0] for i in order], dtype="int64") @@ -902,20 +971,20 @@ def _audio_padding(source): sorted_labels = [ paddle.to_tensor(sample[i]['label'], dtype="int32") for i in order ] - label_lengths = paddle.to_tensor([x.shape[0] for x in sorted_labels], - dtype="int64") - padded_feats = pad_sequence(sorted_feats, - batch_first=True, - padding_value=0) - padding_labels = pad_sequence(sorted_labels, - batch_first=True, - padding_value=-1) - - yield (sorted_keys, padded_feats, feats_lengths, padding_labels, + label_lengths = paddle.to_tensor( + [x.shape[0] for x in sorted_labels], dtype="int64") + padded_feats = pad_sequence( + sorted_feats, batch_first=True, padding_value=0) + padding_labels = pad_sequence( + sorted_labels, batch_first=True, padding_value=-1) + + yield (sorted_keys, padded_feats, feats_lengths, padding_labels, label_lengths) + audio_padding = pipelinefilter(_audio_padding) + def _audio_cmvn(source, cmvn_file): global_cmvn = GlobalCMVN(cmvn_file) for batch in source: @@ -923,13 +992,16 @@ def _audio_cmvn(source, cmvn_file): padded_feats = padded_feats.numpy() padded_feats = global_cmvn(padded_feats) padded_feats = paddle.to_tensor(padded_feats, dtype=paddle.float32) - yield (sorted_keys, padded_feats, feats_lengths, padding_labels, - label_lengths) + yield (sorted_keys, padded_feats, feats_lengths, padding_labels, + label_lengths) + audio_cmvn = pipelinefilter(_audio_cmvn) + def _placeholder(source): for data in source: yield data + placeholder = pipelinefilter(_placeholder) diff --git a/paddlespeech/audio/streamdata/gopen.py b/paddlespeech/audio/streamdata/gopen.py index 457d048a6..60a434603 100644 --- a/paddlespeech/audio/streamdata/gopen.py +++ b/paddlespeech/audio/streamdata/gopen.py @@ -3,12 +3,12 @@ # This file is part of the WebDataset library. # See the LICENSE file for licensing terms (BSD-style). 
# - - """Open URLs by calling subcommands.""" - -import os, sys, re -from subprocess import PIPE, Popen +import os +import re +import sys +from subprocess import PIPE +from subprocess import Popen from urllib.parse import urlparse # global used for printing additional node information during verbose output @@ -31,14 +31,13 @@ class Pipe: """ def __init__( - self, - *args, - mode=None, - timeout=7200.0, - ignore_errors=False, - ignore_status=[], - **kw, - ): + self, + *args, + mode=None, + timeout=7200.0, + ignore_errors=False, + ignore_status=[], + **kw, ): """Create an IO Pipe.""" self.ignore_errors = ignore_errors self.ignore_status = [0] + ignore_status @@ -75,8 +74,7 @@ class Pipe: if verbose: print( f"pipe exit [{self.status} {os.getpid()}:{self.proc.pid}] {self.args} {info}", - file=sys.stderr, - ) + file=sys.stderr, ) if self.status not in self.ignore_status and not self.ignore_errors: raise Exception(f"{self.args}: exit {self.status} (read) {info}") @@ -114,9 +112,11 @@ class Pipe: self.close() -def set_options( - obj, timeout=None, ignore_errors=None, ignore_status=None, handler=None -): +def set_options(obj, + timeout=None, + ignore_errors=None, + ignore_status=None, + handler=None): """Set options for Pipes. This function can be called on any stream. It will set pipe options only @@ -168,16 +168,14 @@ def gopen_pipe(url, mode="rb", bufsize=8192): mode=mode, shell=True, bufsize=bufsize, - ignore_status=[141], - ) # skipcq: BAN-B604 + ignore_status=[141], ) # skipcq: BAN-B604 elif mode[0] == "w": return Pipe( cmd, mode=mode, shell=True, bufsize=bufsize, - ignore_status=[141], - ) # skipcq: BAN-B604 + ignore_status=[141], ) # skipcq: BAN-B604 else: raise ValueError(f"{mode}: unknown mode") @@ -196,8 +194,7 @@ def gopen_curl(url, mode="rb", bufsize=8192): mode=mode, shell=True, bufsize=bufsize, - ignore_status=[141, 23], - ) # skipcq: BAN-B604 + ignore_status=[141, 23], ) # skipcq: BAN-B604 elif mode[0] == "w": cmd = f"curl -s -L -T - '{url}'" return Pipe( @@ -205,8 +202,7 @@ def gopen_curl(url, mode="rb", bufsize=8192): mode=mode, shell=True, bufsize=bufsize, - ignore_status=[141, 26], - ) # skipcq: BAN-B604 + ignore_status=[141, 26], ) # skipcq: BAN-B604 else: raise ValueError(f"{mode}: unknown mode") @@ -226,15 +222,13 @@ def gopen_htgs(url, mode="rb", bufsize=8192): mode=mode, shell=True, bufsize=bufsize, - ignore_status=[141, 23], - ) # skipcq: BAN-B604 + ignore_status=[141, 23], ) # skipcq: BAN-B604 elif mode[0] == "w": raise ValueError(f"{mode}: cannot write") else: raise ValueError(f"{mode}: unknown mode") - def gopen_gsutil(url, mode="rb", bufsize=8192): """Open a URL with `curl`. @@ -249,8 +243,7 @@ def gopen_gsutil(url, mode="rb", bufsize=8192): mode=mode, shell=True, bufsize=bufsize, - ignore_status=[141, 23], - ) # skipcq: BAN-B604 + ignore_status=[141, 23], ) # skipcq: BAN-B604 elif mode[0] == "w": cmd = f"gsutil cp - '{url}'" return Pipe( @@ -258,13 +251,11 @@ def gopen_gsutil(url, mode="rb", bufsize=8192): mode=mode, shell=True, bufsize=bufsize, - ignore_status=[141, 26], - ) # skipcq: BAN-B604 + ignore_status=[141, 26], ) # skipcq: BAN-B604 else: raise ValueError(f"{mode}: unknown mode") - def gopen_error(url, *args, **kw): """Raise a value error. 
@@ -285,8 +276,7 @@ gopen_schemes = dict( ftps=gopen_curl, scp=gopen_curl, gs=gopen_gsutil, - htgs=gopen_htgs, -) + htgs=gopen_htgs, ) def gopen(url, mode="rb", bufsize=8192, **kw): diff --git a/paddlespeech/audio/streamdata/handlers.py b/paddlespeech/audio/streamdata/handlers.py index 7f3d28b62..0173e5373 100644 --- a/paddlespeech/audio/streamdata/handlers.py +++ b/paddlespeech/audio/streamdata/handlers.py @@ -3,7 +3,6 @@ # This file is part of the WebDataset library. # See the LICENSE file for licensing terms (BSD-style). # - """Pluggable exception handlers. These are functions that take an exception as an argument and then return... @@ -14,8 +13,8 @@ These are functions that take an exception as an argument and then return... They are used as handler= arguments in much of the library. """ - -import time, warnings +import time +import warnings def reraise_exception(exn): diff --git a/paddlespeech/audio/streamdata/mix.py b/paddlespeech/audio/streamdata/mix.py index 7d790f00f..37556ed94 100644 --- a/paddlespeech/audio/streamdata/mix.py +++ b/paddlespeech/audio/streamdata/mix.py @@ -5,17 +5,12 @@ # See the LICENSE file for licensing terms (BSD-style). # Modified from https://github.com/webdataset/webdataset # - """Classes for mixing samples from multiple sources.""" - -import itertools, os, random, time, sys -from functools import reduce, wraps +import random import numpy as np -from . import autodecode, utils -from .paddle_utils import PaddleTensor, IterableDataset -from .utils import PipelineStage +from .paddle_utils import IterableDataset def round_robin_shortest(*sources): diff --git a/paddlespeech/audio/streamdata/paddle_utils.py b/paddlespeech/audio/streamdata/paddle_utils.py index 02bc4c841..c2ad8756b 100644 --- a/paddlespeech/audio/streamdata/paddle_utils.py +++ b/paddlespeech/audio/streamdata/paddle_utils.py @@ -5,12 +5,11 @@ # See the LICENSE file for licensing terms (BSD-style). # Modified from https://github.com/webdataset/webdataset # - """Mock implementations of paddle interfaces when paddle is not available.""" - try: - from paddle.io import DataLoader, IterableDataset + from paddle.io import DataLoader + from paddle.io import IterableDataset except ModuleNotFoundError: class IterableDataset: @@ -22,12 +21,3 @@ except ModuleNotFoundError: """Empty implementation of DataLoader when paddle is not available.""" pass - -try: - from paddle import Tensor as PaddleTensor -except ModuleNotFoundError: - - class TorchTensor: - """Empty implementation of PaddleTensor when paddle is not available.""" - - pass diff --git a/paddlespeech/audio/streamdata/pipeline.py b/paddlespeech/audio/streamdata/pipeline.py index 7339a762a..ff16760ae 100644 --- a/paddlespeech/audio/streamdata/pipeline.py +++ b/paddlespeech/audio/streamdata/pipeline.py @@ -3,15 +3,12 @@ # See the LICENSE file for licensing terms (BSD-style). 
# Modified from https://github.com/webdataset/webdataset #%% -import copy, os, random, sys, time -from dataclasses import dataclass +import copy +import sys from itertools import islice -from typing import List -import braceexpand, yaml - -from .handlers import reraise_exception -from .paddle_utils import DataLoader, IterableDataset +from .paddle_utils import DataLoader +from .paddle_utils import IterableDataset from .utils import PipelineStage @@ -22,8 +19,7 @@ def add_length_method(obj): Combined = type( obj.__class__.__name__ + "_Length", (obj.__class__, IterableDataset), - {"__len__": length}, - ) + {"__len__": length}, ) obj.__class__ = Combined return obj diff --git a/paddlespeech/audio/streamdata/shardlists.py b/paddlespeech/audio/streamdata/shardlists.py index cfaf9a64b..54f501052 100644 --- a/paddlespeech/audio/streamdata/shardlists.py +++ b/paddlespeech/audio/streamdata/shardlists.py @@ -4,28 +4,30 @@ # This file is part of the WebDataset library. # See the LICENSE file for licensing terms (BSD-style). # - # Modified from https://github.com/webdataset/webdataset - """Train PyTorch models directly from POSIX tar archive. Code works locally or over HTTP connections. """ - -import os, random, sys, time -from dataclasses import dataclass, field +import os +import random +import sys +import time +from dataclasses import dataclass +from dataclasses import field from itertools import islice from typing import List -import braceexpand, yaml +import braceexpand +import yaml from . import utils +from ..utils.log import Logger from .filters import pipelinefilter from .paddle_utils import IterableDataset +logger = Logger(__name__) -from ..utils.log import Logger -logger = Logger(__name__) def expand_urls(urls): if isinstance(urls, str): urllist = urls.split("::") @@ -64,7 +66,8 @@ class SimpleShardList(IterableDataset): def split_by_node(src, group=None): - rank, world_size, worker, num_workers = utils.paddle_worker_info(group=group) + rank, world_size, worker, num_workers = utils.paddle_worker_info( + group=group) logger.info(f"world_size:{world_size}, rank:{rank}") if world_size > 1: for s in islice(src, rank, None, world_size): @@ -75,9 +78,11 @@ def split_by_node(src, group=None): def single_node_only(src, group=None): - rank, world_size, worker, num_workers = utils.paddle_worker_info(group=group) + rank, world_size, worker, num_workers = utils.paddle_worker_info( + group=group) if world_size > 1: - raise ValueError("input pipeline needs to be reconfigured for multinode training") + raise ValueError( + "input pipeline needs to be reconfigured for multinode training") for s in src: yield s @@ -104,7 +109,8 @@ def resampled_(src, n=sys.maxsize): rng = random.Random(seed) print("# resampled loading", file=sys.stderr) items = list(src) - print(f"# resampled got {len(items)} samples, yielding {n}", file=sys.stderr) + print( + f"# resampled got {len(items)} samples, yielding {n}", file=sys.stderr) for i in range(n): yield rng.choice(items) @@ -118,7 +124,9 @@ def non_empty(src): yield s count += 1 if count == 0: - raise ValueError("pipeline stage received no data at all and this was declared as an error") + raise ValueError( + "pipeline stage received no data at all and this was declared as an error" + ) @dataclass @@ -138,10 +146,6 @@ def expand(s): return os.path.expanduser(os.path.expandvars(s)) -class MultiShardSample(IterableDataset): - def __init__(self, fname): - """Construct a shardlist from multiple sources using a YAML spec.""" - self.epoch = -1 class 
MultiShardSample(IterableDataset): def __init__(self, fname): """Construct a shardlist from multiple sources using a YAML spec.""" @@ -156,20 +160,23 @@ class MultiShardSample(IterableDataset): else: with open(fname) as stream: spec = yaml.safe_load(stream) - assert set(spec.keys()).issubset(set("prefix datasets buckets".split())), list(spec.keys()) + assert set(spec.keys()).issubset( + set("prefix datasets buckets".split())), list(spec.keys()) prefix = expand(spec.get("prefix", "")) self.sources = [] for ds in spec["datasets"]: - assert set(ds.keys()).issubset(set("buckets name shards resample choose".split())), list( - ds.keys() - ) + assert set(ds.keys()).issubset( + set("buckets name shards resample choose".split())), list( + ds.keys()) buckets = ds.get("buckets", spec.get("buckets", [])) if isinstance(buckets, str): buckets = [buckets] buckets = [expand(s) for s in buckets] if buckets == []: buckets = [""] - assert len(buckets) == 1, f"{buckets}: FIXME support for multiple buckets unimplemented" + assert len( + buckets + ) == 1, f"{buckets}: FIXME support for multiple buckets unimplemented" bucket = buckets[0] name = ds.get("name", "@" + bucket) urls = ds["shards"] @@ -177,15 +184,19 @@ class MultiShardSample(IterableDataset): urls = [urls] # urls = [u for url in urls for u in braceexpand.braceexpand(url)] urls = [ - prefix + os.path.join(bucket, u) for url in urls for u in braceexpand.braceexpand(expand(url)) + prefix + os.path.join(bucket, u) + for url in urls for u in braceexpand.braceexpand(expand(url)) ] resample = ds.get("resample", -1) nsample = ds.get("choose", -1) if nsample > len(urls): - raise ValueError(f"perepoch {nsample} must be no greater than the number of shards") + raise ValueError( + f"perepoch {nsample} must be no greater than the number of shards" + ) if (nsample > 0) and (resample > 0): raise ValueError("specify only one of perepoch or choose") - entry = MSSource(name=name, urls=urls, perepoch=nsample, resample=resample) + entry = MSSource( + name=name, urls=urls, perepoch=nsample, resample=resample) self.sources.append(entry) print(f"# {name} {len(urls)} {nsample}", file=sys.stderr) @@ -203,7 +214,7 @@ class MultiShardSample(IterableDataset): # sample without replacement l = list(source.urls) self.rng.shuffle(l) - l = l[: source.perepoch] + l = l[:source.perepoch] else: l = list(source.urls) result += l @@ -227,12 +238,11 @@ class ResampledShards(IterableDataset): """An iterable dataset yielding a list of urls.""" def __init__( - self, - urls, - nshards=sys.maxsize, - worker_seed=None, - deterministic=False, - ): + self, + urls, + nshards=sys.maxsize, + worker_seed=None, + deterministic=False, ): """Sample shards from the shard list with replacement. 
:param urls: a list of URLs as a Python list or brace notation string @@ -252,7 +262,8 @@ class ResampledShards(IterableDataset): if self.deterministic: seed = utils.make_seed(self.worker_seed(), self.epoch) else: - seed = utils.make_seed(self.worker_seed(), self.epoch, os.getpid(), time.time_ns(), os.urandom(4)) + seed = utils.make_seed(self.worker_seed(), self.epoch, + os.getpid(), time.time_ns(), os.urandom(4)) if os.environ.get("WDS_SHOW_SEED", "0") == "1": print(f"# ResampledShards seed {seed}") self.rng = random.Random(seed) diff --git a/paddlespeech/audio/streamdata/tariterators.py b/paddlespeech/audio/streamdata/tariterators.py index b1616918c..79b81c0ce 100644 --- a/paddlespeech/audio/streamdata/tariterators.py +++ b/paddlespeech/audio/streamdata/tariterators.py @@ -3,13 +3,12 @@ # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # This file is part of the WebDataset library. # See the LICENSE file for licensing terms (BSD-style). - # Modified from https://github.com/webdataset/webdataset # Modified from wenet(https://github.com/wenet-e2e/wenet) - """Low level iteration functions for tar archives.""" - -import random, re, tarfile +import random +import re +import tarfile import braceexpand @@ -27,6 +26,7 @@ import numpy as np AUDIO_FORMAT_SETS = set(['flac', 'mp3', 'm4a', 'ogg', 'opus', 'wav', 'wma']) + def base_plus_ext(path): """Split off all file extensions. @@ -47,12 +47,8 @@ def valid_sample(sample): :param sample: sample to be checked """ - return ( - sample is not None - and isinstance(sample, dict) - and len(list(sample.keys())) > 0 - and not sample.get("__bad__", False) - ) + return (sample is not None and isinstance(sample, dict) and + len(list(sample.keys())) > 0 and not sample.get("__bad__", False)) # FIXME: UNUSED @@ -79,16 +75,16 @@ def url_opener(data, handler=reraise_exception, **kw): sample.update(stream=stream) yield sample except Exception as exn: - exn.args = exn.args + (url,) + exn.args = exn.args + (url, ) if handler(exn): continue else: break -def tar_file_iterator( - fileobj, skip_meta=r"__[^/]*__($|/)", handler=reraise_exception -): +def tar_file_iterator(fileobj, + skip_meta=r"__[^/]*__($|/)", + handler=reraise_exception): """Iterate over tar file, yielding filename, content pairs for the given tar stream. 
:param fileobj: byte stream suitable for tarfile @@ -103,11 +99,8 @@ def tar_file_iterator( continue if fname is None: continue - if ( - "/" not in fname - and fname.startswith(meta_prefix) - and fname.endswith(meta_suffix) - ): + if ("/" not in fname and fname.startswith(meta_prefix) and + fname.endswith(meta_suffix)): # skipping metadata for now continue if skip_meta is not None and re.match(skip_meta, fname): @@ -118,8 +111,10 @@ def tar_file_iterator( assert pos > 0 prefix, postfix = name[:pos], name[pos + 1:] if postfix == 'wav': - waveform, sample_rate = paddlespeech.audio.load(stream.extractfile(tarinfo), normal=False) - result = dict(fname=prefix, wav=waveform, sample_rate = sample_rate) + waveform, sample_rate = paddlespeech.audio.load( + stream.extractfile(tarinfo), normal=False) + result = dict( + fname=prefix, wav=waveform, sample_rate=sample_rate) else: txt = stream.extractfile(tarinfo).read().decode('utf8').strip() result = dict(fname=prefix, txt=txt) @@ -128,16 +123,17 @@ def tar_file_iterator( stream.members = [] except Exception as exn: if hasattr(exn, "args") and len(exn.args) > 0: - exn.args = (exn.args[0] + " @ " + str(fileobj),) + exn.args[1:] + exn.args = (exn.args[0] + " @ " + str(fileobj), ) + exn.args[1:] if handler(exn): continue else: break del stream -def tar_file_and_group_iterator( - fileobj, skip_meta=r"__[^/]*__($|/)", handler=reraise_exception -): + +def tar_file_and_group_iterator(fileobj, + skip_meta=r"__[^/]*__($|/)", + handler=reraise_exception): """ Expand a stream of open tar files into a stream of tar file contents. And groups the file with same prefix @@ -167,8 +163,11 @@ def tar_file_and_group_iterator( if postfix == 'txt': example['txt'] = file_obj.read().decode('utf8').strip() elif postfix in AUDIO_FORMAT_SETS: - waveform, sample_rate = paddlespeech.audio.load(file_obj, normal=False) - waveform = paddle.to_tensor(np.expand_dims(np.array(waveform),0), dtype=paddle.float32) + waveform, sample_rate = paddlespeech.audio.load( + file_obj, normal=False) + waveform = paddle.to_tensor( + np.expand_dims(np.array(waveform), 0), + dtype=paddle.float32) example['wav'] = waveform example['sample_rate'] = sample_rate @@ -176,19 +175,21 @@ def tar_file_and_group_iterator( example[postfix] = file_obj.read() except Exception as exn: if hasattr(exn, "args") and len(exn.args) > 0: - exn.args = (exn.args[0] + " @ " + str(fileobj),) + exn.args[1:] + exn.args = (exn.args[0] + " @ " + str(fileobj), + ) + exn.args[1:] if handler(exn): continue else: break valid = False - # logging.warning('error to parse {}'.format(name)) + # logging.warning('error to parse {}'.format(name)) prev_prefix = prefix if prev_prefix is not None: example['fname'] = prev_prefix yield example stream.close() + def tar_file_expander(data, handler=reraise_exception): """Expand a stream of open tar files into a stream of tar file contents. @@ -200,9 +201,8 @@ def tar_file_expander(data, handler=reraise_exception): assert isinstance(source, dict) assert "stream" in source for sample in tar_file_iterator(source["stream"]): - assert ( - isinstance(sample, dict) and "data" in sample and "fname" in sample - ) + assert (isinstance(sample, dict) and "data" in sample and + "fname" in sample) sample["__url__"] = url yield sample except Exception as exn: @@ -213,8 +213,6 @@ def tar_file_expander(data, handler=reraise_exception): break - - def tar_file_and_group_expander(data, handler=reraise_exception): """Expand a stream of open tar files into a stream of tar file contents. 
@@ -226,9 +224,8 @@ def tar_file_and_group_expander(data, handler=reraise_exception): assert isinstance(source, dict) assert "stream" in source for sample in tar_file_and_group_iterator(source["stream"]): - assert ( - isinstance(sample, dict) and "wav" in sample and "txt" in sample and "fname" in sample - ) + assert (isinstance(sample, dict) and "wav" in sample and + "txt" in sample and "fname" in sample) sample["__url__"] = url yield sample except Exception as exn: @@ -239,7 +236,11 @@ def tar_file_and_group_expander(data, handler=reraise_exception): break -def group_by_keys(data, keys=base_plus_ext, lcase=True, suffixes=None, handler=None): +def group_by_keys(data, + keys=base_plus_ext, + lcase=True, + suffixes=None, + handler=None): """Return function over iterator that groups key, value pairs into samples. :param keys: function that splits the key into key and extension (base_plus_ext) @@ -254,8 +255,8 @@ def group_by_keys(data, keys=base_plus_ext, lcase=True, suffixes=None, handler=N print( prefix, suffix, - current_sample.keys() if isinstance(current_sample, dict) else None, - ) + current_sample.keys() + if isinstance(current_sample, dict) else None, ) if prefix is None: continue if lcase: diff --git a/paddlespeech/audio/streamdata/utils.py b/paddlespeech/audio/streamdata/utils.py index c7294f2bf..94dab9052 100644 --- a/paddlespeech/audio/streamdata/utils.py +++ b/paddlespeech/audio/streamdata/utils.py @@ -4,22 +4,23 @@ # This file is part of the WebDataset library. # See the LICENSE file for licensing terms (BSD-style). # - # Modified from https://github.com/webdataset/webdataset - """Miscellaneous utility functions.""" - import importlib import itertools as itt import os import re import sys -from typing import Any, Callable, Iterator, Optional, Union +from typing import Any +from typing import Callable +from typing import Iterator +from typing import Union from ..utils.log import Logger logger = Logger(__name__) + def make_seed(*args): seed = 0 for arg in args: @@ -37,7 +38,7 @@ def identity(x: Any) -> Any: return x -def safe_eval(s: str, expr: str = "{}"): +def safe_eval(s: str, expr: str="{}"): """Evaluate the given expression more safely.""" if re.sub("[^A-Za-z0-9_]", "", s) != s: raise ValueError(f"safe_eval: illegal characters in: '{s}'") @@ -54,9 +55,9 @@ def lookup_sym(sym: str, modules: list): return None -def repeatedly0( - loader: Iterator, nepochs: int = sys.maxsize, nbatches: int = sys.maxsize -): +def repeatedly0(loader: Iterator, + nepochs: int=sys.maxsize, + nbatches: int=sys.maxsize): """Repeatedly returns batches from a DataLoader.""" for epoch in range(nepochs): for sample in itt.islice(loader, nbatches): @@ -69,12 +70,11 @@ def guess_batchsize(batch: Union[tuple, list]): def repeatedly( - source: Iterator, - nepochs: int = None, - nbatches: int = None, - nsamples: int = None, - batchsize: Callable[..., int] = guess_batchsize, -): + source: Iterator, + nepochs: int=None, + nbatches: int=None, + nsamples: int=None, + batchsize: Callable[..., int]=guess_batchsize, ): """Repeatedly yield samples from an iterator.""" epoch = 0 batch = 0 @@ -93,6 +93,7 @@ def repeatedly( if nepochs is not None and epoch >= nepochs: return + def paddle_worker_info(group=None): """Return node and worker info for PyTorch and some distributed environments.""" rank = 0 @@ -116,7 +117,7 @@ def paddle_worker_info(group=None): else: try: from paddle.io import get_worker_info - worker_info = paddle.io.get_worker_info() + worker_info = get_worker_info() if worker_info is not None: worker = 
worker_info.id num_workers = worker_info.num_workers @@ -126,6 +127,7 @@ def paddle_worker_info(group=None): return rank, world_size, worker, num_workers + def paddle_worker_seed(group=None): """Compute a distinct, deterministic RNG seed for each worker and node.""" rank, world_size, worker, num_workers = paddle_worker_info(group=group) diff --git a/paddlespeech/audio/streamdata/writer.py b/paddlespeech/audio/streamdata/writer.py index 7d4f7703b..3928a3ba6 100644 --- a/paddlespeech/audio/streamdata/writer.py +++ b/paddlespeech/audio/streamdata/writer.py @@ -5,18 +5,24 @@ # See the LICENSE file for licensing terms (BSD-style). # Modified from https://github.com/webdataset/webdataset # - """Classes and functions for writing tar files and WebDataset files.""" - -import io, json, pickle, re, tarfile, time -from typing import Any, Callable, Optional, Union +import io +import json +import pickle +import re +import tarfile +import time +from typing import Any +from typing import Callable +from typing import Optional +from typing import Union import numpy as np from . import gopen -def imageencoder(image: Any, format: str = "PNG"): # skipcq: PYL-W0622 +def imageencoder(image: Any, format: str="PNG"): # skipcq: PYL-W0622 """Compress an image using PIL and return it as a string. Can handle float or uint8 images. @@ -67,6 +73,7 @@ def bytestr(data: Any): return data.encode("ascii") return str(data).encode("ascii") + def paddle_dumps(data: Any): """Dump data into a bytestring using paddle.dumps. @@ -82,6 +89,7 @@ def paddle_dumps(data: Any): paddle.save(data, stream) return stream.getvalue() + def numpy_dumps(data: np.ndarray): """Dump data into a bytestring using numpy npy format. @@ -139,9 +147,8 @@ def add_handlers(d, keys, value): def make_handlers(): """Create a list of handlers for encoding data.""" handlers = {} - add_handlers( - handlers, "cls cls2 class count index inx id", lambda x: str(x).encode("ascii") - ) + add_handlers(handlers, "cls cls2 class count index inx id", + lambda x: str(x).encode("ascii")) add_handlers(handlers, "txt text transcript", lambda x: x.encode("utf-8")) add_handlers(handlers, "html htm", lambda x: x.encode("utf-8")) add_handlers(handlers, "pyd pickle", pickle.dumps) @@ -152,7 +159,8 @@ def make_handlers(): add_handlers(handlers, "json jsn", lambda x: json.dumps(x).encode("utf-8")) add_handlers(handlers, "mp msgpack msg", mp_dumps) add_handlers(handlers, "cbor", cbor_dumps) - add_handlers(handlers, "jpg jpeg img image", lambda data: imageencoder(data, "jpg")) + add_handlers(handlers, "jpg jpeg img image", + lambda data: imageencoder(data, "jpg")) add_handlers(handlers, "png", lambda data: imageencoder(data, "png")) add_handlers(handlers, "pbm", lambda data: imageencoder(data, "pbm")) add_handlers(handlers, "pgm", lambda data: imageencoder(data, "pgm")) @@ -192,7 +200,8 @@ def encode_based_on_extension(sample: dict, handlers: dict): :param handlers: handlers for encoding """ return { - k: encode_based_on_extension1(v, k, handlers) for k, v in list(sample.items()) + k: encode_based_on_extension1(v, k, handlers) + for k, v in list(sample.items()) } @@ -258,15 +267,14 @@ class TarWriter: """ def __init__( - self, - fileobj, - user: str = "bigdata", - group: str = "bigdata", - mode: int = 0o0444, - compress: Optional[bool] = None, - encoder: Union[None, bool, Callable] = True, - keep_meta: bool = False, - ): + self, + fileobj, + user: str="bigdata", + group: str="bigdata", + mode: int=0o0444, + compress: Optional[bool]=None, + encoder: Union[None, bool, Callable]=True, + 
keep_meta: bool=False, ): """Create a tar writer. :param fileobj: stream to write data to @@ -330,8 +338,7 @@ class TarWriter: continue if not isinstance(v, (bytes, bytearray, memoryview)): raise ValueError( - f"{k} doesn't map to a bytes after encoding ({type(v)})" - ) + f"{k} doesn't map to a bytes after encoding ({type(v)})") key = obj["__key__"] for k in sorted(obj.keys()): if k == "__key__": @@ -349,7 +356,8 @@ class TarWriter: ti.uname = self.user ti.gname = self.group if not isinstance(v, (bytes, bytearray, memoryview)): - raise ValueError(f"converter didn't yield bytes: {k}, {type(v)}") + raise ValueError( + f"converter didn't yield bytes: {k}, {type(v)}") stream = io.BytesIO(v) self.tarstream.addfile(ti, stream) total += ti.size @@ -360,14 +368,13 @@ class ShardWriter: """Like TarWriter but splits into multiple shards.""" def __init__( - self, - pattern: str, - maxcount: int = 100000, - maxsize: float = 3e9, - post: Optional[Callable] = None, - start_shard: int = 0, - **kw, - ): + self, + pattern: str, + maxcount: int=100000, + maxsize: float=3e9, + post: Optional[Callable]=None, + start_shard: int=0, + **kw, ): """Create a ShardWriter. :param pattern: output file pattern @@ -400,8 +407,7 @@ class ShardWriter: self.fname, self.count, "%.1f GB" % (self.size / 1e9), - self.total, - ) + self.total, ) self.shard += 1 stream = open(self.fname, "wb") self.tarstream = TarWriter(stream, **self.kw) @@ -413,11 +419,8 @@ class ShardWriter: :param obj: sample to be written """ - if ( - self.tarstream is None - or self.count >= self.maxcount - or self.size >= self.maxsize - ): + if (self.tarstream is None or self.count >= self.maxcount or + self.size >= self.maxsize): self.next_stream() size = self.tarstream.write(obj) self.count += 1 diff --git a/paddlespeech/audio/text/text_featurizer.py b/paddlespeech/audio/text/text_featurizer.py index 91c4d75c3..bcd6df54b 100644 --- a/paddlespeech/audio/text/text_featurizer.py +++ b/paddlespeech/audio/text/text_featurizer.py @@ -17,6 +17,7 @@ from typing import Union import sentencepiece as spm +from ..utils.log import Logger from .utility import BLANK from .utility import EOS from .utility import load_dict @@ -24,7 +25,6 @@ from .utility import MASKCTC from .utility import SOS from .utility import SPACE from .utility import UNK -from ..utils.log import Logger logger = Logger(__name__) diff --git a/paddlespeech/audio/transform/perturb.py b/paddlespeech/audio/transform/perturb.py index 8044dc36f..0825caec8 100644 --- a/paddlespeech/audio/transform/perturb.py +++ b/paddlespeech/audio/transform/perturb.py @@ -12,15 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# Modified from espnet(https://github.com/espnet/espnet) +import io +import os + +import h5py import librosa import numpy +import numpy as np import scipy import soundfile -import io -import os -import h5py -import numpy as np class SoundHDF5File(): """Collecting sound files to a HDF5 file @@ -109,6 +110,7 @@ class SoundHDF5File(): def close(self): self.file.close() + class SpeedPerturbation(): """SpeedPerturbation @@ -558,4 +560,3 @@ class RIRConvolve(): [scipy.convolve(x, r, mode="same") for r in rir], axis=-1) else: return scipy.convolve(x, rir, mode="same") - diff --git a/paddlespeech/audio/transform/spec_augment.py b/paddlespeech/audio/transform/spec_augment.py index 029e7b8f5..b2635066f 100644 --- a/paddlespeech/audio/transform/spec_augment.py +++ b/paddlespeech/audio/transform/spec_augment.py @@ -14,6 +14,7 @@ # Modified from espnet(https://github.com/espnet/espnet) """Spec Augment module for preprocessing i.e., data augmentation""" import random + import numpy from PIL import Image diff --git a/paddlespeech/cli/executor.py b/paddlespeech/cli/executor.py index 3800c36db..b53eed88c 100644 --- a/paddlespeech/cli/executor.py +++ b/paddlespeech/cli/executor.py @@ -191,7 +191,7 @@ class BaseExecutor(ABC): line = line.strip() if not line: continue - k, v = line.split() # space or \t + k, v = line.split() # space or \t job_contents[k] = v return job_contents diff --git a/paddlespeech/s2t/__init__.py b/paddlespeech/s2t/__init__.py index f6476b9aa..5fe2e16b9 100644 --- a/paddlespeech/s2t/__init__.py +++ b/paddlespeech/s2t/__init__.py @@ -114,6 +114,7 @@ if not hasattr(paddle.Tensor, 'new_full'): paddle.Tensor.new_full = new_full paddle.static.Variable.new_full = new_full + def contiguous(xs: paddle.Tensor) -> paddle.Tensor: return xs diff --git a/paddlespeech/s2t/exps/u2/model.py b/paddlespeech/s2t/exps/u2/model.py index cdad3b8f7..db60083b0 100644 --- a/paddlespeech/s2t/exps/u2/model.py +++ b/paddlespeech/s2t/exps/u2/model.py @@ -25,8 +25,6 @@ import paddle from paddle import distributed as dist from paddlespeech.s2t.frontend.featurizer import TextFeaturizer -from paddlespeech.s2t.io.dataloader import BatchDataLoader -from paddlespeech.s2t.io.dataloader import StreamDataLoader from paddlespeech.s2t.io.dataloader import DataLoaderFactory from paddlespeech.s2t.models.u2 import U2Model from paddlespeech.s2t.training.optimizer import OptimizerFactory @@ -109,7 +107,8 @@ class U2Trainer(Trainer): def valid(self): self.model.eval() if not self.use_streamdata: - logger.info(f"Valid Total Examples: {len(self.valid_loader.dataset)}") + logger.info( + f"Valid Total Examples: {len(self.valid_loader.dataset)}") valid_losses = defaultdict(list) num_seen_utts = 1 total_loss = 0.0 @@ -136,7 +135,8 @@ class U2Trainer(Trainer): msg += "epoch: {}, ".format(self.epoch) msg += "step: {}, ".format(self.iteration) if not self.use_streamdata: - msg += "batch: {}/{}, ".format(i + 1, len(self.valid_loader)) + msg += "batch: {}/{}, ".format(i + 1, + len(self.valid_loader)) msg += ', '.join('{}: {:>.6f}'.format(k, v) for k, v in valid_dump.items()) logger.info(msg) @@ -157,7 +157,8 @@ class U2Trainer(Trainer): self.before_train() if not self.use_streamdata: - logger.info(f"Train Total Examples: {len(self.train_loader.dataset)}") + logger.info( + f"Train Total Examples: {len(self.train_loader.dataset)}") while self.epoch < self.config.n_epoch: with Timer("Epoch-Train Time Cost: {}"): self.model.train() @@ -225,14 +226,18 @@ class U2Trainer(Trainer): config = self.config.clone() self.use_streamdata = 
config.get("use_stream_data", False) if self.train: - self.train_loader = DataLoaderFactory.get_dataloader('train', config, self.args) - self.valid_loader = DataLoaderFactory.get_dataloader('valid', config, self.args) + self.train_loader = DataLoaderFactory.get_dataloader( + 'train', config, self.args) + self.valid_loader = DataLoaderFactory.get_dataloader( + 'valid', config, self.args) logger.info("Setup train/valid Dataloader!") else: decode_batch_size = config.get('decode', dict()).get( 'decode_batch_size', 1) - self.test_loader = DataLoaderFactory.get_dataloader('test', config, self.args) - self.align_loader = DataLoaderFactory.get_dataloader('align', config, self.args) + self.test_loader = DataLoaderFactory.get_dataloader('test', config, + self.args) + self.align_loader = DataLoaderFactory.get_dataloader( + 'align', config, self.args) logger.info("Setup test/align Dataloader!") def setup_model(self): diff --git a/paddlespeech/s2t/exps/u2_kaldi/model.py b/paddlespeech/s2t/exps/u2_kaldi/model.py index cb015c116..073d74293 100644 --- a/paddlespeech/s2t/exps/u2_kaldi/model.py +++ b/paddlespeech/s2t/exps/u2_kaldi/model.py @@ -105,7 +105,8 @@ class U2Trainer(Trainer): def valid(self): self.model.eval() if not self.use_streamdata: - logger.info(f"Valid Total Examples: {len(self.valid_loader.dataset)}") + logger.info( + f"Valid Total Examples: {len(self.valid_loader.dataset)}") valid_losses = defaultdict(list) num_seen_utts = 1 total_loss = 0.0 @@ -133,7 +134,8 @@ class U2Trainer(Trainer): msg += "epoch: {}, ".format(self.epoch) msg += "step: {}, ".format(self.iteration) if not self.use_streamdata: - msg += "batch: {}/{}, ".format(i + 1, len(self.valid_loader)) + msg += "batch: {}/{}, ".format(i + 1, + len(self.valid_loader)) msg += ', '.join('{}: {:>.6f}'.format(k, v) for k, v in valid_dump.items()) logger.info(msg) @@ -153,7 +155,8 @@ class U2Trainer(Trainer): self.before_train() if not self.use_streamdata: - logger.info(f"Train Total Examples: {len(self.train_loader.dataset)}") + logger.info( + f"Train Total Examples: {len(self.train_loader.dataset)}") while self.epoch < self.config.n_epoch: with Timer("Epoch-Train Time Cost: {}"): self.model.train() @@ -165,8 +168,8 @@ class U2Trainer(Trainer): msg += "epoch: {}, ".format(self.epoch) msg += "step: {}, ".format(self.iteration) if not self.use_streamdata: - msg += "batch : {}/{}, ".format(batch_index + 1, - len(self.train_loader)) + msg += "batch : {}/{}, ".format( + batch_index + 1, len(self.train_loader)) msg += "lr: {:>.8f}, ".format(self.lr_scheduler()) msg += "data time: {:>.3f}s, ".format(dataload_time) self.train_batch(batch_index, batch, msg) @@ -204,21 +207,24 @@ class U2Trainer(Trainer): self.use_streamdata = config.get("use_stream_data", False) if self.train: config = self.config.clone() - self.train_loader = DataLoaderFactory.get_dataloader('train', config, self.args) + self.train_loader = DataLoaderFactory.get_dataloader( + 'train', config, self.args) config = self.config.clone() config['preprocess_config'] = None - self.valid_loader = DataLoaderFactory.get_dataloader('valid', config, self.args) + self.valid_loader = DataLoaderFactory.get_dataloader( + 'valid', config, self.args) logger.info("Setup train/valid Dataloader!") else: config = self.config.clone() config['preprocess_config'] = None - self.test_loader = DataLoaderFactory.get_dataloader('test', config, self.args) + self.test_loader = DataLoaderFactory.get_dataloader('test', config, + self.args) config = self.config.clone() config['preprocess_config'] = None - 
self.align_loader = DataLoaderFactory.get_dataloader('align', config, self.args) + self.align_loader = DataLoaderFactory.get_dataloader( + 'align', config, self.args) logger.info("Setup test/align Dataloader!") - def setup_model(self): config = self.config diff --git a/paddlespeech/s2t/exps/u2_st/model.py b/paddlespeech/s2t/exps/u2_st/model.py index 603825435..d57c49546 100644 --- a/paddlespeech/s2t/exps/u2_st/model.py +++ b/paddlespeech/s2t/exps/u2_st/model.py @@ -121,7 +121,8 @@ class U2STTrainer(Trainer): def valid(self): self.model.eval() if not self.use_streamdata: - logger.info(f"Valid Total Examples: {len(self.valid_loader.dataset)}") + logger.info( + f"Valid Total Examples: {len(self.valid_loader.dataset)}") valid_losses = defaultdict(list) num_seen_utts = 1 total_loss = 0.0 @@ -155,7 +156,8 @@ class U2STTrainer(Trainer): msg += "epoch: {}, ".format(self.epoch) msg += "step: {}, ".format(self.iteration) if not self.use_streamdata: - msg += "batch: {}/{}, ".format(i + 1, len(self.valid_loader)) + msg += "batch: {}/{}, ".format(i + 1, + len(self.valid_loader)) msg += ', '.join('{}: {:>.6f}'.format(k, v) for k, v in valid_dump.items()) logger.info(msg) @@ -175,7 +177,8 @@ class U2STTrainer(Trainer): self.before_train() if not self.use_streamdata: - logger.info(f"Train Total Examples: {len(self.train_loader.dataset)}") + logger.info( + f"Train Total Examples: {len(self.train_loader.dataset)}") while self.epoch < self.config.n_epoch: with Timer("Epoch-Train Time Cost: {}"): self.model.train() @@ -248,14 +251,16 @@ class U2STTrainer(Trainer): config['load_transcript'] = load_transcript self.use_streamdata = config.get("use_stream_data", False) if self.train: - self.train_loader = DataLoaderFactory.get_dataloader('train', config, self.args) - self.valid_loader = DataLoaderFactory.get_dataloader('valid', config, self.args) + self.train_loader = DataLoaderFactory.get_dataloader( + 'train', config, self.args) + self.valid_loader = DataLoaderFactory.get_dataloader( + 'valid', config, self.args) logger.info("Setup train/valid Dataloader!") else: - self.test_loader = DataLoaderFactory.get_dataloader('test', config, self.args) + self.test_loader = DataLoaderFactory.get_dataloader('test', config, + self.args) logger.info("Setup test Dataloader!") - def setup_model(self): config = self.config model_conf = config diff --git a/paddlespeech/s2t/io/dataloader.py b/paddlespeech/s2t/io/dataloader.py index 735d29da2..4cc8274f9 100644 --- a/paddlespeech/s2t/io/dataloader.py +++ b/paddlespeech/s2t/io/dataloader.py @@ -22,17 +22,16 @@ import paddle from paddle.io import BatchSampler from paddle.io import DataLoader from paddle.io import DistributedBatchSampler +from yacs.config import CfgNode +import paddlespeech.audio.streamdata as streamdata +from paddlespeech.audio.text.text_featurizer import TextFeaturizer from paddlespeech.s2t.io.batchfy import make_batchset from paddlespeech.s2t.io.converter import CustomConverter from paddlespeech.s2t.io.dataset import TransformDataset from paddlespeech.s2t.io.reader import LoadInputsAndTargets from paddlespeech.s2t.utils.log import Log -import paddlespeech.audio.streamdata as streamdata -from paddlespeech.audio.text.text_featurizer import TextFeaturizer -from yacs.config import CfgNode - __all__ = ["BatchDataLoader", "StreamDataLoader"] logger = Log(__name__).getlog() @@ -61,6 +60,7 @@ def batch_collate(x): """ return x[0] + def read_preprocess_cfg(preprocess_conf_file): augment_conf = dict() preprocess_cfg = CfgNode(new_allowed=True) @@ -82,7 +82,8 @@ def 
read_preprocess_cfg(preprocess_conf_file): augment_conf['num_t_mask'] = process['n_mask'] augment_conf['t_inplace'] = process['inplace'] augment_conf['t_replace_with_zero'] = process['replace_with_zero'] - return augment_conf + return augment_conf + class StreamDataLoader(): def __init__(self, @@ -95,12 +96,12 @@ class StreamDataLoader(): frame_length=25, frame_shift=10, dither=0.0, - minlen_in: float=0.0, + minlen_in: float=0.0, maxlen_in: float=float('inf'), minlen_out: float=0.0, maxlen_out: float=float('inf'), resample_rate: int=16000, - shuffle_size: int=10000, + shuffle_size: int=10000, sort_size: int=1000, n_iter_processes: int=1, prefetch_factor: int=2, @@ -116,11 +117,11 @@ class StreamDataLoader(): text_featurizer = TextFeaturizer(unit_type, vocab_filepath) symbol_table = text_featurizer.vocab_dict - self.feat_dim = num_mel_bins - self.vocab_size = text_featurizer.vocab_size - + self.feat_dim = num_mel_bins + self.vocab_size = text_featurizer.vocab_size + augment_conf = read_preprocess_cfg(preprocess_conf) - + # The list of shard shardlist = [] with open(manifest_file, "r") as f: @@ -128,58 +129,68 @@ class StreamDataLoader(): shardlist.append(line.strip()) world_size = 1 try: - world_size = paddle.distributed.get_world_size() + world_size = paddle.distributed.get_world_size() except Exception as e: logger.warninig(e) - logger.warninig("can not get world_size using paddle.distributed.get_world_size(), use world_size=1") - assert(len(shardlist) >= world_size, "the length of shard list should >= number of gpus/xpus/...") + logger.warninig( + "can not get world_size using paddle.distributed.get_world_size(), use world_size=1" + ) + assert len(shardlist) >= world_size, \ + "the length of shard list should >= number of gpus/xpus/..." - update_n_iter_processes = int(max(min(len(shardlist)/world_size - 1, self.n_iter_processes), 0)) + update_n_iter_processes = int( + max(min(len(shardlist) / world_size - 1, self.n_iter_processes), 0)) logger.info(f"update_n_iter_processes {update_n_iter_processes}") if update_n_iter_processes != self.n_iter_processes: - self.n_iter_processes = update_n_iter_processes + self.n_iter_processes = update_n_iter_processes logger.info(f"change nun_workers to {self.n_iter_processes}") if self.dist_sampler: base_dataset = streamdata.DataPipeline( - streamdata.SimpleShardList(shardlist), - streamdata.split_by_node if train_mode else streamdata.placeholder(), + streamdata.SimpleShardList(shardlist), streamdata.split_by_node + if train_mode else streamdata.placeholder(), streamdata.split_by_worker, - streamdata.tarfile_to_samples(streamdata.reraise_exception) - ) + streamdata.tarfile_to_samples(streamdata.reraise_exception)) else: base_dataset = streamdata.DataPipeline( streamdata.SimpleShardList(shardlist), streamdata.split_by_worker, - streamdata.tarfile_to_samples(streamdata.reraise_exception) - ) + streamdata.tarfile_to_samples(streamdata.reraise_exception)) self.dataset = base_dataset.append_list( streamdata.audio_tokenize(symbol_table), - streamdata.audio_data_filter(frame_shift=frame_shift, max_length=maxlen_in, min_length=minlen_in, token_max_length=maxlen_out, token_min_length=minlen_out), + streamdata.audio_data_filter( + frame_shift=frame_shift, + max_length=maxlen_in, + min_length=minlen_in, + token_max_length=maxlen_out, + token_min_length=minlen_out), streamdata.audio_resample(resample_rate=resample_rate), - streamdata.audio_compute_fbank(num_mel_bins=num_mel_bins, frame_length=frame_length, frame_shift=frame_shift, dither=dither), - 
streamdata.audio_spec_aug(**augment_conf) if train_mode else streamdata.placeholder(), # num_t_mask=2, num_f_mask=2, max_t=40, max_f=30, max_w=80) + streamdata.audio_compute_fbank( + num_mel_bins=num_mel_bins, + frame_length=frame_length, + frame_shift=frame_shift, + dither=dither), + streamdata.audio_spec_aug(**augment_conf) + if train_mode else streamdata.placeholder( + ), # num_t_mask=2, num_f_mask=2, max_t=40, max_f=30, max_w=80) streamdata.shuffle(shuffle_size), streamdata.sort(sort_size=sort_size), streamdata.batched(batch_size), streamdata.audio_padding(), - streamdata.audio_cmvn(cmvn_file) - ) + streamdata.audio_cmvn(cmvn_file)) if paddle.__version__ >= '2.3.2': self.loader = streamdata.WebLoader( - self.dataset, - num_workers=self.n_iter_processes, - prefetch_factor = self.prefetch_factor, - batch_size=None - ) + self.dataset, + num_workers=self.n_iter_processes, + prefetch_factor=self.prefetch_factor, + batch_size=None) else: self.loader = streamdata.WebLoader( - self.dataset, - num_workers=self.n_iter_processes, - batch_size=None - ) + self.dataset, + num_workers=self.n_iter_processes, + batch_size=None) def __iter__(self): return self.loader.__iter__() @@ -188,7 +199,9 @@ class StreamDataLoader(): return self.__iter__() def __len__(self): - logger.info("Stream dataloader does not support calculate the length of the dataset") + logger.info( + "Stream dataloader does not support calculate the length of the dataset" + ) return -1 @@ -347,7 +360,7 @@ class DataLoaderFactory(): config['train_mode'] = True elif mode == 'valid': config['manifest'] = config.dev_manifest - config['train_mode'] = False + config['train_mode'] = False elif model == 'test' or mode == 'align': config['manifest'] = config.test_manifest config['train_mode'] = False @@ -358,30 +371,31 @@ class DataLoaderFactory(): config['maxlen_out'] = float('inf') config['dist_sampler'] = False else: - raise KeyError("not valid mode type!!, please input one of 'train, valid, test, align'") - return StreamDataLoader( - manifest_file=config.manifest, - train_mode=config.train_mode, - unit_type=config.unit_type, - preprocess_conf=config.preprocess_config, - batch_size=config.batch_size, - num_mel_bins=config.feat_dim, - frame_length=config.window_ms, - frame_shift=config.stride_ms, - dither=config.dither, - minlen_in=config.minlen_in, - maxlen_in=config.maxlen_in, - minlen_out=config.minlen_out, - maxlen_out=config.maxlen_out, - resample_rate=config.resample_rate, - shuffle_size=config.shuffle_size, - sort_size=config.sort_size, - n_iter_processes=config.num_workers, - prefetch_factor=config.prefetch_factor, - dist_sampler=config.dist_sampler, - cmvn_file=config.cmvn_file, - vocab_filepath=config.vocab_filepath, + raise KeyError( + "not valid mode type!!, please input one of 'train, valid, test, align'" ) + return StreamDataLoader( + manifest_file=config.manifest, + train_mode=config.train_mode, + unit_type=config.unit_type, + preprocess_conf=config.preprocess_config, + batch_size=config.batch_size, + num_mel_bins=config.feat_dim, + frame_length=config.window_ms, + frame_shift=config.stride_ms, + dither=config.dither, + minlen_in=config.minlen_in, + maxlen_in=config.maxlen_in, + minlen_out=config.minlen_out, + maxlen_out=config.maxlen_out, + resample_rate=config.resample_rate, + shuffle_size=config.shuffle_size, + sort_size=config.sort_size, + n_iter_processes=config.num_workers, + prefetch_factor=config.prefetch_factor, + dist_sampler=config.dist_sampler, + cmvn_file=config.cmvn_file, + vocab_filepath=config.vocab_filepath, ) 
else: if mode == 'train': config['manifest'] = config.train_manifest @@ -411,7 +425,7 @@ class DataLoaderFactory(): config['train_mode'] = False config['sortagrad'] = False config['batch_size'] = config.get('decode', dict()).get( - 'decode_batch_size', 1) + 'decode_batch_size', 1) config['maxlen_in'] = float('inf') config['maxlen_out'] = float('inf') config['minibatches'] = 0 @@ -427,8 +441,10 @@ class DataLoaderFactory(): config['dist_sampler'] = False config['shortest_first'] = False else: - raise KeyError("not valid mode type!!, please input one of 'train, valid, test, align'") - + raise KeyError( + "not valid mode type!!, please input one of 'train, valid, test, align'" + ) + return BatchDataLoader( json_file=config.manifest, train_mode=config.train_mode, @@ -450,4 +466,3 @@ class DataLoaderFactory(): num_encs=config.num_encs, dist_sampler=config.dist_sampler, shortest_first=config.shortest_first) - diff --git a/paddlespeech/s2t/models/u2_st/u2_st.py b/paddlespeech/s2t/models/u2_st/u2_st.py index e86bbedfa..e8b61bc0d 100644 --- a/paddlespeech/s2t/models/u2_st/u2_st.py +++ b/paddlespeech/s2t/models/u2_st/u2_st.py @@ -18,7 +18,6 @@ Unified Streaming and Non-streaming Two-pass End-to-end Model for Speech Recogni """ import time from typing import Dict -from typing import List from typing import Optional from typing import Tuple @@ -26,6 +25,8 @@ import paddle from paddle import jit from paddle import nn +from paddlespeech.audio.utils.tensor_utils import add_sos_eos +from paddlespeech.audio.utils.tensor_utils import th_accuracy from paddlespeech.s2t.frontend.utility import IGNORE_ID from paddlespeech.s2t.frontend.utility import load_cmvn from paddlespeech.s2t.modules.cmvn import GlobalCMVN @@ -38,8 +39,6 @@ from paddlespeech.s2t.modules.mask import subsequent_mask from paddlespeech.s2t.utils import checkpoint from paddlespeech.s2t.utils import layer_tools from paddlespeech.s2t.utils.log import Log -from paddlespeech.audio.utils.tensor_utils import add_sos_eos -from paddlespeech.audio.utils.tensor_utils import th_accuracy from paddlespeech.s2t.utils.utility import UpdateConfig __all__ = ["U2STModel", "U2STInferModel"] @@ -401,8 +400,8 @@ class U2STBaseModel(nn.Layer): xs: paddle.Tensor, offset: int, required_cache_size: int, - att_cache: paddle.Tensor = paddle.zeros([0, 0, 0, 0]), - cnn_cache: paddle.Tensor = paddle.zeros([0, 0, 0, 0]), + att_cache: paddle.Tensor=paddle.zeros([0, 0, 0, 0]), + cnn_cache: paddle.Tensor=paddle.zeros([0, 0, 0, 0]), ) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor]: """ Export interface for c++ call, give input chunk xs, and return output from time 0 to current chunk. @@ -435,8 +434,8 @@ class U2STBaseModel(nn.Layer): paddle.Tensor: new conformer cnn cache required for next chunk, with same shape as the original cnn_cache. """ - return self.encoder.forward_chunk( - xs, offset, required_cache_size, att_cache, cnn_cache) + return self.encoder.forward_chunk(xs, offset, required_cache_size, + att_cache, cnn_cache) # @jit.to_static def ctc_activation(self, xs: paddle.Tensor) -> paddle.Tensor: diff --git a/paddlespeech/s2t/modules/align.py b/paddlespeech/s2t/modules/align.py index cacda2461..34d796145 100644 --- a/paddlespeech/s2t/modules/align.py +++ b/paddlespeech/s2t/modules/align.py @@ -11,9 +11,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import math + import paddle from paddle import nn -import math """ To align the initializer between paddle and torch, the API below are set defalut initializer with priority higger than global initializer. @@ -81,10 +82,18 @@ class Linear(nn.Linear): name=None): if weight_attr is None: if global_init_type == "kaiming_uniform": - weight_attr = paddle.ParamAttr(initializer=nn.initializer.KaimingUniform(fan_in=None, negative_slope=math.sqrt(5), nonlinearity='leaky_relu')) + weight_attr = paddle.ParamAttr( + initializer=nn.initializer.KaimingUniform( + fan_in=None, + negative_slope=math.sqrt(5), + nonlinearity='leaky_relu')) if bias_attr is None: if global_init_type == "kaiming_uniform": - bias_attr = paddle.ParamAttr(initializer=nn.initializer.KaimingUniform(fan_in=None, negative_slope=math.sqrt(5), nonlinearity='leaky_relu')) + bias_attr = paddle.ParamAttr( + initializer=nn.initializer.KaimingUniform( + fan_in=None, + negative_slope=math.sqrt(5), + nonlinearity='leaky_relu')) super(Linear, self).__init__(in_features, out_features, weight_attr, bias_attr, name) @@ -104,10 +113,18 @@ class Conv1D(nn.Conv1D): data_format='NCL'): if weight_attr is None: if global_init_type == "kaiming_uniform": - weight_attr = paddle.ParamAttr(initializer=nn.initializer.KaimingUniform(fan_in=None, negative_slope=math.sqrt(5), nonlinearity='leaky_relu')) + weight_attr = paddle.ParamAttr( + initializer=nn.initializer.KaimingUniform( + fan_in=None, + negative_slope=math.sqrt(5), + nonlinearity='leaky_relu')) if bias_attr is None: if global_init_type == "kaiming_uniform": - bias_attr = paddle.ParamAttr(initializer=nn.initializer.KaimingUniform(fan_in=None, negative_slope=math.sqrt(5), nonlinearity='leaky_relu')) + bias_attr = paddle.ParamAttr( + initializer=nn.initializer.KaimingUniform( + fan_in=None, + negative_slope=math.sqrt(5), + nonlinearity='leaky_relu')) super(Conv1D, self).__init__( in_channels, out_channels, kernel_size, stride, padding, dilation, groups, padding_mode, weight_attr, bias_attr, data_format) @@ -128,10 +145,18 @@ class Conv2D(nn.Conv2D): data_format='NCHW'): if weight_attr is None: if global_init_type == "kaiming_uniform": - weight_attr = paddle.ParamAttr(initializer=nn.initializer.KaimingUniform(fan_in=None, negative_slope=math.sqrt(5), nonlinearity='leaky_relu')) + weight_attr = paddle.ParamAttr( + initializer=nn.initializer.KaimingUniform( + fan_in=None, + negative_slope=math.sqrt(5), + nonlinearity='leaky_relu')) if bias_attr is None: if global_init_type == "kaiming_uniform": - bias_attr = paddle.ParamAttr(initializer=nn.initializer.KaimingUniform(fan_in=None, negative_slope=math.sqrt(5), nonlinearity='leaky_relu')) + bias_attr = paddle.ParamAttr( + initializer=nn.initializer.KaimingUniform( + fan_in=None, + negative_slope=math.sqrt(5), + nonlinearity='leaky_relu')) super(Conv2D, self).__init__( in_channels, out_channels, kernel_size, stride, padding, dilation, groups, padding_mode, weight_attr, bias_attr, data_format) diff --git a/paddlespeech/s2t/modules/initializer.py b/paddlespeech/s2t/modules/initializer.py index cdcf2e052..6eae5713e 100644 --- a/paddlespeech/s2t/modules/initializer.py +++ b/paddlespeech/s2t/modules/initializer.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import numpy as np + class DefaultInitializerContext(object): """ diff --git a/paddlespeech/server/engine/asr/online/ctc_endpoint.py b/paddlespeech/server/engine/asr/online/ctc_endpoint.py index b87dbe805..1b8ad1cb7 100644 --- a/paddlespeech/server/engine/asr/online/ctc_endpoint.py +++ b/paddlespeech/server/engine/asr/online/ctc_endpoint.py @@ -102,8 +102,10 @@ class OnlineCTCEndpoint: assert self.num_frames_decoded >= self.trailing_silence_frames assert self.frame_shift_in_ms > 0 - - decoding_something = (self.num_frames_decoded > self.trailing_silence_frames) and decoding_something + + decoding_something = ( + self.num_frames_decoded > self.trailing_silence_frames + ) and decoding_something utterance_length = self.num_frames_decoded * self.frame_shift_in_ms trailing_silence = self.trailing_silence_frames * self.frame_shift_in_ms diff --git a/paddlespeech/server/engine/asr/online/onnx/asr_engine.py b/paddlespeech/server/engine/asr/online/onnx/asr_engine.py index ab4f11305..6daae5be3 100644 --- a/paddlespeech/server/engine/asr/online/onnx/asr_engine.py +++ b/paddlespeech/server/engine/asr/online/onnx/asr_engine.py @@ -21,12 +21,12 @@ import paddle from numpy import float32 from yacs.config import CfgNode +from paddlespeech.audio.transform.transformation import Transformation from paddlespeech.cli.asr.infer import ASRExecutor from paddlespeech.cli.log import logger from paddlespeech.resource import CommonTaskResource from paddlespeech.s2t.frontend.featurizer.text_featurizer import TextFeaturizer from paddlespeech.s2t.modules.ctc import CTCDecoder -from paddlespeech.audio.transform.transformation import Transformation from paddlespeech.s2t.utils.utility import UpdateConfig from paddlespeech.server.engine.base_engine import BaseEngine from paddlespeech.server.utils import onnx_infer diff --git a/paddlespeech/server/engine/asr/online/paddleinference/asr_engine.py b/paddlespeech/server/engine/asr/online/paddleinference/asr_engine.py index 182e64180..0fd5d1bc6 100644 --- a/paddlespeech/server/engine/asr/online/paddleinference/asr_engine.py +++ b/paddlespeech/server/engine/asr/online/paddleinference/asr_engine.py @@ -21,10 +21,10 @@ import paddle from numpy import float32 from yacs.config import CfgNode +from paddlespeech.audio.transform.transformation import Transformation from paddlespeech.cli.asr.infer import ASRExecutor from paddlespeech.cli.log import logger from paddlespeech.resource import CommonTaskResource -from paddlespeech.audio.transform.transformation import Transformation from paddlespeech.s2t.frontend.featurizer.text_featurizer import TextFeaturizer from paddlespeech.s2t.modules.ctc import CTCDecoder from paddlespeech.s2t.utils.utility import UpdateConfig diff --git a/paddlespeech/server/engine/asr/python/asr_engine.py b/paddlespeech/server/engine/asr/python/asr_engine.py index 9ce05d97a..e297e5c21 100644 --- a/paddlespeech/server/engine/asr/python/asr_engine.py +++ b/paddlespeech/server/engine/asr/python/asr_engine.py @@ -66,12 +66,14 @@ class ASREngine(BaseEngine): ) logger.error(e) return False - - self.executor._init_from_path( - model_type = self.config.model, lang = self.config.lang, sample_rate = self.config.sample_rate, - cfg_path = self.config.cfg_path, decode_method = self.config.decode_method, - ckpt_path = self.config.ckpt_path) + self.executor._init_from_path( + model_type=self.config.model, + lang=self.config.lang, + sample_rate=self.config.sample_rate, + cfg_path=self.config.cfg_path, + decode_method=self.config.decode_method, + ckpt_path=self.config.ckpt_path) 
logger.info("Initialize ASR server engine successfully on device: %s." % (self.device)) diff --git a/paddlespeech/t2s/datasets/sampler.py b/paddlespeech/t2s/datasets/sampler.py index a69bc8600..3c97d1dc4 100644 --- a/paddlespeech/t2s/datasets/sampler.py +++ b/paddlespeech/t2s/datasets/sampler.py @@ -1,8 +1,9 @@ -import paddle import math + import numpy as np from paddle.io import BatchSampler + class ErnieSATSampler(BatchSampler): """Sampler that restricts data loading to a subset of the dataset. In such case, each process can pass a DistributedBatchSampler instance @@ -110,8 +111,8 @@ class ErnieSATSampler(BatchSampler): subsampled_indices.extend(indices[i:i + self.batch_size]) indices = indices[len(indices) - last_batch_size:] - subsampled_indices.extend(indices[ - self.local_rank * last_local_batch_size:( + subsampled_indices.extend( + indices[self.local_rank * last_local_batch_size:( self.local_rank + 1) * last_local_batch_size]) return subsampled_indices diff --git a/paddlespeech/t2s/exps/ernie_sat/train.py b/paddlespeech/t2s/exps/ernie_sat/train.py index af653ef89..75a666bb1 100644 --- a/paddlespeech/t2s/exps/ernie_sat/train.py +++ b/paddlespeech/t2s/exps/ernie_sat/train.py @@ -25,7 +25,6 @@ from paddle import DataParallel from paddle import distributed as dist from paddle import nn from paddle.io import DataLoader -from paddle.io import DistributedBatchSampler from paddle.optimizer import Adam from yacs.config import CfgNode diff --git a/paddlespeech/t2s/exps/ernie_sat/utils.py b/paddlespeech/t2s/exps/ernie_sat/utils.py index 9169efa36..6805e513c 100644 --- a/paddlespeech/t2s/exps/ernie_sat/utils.py +++ b/paddlespeech/t2s/exps/ernie_sat/utils.py @@ -11,32 +11,35 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import hashlib +import os from pathlib import Path from typing import Dict from typing import List from typing import Union -import os import numpy as np import paddle import yaml from yacs.config import CfgNode -import hashlib - from paddlespeech.t2s.exps.syn_utils import get_am_inference from paddlespeech.t2s.exps.syn_utils import get_voc_inference + def _get_user(): return os.path.expanduser('~').split('/')[-1] + def str2md5(string): md5_val = hashlib.md5(string.encode('utf8')).hexdigest() return md5_val -def get_tmp_name(text:str): + +def get_tmp_name(text: str): return _get_user() + '_' + str(os.getpid()) + '_' + str2md5(text) + def get_dict(dictfile: str): word2phns_dict = {} with open(dictfile, 'r') as fid: diff --git a/paddlespeech/t2s/exps/syn_utils.py b/paddlespeech/t2s/exps/syn_utils.py index c8eb1c64a..15d8dfb78 100644 --- a/paddlespeech/t2s/exps/syn_utils.py +++ b/paddlespeech/t2s/exps/syn_utils.py @@ -298,8 +298,8 @@ def am_to_static(am_inference, am_name = am[:am.rindex('_')] am_dataset = am[am.rindex('_') + 1:] if am_name == 'fastspeech2': - if am_dataset in {"aishell3", "vctk", "mix" - } and speaker_dict is not None: + if am_dataset in {"aishell3", "vctk", + "mix"} and speaker_dict is not None: am_inference = jit.to_static( am_inference, input_spec=[ @@ -311,8 +311,8 @@ def am_to_static(am_inference, am_inference, input_spec=[InputSpec([-1], dtype=paddle.int64)]) elif am_name == 'speedyspeech': - if am_dataset in {"aishell3", "vctk", "mix" - } and speaker_dict is not None: + if am_dataset in {"aishell3", "vctk", + "mix"} and speaker_dict is not None: am_inference = jit.to_static( am_inference, input_spec=[ diff --git a/paddlespeech/t2s/frontend/g2pw/__init__.py b/paddlespeech/t2s/frontend/g2pw/__init__.py index 6e1ee0db8..0eaeee5df 100644 --- a/paddlespeech/t2s/frontend/g2pw/__init__.py +++ b/paddlespeech/t2s/frontend/g2pw/__init__.py @@ -1,2 +1 @@ from paddlespeech.t2s.frontend.g2pw.onnx_api import G2PWOnnxConverter - diff --git a/paddlespeech/t2s/frontend/mix_frontend.py b/paddlespeech/t2s/frontend/mix_frontend.py index a681445c7..101a1e503 100644 --- a/paddlespeech/t2s/frontend/mix_frontend.py +++ b/paddlespeech/t2s/frontend/mix_frontend.py @@ -61,8 +61,11 @@ class MixFrontend(): return False def is_end(self, before_char, after_char) -> bool: - if ((self.is_alphabet(before_char) or before_char == " ") and - (self.is_alphabet(after_char) or after_char == " ")): + flag = 0 + for char in (before_char, after_char): + if self.is_alphabet(char) or char == " ": + flag += 1 + if flag == 2: return True else: return False diff --git a/paddlespeech/t2s/training/updaters/standard_updater.py b/paddlespeech/t2s/training/updaters/standard_updater.py index 668d2fc69..6d3aa7099 100644 --- a/paddlespeech/t2s/training/updaters/standard_updater.py +++ b/paddlespeech/t2s/training/updaters/standard_updater.py @@ -24,10 +24,11 @@ from paddle.nn import Layer from paddle.optimizer import Optimizer from timer import timer +from paddlespeech.t2s.datasets.sampler import ErnieSATSampler from paddlespeech.t2s.training.reporter import report from paddlespeech.t2s.training.updater import UpdaterBase from paddlespeech.t2s.training.updater import UpdaterState -from paddlespeech.t2s.datasets.sampler import ErnieSATSampler + class StandardUpdater(UpdaterBase): """An example of over-simplification. 
Things may not be that simple, but diff --git a/setup.py b/setup.py index 079803b7e..fac9e1207 100644 --- a/setup.py +++ b/setup.py @@ -77,12 +77,7 @@ base = [ "pybind11", ] -server = [ - "fastapi", - "uvicorn", - "pattern_singleton", - "websockets" -] +server = ["fastapi", "uvicorn", "pattern_singleton", "websockets"] requirements = { "install": @@ -330,4 +325,4 @@ setup_info = dict( }) with version_info(): - setup(**setup_info,include_package_data=True) + setup(**setup_info, include_package_data=True) diff --git a/speechx/examples/ds2_ol/onnx/local/onnx_infer_shape.py b/speechx/examples/ds2_ol/onnx/local/onnx_infer_shape.py index 4426d1be8..c53e9ec92 100755 --- a/speechx/examples/ds2_ol/onnx/local/onnx_infer_shape.py +++ b/speechx/examples/ds2_ol/onnx/local/onnx_infer_shape.py @@ -490,18 +490,10 @@ class SymbolicShapeInference: def _onnx_infer_single_node(self, node): # skip onnx shape inference for some ops, as they are handled in _infer_* skip_infer = node.op_type in [ - 'If', 'Loop', 'Scan', 'SplitToSequence', 'ZipMap', \ - # contrib ops - - - - - 'Attention', 'BiasGelu', \ - 'EmbedLayerNormalization', \ - 'FastGelu', 'Gelu', 'LayerNormalization', \ - 'LongformerAttention', \ - 'SkipLayerNormalization', \ - 'PythonOp' + 'If', 'Loop', 'Scan', 'SplitToSequence', 'ZipMap', 'Attention', + 'BiasGelu', 'EmbedLayerNormalization', 'FastGelu', 'Gelu', + 'LayerNormalization', 'LongformerAttention', + 'SkipLayerNormalization', 'PythonOp' ] if not skip_infer: @@ -514,8 +506,8 @@ class SymbolicShapeInference: if (get_opset(self.out_mp_) >= 9) and node.op_type in ['Unsqueeze']: initializers = [ self.initializers_[name] for name in node.input - if (name in self.initializers_ and - name not in self.graph_inputs_) + if (name in self.initializers_ and name not in + self.graph_inputs_) ] # run single node inference with self.known_vi_ shapes @@ -601,8 +593,8 @@ class SymbolicShapeInference: for o in symbolic_shape_inference.out_mp_.graph.output ] subgraph_new_symbolic_dims = set([ - d for s in subgraph_shapes if s for d in s - if type(d) == str and not d in self.symbolic_dims_ + d for s in subgraph_shapes + if s for d in s if type(d) == str and not d in self.symbolic_dims_ ]) new_dims = {} for d in subgraph_new_symbolic_dims: @@ -729,8 +721,9 @@ class SymbolicShapeInference: for d, s in zip(sympy_shape[-rank:], strides) ] total_pads = [ - max(0, (k - s) if r == 0 else (k - r)) for k, s, r in - zip(effective_kernel_shape, strides, residual) + max(0, (k - s) if r == 0 else (k - r)) + for k, s, r in zip(effective_kernel_shape, strides, + residual) ] except TypeError: # sympy may throw TypeError: cannot determine truth value of Relational total_pads = [ @@ -1276,8 +1269,9 @@ class SymbolicShapeInference: if pads is not None: assert len(pads) == 2 * rank new_sympy_shape = [ - d + pad_up + pad_down for d, pad_up, pad_down in - zip(sympy_shape, pads[:rank], pads[rank:]) + d + pad_up + pad_down + for d, pad_up, pad_down in zip(sympy_shape, pads[:rank], pads[ + rank:]) ] self._update_computed_dims(new_sympy_shape) else: @@ -1590,8 +1584,8 @@ class SymbolicShapeInference: scales = list(scales) new_sympy_shape = [ sympy.simplify(sympy.floor(d * (end - start) * scale)) - for d, start, end, scale in - zip(input_sympy_shape, roi_start, roi_end, scales) + for d, start, end, scale in zip(input_sympy_shape, + roi_start, roi_end, scales) ] self._update_computed_dims(new_sympy_shape) else: @@ -2204,8 +2198,9 @@ class SymbolicShapeInference: # topological sort nodes, note there might be dead nodes so we check if all graph 
outputs are reached to terminate sorted_nodes = [] sorted_known_vi = set([ - i.name for i in list(self.out_mp_.graph.input) + - list(self.out_mp_.graph.initializer) + i.name + for i in list(self.out_mp_.graph.input) + list( + self.out_mp_.graph.initializer) ]) if any([o.name in sorted_known_vi for o in self.out_mp_.graph.output]): # Loop/Scan will have some graph output in graph inputs, so don't do topological sort From e8656fdfbaea44aafb6b4e593bdceb23695d104f Mon Sep 17 00:00:00 2001 From: TianYuan Date: Mon, 5 Sep 2022 19:57:02 +0800 Subject: [PATCH 063/101] update version of paddle2onnx, test=tts (#2347) --- examples/aishell3/tts3/run.sh | 4 ++-- examples/csmsc/tts2/run.sh | 4 ++-- examples/csmsc/tts3/run.sh | 4 ++-- examples/csmsc/tts3/run_cnndecoder.sh | 8 ++++---- examples/ljspeech/tts3/run.sh | 4 ++-- examples/vctk/tts3/run.sh | 4 ++-- examples/zh_en_tts/tts3/run.sh | 4 ++-- 7 files changed, 16 insertions(+), 16 deletions(-) diff --git a/examples/aishell3/tts3/run.sh b/examples/aishell3/tts3/run.sh index 24715fee1..f730f3761 100755 --- a/examples/aishell3/tts3/run.sh +++ b/examples/aishell3/tts3/run.sh @@ -44,8 +44,8 @@ fi if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then # install paddle2onnx version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}') - if [[ -z "$version" || ${version} != '0.9.8' ]]; then - pip install paddle2onnx==0.9.8 + if [[ -z "$version" || ${version} != '1.0.0' ]]; then + pip install paddle2onnx==1.0.0 fi ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_aishell3 # considering the balance between speed and quality, we recommend that you use hifigan as vocoder diff --git a/examples/csmsc/tts2/run.sh b/examples/csmsc/tts2/run.sh index e51913496..557dd4ff3 100755 --- a/examples/csmsc/tts2/run.sh +++ b/examples/csmsc/tts2/run.sh @@ -46,8 +46,8 @@ fi if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then # install paddle2onnx version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}') - if [[ -z "$version" || ${version} != '0.9.8' ]]; then - pip install paddle2onnx==0.9.8 + if [[ -z "$version" || ${version} != '1.0.0' ]]; then + pip install paddle2onnx==1.0.0 fi ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx speedyspeech_csmsc # considering the balance between speed and quality, we recommend that you use hifigan as vocoder diff --git a/examples/csmsc/tts3/run.sh b/examples/csmsc/tts3/run.sh index 2662b5811..80acf8200 100755 --- a/examples/csmsc/tts3/run.sh +++ b/examples/csmsc/tts3/run.sh @@ -46,8 +46,8 @@ fi if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then # install paddle2onnx version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}') - if [[ -z "$version" || ${version} != '0.9.8' ]]; then - pip install paddle2onnx==0.9.8 + if [[ -z "$version" || ${version} != '1.0.0' ]]; then + pip install paddle2onnx==1.0.0 fi ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_csmsc # considering the balance between speed and quality, we recommend that you use hifigan as vocoder diff --git a/examples/csmsc/tts3/run_cnndecoder.sh b/examples/csmsc/tts3/run_cnndecoder.sh index c5ce41a9c..bae833157 100755 --- a/examples/csmsc/tts3/run_cnndecoder.sh +++ b/examples/csmsc/tts3/run_cnndecoder.sh @@ -59,8 +59,8 @@ fi if [ ${stage} -le 7 ] && [ ${stop_stage} -ge 7 ]; then # install paddle2onnx version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}') - if [[ -z "$version" || ${version} != '0.9.8' ]]; then - pip install paddle2onnx==0.9.8 + if [[ 
-z "$version" || ${version} != '1.0.0' ]]; then + pip install paddle2onnx==1.0.0 fi ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_csmsc # considering the balance between speed and quality, we recommend that you use hifigan as vocoder @@ -79,8 +79,8 @@ fi if [ ${stage} -le 9 ] && [ ${stop_stage} -ge 9 ]; then # install paddle2onnx version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}') - if [[ -z "$version" || ${version} != '0.9.8' ]]; then - pip install paddle2onnx==0.9.8 + if [[ -z "$version" || ${version} != '1.0.0' ]]; then + pip install paddle2onnx==1.0.0 fi # streaming acoustic model ./local/paddle2onnx.sh ${train_output_path} inference_streaming inference_onnx_streaming fastspeech2_csmsc_am_encoder_infer diff --git a/examples/ljspeech/tts3/run.sh b/examples/ljspeech/tts3/run.sh index 260f06c8b..956185935 100755 --- a/examples/ljspeech/tts3/run.sh +++ b/examples/ljspeech/tts3/run.sh @@ -46,8 +46,8 @@ fi if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then # install paddle2onnx version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}') - if [[ -z "$version" || ${version} != '0.9.8' ]]; then - pip install paddle2onnx==0.9.8 + if [[ -z "$version" || ${version} != '1.0.0' ]]; then + pip install paddle2onnx==1.0.0 fi ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_ljspeech # considering the balance between speed and quality, we recommend that you use hifigan as vocoder diff --git a/examples/vctk/tts3/run.sh b/examples/vctk/tts3/run.sh index b45afd7be..b5184aed8 100755 --- a/examples/vctk/tts3/run.sh +++ b/examples/vctk/tts3/run.sh @@ -44,8 +44,8 @@ fi if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then # install paddle2onnx version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}') - if [[ -z "$version" || ${version} != '0.9.8' ]]; then - pip install paddle2onnx==0.9.8 + if [[ -z "$version" || ${version} != '1.0.0' ]]; then + pip install paddle2onnx==1.0.0 fi ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_vctk # considering the balance between speed and quality, we recommend that you use hifigan as vocoder diff --git a/examples/zh_en_tts/tts3/run.sh b/examples/zh_en_tts/tts3/run.sh index 204042b12..12f99081a 100755 --- a/examples/zh_en_tts/tts3/run.sh +++ b/examples/zh_en_tts/tts3/run.sh @@ -47,8 +47,8 @@ fi if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then # install paddle2onnx version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}') - if [[ -z "$version" || ${version} != '0.9.8' ]]; then - pip install paddle2onnx==0.9.8 + if [[ -z "$version" || ${version} != '1.0.0' ]]; then + pip install paddle2onnx==1.0.0 fi ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_mix # considering the balance between speed and quality, we recommend that you use hifigan as vocoder From ea9ee93739f023916ef7736ad8f4d8d3a34d4f8a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=89=BE=E6=A2=A6?= Date: Mon, 5 Sep 2022 19:57:48 +0800 Subject: [PATCH 064/101] [TTS]Update VITS to support VITS and its voice cloning training on AIShell-3 (#2268) * code for training vits voice clone on aishell3. 
Co-authored-by: TianYuan --- README.md | 13 +- README_cn.md | 13 +- examples/aishell3/vits-vc/README.md | 154 +++++++++++++ examples/aishell3/vits-vc/conf/default.yaml | 185 +++++++++++++++ examples/aishell3/vits-vc/local/preprocess.sh | 79 +++++++ examples/aishell3/vits-vc/local/synthesize.sh | 19 ++ examples/aishell3/vits-vc/local/train.sh | 18 ++ .../aishell3/vits-vc/local/voice_cloning.sh | 22 ++ examples/aishell3/vits-vc/path.sh | 13 ++ examples/aishell3/vits-vc/run.sh | 45 ++++ examples/aishell3/vits/README.md | 202 +++++++++++++++++ examples/aishell3/vits/conf/default.yaml | 184 +++++++++++++++ examples/aishell3/vits/local/preprocess.sh | 69 ++++++ examples/aishell3/vits/local/synthesize.sh | 19 ++ .../aishell3/vits/local/synthesize_e2e.sh | 24 ++ examples/aishell3/vits/local/train.sh | 18 ++ examples/aishell3/vits/path.sh | 13 ++ examples/aishell3/vits/run.sh | 36 +++ examples/csmsc/vits/run.sh | 2 +- paddlespeech/t2s/datasets/am_batch_fn.py | 55 +++++ paddlespeech/t2s/exps/vits/synthesize.py | 40 +++- paddlespeech/t2s/exps/vits/synthesize_e2e.py | 23 +- paddlespeech/t2s/exps/vits/train.py | 37 ++- paddlespeech/t2s/exps/vits/voice_cloning.py | 213 ++++++++++++++++++ paddlespeech/t2s/models/vits/generator.py | 76 +++++++ paddlespeech/t2s/models/vits/vits.py | 42 +++- paddlespeech/t2s/models/vits/vits_updater.py | 4 + 27 files changed, 1603 insertions(+), 15 deletions(-) create mode 100644 examples/aishell3/vits-vc/README.md create mode 100644 examples/aishell3/vits-vc/conf/default.yaml create mode 100755 examples/aishell3/vits-vc/local/preprocess.sh create mode 100755 examples/aishell3/vits-vc/local/synthesize.sh create mode 100755 examples/aishell3/vits-vc/local/train.sh create mode 100755 examples/aishell3/vits-vc/local/voice_cloning.sh create mode 100755 examples/aishell3/vits-vc/path.sh create mode 100755 examples/aishell3/vits-vc/run.sh create mode 100644 examples/aishell3/vits/README.md create mode 100644 examples/aishell3/vits/conf/default.yaml create mode 100755 examples/aishell3/vits/local/preprocess.sh create mode 100755 examples/aishell3/vits/local/synthesize.sh create mode 100755 examples/aishell3/vits/local/synthesize_e2e.sh create mode 100755 examples/aishell3/vits/local/train.sh create mode 100755 examples/aishell3/vits/path.sh create mode 100755 examples/aishell3/vits/run.sh create mode 100644 paddlespeech/t2s/exps/vits/voice_cloning.py diff --git a/README.md b/README.md index acbe12309..5c62925e2 100644 --- a/README.md +++ b/README.md @@ -613,7 +613,7 @@ PaddleSpeech supports a series of most popular models. They are summarized in [r - Voice Cloning + Voice Cloning GE2E Librispeech, etc. @@ -633,13 +633,20 @@ PaddleSpeech supports a series of most popular models. They are summarized in [r ge2e-fastspeech2-aishell3 + + + GE2E + VITS + AISHELL-3 + + ge2e-vits-aishell3 + End-to-End VITS - CSMSC + CSMSC / AISHELL-3 - VITS-csmsc + VITS-csmsc / VITS-aishell3 diff --git a/README_cn.md b/README_cn.md index dbbc13ac0..18bce43c4 100644 --- a/README_cn.md +++ b/README_cn.md @@ -608,7 +608,7 @@ PaddleSpeech 的 **语音合成** 主要包含三个模块:文本前端、声 - 声音克隆 + 声音克隆 GE2E Librispeech, etc. 
@@ -629,13 +629,20 @@ PaddleSpeech 的 **语音合成** 主要包含三个模块:文本前端、声 ge2e-fastspeech2-aishell3 + + GE2E + VITS + AISHELL-3 + + ge2e-vits-aishell3 + + 端到端 VITS - CSMSC + CSMSC / AISHELL-3 - VITS-csmsc + VITS-csmsc / VITS-aishell3 diff --git a/examples/aishell3/vits-vc/README.md b/examples/aishell3/vits-vc/README.md new file mode 100644 index 000000000..84f874006 --- /dev/null +++ b/examples/aishell3/vits-vc/README.md @@ -0,0 +1,154 @@ +# VITS with AISHELL-3 +This example contains code used to train a [VITS](https://arxiv.org/abs/2106.06103) model with [AISHELL-3](http://www.aishelltech.com/aishell_3). The trained model can be used in Voice Cloning Task, We refer to the model structure of [Transfer Learning from Speaker Verification to Multispeaker Text-To-Speech Synthesis](https://arxiv.org/pdf/1806.04558.pdf). The general steps are as follows: +1. Speaker Encoder: We use Speaker Verification to train a speaker encoder. Datasets used in this task are different from those used in `VITS` because the transcriptions are not needed, we use more datasets, refer to [ge2e](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/other/ge2e). +2. Synthesizer and Vocoder: We use the trained speaker encoder to generate speaker embedding for each sentence in AISHELL-3. This embedding is an extra input of `VITS` which will be concated with encoder outputs. The vocoder is part of `VITS` due to its special structure. + +## Dataset +### Download and Extract +Download AISHELL-3 from it's [Official Website](http://www.aishelltech.com/aishell_3) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/data_aishell3`. + +### Get MFA Result and Extract +We use [MFA2.x](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) to get phonemes for VITS, the durations of MFA are not needed here. +You can download from here [aishell3_alignment_tone.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/AISHELL-3/with_tone/aishell3_alignment_tone.tar.gz), or train your MFA model reference to [mfa example](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/other/mfa) (use MFA1.x now) of our repo. + +## Pretrained GE2E Model +We use pretrained GE2E model to generate speaker embedding for each sentence. + +Download pretrained GE2E model from here [ge2e_ckpt_0.3.zip](https://bj.bcebos.com/paddlespeech/Parakeet/released_models/ge2e/ge2e_ckpt_0.3.zip), and `unzip` it. + +## Get Started +Assume the path to the dataset is `~/datasets/data_aishell3`. +Assume the path to the MFA result of AISHELL-3 is `./aishell3_alignment_tone`. +Assume the path to the pretrained ge2e model is `./ge2e_ckpt_0.3`. + +Run the command below to +1. **source path**. +2. preprocess the dataset. +3. train the model. +4. synthesize waveform from `metadata.jsonl`. +5. start a voice cloning inference. + +```bash +./run.sh +``` +You can choose a range of stages you want to run, or set `stage` equal to `stop-stage` to use only one stage, for example, running the following command will only preprocess the dataset. +```bash +./run.sh --stage 0 --stop-stage 0 +``` + +### Data Preprocessing +```bash +CUDA_VISIBLE_DEVICES=${gpus} ./local/preprocess.sh ${conf_path} ${ge2e_ckpt_path} +``` +When it is done. A `dump` folder is created in the current directory. The structure of the dump folder is listed below. + +```text +dump +├── dev +│   ├── norm +│   └── raw +├── embed +│ ├── SSB0005 +│ ├── SSB0009 +│ ├── ... +│ └── ... 
+├── phone_id_map.txt +├── speaker_id_map.txt +├── test +│   ├── norm +│   └── raw +└── train + ├── feats_stats.npy + ├── norm + └── raw +``` +The `embed` contains the generated speaker embedding for each sentence in AISHELL-3, which has the same file structure with wav files and the format is `.npy`. + +The computing time of utterance embedding can be x hours. + +The dataset is split into 3 parts, namely `train`, `dev`, and` test`, each of which contains a `norm` and `raw` subfolder. The raw folder contains wave and linear spectrogram of each utterance, while the norm folder contains normalized ones. The statistics used to normalize features are computed from the training set, which is located in `dump/train/feats_stats.npy`. + +Also, there is a `metadata.jsonl` in each subfolder. It is a table-like file that contains phones, text_lengths, feats, feats_lengths, the path of linear spectrogram features, the path of raw waves, speaker, and the id of each utterance. + +The preprocessing step is very similar to that one of [vits](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/vits), but there is one more `ge2e/inference` step here. + +### Model Training +```bash +CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path} +``` +The training step is very similar to that one of [vits](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/vits), but we should set `--voice-cloning=True` when calling `${BIN_DIR}/train.py`. + +### Synthesizing + +`./local/synthesize.sh` calls `${BIN_DIR}/synthesize.py`, which can synthesize waveform from `metadata.jsonl`. + +```bash +CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name} +``` +```text +usage: synthesize.py [-h] [--config CONFIG] [--ckpt CKPT] + [--phones_dict PHONES_DICT] [--speaker_dict SPEAKER_DICT] + [--voice-cloning VOICE_CLONING] [--ngpu NGPU] + [--test_metadata TEST_METADATA] [--output_dir OUTPUT_DIR] + +Synthesize with VITS + +optional arguments: + -h, --help show this help message and exit + --config CONFIG Config of VITS. + --ckpt CKPT Checkpoint file of VITS. + --phones_dict PHONES_DICT + phone vocabulary file. + --speaker_dict SPEAKER_DICT + speaker id map file. + --voice-cloning VOICE_CLONING + whether training voice cloning model. + --ngpu NGPU if ngpu == 0, use cpu. + --test_metadata TEST_METADATA + test metadata. + --output_dir OUTPUT_DIR + output dir. +``` +The synthesizing step is very similar to that one of [vits](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/vits), but we should set `--voice-cloning=True` when calling `${BIN_DIR}/../synthesize.py`. 
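As a rough, editorial illustration of how the per-sentence GE2E embeddings under `dump/embed` end up being consumed at synthesis time (this sketch is not part of the patch; the checkpoint path, embedding path, vocabulary size and phone ids below are placeholders, while the `VITS(...)` construction and the `inference(text=..., spembs=...)` call follow the `synthesize.py` changes in this PR):

```python
import numpy as np
import paddle
import soundfile as sf
import yaml
from yacs.config import CfgNode

from paddlespeech.t2s.models.vits import VITS

# placeholder paths, for illustration only
config_path = "conf/default.yaml"
ckpt_path = "exp/default/checkpoints/snapshot_iter_153.pdz"
spk_emb_path = "dump/embed/SSB0005/SSB00050001.npy"

with open(config_path) as f:
    config = CfgNode(yaml.safe_load(f))

vocab_size = 300                      # normally read from dump/phone_id_map.txt
odim = config.n_fft // 2 + 1          # number of linear-spectrogram bins
vits = VITS(idim=vocab_size, odim=odim, **config["model"])
vits.set_state_dict(paddle.load(ckpt_path)["main_params"])
vits.eval()

phone_ids = paddle.to_tensor([0, 1, 2, 3])         # placeholder phone id sequence
spk_emb = paddle.to_tensor(np.load(spk_emb_path))  # (spk_embed_dim,) GE2E embedding
with paddle.no_grad():
    out = vits.inference(text=phone_ids, spembs=spk_emb)
sf.write("demo.wav", out["wav"].numpy(), samplerate=config.fs)
```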
+ +### Voice Cloning +Assume there are some reference audios in `./ref_audio` +```text +ref_audio +├── 001238.wav +├── LJ015-0254.wav +└── audio_self_test.mp3 +``` +`./local/voice_cloning.sh` calls `${BIN_DIR}/voice_cloning.py` + +```bash +CUDA_VISIBLE_DEVICES=${gpus} ./local/voice_cloning.sh ${conf_path} ${train_output_path} ${ckpt_name} ${ge2e_params_path} ${add_blank} ${ref_audio_dir} +``` + +If you want to convert a speaker audio file to refered speaker, run: + +```bash +CUDA_VISIBLE_DEVICES=${gpus} ./local/voice_cloning.sh ${conf_path} ${train_output_path} ${ckpt_name} ${ge2e_params_path} ${add_blank} ${ref_audio_dir} ${src_audio_path} +``` + + + diff --git a/examples/aishell3/vits-vc/conf/default.yaml b/examples/aishell3/vits-vc/conf/default.yaml new file mode 100644 index 000000000..c71e071d2 --- /dev/null +++ b/examples/aishell3/vits-vc/conf/default.yaml @@ -0,0 +1,185 @@ +# This configuration tested on 4 GPUs (V100) with 32GB GPU +# memory. It takes around 2 weeks to finish the training +# but 100k iters model should generate reasonable results. +########################################################### +# FEATURE EXTRACTION SETTING # +########################################################### + +fs: 22050 # sr +n_fft: 1024 # FFT size (samples). +n_shift: 256 # Hop size (samples). 12.5ms +win_length: null # Window length (samples). 50ms + # If set to null, it will be the same as fft_size. +window: "hann" # Window function. + + +########################################################## +# TTS MODEL SETTING # +########################################################## +model: + # generator related + generator_type: vits_generator + generator_params: + hidden_channels: 192 + spk_embed_dim: 256 + global_channels: 256 + segment_size: 32 + text_encoder_attention_heads: 2 + text_encoder_ffn_expand: 4 + text_encoder_blocks: 6 + text_encoder_positionwise_layer_type: "conv1d" + text_encoder_positionwise_conv_kernel_size: 3 + text_encoder_positional_encoding_layer_type: "rel_pos" + text_encoder_self_attention_layer_type: "rel_selfattn" + text_encoder_activation_type: "swish" + text_encoder_normalize_before: True + text_encoder_dropout_rate: 0.1 + text_encoder_positional_dropout_rate: 0.0 + text_encoder_attention_dropout_rate: 0.1 + use_macaron_style_in_text_encoder: True + use_conformer_conv_in_text_encoder: False + text_encoder_conformer_kernel_size: -1 + decoder_kernel_size: 7 + decoder_channels: 512 + decoder_upsample_scales: [8, 8, 2, 2] + decoder_upsample_kernel_sizes: [16, 16, 4, 4] + decoder_resblock_kernel_sizes: [3, 7, 11] + decoder_resblock_dilations: [[1, 3, 5], [1, 3, 5], [1, 3, 5]] + use_weight_norm_in_decoder: True + posterior_encoder_kernel_size: 5 + posterior_encoder_layers: 16 + posterior_encoder_stacks: 1 + posterior_encoder_base_dilation: 1 + posterior_encoder_dropout_rate: 0.0 + use_weight_norm_in_posterior_encoder: True + flow_flows: 4 + flow_kernel_size: 5 + flow_base_dilation: 1 + flow_layers: 4 + flow_dropout_rate: 0.0 + use_weight_norm_in_flow: True + use_only_mean_in_flow: True + stochastic_duration_predictor_kernel_size: 3 + stochastic_duration_predictor_dropout_rate: 0.5 + stochastic_duration_predictor_flows: 4 + stochastic_duration_predictor_dds_conv_layers: 3 + # discriminator related + discriminator_type: hifigan_multi_scale_multi_period_discriminator + discriminator_params: + scales: 1 + scale_downsample_pooling: "AvgPool1D" + scale_downsample_pooling_params: + kernel_size: 4 + stride: 2 + padding: 2 + scale_discriminator_params: + in_channels: 1 + 
out_channels: 1 + kernel_sizes: [15, 41, 5, 3] + channels: 128 + max_downsample_channels: 1024 + max_groups: 16 + bias: True + downsample_scales: [2, 2, 4, 4, 1] + nonlinear_activation: "leakyrelu" + nonlinear_activation_params: + negative_slope: 0.1 + use_weight_norm: True + use_spectral_norm: False + follow_official_norm: False + periods: [2, 3, 5, 7, 11] + period_discriminator_params: + in_channels: 1 + out_channels: 1 + kernel_sizes: [5, 3] + channels: 32 + downsample_scales: [3, 3, 3, 3, 1] + max_downsample_channels: 1024 + bias: True + nonlinear_activation: "leakyrelu" + nonlinear_activation_params: + negative_slope: 0.1 + use_weight_norm: True + use_spectral_norm: False + # others + sampling_rate: 22050 # needed in the inference for saving wav + cache_generator_outputs: True # whether to cache generator outputs in the training + +########################################################### +# LOSS SETTING # +########################################################### +# loss function related +generator_adv_loss_params: + average_by_discriminators: False # whether to average loss value by #discriminators + loss_type: mse # loss type, "mse" or "hinge" +discriminator_adv_loss_params: + average_by_discriminators: False # whether to average loss value by #discriminators + loss_type: mse # loss type, "mse" or "hinge" +feat_match_loss_params: + average_by_discriminators: False # whether to average loss value by #discriminators + average_by_layers: False # whether to average loss value by #layers of each discriminator + include_final_outputs: True # whether to include final outputs for loss calculation +mel_loss_params: + fs: 22050 # must be the same as the training data + fft_size: 1024 # fft points + hop_size: 256 # hop size + win_length: null # window length + window: hann # window type + num_mels: 80 # number of Mel basis + fmin: 0 # minimum frequency for Mel basis + fmax: null # maximum frequency for Mel basis + log_base: null # null represent natural log + +########################################################### +# ADVERSARIAL LOSS SETTING # +########################################################### +lambda_adv: 1.0 # loss scaling coefficient for adversarial loss +lambda_mel: 45.0 # loss scaling coefficient for Mel loss +lambda_feat_match: 2.0 # loss scaling coefficient for feat match loss +lambda_dur: 1.0 # loss scaling coefficient for duration loss +lambda_kl: 1.0 # loss scaling coefficient for KL divergence loss +# others +sampling_rate: 22050 # needed in the inference for saving wav +cache_generator_outputs: True # whether to cache generator outputs in the training + + +########################################################### +# DATA LOADER SETTING # +########################################################### +batch_size: 50 # Batch size. +num_workers: 4 # Number of workers in DataLoader. 
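Editorial note, not part of the patch: the `lambda_*` values in the ADVERSARIAL LOSS SETTING block above are plain scaling coefficients. A minimal sketch of how such weights are typically combined on the generator side is shown below; the real combination lives in the VITS updater code, and the function here is purely illustrative:

```python
# Illustrative only: combine the individually computed generator-side losses
# with the scaling coefficients configured above.
def weighted_generator_loss(mel_loss, kl_loss, dur_loss, adv_loss, feat_match_loss,
                            lambda_mel=45.0, lambda_kl=1.0, lambda_dur=1.0,
                            lambda_adv=1.0, lambda_feat_match=2.0):
    return (lambda_mel * mel_loss
            + lambda_kl * kl_loss
            + lambda_dur * dur_loss
            + lambda_adv * adv_loss
            + lambda_feat_match * feat_match_loss)
```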
+ +########################################################## +# OPTIMIZER & SCHEDULER SETTING # +########################################################## +# optimizer setting for generator +generator_optimizer_params: + beta1: 0.8 + beta2: 0.99 + epsilon: 1.0e-9 + weight_decay: 0.0 +generator_scheduler: exponential_decay +generator_scheduler_params: + learning_rate: 2.0e-4 + gamma: 0.999875 + +# optimizer setting for discriminator +discriminator_optimizer_params: + beta1: 0.8 + beta2: 0.99 + epsilon: 1.0e-9 + weight_decay: 0.0 +discriminator_scheduler: exponential_decay +discriminator_scheduler_params: + learning_rate: 2.0e-4 + gamma: 0.999875 +generator_first: False # whether to start updating generator first + +########################################################## +# OTHER TRAINING SETTING # +########################################################## +num_snapshots: 10 # max number of snapshots to keep while training +train_max_steps: 350000 # Number of training steps. == total_iters / ngpus, total_iters = 1000000 +save_interval_steps: 1000 # Interval steps to save checkpoint. +eval_interval_steps: 250 # Interval steps to evaluate the network. +seed: 777 # random seed number diff --git a/examples/aishell3/vits-vc/local/preprocess.sh b/examples/aishell3/vits-vc/local/preprocess.sh new file mode 100755 index 000000000..2f3772863 --- /dev/null +++ b/examples/aishell3/vits-vc/local/preprocess.sh @@ -0,0 +1,79 @@ +#!/bin/bash + +stage=0 +stop_stage=100 + +config_path=$1 +add_blank=$2 +ge2e_ckpt_path=$3 + +# gen speaker embedding +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + python3 ${MAIN_ROOT}/paddlespeech/vector/exps/ge2e/inference.py \ + --input=~/datasets/data_aishell3/train/wav/ \ + --output=dump/embed \ + --checkpoint_path=${ge2e_ckpt_path} +fi + +# copy from tts3/preprocess +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + # get durations from MFA's result + echo "Generate durations.txt from MFA results ..." + python3 ${MAIN_ROOT}/utils/gen_duration_from_textgrid.py \ + --inputdir=./aishell3_alignment_tone \ + --output durations.txt \ + --config=${config_path} +fi + +if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then + # extract features + echo "Extract features ..." + python3 ${BIN_DIR}/preprocess.py \ + --dataset=aishell3 \ + --rootdir=~/datasets/data_aishell3/ \ + --dumpdir=dump \ + --dur-file=durations.txt \ + --config=${config_path} \ + --num-cpu=20 \ + --cut-sil=True \ + --spk_emb_dir=dump/embed +fi + +if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then + # get features' stats(mean and std) + echo "Get features' stats ..." + python3 ${MAIN_ROOT}/utils/compute_statistics.py \ + --metadata=dump/train/raw/metadata.jsonl \ + --field-name="feats" +fi + +if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then + # normalize and covert phone/speaker to id, dev and test should use train's stats + echo "Normalize ..." 
+ python3 ${BIN_DIR}/normalize.py \ + --metadata=dump/train/raw/metadata.jsonl \ + --dumpdir=dump/train/norm \ + --feats-stats=dump/train/feats_stats.npy \ + --phones-dict=dump/phone_id_map.txt \ + --speaker-dict=dump/speaker_id_map.txt \ + --add-blank=${add_blank} \ + --skip-wav-copy + + python3 ${BIN_DIR}/normalize.py \ + --metadata=dump/dev/raw/metadata.jsonl \ + --dumpdir=dump/dev/norm \ + --feats-stats=dump/train/feats_stats.npy \ + --phones-dict=dump/phone_id_map.txt \ + --speaker-dict=dump/speaker_id_map.txt \ + --add-blank=${add_blank} \ + --skip-wav-copy + + python3 ${BIN_DIR}/normalize.py \ + --metadata=dump/test/raw/metadata.jsonl \ + --dumpdir=dump/test/norm \ + --feats-stats=dump/train/feats_stats.npy \ + --phones-dict=dump/phone_id_map.txt \ + --speaker-dict=dump/speaker_id_map.txt \ + --add-blank=${add_blank} \ + --skip-wav-copy +fi diff --git a/examples/aishell3/vits-vc/local/synthesize.sh b/examples/aishell3/vits-vc/local/synthesize.sh new file mode 100755 index 000000000..01a74fa3b --- /dev/null +++ b/examples/aishell3/vits-vc/local/synthesize.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +config_path=$1 +train_output_path=$2 +ckpt_name=$3 +stage=0 +stop_stage=0 + +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/synthesize.py \ + --config=${config_path} \ + --ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --phones_dict=dump/phone_id_map.txt \ + --test_metadata=dump/test/norm/metadata.jsonl \ + --output_dir=${train_output_path}/test \ + --voice-cloning=True +fi diff --git a/examples/aishell3/vits-vc/local/train.sh b/examples/aishell3/vits-vc/local/train.sh new file mode 100755 index 000000000..eeb6f0871 --- /dev/null +++ b/examples/aishell3/vits-vc/local/train.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +config_path=$1 +train_output_path=$2 + +# install monotonic_align +cd ${MAIN_ROOT}/paddlespeech/t2s/models/vits/monotonic_align +python3 setup.py build_ext --inplace +cd - + +python3 ${BIN_DIR}/train.py \ + --train-metadata=dump/train/norm/metadata.jsonl \ + --dev-metadata=dump/dev/norm/metadata.jsonl \ + --config=${config_path} \ + --output-dir=${train_output_path} \ + --ngpu=4 \ + --phones-dict=dump/phone_id_map.txt \ + --voice-cloning=True diff --git a/examples/aishell3/vits-vc/local/voice_cloning.sh b/examples/aishell3/vits-vc/local/voice_cloning.sh new file mode 100755 index 000000000..68ea54914 --- /dev/null +++ b/examples/aishell3/vits-vc/local/voice_cloning.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +config_path=$1 +train_output_path=$2 +ckpt_name=$3 +ge2e_params_path=$4 +add_blank=$5 +ref_audio_dir=$6 +src_audio_path=$7 + +FLAGS_allocator_strategy=naive_best_fit \ +FLAGS_fraction_of_gpu_memory_to_use=0.01 \ +python3 ${BIN_DIR}/voice_cloning.py \ + --config=${config_path} \ + --ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --ge2e_params_path=${ge2e_params_path} \ + --phones_dict=dump/phone_id_map.txt \ + --text="凯莫瑞安联合体的经济崩溃迫在眉睫。" \ + --audio-path=${src_audio_path} \ + --input-dir=${ref_audio_dir} \ + --output-dir=${train_output_path}/vc_syn \ + --add-blank=${add_blank} diff --git a/examples/aishell3/vits-vc/path.sh b/examples/aishell3/vits-vc/path.sh new file mode 100755 index 000000000..52d0c3783 --- /dev/null +++ b/examples/aishell3/vits-vc/path.sh @@ -0,0 +1,13 @@ +#!/bin/bash +export MAIN_ROOT=`realpath ${PWD}/../../../` + +export PATH=${MAIN_ROOT}:${MAIN_ROOT}/utils:${PATH} +export LC_ALL=C + +export PYTHONDONTWRITEBYTECODE=1 +# Use UTF-8 in Python to 
avoid UnicodeDecodeError when LC_ALL=C +export PYTHONIOENCODING=UTF-8 +export PYTHONPATH=${MAIN_ROOT}:${PYTHONPATH} + +MODEL=vits +export BIN_DIR=${MAIN_ROOT}/paddlespeech/t2s/exps/${MODEL} \ No newline at end of file diff --git a/examples/aishell3/vits-vc/run.sh b/examples/aishell3/vits-vc/run.sh new file mode 100755 index 000000000..fff0c27d3 --- /dev/null +++ b/examples/aishell3/vits-vc/run.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +set -e +source path.sh + +gpus=0,1,2,3 +stage=0 +stop_stage=100 + +conf_path=conf/default.yaml +train_output_path=exp/default +ckpt_name=snapshot_iter_153.pdz +add_blank=true +ref_audio_dir=ref_audio +src_audio_path='' + +# not include ".pdparams" here +ge2e_ckpt_path=./ge2e_ckpt_0.3/step-3000000 + +# include ".pdparams" here +ge2e_params_path=${ge2e_ckpt_path}.pdparams + +# with the following command, you can choose the stage range you want to run +# such as `./run.sh --stage 0 --stop-stage 0` +# this can not be mixed use with `$1`, `$2` ... +source ${MAIN_ROOT}/utils/parse_options.sh || exit 1 + +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + # prepare data + CUDA_VISIBLE_DEVICES=${gpus} ./local/preprocess.sh ${conf_path} ${add_blank} ${ge2e_ckpt_path} || exit -1 +fi + +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + # train model, all `ckpt` under `train_output_path/checkpoints/` dir + CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path} || exit -1 +fi + +if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then + CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1 +fi + +if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then + CUDA_VISIBLE_DEVICES=${gpus} ./local/voice_cloning.sh ${conf_path} ${train_output_path} ${ckpt_name} \ + ${ge2e_params_path} ${add_blank} ${ref_audio_dir} ${src_audio_path} || exit -1 +fi diff --git a/examples/aishell3/vits/README.md b/examples/aishell3/vits/README.md new file mode 100644 index 000000000..dc80e18bc --- /dev/null +++ b/examples/aishell3/vits/README.md @@ -0,0 +1,202 @@ +# VITS with AISHELL-3 +This example contains code used to train a [VITS](https://arxiv.org/abs/2106.06103) model with [AISHELL-3](http://www.aishelltech.com/aishell_3). + +AISHELL-3 is a large-scale and high-fidelity multi-speaker Mandarin speech corpus that could be used to train multi-speaker Text-to-Speech (TTS) systems. + +We use AISHELL-3 to train a multi-speaker VITS model here. +## Dataset +### Download and Extract +Download AISHELL-3 from it's [Official Website](http://www.aishelltech.com/aishell_3) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/data_aishell3`. + +### Get MFA Result and Extract +We use [MFA2.x](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) to get phonemes for VITS, the durations of MFA are not needed here. +You can download from here [aishell3_alignment_tone.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/AISHELL-3/with_tone/aishell3_alignment_tone.tar.gz), or train your MFA model reference to [mfa example](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/other/mfa) (use MFA1.x now) of our repo. + +## Get Started +Assume the path to the dataset is `~/datasets/data_aishell3`. +Assume the path to the MFA result of AISHELL-3 is `./aishell3_alignment_tone`. +Run the command below to +1. **source path**. +2. preprocess the dataset. +3. train the model. +4. synthesize wavs. + - synthesize waveform from `metadata.jsonl`. + - synthesize waveform from a text file. 
+ +```bash +./run.sh +``` +You can choose a range of stages you want to run, or set `stage` equal to `stop-stage` to use only one stage, for example, running the following command will only preprocess the dataset. +```bash +./run.sh --stage 0 --stop-stage 0 +``` + +### Data Preprocessing +```bash +./local/preprocess.sh ${conf_path} +``` +When it is done. A `dump` folder is created in the current directory. The structure of the dump folder is listed below. + +```text +dump +├── dev +│   ├── norm +│   └── raw +├── phone_id_map.txt +├── speaker_id_map.txt +├── test +│   ├── norm +│   └── raw +└── train + ├── feats_stats.npy + ├── norm + └── raw +``` +The dataset is split into 3 parts, namely `train`, `dev`, and` test`, each of which contains a `norm` and `raw` subfolder. The raw folder contains wave and linear spectrogram of each utterance, while the norm folder contains normalized ones. The statistics used to normalize features are computed from the training set, which is located in `dump/train/feats_stats.npy`. + +Also, there is a `metadata.jsonl` in each subfolder. It is a table-like file that contains phones, text_lengths, feats, feats_lengths, the path of linear spectrogram features, the path of raw waves, speaker, and the id of each utterance. + +### Model Training +```bash +CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path} +``` +`./local/train.sh` calls `${BIN_DIR}/train.py`. +Here's the complete help message. +```text +usage: train.py [-h] [--config CONFIG] [--train-metadata TRAIN_METADATA] + [--dev-metadata DEV_METADATA] [--output-dir OUTPUT_DIR] + [--ngpu NGPU] [--phones-dict PHONES_DICT] + [--speaker-dict SPEAKER_DICT] [--voice-cloning VOICE_CLONING] + +Train a VITS model. + +optional arguments: + -h, --help show this help message and exit + --config CONFIG config file to overwrite default config. + --train-metadata TRAIN_METADATA + training data. + --dev-metadata DEV_METADATA + dev data. + --output-dir OUTPUT_DIR + output dir. + --ngpu NGPU if ngpu == 0, use cpu. + --phones-dict PHONES_DICT + phone vocabulary file. + --speaker-dict SPEAKER_DICT + speaker id map file for multiple speaker model. + --voice-cloning VOICE_CLONING + whether training voice cloning model. +``` +1. `--config` is a config file in yaml format to overwrite the default config, which can be found at `conf/default.yaml`. +2. `--train-metadata` and `--dev-metadata` should be the metadata file in the normalized subfolder of `train` and `dev` in the `dump` folder. +3. `--output-dir` is the directory to save the results of the experiment. Checkpoints are saved in `checkpoints/` inside this directory. +4. `--ngpu` is the number of gpus to use, if ngpu == 0, use cpu. +5. `--phones-dict` is the path of the phone vocabulary file. +6. `--speaker-dict` is the path of the speaker id map file when training a multi-speaker VITS. + +### Synthesizing + +`./local/synthesize.sh` calls `${BIN_DIR}/synthesize.py`, which can synthesize waveform from `metadata.jsonl`. + +```bash +CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name} +``` +```text +usage: synthesize.py [-h] [--config CONFIG] [--ckpt CKPT] + [--phones_dict PHONES_DICT] [--speaker_dict SPEAKER_DICT] + [--voice-cloning VOICE_CLONING] [--ngpu NGPU] + [--test_metadata TEST_METADATA] [--output_dir OUTPUT_DIR] + +Synthesize with VITS + +optional arguments: + -h, --help show this help message and exit + --config CONFIG Config of VITS. + --ckpt CKPT Checkpoint file of VITS. 
+ --phones_dict PHONES_DICT + phone vocabulary file. + --speaker_dict SPEAKER_DICT + speaker id map file. + --voice-cloning VOICE_CLONING + whether training voice cloning model. + --ngpu NGPU if ngpu == 0, use cpu. + --test_metadata TEST_METADATA + test metadata. + --output_dir OUTPUT_DIR + output dir. +``` +`./local/synthesize_e2e.sh` calls `${BIN_DIR}/synthesize_e2e.py`, which can synthesize waveform from text file. +```bash +CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh ${conf_path} ${train_output_path} ${ckpt_name} +``` +```text +usage: synthesize_e2e.py [-h] [--config CONFIG] [--ckpt CKPT] + [--phones_dict PHONES_DICT] + [--speaker_dict SPEAKER_DICT] [--spk_id SPK_ID] + [--lang LANG] + [--inference_dir INFERENCE_DIR] [--ngpu NGPU] + [--text TEXT] [--output_dir OUTPUT_DIR] + +Synthesize with VITS + +optional arguments: + -h, --help show this help message and exit + --config CONFIG Config of VITS. + --ckpt CKPT Checkpoint file of VITS. + --phones_dict PHONES_DICT + phone vocabulary file. + --speaker_dict SPEAKER_DICT + speaker id map file. + --spk_id SPK_ID spk id for multi speaker acoustic model + --lang LANG Choose model language. zh or en + --inference_dir INFERENCE_DIR + dir to save inference models + --ngpu NGPU if ngpu == 0, use cpu. + --text TEXT text to synthesize, a 'utt_id sentence' pair per line. + --output_dir OUTPUT_DIR + output dir. +``` +1. `--config`, `--ckpt`, `--phones_dict` and `--speaker_dict` are arguments for acoustic model, which correspond to the 3 files in the VITS pretrained model. +2. `--lang` is the model language, which can be `zh` or `en`. +3. `--test_metadata` should be the metadata file in the normalized subfolder of `test` in the `dump` folder. +4. `--text` is the text file, which contains sentences to synthesize. +5. `--output_dir` is the directory to save synthesized audio files. +6. `--ngpu` is the number of gpus to use, if ngpu == 0, use cpu. + + + diff --git a/examples/aishell3/vits/conf/default.yaml b/examples/aishell3/vits/conf/default.yaml new file mode 100644 index 000000000..bc0f224d0 --- /dev/null +++ b/examples/aishell3/vits/conf/default.yaml @@ -0,0 +1,184 @@ +# This configuration tested on 4 GPUs (V100) with 32GB GPU +# memory. It takes around 2 weeks to finish the training +# but 100k iters model should generate reasonable results. +########################################################### +# FEATURE EXTRACTION SETTING # +########################################################### + +fs: 22050 # sr +n_fft: 1024 # FFT size (samples). +n_shift: 256 # Hop size (samples). 12.5ms +win_length: null # Window length (samples). 50ms + # If set to null, it will be the same as fft_size. +window: "hann" # Window function. 
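Editorial aside, not part of the patch: to make the feature extraction settings above concrete, here is a hedged sketch of the kind of linear-spectrogram computation they parameterize. The actual extraction is done by this example's preprocessing script; the helper below, and the use of `librosa`, is only for illustration:

```python
import librosa
import numpy as np

def linear_spectrogram(wav_path, fs=22050, n_fft=1024, n_shift=256,
                       win_length=None, window="hann"):
    # load and resample to the configured sampling rate
    wav, _ = librosa.load(wav_path, sr=fs)
    spec = librosa.stft(wav, n_fft=n_fft, hop_length=n_shift,
                        win_length=win_length, window=window)
    # magnitude spectrogram with n_fft // 2 + 1 = 513 bins, matching the
    # odim = n_fft // 2 + 1 used when constructing the VITS model
    return np.abs(spec).T  # shape: (T_feats, 513)
```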
+ + +########################################################## +# TTS MODEL SETTING # +########################################################## +model: + # generator related + generator_type: vits_generator + generator_params: + hidden_channels: 192 + global_channels: 256 + segment_size: 32 + text_encoder_attention_heads: 2 + text_encoder_ffn_expand: 4 + text_encoder_blocks: 6 + text_encoder_positionwise_layer_type: "conv1d" + text_encoder_positionwise_conv_kernel_size: 3 + text_encoder_positional_encoding_layer_type: "rel_pos" + text_encoder_self_attention_layer_type: "rel_selfattn" + text_encoder_activation_type: "swish" + text_encoder_normalize_before: True + text_encoder_dropout_rate: 0.1 + text_encoder_positional_dropout_rate: 0.0 + text_encoder_attention_dropout_rate: 0.1 + use_macaron_style_in_text_encoder: True + use_conformer_conv_in_text_encoder: False + text_encoder_conformer_kernel_size: -1 + decoder_kernel_size: 7 + decoder_channels: 512 + decoder_upsample_scales: [8, 8, 2, 2] + decoder_upsample_kernel_sizes: [16, 16, 4, 4] + decoder_resblock_kernel_sizes: [3, 7, 11] + decoder_resblock_dilations: [[1, 3, 5], [1, 3, 5], [1, 3, 5]] + use_weight_norm_in_decoder: True + posterior_encoder_kernel_size: 5 + posterior_encoder_layers: 16 + posterior_encoder_stacks: 1 + posterior_encoder_base_dilation: 1 + posterior_encoder_dropout_rate: 0.0 + use_weight_norm_in_posterior_encoder: True + flow_flows: 4 + flow_kernel_size: 5 + flow_base_dilation: 1 + flow_layers: 4 + flow_dropout_rate: 0.0 + use_weight_norm_in_flow: True + use_only_mean_in_flow: True + stochastic_duration_predictor_kernel_size: 3 + stochastic_duration_predictor_dropout_rate: 0.5 + stochastic_duration_predictor_flows: 4 + stochastic_duration_predictor_dds_conv_layers: 3 + # discriminator related + discriminator_type: hifigan_multi_scale_multi_period_discriminator + discriminator_params: + scales: 1 + scale_downsample_pooling: "AvgPool1D" + scale_downsample_pooling_params: + kernel_size: 4 + stride: 2 + padding: 2 + scale_discriminator_params: + in_channels: 1 + out_channels: 1 + kernel_sizes: [15, 41, 5, 3] + channels: 128 + max_downsample_channels: 1024 + max_groups: 16 + bias: True + downsample_scales: [2, 2, 4, 4, 1] + nonlinear_activation: "leakyrelu" + nonlinear_activation_params: + negative_slope: 0.1 + use_weight_norm: True + use_spectral_norm: False + follow_official_norm: False + periods: [2, 3, 5, 7, 11] + period_discriminator_params: + in_channels: 1 + out_channels: 1 + kernel_sizes: [5, 3] + channels: 32 + downsample_scales: [3, 3, 3, 3, 1] + max_downsample_channels: 1024 + bias: True + nonlinear_activation: "leakyrelu" + nonlinear_activation_params: + negative_slope: 0.1 + use_weight_norm: True + use_spectral_norm: False + # others + sampling_rate: 22050 # needed in the inference for saving wav + cache_generator_outputs: True # whether to cache generator outputs in the training + +########################################################### +# LOSS SETTING # +########################################################### +# loss function related +generator_adv_loss_params: + average_by_discriminators: False # whether to average loss value by #discriminators + loss_type: mse # loss type, "mse" or "hinge" +discriminator_adv_loss_params: + average_by_discriminators: False # whether to average loss value by #discriminators + loss_type: mse # loss type, "mse" or "hinge" +feat_match_loss_params: + average_by_discriminators: False # whether to average loss value by #discriminators + average_by_layers: False # whether 
to average loss value by #layers of each discriminator + include_final_outputs: True # whether to include final outputs for loss calculation +mel_loss_params: + fs: 22050 # must be the same as the training data + fft_size: 1024 # fft points + hop_size: 256 # hop size + win_length: null # window length + window: hann # window type + num_mels: 80 # number of Mel basis + fmin: 0 # minimum frequency for Mel basis + fmax: null # maximum frequency for Mel basis + log_base: null # null represent natural log + +########################################################### +# ADVERSARIAL LOSS SETTING # +########################################################### +lambda_adv: 1.0 # loss scaling coefficient for adversarial loss +lambda_mel: 45.0 # loss scaling coefficient for Mel loss +lambda_feat_match: 2.0 # loss scaling coefficient for feat match loss +lambda_dur: 1.0 # loss scaling coefficient for duration loss +lambda_kl: 1.0 # loss scaling coefficient for KL divergence loss +# others +sampling_rate: 22050 # needed in the inference for saving wav +cache_generator_outputs: True # whether to cache generator outputs in the training + + +########################################################### +# DATA LOADER SETTING # +########################################################### +batch_size: 50 # Batch size. +num_workers: 4 # Number of workers in DataLoader. + +########################################################## +# OPTIMIZER & SCHEDULER SETTING # +########################################################## +# optimizer setting for generator +generator_optimizer_params: + beta1: 0.8 + beta2: 0.99 + epsilon: 1.0e-9 + weight_decay: 0.0 +generator_scheduler: exponential_decay +generator_scheduler_params: + learning_rate: 2.0e-4 + gamma: 0.999875 + +# optimizer setting for discriminator +discriminator_optimizer_params: + beta1: 0.8 + beta2: 0.99 + epsilon: 1.0e-9 + weight_decay: 0.0 +discriminator_scheduler: exponential_decay +discriminator_scheduler_params: + learning_rate: 2.0e-4 + gamma: 0.999875 +generator_first: False # whether to start updating generator first + +########################################################## +# OTHER TRAINING SETTING # +########################################################## +num_snapshots: 10 # max number of snapshots to keep while training +train_max_steps: 350000 # Number of training steps. == total_iters / ngpus, total_iters = 1000000 +save_interval_steps: 1000 # Interval steps to save checkpoint. +eval_interval_steps: 250 # Interval steps to evaluate the network. +seed: 777 # random seed number diff --git a/examples/aishell3/vits/local/preprocess.sh b/examples/aishell3/vits/local/preprocess.sh new file mode 100755 index 000000000..70ee064f8 --- /dev/null +++ b/examples/aishell3/vits/local/preprocess.sh @@ -0,0 +1,69 @@ +#!/bin/bash + +stage=0 +stop_stage=100 + +config_path=$1 +add_blank=$2 + +# copy from tts3/preprocess +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + # get durations from MFA's result + echo "Generate durations.txt from MFA results ..." + python3 ${MAIN_ROOT}/utils/gen_duration_from_textgrid.py \ + --inputdir=./aishell3_alignment_tone \ + --output durations.txt \ + --config=${config_path} +fi + +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + # extract features + echo "Extract features ..." 
+ python3 ${BIN_DIR}/preprocess.py \ + --dataset=aishell3 \ + --rootdir=~/datasets/data_aishell3/ \ + --dumpdir=dump \ + --dur-file=durations.txt \ + --config=${config_path} \ + --num-cpu=20 \ + --cut-sil=True +fi + +if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then + # get features' stats(mean and std) + echo "Get features' stats ..." + python3 ${MAIN_ROOT}/utils/compute_statistics.py \ + --metadata=dump/train/raw/metadata.jsonl \ + --field-name="feats" +fi + +if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then + # normalize and covert phone/speaker to id, dev and test should use train's stats + echo "Normalize ..." + python3 ${BIN_DIR}/normalize.py \ + --metadata=dump/train/raw/metadata.jsonl \ + --dumpdir=dump/train/norm \ + --feats-stats=dump/train/feats_stats.npy \ + --phones-dict=dump/phone_id_map.txt \ + --speaker-dict=dump/speaker_id_map.txt \ + --add-blank=${add_blank} \ + --skip-wav-copy + + python3 ${BIN_DIR}/normalize.py \ + --metadata=dump/dev/raw/metadata.jsonl \ + --dumpdir=dump/dev/norm \ + --feats-stats=dump/train/feats_stats.npy \ + --phones-dict=dump/phone_id_map.txt \ + --speaker-dict=dump/speaker_id_map.txt \ + --add-blank=${add_blank} \ + --skip-wav-copy + + python3 ${BIN_DIR}/normalize.py \ + --metadata=dump/test/raw/metadata.jsonl \ + --dumpdir=dump/test/norm \ + --feats-stats=dump/train/feats_stats.npy \ + --phones-dict=dump/phone_id_map.txt \ + --speaker-dict=dump/speaker_id_map.txt \ + --add-blank=${add_blank} \ + --skip-wav-copy +fi diff --git a/examples/aishell3/vits/local/synthesize.sh b/examples/aishell3/vits/local/synthesize.sh new file mode 100755 index 000000000..07f873594 --- /dev/null +++ b/examples/aishell3/vits/local/synthesize.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +config_path=$1 +train_output_path=$2 +ckpt_name=$3 +stage=0 +stop_stage=0 + +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/synthesize.py \ + --config=${config_path} \ + --ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --phones_dict=dump/phone_id_map.txt \ + --speaker_dict=dump/speaker_id_map.txt \ + --test_metadata=dump/test/norm/metadata.jsonl \ + --output_dir=${train_output_path}/test +fi diff --git a/examples/aishell3/vits/local/synthesize_e2e.sh b/examples/aishell3/vits/local/synthesize_e2e.sh new file mode 100755 index 000000000..f0136991f --- /dev/null +++ b/examples/aishell3/vits/local/synthesize_e2e.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +config_path=$1 +train_output_path=$2 +ckpt_name=$3 +add_blank=$4 + +stage=0 +stop_stage=0 + + +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/synthesize_e2e.py \ + --config=${config_path} \ + --ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --phones_dict=dump/phone_id_map.txt \ + --speaker_dict=dump/speaker_id_map.txt \ + --spk_id=0 \ + --output_dir=${train_output_path}/test_e2e \ + --text=${BIN_DIR}/../sentences.txt \ + --add-blank=${add_blank} +fi diff --git a/examples/aishell3/vits/local/train.sh b/examples/aishell3/vits/local/train.sh new file mode 100755 index 000000000..8d3fcdae3 --- /dev/null +++ b/examples/aishell3/vits/local/train.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +config_path=$1 +train_output_path=$2 + +# install monotonic_align +cd ${MAIN_ROOT}/paddlespeech/t2s/models/vits/monotonic_align +python3 setup.py build_ext --inplace +cd - + +python3 ${BIN_DIR}/train.py \ + 
--train-metadata=dump/train/norm/metadata.jsonl \ + --dev-metadata=dump/dev/norm/metadata.jsonl \ + --config=${config_path} \ + --output-dir=${train_output_path} \ + --ngpu=4 \ + --phones-dict=dump/phone_id_map.txt \ + --speaker-dict=dump/speaker_id_map.txt diff --git a/examples/aishell3/vits/path.sh b/examples/aishell3/vits/path.sh new file mode 100755 index 000000000..52d0c3783 --- /dev/null +++ b/examples/aishell3/vits/path.sh @@ -0,0 +1,13 @@ +#!/bin/bash +export MAIN_ROOT=`realpath ${PWD}/../../../` + +export PATH=${MAIN_ROOT}:${MAIN_ROOT}/utils:${PATH} +export LC_ALL=C + +export PYTHONDONTWRITEBYTECODE=1 +# Use UTF-8 in Python to avoid UnicodeDecodeError when LC_ALL=C +export PYTHONIOENCODING=UTF-8 +export PYTHONPATH=${MAIN_ROOT}:${PYTHONPATH} + +MODEL=vits +export BIN_DIR=${MAIN_ROOT}/paddlespeech/t2s/exps/${MODEL} \ No newline at end of file diff --git a/examples/aishell3/vits/run.sh b/examples/aishell3/vits/run.sh new file mode 100755 index 000000000..157a7d4ac --- /dev/null +++ b/examples/aishell3/vits/run.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +set -e +source path.sh + +gpus=0,1,2,3 +stage=0 +stop_stage=100 + +conf_path=conf/default.yaml +train_output_path=exp/default +ckpt_name=snapshot_iter_153.pdz +add_blank=true + +# with the following command, you can choose the stage range you want to run +# such as `./run.sh --stage 0 --stop-stage 0` +# this can not be mixed use with `$1`, `$2` ... +source ${MAIN_ROOT}/utils/parse_options.sh || exit 1 + +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + # prepare data + ./local/preprocess.sh ${conf_path} ${add_blank}|| exit -1 +fi + +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + # train model, all `ckpt` under `train_output_path/checkpoints/` dir + CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path} || exit -1 +fi + +if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then + CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1 +fi + +if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then + CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh ${conf_path} ${train_output_path} ${ckpt_name} ${add_blank}|| exit -1 +fi diff --git a/examples/csmsc/vits/run.sh b/examples/csmsc/vits/run.sh index c284b7b23..74505d9b9 100755 --- a/examples/csmsc/vits/run.sh +++ b/examples/csmsc/vits/run.sh @@ -3,7 +3,7 @@ set -e source path.sh -gpus=0,1 +gpus=0,1,2,3 stage=0 stop_stage=100 diff --git a/paddlespeech/t2s/datasets/am_batch_fn.py b/paddlespeech/t2s/datasets/am_batch_fn.py index c4c9e5d73..c00648b1f 100644 --- a/paddlespeech/t2s/datasets/am_batch_fn.py +++ b/paddlespeech/t2s/datasets/am_batch_fn.py @@ -483,3 +483,58 @@ def vits_single_spk_batch_fn(examples): "speech": speech } return batch + + +def vits_multi_spk_batch_fn(examples): + """ + Returns: + Dict[str, Any]: + - text (Tensor): Text index tensor (B, T_text). + - text_lengths (Tensor): Text length tensor (B,). + - feats (Tensor): Feature tensor (B, T_feats, aux_channels). + - feats_lengths (Tensor): Feature length tensor (B,). + - speech (Tensor): Speech waveform tensor (B, T_wav). + - spk_id (Optional[Tensor]): Speaker index tensor (B,) or (B, 1). + - spk_emb (Optional[Tensor]): Speaker embedding tensor (B, spk_embed_dim). 
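+    Note:
+        Each example provides its waveform under the key "wave", which is
+        returned in the batch under the key "speech"; "spk_emb" takes
+        priority over "spk_id" when both are present.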
+ """ + # fields = ["text", "text_lengths", "feats", "feats_lengths", "speech", "spk_id"/"spk_emb"] + text = [np.array(item["text"], dtype=np.int64) for item in examples] + feats = [np.array(item["feats"], dtype=np.float32) for item in examples] + speech = [np.array(item["wave"], dtype=np.float32) for item in examples] + text_lengths = [ + np.array(item["text_lengths"], dtype=np.int64) for item in examples + ] + feats_lengths = [ + np.array(item["feats_lengths"], dtype=np.int64) for item in examples + ] + + text = batch_sequences(text) + feats = batch_sequences(feats) + speech = batch_sequences(speech) + + # convert each batch to paddle.Tensor + text = paddle.to_tensor(text) + feats = paddle.to_tensor(feats) + text_lengths = paddle.to_tensor(text_lengths) + feats_lengths = paddle.to_tensor(feats_lengths) + + batch = { + "text": text, + "text_lengths": text_lengths, + "feats": feats, + "feats_lengths": feats_lengths, + "speech": speech + } + # spk_emb has a higher priority than spk_id + if "spk_emb" in examples[0]: + spk_emb = [ + np.array(item["spk_emb"], dtype=np.float32) for item in examples + ] + spk_emb = batch_sequences(spk_emb) + spk_emb = paddle.to_tensor(spk_emb) + batch["spk_emb"] = spk_emb + elif "spk_id" in examples[0]: + spk_id = [np.array(item["spk_id"], dtype=np.int64) for item in examples] + spk_id = paddle.to_tensor(spk_id) + batch["spk_id"] = spk_id + return batch diff --git a/paddlespeech/t2s/exps/vits/synthesize.py b/paddlespeech/t2s/exps/vits/synthesize.py index 074b890f9..968684b25 100644 --- a/paddlespeech/t2s/exps/vits/synthesize.py +++ b/paddlespeech/t2s/exps/vits/synthesize.py @@ -15,6 +15,7 @@ import argparse from pathlib import Path import jsonlines +import numpy as np import paddle import soundfile as sf import yaml @@ -23,6 +24,7 @@ from yacs.config import CfgNode from paddlespeech.t2s.datasets.data_table import DataTable from paddlespeech.t2s.models.vits import VITS +from paddlespeech.t2s.utils import str2bool def evaluate(args): @@ -40,8 +42,26 @@ def evaluate(args): print(config) fields = ["utt_id", "text"] + converters = {} + + spk_num = None + if args.speaker_dict is not None: + print("multiple speaker vits!") + with open(args.speaker_dict, 'rt') as f: + spk_id = [line.strip().split() for line in f.readlines()] + spk_num = len(spk_id) + fields += ["spk_id"] + elif args.voice_cloning: + print("Evaluating voice cloning!") + fields += ["spk_emb"] + else: + print("single speaker vits!") + print("spk_num:", spk_num) - test_dataset = DataTable(data=test_metadata, fields=fields) + test_dataset = DataTable( + data=test_metadata, + fields=fields, + converters=converters, ) with open(args.phones_dict, "r") as f: phn_id = [line.strip().split() for line in f.readlines()] @@ -49,6 +69,7 @@ def evaluate(args): print("vocab_size:", vocab_size) odim = config.n_fft // 2 + 1 + config["model"]["generator_params"]["spks"] = spk_num vits = VITS(idim=vocab_size, odim=odim, **config["model"]) vits.set_state_dict(paddle.load(args.ckpt)["main_params"]) @@ -65,7 +86,15 @@ def evaluate(args): phone_ids = paddle.to_tensor(datum["text"]) with timer() as t: with paddle.no_grad(): - out = vits.inference(text=phone_ids) + spk_emb = None + spk_id = None + # multi speaker + if args.voice_cloning and "spk_emb" in datum: + spk_emb = paddle.to_tensor(np.load(datum["spk_emb"])) + elif "spk_id" in datum: + spk_id = paddle.to_tensor(datum["spk_id"]) + out = vits.inference( + text=phone_ids, sids=spk_id, spembs=spk_emb) wav = out["wav"] wav = wav.numpy() N += wav.size @@ -90,6 +119,13 @@ def 
parse_args(): '--ckpt', type=str, default=None, help='Checkpoint file of VITS.') parser.add_argument( "--phones_dict", type=str, default=None, help="phone vocabulary file.") + parser.add_argument( + "--speaker_dict", type=str, default=None, help="speaker id map file.") + parser.add_argument( + "--voice-cloning", + type=str2bool, + default=False, + help="whether training voice cloning model.") # other parser.add_argument( "--ngpu", type=int, default=1, help="if ngpu == 0, use cpu.") diff --git a/paddlespeech/t2s/exps/vits/synthesize_e2e.py b/paddlespeech/t2s/exps/vits/synthesize_e2e.py index 33a413751..f9d10ea62 100644 --- a/paddlespeech/t2s/exps/vits/synthesize_e2e.py +++ b/paddlespeech/t2s/exps/vits/synthesize_e2e.py @@ -42,12 +42,23 @@ def evaluate(args): # frontend frontend = get_frontend(lang=args.lang, phones_dict=args.phones_dict) + spk_num = None + if args.speaker_dict is not None: + print("multiple speaker vits!") + with open(args.speaker_dict, 'rt') as f: + spk_id = [line.strip().split() for line in f.readlines()] + spk_num = len(spk_id) + else: + print("single speaker vits!") + print("spk_num:", spk_num) + with open(args.phones_dict, "r") as f: phn_id = [line.strip().split() for line in f.readlines()] vocab_size = len(phn_id) print("vocab_size:", vocab_size) odim = config.n_fft // 2 + 1 + config["model"]["generator_params"]["spks"] = spk_num vits = VITS(idim=vocab_size, odim=odim, **config["model"]) vits.set_state_dict(paddle.load(args.ckpt)["main_params"]) @@ -78,7 +89,10 @@ def evaluate(args): flags = 0 for i in range(len(phone_ids)): part_phone_ids = phone_ids[i] - out = vits.inference(text=part_phone_ids) + spk_id = None + if spk_num is not None: + spk_id = paddle.to_tensor(args.spk_id) + out = vits.inference(text=part_phone_ids, sids=spk_id) wav = out["wav"] if flags == 0: wav_all = wav @@ -109,6 +123,13 @@ def parse_args(): '--ckpt', type=str, default=None, help='Checkpoint file of VITS.') parser.add_argument( "--phones_dict", type=str, default=None, help="phone vocabulary file.") + parser.add_argument( + "--speaker_dict", type=str, default=None, help="speaker id map file.") + parser.add_argument( + '--spk_id', + type=int, + default=0, + help='spk id for multi speaker acoustic model') # other parser.add_argument( '--lang', diff --git a/paddlespeech/t2s/exps/vits/train.py b/paddlespeech/t2s/exps/vits/train.py index 1a68d1326..c994faa5a 100644 --- a/paddlespeech/t2s/exps/vits/train.py +++ b/paddlespeech/t2s/exps/vits/train.py @@ -28,6 +28,7 @@ from paddle.io import DistributedBatchSampler from paddle.optimizer import Adam from yacs.config import CfgNode +from paddlespeech.t2s.datasets.am_batch_fn import vits_multi_spk_batch_fn from paddlespeech.t2s.datasets.am_batch_fn import vits_single_spk_batch_fn from paddlespeech.t2s.datasets.data_table import DataTable from paddlespeech.t2s.models.vits import VITS @@ -43,6 +44,7 @@ from paddlespeech.t2s.training.extensions.visualizer import VisualDL from paddlespeech.t2s.training.optimizer import scheduler_classes from paddlespeech.t2s.training.seeding import seed_everything from paddlespeech.t2s.training.trainer import Trainer +from paddlespeech.t2s.utils import str2bool def train_sp(args, config): @@ -72,6 +74,23 @@ def train_sp(args, config): "wave": np.load, "feats": np.load, } + spk_num = None + if args.speaker_dict is not None: + print("multiple speaker vits!") + collate_fn = vits_multi_spk_batch_fn + with open(args.speaker_dict, 'rt') as f: + spk_id = [line.strip().split() for line in f.readlines()] + spk_num = len(spk_id) + 
fields += ["spk_id"] + elif args.voice_cloning: + print("Training voice cloning!") + collate_fn = vits_multi_spk_batch_fn + fields += ["spk_emb"] + converters["spk_emb"] = np.load + else: + print("single speaker vits!") + collate_fn = vits_single_spk_batch_fn + print("spk_num:", spk_num) # construct dataset for training and validation with jsonlines.open(args.train_metadata, 'r') as reader: @@ -100,18 +119,16 @@ def train_sp(args, config): drop_last=False) print("samplers done!") - train_batch_fn = vits_single_spk_batch_fn - train_dataloader = DataLoader( train_dataset, batch_sampler=train_sampler, - collate_fn=train_batch_fn, + collate_fn=collate_fn, num_workers=config.num_workers) dev_dataloader = DataLoader( dev_dataset, batch_sampler=dev_sampler, - collate_fn=train_batch_fn, + collate_fn=collate_fn, num_workers=config.num_workers) print("dataloaders done!") @@ -121,6 +138,7 @@ def train_sp(args, config): print("vocab_size:", vocab_size) odim = config.n_fft // 2 + 1 + config["model"]["generator_params"]["spks"] = spk_num model = VITS(idim=vocab_size, odim=odim, **config["model"]) gen_parameters = model.generator.parameters() dis_parameters = model.discriminator.parameters() @@ -240,6 +258,17 @@ def main(): "--ngpu", type=int, default=1, help="if ngpu == 0, use cpu.") parser.add_argument( "--phones-dict", type=str, default=None, help="phone vocabulary file.") + parser.add_argument( + "--speaker-dict", + type=str, + default=None, + help="speaker id map file for multiple speaker model.") + + parser.add_argument( + "--voice-cloning", + type=str2bool, + default=False, + help="whether training voice cloning model.") args = parser.parse_args() diff --git a/paddlespeech/t2s/exps/vits/voice_cloning.py b/paddlespeech/t2s/exps/vits/voice_cloning.py new file mode 100644 index 000000000..bdda4d687 --- /dev/null +++ b/paddlespeech/t2s/exps/vits/voice_cloning.py @@ -0,0 +1,213 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse +import os +from pathlib import Path + +import librosa +import numpy as np +import paddle +import soundfile as sf +import yaml +from yacs.config import CfgNode + +from paddlespeech.t2s.datasets.get_feats import LinearSpectrogram +from paddlespeech.t2s.exps.syn_utils import get_frontend +from paddlespeech.t2s.models.vits import VITS +from paddlespeech.t2s.utils import str2bool +from paddlespeech.vector.exps.ge2e.audio_processor import SpeakerVerificationPreprocessor +from paddlespeech.vector.models.lstm_speaker_encoder import LSTMSpeakerEncoder + + +def voice_cloning(args): + + # Init body. 
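+    # the steps below load the config, then build the linear-spectrogram extractor,
+    # the GE2E speaker encoder, the text frontend and the VITS model itself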
+ with open(args.config) as f: + config = CfgNode(yaml.safe_load(f)) + + print("========Args========") + print(yaml.safe_dump(vars(args))) + print("========Config========") + print(config) + + # speaker encoder + spec_extractor = LinearSpectrogram( + n_fft=config.n_fft, + hop_length=config.n_shift, + win_length=config.win_length, + window=config.window) + p = SpeakerVerificationPreprocessor( + sampling_rate=16000, + audio_norm_target_dBFS=-30, + vad_window_length=30, + vad_moving_average_width=8, + vad_max_silence_length=6, + mel_window_length=25, + mel_window_step=10, + n_mels=40, + partial_n_frames=160, + min_pad_coverage=0.75, + partial_overlap_ratio=0.5) + print("Audio Processor Done!") + + speaker_encoder = LSTMSpeakerEncoder( + n_mels=40, num_layers=3, hidden_size=256, output_size=256) + speaker_encoder.set_state_dict(paddle.load(args.ge2e_params_path)) + speaker_encoder.eval() + print("GE2E Done!") + + frontend = get_frontend(lang=args.lang, phones_dict=args.phones_dict) + print("frontend done!") + + with open(args.phones_dict, "r") as f: + phn_id = [line.strip().split() for line in f.readlines()] + vocab_size = len(phn_id) + print("vocab_size:", vocab_size) + + odim = config.n_fft // 2 + 1 + + vits = VITS(idim=vocab_size, odim=odim, **config["model"]) + vits.set_state_dict(paddle.load(args.ckpt)["main_params"]) + vits.eval() + + output_dir = Path(args.output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + + input_dir = Path(args.input_dir) + + if args.audio_path == "": + args.audio_path = None + if args.audio_path is None: + sentence = args.text + merge_sentences = True + add_blank = args.add_blank + + if args.lang == 'zh': + input_ids = frontend.get_input_ids( + sentence, merge_sentences=merge_sentences, add_blank=add_blank) + elif args.lang == 'en': + input_ids = frontend.get_input_ids( + sentence, merge_sentences=merge_sentences) + phone_ids = input_ids["phone_ids"][0] + else: + wav, _ = librosa.load(str(args.audio_path), sr=config.fs) + feats = paddle.to_tensor(spec_extractor.get_linear_spectrogram(wav)) + + mel_sequences = p.extract_mel_partials( + p.preprocess_wav(args.audio_path)) + with paddle.no_grad(): + spk_emb_src = speaker_encoder.embed_utterance( + paddle.to_tensor(mel_sequences)) + + for name in os.listdir(input_dir): + utt_id = name.split(".")[0] + ref_audio_path = input_dir / name + mel_sequences = p.extract_mel_partials(p.preprocess_wav(ref_audio_path)) + # print("mel_sequences: ", mel_sequences.shape) + with paddle.no_grad(): + spk_emb = speaker_encoder.embed_utterance( + paddle.to_tensor(mel_sequences)) + # print("spk_emb shape: ", spk_emb.shape) + + with paddle.no_grad(): + if args.audio_path is None: + out = vits.inference(text=phone_ids, spembs=spk_emb) + else: + out = vits.voice_conversion( + feats=feats, spembs_src=spk_emb_src, spembs_tgt=spk_emb) + wav = out["wav"] + + sf.write( + str(output_dir / (utt_id + ".wav")), + wav.numpy(), + samplerate=config.fs) + print(f"{utt_id} done!") + # Randomly generate numbers of 0 ~ 0.2, 256 is the dim of spk_emb + random_spk_emb = np.random.rand(256) * 0.2 + random_spk_emb = paddle.to_tensor(random_spk_emb, dtype='float32') + utt_id = "random_spk_emb" + with paddle.no_grad(): + if args.audio_path is None: + out = vits.inference(text=phone_ids, spembs=random_spk_emb) + else: + out = vits.voice_conversion( + feats=feats, spembs_src=spk_emb_src, spembs_tgt=random_spk_emb) + wav = out["wav"] + sf.write( + str(output_dir / (utt_id + ".wav")), wav.numpy(), samplerate=config.fs) + print(f"{utt_id} done!") + + +def 
parse_args(): + # parse args and config + parser = argparse.ArgumentParser(description="") + parser.add_argument( + '--config', type=str, default=None, help='Config of VITS.') + parser.add_argument( + '--ckpt', type=str, default=None, help='Checkpoint file of VITS.') + parser.add_argument( + "--phones_dict", type=str, default=None, help="phone vocabulary file.") + parser.add_argument( + "--text", + type=str, + default="每当你觉得,想要批评什么人的时候,你切要记着,这个世界上的人,并非都具备你禀有的条件。", + help="text to synthesize, a line") + parser.add_argument( + '--lang', + type=str, + default='zh', + help='Choose model language. zh or en') + parser.add_argument( + "--audio-path", + type=str, + default=None, + help="audio as content to synthesize") + + parser.add_argument( + "--ge2e_params_path", type=str, help="ge2e params path.") + + parser.add_argument( + "--ngpu", type=int, default=1, help="if ngpu=0, use cpu.") + + parser.add_argument( + "--input-dir", + type=str, + help="input dir of *.wav, the sample rate will be resample to 16k.") + parser.add_argument("--output-dir", type=str, help="output dir.") + + parser.add_argument( + "--add-blank", + type=str2bool, + default=True, + help="whether to add blank between phones") + + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + if args.ngpu == 0: + paddle.set_device("cpu") + elif args.ngpu > 0: + paddle.set_device("gpu") + else: + print("ngpu should >= 0 !") + + voice_cloning(args) + + +if __name__ == "__main__": + main() diff --git a/paddlespeech/t2s/models/vits/generator.py b/paddlespeech/t2s/models/vits/generator.py index f87de91a2..359b66258 100644 --- a/paddlespeech/t2s/models/vits/generator.py +++ b/paddlespeech/t2s/models/vits/generator.py @@ -522,6 +522,82 @@ class VITSGenerator(nn.Layer): return wav.squeeze(1), attn.squeeze(1), dur.squeeze(1) + def voice_conversion( + self, + feats: paddle.Tensor=None, + feats_lengths: paddle.Tensor=None, + sids_src: Optional[paddle.Tensor]=None, + sids_tgt: Optional[paddle.Tensor]=None, + spembs_src: Optional[paddle.Tensor]=None, + spembs_tgt: Optional[paddle.Tensor]=None, + lids: Optional[paddle.Tensor]=None, ) -> paddle.Tensor: + """Run voice conversion. + Args: + feats (Tensor): Feature tensor (B, aux_channels, T_feats,). + feats_lengths (Tensor): Feature length tensor (B,). + sids_src (Optional[Tensor]): Speaker index tensor of source feature (B,) or (B, 1). + sids_tgt (Optional[Tensor]): Speaker index tensor of target feature (B,) or (B, 1). + spembs_src (Optional[Tensor]): Speaker embedding tensor of source feature (B, spk_embed_dim). + spembs_tgt (Optional[Tensor]): Speaker embedding tensor of target feature (B, spk_embed_dim). + lids (Optional[Tensor]): Language index tensor (B,) or (B, 1). + Returns: + Tensor: Generated waveform tensor (B, T_wav). 
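+        Note:
+            The source features are encoded by the posterior encoder and mapped
+            through the flow using the source speaker's global embedding; the
+            flow is then inverted and the decoder is run with the target
+            speaker's embedding to produce the converted waveform.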
+ """ + # encoder + g_src = None + g_tgt = None + if self.spks is not None: + # (B, global_channels, 1) + g_src = self.global_emb( + paddle.reshape(sids_src, [-1])).unsqueeze(-1) + g_tgt = self.global_emb( + paddle.reshape(sids_tgt, [-1])).unsqueeze(-1) + + if self.spk_embed_dim is not None: + # (B, global_channels, 1) + g_src_ = self.spemb_proj( + F.normalize(spembs_src.unsqueeze(0))).unsqueeze(-1) + if g_src is None: + g_src = g_src_ + else: + g_src = g_src + g_src_ + + # (B, global_channels, 1) + g_tgt_ = self.spemb_proj( + F.normalize(spembs_tgt.unsqueeze(0))).unsqueeze(-1) + if g_tgt is None: + g_tgt = g_tgt_ + else: + g_tgt = g_tgt + g_tgt_ + + if self.langs is not None: + # (B, global_channels, 1) + g_ = self.lang_emb(paddle.reshape(lids, [-1])).unsqueeze(-1) + + if g_src is None: + g_src = g_ + else: + g_src = g_src + g_ + + if g_tgt is None: + g_tgt = g_ + else: + g_tgt = g_tgt + g_ + + # forward posterior encoder + z, m_q, logs_q, y_mask = self.posterior_encoder( + feats, feats_lengths, g=g_src) + + # forward flow + # (B, H, T_feats) + z_p = self.flow(z, y_mask, g=g_src) + + # decoder + z_hat = self.flow(z_p, y_mask, g=g_tgt, inverse=True) + wav = self.decoder(z_hat * y_mask, g=g_tgt) + + return wav.squeeze(1) + def _generate_path(self, dur: paddle.Tensor, mask: paddle.Tensor) -> paddle.Tensor: """Generate path a.k.a. monotonic attention. diff --git a/paddlespeech/t2s/models/vits/vits.py b/paddlespeech/t2s/models/vits/vits.py index 5c476be77..983bf0a36 100644 --- a/paddlespeech/t2s/models/vits/vits.py +++ b/paddlespeech/t2s/models/vits/vits.py @@ -381,7 +381,7 @@ class VITS(nn.Layer): if use_teacher_forcing: assert feats is not None feats = feats[None].transpose([0, 2, 1]) - feats_lengths = paddle.to_tensor([paddle.shape(feats)[2]]) + feats_lengths = paddle.to_tensor(paddle.shape(feats)[2]) wav, att_w, dur = self.generator.inference( text=text, text_lengths=text_lengths, @@ -406,3 +406,43 @@ class VITS(nn.Layer): max_len=max_len, ) return dict( wav=paddle.reshape(wav, [-1]), att_w=att_w[0], duration=dur[0]) + + def voice_conversion( + self, + feats: paddle.Tensor, + sids_src: Optional[paddle.Tensor]=None, + sids_tgt: Optional[paddle.Tensor]=None, + spembs_src: Optional[paddle.Tensor]=None, + spembs_tgt: Optional[paddle.Tensor]=None, + lids: Optional[paddle.Tensor]=None, ) -> paddle.Tensor: + """Run voice conversion. + Args: + feats (Tensor): Feature tensor (T_feats, aux_channels). + sids_src (Optional[Tensor]): Speaker index tensor of source feature (1,). + sids_tgt (Optional[Tensor]): Speaker index tensor of target feature (1,). + spembs_src (Optional[Tensor]): Speaker embedding tensor of source feature (spk_embed_dim,). + spembs_tgt (Optional[Tensor]): Speaker embedding tensor of target feature (spk_embed_dim,). + lids (Optional[Tensor]): Language index tensor (1,). + Returns: + Dict[str, Tensor]: + * wav (Tensor): Generated waveform tensor (T_wav,). 
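+        Note:
+            At least one of the speaker id pair (sids_src, sids_tgt) or the
+            speaker embedding pair (spembs_src, spembs_tgt) must be provided.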
+ """ + assert feats is not None + feats = feats[None].transpose([0, 2, 1]) + feats_lengths = paddle.to_tensor(paddle.shape(feats)[2]) + + sids_none = sids_src is None and sids_tgt is None + spembs_none = spembs_src is None and spembs_tgt is None + + assert not sids_none or not spembs_none + + wav = self.generator.voice_conversion( + feats, + feats_lengths, + sids_src, + sids_tgt, + spembs_src, + spembs_tgt, + lids, ) + + return dict(wav=paddle.reshape(wav, [-1])) diff --git a/paddlespeech/t2s/models/vits/vits_updater.py b/paddlespeech/t2s/models/vits/vits_updater.py index 76271fd97..9f8be6803 100644 --- a/paddlespeech/t2s/models/vits/vits_updater.py +++ b/paddlespeech/t2s/models/vits/vits_updater.py @@ -111,6 +111,8 @@ class VITSUpdater(StandardUpdater): text_lengths=batch["text_lengths"], feats=batch["feats"], feats_lengths=batch["feats_lengths"], + sids=batch.get("spk_id", None), + spembs=batch.get("spk_emb", None), forward_generator=turn == "generator") # Generator if turn == "generator": @@ -268,6 +270,8 @@ class VITSEvaluator(StandardEvaluator): text_lengths=batch["text_lengths"], feats=batch["feats"], feats_lengths=batch["feats_lengths"], + sids=batch.get("spk_id", None), + spembs=batch.get("spk_emb", None), forward_generator=turn == "generator") # Generator if turn == "generator": From ab92e2c98ceab54f9aca9c94d083d10de2c44caa Mon Sep 17 00:00:00 2001 From: tianhao zhang <15600919271@163.com> Date: Tue, 6 Sep 2022 01:19:51 +0000 Subject: [PATCH 065/101] fix deepspeech2 decode_wav --- .../s2t/exps/deepspeech2/bin/test_wav.py | 65 ++++++++++--------- 1 file changed, 35 insertions(+), 30 deletions(-) diff --git a/paddlespeech/s2t/exps/deepspeech2/bin/test_wav.py b/paddlespeech/s2t/exps/deepspeech2/bin/test_wav.py index 90b7d8a18..66ea29d08 100644 --- a/paddlespeech/s2t/exps/deepspeech2/bin/test_wav.py +++ b/paddlespeech/s2t/exps/deepspeech2/bin/test_wav.py @@ -20,8 +20,8 @@ import paddle import soundfile from yacs.config import CfgNode +from paddlespeech.audio.transform.transformation import Transformation from paddlespeech.s2t.frontend.featurizer.text_featurizer import TextFeaturizer -from paddlespeech.s2t.io.collator import SpeechCollator from paddlespeech.s2t.models.ds2 import DeepSpeech2Model from paddlespeech.s2t.training.cli import default_argument_parser from paddlespeech.s2t.utils import mp_tools @@ -38,24 +38,24 @@ class DeepSpeech2Tester_hub(): self.args = args self.config = config self.audio_file = args.audio_file - self.collate_fn_test = SpeechCollator.from_config(config) - self._text_featurizer = TextFeaturizer( - unit_type=config.unit_type, vocab=None) - def compute_result_transcripts(self, audio, audio_len, vocab_list, cfg): - result_transcripts = self.model.decode( - audio, - audio_len, - vocab_list, - decoding_method=cfg.decoding_method, - lang_model_path=cfg.lang_model_path, - beam_alpha=cfg.alpha, - beam_beta=cfg.beta, - beam_size=cfg.beam_size, - cutoff_prob=cfg.cutoff_prob, - cutoff_top_n=cfg.cutoff_top_n, - num_processes=cfg.num_proc_bsearch) + self.preprocess_conf = config.preprocess_config + self.preprocess_args = {"train": False} + self.preprocessing = Transformation(self.preprocess_conf) + + self.text_feature = TextFeaturizer( + unit_type=config.unit_type, + vocab=config.vocab_filepath, + spm_model_prefix=config.spm_model_prefix) + paddle.set_device('gpu' if self.args.ngpu > 0 else 'cpu') + def compute_result_transcripts(self, audio, audio_len, vocab_list, cfg): + decode_batch_size = cfg.decode_batch_size + self.model.decoder.init_decoder( + decode_batch_size, 
vocab_list, cfg.decoding_method, + cfg.lang_model_path, cfg.alpha, cfg.beta, cfg.beam_size, + cfg.cutoff_prob, cfg.cutoff_top_n, cfg.num_proc_bsearch) + result_transcripts = self.model.decode(audio, audio_len) return result_transcripts @mp_tools.rank_zero_only @@ -64,16 +64,23 @@ class DeepSpeech2Tester_hub(): self.model.eval() cfg = self.config audio_file = self.audio_file - collate_fn_test = self.collate_fn_test - audio, _ = collate_fn_test.process_utterance( - audio_file=audio_file, transcript=" ") - audio_len = audio.shape[0] - audio = paddle.to_tensor(audio, dtype='float32') - audio_len = paddle.to_tensor(audio_len) - audio = paddle.unsqueeze(audio, axis=0) - vocab_list = collate_fn_test.vocab_list + + audio, sample_rate = soundfile.read( + self.audio_file, dtype="int16", always_2d=True) + + audio = audio[:, 0] + logger.info(f"audio shape: {audio.shape}") + + # fbank + feat = self.preprocessing(audio, **self.preprocess_args) + logger.info(f"feat shape: {feat.shape}") + + audio_len = paddle.to_tensor(feat.shape[0]) + audio = paddle.to_tensor(feat, dtype='float32').unsqueeze(axis=0) + result_transcripts = self.compute_result_transcripts( - audio, audio_len, vocab_list, cfg.decode) + audio, audio_len, self.text_feature.vocab_list, cfg.decode) + logger.info("result_transcripts: " + result_transcripts[0]) def run_test(self): @@ -109,11 +116,9 @@ class DeepSpeech2Tester_hub(): def setup_model(self): config = self.config.clone() with UpdateConfig(config): - config.input_dim = self.collate_fn_test.feature_size - config.output_dim = self.collate_fn_test.vocab_size - + config.input_dim = config.feat_dim + config.output_dim = self.text_feature.vocab_size model = DeepSpeech2Model.from_config(config) - self.model = model def setup_checkpointer(self): From 975ceb811a9f5ec6045f9bbfe541fe0c16f086c7 Mon Sep 17 00:00:00 2001 From: Zhao Yuting <91456992+THUzyt21@users.noreply.github.com> Date: Tue, 6 Sep 2022 12:50:34 +0800 Subject: [PATCH 066/101] fix the bug of training mdtc_bs16_fp32 solve the bug of training mdtc_bs16_fp32 --- tests/test_tipc/prepare.sh | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/tests/test_tipc/prepare.sh b/tests/test_tipc/prepare.sh index b38bbcba1..2a2272813 100644 --- a/tests/test_tipc/prepare.sh +++ b/tests/test_tipc/prepare.sh @@ -22,7 +22,7 @@ model_name=$(func_parser_value "${lines[1]}") echo "model_name:"${model_name} trainer_list=$(func_parser_value "${lines[14]}") -if [ ${MODE} = "benchmark_train" ];then +if [[ ${MODE} = "benchmark_train" ]];then curPath=$(readlink -f "$(dirname "$0")") echo "curPath:"${curPath} # /PaddleSpeech/tests/test_tipc cd ${curPath}/../.. 
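The change from `[ ... ]` to `[[ ... ]]` above matters because `[` is an ordinary command whose unquoted arguments are word-split before the test runs, while `[[ ... ]]` is a bash keyword that does not split its operands. A minimal illustration (the values are made up, not taken from the repo):

```bash
MODE=""                                            # e.g. the variable is empty or unset
[ ${MODE} == "benchmark_train" ] && echo match     # expands to `[ == benchmark_train ]` and the test errors out
[[ ${MODE} == "benchmark_train" ]] && echo match   # evaluates to false, no error
```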
@@ -36,10 +36,10 @@ if [ ${MODE} = "benchmark_train" ];then pip install jsonlines pip list cd - - if [ ${model_name} == "conformer" ]; then + if [[ ${model_name} == "conformer" ]]; then # set the URL for aishell_tiny dataset conformer_aishell_URL=${conformer_aishell_URL:-"None"} - if [ ${conformer_aishell_URL} == 'None' ];then + if [[ ${conformer_aishell_URL} == 'None' ]];then echo "please contact author to get the URL.\n" exit else @@ -66,7 +66,7 @@ if [ ${MODE} = "benchmark_train" ];then sed -i "s#data/#test_tipc/conformer/benchmark_train/data/#g" ${curPath}/conformer/benchmark_train/conf/preprocess.yaml fi - if [ ${model_name} == "pwgan" ]; then + if [[ ${model_name} == "pwgan" ]]; then # 下载 csmsc 数据集并解压缩 wget -nc https://weixinxcxdb.oss-cn-beijing.aliyuncs.com/gwYinPinKu/BZNSYP.rar mkdir -p BZNSYP @@ -80,9 +80,10 @@ if [ ${MODE} = "benchmark_train" ];then python ../paddlespeech/t2s/exps/gan_vocoder/normalize.py --metadata=dump/test/raw/metadata.jsonl --dumpdir=dump/test/norm --stats=dump/train/feats_stats.npy fi - if [ ${model_name} == "mdtc" ]; then + if [[ ${model_name} == "mdtc" ]]; then # 下载 Snips 数据集并解压缩 - wget -nc https://paddlespeech.bj.bcebos.com/datasets/hey_snips_kws_4.0.tar.gz.1 https://paddlespeech.bj.bcebos.com/datasets/hey_snips_https://paddlespeech.bj.bcebos.com/datasets/hey_snips_kws_4.0.tar.gz.2 + wget https://paddlespeech.bj.bcebos.com/datasets/hey_snips_kws_4.0.tar.gz.1 + wget https://paddlespeech.bj.bcebos.com/datasets/hey_snips_kws_4.0.tar.gz.2 cat hey_snips_kws_4.0.tar.gz.* > hey_snips_kws_4.0.tar.gz rm hey_snips_kws_4.0.tar.gz.* tar -xzf hey_snips_kws_4.0.tar.gz From 59e7444efef261e1a0ee69bf96c34e128052139d Mon Sep 17 00:00:00 2001 From: Zhao Yuting <91456992+THUzyt21@users.noreply.github.com> Date: Tue, 6 Sep 2022 12:51:56 +0800 Subject: [PATCH 067/101] solve the bug of training mdtc_bs16_fp32 fix the filepath bug --- examples/hey_snips/kws0/conf/mdtc.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/hey_snips/kws0/conf/mdtc.yaml b/examples/hey_snips/kws0/conf/mdtc.yaml index 76e47bc7c..54d059472 100644 --- a/examples/hey_snips/kws0/conf/mdtc.yaml +++ b/examples/hey_snips/kws0/conf/mdtc.yaml @@ -3,7 +3,7 @@ # Data # ########################################### dataset: 'paddlespeech.audio.datasets:HeySnips' -data_dir: '/PATH/TO/DATA/hey_snips_research_6k_en_train_eval_clean_ter' +data_dir: '../tests/hey_snips_research_6k_en_train_eval_clean_ter' ############################################ # Network Architecture # @@ -46,4 +46,4 @@ num_workers: 16 checkpoint: './checkpoint/epoch_100/model.pdparams' score_file: './scores.txt' stats_file: './stats.0.txt' -img_file: './det.png' \ No newline at end of file +img_file: './det.png' From 025faa03067f561d1fd5b32d000f493bb7d97a04 Mon Sep 17 00:00:00 2001 From: Ming Date: Tue, 6 Sep 2022 14:01:13 +0800 Subject: [PATCH 068/101] add finance info (#2353) --- README_cn.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/README_cn.md b/README_cn.md index 18bce43c4..21cd00a99 100644 --- a/README_cn.md +++ b/README_cn.md @@ -164,6 +164,22 @@ - 🧩 级联模型应用: 作为传统语音任务的扩展,我们结合了自然语言处理、计算机视觉等任务,实现更接近实际需求的产业级应用。 +### 近期活动 + + ❗️重磅❗️飞桨智慧金融行业系列直播课 +✅ 覆盖智能风控、智能运维、智能营销、智能客服四大金融主流场景 + +📆 9月6日-9月29日每周二、四19:00 ++ 智慧金融行业深入洞察 ++ 8节理论+实践精品直播课 ++ 10+真实产业场景范例教学及实践 ++ 更有免费算力+结业证书等礼品等你来拿 +扫码报名码住直播链接,与行业精英深度交流 + +
+<!-- 报名二维码图片 -->
+ ### 近期更新 - 👑 2022.05.13: PaddleSpeech 发布 [PP-ASR](./docs/source/asr/PPASR_cn.md) 流式语音识别系统、[PP-TTS](./docs/source/tts/PPTTS_cn.md) 流式语音合成系统、[PP-VPR](docs/source/vpr/PPVPR_cn.md) 全链路声纹识别系统 From efaa5400e8f69acfd04c2a6fc1bcf3089e7d4d7f Mon Sep 17 00:00:00 2001 From: TianYuan Date: Tue, 6 Sep 2022 15:57:45 +0800 Subject: [PATCH 069/101] Update requirements.txt --- docs/requirements.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/requirements.txt b/docs/requirements.txt index 11e94f48d..bd071e7e2 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,4 +1,5 @@ -braceexpandcolorlog +braceexpand +colorlog editdistance fastapi g2p_en From 1c30cff1bf02ef18dd84231cc8f999bfb1378ecb Mon Sep 17 00:00:00 2001 From: TianYuan Date: Tue, 6 Sep 2022 19:22:10 +0800 Subject: [PATCH 070/101] fix gpus of ernie_sat, test=tts (#2355) --- examples/aishell3/ernie_sat/run.sh | 2 +- examples/aishell3_vctk/ernie_sat/run.sh | 2 +- examples/vctk/ernie_sat/run.sh | 2 +- paddlespeech/t2s/datasets/sampler.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/aishell3/ernie_sat/run.sh b/examples/aishell3/ernie_sat/run.sh index cb354de41..d3efefe0c 100755 --- a/examples/aishell3/ernie_sat/run.sh +++ b/examples/aishell3/ernie_sat/run.sh @@ -3,7 +3,7 @@ set -e source path.sh -gpus=0,1 +gpus=0,1,2,3,4,5,6,7 stage=0 stop_stage=100 diff --git a/examples/aishell3_vctk/ernie_sat/run.sh b/examples/aishell3_vctk/ernie_sat/run.sh index 5509fc4ad..8cd9d8d1b 100755 --- a/examples/aishell3_vctk/ernie_sat/run.sh +++ b/examples/aishell3_vctk/ernie_sat/run.sh @@ -3,7 +3,7 @@ set -e source path.sh -gpus=0,1 +gpus=0,1,2,3,4,5,6,7 stage=0 stop_stage=100 diff --git a/examples/vctk/ernie_sat/run.sh b/examples/vctk/ernie_sat/run.sh index 94d130d41..cb80d1ad8 100755 --- a/examples/vctk/ernie_sat/run.sh +++ b/examples/vctk/ernie_sat/run.sh @@ -3,7 +3,7 @@ set -e source path.sh -gpus=0,1 +gpus=0,1,2,3,4,5,6,7 stage=0 stop_stage=100 diff --git a/paddlespeech/t2s/datasets/sampler.py b/paddlespeech/t2s/datasets/sampler.py index 3c97d1dc4..cbc9764c5 100644 --- a/paddlespeech/t2s/datasets/sampler.py +++ b/paddlespeech/t2s/datasets/sampler.py @@ -71,7 +71,7 @@ class ErnieSATSampler(BatchSampler): assert isinstance(drop_last, bool), \ "drop_last should be a boolean number" - from paddle.fluid.dygraph.parallel import ParallelEnv + from paddle.distributed import ParallelEnv if num_replicas is not None: assert isinstance(num_replicas, int) and num_replicas > 0, \ From 79f017aedd36c31637bd77604a99a6a8b8842235 Mon Sep 17 00:00:00 2001 From: WongLaw Date: Wed, 7 Sep 2022 05:49:17 +0000 Subject: [PATCH 071/101] Add Chinese doc and language switcher for demos of metaverse, style_fs2 and story talker, test=doc --- demos/story_talker/README.md | 2 ++ demos/story_talker/README_cn.md | 20 ++++++++++++++++++++ 2 files changed, 22 insertions(+) create mode 100644 demos/story_talker/README_cn.md diff --git a/demos/story_talker/README.md b/demos/story_talker/README.md index 62414383b..58d2db959 100644 --- a/demos/story_talker/README.md +++ b/demos/story_talker/README.md @@ -1,3 +1,5 @@ +([简体中文](./README_cn.md)|English) + # Story Talker ## Introduction Storybooks are very important children's enlightenment books, but parents usually don't have enough time to read storybooks for their children. For very young children, they may not understand the Chinese characters in storybooks. Or sometimes, children just want to "listen" but don't want to "read". 
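As a rough sketch of the OCR-then-TTS idea the story_talker README describes (this is not the demo's actual `run.sh` pipeline; the file names are placeholders, and the exact shape of PaddleOCR's result list differs across paddleocr releases):

```python
from paddleocr import PaddleOCR
from paddlespeech.cli.tts import TTSExecutor

ocr = PaddleOCR(use_angle_cls=True, lang="ch")
tts = TTSExecutor()

# recognize the text on one storybook page
result = ocr.ocr("storybook_page.png", cls=True)
# each item is [bounding_box, (text, confidence)]; newer paddleocr releases
# wrap the result per page, in which case iterate over result[0] instead
text = "".join(item[1][0] for item in result)

# read the recognized text aloud with the default Chinese TTS models
tts(text=text, output="storybook_page.wav")
```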
diff --git a/demos/story_talker/README_cn.md b/demos/story_talker/README_cn.md new file mode 100644 index 000000000..1f14c6e3b --- /dev/null +++ b/demos/story_talker/README_cn.md @@ -0,0 +1,20 @@ + +(简体中文|[English](./README.md)) + +# Story Talker + +## 简介 + +故事书是非常重要的儿童启蒙书,但家长通常没有足够的时间为孩子读故事书。对于非常小的孩子,他们可能不理解故事书中的汉字。或有时,孩子们只是想“听”,而不想“读”。 + +您可以使用`PaddleOCR` 获取故事书的文本,并通过`PaddleSpeech`的 `TTS` 模块进行阅读。 + +## 使用 + +运行以下命令行开始: + +``` +./run.sh +``` + +结果已显示在 [notebook](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/docs/tutorial/tts/tts_tutorial.ipynb)。 From 1459162a726415679c8bbc4edbf32b60c68bb75f Mon Sep 17 00:00:00 2001 From: WongLaw Date: Wed, 7 Sep 2022 06:02:55 +0000 Subject: [PATCH 072/101] Add Chinese doc and language switcher for demos of metaverse, style_fs2 and story talker, test=doc --- demos/metaverse/README.md | 2 ++ demos/metaverse/README_cn.md | 27 +++++++++++++++++++++++++++ demos/style_fs2/README.md | 2 ++ demos/style_fs2/README_cn.md | 33 +++++++++++++++++++++++++++++++++ 4 files changed, 64 insertions(+) create mode 100644 demos/metaverse/README_cn.md create mode 100644 demos/style_fs2/README_cn.md diff --git a/demos/metaverse/README.md b/demos/metaverse/README.md index e458256a8..2c6b0d3ee 100644 --- a/demos/metaverse/README.md +++ b/demos/metaverse/README.md @@ -1,3 +1,5 @@ +([简体中文](./README_cn.md)|English) + # Metaverse ## Introduction Metaverse is a new Internet application and social form integrating virtual reality produced by integrating a variety of new technologies. diff --git a/demos/metaverse/README_cn.md b/demos/metaverse/README_cn.md new file mode 100644 index 000000000..f252789bf --- /dev/null +++ b/demos/metaverse/README_cn.md @@ -0,0 +1,27 @@ +(简体中文|[English](./README.md)) + +# Metaverse + +## 简介 + +Metaverse是一种新的互联网应用和社交形式,融合了多种新技术,产生了虚拟现实。 + +这个演示是一个让图片中的名人“说话”的实现。通过 `PaddleSpeech` 和 `PaddleGAN`的 `TTS` 模块的组合,我们集成了安装和特定模块到一个shell脚本中。 + +## 使用 + +您可以使用 `PaddleSpeech` 和`PaddleGAN`的 `TTS` 模块让您最喜欢的人说出指定的内容,并构建您的虚拟人。 + +运行 `run.sh` 完成所有基本程序,包括安装。 + +```bash +./run.sh +``` + +在 `run.sh`, 先会执行 `source path.sh` 来设置好环境变量。 + +如果您想尝试您的句子,请替换`sentences.txt`中的句子。 + +如果您想尝试图像,请将图像替换shell脚本中的`download/Lamarr.png`。 + +结果已显示在我们的 [notebook](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/docs/tutorial/tts/tts_tutorial.ipynb)。 diff --git a/demos/style_fs2/README.md b/demos/style_fs2/README.md index 123230b8f..618c74789 100644 --- a/demos/style_fs2/README.md +++ b/demos/style_fs2/README.md @@ -1,3 +1,5 @@ +([简体中文](./README_cn.md)|English) + # Style FastSpeech2 ## Introduction [FastSpeech2](https://arxiv.org/abs/2006.04558) is a classical acoustic model for Text-to-Speech synthesis, which introduces controllable speech input, including `phoneme duration`、 `energy` and `pitch`. diff --git a/demos/style_fs2/README_cn.md b/demos/style_fs2/README_cn.md new file mode 100644 index 000000000..c2d36cddd --- /dev/null +++ b/demos/style_fs2/README_cn.md @@ -0,0 +1,33 @@ +(简体中文|[English](./README.md)) + +# Style FastSpeech2 + +## 简介 + +[FastSpeech2](https://arxiv.org/abs/2006.04558) 是用于语音合成的经典声学模型,它引入了可控语音输入,包括 `phoneme duration`、 `energy` 和 `pitch`。 + +在预测阶段,您可以更改这些变量以获得一些有趣的结果。 + +例如: + +1. `FastSpeech2` 中的`duration` 可以控制音频的速度 ,并保持`pitch`。(在某些语音工具中,增加速度将增加音调,反之亦然。) +2. 当我们将一个句子的`pitch` 设置为平均值并将音素的`tones`设置为 `1`时,我们将获得 `robot-style` 的音色。 +3. 
当我们提高成年女性的`pitch` (比例固定)时,我们会得到 `child-style` 的音色。 + +句子中不同音素的 `duration` 和 `pitch`可以具有不同的比例。您可以设置不同的音阶比例来强调或削弱某些音素的发音。 + +## 运行 + +运行以下命令行开始: + +``` +./run.sh +``` + +在 `run.sh`, 会首先执行 `source path.sh` 去设置好环境变量。 + +如果您想尝试您的句子,请替换 `sentences.txt`中的句子。 + +更多的细节,请查看 `style_syn.py`。 + +语音样例可以在 [style-control-in-fastspeech2](https://paddlespeech.readthedocs.io/en/latest/tts/demo.html#style-control-in-fastspeech2) 查看。 From fecde703712407ef07f495a759b55eda93313b65 Mon Sep 17 00:00:00 2001 From: WongLaw Date: Wed, 7 Sep 2022 07:07:34 +0000 Subject: [PATCH 073/101] Revised the Chinese doc, test=doc --- demos/metaverse/README_cn.md | 10 +++++----- demos/story_talker/README_cn.md | 2 +- demos/style_fs2/README_cn.md | 10 +++++----- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/demos/metaverse/README_cn.md b/demos/metaverse/README_cn.md index f252789bf..02e6ea9a3 100644 --- a/demos/metaverse/README_cn.md +++ b/demos/metaverse/README_cn.md @@ -4,13 +4,13 @@ ## 简介 -Metaverse是一种新的互联网应用和社交形式,融合了多种新技术,产生了虚拟现实。 +Metaverse 是一种新的互联网应用和社交形式,融合了多种新技术,产生了虚拟现实。 -这个演示是一个让图片中的名人“说话”的实现。通过 `PaddleSpeech` 和 `PaddleGAN`的 `TTS` 模块的组合,我们集成了安装和特定模块到一个shell脚本中。 +这个演示是一个让图片中的名人“说话”的实现。通过 `PaddleSpeech` 和 `PaddleGAN` 的 `TTS` 模块的组合,我们集成了安装和特定模块到一个 shell 脚本中。 ## 使用 -您可以使用 `PaddleSpeech` 和`PaddleGAN`的 `TTS` 模块让您最喜欢的人说出指定的内容,并构建您的虚拟人。 +您可以使用 `PaddleSpeech` 和 `PaddleGAN` 的 `TTS` 模块让您最喜欢的人说出指定的内容,并构建您的虚拟人。 运行 `run.sh` 完成所有基本程序,包括安装。 @@ -20,8 +20,8 @@ Metaverse是一种新的互联网应用和社交形式,融合了多种新技 在 `run.sh`, 先会执行 `source path.sh` 来设置好环境变量。 -如果您想尝试您的句子,请替换`sentences.txt`中的句子。 +如果您想尝试您的句子,请替换 `sentences.txt` 中的句子。 -如果您想尝试图像,请将图像替换shell脚本中的`download/Lamarr.png`。 +如果您想尝试图像,请将图像替换 shell 脚本中的 `download/Lamarr.png` 。 结果已显示在我们的 [notebook](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/docs/tutorial/tts/tts_tutorial.ipynb)。 diff --git a/demos/story_talker/README_cn.md b/demos/story_talker/README_cn.md index 1f14c6e3b..7c4789ddf 100644 --- a/demos/story_talker/README_cn.md +++ b/demos/story_talker/README_cn.md @@ -7,7 +7,7 @@ 故事书是非常重要的儿童启蒙书,但家长通常没有足够的时间为孩子读故事书。对于非常小的孩子,他们可能不理解故事书中的汉字。或有时,孩子们只是想“听”,而不想“读”。 -您可以使用`PaddleOCR` 获取故事书的文本,并通过`PaddleSpeech`的 `TTS` 模块进行阅读。 +您可以使用 `PaddleOCR` 获取故事书的文本,并通过 `PaddleSpeech` 的 `TTS` 模块进行阅读。 ## 使用 diff --git a/demos/style_fs2/README_cn.md b/demos/style_fs2/README_cn.md index c2d36cddd..5c74f691a 100644 --- a/demos/style_fs2/README_cn.md +++ b/demos/style_fs2/README_cn.md @@ -4,17 +4,17 @@ ## 简介 -[FastSpeech2](https://arxiv.org/abs/2006.04558) 是用于语音合成的经典声学模型,它引入了可控语音输入,包括 `phoneme duration`、 `energy` 和 `pitch`。 +[FastSpeech2](https://arxiv.org/abs/2006.04558) 是用于语音合成的经典声学模型,它引入了可控语音输入,包括 `phoneme duration` 、 `energy` 和 `pitch` 。 在预测阶段,您可以更改这些变量以获得一些有趣的结果。 例如: -1. `FastSpeech2` 中的`duration` 可以控制音频的速度 ,并保持`pitch`。(在某些语音工具中,增加速度将增加音调,反之亦然。) -2. 当我们将一个句子的`pitch` 设置为平均值并将音素的`tones`设置为 `1`时,我们将获得 `robot-style` 的音色。 -3. 当我们提高成年女性的`pitch` (比例固定)时,我们会得到 `child-style` 的音色。 +1. `FastSpeech2` 中的 `duration` 可以控制音频的速度 ,并保持 `pitch` 。(在某些语音工具中,增加速度将增加音调,反之亦然。) +2. 当我们将一个句子的 `pitch` 设置为平均值并将音素的 `tones` 设置为 `1` 时,我们将获得 `robot-style` 的音色。 +3. 
当我们提高成年女性的 `pitch` (比例固定)时,我们会得到 `child-style` 的音色。 -句子中不同音素的 `duration` 和 `pitch`可以具有不同的比例。您可以设置不同的音阶比例来强调或削弱某些音素的发音。 +句子中不同音素的 `duration` 和 `pitch` 可以具有不同的比例。您可以设置不同的音阶比例来强调或削弱某些音素的发音。 ## 运行 From 8dd4e13f5f3115828dee260e9f018eb9d4f42aa2 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Wed, 7 Sep 2022 15:24:15 +0800 Subject: [PATCH 074/101] Update README_cn.md --- demos/metaverse/README_cn.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/demos/metaverse/README_cn.md b/demos/metaverse/README_cn.md index 02e6ea9a3..de10f9a53 100644 --- a/demos/metaverse/README_cn.md +++ b/demos/metaverse/README_cn.md @@ -6,7 +6,7 @@ Metaverse 是一种新的互联网应用和社交形式,融合了多种新技术,产生了虚拟现实。 -这个演示是一个让图片中的名人“说话”的实现。通过 `PaddleSpeech` 和 `PaddleGAN` 的 `TTS` 模块的组合,我们集成了安装和特定模块到一个 shell 脚本中。 +这个演示是一个让图片中的名人“说话”的实现。通过 `PaddleSpeech` 的 `TTS` 模块和 `PaddleGAN` 的组合,我们集成了安装和特定模块到一个 shell 脚本中。 ## 使用 From 5f0c0b680e7f90c8150f069f5f17a9755800b8f1 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Wed, 7 Sep 2022 15:24:45 +0800 Subject: [PATCH 075/101] Update README_cn.md --- demos/metaverse/README_cn.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/demos/metaverse/README_cn.md b/demos/metaverse/README_cn.md index de10f9a53..a716109f1 100644 --- a/demos/metaverse/README_cn.md +++ b/demos/metaverse/README_cn.md @@ -10,7 +10,7 @@ Metaverse 是一种新的互联网应用和社交形式,融合了多种新技 ## 使用 -您可以使用 `PaddleSpeech` 和 `PaddleGAN` 的 `TTS` 模块让您最喜欢的人说出指定的内容,并构建您的虚拟人。 +您可以使用 `PaddleSpeech` 的 `TTS` 模块和 `PaddleGAN` 让您最喜欢的人说出指定的内容,并构建您的虚拟人。 运行 `run.sh` 完成所有基本程序,包括安装。 From e622f42d92e52e9f63c3cce74ababf68d676b366 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Wed, 7 Sep 2022 11:20:41 +0000 Subject: [PATCH 076/101] add aishell3 voice cloning with ECAPA-TDNN spk encoder --- examples/aishell3/README.md | 1 + examples/aishell3/vc1/README.md | 4 +- examples/aishell3/vc2/README.md | 126 ++++++++++++++++++ examples/aishell3/vc2/conf/default.yaml | 104 +++++++++++++++ examples/aishell3/vc2/local/preprocess.sh | 85 ++++++++++++ examples/aishell3/vc2/local/synthesize.sh | 22 +++ examples/aishell3/vc2/local/train.sh | 13 ++ examples/aishell3/vc2/local/voice_cloning.sh | 23 ++++ examples/aishell3/vc2/path.sh | 13 ++ examples/aishell3/vc2/run.sh | 39 ++++++ paddlespeech/cli/asr/infer.py | 20 ++- paddlespeech/cli/vector/infer.py | 69 ++++++++-- .../engine/vector/python/vector_engine.py | 3 +- .../t2s/exps/fastspeech2/vc2_infer.py | 70 ++++++++++ paddlespeech/t2s/exps/voice_cloning.py | 126 +++++++++++------- 15 files changed, 651 insertions(+), 67 deletions(-) create mode 100644 examples/aishell3/vc2/README.md create mode 100644 examples/aishell3/vc2/conf/default.yaml create mode 100755 examples/aishell3/vc2/local/preprocess.sh create mode 100755 examples/aishell3/vc2/local/synthesize.sh create mode 100755 examples/aishell3/vc2/local/train.sh create mode 100755 examples/aishell3/vc2/local/voice_cloning.sh create mode 100755 examples/aishell3/vc2/path.sh create mode 100755 examples/aishell3/vc2/run.sh create mode 100644 paddlespeech/t2s/exps/fastspeech2/vc2_infer.py diff --git a/examples/aishell3/README.md b/examples/aishell3/README.md index e022cef42..dd09bdfb2 100644 --- a/examples/aishell3/README.md +++ b/examples/aishell3/README.md @@ -10,4 +10,5 @@ * voc3 - MultiBand MelGAN * vc0 - Tacotron2 Voice Cloning with GE2E * vc1 - FastSpeech2 Voice Cloning with GE2E +* vc2 - FastSpeech2 Voice Cloning with ECAPA-TDNN * ernie_sat - ERNIE-SAT diff --git a/examples/aishell3/vc1/README.md b/examples/aishell3/vc1/README.md index aab525103..93e0fd7ec 100644 --- 
a/examples/aishell3/vc1/README.md +++ b/examples/aishell3/vc1/README.md @@ -99,7 +99,7 @@ CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_p The synthesizing step is very similar to that one of [tts3](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/tts3), but we should set `--voice-cloning=True` when calling `${BIN_DIR}/../synthesize.py`. ### Voice Cloning -Assume there are some reference audios in `./ref_audio` +Assume there are some reference audios in `./ref_audio` ```text ref_audio ├── 001238.wav @@ -116,7 +116,7 @@ CUDA_VISIBLE_DEVICES=${gpus} ./local/voice_cloning.sh ${conf_path} ${train_outpu Model | Step | eval/loss | eval/l1_loss | eval/duration_loss | eval/pitch_loss| eval/energy_loss :-------------:| :------------:| :-----: | :-----: | :--------: |:--------:|:---------: -default|2(gpu) x 96400|0.99699|0.62013|0.53057|0.11954| 0.20426| +default|2(gpu) x 96400|0.99699|0.62013|0.053057|0.11954| 0.20426| FastSpeech2 checkpoint contains files listed below. (There is no need for `speaker_id_map.txt` here ) diff --git a/examples/aishell3/vc2/README.md b/examples/aishell3/vc2/README.md new file mode 100644 index 000000000..774823674 --- /dev/null +++ b/examples/aishell3/vc2/README.md @@ -0,0 +1,126 @@ +# FastSpeech2 + AISHELL-3 Voice Cloning (ECAPA-TDNN) +This example contains code used to train a [FastSpeech2](https://arxiv.org/abs/2006.04558) model with [AISHELL-3](http://www.aishelltech.com/aishell_3). The trained model can be used in Voice Cloning Task, We refer to the model structure of [Transfer Learning from Speaker Verification to Multispeaker Text-To-Speech Synthesis](https://arxiv.org/pdf/1806.04558.pdf). The general steps are as follows: +1. Speaker Encoder: We use Speaker Verification to train a speaker encoder. Datasets used in this task are different from those used in `FastSpeech2` because the transcriptions are not needed, we use more datasets, refer to [ECAPA-TDNN](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/voxceleb/sv0). +2. Synthesizer: We use the trained speaker encoder to generate speaker embedding for each sentence in AISHELL-3. This embedding is an extra input of `FastSpeech2` which will be concated with encoder outputs. +3. Vocoder: We use [Parallel Wave GAN](http://arxiv.org/abs/1910.11480) as the neural Vocoder, refer to [voc1](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/voc1). + +## Dataset +### Download and Extract +Download AISHELL-3 from it's [Official Website](http://www.aishelltech.com/aishell_3) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/data_aishell3`. + +### Get MFA Result and Extract +We use [MFA2.x](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) to get durations for aishell3_fastspeech2. +You can download from here [aishell3_alignment_tone.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/AISHELL-3/with_tone/aishell3_alignment_tone.tar.gz), or train your MFA model reference to [mfa example](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/other/mfa) (use MFA1.x now) of our repo. + +## Get Started +Assume the path to the dataset is `~/datasets/data_aishell3`. +Assume the path to the MFA result of AISHELL-3 is `./aishell3_alignment_tone`. + +Run the command below to +1. **source path**. +2. preprocess the dataset. +3. train the model. +4. synthesize waveform from `metadata.jsonl`. +5. start a voice cloning inference. 
+```bash +./run.sh +``` +You can choose a range of stages you want to run, or set `stage` equal to `stop-stage` to use only one stage, for example, running the following command will only preprocess the dataset. +```bash +./run.sh --stage 0 --stop-stage 0 +``` +### Data Preprocessing +```bash +CUDA_VISIBLE_DEVICES=${gpus} ./local/preprocess.sh ${conf_path} +``` +When it is done. A `dump` folder is created in the current directory. The structure of the dump folder is listed below. +```text +dump +├── dev +│ ├── norm +│ └── raw +├── embed +│ ├── SSB0005 +│ ├── SSB0009 +│ ├── ... +│ └── ... +├── phone_id_map.txt +├── speaker_id_map.txt +├── test +│ ├── norm +│ └── raw +└── train + ├── energy_stats.npy + ├── norm + ├── pitch_stats.npy + ├── raw + └── speech_stats.npy +``` +The `embed` contains the generated speaker embedding for each sentence in AISHELL-3, which has the same file structure with wav files and the format is `.npy`. + +The computing time of utterance embedding can be x hours. + +The dataset is split into 3 parts, namely `train`, `dev`, and` test`, each of which contains a `norm` and `raw` subfolder. The raw folder contains speech、pitch and energy features of each utterance, while the norm folder contains normalized ones. The statistics used to normalize features are computed from the training set, which is located in `dump/train/*_stats.npy`. + +Also, there is a `metadata.jsonl` in each subfolder. It is a table-like file that contains phones, text_lengths, speech_lengths, durations, the path of speech features, the path of pitch features, the path of energy features, speaker, and id of each utterance. + +The preprocessing step is very similar to that one of [tts3](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/tts3), but there is one more `ECAPA-TDNN/inference` step here. + +### Model Training +`./local/train.sh` calls `${BIN_DIR}/train.py`. +```bash +CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path} +``` +The training step is very similar to that one of [tts3](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/tts3), but we should set `--voice-cloning=True` when calling `${BIN_DIR}/train.py`. + +### Synthesizing +We use [parallel wavegan](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/voc1) as the neural vocoder. +Download pretrained parallel wavegan model from [pwg_aishell3_ckpt_0.5.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/pwgan/pwg_aishell3_ckpt_0.5.zip) and unzip it. +```bash +unzip pwg_aishell3_ckpt_0.5.zip +``` +Parallel WaveGAN checkpoint contains files listed below. +```text +pwg_aishell3_ckpt_0.5 +├── default.yaml # default config used to train parallel wavegan +├── feats_stats.npy # statistics used to normalize spectrogram when training parallel wavegan +└── snapshot_iter_1000000.pdz # generator parameters of parallel wavegan +``` +`./local/synthesize.sh` calls `${BIN_DIR}/../synthesize.py`, which can synthesize waveform from `metadata.jsonl`. +```bash +CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name} +``` +The synthesizing step is very similar to that one of [tts3](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/tts3), but we should set `--voice-cloning=True` when calling `${BIN_DIR}/../synthesize.py`. 
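+
+The extra speaker inputs in this recipe come from the ECAPA-TDNN speaker verification model: `local/preprocess.sh` first calls `${BIN_DIR}/vc2_infer.py` to write one embedding per utterance into `dump/embed`. As a minimal sketch, a single embedding can also be extracted with the vector CLI (the wav path below is a placeholder for a 16 kHz mono wav; the resulting vector is 192-dimensional for this model, matching `spk_embed_dim: 192` in `conf/default.yaml`):
+```python
+import paddle
+from paddlespeech.cli.vector import VectorExecutor
+
+vector_executor = VectorExecutor()
+# extract an utterance-level speaker embedding with the pretrained ECAPA-TDNN
+spk_emb = vector_executor(
+    model="ecapatdnn_voxceleb12",
+    sample_rate=16000,
+    audio_file="./ref_audio/001238.wav",
+    device=paddle.get_device())
+print(spk_emb.shape)
+```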
+ +### Voice Cloning +Assume there are some reference audios in `./ref_audio` (the format must be wav here) +```text +ref_audio +├── 001238.wav +├── LJ015-0254.wav +└── audio_self_test.wav +``` +`./local/voice_cloning.sh` calls `${BIN_DIR}/../voice_cloning.py` + +```bash +CUDA_VISIBLE_DEVICES=${gpus} ./local/voice_cloning.sh ${conf_path} ${train_output_path} ${ckpt_name} ${ref_audio_dir} +``` +## Pretrained Model +- [fastspeech2_aishell3_ckpt_vc2_1.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_aishell3_ckpt_vc2_1.2.0.zip) + +Model | Step | eval/loss | eval/l1_loss | eval/duration_loss | eval/pitch_loss| eval/energy_loss +:-------------:| :------------:| :-----: | :-----: | :--------: |:--------:|:---------: +default|2(gpu) x 96400|0.991855|0.599517|0.052142|0.094877| 0.245318| + +FastSpeech2 checkpoint contains files listed below. +(There is no need for `speaker_id_map.txt` here ) + +```text +fastspeech2_aishell3_ckpt_vc2_1.2.0 +├── default.yaml # default config used to train fastspeech2 +├── energy_stats.npy # statistics used to normalize energy when training fastspeech2 +├── phone_id_map.txt # phone vocabulary file when training fastspeech2 +├── pitch_stats.npy # statistics used to normalize pitch when training fastspeech2 +├── snapshot_iter_96400.pdz # model parameters and optimizer states +└── speech_stats.npy # statistics used to normalize spectrogram when training fastspeech2 +``` diff --git a/examples/aishell3/vc2/conf/default.yaml b/examples/aishell3/vc2/conf/default.yaml new file mode 100644 index 000000000..5ef37f812 --- /dev/null +++ b/examples/aishell3/vc2/conf/default.yaml @@ -0,0 +1,104 @@ +########################################################### +# FEATURE EXTRACTION SETTING # +########################################################### + +fs: 24000 # sr +n_fft: 2048 # FFT size (samples). +n_shift: 300 # Hop size (samples). 12.5ms +win_length: 1200 # Window length (samples). 50ms + # If set to null, it will be the same as fft_size. +window: "hann" # Window function. + +# Only used for feats_type != raw + +fmin: 80 # Minimum frequency of Mel basis. +fmax: 7600 # Maximum frequency of Mel basis. +n_mels: 80 # The number of mel basis. + +# Only used for the model using pitch features (e.g. FastSpeech2) +f0min: 80 # Minimum f0 for pitch extraction. +f0max: 400 # Maximum f0 for pitch extraction. 
+ + +########################################################### +# DATA SETTING # +########################################################### +batch_size: 64 +num_workers: 2 + + +########################################################### +# MODEL SETTING # +########################################################### +model: + adim: 384 # attention dimension + aheads: 2 # number of attention heads + elayers: 4 # number of encoder layers + eunits: 1536 # number of encoder ff units + dlayers: 4 # number of decoder layers + dunits: 1536 # number of decoder ff units + positionwise_layer_type: conv1d # type of position-wise layer + positionwise_conv_kernel_size: 3 # kernel size of position wise conv layer + duration_predictor_layers: 2 # number of layers of duration predictor + duration_predictor_chans: 256 # number of channels of duration predictor + duration_predictor_kernel_size: 3 # filter size of duration predictor + postnet_layers: 5 # number of layers of postnset + postnet_filts: 5 # filter size of conv layers in postnet + postnet_chans: 256 # number of channels of conv layers in postnet + use_scaled_pos_enc: True # whether to use scaled positional encoding + encoder_normalize_before: True # whether to perform layer normalization before the input + decoder_normalize_before: True # whether to perform layer normalization before the input + reduction_factor: 1 # reduction factor + init_type: xavier_uniform # initialization type + init_enc_alpha: 1.0 # initial value of alpha of encoder scaled position encoding + init_dec_alpha: 1.0 # initial value of alpha of decoder scaled position encoding + transformer_enc_dropout_rate: 0.2 # dropout rate for transformer encoder layer + transformer_enc_positional_dropout_rate: 0.2 # dropout rate for transformer encoder positional encoding + transformer_enc_attn_dropout_rate: 0.2 # dropout rate for transformer encoder attention layer + transformer_dec_dropout_rate: 0.2 # dropout rate for transformer decoder layer + transformer_dec_positional_dropout_rate: 0.2 # dropout rate for transformer decoder positional encoding + transformer_dec_attn_dropout_rate: 0.2 # dropout rate for transformer decoder attention layer + pitch_predictor_layers: 5 # number of conv layers in pitch predictor + pitch_predictor_chans: 256 # number of channels of conv layers in pitch predictor + pitch_predictor_kernel_size: 5 # kernel size of conv leyers in pitch predictor + pitch_predictor_dropout: 0.5 # dropout rate in pitch predictor + pitch_embed_kernel_size: 1 # kernel size of conv embedding layer for pitch + pitch_embed_dropout: 0.0 # dropout rate after conv embedding layer for pitch + stop_gradient_from_pitch_predictor: True # whether to stop the gradient from pitch predictor to encoder + energy_predictor_layers: 2 # number of conv layers in energy predictor + energy_predictor_chans: 256 # number of channels of conv layers in energy predictor + energy_predictor_kernel_size: 3 # kernel size of conv leyers in energy predictor + energy_predictor_dropout: 0.5 # dropout rate in energy predictor + energy_embed_kernel_size: 1 # kernel size of conv embedding layer for energy + energy_embed_dropout: 0.0 # dropout rate after conv embedding layer for energy + stop_gradient_from_energy_predictor: False # whether to stop the gradient from energy predictor to encoder + spk_embed_dim: 192 # speaker embedding dimension + spk_embed_integration_type: concat # speaker embedding integration type + + + +########################################################### +# UPDATER SETTING # 
+###########################################################
+updater:
+    use_masking: True        # whether to apply masking for padded part in loss calculation
+
+
+###########################################################
+#                     OPTIMIZER SETTING                   #
+###########################################################
+optimizer:
+    optim: adam              # optimizer type
+    learning_rate: 0.001     # learning rate
+
+###########################################################
+#                     TRAINING SETTING                    #
+###########################################################
+max_epoch: 200
+num_snapshots: 5
+
+
+###########################################################
+#                       OTHER SETTING                     #
+###########################################################
+seed: 10086
diff --git a/examples/aishell3/vc2/local/preprocess.sh b/examples/aishell3/vc2/local/preprocess.sh
new file mode 100755
index 000000000..f5262a26d
--- /dev/null
+++ b/examples/aishell3/vc2/local/preprocess.sh
@@ -0,0 +1,85 @@
+#!/bin/bash
+
+stage=0
+stop_stage=100
+
+config_path=$1
+
+# gen speaker embedding
+if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
+    python3 ${BIN_DIR}/vc2_infer.py \
+        --input=~/datasets/data_aishell3/train/wav/ \
+        --output=dump/embed \
+        --num-cpu=20
+fi
+
+# copy from tts3/preprocess
+if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
+    # get durations from MFA's result
+    echo "Generate durations.txt from MFA results ..."
+    python3 ${MAIN_ROOT}/utils/gen_duration_from_textgrid.py \
+        --inputdir=./aishell3_alignment_tone \
+        --output durations.txt \
+        --config=${config_path}
+fi
+
+if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
+    # extract features
+    echo "Extract features ..."
+    python3 ${BIN_DIR}/preprocess.py \
+        --dataset=aishell3 \
+        --rootdir=~/datasets/data_aishell3/ \
+        --dumpdir=dump \
+        --dur-file=durations.txt \
+        --config=${config_path} \
+        --num-cpu=20 \
+        --cut-sil=True \
+        --spk_emb_dir=dump/embed
+fi
+
+if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
+    # get features' stats (mean and std)
+    echo "Get features' stats ..."
+    python3 ${MAIN_ROOT}/utils/compute_statistics.py \
+        --metadata=dump/train/raw/metadata.jsonl \
+        --field-name="speech"
+
+    python3 ${MAIN_ROOT}/utils/compute_statistics.py \
+        --metadata=dump/train/raw/metadata.jsonl \
+        --field-name="pitch"
+
+    python3 ${MAIN_ROOT}/utils/compute_statistics.py \
+        --metadata=dump/train/raw/metadata.jsonl \
+        --field-name="energy"
+fi
+
+if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
+    # normalize and convert phone/speaker to id; dev and test should use train's stats
+    echo "Normalize ..."
+ python3 ${BIN_DIR}/normalize.py \ + --metadata=dump/train/raw/metadata.jsonl \ + --dumpdir=dump/train/norm \ + --speech-stats=dump/train/speech_stats.npy \ + --pitch-stats=dump/train/pitch_stats.npy \ + --energy-stats=dump/train/energy_stats.npy \ + --phones-dict=dump/phone_id_map.txt \ + --speaker-dict=dump/speaker_id_map.txt + + python3 ${BIN_DIR}/normalize.py \ + --metadata=dump/dev/raw/metadata.jsonl \ + --dumpdir=dump/dev/norm \ + --speech-stats=dump/train/speech_stats.npy \ + --pitch-stats=dump/train/pitch_stats.npy \ + --energy-stats=dump/train/energy_stats.npy \ + --phones-dict=dump/phone_id_map.txt \ + --speaker-dict=dump/speaker_id_map.txt + + python3 ${BIN_DIR}/normalize.py \ + --metadata=dump/test/raw/metadata.jsonl \ + --dumpdir=dump/test/norm \ + --speech-stats=dump/train/speech_stats.npy \ + --pitch-stats=dump/train/pitch_stats.npy \ + --energy-stats=dump/train/energy_stats.npy \ + --phones-dict=dump/phone_id_map.txt \ + --speaker-dict=dump/speaker_id_map.txt +fi diff --git a/examples/aishell3/vc2/local/synthesize.sh b/examples/aishell3/vc2/local/synthesize.sh new file mode 100755 index 000000000..8c61e3f3e --- /dev/null +++ b/examples/aishell3/vc2/local/synthesize.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +config_path=$1 +train_output_path=$2 +ckpt_name=$3 + +FLAGS_allocator_strategy=naive_best_fit \ +FLAGS_fraction_of_gpu_memory_to_use=0.01 \ +python3 ${BIN_DIR}/../synthesize.py \ + --am=fastspeech2_aishell3 \ + --am_config=${config_path} \ + --am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --am_stat=dump/train/speech_stats.npy \ + --voc=pwgan_aishell3 \ + --voc_config=pwg_aishell3_ckpt_0.5/default.yaml \ + --voc_ckpt=pwg_aishell3_ckpt_0.5/snapshot_iter_1000000.pdz \ + --voc_stat=pwg_aishell3_ckpt_0.5/feats_stats.npy \ + --test_metadata=dump/test/norm/metadata.jsonl \ + --output_dir=${train_output_path}/test \ + --phones_dict=dump/phone_id_map.txt \ + --speaker_dict=dump/speaker_id_map.txt \ + --voice-cloning=True diff --git a/examples/aishell3/vc2/local/train.sh b/examples/aishell3/vc2/local/train.sh new file mode 100755 index 000000000..c775fcadc --- /dev/null +++ b/examples/aishell3/vc2/local/train.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +config_path=$1 +train_output_path=$2 + +python3 ${BIN_DIR}/train.py \ + --train-metadata=dump/train/norm/metadata.jsonl \ + --dev-metadata=dump/dev/norm/metadata.jsonl \ + --config=${config_path} \ + --output-dir=${train_output_path} \ + --ngpu=2 \ + --phones-dict=dump/phone_id_map.txt \ + --voice-cloning=True \ No newline at end of file diff --git a/examples/aishell3/vc2/local/voice_cloning.sh b/examples/aishell3/vc2/local/voice_cloning.sh new file mode 100755 index 000000000..09c5e4369 --- /dev/null +++ b/examples/aishell3/vc2/local/voice_cloning.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +config_path=$1 +train_output_path=$2 +ckpt_name=$3 +ref_audio_dir=$4 + +FLAGS_allocator_strategy=naive_best_fit \ +FLAGS_fraction_of_gpu_memory_to_use=0.01 \ +python3 ${BIN_DIR}/../voice_cloning.py \ + --am=fastspeech2_aishell3 \ + --am_config=${config_path} \ + --am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --am_stat=dump/train/speech_stats.npy \ + --voc=pwgan_aishell3 \ + --voc_config=pwg_aishell3_ckpt_0.5/default.yaml \ + --voc_ckpt=pwg_aishell3_ckpt_0.5/snapshot_iter_1000000.pdz \ + --voc_stat=pwg_aishell3_ckpt_0.5/feats_stats.npy \ + --text="凯莫瑞安联合体的经济崩溃迫在眉睫。" \ + --input-dir=${ref_audio_dir} \ + --output-dir=${train_output_path}/vc_syn \ + --phones-dict=dump/phone_id_map.txt \ + --use_ecapa=True diff --git 
a/examples/aishell3/vc2/path.sh b/examples/aishell3/vc2/path.sh new file mode 100755 index 000000000..fb7e8411c --- /dev/null +++ b/examples/aishell3/vc2/path.sh @@ -0,0 +1,13 @@ +#!/bin/bash +export MAIN_ROOT=`realpath ${PWD}/../../../` + +export PATH=${MAIN_ROOT}:${MAIN_ROOT}/utils:${PATH} +export LC_ALL=C + +export PYTHONDONTWRITEBYTECODE=1 +# Use UTF-8 in Python to avoid UnicodeDecodeError when LC_ALL=C +export PYTHONIOENCODING=UTF-8 +export PYTHONPATH=${MAIN_ROOT}:${PYTHONPATH} + +MODEL=fastspeech2 +export BIN_DIR=${MAIN_ROOT}/paddlespeech/t2s/exps/${MODEL} diff --git a/examples/aishell3/vc2/run.sh b/examples/aishell3/vc2/run.sh new file mode 100755 index 000000000..06d562988 --- /dev/null +++ b/examples/aishell3/vc2/run.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +set -e +source path.sh + +gpus=0,1 +stage=0 +stop_stage=100 + +conf_path=conf/default.yaml +train_output_path=exp/default +ckpt_name=snapshot_iter_96400.pdz +ref_audio_dir=ref_audio + + +# with the following command, you can choose the stage range you want to run +# such as `./run.sh --stage 0 --stop-stage 0` +# this can not be mixed use with `$1`, `$2` ... +source ${MAIN_ROOT}/utils/parse_options.sh || exit 1 + +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + # prepare data + CUDA_VISIBLE_DEVICES=${gpus} ./local/preprocess.sh ${conf_path} || exit -1 +fi + +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + # train model, all `ckpt` under `train_output_path/checkpoints/` dir + CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path} || exit -1 +fi + +if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then + # synthesize, vocoder is pwgan + CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1 +fi + +if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then + # synthesize, vocoder is pwgan + CUDA_VISIBLE_DEVICES=${gpus} ./local/voice_cloning.sh ${conf_path} ${train_output_path} ${ckpt_name} ${ref_audio_dir} || exit -1 +fi diff --git a/paddlespeech/cli/asr/infer.py b/paddlespeech/cli/asr/infer.py index f9b4439ec..7296776f9 100644 --- a/paddlespeech/cli/asr/infer.py +++ b/paddlespeech/cli/asr/infer.py @@ -99,8 +99,9 @@ class ASRExecutor(BaseExecutor): '-y', action="store_true", default=False, - help='No additional parameters required. Once set this parameter, it means accepting the request of the program by default, which includes transforming the audio sample rate' - ) + help='No additional parameters required. 
\ + Once set this parameter, it means accepting the request of the program by default, \ + which includes transforming the audio sample rate') self.parser.add_argument( '--rtf', action="store_true", @@ -340,7 +341,7 @@ class ASRExecutor(BaseExecutor): audio = np.round(audio).astype("int16") return audio - def _check(self, audio_file: str, sample_rate: int, force_yes: bool): + def _check(self, audio_file: str, sample_rate: int, force_yes: bool=False): self.sample_rate = sample_rate if self.sample_rate != 16000 and self.sample_rate != 8000: logger.error( @@ -434,8 +435,17 @@ class ASRExecutor(BaseExecutor): for id_, input_ in task_source.items(): try: - res = self(input_, model, lang, sample_rate, config, ckpt_path, - decode_method, force_yes, rtf, device) + res = self( + audio_file=input_, + model=model, + lang=lang, + sample_rate=sample_rate, + config=config, + ckpt_path=ckpt_path, + decode_method=decode_method, + force_yes=force_yes, + rtf=rtf, + device=device) task_results[id_] = res except Exception as e: has_exceptions = True diff --git a/paddlespeech/cli/vector/infer.py b/paddlespeech/cli/vector/infer.py index 48ca1f98d..111987246 100644 --- a/paddlespeech/cli/vector/infer.py +++ b/paddlespeech/cli/vector/infer.py @@ -70,6 +70,14 @@ class VectorExecutor(BaseExecutor): type=str, default=None, help="Checkpoint file of model.") + self.parser.add_argument( + '--yes', + '-y', + action="store_true", + default=False, + help='No additional parameters required. \ + Once set this parameter, it means accepting the request of the program by default, \ + which includes transforming the audio sample rate') self.parser.add_argument( '--config', type=str, @@ -109,6 +117,7 @@ class VectorExecutor(BaseExecutor): sample_rate = parser_args.sample_rate config = parser_args.config ckpt_path = parser_args.ckpt_path + force_yes = parser_args.yes device = parser_args.device # stage 1: configurate the verbose flag @@ -128,8 +137,14 @@ class VectorExecutor(BaseExecutor): # extract the speaker audio embedding if parser_args.task == "spk": logger.debug("do vector spk task") - res = self(input_, model, sample_rate, config, ckpt_path, - device) + res = self( + audio_file=input_, + model=model, + sample_rate=sample_rate, + config=config, + ckpt_path=ckpt_path, + force_yes=force_yes, + device=device) task_result[id_] = res elif parser_args.task == "score": logger.debug("do vector score task") @@ -145,10 +160,22 @@ class VectorExecutor(BaseExecutor): logger.debug( f"score task, enroll audio: {enroll_audio}, test audio: {test_audio}" ) - enroll_embedding = self(enroll_audio, model, sample_rate, - config, ckpt_path, device) - test_embedding = self(test_audio, model, sample_rate, - config, ckpt_path, device) + enroll_embedding = self( + audio_file=enroll_audio, + model=model, + sample_rate=sample_rate, + config=config, + ckpt_path=ckpt_path, + force_yes=force_yes, + device=device) + test_embedding = self( + audio_file=test_audio, + model=model, + sample_rate=sample_rate, + config=config, + ckpt_path=ckpt_path, + force_yes=force_yes, + device=device) # get the score res = self.get_embeddings_score(enroll_embedding, @@ -222,6 +249,7 @@ class VectorExecutor(BaseExecutor): sample_rate: int=16000, config: os.PathLike=None, ckpt_path: os.PathLike=None, + force_yes: bool=False, device=paddle.get_device()): """Extract the audio embedding @@ -240,7 +268,7 @@ class VectorExecutor(BaseExecutor): """ # stage 0: check the audio format audio_file = os.path.abspath(audio_file) - if not self._check(audio_file, sample_rate): + if not 
self._check(audio_file, sample_rate, force_yes): sys.exit(-1) # stage 1: set the paddle runtime host device @@ -418,7 +446,7 @@ class VectorExecutor(BaseExecutor): logger.debug("audio extract the feat success") - def _check(self, audio_file: str, sample_rate: int): + def _check(self, audio_file: str, sample_rate: int, force_yes: bool=False): """Check if the model sample match the audio sample rate Args: @@ -462,13 +490,34 @@ class VectorExecutor(BaseExecutor): logger.debug(f"The sample rate is {audio_sample_rate}") if audio_sample_rate != self.sample_rate: - logger.error("The sample rate of the input file is not {}.\n \ + logger.debug("The sample rate of the input file is not {}.\n \ The program will resample the wav file to {}.\n \ If the result does not meet your expectations,\n \ Please input the 16k 16 bit 1 channel wav file. \ ".format(self.sample_rate, self.sample_rate)) - sys.exit(-1) + if force_yes is False: + while (True): + logger.debug( + "Whether to change the sample rate and the channel. Y: change the sample. N: exit the prgream." + ) + content = input("Input(Y/N):") + if content.strip() == "Y" or content.strip( + ) == "y" or content.strip() == "yes" or content.strip( + ) == "Yes": + logger.debug( + "change the sampele rate, channel to 16k and 1 channel" + ) + break + elif content.strip() == "N" or content.strip( + ) == "n" or content.strip() == "no" or content.strip( + ) == "No": + logger.debug("Exit the program") + return False + else: + logger.warning("Not regular input, please input again") + self.change_format = True else: logger.debug("The audio file format is right") + self.change_format = False return True diff --git a/paddlespeech/server/engine/vector/python/vector_engine.py b/paddlespeech/server/engine/vector/python/vector_engine.py index f7d60648d..7b8f667db 100644 --- a/paddlespeech/server/engine/vector/python/vector_engine.py +++ b/paddlespeech/server/engine/vector/python/vector_engine.py @@ -105,7 +105,8 @@ class PaddleVectorConnectionHandler: # we can not reuse the cache io.BytesIO(audio) data, # because the soundfile will change the io.BytesIO(audio) to the end # thus we should convert the base64 string to io.BytesIO when we need the audio data - if not self.executor._check(io.BytesIO(audio), sample_rate): + if not self.executor._check( + io.BytesIO(audio), sample_rate, force_yes=True): logger.debug("check the audio sample rate occurs error") return np.array([0.0]) diff --git a/paddlespeech/t2s/exps/fastspeech2/vc2_infer.py b/paddlespeech/t2s/exps/fastspeech2/vc2_infer.py new file mode 100644 index 000000000..3d0a83666 --- /dev/null +++ b/paddlespeech/t2s/exps/fastspeech2/vc2_infer.py @@ -0,0 +1,70 @@ +import argparse +from concurrent.futures import ThreadPoolExecutor +from pathlib import Path + +import numpy as np +import tqdm + +from paddlespeech.cli.vector import VectorExecutor + + +def _process_utterance(ifpath: Path, + input_dir: Path, + output_dir: Path, + vec_executor): + rel_path = ifpath.relative_to(input_dir) + ofpath = (output_dir / rel_path).with_suffix(".npy") + ofpath.parent.mkdir(parents=True, exist_ok=True) + embed = vec_executor(audio_file=ifpath, force_yes=True) + np.save(ofpath, embed) + return ofpath + + +def main(args): + # input output preparation + input_dir = Path(args.input).expanduser() + ifpaths = list(input_dir.rglob(args.pattern)) + print(f"{len(ifpaths)} utterances in total") + output_dir = Path(args.output).expanduser() + output_dir.mkdir(parents=True, exist_ok=True) + vec_executor = VectorExecutor() + nprocs = args.num_cpu + + # 
warm up + vec_executor(audio_file=ifpaths[0], force_yes=True) + + if nprocs == 1: + results = [] + for ifpath in tqdm.tqdm(ifpaths, total=len(ifpaths)): + _process_utterance( + ifpath=ifpath, + input_dir=input_dir, + output_dir=output_dir, + vec_executor=vec_executor) + else: + with ThreadPoolExecutor(nprocs) as pool: + with tqdm.tqdm(total=len(ifpaths)) as progress: + for ifpath in ifpaths: + future = pool.submit(_process_utterance, ifpath, input_dir, + output_dir, vec_executor) + future.add_done_callback(lambda p: progress.update()) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="compute utterance embed.") + parser.add_argument( + "--input", type=str, help="path of the audio_file folder.") + parser.add_argument( + "--pattern", + type=str, + default="*.wav", + help="pattern to filter audio files.") + parser.add_argument( + "--output", + metavar="OUTPUT_DIR", + help="path to save spk embedding results.") + parser.add_argument( + "--num-cpu", type=int, default=1, help="number of process.") + args = parser.parse_args() + + main(args) diff --git a/paddlespeech/t2s/exps/voice_cloning.py b/paddlespeech/t2s/exps/voice_cloning.py index b51a4d7bc..80cfea4a6 100644 --- a/paddlespeech/t2s/exps/voice_cloning.py +++ b/paddlespeech/t2s/exps/voice_cloning.py @@ -21,13 +21,28 @@ import soundfile as sf import yaml from yacs.config import CfgNode +from paddlespeech.cli.vector import VectorExecutor from paddlespeech.t2s.exps.syn_utils import get_am_inference from paddlespeech.t2s.exps.syn_utils import get_voc_inference from paddlespeech.t2s.frontend.zh_frontend import Frontend +from paddlespeech.t2s.utils import str2bool from paddlespeech.vector.exps.ge2e.audio_processor import SpeakerVerificationPreprocessor from paddlespeech.vector.models.lstm_speaker_encoder import LSTMSpeakerEncoder +def gen_random_embed(use_ecapa: bool=False): + if use_ecapa: + # Randomly generate numbers of -25 ~ 25, 192 is the dim of spk_emb + random_spk_emb = (-1 + 2 * np.random.rand(192)) * 25 + + # GE2E + else: + # Randomly generate numbers of 0 ~ 0.2, 256 is the dim of spk_emb + random_spk_emb = np.random.rand(256) * 0.2 + random_spk_emb = paddle.to_tensor(random_spk_emb, dtype='float32') + return random_spk_emb + + def voice_cloning(args): # Init body. 
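+    # High-level flow of this function: load the acoustic model / vocoder configs,
+    # build a speaker encoder (ECAPA-TDNN via VectorExecutor when --use_ecapa=True,
+    # otherwise the GE2E LSTM encoder), then synthesize one cloned utterance per
+    # reference wav and a few extra utterances from random speaker embeddings.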
with open(args.am_config) as f: @@ -41,30 +56,47 @@ def voice_cloning(args): print(am_config) print(voc_config) + output_dir = Path(args.output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + + input_dir = Path(args.input_dir) + # speaker encoder - p = SpeakerVerificationPreprocessor( - sampling_rate=16000, - audio_norm_target_dBFS=-30, - vad_window_length=30, - vad_moving_average_width=8, - vad_max_silence_length=6, - mel_window_length=25, - mel_window_step=10, - n_mels=40, - partial_n_frames=160, - min_pad_coverage=0.75, - partial_overlap_ratio=0.5) - print("Audio Processor Done!") - - speaker_encoder = LSTMSpeakerEncoder( - n_mels=40, num_layers=3, hidden_size=256, output_size=256) - speaker_encoder.set_state_dict(paddle.load(args.ge2e_params_path)) - speaker_encoder.eval() - print("GE2E Done!") + if args.use_ecapa: + vec_executor = VectorExecutor() + # warm up + vec_executor( + audio_file=input_dir / os.listdir(input_dir)[0], force_yes=True) + print("ECAPA-TDNN Done!") + # use GE2E + else: + p = SpeakerVerificationPreprocessor( + sampling_rate=16000, + audio_norm_target_dBFS=-30, + vad_window_length=30, + vad_moving_average_width=8, + vad_max_silence_length=6, + mel_window_length=25, + mel_window_step=10, + n_mels=40, + partial_n_frames=160, + min_pad_coverage=0.75, + partial_overlap_ratio=0.5) + print("Audio Processor Done!") + + speaker_encoder = LSTMSpeakerEncoder( + n_mels=40, num_layers=3, hidden_size=256, output_size=256) + speaker_encoder.set_state_dict(paddle.load(args.ge2e_params_path)) + speaker_encoder.eval() + print("GE2E Done!") frontend = Frontend(phone_vocab_path=args.phones_dict) print("frontend done!") + sentence = args.text + input_ids = frontend.get_input_ids(sentence, merge_sentences=True) + phone_ids = input_ids["phone_ids"][0] + # acoustic model am_inference = get_am_inference( am=args.am, @@ -80,26 +112,19 @@ def voice_cloning(args): voc_ckpt=args.voc_ckpt, voc_stat=args.voc_stat) - output_dir = Path(args.output_dir) - output_dir.mkdir(parents=True, exist_ok=True) - - input_dir = Path(args.input_dir) - - sentence = args.text - - input_ids = frontend.get_input_ids(sentence, merge_sentences=True) - phone_ids = input_ids["phone_ids"][0] - for name in os.listdir(input_dir): utt_id = name.split(".")[0] ref_audio_path = input_dir / name - mel_sequences = p.extract_mel_partials(p.preprocess_wav(ref_audio_path)) - # print("mel_sequences: ", mel_sequences.shape) - with paddle.no_grad(): - spk_emb = speaker_encoder.embed_utterance( - paddle.to_tensor(mel_sequences)) - # print("spk_emb shape: ", spk_emb.shape) - + if args.use_ecapa: + spk_emb = vec_executor(audio_file=ref_audio_path, force_yes=True) + spk_emb = paddle.to_tensor(spk_emb) + # GE2E + else: + mel_sequences = p.extract_mel_partials( + p.preprocess_wav(ref_audio_path)) + with paddle.no_grad(): + spk_emb = speaker_encoder.embed_utterance( + paddle.to_tensor(mel_sequences)) with paddle.no_grad(): wav = voc_inference(am_inference(phone_ids, spk_emb=spk_emb)) @@ -108,16 +133,17 @@ def voice_cloning(args): wav.numpy(), samplerate=am_config.fs) print(f"{utt_id} done!") - # Randomly generate numbers of 0 ~ 0.2, 256 is the dim of spk_emb - random_spk_emb = np.random.rand(256) * 0.2 - random_spk_emb = paddle.to_tensor(random_spk_emb, dtype='float32') - utt_id = "random_spk_emb" - with paddle.no_grad(): - wav = voc_inference(am_inference(phone_ids, spk_emb=random_spk_emb)) - sf.write( - str(output_dir / (utt_id + ".wav")), - wav.numpy(), - samplerate=am_config.fs) + + # generate 5 random_spk_emb + for i in range(5): 
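+        # gen_random_embed (defined above) draws a 192-dim embedding in (-25, 25) for
+        # ECAPA-TDNN, or a 256-dim embedding in [0, 0.2) for GE2E, so each of the 5
+        # generated wavs uses a different random "speaker".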
+ random_spk_emb = gen_random_embed(args.use_ecapa) + utt_id = "random_spk_emb" + with paddle.no_grad(): + wav = voc_inference(am_inference(phone_ids, spk_emb=random_spk_emb)) + sf.write( + str(output_dir / (utt_id + "_" + str(i) + ".wav")), + wav.numpy(), + samplerate=am_config.fs) print(f"{utt_id} done!") @@ -171,13 +197,15 @@ def parse_args(): type=str, default="每当你觉得,想要批评什么人的时候,你切要记着,这个世界上的人,并非都具备你禀有的条件。", help="text to synthesize, a line") - parser.add_argument( "--ge2e_params_path", type=str, help="ge2e params path.") - + parser.add_argument( + "--use_ecapa", + type=str2bool, + default=False, + help="whether to use ECAPA-TDNN as speaker encoder.") parser.add_argument( "--ngpu", type=int, default=1, help="if ngpu=0, use cpu.") - parser.add_argument( "--input-dir", type=str, From 5ae5e6819c575fc7169c0c5f47d4ab7f4216c73d Mon Sep 17 00:00:00 2001 From: TianYuan Date: Thu, 8 Sep 2022 06:45:48 +0000 Subject: [PATCH 077/101] update readme, test=tts --- README.md | 57 +++++++++++++-------- README_cn.md | 46 +++++++++++------ demos/text_to_speech/README.md | 82 +++++++++++++++++++---------- demos/text_to_speech/README_cn.md | 85 ++++++++++++++++++++----------- 4 files changed, 175 insertions(+), 95 deletions(-) diff --git a/README.md b/README.md index 5c62925e2..2fb281e7f 100644 --- a/README.md +++ b/README.md @@ -159,15 +159,20 @@ Via the easy-to-use, efficient, flexible and scalable implementation, our vision - 🧩 *Cascaded models application*: as an extension of the typical traditional audio tasks, we combine the workflows of the aforementioned tasks with other fields like Natural language processing (NLP) and Computer Vision (CV). ### Recent Update -- 👑 2022.05.13: Release [PP-ASR](./docs/source/asr/PPASR.md)、[PP-TTS](./docs/source/tts/PPTTS.md)、[PP-VPR](docs/source/vpr/PPVPR.md) -- 👏🏻 2022.05.06: `Streaming ASR` with `Punctuation Restoration` and `Token Timestamp`. -- 👏🏻 2022.05.06: `Server` is available for `Speaker Verification`, and `Punctuation Restoration`. -- 👏🏻 2022.04.28: `Streaming Server` is available for `Automatic Speech Recognition` and `Text-to-Speech`. -- 👏🏻 2022.03.28: `Server` is available for `Audio Classification`, `Automatic Speech Recognition` and `Text-to-Speech`. -- 👏🏻 2022.03.28: `CLI` is available for `Speaker Verification`. +- ⚡ 2022.08.25: Release TTS [finetune](./examples/other/tts_finetune/tts3) example. +- 🔥 2022.08.22: Add ERNIE-SAT models: [ERNIE-SAT-vctk](./examples/vctk/ernie_sat)、[ERNIE-SAT-aishell3](./examples/aishell3/ernie_sat)、[ERNIE-SAT-zh_en](./examples/aishell3_vctk/ernie_sat). +- 🔥 2022.08.15: Add [g2pW](https://github.com/GitYCC/g2pW) into TTS Chinese Text Frontend. +- 🔥 2022.08.09: Release [Chinese English mixed TTS](./examples/zh_en_tts/tts3). +- ⚡ 2022.08.03: Add ONNXRuntime infer for TTS CLI. +- 🎉 2022.07.18: Release VITS: [VITS-csmsc](./examples/csmsc/vits)、[VITS-aishell3](./examples/aishell3/vits)、[VITS-VC](./examples/aishell3/vits-vc). +- 🎉 2022.06.22: All TTS models support ONNX format. +- 🍀 2022.06.17: Add [PaddleSpeech Web Demo](./demos/speech_web). +- 👑 2022.05.13: Release [PP-ASR](./docs/source/asr/PPASR.md)、[PP-TTS](./docs/source/tts/PPTTS.md)、[PP-VPR](docs/source/vpr/PPVPR.md). +- 👏🏻 2022.05.06: `PaddleSpeech Streaming Server` is available for `Streaming ASR` with `Punctuation Restoration` and `Token Timestamp` and `Text-to-Speech`. +- 👏🏻 2022.05.06: `PaddleSpeech Server` is available for `Audio Classification`, `Automatic Speech Recognition` and `Text-to-Speech`, `Speaker Verification` and `Punctuation Restoration`. 
+- 👏🏻 2022.03.28: `PaddleSpeech CLI` is available for `Speaker Verification`. - 🤗 2021.12.14: [ASR](https://huggingface.co/spaces/KPatrick/PaddleSpeechASR) and [TTS](https://huggingface.co/spaces/KPatrick/PaddleSpeechTTS) Demos on Hugging Face Spaces are available! -- 👏🏻 2021.12.10: `CLI` is available for `Audio Classification`, `Automatic Speech Recognition`, `Speech Translation (English to Chinese)` and `Text-to-Speech`. - +- 👏🏻 2021.12.10: `PaddleSpeech CLI` is available for `Audio Classification`, `Automatic Speech Recognition`, `Speech Translation (English to Chinese)` and `Text-to-Speech`. ### Community - Scan the QR code below with your Wechat, you can access to official technical exchange group and get the bonus ( more than 20GB learning materials, such as papers, codes and videos ) and the live link of the lessons. Look forward to your participation. @@ -599,49 +604,56 @@ PaddleSpeech supports a series of most popular models. They are summarized in [r - HiFiGAN - LJSpeech / VCTK / CSMSC / AISHELL-3 + HiFiGAN + LJSpeech / VCTK / CSMSC / AISHELL-3 HiFiGAN-ljspeech / HiFiGAN-vctk / HiFiGAN-csmsc / HiFiGAN-aishell3 - WaveRNN - CSMSC + WaveRNN + CSMSC WaveRNN-csmsc - Voice Cloning + Voice Cloning GE2E Librispeech, etc. - ge2e + GE2E + + + + SV2TTS (GE2E + Tacotron2) + AISHELL-3 + + VC0 - GE2E + Tacotron2 + SV2TTS (GE2E + FastSpeech2) AISHELL-3 - ge2e-tacotron2-aishell3 + VC1 - GE2E + FastSpeech2 + SV2TTS (ECAPA-TDNN + FastSpeech2) AISHELL-3 - ge2e-fastspeech2-aishell3 + VC2 GE2E + VITS AISHELL-3 - ge2e-vits-aishell3 + VITS-VC - + End-to-End VITS CSMSC / AISHELL-3 @@ -876,8 +888,9 @@ You are warmly welcome to submit questions in [discussions](https://github.com/P

## Acknowledgement -- Many thanks to [david-95](https://github.com/david-95) improved TTS, fixed multi-punctuation bug, and contributed to multiple program and data. -- Many thanks to [BarryKCL](https://github.com/BarryKCL) improved TTS Chinses frontend based on [G2PW](https://github.com/GitYCC/g2pW) +- Many thanks to [HighCWu](https://github.com/HighCWu)for adding [VITS-aishell3](./examples/aishell3/vits) and [VITS-VC](./examples/aishell3/vits-vc) examples. +- Many thanks to [david-95](https://github.com/david-95) improved TTS, fixed multi-punctuation bug, and contributed to multiple program and data. +- Many thanks to [BarryKCL](https://github.com/BarryKCL) improved TTS Chinses frontend based on [G2PW](https://github.com/GitYCC/g2pW). - Many thanks to [yeyupiaoling](https://github.com/yeyupiaoling)/[PPASR](https://github.com/yeyupiaoling/PPASR)/[PaddlePaddle-DeepSpeech](https://github.com/yeyupiaoling/PaddlePaddle-DeepSpeech)/[VoiceprintRecognition-PaddlePaddle](https://github.com/yeyupiaoling/VoiceprintRecognition-PaddlePaddle)/[AudioClassification-PaddlePaddle](https://github.com/yeyupiaoling/AudioClassification-PaddlePaddle) for years of attention, constructive advice and great help. - Many thanks to [mymagicpower](https://github.com/mymagicpower) for the Java implementation of ASR upon [short](https://github.com/mymagicpower/AIAS/tree/main/3_audio_sdks/asr_sdk) and [long](https://github.com/mymagicpower/AIAS/tree/main/3_audio_sdks/asr_long_audio_sdk) audio files. - Many thanks to [JiehangXie](https://github.com/JiehangXie)/[PaddleBoBo](https://github.com/JiehangXie/PaddleBoBo) for developing Virtual Uploader(VUP)/Virtual YouTuber(VTuber) with PaddleSpeech TTS function. diff --git a/README_cn.md b/README_cn.md index 21cd00a99..590124648 100644 --- a/README_cn.md +++ b/README_cn.md @@ -181,12 +181,20 @@ ### 近期更新 - +- ⚡ 2022.08.25: 发布 TTS [finetune](./examples/other/tts_finetune/tts3) 示例。 +- 🔥 2022.08.22: 新增 ERNIE-SAT 模型: [ERNIE-SAT-vctk](./examples/vctk/ernie_sat)、[ERNIE-SAT-aishell3](./examples/aishell3/ernie_sat)、[ERNIE-SAT-zh_en](./examples/aishell3_vctk/ernie_sat)。 +- 🔥 2022.08.15: 将 [g2pW](https://github.com/GitYCC/g2pW) 引入 TTS 中文文本前端。 +- 🔥 2022.08.09: 发布[中英文混合 TTS](./examples/zh_en_tts/tts3)。 +- ⚡ 2022.08.03: TTS CLI 新增 ONNXRuntime 推理方式。 +- 🎉 2022.07.18: 发布 VITS 模型: [VITS-csmsc](./examples/csmsc/vits)、[VITS-aishell3](./examples/aishell3/vits)、[VITS-VC](./examples/aishell3/vits-vc)。 +- 🎉 2022.06.22: 所有 TTS 模型支持了 ONNX 格式。 +- 🍀 2022.06.17: 新增 [PaddleSpeech 网页应用](./demos/speech_web)。 - 👑 2022.05.13: PaddleSpeech 发布 [PP-ASR](./docs/source/asr/PPASR_cn.md) 流式语音识别系统、[PP-TTS](./docs/source/tts/PPTTS_cn.md) 流式语音合成系统、[PP-VPR](docs/source/vpr/PPVPR_cn.md) 全链路声纹识别系统 -- 👏🏻 2022.05.06: PaddleSpeech Streaming Server 上线! 覆盖了语音识别(标点恢复、时间戳),和语音合成。 -- 👏🏻 2022.05.06: PaddleSpeech Server 上线! 覆盖了声音分类、语音识别、语音合成、声纹识别,标点恢复。 -- 👏🏻 2022.03.28: PaddleSpeech CLI 覆盖声音分类、语音识别、语音翻译(英译中)、语音合成,声纹验证。 -- 🤗 2021.12.14: PaddleSpeech [ASR](https://huggingface.co/spaces/KPatrick/PaddleSpeechASR) and [TTS](https://huggingface.co/spaces/KPatrick/PaddleSpeechTTS) Demos on Hugging Face Spaces are available! +- 👏🏻 2022.05.06: PaddleSpeech Streaming Server 上线!覆盖了语音识别(标点恢复、时间戳)和语音合成。 +- 👏🏻 2022.05.06: PaddleSpeech Server 上线!覆盖了声音分类、语音识别、语音合成、声纹识别,标点恢复。 +- 👏🏻 2022.03.28: PaddleSpeech CLI 覆盖声音分类、语音识别、语音翻译(英译中)、语音合成和声纹验证。 +- 🤗 2021.12.14: PaddleSpeech [ASR](https://huggingface.co/spaces/KPatrick/PaddleSpeechASR) 和 [TTS](https://huggingface.co/spaces/KPatrick/PaddleSpeechTTS) 可在 Hugging Face Spaces 上体验! 
+- 👏🏻 2021.12.10: PaddleSpeech CLI 支持语音分类, 语音识别, 语音翻译(英译中)和语音合成。 ### 🔥 加入技术交流群获取入群福利 @@ -237,7 +245,6 @@ pip install . ## 快速开始 - 安装完成后,开发者可以通过命令行或者 Python 快速开始,命令行模式下改变 `--input` 可以尝试用自己的音频或文本测试,支持 16k wav 格式音频。 你也可以在 `aistudio` 中快速体验 👉🏻[一键预测,快速上手 Speech 开发任务](https://aistudio.baidu.com/aistudio/projectdetail/4353348?sUid=2470186&shared=1&ts=1660878142250)。 @@ -624,34 +631,40 @@ PaddleSpeech 的 **语音合成** 主要包含三个模块:文本前端、声 - 声音克隆 + 声音克隆 GE2E Librispeech, etc. - ge2e + GE2E - GE2E + Tacotron2 + SV2TTS (GE2E + Tacotron2) AISHELL-3 - ge2e-tacotron2-aishell3 + VC0 - GE2E + FastSpeech2 + SV2TTS (GE2E + FastSpeech2) AISHELL-3 - ge2e-fastspeech2-aishell3 + VC1 - GE2E + VITS + SV2TTS (ECAPA-TDNN + FastSpeech2) AISHELL-3 - ge2e-vits-aishell3 + VC2 + + GE2E + VITS + AISHELL-3 + + VITS-VC + 端到端 @@ -896,8 +909,9 @@ PaddleSpeech 的 **语音合成** 主要包含三个模块:文本前端、声

## 致谢 -- 非常感谢 [david-95](https://github.com/david-95)修复句尾多标点符号出错的问题,补充frontend语音polyphonic 数据,贡献补充多条程序和数据 -- 非常感谢 [BarryKCL](https://github.com/BarryKCL)基于[G2PW](https://github.com/GitYCC/g2pW)对TTS中文文本前端的优化。 +- 非常感谢 [HighCWu](https://github.com/HighCWu) 新增 [VITS-aishell3](./examples/aishell3/vits) 和 [VITS-VC](./examples/aishell3/vits-vc) 代码示例。 +- 非常感谢 [david-95](https://github.com/david-95) 修复句尾多标点符号出错的问题,贡献补充多条程序和数据。 +- 非常感谢 [BarryKCL](https://github.com/BarryKCL) 基于 [G2PW](https://github.com/GitYCC/g2pW) 对 TTS 中文文本前端的优化。 - 非常感谢 [yeyupiaoling](https://github.com/yeyupiaoling)/[PPASR](https://github.com/yeyupiaoling/PPASR)/[PaddlePaddle-DeepSpeech](https://github.com/yeyupiaoling/PaddlePaddle-DeepSpeech)/[VoiceprintRecognition-PaddlePaddle](https://github.com/yeyupiaoling/VoiceprintRecognition-PaddlePaddle)/[AudioClassification-PaddlePaddle](https://github.com/yeyupiaoling/AudioClassification-PaddlePaddle) 多年来的关注和建议,以及在诸多问题上的帮助。 - 非常感谢 [mymagicpower](https://github.com/mymagicpower) 采用PaddleSpeech 对 ASR 的[短语音](https://github.com/mymagicpower/AIAS/tree/main/3_audio_sdks/asr_sdk)及[长语音](https://github.com/mymagicpower/AIAS/tree/main/3_audio_sdks/asr_long_audio_sdk)进行 Java 实现。 - 非常感谢 [JiehangXie](https://github.com/JiehangXie)/[PaddleBoBo](https://github.com/JiehangXie/PaddleBoBo) 采用 PaddleSpeech 语音合成功能实现 Virtual Uploader(VUP)/Virtual YouTuber(VTuber) 虚拟主播。 diff --git a/demos/text_to_speech/README.md b/demos/text_to_speech/README.md index 3288ecf2f..41dcf820b 100644 --- a/demos/text_to_speech/README.md +++ b/demos/text_to_speech/README.md @@ -16,8 +16,8 @@ You can choose one way from easy, meduim and hard to install paddlespeech. The input of this demo should be a text of the specific language that can be passed via argument. ### 3. Usage - Command Line (Recommended) + The default acoustic model is `Fastspeech2`, and the default vocoder is `HiFiGAN`, the default inference method is dygraph inference. - Chinese - The default acoustic model is `Fastspeech2`, and the default vocoder is `Parallel WaveGAN`. ```bash paddlespeech tts --input "你好,欢迎使用百度飞桨深度学习框架!" ``` @@ -58,6 +58,20 @@ The input of this demo should be a text of the specific language that can be pas paddlespeech tts --am fastspeech2_mix --voc pwgan_csmsc --lang mix --input "我们的声学模型使用了 Fast Speech Two, 声码器使用了 Parallel Wave GAN and Hifi GAN." --spk_id 175 --output mix_spk175_pwgan.wav paddlespeech tts --am fastspeech2_mix --voc hifigan_csmsc --lang mix --input "我们的声学模型使用了 Fast Speech Two, 声码器使用了 Parallel Wave GAN and Hifi GAN." --spk_id 175 --output mix_spk175.wav ``` + - Use ONNXRuntime infer: + ```bash + paddlespeech tts --input "你好,欢迎使用百度飞桨深度学习框架!" --output default.wav --use_onnx True + paddlespeech tts --am speedyspeech_csmsc --input "你好,欢迎使用百度飞桨深度学习框架!" --output ss.wav --use_onnx True + paddlespeech tts --voc mb_melgan_csmsc --input "你好,欢迎使用百度飞桨深度学习框架!" --output mb.wav --use_onnx True + paddlespeech tts --voc pwgan_csmsc --input "你好,欢迎使用百度飞桨深度学习框架!" --output pwgan.wav --use_onnx True + paddlespeech tts --am fastspeech2_aishell3 --voc pwgan_aishell3 --input "你好,欢迎使用百度飞桨深度学习框架!" --spk_id 0 --output aishell3_fs2_pwgan.wav --use_onnx True + paddlespeech tts --am fastspeech2_aishell3 --voc hifigan_aishell3 --input "你好,欢迎使用百度飞桨深度学习框架!" --spk_id 0 --output aishell3_fs2_hifigan.wav --use_onnx True + paddlespeech tts --am fastspeech2_ljspeech --voc pwgan_ljspeech --lang en --input "Life was like a box of chocolates, you never know what you're gonna get." 
--output lj_fs2_pwgan.wav --use_onnx True + paddlespeech tts --am fastspeech2_ljspeech --voc hifigan_ljspeech --lang en --input "Life was like a box of chocolates, you never know what you're gonna get." --output lj_fs2_hifigan.wav --use_onnx True + paddlespeech tts --am fastspeech2_vctk --voc pwgan_vctk --input "Life was like a box of chocolates, you never know what you're gonna get." --lang en --spk_id 0 --output vctk_fs2_pwgan.wav --use_onnx True + paddlespeech tts --am fastspeech2_vctk --voc hifigan_vctk --input "Life was like a box of chocolates, you never know what you're gonna get." --lang en --spk_id 0 --output vctk_fs2_hifigan.wav --use_onnx True + ``` + Usage: ```bash @@ -80,6 +94,8 @@ The input of this demo should be a text of the specific language that can be pas - `lang`: Language of tts task. Default: `zh`. - `device`: Choose device to execute model inference. Default: default device of paddlepaddle in current environment. - `output`: Output wave filepath. Default: `output.wav`. + - `use_onnx`: whether to usen ONNXRuntime inference. + - `fs`: sample rate for ONNX models when use specified model files. Output: ```bash @@ -87,38 +103,50 @@ The input of this demo should be a text of the specific language that can be pas ``` - Python API - ```python - import paddle - from paddlespeech.cli.tts import TTSExecutor - - tts_executor = TTSExecutor() - wav_file = tts_executor( - text='今天的天气不错啊', - output='output.wav', - am='fastspeech2_csmsc', - am_config=None, - am_ckpt=None, - am_stat=None, - spk_id=0, - phones_dict=None, - tones_dict=None, - speaker_dict=None, - voc='pwgan_csmsc', - voc_config=None, - voc_ckpt=None, - voc_stat=None, - lang='zh', - device=paddle.get_device()) - print('Wave file has been generated: {}'.format(wav_file)) - ``` - + - Dygraph infer: + ```python + import paddle + from paddlespeech.cli.tts import TTSExecutor + tts_executor = TTSExecutor() + wav_file = tts_executor( + text='今天的天气不错啊', + output='output.wav', + am='fastspeech2_csmsc', + am_config=None, + am_ckpt=None, + am_stat=None, + spk_id=0, + phones_dict=None, + tones_dict=None, + speaker_dict=None, + voc='pwgan_csmsc', + voc_config=None, + voc_ckpt=None, + voc_stat=None, + lang='zh', + device=paddle.get_device()) + print('Wave file has been generated: {}'.format(wav_file)) + ``` + - ONNXRuntime infer: + ```python + from paddlespeech.cli.tts import TTSExecutor + tts_executor = TTSExecutor() + wav_file = tts_executor( + text='对数据集进行预处理', + output='output.wav', + am='fastspeech2_csmsc', + voc='hifigan_csmsc', + lang='zh', + use_onnx=True, + cpu_threads=2) + ``` + Output: ```bash Wave file has been generated: output.wav ``` ### 4. Pretrained Models - Here is a list of pretrained models released by PaddleSpeech that can be used by command and python API: - Acoustic model diff --git a/demos/text_to_speech/README_cn.md b/demos/text_to_speech/README_cn.md index ec5eb5ae9..4a4132238 100644 --- a/demos/text_to_speech/README_cn.md +++ b/demos/text_to_speech/README_cn.md @@ -1,26 +1,23 @@ (简体中文|[English](./README.md)) # 语音合成 - ## 介绍 语音合成是一种自然语言建模过程,其将文本转换为语音以进行音频演示。 这个 demo 是一个从给定文本生成音频的实现,它可以通过使用 `PaddleSpeech` 的单个命令或 python 中的几行代码来实现。 - ## 使用方法 ### 1. 安装 请看[安装文档](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/docs/source/install_cn.md)。 -你可以从 easy,medium,hard 三中方式中选择一种方式安装。 +你可以从 easy,medium,hard 三种方式中选择一种方式安装。 ### 2. 准备输入 这个 demo 的输入是通过参数传递的特定语言的文本。 ### 3. 使用方法 - 命令行 (推荐使用) + 默认的声学模型是 `Fastspeech2`,默认的声码器是 `HiFiGAN`,默认推理方式是动态图推理。 - 中文 - - 默认的声学模型是 `Fastspeech2`,默认的声码器是 `Parallel WaveGAN`. 
```bash paddlespeech tts --input "你好,欢迎使用百度飞桨深度学习框架!" ``` @@ -61,6 +58,19 @@ paddlespeech tts --am fastspeech2_mix --voc pwgan_csmsc --lang mix --input "我们的声学模型使用了 Fast Speech Two, 声码器使用了 Parallel Wave GAN and Hifi GAN." --spk_id 175 --output mix_spk175_pwgan.wav paddlespeech tts --am fastspeech2_mix --voc hifigan_csmsc --lang mix --input "我们的声学模型使用了 Fast Speech Two, 声码器使用了 Parallel Wave GAN and Hifi GAN." --spk_id 175 --output mix_spk175.wav ``` + - 使用 ONNXRuntime 推理: + ```bash + paddlespeech tts --input "你好,欢迎使用百度飞桨深度学习框架!" --output default.wav --use_onnx True + paddlespeech tts --am speedyspeech_csmsc --input "你好,欢迎使用百度飞桨深度学习框架!" --output ss.wav --use_onnx True + paddlespeech tts --voc mb_melgan_csmsc --input "你好,欢迎使用百度飞桨深度学习框架!" --output mb.wav --use_onnx True + paddlespeech tts --voc pwgan_csmsc --input "你好,欢迎使用百度飞桨深度学习框架!" --output pwgan.wav --use_onnx True + paddlespeech tts --am fastspeech2_aishell3 --voc pwgan_aishell3 --input "你好,欢迎使用百度飞桨深度学习框架!" --spk_id 0 --output aishell3_fs2_pwgan.wav --use_onnx True + paddlespeech tts --am fastspeech2_aishell3 --voc hifigan_aishell3 --input "你好,欢迎使用百度飞桨深度学习框架!" --spk_id 0 --output aishell3_fs2_hifigan.wav --use_onnx True + paddlespeech tts --am fastspeech2_ljspeech --voc pwgan_ljspeech --lang en --input "Life was like a box of chocolates, you never know what you're gonna get." --output lj_fs2_pwgan.wav --use_onnx True + paddlespeech tts --am fastspeech2_ljspeech --voc hifigan_ljspeech --lang en --input "Life was like a box of chocolates, you never know what you're gonna get." --output lj_fs2_hifigan.wav --use_onnx True + paddlespeech tts --am fastspeech2_vctk --voc pwgan_vctk --input "Life was like a box of chocolates, you never know what you're gonna get." --lang en --spk_id 0 --output vctk_fs2_pwgan.wav --use_onnx True + paddlespeech tts --am fastspeech2_vctk --voc hifigan_vctk --input "Life was like a box of chocolates, you never know what you're gonna get." 
--lang en --spk_id 0 --output vctk_fs2_hifigan.wav --use_onnx True + ``` 使用方法: @@ -84,6 +94,8 @@ - `lang`:TTS 任务的语言, 默认值:`zh`。 - `device`:执行预测的设备, 默认值:当前系统下 paddlepaddle 的默认 device。 - `output`:输出音频的路径, 默认值:`output.wav`。 + - `use_onnx`: 是否使用 ONNXRuntime 进行推理。 + - `fs`: 使用特定 ONNX 模型时的采样率。 输出: ```bash @@ -91,31 +103,44 @@ ``` - Python API - ```python - import paddle - from paddlespeech.cli.tts import TTSExecutor - - tts_executor = TTSExecutor() - wav_file = tts_executor( - text='今天的天气不错啊', - output='output.wav', - am='fastspeech2_csmsc', - am_config=None, - am_ckpt=None, - am_stat=None, - spk_id=0, - phones_dict=None, - tones_dict=None, - speaker_dict=None, - voc='pwgan_csmsc', - voc_config=None, - voc_ckpt=None, - voc_stat=None, - lang='zh', - device=paddle.get_device()) - print('Wave file has been generated: {}'.format(wav_file)) - ``` - + - 动态图推理: + ```python + import paddle + from paddlespeech.cli.tts import TTSExecutor + tts_executor = TTSExecutor() + wav_file = tts_executor( + text='今天的天气不错啊', + output='output.wav', + am='fastspeech2_csmsc', + am_config=None, + am_ckpt=None, + am_stat=None, + spk_id=0, + phones_dict=None, + tones_dict=None, + speaker_dict=None, + voc='pwgan_csmsc', + voc_config=None, + voc_ckpt=None, + voc_stat=None, + lang='zh', + device=paddle.get_device()) + print('Wave file has been generated: {}'.format(wav_file)) + ``` + - ONNXRuntime 推理: + ```python + from paddlespeech.cli.tts import TTSExecutor + tts_executor = TTSExecutor() + wav_file = tts_executor( + text='对数据集进行预处理', + output='output.wav', + am='fastspeech2_csmsc', + voc='hifigan_csmsc', + lang='zh', + use_onnx=True, + cpu_threads=2) + ``` + 输出: ```bash Wave file has been generated: output.wav From 056622d5f637565d90527407ca6c93e595b4c202 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Thu, 8 Sep 2022 07:53:48 +0000 Subject: [PATCH 078/101] update speech_server config, test=tts --- README.md | 2 +- README_cn.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 2fb281e7f..f17cec13a 100644 --- a/README.md +++ b/README.md @@ -381,7 +381,7 @@ Developers can have a try of our speech server with [PaddleSpeech Server Command **Start server** ```shell -paddlespeech_server start --config_file ./paddlespeech/server/conf/application.yaml +paddlespeech_server start --config_file ./demos/speech_server/conf/application.yaml ``` **Access Speech Recognition Services** diff --git a/README_cn.md b/README_cn.md index 590124648..070a656a2 100644 --- a/README_cn.md +++ b/README_cn.md @@ -400,7 +400,7 @@ python API 一键预测 **启动服务** ```shell -paddlespeech_server start --config_file ./paddlespeech/server/conf/application.yaml +paddlespeech_server start --config_file ./demos/speech_server/conf/application.yaml ``` **访问语音识别服务** From f7873773bff4b26cc6c621194a8bc176e7c2d24c Mon Sep 17 00:00:00 2001 From: TianYuan Date: Thu, 8 Sep 2022 19:10:38 +0800 Subject: [PATCH 079/101] uadd __init__.py for VITS, test=tts (#2362) --- paddlespeech/t2s/exps/vits/__init__.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 paddlespeech/t2s/exps/vits/__init__.py diff --git a/paddlespeech/t2s/exps/vits/__init__.py b/paddlespeech/t2s/exps/vits/__init__.py new file mode 100644 index 000000000..abf198b97 --- /dev/null +++ b/paddlespeech/t2s/exps/vits/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. From b76bcc482efc6806852f71d8474b5c399bdbb896 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Thu, 8 Sep 2022 19:33:52 +0800 Subject: [PATCH 080/101] Delete CHANGELOG.md --- CHANGELOG.md | 66 ---------------------------------------------------- 1 file changed, 66 deletions(-) delete mode 100644 CHANGELOG.md diff --git a/CHANGELOG.md b/CHANGELOG.md deleted file mode 100644 index 2782b8176..000000000 --- a/CHANGELOG.md +++ /dev/null @@ -1,66 +0,0 @@ -# Changelog - -Date: 2022-3-22, Author: yt605155624. -Add features to: CLI: - - Support aishell3_hifigan、vctk_hifigan - - PRLink: https://github.com/PaddlePaddle/PaddleSpeech/pull/1587 - -Date: 2022-3-09, Author: yt605155624. -Add features to: T2S: - - Add ljspeech hifigan egs. - - PRLink: https://github.com/PaddlePaddle/PaddleSpeech/pull/1549 - -Date: 2022-3-08, Author: yt605155624. -Add features to: T2S: - - Add aishell3 hifigan egs. - - PRLink: https://github.com/PaddlePaddle/PaddleSpeech/pull/1545 - -Date: 2022-3-08, Author: yt605155624. -Add features to: T2S: - - Add vctk hifigan egs. - - PRLink: https://github.com/PaddlePaddle/PaddleSpeech/pull/1544 - -Date: 2022-1-29, Author: yt605155624. -Add features to: T2S: - - Update aishell3 vc0 with new Tacotron2. - - PRLink: https://github.com/PaddlePaddle/PaddleSpeech/pull/1419 - -Date: 2022-1-29, Author: yt605155624. -Add features to: T2S: - - Add ljspeech Tacotron2. - - PRLink: https://github.com/PaddlePaddle/PaddleSpeech/pull/1416 - -Date: 2022-1-24, Author: yt605155624. -Add features to: T2S: - - Add csmsc WaveRNN. - - PRLink: https://github.com/PaddlePaddle/PaddleSpeech/pull/1379 - -Date: 2022-1-19, Author: yt605155624. -Add features to: T2S: - - Add csmsc Tacotron2. - - PRLink: https://github.com/PaddlePaddle/PaddleSpeech/pull/1314 - - -Date: 2022-1-10, Author: Jackwaterveg. -Add features to: CLI: - - Support English (librispeech/asr1/transformer). - - Support choosing `decode_method` for conformer and transformer models. - - Refactor the config, using the unified config. - - PRLink: https://github.com/PaddlePaddle/PaddleSpeech/pull/1297 - -*** - -Date: 2022-1-17, Author: Jackwaterveg. -Add features to: CLI: - - Support deepspeech2 online/offline model(aishell). - - PRLink: https://github.com/PaddlePaddle/PaddleSpeech/pull/1356 - -*** - -Date: 2022-1-24, Author: Jackwaterveg. -Add features to: ctc_decoders: - - Support online ctc prefix-beam search decoder. - - Unified ctc online decoder and ctc offline decoder. 
- - PRLink: https://github.com/PaddlePaddle/PaddleSpeech/pull/821 - -*** From 82e04d7815a8cf1935f2fec5cddc03bdb87c8484 Mon Sep 17 00:00:00 2001 From: tianhao zhang <15600919271@163.com> Date: Thu, 8 Sep 2022 12:28:19 +0000 Subject: [PATCH 081/101] fix trianer --- examples/aishell/asr1/run.sh | 4 ++-- paddlespeech/s2t/training/trainer.py | 5 ++++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/examples/aishell/asr1/run.sh b/examples/aishell/asr1/run.sh index bd4f50e3f..701dcd2ac 100644 --- a/examples/aishell/asr1/run.sh +++ b/examples/aishell/asr1/run.sh @@ -2,8 +2,8 @@ source path.sh set -e -gpus=0,1,2,3 -stage=0 +gpus=1 +stage=1 stop_stage=50 conf_path=conf/conformer.yaml ips= #xx.xx.xx.xx,xx.xx.xx.xx diff --git a/paddlespeech/s2t/training/trainer.py b/paddlespeech/s2t/training/trainer.py index a7eb9892d..d1bd30fef 100644 --- a/paddlespeech/s2t/training/trainer.py +++ b/paddlespeech/s2t/training/trainer.py @@ -19,6 +19,9 @@ from pathlib import Path import paddle from paddle import distributed as dist +import pdb +pdb.set_trace() +dist.init_parallel_env() from visualdl import LogWriter from paddlespeech.s2t.training.reporter import ObsScope @@ -176,7 +179,7 @@ class Trainer(): def init_parallel(self): """Init environment for multiprocess training. """ - dist.init_parallel_env() + # dist.init_parallel_env() @mp_tools.rank_zero_only def save(self, tag=None, infos: dict=None): From e04f111b8a71f4c150bb3c0c650efa4fbd27196d Mon Sep 17 00:00:00 2001 From: WongLaw <95171490+WongLaw@users.noreply.github.com> Date: Thu, 8 Sep 2022 20:34:14 +0800 Subject: [PATCH 082/101] Added pre-install doc for G2P and TN modules and updated the dependency version of pypinyin (#2364) * Added pre-install doc for G2P and TN modules and updated the dependency version of pypinyin, test=doc --- docs/requirements.txt | 2 +- examples/other/g2p/README.md | 3 +++ examples/other/tn/README.md | 3 +++ setup.py | 2 +- 4 files changed, 8 insertions(+), 2 deletions(-) diff --git a/docs/requirements.txt b/docs/requirements.txt index bd071e7e2..3fb82367f 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -27,7 +27,7 @@ pattern_singleton Pillow>=9.0.0 praatio==5.0.0 prettytable -pypinyin +pypinyin<=0.44.0 pypinyin-dict python-dateutil pyworld==0.2.12 diff --git a/examples/other/g2p/README.md b/examples/other/g2p/README.md index 85c9535d1..a1911b2f6 100644 --- a/examples/other/g2p/README.md +++ b/examples/other/g2p/README.md @@ -9,6 +9,9 @@ We use `WER` as an evaluation criterion. Run the command below to get the results of the test. ```bash +cd ../../../tools +bash extras/install_sclite.sh +cd - ./run.sh ``` diff --git a/examples/other/tn/README.md b/examples/other/tn/README.md index 3b80de661..cae89a36a 100644 --- a/examples/other/tn/README.md +++ b/examples/other/tn/README.md @@ -5,6 +5,9 @@ We use `CER` as an evaluation criterion. ## Start Run the command below to get the results of the test. 
```bash +cd ../../../tools +bash extras/install_sclite.sh +cd - ./run.sh ``` The `avg CER` of text normalization is: 0.00730093543235227 diff --git a/setup.py b/setup.py index fac9e1207..e551d9fa6 100644 --- a/setup.py +++ b/setup.py @@ -52,7 +52,7 @@ base = [ "Pillow>=9.0.0", "praatio==5.0.0", "protobuf>=3.1.0, <=3.20.0", - "pypinyin", + "pypinyin<=0.44.0", "pypinyin-dict", "python-dateutil", "pyworld==0.2.12", From 881618dc2a8f5aff10e38abed6cde72a3f4454d5 Mon Sep 17 00:00:00 2001 From: "david.95" Date: Fri, 9 Sep 2022 10:54:29 +0800 Subject: [PATCH 083/101] add tool to compare two tests results, show difference -t tts --- examples/other/g2p/compare_badcase.py | 63 +++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) create mode 100644 examples/other/g2p/compare_badcase.py diff --git a/examples/other/g2p/compare_badcase.py b/examples/other/g2p/compare_badcase.py new file mode 100644 index 000000000..a4a78a23f --- /dev/null +++ b/examples/other/g2p/compare_badcase.py @@ -0,0 +1,63 @@ +# -*- encoding:utf-8 -*- +import re +import sys +''' +@arthur: david_95 + +Assum you executed g2p test twice, the WER rate have some gap, you would like to see what sentences error cause your rate up. +so you may get test result ( exp/g2p )into two directories, as exp/prefolder and exp/curfolder +run this program as "python compare_badcase.py prefolder curfolder" +then you will get diffrences between two run, uuid, phonetics, chinese samples + +''' + + +def compare(prefolder, curfolder): + ''' + compare file of text.g2p.pra in two folders + result P1 will be prefolder ; P2 will be curfolder, just about the sequence you input in argvs + ''' + + linecnt = 0 + pre_block = [] + cur_block = [] + zh_lines = [] + with open(prefolder + "/text.g2p.pra", "r") as pre_file, open( + curfolder + "/text.g2p.pra", "r") as cur_file: + for pre_line, cur_line in zip(pre_file, cur_file): + linecnt += 1 + + if linecnt < 11: #skip non-data head in files + continue + else: + pre_block.append(pre_line.strip()) + cur_block.append(cur_line.strip()) + if pre_line.strip().startswith( + "Eval:") and pre_line.strip() != cur_line.strip(): + uuid = pre_block[-5].replace("id: (baker_", "").replace(")", + "") + with open("data/g2p/text", 'r') as txt: + conlines = txt.readlines() + + for line in conlines: + if line.strip().startswith(uuid.strip()): + print(line) + zh_lines.append(re.sub(r"#[1234]", "", line)) + break + + print("*" + cur_block[-3]) # ref + print("P1 " + pre_block[-2]) + print("P2 " + cur_block[-2]) + print("P1 " + pre_block[-1]) + print("P2 " + cur_block[-1] + "\n\n") + pre_block = [] + cur_block = [] + + print("\n") + print(str.join("\n", zh_lines)) + + +if __name__ == '__main__': + assert len( + sys.argv) == 3, "Usage: python compare_badcase.py %prefolder %curfolder" + compare(sys.argv[1], sys.argv[2]) From 61422e71e32782b57f4deccec737365e5b4418c7 Mon Sep 17 00:00:00 2001 From: "david.95" Date: Fri, 9 Sep 2022 11:34:03 +0800 Subject: [PATCH 084/101] add tool to compare test badcase and add run examples,test=tts --- examples/other/g2p/compare_badcase.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/examples/other/g2p/compare_badcase.py b/examples/other/g2p/compare_badcase.py index a4a78a23f..d762459c3 100644 --- a/examples/other/g2p/compare_badcase.py +++ b/examples/other/g2p/compare_badcase.py @@ -9,6 +9,9 @@ so you may get test result ( exp/g2p )into two directories, as exp/prefolder and run this program as "python compare_badcase.py prefolder curfolder" then you will get diffrences between two run, uuid, 
phonetics, chinese samples +examples: python compare_badcase.py exp/g2p_laotouzi exp/g2p +in this example: exp/g2p_laotouzi and exp/g2p are two folders with two g2p tests result + ''' From 0975a332c4652301d659be7adc6184a9236e980f Mon Sep 17 00:00:00 2001 From: TianYuan Date: Fri, 9 Sep 2022 15:53:14 +0800 Subject: [PATCH 085/101] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f17cec13a..59c61f776 100644 --- a/README.md +++ b/README.md @@ -888,7 +888,7 @@ You are warmly welcome to submit questions in [discussions](https://github.com/P

## Acknowledgement -- Many thanks to [HighCWu](https://github.com/HighCWu)for adding [VITS-aishell3](./examples/aishell3/vits) and [VITS-VC](./examples/aishell3/vits-vc) examples. +- Many thanks to [HighCWu](https://github.com/HighCWu) for adding [VITS-aishell3](./examples/aishell3/vits) and [VITS-VC](./examples/aishell3/vits-vc) examples. - Many thanks to [david-95](https://github.com/david-95) improved TTS, fixed multi-punctuation bug, and contributed to multiple program and data. - Many thanks to [BarryKCL](https://github.com/BarryKCL) improved TTS Chinses frontend based on [G2PW](https://github.com/GitYCC/g2pW). - Many thanks to [yeyupiaoling](https://github.com/yeyupiaoling)/[PPASR](https://github.com/yeyupiaoling/PPASR)/[PaddlePaddle-DeepSpeech](https://github.com/yeyupiaoling/PaddlePaddle-DeepSpeech)/[VoiceprintRecognition-PaddlePaddle](https://github.com/yeyupiaoling/VoiceprintRecognition-PaddlePaddle)/[AudioClassification-PaddlePaddle](https://github.com/yeyupiaoling/AudioClassification-PaddlePaddle) for years of attention, constructive advice and great help. From 9560d650dbfc5df59af39aa33ea74a0b4081796f Mon Sep 17 00:00:00 2001 From: tianhao zhang <15600919271@163.com> Date: Fri, 9 Sep 2022 08:30:10 +0000 Subject: [PATCH 086/101] fix dp init --- paddlespeech/s2t/training/trainer.py | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/paddlespeech/s2t/training/trainer.py b/paddlespeech/s2t/training/trainer.py index d1bd30fef..1093e4a12 100644 --- a/paddlespeech/s2t/training/trainer.py +++ b/paddlespeech/s2t/training/trainer.py @@ -19,9 +19,8 @@ from pathlib import Path import paddle from paddle import distributed as dist -import pdb -pdb.set_trace() dist.init_parallel_env() + from visualdl import LogWriter from paddlespeech.s2t.training.reporter import ObsScope @@ -125,9 +124,6 @@ class Trainer(): else: raise Exception("invalid device") - if self.parallel: - self.init_parallel() - self.checkpoint = Checkpoint( kbest_n=self.config.checkpoint.kbest_n, latest_n=self.config.checkpoint.latest_n) @@ -176,11 +172,6 @@ class Trainer(): """ return self.args.ngpu > 1 - def init_parallel(self): - """Init environment for multiprocess training. - """ - # dist.init_parallel_env() - @mp_tools.rank_zero_only def save(self, tag=None, infos: dict=None): """Save checkpoint (model parameters and optimizer states). From 989b755e8e9cfba8e8bb5fad7f672275980e1c1e Mon Sep 17 00:00:00 2001 From: WongLaw <95171490+WongLaw@users.noreply.github.com> Date: Fri, 9 Sep 2022 16:55:58 +0800 Subject: [PATCH 087/101] Revised must_neural_tone_words, test=doc. (#2370) * Revised must_neural_tone_words. --- paddlespeech/t2s/exps/vits/__init__.py | 2 +- paddlespeech/t2s/frontend/tone_sandhi.py | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/paddlespeech/t2s/exps/vits/__init__.py b/paddlespeech/t2s/exps/vits/__init__.py index abf198b97..97043fd7b 100644 --- a/paddlespeech/t2s/exps/vits/__init__.py +++ b/paddlespeech/t2s/exps/vits/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/paddlespeech/t2s/frontend/tone_sandhi.py b/paddlespeech/t2s/frontend/tone_sandhi.py index 9fff4272c..10a9540c3 100644 --- a/paddlespeech/t2s/frontend/tone_sandhi.py +++ b/paddlespeech/t2s/frontend/tone_sandhi.py @@ -30,7 +30,7 @@ class ToneSandhi(): '蛤蟆', '蘑菇', '薄荷', '葫芦', '葡萄', '萝卜', '荸荠', '苗条', '苗头', '苍蝇', '芝麻', '舒服', '舒坦', '舌头', '自在', '膏药', '脾气', '脑袋', '脊梁', '能耐', '胳膊', '胭脂', '胡萝', '胡琴', '胡同', '聪明', '耽误', '耽搁', '耷拉', '耳朵', '老爷', '老实', '老婆', - '老头', '老太', '翻腾', '罗嗦', '罐头', '编辑', '结实', '红火', '累赘', '糨糊', '糊涂', + '戏弄', '将军', '翻腾', '罗嗦', '罐头', '编辑', '结实', '红火', '累赘', '糨糊', '糊涂', '精神', '粮食', '簸箕', '篱笆', '算计', '算盘', '答应', '笤帚', '笑语', '笑话', '窟窿', '窝囊', '窗户', '稳当', '稀罕', '称呼', '秧歌', '秀气', '秀才', '福气', '祖宗', '砚台', '码头', '石榴', '石头', '石匠', '知识', '眼睛', '眯缝', '眨巴', '眉毛', '相声', '盘算', @@ -59,8 +59,7 @@ class ToneSandhi(): '下巴', '上头', '上司', '丈夫', '丈人', '一辈', '那个', '菩萨', '父亲', '母亲', '咕噜', '邋遢', '费用', '冤家', '甜头', '介绍', '荒唐', '大人', '泥鳅', '幸福', '熟悉', '计划', '扑腾', '蜡烛', '姥爷', '照顾', '喉咙', '吉他', '弄堂', '蚂蚱', '凤凰', '拖沓', '寒碜', - '糟蹋', '倒腾', '报复', '逻辑', '盘缠', '喽啰', '牢骚', '咖喱', '扫把', '惦记', '戏弄', - '将军' + '糟蹋', '倒腾', '报复', '逻辑', '盘缠', '喽啰', '牢骚', '咖喱', '扫把', '惦记' } self.must_not_neural_tone_words = { '男子', '女子', '分子', '原子', '量子', '莲子', '石子', '瓜子', '电子', '人人', '虎虎', From 6745e9dd6b176123bd9dea80576945bad7f0a0ea Mon Sep 17 00:00:00 2001 From: tianhao zhang <15600919271@163.com> Date: Fri, 9 Sep 2022 09:23:56 +0000 Subject: [PATCH 088/101] fix dp init --- paddlespeech/s2t/models/u2/u2.py | 4 +-- paddlespeech/s2t/modules/attention.py | 36 +++++++++---------- .../s2t/modules/conformer_convolution.py | 4 +-- paddlespeech/s2t/modules/decoder_layer.py | 17 +++------ paddlespeech/s2t/modules/encoder.py | 14 +++----- paddlespeech/s2t/modules/encoder_layer.py | 16 ++++----- .../engine/asr/online/python/asr_engine.py | 3 +- 7 files changed, 38 insertions(+), 56 deletions(-) diff --git a/paddlespeech/s2t/models/u2/u2.py b/paddlespeech/s2t/models/u2/u2.py index 813e1e529..8a9849492 100644 --- a/paddlespeech/s2t/models/u2/u2.py +++ b/paddlespeech/s2t/models/u2/u2.py @@ -605,8 +605,8 @@ class U2BaseModel(ASRInterface, nn.Layer): xs: paddle.Tensor, offset: int, required_cache_size: int, - att_cache: paddle.Tensor, # paddle.zeros([0, 0, 0, 0]) - cnn_cache: paddle.Tensor, # paddle.zeros([0, 0, 0, 0]) + att_cache: paddle.Tensor=paddle.zeros([0, 0, 0, 0]), + cnn_cache: paddle.Tensor=paddle.zeros([0, 0, 0, 0]) ) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor]: """ Export interface for c++ call, give input chunk xs, and return output from time 0 to current chunk. diff --git a/paddlespeech/s2t/modules/attention.py b/paddlespeech/s2t/modules/attention.py index 92990048d..2d236743a 100644 --- a/paddlespeech/s2t/modules/attention.py +++ b/paddlespeech/s2t/modules/attention.py @@ -86,7 +86,7 @@ class MultiHeadedAttention(nn.Layer): self, value: paddle.Tensor, scores: paddle.Tensor, - mask: paddle.Tensor, # paddle.ones([0, 0, 0], dtype=paddle.bool) + mask: paddle.Tensor=paddle.ones([0, 0, 0], dtype=paddle.bool) ) -> paddle.Tensor: """Compute attention context vector. 
Args: @@ -127,15 +127,14 @@ class MultiHeadedAttention(nn.Layer): return self.linear_out(x) # (batch, time1, d_model) - def forward( - self, - query: paddle.Tensor, - key: paddle.Tensor, - value: paddle.Tensor, - mask: paddle.Tensor, # paddle.ones([0,0,0], dtype=paddle.bool) - pos_emb: paddle.Tensor, # paddle.empty([0]) - cache: paddle.Tensor # paddle.zeros([0,0,0,0]) - ) -> Tuple[paddle.Tensor, paddle.Tensor]: + def forward(self, + query: paddle.Tensor, + key: paddle.Tensor, + value: paddle.Tensor, + mask: paddle.Tensor=paddle.ones([0, 0, 0], dtype=paddle.bool), + pos_emb: paddle.Tensor=paddle.empty([0]), + cache: paddle.Tensor=paddle.zeros([0, 0, 0, 0]) + ) -> Tuple[paddle.Tensor, paddle.Tensor]: """Compute scaled dot product attention. Args: query (paddle.Tensor): Query tensor (#batch, time1, size). @@ -244,15 +243,14 @@ class RelPositionMultiHeadedAttention(MultiHeadedAttention): return x - def forward( - self, - query: paddle.Tensor, - key: paddle.Tensor, - value: paddle.Tensor, - mask: paddle.Tensor, # paddle.ones([0,0,0], dtype=paddle.bool) - pos_emb: paddle.Tensor, # paddle.empty([0]) - cache: paddle.Tensor # paddle.zeros([0,0,0,0]) - ) -> Tuple[paddle.Tensor, paddle.Tensor]: + def forward(self, + query: paddle.Tensor, + key: paddle.Tensor, + value: paddle.Tensor, + mask: paddle.Tensor=paddle.ones([0, 0, 0], dtype=paddle.bool), + pos_emb: paddle.Tensor=paddle.empty([0]), + cache: paddle.Tensor=paddle.zeros([0, 0, 0, 0]) + ) -> Tuple[paddle.Tensor, paddle.Tensor]: """Compute 'Scaled Dot Product Attention' with rel. positional encoding. Args: query (paddle.Tensor): Query tensor (#batch, time1, size). diff --git a/paddlespeech/s2t/modules/conformer_convolution.py b/paddlespeech/s2t/modules/conformer_convolution.py index b35fea5b9..be6056546 100644 --- a/paddlespeech/s2t/modules/conformer_convolution.py +++ b/paddlespeech/s2t/modules/conformer_convolution.py @@ -108,8 +108,8 @@ class ConvolutionModule(nn.Layer): def forward( self, x: paddle.Tensor, - mask_pad: paddle.Tensor, # paddle.ones([0,0,0], dtype=paddle.bool) - cache: paddle.Tensor # paddle.zeros([0,0,0,0]) + mask_pad: paddle.Tensor=paddle.ones([0, 0, 0], dtype=paddle.bool), + cache: paddle.Tensor=paddle.zeros([0, 0, 0, 0]) ) -> Tuple[paddle.Tensor, paddle.Tensor]: """Compute convolution module. 
Args: diff --git a/paddlespeech/s2t/modules/decoder_layer.py b/paddlespeech/s2t/modules/decoder_layer.py index c8843b723..37b124e84 100644 --- a/paddlespeech/s2t/modules/decoder_layer.py +++ b/paddlespeech/s2t/modules/decoder_layer.py @@ -121,16 +121,11 @@ class DecoderLayer(nn.Layer): if self.concat_after: tgt_concat = paddle.cat( - (tgt_q, self.self_attn(tgt_q, tgt, tgt, tgt_q_mask, - paddle.empty([0]), - paddle.zeros([0, 0, 0, 0]))[0]), - dim=-1) + (tgt_q, self.self_attn(tgt_q, tgt, tgt, tgt_q_mask)[0]), dim=-1) x = residual + self.concat_linear1(tgt_concat) else: x = residual + self.dropout( - self.self_attn(tgt_q, tgt, tgt, tgt_q_mask, - paddle.empty([0]), paddle.zeros([0, 0, 0, 0]))[ - 0]) + self.self_attn(tgt_q, tgt, tgt, tgt_q_mask)[0]) if not self.normalize_before: x = self.norm1(x) @@ -139,15 +134,11 @@ class DecoderLayer(nn.Layer): x = self.norm2(x) if self.concat_after: x_concat = paddle.cat( - (x, self.src_attn(x, memory, memory, memory_mask, - paddle.empty([0]), - paddle.zeros([0, 0, 0, 0]))[0]), - dim=-1) + (x, self.src_attn(x, memory, memory, memory_mask)[0]), dim=-1) x = residual + self.concat_linear2(x_concat) else: x = residual + self.dropout( - self.src_attn(x, memory, memory, memory_mask, - paddle.empty([0]), paddle.zeros([0, 0, 0, 0]))[0]) + self.src_attn(x, memory, memory, memory_mask)[0]) if not self.normalize_before: x = self.norm2(x) diff --git a/paddlespeech/s2t/modules/encoder.py b/paddlespeech/s2t/modules/encoder.py index cf4e32fa4..2f4ad1b29 100644 --- a/paddlespeech/s2t/modules/encoder.py +++ b/paddlespeech/s2t/modules/encoder.py @@ -175,9 +175,7 @@ class BaseEncoder(nn.Layer): decoding_chunk_size, self.static_chunk_size, num_decoding_left_chunks) for layer in self.encoders: - xs, chunk_masks, _, _ = layer(xs, chunk_masks, pos_emb, mask_pad, - paddle.zeros([0, 0, 0, 0]), - paddle.zeros([0, 0, 0, 0])) + xs, chunk_masks, _, _ = layer(xs, chunk_masks, pos_emb, mask_pad) if self.normalize_before: xs = self.after_norm(xs) # Here we assume the mask is not changed in encoder layers, so just @@ -190,9 +188,9 @@ class BaseEncoder(nn.Layer): xs: paddle.Tensor, offset: int, required_cache_size: int, - att_cache: paddle.Tensor, # paddle.zeros([0,0,0,0]) - cnn_cache: paddle.Tensor, # paddle.zeros([0,0,0,0]), - att_mask: paddle.Tensor, # paddle.ones([0,0,0], dtype=paddle.bool) + att_cache: paddle.Tensor=paddle.zeros([0, 0, 0, 0]), + cnn_cache: paddle.Tensor=paddle.zeros([0, 0, 0, 0]), + att_mask: paddle.Tensor=paddle.ones([0, 0, 0], dtype=paddle.bool) ) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor]: """ Forward just one chunk Args: @@ -255,7 +253,6 @@ class BaseEncoder(nn.Layer): xs, att_mask, pos_emb, - mask_pad=paddle.ones([0, 0, 0], dtype=paddle.bool), att_cache=att_cache[i:i + 1] if elayers > 0 else att_cache, cnn_cache=cnn_cache[i:i + 1] if paddle.shape(cnn_cache)[0] > 0 else cnn_cache, ) @@ -328,8 +325,7 @@ class BaseEncoder(nn.Layer): chunk_xs = xs[:, cur:end, :] (y, att_cache, cnn_cache) = self.forward_chunk( - chunk_xs, offset, required_cache_size, att_cache, cnn_cache, - paddle.ones([0, 0, 0], dtype=paddle.bool)) + chunk_xs, offset, required_cache_size, att_cache, cnn_cache) outputs.append(y) offset += y.shape[1] diff --git a/paddlespeech/s2t/modules/encoder_layer.py b/paddlespeech/s2t/modules/encoder_layer.py index 4555b535f..dac62bce3 100644 --- a/paddlespeech/s2t/modules/encoder_layer.py +++ b/paddlespeech/s2t/modules/encoder_layer.py @@ -76,10 +76,9 @@ class TransformerEncoderLayer(nn.Layer): x: paddle.Tensor, mask: paddle.Tensor, pos_emb: 
paddle.Tensor, - mask_pad: paddle. - Tensor, # paddle.ones([0, 0, 0], dtype=paddle.bool) - att_cache: paddle.Tensor, # paddle.zeros([0, 0, 0, 0]) - cnn_cache: paddle.Tensor, # paddle.zeros([0, 0, 0, 0]) + mask_pad: paddle.Tensor=paddle.ones([0, 0, 0], dtype=paddle.bool), + att_cache: paddle.Tensor=paddle.zeros([0, 0, 0, 0]), + cnn_cache: paddle.Tensor=paddle.zeros([0, 0, 0, 0]) ) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor, paddle.Tensor]: """Compute encoded features. Args: @@ -106,8 +105,7 @@ class TransformerEncoderLayer(nn.Layer): if self.normalize_before: x = self.norm1(x) - x_att, new_att_cache = self.self_attn( - x, x, x, mask, paddle.empty([0]), cache=att_cache) + x_att, new_att_cache = self.self_attn(x, x, x, mask, cache=att_cache) if self.concat_after: x_concat = paddle.concat((x, x_att), axis=-1) @@ -195,9 +193,9 @@ class ConformerEncoderLayer(nn.Layer): x: paddle.Tensor, mask: paddle.Tensor, pos_emb: paddle.Tensor, - mask_pad: paddle.Tensor, #paddle.ones([0, 0, 0],dtype=paddle.bool) - att_cache: paddle.Tensor, # paddle.zeros([0, 0, 0, 0]) - cnn_cache: paddle.Tensor, # paddle.zeros([0, 0, 0, 0]) + mask_pad: paddle.Tensor=paddle.ones([0, 0, 0], dtype=paddle.bool), + att_cache: paddle.Tensor=paddle.zeros([0, 0, 0, 0]), + cnn_cache: paddle.Tensor=paddle.zeros([0, 0, 0, 0]) ) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor, paddle.Tensor]: """Compute encoded features. Args: diff --git a/paddlespeech/server/engine/asr/online/python/asr_engine.py b/paddlespeech/server/engine/asr/online/python/asr_engine.py index 87d88ee60..5782d7035 100644 --- a/paddlespeech/server/engine/asr/online/python/asr_engine.py +++ b/paddlespeech/server/engine/asr/online/python/asr_engine.py @@ -480,8 +480,7 @@ class PaddleASRConnectionHanddler: self.offset, required_cache_size, att_cache=self.att_cache, - cnn_cache=self.cnn_cache, - att_mask=paddle.ones([0, 0, 0], dtype=paddle.bool)) + cnn_cache=self.cnn_cache) outputs.append(y) # update the global offset, in decoding frame unit From 08b9c45811110b3656bc6ad9844199055a9063d0 Mon Sep 17 00:00:00 2001 From: tianhao zhang <15600919271@163.com> Date: Fri, 9 Sep 2022 09:26:41 +0000 Subject: [PATCH 089/101] fix dp init --- examples/aishell/asr1/run.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/aishell/asr1/run.sh b/examples/aishell/asr1/run.sh index 701dcd2ac..bd4f50e3f 100644 --- a/examples/aishell/asr1/run.sh +++ b/examples/aishell/asr1/run.sh @@ -2,8 +2,8 @@ source path.sh set -e -gpus=1 -stage=1 +gpus=0,1,2,3 +stage=0 stop_stage=50 conf_path=conf/conformer.yaml ips= #xx.xx.xx.xx,xx.xx.xx.xx From fdcc8c042762da5141fc5c59f43b37dfa28cfab7 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Fri, 9 Sep 2022 17:49:20 +0800 Subject: [PATCH 090/101] Update README.md --- examples/aishell3_vctk/ernie_sat/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/examples/aishell3_vctk/ernie_sat/README.md b/examples/aishell3_vctk/ernie_sat/README.md index 777bea326..a849488d5 100644 --- a/examples/aishell3_vctk/ernie_sat/README.md +++ b/examples/aishell3_vctk/ernie_sat/README.md @@ -29,9 +29,11 @@ Or train your MFA model reference to [mfa example](https://github.com/PaddlePadd Assume the paths to the datasets are: - `~/datasets/data_aishell3` - `~/datasets/VCTK-Corpus-0.92` + Assume the path to the MFA results of the datasets are: - `./aishell3_alignment_tone` - `./vctk_alignment` + Run the command below to 1. **source path**. 2. preprocess the dataset. 
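A minimal pre-flight sketch for the preprocessing step described in the README hunk above (illustrative only, not part of any patch in this series): it simply checks that the dataset and MFA-alignment paths the README assumes are actually in place before the run.sh stages are launched.

    # pre-flight check for the assumed AISHELL-3 / VCTK / MFA paths (illustrative sketch)
    from pathlib import Path

    assumed_paths = [
        Path("~/datasets/data_aishell3").expanduser(),     # AISHELL-3 corpus
        Path("~/datasets/VCTK-Corpus-0.92").expanduser(),  # VCTK corpus
        Path("./aishell3_alignment_tone"),                 # MFA results for AISHELL-3
        Path("./vctk_alignment"),                          # MFA results for VCTK
    ]
    for p in assumed_paths:
        print(f"{p}: {'found' if p.exists() else 'missing'}")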
From 663e3ab58ee21d24b2f6d28f5d1050fba84be088 Mon Sep 17 00:00:00 2001 From: tianhao zhang <15600919271@163.com> Date: Fri, 9 Sep 2022 09:52:49 +0000 Subject: [PATCH 091/101] fix dp init --- paddlespeech/s2t/training/trainer.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/paddlespeech/s2t/training/trainer.py b/paddlespeech/s2t/training/trainer.py index 1093e4a12..4a69d78a4 100644 --- a/paddlespeech/s2t/training/trainer.py +++ b/paddlespeech/s2t/training/trainer.py @@ -19,7 +19,9 @@ from pathlib import Path import paddle from paddle import distributed as dist -dist.init_parallel_env() +world_size = dist.get_world_size() +if world_size > 1: + dist.init_parallel_env() from visualdl import LogWriter From 08c3ceb04bde74735b204090600fc8bc2106a70b Mon Sep 17 00:00:00 2001 From: tianhao zhang <15600919271@163.com> Date: Fri, 9 Sep 2022 15:40:37 +0000 Subject: [PATCH 092/101] update wenetspeech streaming conformer result --- examples/wenetspeech/asr1/RESULTS.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/examples/wenetspeech/asr1/RESULTS.md b/examples/wenetspeech/asr1/RESULTS.md index cc209db75..af84a5f6e 100644 --- a/examples/wenetspeech/asr1/RESULTS.md +++ b/examples/wenetspeech/asr1/RESULTS.md @@ -34,3 +34,15 @@ Pretrain model from http://mobvoi-speech-public.ufile.ucloud.cn/public/wenet/wen | conformer | 32.52 M | conf/conformer.yaml | spec_aug | aishell1 | ctc_greedy_search | - | 0.052534 | | conformer | 32.52 M | conf/conformer.yaml | spec_aug | aishell1 | ctc_prefix_beam_search | - | 0.052915 | | conformer | 32.52 M | conf/conformer.yaml | spec_aug | aishell1 | attention_rescoring | - | 0.047904 | + + +## Conformer Streaming Pretrained Model + +Pretrain model from https://paddlespeech.bj.bcebos.com/s2t/wenetspeech/asr1/asr1_chunk_conformer_wenetspeech_ckpt_1.0.0a.model.tar.gz + +| Model | Params | Config | Augmentation| Test set | Decode method | Chunk Size | CER | +| --- | --- | --- | --- | --- | --- | --- | --- | +| conformer | 32.52 M | conf/chunk_conformer.yaml | spec_aug | aishell1 | attention | 16 | 0.056273 | +| conformer | 32.52 M | conf/chunk_conformer.yaml | spec_aug | aishell1 | ctc_greedy_search | 16 | 0.078918 | +| conformer | 32.52 M | conf/chunk_conformer.yaml | spec_aug | aishell1 | ctc_prefix_beam_search | 16 | 0.079080 | +| conformer | 32.52 M | conf/chunk_conformer.yaml | spec_aug | aishell1 | attention_rescoring | 16 | 0.054401 | From 445cb2b08c49632e08f9f847b0b062d32a507efa Mon Sep 17 00:00:00 2001 From: yuehuayingxueluo <867460659@qq.com> Date: Tue, 13 Sep 2022 15:05:33 +0800 Subject: [PATCH 093/101] fix prepare.sh (#2376) Co-authored-by: yuehuayingxueluo --- tests/test_tipc/prepare.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) mode change 100644 => 100755 tests/test_tipc/prepare.sh diff --git a/tests/test_tipc/prepare.sh b/tests/test_tipc/prepare.sh old mode 100644 new mode 100755 index 2a2272813..cb05a1d0f --- a/tests/test_tipc/prepare.sh +++ b/tests/test_tipc/prepare.sh @@ -15,6 +15,7 @@ dataline=$(cat ${FILENAME}) # parser params IFS=$'\n' lines=(${dataline}) +python=python # The training params model_name=$(func_parser_value "${lines[1]}") @@ -68,7 +69,7 @@ if [[ ${MODE} = "benchmark_train" ]];then if [[ ${model_name} == "pwgan" ]]; then # 下载 csmsc 数据集并解压缩 - wget -nc https://weixinxcxdb.oss-cn-beijing.aliyuncs.com/gwYinPinKu/BZNSYP.rar + wget -nc https://paddle-wheel.bj.bcebos.com/benchmark/BZNSYP.rar mkdir -p BZNSYP unrar x BZNSYP.rar BZNSYP wget -nc
https://paddlespeech.bj.bcebos.com/Parakeet/benchmark/durations.txt @@ -80,6 +81,10 @@ if [[ ${MODE} = "benchmark_train" ]];then python ../paddlespeech/t2s/exps/gan_vocoder/normalize.py --metadata=dump/test/raw/metadata.jsonl --dumpdir=dump/test/norm --stats=dump/train/feats_stats.npy fi + echo "barrier start" + PYTHON="${python}" bash test_tipc/barrier.sh + echo "barrier end" + if [[ ${model_name} == "mdtc" ]]; then # 下载 Snips 数据集并解压缩 wget https://paddlespeech.bj.bcebos.com/datasets/hey_snips_kws_4.0.tar.gz.1 From ec571bb0d113d5ab01324b0120438b8c1824f56b Mon Sep 17 00:00:00 2001 From: TianYuan Date: Tue, 13 Sep 2022 16:56:03 +0800 Subject: [PATCH 094/101] Update README.md --- examples/voxceleb/sv0/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/voxceleb/sv0/README.md b/examples/voxceleb/sv0/README.md index 26c95aca9..7fe759ebc 100644 --- a/examples/voxceleb/sv0/README.md +++ b/examples/voxceleb/sv0/README.md @@ -148,4 +148,4 @@ source path.sh CUDA_VISIBLE_DEVICES= bash ./local/test.sh ./data sv0_ecapa_tdnn_voxceleb12_ckpt_0_2_1/model/ conf/ecapa_tdnn.yaml ``` -The performance of the released models are shown in [this](./RESULTS.md) +The performance of the released models are shown in [this](./RESULT.md) From 80b180217df310b8738c06577c88965bab38f160 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Wed, 14 Sep 2022 10:37:03 +0800 Subject: [PATCH 095/101] [TTS] fix some bugs of ERNIE-SAT (#2378) * fix ernie_sat, test=tts * fix for comments, test=tts --- .../ernie_sat/local/synthesize_e2e.sh | 6 ++-- .../ernie_sat/local/synthesize_e2e.sh | 6 ++-- .../vctk/ernie_sat/local/synthesize_e2e.sh | 6 ++-- paddlespeech/t2s/exps/ernie_sat/align.py | 4 +-- .../t2s/exps/ernie_sat/synthesize_e2e.py | 28 +++++++++++-------- 5 files changed, 27 insertions(+), 23 deletions(-) diff --git a/examples/aishell3/ernie_sat/local/synthesize_e2e.sh b/examples/aishell3/ernie_sat/local/synthesize_e2e.sh index b33e8ca09..77b353b52 100755 --- a/examples/aishell3/ernie_sat/local/synthesize_e2e.sh +++ b/examples/aishell3/ernie_sat/local/synthesize_e2e.sh @@ -13,9 +13,9 @@ if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then FLAGS_fraction_of_gpu_memory_to_use=0.01 \ python3 ${BIN_DIR}/synthesize_e2e.py \ --task_name=synthesize \ - --wav_path=source/SSB03540307.wav\ - --old_str='请播放歌曲小苹果。' \ - --new_str='歌曲真好听。' \ + --wav_path=source/SSB03540307.wav \ + --old_str='请播放歌曲小苹果' \ + --new_str='歌曲真好听' \ --source_lang=zh \ --target_lang=zh \ --erniesat_config=${config_path} \ diff --git a/examples/aishell3_vctk/ernie_sat/local/synthesize_e2e.sh b/examples/aishell3_vctk/ernie_sat/local/synthesize_e2e.sh index c30af6e85..446ac8791 100755 --- a/examples/aishell3_vctk/ernie_sat/local/synthesize_e2e.sh +++ b/examples/aishell3_vctk/ernie_sat/local/synthesize_e2e.sh @@ -15,7 +15,7 @@ if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then python3 ${BIN_DIR}/synthesize_e2e.py \ --task_name=synthesize \ --wav_path=source/p243_313.wav \ - --old_str='For that reason cover should not be given.' \ + --old_str='For that reason cover should not be given' \ --new_str='今天天气很好' \ --source_lang=en \ --target_lang=zh \ @@ -36,8 +36,8 @@ if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then python3 ${BIN_DIR}/synthesize_e2e.py \ --task_name=synthesize \ --wav_path=source/SSB03540307.wav \ - --old_str='请播放歌曲小苹果。' \ - --new_str="Thank you!" 
\ + --old_str='请播放歌曲小苹果' \ + --new_str="Thank you" \ --source_lang=zh \ --target_lang=en \ --erniesat_config=${config_path} \ diff --git a/examples/vctk/ernie_sat/local/synthesize_e2e.sh b/examples/vctk/ernie_sat/local/synthesize_e2e.sh index fee540169..dcc710447 100755 --- a/examples/vctk/ernie_sat/local/synthesize_e2e.sh +++ b/examples/vctk/ernie_sat/local/synthesize_e2e.sh @@ -14,7 +14,7 @@ if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then python3 ${BIN_DIR}/synthesize_e2e.py \ --task_name=synthesize \ --wav_path=source/p243_313.wav \ - --old_str='For that reason cover should not be given.' \ + --old_str='For that reason cover should not be given' \ --new_str='I love you very much do you love me' \ --source_lang=en \ --target_lang=en \ @@ -36,8 +36,8 @@ if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then python3 ${BIN_DIR}/synthesize_e2e.py \ --task_name=edit \ --wav_path=source/p243_313.wav \ - --old_str='For that reason cover should not be given.' \ - --new_str='For that reason cover is not impossible to be given.' \ + --old_str='For that reason cover should not be given' \ + --new_str='For that reason cover is not impossible to be given' \ --source_lang=en \ --target_lang=en \ --erniesat_config=${config_path} \ diff --git a/paddlespeech/t2s/exps/ernie_sat/align.py b/paddlespeech/t2s/exps/ernie_sat/align.py index 464f51a3b..8dbe685f5 100755 --- a/paddlespeech/t2s/exps/ernie_sat/align.py +++ b/paddlespeech/t2s/exps/ernie_sat/align.py @@ -58,7 +58,7 @@ def _readtg(tg_path: str, lang: str='en', fs: int=24000, n_shift: int=300): durations[-2] += durations[-1] durations = durations[:-1] - # replace ' and 'sil' with 'sp' + # replace '' and 'sil' with 'sp' phones = ['sp' if (phn == '' or phn == 'sil') else phn for phn in phones] if lang == 'en': @@ -195,7 +195,7 @@ def words2phns(text: str, lang='en'): wrd = wrd.upper() if (wrd not in ds): wrd2phns[str(index) + '_' + wrd] = 'spn' - phns.extend('spn') + phns.extend(['spn']) else: wrd2phns[str(index) + '_' + wrd] = word2phns_dict[wrd].split() phns.extend(word2phns_dict[wrd].split()) diff --git a/paddlespeech/t2s/exps/ernie_sat/synthesize_e2e.py b/paddlespeech/t2s/exps/ernie_sat/synthesize_e2e.py index 21c9ae044..e450aa1a0 100644 --- a/paddlespeech/t2s/exps/ernie_sat/synthesize_e2e.py +++ b/paddlespeech/t2s/exps/ernie_sat/synthesize_e2e.py @@ -137,9 +137,6 @@ def prep_feats_with_dur(wav_path: str, new_wav = np.concatenate( [wav_org[:wav_left_idx], blank_wav, wav_org[wav_right_idx:]]) - # 音频是正常遮住了 - sf.write(str("mask_wav.wav"), new_wav, samplerate=fs) - # 4. 
get old and new mel span to be mask old_span_bdy = get_span_bdy( mfa_start=mfa_start, mfa_end=mfa_end, span_to_repl=span_to_repl) @@ -274,7 +271,8 @@ def get_wav(wav_path: str, new_str: str='', duration_adjust: bool=True, fs: int=24000, - n_shift: int=300): + n_shift: int=300, + task_name: str='synthesize'): outs = get_mlm_output( wav_path=wav_path, @@ -298,9 +296,11 @@ def get_wav(wav_path: str, alt_wav = np.squeeze(alt_wav) old_time_bdy = [n_shift * x for x in old_span_bdy] - wav_replaced = np.concatenate( - [wav_org[:old_time_bdy[0]], alt_wav, wav_org[old_time_bdy[1]:]]) - + if task_name == 'edit': + wav_replaced = np.concatenate( + [wav_org[:old_time_bdy[0]], alt_wav, wav_org[old_time_bdy[1]:]]) + else: + wav_replaced = alt_wav wav_dict = {"origin": wav_org, "output": wav_replaced} return wav_dict @@ -356,7 +356,11 @@ def parse_args(): "--ngpu", type=int, default=1, help="if ngpu == 0, use cpu.") # ernie sat related - parser.add_argument("--task_name", type=str, help="task name") + parser.add_argument( + "--task_name", + type=str, + choices=['edit', 'synthesize'], + help="task name.") parser.add_argument("--wav_path", type=str, help="path of old wav") parser.add_argument("--old_str", type=str, help="old string") parser.add_argument("--new_str", type=str, help="new string") @@ -410,10 +414,9 @@ if __name__ == '__main__': if args.task_name == 'edit': new_str = new_str elif args.task_name == 'synthesize': - new_str = old_str + new_str + new_str = old_str + ' ' + new_str else: - new_str = old_str + new_str - print("new_str:", new_str) + new_str = old_str + ' ' + new_str # Extractor mel_extractor = LogMelFBank( @@ -467,7 +470,8 @@ if __name__ == '__main__': new_str=new_str, duration_adjust=args.duration_adjust, fs=erniesat_config.fs, - n_shift=erniesat_config.n_shift) + n_shift=erniesat_config.n_shift, + task_name=args.task_name) sf.write( args.output_name, wav_dict['output'], samplerate=erniesat_config.fs) From 02679906e649bf123c15e988bf84facd885aa7ee Mon Sep 17 00:00:00 2001 From: TianYuan Date: Wed, 14 Sep 2022 15:22:25 +0800 Subject: [PATCH 096/101] Update tts_papers.md --- docs/source/tts/tts_papers.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/source/tts/tts_papers.md b/docs/source/tts/tts_papers.md index 681b21066..f3ca1b624 100644 --- a/docs/source/tts/tts_papers.md +++ b/docs/source/tts/tts_papers.md @@ -5,6 +5,7 @@ - [Disambiguation of Chinese Polyphones in an End-to-End Framework with Semantic Features Extracted by Pre-trained BERT](https://www1.se.cuhk.edu.hk/~hccl/publications/pub/201909_INTERSPEECH_DongyangDAI.pdf) - [Polyphone Disambiguation in Mandarin Chinese with Semi-Supervised Learning](https://www.isca-speech.org/archive/pdfs/interspeech_2021/shi21d_interspeech.pdf) * github: https://github.com/PaperMechanica/SemiPPL +- [WikipediaHomographData](https://github.com/google-research-datasets/WikipediaHomographData) ### Text Normalization #### English - [applenob/text_normalization](https://github.com/applenob/text_normalization) From 324b166c5293323082e2c326d728618fd05fcac0 Mon Sep 17 00:00:00 2001 From: WongLaw <95171490+WongLaw@users.noreply.github.com> Date: Wed, 14 Sep 2022 16:11:12 +0800 Subject: [PATCH 097/101] Removed useless spk_id in speech_server and streaming_tts_server from demos, and support bilingual server engine, test=tts (#2380) * Removed useless spk_id in speech_server and streaming_tts_server from demos, and support bilingual server engine. 
--- demos/speech_server/conf/application.yaml | 4 ++-- demos/streaming_tts_server/conf/tts_online_application.yaml | 3 +-- .../streaming_tts_server/conf/tts_online_ws_application.yaml | 3 +-- paddlespeech/server/engine/engine_warmup.py | 4 +++- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/demos/speech_server/conf/application.yaml b/demos/speech_server/conf/application.yaml index 9c171c470..b5ee80095 100644 --- a/demos/speech_server/conf/application.yaml +++ b/demos/speech_server/conf/application.yaml @@ -61,7 +61,7 @@ tts_python: phones_dict: tones_dict: speaker_dict: - spk_id: 0 + # voc (vocoder) choices=['pwgan_csmsc', 'pwgan_ljspeech', 'pwgan_aishell3', # 'pwgan_vctk', 'mb_melgan_csmsc', 'style_melgan_csmsc', @@ -87,7 +87,7 @@ tts_inference: phones_dict: tones_dict: speaker_dict: - spk_id: 0 + am_predictor_conf: device: # set 'gpu:id' or 'cpu' diff --git a/demos/streaming_tts_server/conf/tts_online_application.yaml b/demos/streaming_tts_server/conf/tts_online_application.yaml index e617912fe..f5ec9dc8e 100644 --- a/demos/streaming_tts_server/conf/tts_online_application.yaml +++ b/demos/streaming_tts_server/conf/tts_online_application.yaml @@ -29,7 +29,7 @@ tts_online: phones_dict: tones_dict: speaker_dict: - spk_id: 0 + # voc (vocoder) choices=['mb_melgan_csmsc, hifigan_csmsc'] # Both mb_melgan_csmsc and hifigan_csmsc support streaming voc inference @@ -70,7 +70,6 @@ tts_online-onnx: phones_dict: tones_dict: speaker_dict: - spk_id: 0 am_sample_rate: 24000 am_sess_conf: device: "cpu" # set 'gpu:id' or 'cpu' diff --git a/demos/streaming_tts_server/conf/tts_online_ws_application.yaml b/demos/streaming_tts_server/conf/tts_online_ws_application.yaml index 329f882cc..c65633917 100644 --- a/demos/streaming_tts_server/conf/tts_online_ws_application.yaml +++ b/demos/streaming_tts_server/conf/tts_online_ws_application.yaml @@ -29,7 +29,7 @@ tts_online: phones_dict: tones_dict: speaker_dict: - spk_id: 0 + # voc (vocoder) choices=['mb_melgan_csmsc, hifigan_csmsc'] # Both mb_melgan_csmsc and hifigan_csmsc support streaming voc inference @@ -70,7 +70,6 @@ tts_online-onnx: phones_dict: tones_dict: speaker_dict: - spk_id: 0 am_sample_rate: 24000 am_sess_conf: device: "cpu" # set 'gpu:id' or 'cpu' diff --git a/paddlespeech/server/engine/engine_warmup.py b/paddlespeech/server/engine/engine_warmup.py index 3751554c2..ff65dff97 100644 --- a/paddlespeech/server/engine/engine_warmup.py +++ b/paddlespeech/server/engine/engine_warmup.py @@ -27,8 +27,10 @@ def warm_up(engine_and_type: str, warm_up_time: int=3) -> bool: sentence = "您好,欢迎使用语音合成服务。" elif tts_engine.lang == 'en': sentence = "Hello and welcome to the speech synthesis service." 
+ elif tts_engine.lang == 'mix': + sentence = "您好,欢迎使用TTS多语种服务。" else: - logger.error("tts engine only support lang: zh or en.") + logger.error("tts engine only support lang: zh or en or mix.") sys.exit(-1) if engine_and_type == "tts_python": From cdf095595f0398ac0fb20d9cd6f80672c5c00d0c Mon Sep 17 00:00:00 2001 From: liangym <34430015+lym0302@users.noreply.github.com> Date: Thu, 15 Sep 2022 15:47:59 +0800 Subject: [PATCH 098/101] [tts] finetune add frozen (#2385) * finetune add frozen --- examples/other/tts_finetune/tts3/README.md | 9 + examples/other/tts_finetune/tts3/finetune.py | 43 ++++- .../other/tts_finetune/tts3/finetune.yaml | 12 ++ .../other/tts_finetune/tts3/local/extract.py | 7 +- .../other/tts_finetune/tts3/local/train.py | 178 ++++++++++++++++++ examples/other/tts_finetune/tts3/run.sh | 12 +- 6 files changed, 242 insertions(+), 19 deletions(-) create mode 100644 examples/other/tts_finetune/tts3/local/train.py diff --git a/examples/other/tts_finetune/tts3/README.md b/examples/other/tts_finetune/tts3/README.md index 1ad30328b..192ee7ff4 100644 --- a/examples/other/tts_finetune/tts3/README.md +++ b/examples/other/tts_finetune/tts3/README.md @@ -75,6 +75,15 @@ When "Prepare" done. The structure of the current directory is listed below. ``` +### Set finetune.yaml +`finetune.yaml` contains some configurations for fine-tuning. You can try various options to get a better result. +Arguments: + - `batch_size`: finetune batch size. Default: -1, which means 64, the same as the pretrained model + - `learning_rate`: learning rate. Default: 0.0001 + - `num_snapshots`: number of saved models. Default: -1, which means 5, the same as the pretrained model + - `frozen_layers`: frozen layers, must be a list. If you don't want to freeze any layer, set it to []. + + ## Get Started Run the command below to diff --git a/examples/other/tts_finetune/tts3/finetune.py b/examples/other/tts_finetune/tts3/finetune.py index 0f060b44d..207e2dbc5 100644 --- a/examples/other/tts_finetune/tts3/finetune.py +++ b/examples/other/tts_finetune/tts3/finetune.py @@ -14,6 +14,7 @@ import argparse import os from pathlib import Path +from typing import List from typing import Union import yaml @@ -21,10 +22,10 @@ from local.check_oov import get_check_result from local.extract import extract_feature from local.label_process import get_single_label from local.prepare_env import generate_finetune_env +from local.train import train_sp from paddle import distributed as dist from yacs.config import CfgNode -from paddlespeech.t2s.exps.fastspeech2.train import train_sp from utils.gen_duration_from_textgrid import gen_duration_from_textgrid DICT_EN = 'tools/aligner/cmudict-0.7b' @@ -38,15 +39,24 @@ os.environ['PATH'] = MFA_PATH + '/:' + os.environ['PATH'] class TrainArgs(): - def __init__(self, ngpu, config_file, dump_dir: Path, output_dir: Path): + def __init__(self, + ngpu, + config_file, + dump_dir: Path, + output_dir: Path, + frozen_layers: List[str]): + # config: fastspeech2 config file. self.config = str(config_file) self.train_metadata = str(dump_dir / "train/norm/metadata.jsonl") self.dev_metadata = str(dump_dir / "dev/norm/metadata.jsonl") + # model output dir.
self.output_dir = str(output_dir) self.ngpu = ngpu self.phones_dict = str(dump_dir / "phone_id_map.txt") self.speaker_dict = str(dump_dir / "speaker_id_map.txt") self.voice_cloning = False + # frozen layers + self.frozen_layers = frozen_layers def get_mfa_result( @@ -122,12 +132,11 @@ if __name__ == '__main__': "--ngpu", type=int, default=2, help="if ngpu=0, use cpu.") parser.add_argument("--epoch", type=int, default=100, help="finetune epoch") - parser.add_argument( - "--batch_size", - type=int, - default=-1, - help="batch size, default -1 means same as pretrained model") + "--finetune_config", + type=str, + default="./finetune.yaml", + help="Path to finetune config file") args = parser.parse_args() @@ -147,8 +156,14 @@ if __name__ == '__main__': with open(config_file) as f: config = CfgNode(yaml.safe_load(f)) config.max_epoch = config.max_epoch + args.epoch - if args.batch_size > 0: - config.batch_size = args.batch_size + + with open(args.finetune_config) as f2: + finetune_config = CfgNode(yaml.safe_load(f2)) + config.batch_size = finetune_config.batch_size if finetune_config.batch_size > 0 else config.batch_size + config.optimizer.learning_rate = finetune_config.learning_rate if finetune_config.learning_rate > 0 else config.optimizer.learning_rate + config.num_snapshots = finetune_config.num_snapshots if finetune_config.num_snapshots > 0 else config.num_snapshots + frozen_layers = finetune_config.frozen_layers + assert type(frozen_layers) == list, "frozen_layers should be set a list." if args.lang == 'en': lexicon_file = DICT_EN @@ -158,6 +173,13 @@ if __name__ == '__main__': mfa_phone_file = MFA_PHONE_ZH else: print('please input right lang!!') + + print(f"finetune max_epoch: {config.max_epoch}") + print(f"finetune batch_size: {config.batch_size}") + print(f"finetune learning_rate: {config.optimizer.learning_rate}") + print(f"finetune num_snapshots: {config.num_snapshots}") + print(f"finetune frozen_layers: {frozen_layers}") + am_phone_file = pretrained_model_dir / "phone_id_map.txt" label_file = input_dir / "labels.txt" @@ -181,7 +203,8 @@ if __name__ == '__main__': generate_finetune_env(output_dir, pretrained_model_dir) # create a new args for training - train_args = TrainArgs(args.ngpu, config_file, dump_dir, output_dir) + train_args = TrainArgs(args.ngpu, config_file, dump_dir, output_dir, + frozen_layers) # finetune models # dispatch diff --git a/examples/other/tts_finetune/tts3/finetune.yaml b/examples/other/tts_finetune/tts3/finetune.yaml new file mode 100644 index 000000000..374a69f3d --- /dev/null +++ b/examples/other/tts_finetune/tts3/finetune.yaml @@ -0,0 +1,12 @@ +########################################################### +# PARAS SETTING # +########################################################### +# Set to -1 to indicate that the parameter is the same as the pretrained model configuration + +batch_size: -1 +learning_rate: 0.0001 # learning rate +num_snapshots: -1 + +# frozen_layers should be a list +# if you don't need to freeze, set frozen_layers to [] +frozen_layers: ["encoder", "duration_predictor"] diff --git a/examples/other/tts_finetune/tts3/local/extract.py b/examples/other/tts_finetune/tts3/local/extract.py index edd92420b..630b58ce3 100644 --- a/examples/other/tts_finetune/tts3/local/extract.py +++ b/examples/other/tts_finetune/tts3/local/extract.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging -import math import os from operator import itemgetter from pathlib import Path @@ -211,9 +210,9 @@ def extract_feature(duration_file: str, mel_extractor, pitch_extractor, energy_extractor = get_extractor(config) wav_files = sorted(list((input_dir).rglob("*.wav"))) - # split data into 3 sections, train: 80%, dev: 10%, test: 10% - num_train = math.ceil(len(wav_files) * 0.8) - num_dev = math.ceil(len(wav_files) * 0.1) + # split data into 3 sections, train: len(wav_files) - 2, dev: 1, test: 1 + num_train = len(wav_files) - 2 + num_dev = 1 print(num_train, num_dev) train_wav_files = wav_files[:num_train] diff --git a/examples/other/tts_finetune/tts3/local/train.py b/examples/other/tts_finetune/tts3/local/train.py new file mode 100644 index 000000000..d065ae593 --- /dev/null +++ b/examples/other/tts_finetune/tts3/local/train.py @@ -0,0 +1,178 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import logging +import os +import shutil +from pathlib import Path +from typing import List + +import jsonlines +import numpy as np +import paddle +from paddle import DataParallel +from paddle import distributed as dist +from paddle.io import DataLoader +from paddle.io import DistributedBatchSampler + +from paddlespeech.t2s.datasets.am_batch_fn import fastspeech2_multi_spk_batch_fn +from paddlespeech.t2s.datasets.am_batch_fn import fastspeech2_single_spk_batch_fn +from paddlespeech.t2s.datasets.data_table import DataTable +from paddlespeech.t2s.models.fastspeech2 import FastSpeech2 +from paddlespeech.t2s.models.fastspeech2 import FastSpeech2Evaluator +from paddlespeech.t2s.models.fastspeech2 import FastSpeech2Updater +from paddlespeech.t2s.training.extensions.snapshot import Snapshot +from paddlespeech.t2s.training.extensions.visualizer import VisualDL +from paddlespeech.t2s.training.optimizer import build_optimizers +from paddlespeech.t2s.training.seeding import seed_everything +from paddlespeech.t2s.training.trainer import Trainer + + +def freeze_layer(model, layers: List[str]): + """freeze layers + + Args: + layers (List[str]): frozen layers + """ + for layer in layers: + for param in eval("model." 
+ layer + ".parameters()"): + param.trainable = False + + +def train_sp(args, config): + # decides device type and whether to run in parallel + # setup running environment correctly + if (not paddle.is_compiled_with_cuda()) or args.ngpu == 0: + paddle.set_device("cpu") + else: + paddle.set_device("gpu") + world_size = paddle.distributed.get_world_size() + if world_size > 1: + paddle.distributed.init_parallel_env() + + # set the random seed, it is a must for multiprocess training + seed_everything(config.seed) + + print( + f"rank: {dist.get_rank()}, pid: {os.getpid()}, parent_pid: {os.getppid()}", + ) + fields = [ + "text", "text_lengths", "speech", "speech_lengths", "durations", + "pitch", "energy" + ] + converters = {"speech": np.load, "pitch": np.load, "energy": np.load} + spk_num = None + if args.speaker_dict is not None: + print("multiple speaker fastspeech2!") + collate_fn = fastspeech2_multi_spk_batch_fn + with open(args.speaker_dict, 'rt') as f: + spk_id = [line.strip().split() for line in f.readlines()] + spk_num = len(spk_id) + fields += ["spk_id"] + elif args.voice_cloning: + print("Training voice cloning!") + collate_fn = fastspeech2_multi_spk_batch_fn + fields += ["spk_emb"] + converters["spk_emb"] = np.load + else: + print("single speaker fastspeech2!") + collate_fn = fastspeech2_single_spk_batch_fn + print("spk_num:", spk_num) + + # dataloader has been too verbose + logging.getLogger("DataLoader").disabled = True + + # construct dataset for training and validation + with jsonlines.open(args.train_metadata, 'r') as reader: + train_metadata = list(reader) + train_dataset = DataTable( + data=train_metadata, + fields=fields, + converters=converters, ) + with jsonlines.open(args.dev_metadata, 'r') as reader: + dev_metadata = list(reader) + dev_dataset = DataTable( + data=dev_metadata, + fields=fields, + converters=converters, ) + + # collate function and dataloader + + train_sampler = DistributedBatchSampler( + train_dataset, + batch_size=config.batch_size, + shuffle=True, + drop_last=True) + + print("samplers done!") + + train_dataloader = DataLoader( + train_dataset, + batch_sampler=train_sampler, + collate_fn=collate_fn, + num_workers=config.num_workers) + + dev_dataloader = DataLoader( + dev_dataset, + shuffle=False, + drop_last=False, + batch_size=config.batch_size, + collate_fn=collate_fn, + num_workers=config.num_workers) + print("dataloaders done!") + + with open(args.phones_dict, "r") as f: + phn_id = [line.strip().split() for line in f.readlines()] + vocab_size = len(phn_id) + print("vocab_size:", vocab_size) + + odim = config.n_mels + model = FastSpeech2( + idim=vocab_size, odim=odim, spk_num=spk_num, **config["model"]) + + # freeze layer + if args.frozen_layers != []: + freeze_layer(model, args.frozen_layers) + + if world_size > 1: + model = DataParallel(model) + print("model done!") + + optimizer = build_optimizers(model, **config["optimizer"]) + print("optimizer done!") + + output_dir = Path(args.output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + if dist.get_rank() == 0: + config_name = args.config.split("/")[-1] + # copy conf to output_dir + shutil.copyfile(args.config, output_dir / config_name) + + updater = FastSpeech2Updater( + model=model, + optimizer=optimizer, + dataloader=train_dataloader, + output_dir=output_dir, + **config["updater"]) + + trainer = Trainer(updater, (config.max_epoch, 'epoch'), output_dir) + + evaluator = FastSpeech2Evaluator( + model, dev_dataloader, output_dir=output_dir, **config["updater"]) + + if dist.get_rank() == 0: + 
trainer.extend(evaluator, trigger=(1, "epoch")) + trainer.extend(VisualDL(output_dir), trigger=(1, "iteration")) + trainer.extend( + Snapshot(max_size=config.num_snapshots), trigger=(1, 'epoch')) + trainer.run() diff --git a/examples/other/tts_finetune/tts3/run.sh b/examples/other/tts_finetune/tts3/run.sh index 9bb7ec6f0..9c877e642 100755 --- a/examples/other/tts_finetune/tts3/run.sh +++ b/examples/other/tts_finetune/tts3/run.sh @@ -10,11 +10,12 @@ mfa_dir=./mfa_result dump_dir=./dump output_dir=./exp/default lang=zh -ngpu=2 +ngpu=1 +finetune_config=./finetune.yaml -ckpt=snapshot_iter_96600 +ckpt=snapshot_iter_96699 -gpus=0,1 +gpus=1 CUDA_VISIBLE_DEVICES=${gpus} stage=0 stop_stage=100 @@ -35,7 +36,8 @@ if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then --output_dir=${output_dir} \ --lang=${lang} \ --ngpu=${ngpu} \ - --epoch=100 + --epoch=100 \ + --finetune_config=${finetune_config} fi @@ -54,7 +56,7 @@ if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then --voc_stat=pretrained_models/hifigan_aishell3_ckpt_0.2.0/feats_stats.npy \ --lang=zh \ --text=${BIN_DIR}/../sentences.txt \ - --output_dir=./test_e2e \ + --output_dir=./test_e2e/ \ --phones_dict=${dump_dir}/phone_id_map.txt \ --speaker_dict=${dump_dir}/speaker_id_map.txt \ --spk_id=0 From 4ac206e22ff2c7c669e4b4c2b6f74f842020aca6 Mon Sep 17 00:00:00 2001 From: tianhao zhang <15600919271@163.com> Date: Fri, 16 Sep 2022 02:38:17 +0000 Subject: [PATCH 099/101] update wenetspeech RESULT.md, test=doc --- examples/wenetspeech/asr1/RESULTS.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/examples/wenetspeech/asr1/RESULTS.md b/examples/wenetspeech/asr1/RESULTS.md index af84a5f6e..f22c652e6 100644 --- a/examples/wenetspeech/asr1/RESULTS.md +++ b/examples/wenetspeech/asr1/RESULTS.md @@ -46,3 +46,10 @@ Pretrain model from https://paddlespeech.bj.bcebos.com/s2t/wenetspeech/asr1/asr1 | conformer | 32.52 M | conf/chunk_conformer.yaml | spec_aug | aishell1 | ctc_greedy_search | 16 | 0.078918 | | conformer | 32.52 M | conf/chunk_conformer.yaml | spec_aug | aishell1 | ctc_prefix_beam_search | 16 | 0.079080 | | conformer | 32.52 M | conf/chunk_conformer.yaml | spec_aug | aishell1 | attention_rescoring | 16 | 0.054401 | + +| Model | Params | Config | Augmentation| Test set | Decode method | Chunk Size | CER | +| --- | --- | --- | --- | --- | --- | --- | --- | +| conformer | 32.52 M | conf/chunk_conformer.yaml | spec_aug | aishell1 | attention | -1 | 0.050767 | +| conformer | 32.52 M | conf/chunk_conformer.yaml | spec_aug | aishell1 | ctc_greedy_search | -1 | 0.061884 | +| conformer | 32.52 M | conf/chunk_conformer.yaml | spec_aug | aishell1 | ctc_prefix_beam_search | -1 | 0.062056 | +| conformer | 32.52 M | conf/chunk_conformer.yaml | spec_aug | aishell1 | attention_rescoring | -1 | 0.052110 | From eac362057c3db60a2b60ef49eb51867187050a18 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Fri, 16 Sep 2022 16:00:52 +0800 Subject: [PATCH 100/101] add typehint for g2pw (#2390) --- paddlespeech/t2s/frontend/g2pw/__init__.py | 2 +- paddlespeech/t2s/frontend/g2pw/dataset.py | 66 +++++++++++----------- paddlespeech/t2s/frontend/g2pw/onnx_api.py | 50 +++++++++------- paddlespeech/t2s/frontend/g2pw/utils.py | 11 ++-- 4 files changed, 71 insertions(+), 58 deletions(-) diff --git a/paddlespeech/t2s/frontend/g2pw/__init__.py b/paddlespeech/t2s/frontend/g2pw/__init__.py index 0eaeee5df..89b3af3ca 100644 --- a/paddlespeech/t2s/frontend/g2pw/__init__.py +++ b/paddlespeech/t2s/frontend/g2pw/__init__.py @@ -1 +1 @@ -from paddlespeech.t2s.frontend.g2pw.onnx_api 
import G2PWOnnxConverter +from .onnx_api import G2PWOnnxConverter diff --git a/paddlespeech/t2s/frontend/g2pw/dataset.py b/paddlespeech/t2s/frontend/g2pw/dataset.py index 98af5f463..8a1c2e0bf 100644 --- a/paddlespeech/t2s/frontend/g2pw/dataset.py +++ b/paddlespeech/t2s/frontend/g2pw/dataset.py @@ -15,6 +15,10 @@ Credits This code is modified from https://github.com/GitYCC/g2pW """ +from typing import Dict +from typing import List +from typing import Tuple + import numpy as np from paddlespeech.t2s.frontend.g2pw.utils import tokenize_and_map @@ -23,22 +27,17 @@ ANCHOR_CHAR = '▁' def prepare_onnx_input(tokenizer, - labels, - char2phonemes, - chars, - texts, - query_ids, - phonemes=None, - pos_tags=None, - use_mask=False, - use_char_phoneme=False, - use_pos=False, - window_size=None, - max_len=512): + labels: List[str], + char2phonemes: Dict[str, List[int]], + chars: List[str], + texts: List[str], + query_ids: List[int], + use_mask: bool=False, + window_size: int=None, + max_len: int=512) -> Dict[str, np.array]: if window_size is not None: - truncated_texts, truncated_query_ids = _truncate_texts(window_size, - texts, query_ids) - + truncated_texts, truncated_query_ids = _truncate_texts( + window_size=window_size, texts=texts, query_ids=query_ids) input_ids = [] token_type_ids = [] attention_masks = [] @@ -51,13 +50,19 @@ def prepare_onnx_input(tokenizer, query_id = (truncated_query_ids if window_size else query_ids)[idx] try: - tokens, text2token, token2text = tokenize_and_map(tokenizer, text) + tokens, text2token, token2text = tokenize_and_map( + tokenizer=tokenizer, text=text) except Exception: print(f'warning: text "{text}" is invalid') return {} text, query_id, tokens, text2token, token2text = _truncate( - max_len, text, query_id, tokens, text2token, token2text) + max_len=max_len, + text=text, + query_id=query_id, + tokens=tokens, + text2token=text2token, + token2text=token2text) processed_tokens = ['[CLS]'] + tokens + ['[SEP]'] @@ -91,7 +96,8 @@ def prepare_onnx_input(tokenizer, return outputs -def _truncate_texts(window_size, texts, query_ids): +def _truncate_texts(window_size: int, texts: List[str], + query_ids: List[int]) -> Tuple[List[str], List[int]]: truncated_texts = [] truncated_query_ids = [] for text, query_id in zip(texts, query_ids): @@ -105,7 +111,12 @@ def _truncate_texts(window_size, texts, query_ids): return truncated_texts, truncated_query_ids -def _truncate(max_len, text, query_id, tokens, text2token, token2text): +def _truncate(max_len: int, + text: str, + query_id: int, + tokens: List[str], + text2token: List[int], + token2text: List[Tuple[int]]): truncate_len = max_len - 2 if len(tokens) <= truncate_len: return (text, query_id, tokens, text2token, token2text) @@ -132,18 +143,8 @@ def _truncate(max_len, text, query_id, tokens, text2token, token2text): ], [(s - start, e - start) for s, e in token2text[token_start:token_end]]) -def prepare_data(sent_path, lb_path=None): - raw_texts = open(sent_path).read().rstrip().split('\n') - query_ids = [raw.index(ANCHOR_CHAR) for raw in raw_texts] - texts = [raw.replace(ANCHOR_CHAR, '') for raw in raw_texts] - if lb_path is None: - return texts, query_ids - else: - phonemes = open(lb_path).read().rstrip().split('\n') - return texts, query_ids, phonemes - - -def get_phoneme_labels(polyphonic_chars): +def get_phoneme_labels(polyphonic_chars: List[List[str]] + ) -> Tuple[List[str], Dict[str, List[int]]]: labels = sorted(list(set([phoneme for char, phoneme in polyphonic_chars]))) char2phonemes = {} for char, phoneme in 
polyphonic_chars: @@ -153,7 +154,8 @@ def get_phoneme_labels(polyphonic_chars): return labels, char2phonemes -def get_char_phoneme_labels(polyphonic_chars): +def get_char_phoneme_labels(polyphonic_chars: List[List[str]] + ) -> Tuple[List[str], Dict[str, List[int]]]: labels = sorted( list(set([f'{char} {phoneme}' for char, phoneme in polyphonic_chars]))) char2phonemes = {} diff --git a/paddlespeech/t2s/frontend/g2pw/onnx_api.py b/paddlespeech/t2s/frontend/g2pw/onnx_api.py index 180e8ae15..ad32c4050 100644 --- a/paddlespeech/t2s/frontend/g2pw/onnx_api.py +++ b/paddlespeech/t2s/frontend/g2pw/onnx_api.py @@ -17,6 +17,10 @@ Credits """ import json import os +from typing import Any +from typing import Dict +from typing import List +from typing import Tuple import numpy as np import onnxruntime @@ -37,7 +41,8 @@ from paddlespeech.utils.env import MODEL_HOME model_version = '1.1' -def predict(session, onnx_input, labels): +def predict(session, onnx_input: Dict[str, Any], + labels: List[str]) -> Tuple[List[str], List[float]]: all_preds = [] all_confidences = [] probs = session.run([], { @@ -61,10 +66,10 @@ def predict(session, onnx_input, labels): class G2PWOnnxConverter: def __init__(self, - model_dir=MODEL_HOME, - style='bopomofo', - model_source=None, - enable_non_tradional_chinese=False): + model_dir: os.PathLike=MODEL_HOME, + style: str='bopomofo', + model_source: str=None, + enable_non_tradional_chinese: bool=False): uncompress_path = download_and_decompress( g2pw_onnx_models['G2PWModel'][model_version], model_dir) @@ -76,7 +81,8 @@ class G2PWOnnxConverter: os.path.join(uncompress_path, 'g2pW.onnx'), sess_options=sess_options) self.config = load_config( - os.path.join(uncompress_path, 'config.py'), use_default=True) + config_path=os.path.join(uncompress_path, 'config.py'), + use_default=True) self.model_source = model_source if model_source else self.config.model_source self.enable_opencc = enable_non_tradional_chinese @@ -103,9 +109,9 @@ class G2PWOnnxConverter: .strip().split('\n') ] self.labels, self.char2phonemes = get_char_phoneme_labels( - self.polyphonic_chars + polyphonic_chars=self.polyphonic_chars ) if self.config.use_char_phoneme else get_phoneme_labels( - self.polyphonic_chars) + polyphonic_chars=self.polyphonic_chars) self.chars = sorted(list(self.char2phonemes.keys())) @@ -146,7 +152,7 @@ class G2PWOnnxConverter: if self.enable_opencc: self.cc = OpenCC('s2tw') - def _convert_bopomofo_to_pinyin(self, bopomofo): + def _convert_bopomofo_to_pinyin(self, bopomofo: str) -> str: tone = bopomofo[-1] assert tone in '12345' component = self.bopomofo_convert_dict.get(bopomofo[:-1]) @@ -156,7 +162,7 @@ class G2PWOnnxConverter: print(f'Warning: "{bopomofo}" cannot convert to pinyin') return None - def __call__(self, sentences): + def __call__(self, sentences: List[str]) -> List[List[str]]: if isinstance(sentences, str): sentences = [sentences] @@ -169,23 +175,25 @@ class G2PWOnnxConverter: sentences = translated_sentences texts, query_ids, sent_ids, partial_results = self._prepare_data( - sentences) + sentences=sentences) if len(texts) == 0: # sentences no polyphonic words return partial_results onnx_input = prepare_onnx_input( - self.tokenizer, - self.labels, - self.char2phonemes, - self.chars, - texts, - query_ids, + tokenizer=self.tokenizer, + labels=self.labels, + char2phonemes=self.char2phonemes, + chars=self.chars, + texts=texts, + query_ids=query_ids, use_mask=self.config.use_mask, - use_char_phoneme=self.config.use_char_phoneme, window_size=None) - preds, confidences = 
predict(self.session_g2pW, onnx_input, self.labels) + preds, confidences = predict( + session=self.session_g2pW, + onnx_input=onnx_input, + labels=self.labels) if self.config.use_char_phoneme: preds = [pred.split(' ')[1] for pred in preds] @@ -195,7 +203,9 @@ return results - def _prepare_data(self, sentences): + def _prepare_data( + self, sentences: List[str] + ) -> Tuple[List[str], List[int], List[int], List[List[str]]]: texts, query_ids, sent_ids, partial_results = [], [], [], [] for sent_id, sent in enumerate(sentences): # pypinyin works well for Simplified Chinese than Traditional Chinese diff --git a/paddlespeech/t2s/frontend/g2pw/utils.py b/paddlespeech/t2s/frontend/g2pw/utils.py index ad02c4c1d..ba9ce51ba 100644 --- a/paddlespeech/t2s/frontend/g2pw/utils.py +++ b/paddlespeech/t2s/frontend/g2pw/utils.py @@ -15,10 +15,11 @@ Credits This code is modified from https://github.com/GitYCC/g2pW """ +import os import re -def wordize_and_map(text): +def wordize_and_map(text: str): words = [] index_map_from_text_to_word = [] index_map_from_word_to_text = [] @@ -54,8 +55,8 @@ def wordize_and_map(text): return words, index_map_from_text_to_word, index_map_from_word_to_text -def tokenize_and_map(tokenizer, text): - words, text2word, word2text = wordize_and_map(text) +def tokenize_and_map(tokenizer, text: str): + words, text2word, word2text = wordize_and_map(text=text) tokens = [] index_map_from_token_to_text = [] @@ -82,7 +83,7 @@ def tokenize_and_map(tokenizer, text): return tokens, index_map_from_text_to_token, index_map_from_token_to_text -def _load_config(config_path): +def _load_config(config_path: os.PathLike): import importlib.util spec = importlib.util.spec_from_file_location('__init__', config_path) config = importlib.util.module_from_spec(spec) @@ -130,7 +131,7 @@ default_config_dict = { } -def load_config(config_path, use_default=False): +def load_config(config_path: os.PathLike, use_default: bool=False): config = _load_config(config_path) if use_default: for attr, val in default_config_dict.items(): From e6cbcca3e220b3b2ae869055f0771b48958b512b Mon Sep 17 00:00:00 2001 From: TianYuan Date: Fri, 16 Sep 2022 16:23:47 +0800 Subject: [PATCH 101/101] fix ERNIE-SAT README, test=doc (#2392) --- examples/aishell3/ernie_sat/README.md | 13 ++++++------- examples/aishell3_vctk/ernie_sat/README.md | 13 ++++++------- examples/vctk/ernie_sat/README.md | 11 +++++------ 3 files changed, 17 insertions(+), 20 deletions(-) diff --git a/examples/aishell3/ernie_sat/README.md b/examples/aishell3/ernie_sat/README.md index 707ee1381..eb867ab75 100644 --- a/examples/aishell3/ernie_sat/README.md +++ b/examples/aishell3/ernie_sat/README.md @@ -1,11 +1,10 @@ -# ERNIE-SAT with AISHELL3 dataset +# ERNIE-SAT with AISHELL3 dataset +ERNIE-SAT is a speech-text joint pretraining framework which achieves SOTA results in cross-lingual multi-speaker speech synthesis and cross-lingual speech editing tasks. It can be applied to a series of scenarios such as Speech Editing, personalized Speech Synthesis, and Voice Cloning.
-ERNIE-SAT 是可以同时处理中英文的跨语言的语音-语言跨模态大模型,其在语音编辑、个性化语音合成以及跨语言的语音合成等多个任务取得了领先效果。可以应用于语音编辑、个性化合成、语音克隆、同传翻译等一系列场景,该项目供研究使用。 - -## 模型框架 -ERNIE-SAT 中我们提出了两项创新: -- 在预训练过程中将中英双语对应的音素作为输入,实现了跨语言、个性化的软音素映射 -- 采用语言和语音的联合掩码学习实现了语言和语音的对齐 +## Model Framework +In ERNIE-SAT, we propose two innovations: +- In the pretraining process, the phonemes corresponding to Chinese and English are used as input to achieve cross-language and personalized soft phoneme mapping +- The joint mask learning of speech and text is used to realize the alignment of speech and text

diff --git a/examples/aishell3_vctk/ernie_sat/README.md b/examples/aishell3_vctk/ernie_sat/README.md index a849488d5..d55af6756 100644 --- a/examples/aishell3_vctk/ernie_sat/README.md +++ b/examples/aishell3_vctk/ernie_sat/README.md @@ -1,11 +1,10 @@ -# ERNIE-SAT with AISHELL3 and VCTK dataset +# ERNIE-SAT with AISHELL3 and VCTK dataset +ERNIE-SAT is a speech-text joint pretraining framework which achieves SOTA results in cross-lingual multi-speaker speech synthesis and cross-lingual speech editing tasks. It can be applied to a series of scenarios such as Speech Editing, personalized Speech Synthesis, and Voice Cloning. -ERNIE-SAT 是可以同时处理中英文的跨语言的语音-语言跨模态大模型,其在语音编辑、个性化语音合成以及跨语言的语音合成等多个任务取得了领先效果。可以应用于语音编辑、个性化合成、语音克隆、同传翻译等一系列场景,该项目供研究使用。 - -## 模型框架 -ERNIE-SAT 中我们提出了两项创新: -- 在预训练过程中将中英双语对应的音素作为输入,实现了跨语言、个性化的软音素映射 -- 采用语言和语音的联合掩码学习实现了语言和语音的对齐 +## Model Framework +In ERNIE-SAT, we propose two innovations: +- In the pretraining process, the phonemes corresponding to Chinese and English are used as input to achieve cross-language and personalized soft phoneme mapping +- The joint mask learning of speech and text is used to realize the alignment of speech and text

diff --git a/examples/vctk/ernie_sat/README.md b/examples/vctk/ernie_sat/README.md index 0a2f9359e..94c7ae25d 100644 --- a/examples/vctk/ernie_sat/README.md +++ b/examples/vctk/ernie_sat/README.md @@ -1,11 +1,10 @@ # ERNIE-SAT with VCTK dataset +ERNIE-SAT is a speech-text joint pretraining framework which achieves SOTA results in cross-lingual multi-speaker speech synthesis and cross-lingual speech editing tasks. It can be applied to a series of scenarios such as Speech Editing, personalized Speech Synthesis, and Voice Cloning. -ERNIE-SAT 是可以同时处理中英文的跨语言的语音-语言跨模态大模型,其在语音编辑、个性化语音合成以及跨语言的语音合成等多个任务取得了领先效果。可以应用于语音编辑、个性化合成、语音克隆、同传翻译等一系列场景,该项目供研究使用。 - -## 模型框架 -ERNIE-SAT 中我们提出了两项创新: -- 在预训练过程中将中英双语对应的音素作为输入,实现了跨语言、个性化的软音素映射 -- 采用语言和语音的联合掩码学习实现了语言和语音的对齐 +## Model Framework +In ERNIE-SAT, we propose two innovations: +- In the pretraining process, the phonemes corresponding to Chinese and English are used as input to achieve cross-language and personalized soft phoneme mapping +- The joint mask learning of speech and text is used to realize the alignment of speech and text
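To make the speech editing vs. speech synthesis distinction above concrete, here is a small sketch of the waveform-splicing rule that PATCH 095 adds to get_wav() in synthesize_e2e.py (the helper name splice_output and the toy arrays are invented for illustration; only the branch logic mirrors the patch):

    # sketch of the task_name branch added in PATCH 095 (illustrative only)
    import numpy as np

    def splice_output(wav_org, alt_wav, old_time_bdy, task_name="synthesize"):
        if task_name == "edit":
            # keep the original audio outside the regenerated span
            return np.concatenate(
                [wav_org[:old_time_bdy[0]], alt_wav, wav_org[old_time_bdy[1]:]])
        # "synthesize": the generated waveform is returned as a whole
        return alt_wav

    # toy usage: replace samples 100..200 of a 1-second 24 kHz signal
    wav_org = np.zeros(24000, dtype=np.float32)
    alt_wav = np.ones(100, dtype=np.float32)
    print(splice_output(wav_org, alt_wav, [100, 200], task_name="edit").shape)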