From 528ae58a67e2878d9fbbf01d7233d5a84642f514 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Wed, 8 Mar 2023 11:03:45 +0800 Subject: [PATCH 01/37] [TTS]remove pad op in static model by replace F.pad with nn.Pad1D and nn.Pad2D (#3002) * remove pad op in static model by replace F.pad with nn.Pad1D and nn.Pad2D * fix variable names * add note --- examples/csmsc/vits/local/paddle2onnx.sh | 1 + examples/csmsc/vits/run.sh | 31 +++++++++++++++++++++++ paddlespeech/t2s/models/vits/generator.py | 7 ++++- paddlespeech/t2s/models/vits/transform.py | 24 ++++++++---------- 4 files changed, 49 insertions(+), 14 deletions(-) create mode 120000 examples/csmsc/vits/local/paddle2onnx.sh diff --git a/examples/csmsc/vits/local/paddle2onnx.sh b/examples/csmsc/vits/local/paddle2onnx.sh new file mode 120000 index 000000000..87c46634d --- /dev/null +++ b/examples/csmsc/vits/local/paddle2onnx.sh @@ -0,0 +1 @@ +../../tts3/local/paddle2onnx.sh \ No newline at end of file diff --git a/examples/csmsc/vits/run.sh b/examples/csmsc/vits/run.sh index ac190bfa8..f2c5d452f 100755 --- a/examples/csmsc/vits/run.sh +++ b/examples/csmsc/vits/run.sh @@ -39,3 +39,34 @@ fi if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then CUDA_VISIBLE_DEVICES=${gpus} ./local/inference.sh ${train_output_path} ${add_blank}|| exit -1 fi + +# # not ready yet for operator missing in Paddle2ONNX +# # paddle2onnx, please make sure the static models are in ${train_output_path}/inference first +# # we have only tested the following models so far +# if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then +# # install paddle2onnx +# version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}') +# if [[ -z "$version" || ${version} != '1.0.0' ]]; then +# pip install paddle2onnx==1.0.0 +# fi +# ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx vits_csmsc +# fi + +# # inference with onnxruntime +# if [ ${stage} -le 6 ] && [ ${stop_stage} -ge 6 ]; then +# ./local/ort_predict.sh ${train_output_path} +# fi + +# # not ready yet for operator missing in Paddle-Lite +# # must run after stage 3 (which stage generated static models) +# if [ ${stage} -le 7 ] && [ ${stop_stage} -ge 7 ]; then +# # NOTE by yuantian 2022.11.21: please compile develop version of Paddle-Lite to export and run TTS models, +# # cause TTS models are supported by https://github.com/PaddlePaddle/Paddle-Lite/pull/9587 +# # and https://github.com/PaddlePaddle/Paddle-Lite/pull/9706 +# ./local/export2lite.sh ${train_output_path} inference pdlite vits_csmsc x86 +# fi + +# if [ ${stage} -le 8 ] && [ ${stop_stage} -ge 8 ]; then +# CUDA_VISIBLE_DEVICES=${gpus} ./local/lite_predict.sh ${train_output_path} || exit -1 +# fi + diff --git a/paddlespeech/t2s/models/vits/generator.py b/paddlespeech/t2s/models/vits/generator.py index 7ecc51619..fbd2d6653 100644 --- a/paddlespeech/t2s/models/vits/generator.py +++ b/paddlespeech/t2s/models/vits/generator.py @@ -279,6 +279,10 @@ class VITSGenerator(nn.Layer): from paddlespeech.t2s.models.vits.monotonic_align import maximum_path self.maximum_path = maximum_path + self.pad1d = nn.Pad1D( + padding=[1, 0], + mode='constant', + data_format='NLC', ) def forward( self, @@ -685,5 +689,6 @@ class VITSGenerator(nn.Layer): ''' path = paddle.cast(path, dtype='float32') - path = path - F.pad(path, [0, 0, 1, 0, 0, 0])[:, :-1] + pad_tmp = self.pad1d(path)[:, :-1] + path = path - pad_tmp return path.unsqueeze(1).transpose([0, 1, 3, 2]) * mask diff --git a/paddlespeech/t2s/models/vits/transform.py b/paddlespeech/t2s/models/vits/transform.py index 
ea333dcff..61bd5ee2b 100644 --- a/paddlespeech/t2s/models/vits/transform.py +++ b/paddlespeech/t2s/models/vits/transform.py @@ -18,6 +18,7 @@ This code is based on https://github.com/bayesiains/nflows. """ import numpy as np import paddle +from paddle import nn from paddle.nn import functional as F from paddlespeech.t2s.modules.nets_utils import paddle_gather @@ -87,9 +88,9 @@ def unconstrained_rational_quadratic_spline( outputs = paddle.zeros(inputs.shape) logabsdet = paddle.zeros(inputs.shape) if tails == "linear": - unnormalized_derivatives = F.pad( - unnormalized_derivatives, - pad=[0] * (len(unnormalized_derivatives.shape) - 1) * 2 + [1, 1]) + # 注意 padding 的参数顺序 + pad2d = nn.Pad2D(padding=[1, 1, 0, 0], mode='constant') + unnormalized_derivatives = pad2d(unnormalized_derivatives) constant = np.log(np.exp(1 - min_derivative) - 1) unnormalized_derivatives[..., 0] = constant unnormalized_derivatives[..., -1] = constant @@ -142,6 +143,10 @@ def rational_quadratic_spline( # for dygraph to static # if paddle.min(inputs) < left or paddle.max(inputs) > right: # raise ValueError("Input to a transform is not within its domain") + pad1d = nn.Pad1D( + padding=[1, 0], + mode='constant', + data_format='NCL', ) num_bins = unnormalized_widths.shape[-1] # for dygraph to static @@ -153,11 +158,8 @@ def rational_quadratic_spline( widths = F.softmax(unnormalized_widths, axis=-1) widths = min_bin_width + (1 - min_bin_width * num_bins) * widths cumwidths = paddle.cumsum(widths, axis=-1) - cumwidths = F.pad( - cumwidths, - pad=[0] * (len(cumwidths.shape) - 1) * 2 + [1, 0], - mode="constant", - value=0.0) + + cumwidths = pad1d(cumwidths.unsqueeze(0)).squeeze() cumwidths = (right - left) * cumwidths + left cumwidths[..., 0] = left cumwidths[..., -1] = right @@ -168,11 +170,7 @@ def rational_quadratic_spline( heights = F.softmax(unnormalized_heights, axis=-1) heights = min_bin_height + (1 - min_bin_height * num_bins) * heights cumheights = paddle.cumsum(heights, axis=-1) - cumheights = F.pad( - cumheights, - pad=[0] * (len(cumheights.shape) - 1) * 2 + [1, 0], - mode="constant", - value=0.0) + cumheights = pad1d(cumheights.unsqueeze(0)).squeeze() cumheights = (top - bottom) * cumheights + bottom cumheights[..., 0] = bottom cumheights[..., -1] = top From 30e546c7b6c9aabfbada906996555d6776d76974 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Thu, 9 Mar 2023 10:22:48 +0800 Subject: [PATCH 02/37] Update copyright-check.hook --- .pre-commit-hooks/copyright-check.hook | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-hooks/copyright-check.hook b/.pre-commit-hooks/copyright-check.hook index 761edbc01..5a409e062 100644 --- a/.pre-commit-hooks/copyright-check.hook +++ b/.pre-commit-hooks/copyright-check.hook @@ -19,7 +19,7 @@ import subprocess import platform COPYRIGHT = ''' -Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -128,4 +128,4 @@ def main(argv=None): if __name__ == '__main__': - exit(main()) \ No newline at end of file + exit(main()) From 5186319f48d0cd631a48f26ff9fc94f5fc4ff3f0 Mon Sep 17 00:00:00 2001 From: zxcd <228587199@qq.com> Date: Thu, 9 Mar 2023 15:04:29 +0800 Subject: [PATCH 03/37] fix load model schedule error, config optional. 
(#3008) --- paddlespeech/s2t/exps/wav2vec2/model.py | 2 +- paddlespeech/s2t/training/scheduler.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/paddlespeech/s2t/exps/wav2vec2/model.py b/paddlespeech/s2t/exps/wav2vec2/model.py index 86b56b876..6c90f99e1 100644 --- a/paddlespeech/s2t/exps/wav2vec2/model.py +++ b/paddlespeech/s2t/exps/wav2vec2/model.py @@ -591,7 +591,7 @@ class Wav2Vec2ASRTrainer(Trainer): def setup_dataloader(self): config = self.config.clone() self.use_streamdata = config.get("use_stream_data", False) - self.use_sb = config.use_sb_pipeline + self.use_sb = config.get("use_sb_pipeline", False) if self.use_sb: hparams_file = config.sb_pipeline_conf with open(hparams_file, 'r', encoding='utf8') as fin: diff --git a/paddlespeech/s2t/training/scheduler.py b/paddlespeech/s2t/training/scheduler.py index 53c756ce3..a5e7a08f1 100644 --- a/paddlespeech/s2t/training/scheduler.py +++ b/paddlespeech/s2t/training/scheduler.py @@ -220,7 +220,6 @@ class NewBobScheduler(LRScheduler): def load(self, data): """Loads the needed information.""" - data = paddle.load(data) self.last_epoch = data["current_epoch_index"] self.hyperparam_value = data["hyperparam_value"] self.metric_values = data["metric_values"] From 3145325b4eafd93d803a97d675fd00551b63a2b0 Mon Sep 17 00:00:00 2001 From: zxcd <228587199@qq.com> Date: Fri, 10 Mar 2023 11:30:09 +0800 Subject: [PATCH 04/37] [ASR] add wav2vec2 aishell model result, test=asr (#3012) * Create RESULT.md * add wav2vec2ASR-large-aishell1 finetune model. * update model link and add readme. * fix released model info. --- README.md | 1 + README_cn.md | 1 + docs/source/released_model.md | 2 +- examples/aishell/asr3/README.md | 8 ++--- examples/aishell/asr3/RESULT.md | 17 +++++++++++ paddlespeech/resource/pretrained_models.py | 35 ++++++++++++++++------ 6 files changed, 50 insertions(+), 14 deletions(-) create mode 100644 examples/aishell/asr3/RESULT.md diff --git a/README.md b/README.md index 0cb99d1c6..5c5dc3a0f 100644 --- a/README.md +++ b/README.md @@ -178,6 +178,7 @@ Via the easy-to-use, efficient, flexible and scalable implementation, our vision - 🧩 *Cascaded models application*: as an extension of the typical traditional audio tasks, we combine the workflows of the aforementioned tasks with other fields like Natural language processing (NLP) and Computer Vision (CV). ### Recent Update +- 👑 2023.03.09: Add [Wav2vec2ASR-zh](./examples/aishell/asr3). - 🎉 2023.03.07: Add [TTS ARM Linux C++ Demo](./demos/TTSArmLinux). - 🎉 2023.02.16: Add [Cantonese TTS](./examples/canton/tts3). - 🔥 2023.01.10: Add [code-switch asr CLI and Demos](./demos/speech_recognition). diff --git a/README_cn.md b/README_cn.md index 0f2adf811..fa013029c 100644 --- a/README_cn.md +++ b/README_cn.md @@ -183,6 +183,7 @@ - 🧩 级联模型应用: 作为传统语音任务的扩展,我们结合了自然语言处理、计算机视觉等任务,实现更接近实际需求的产业级应用。 ### 近期更新 +- 👑 2023.03.09: 新增 [Wav2vec2ASR-zh](./examples/aishell/asr3). 
- 🎉 2023.03.07: 新增 [TTS ARM Linux C++ 部署示例](./demos/TTSArmLinux)。 - 🎉 2023.02.16: 新增[粤语语音合成](./examples/canton/tts3)。 - 🔥 2023.01.10: 新增[中英混合 ASR CLI 和 Demos](./demos/speech_recognition)。 diff --git a/docs/source/released_model.md b/docs/source/released_model.md index 634be7b7f..9e9221779 100644 --- a/docs/source/released_model.md +++ b/docs/source/released_model.md @@ -25,7 +25,7 @@ Model | Pre-Train Method | Pre-Train Data | Finetune Data | Size | Descriptions [Wav2vec2-large-960h-lv60-self Model](https://paddlespeech.bj.bcebos.com/wav2vec/wav2vec2-large-960h-lv60-self.pdparams) | wav2vec2 | Librispeech and LV-60k Dataset (5.3w h) | - | 1.18 GB |Pre-trained Wav2vec2.0 Model | - | - | - | [Wav2vec2ASR-large-960h-librispeech Model](https://paddlespeech.bj.bcebos.com/s2t/librispeech/asr3/wav2vec2ASR-large-960h-librispeech_ckpt_1.3.1.model.tar.gz) | wav2vec2 | Librispeech and LV-60k Dataset (5.3w h) | Librispeech (960 h) | 718 MB |Encoder: Wav2vec2.0, Decoder: CTC, Decoding method: Greedy search | - | 0.0189 | [Wav2vecASR Librispeech ASR3](../../examples/librispeech/asr3) | [Wav2vec2-large-wenetspeech-self Model](https://paddlespeech.bj.bcebos.com/s2t/aishell/asr3/wav2vec2-large-wenetspeech-self_ckpt_1.3.0.model.tar.gz) | wav2vec2 | Wenetspeech Dataset (1w h) | - | 714 MB |Pre-trained Wav2vec2.0 Model | - | - | - | -[Wav2vec2ASR-large-aishell1 Model](https://paddlespeech.bj.bcebos.com/s2t/aishell/asr3/wav2vec2ASR-large-aishell1_ckpt_1.3.0.model.tar.gz) | wav2vec2 | Wenetspeech Dataset (1w h) | aishell1 (train set) | 1.17 GB |Encoder: Wav2vec2.0, Decoder: CTC, Decoding method: Greedy search | 0.0453 | - | - | +[Wav2vec2ASR-large-aishell1 Model](https://paddlespeech.bj.bcebos.com/s2t/aishell/asr3/wav2vec2ASR-large-aishell1_ckpt_1.4.0.model.tar.gz) | wav2vec2 | Wenetspeech Dataset (1w h) | aishell1 (train set) | 1.18 GB |Encoder: Wav2vec2.0, Decoder: CTC, Decoding method: Greedy search | 0.0510 | - | - | ### Whisper Model Demo Link | Training Data | Size | Descriptions | CER | Model diff --git a/examples/aishell/asr3/README.md b/examples/aishell/asr3/README.md index e5806d621..f6fa60d7f 100644 --- a/examples/aishell/asr3/README.md +++ b/examples/aishell/asr3/README.md @@ -164,8 +164,8 @@ using the `tar` scripts to unpack the model and then you can use the script to t For example: ```bash -wget https://paddlespeech.bj.bcebos.com/s2t/aishell/asr3/wav2vec2ASR-large-aishell1_ckpt_1.3.0.model.tar.gz -tar xzvf wav2vec2ASR-large-aishell1_ckpt_1.3.0.model.tar.gz +wget https://paddlespeech.bj.bcebos.com/s2t/aishell/asr3/wav2vec2ASR-large-aishell1_ckpt_1.4.0.model.tar.gz +tar xzvf wav2vec2ASR-large-aishell1_ckpt_1.4.0.model.tar.gz source path.sh # If you have process the data and get the manifest file, you can skip the following 2 steps bash local/data.sh --stage -1 --stop_stage -1 @@ -185,8 +185,8 @@ In some situations, you want to use the trained model to do the inference for th ``` you can train the model by yourself using ```bash run.sh --stage 0 --stop_stage 3```, or you can download the pretrained model through the script below: ```bash -wget https://paddlespeech.bj.bcebos.com/s2t/aishell/asr3/wav2vec2ASR-large-aishell1_ckpt_1.3.0.model.tar.gz -tar xzvf wav2vec2ASR-large-aishell1_ckpt_1.3.0.model.tar.gz +wget https://paddlespeech.bj.bcebos.com/s2t/aishell/asr3/wav2vec2ASR-large-aishell1_ckpt_1.4.0.model.tar.gz +tar xzvf wav2vec2ASR-large-aishell1_ckpt_1.4.0.model.tar.gz ``` You can download the audio demo: ```bash diff --git a/examples/aishell/asr3/RESULT.md b/examples/aishell/asr3/RESULT.md new 
file mode 100644 index 000000000..1291ef15c --- /dev/null +++ b/examples/aishell/asr3/RESULT.md @@ -0,0 +1,17 @@ +# AISHELL + +## Version + +* paddle version: develop (commit id: daea892c67e85da91906864de40ce9f6f1b893ae) +* paddlespeech version: develop (commit id: c14b4238b256693281e59605abff7c9435b3e2b2) + +## Device +* python: 3.7 +* cuda: 10.2 +* cudnn: 7.6 + +## Result +train: Epoch 80, 2*V100-32G, batchsize:5 +| Model | Params | Config | Augmentation| Test set | Decode method | WER | +| --- | --- | --- | --- | --- | --- | --- | +| wav2vec2ASR | 324.49 M | conf/wav2vec2ASR.yaml | spec_aug | test-set | greedy search | 5.1009 | diff --git a/paddlespeech/resource/pretrained_models.py b/paddlespeech/resource/pretrained_models.py index dd5f08b0b..04df18623 100644 --- a/paddlespeech/resource/pretrained_models.py +++ b/paddlespeech/resource/pretrained_models.py @@ -103,6 +103,22 @@ ssl_dynamic_pretrained_models = { 'exp/wav2vec2ASR/checkpoints/avg_1.pdparams', }, }, + "wav2vec2ASR_aishell1-zh-16k": { + '1.4': { + 'url': + 'https://paddlespeech.bj.bcebos.com/s2t/aishell/asr3/wav2vec2ASR-large-aishell1_ckpt_1.4.0.model.tar.gz', + 'md5': + '9f0bc943adb822789bf61e674b229d17', + 'cfg_path': + 'model.yaml', + 'ckpt_path': + 'exp/wav2vec2ASR/checkpoints/avg_1', + 'model': + 'exp/wav2vec2ASR/checkpoints/avg_1.pdparams', + 'params': + 'exp/wav2vec2ASR/checkpoints/avg_1.pdparams', + }, + }, } # --------------------------------- @@ -1644,8 +1660,8 @@ tts_static_pretrained_models["pwgan_male-en"] = tts_static_pretrained_models[ "pwgan_male-mix"] = tts_static_pretrained_models["pwgan_male-zh"] tts_static_pretrained_models["hifigan_male-en"] = tts_static_pretrained_models[ "hifigan_male-mix"] = tts_static_pretrained_models["hifigan_male-zh"] -tts_static_pretrained_models["pwgan_aishell3-canton"] = tts_static_pretrained_models[ - "pwgan_aishell3-zh"] +tts_static_pretrained_models[ + "pwgan_aishell3-canton"] = tts_static_pretrained_models["pwgan_aishell3-zh"] tts_onnx_pretrained_models = { # speedyspeech @@ -1979,8 +1995,9 @@ tts_onnx_pretrained_models["pwgan_male_onnx-en"] = tts_onnx_pretrained_models[ tts_onnx_pretrained_models["hifigan_male_onnx-en"] = tts_onnx_pretrained_models[ "hifigan_male_onnx-mix"] = tts_onnx_pretrained_models[ "hifigan_male_onnx-zh"] -tts_onnx_pretrained_models["pwgan_aishell3_onnx-canton"] = tts_onnx_pretrained_models[ - "pwgan_aishell3_onnx-zh"] +tts_onnx_pretrained_models[ + "pwgan_aishell3_onnx-canton"] = tts_onnx_pretrained_models[ + "pwgan_aishell3_onnx-zh"] # --------------------------------- # ------------ Vector ------------- @@ -2058,10 +2075,10 @@ rhy_frontend_models = { # --------------------------------- StarGANv2VC_source = { - '1.0' :{ - 'url': 'https://paddlespeech.bj.bcebos.com/Parakeet/released_models/starganv2vc/StarGANv2VC_source.zip', - 'md5': '195e169419163f5648030ba84c71f866', - + '1.0': { + 'url': + 'https://paddlespeech.bj.bcebos.com/Parakeet/released_models/starganv2vc/StarGANv2VC_source.zip', + 'md5': + '195e169419163f5648030ba84c71f866', } } - From b8c597183b3b2702673c175221e2764a6aa03dbf Mon Sep 17 00:00:00 2001 From: TianYuan Date: Fri, 10 Mar 2023 16:02:24 +0800 Subject: [PATCH 05/37] Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 5c5dc3a0f..3c60db650 100644 --- a/README.md +++ b/README.md @@ -180,6 +180,7 @@ Via the easy-to-use, efficient, flexible and scalable implementation, our vision ### Recent Update - 👑 2023.03.09: Add [Wav2vec2ASR-zh](./examples/aishell/asr3). 
- 🎉 2023.03.07: Add [TTS ARM Linux C++ Demo](./demos/TTSArmLinux). +- 🔥 2023.03.03 Add Voice Conversion [StarGANv2-VC synthesize pipeline](./examples/vctk/vc3). - 🎉 2023.02.16: Add [Cantonese TTS](./examples/canton/tts3). - 🔥 2023.01.10: Add [code-switch asr CLI and Demos](./demos/speech_recognition). - 👑 2023.01.06: Add [code-switch asr tal_cs recipe](./examples/tal_cs/asr1/). From 817263fd3010443964a82180bc2921a4ad9ddef4 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Fri, 10 Mar 2023 16:03:43 +0800 Subject: [PATCH 06/37] Update README_cn.md --- README_cn.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README_cn.md b/README_cn.md index fa013029c..29ee387c0 100644 --- a/README_cn.md +++ b/README_cn.md @@ -183,8 +183,9 @@ - 🧩 级联模型应用: 作为传统语音任务的扩展,我们结合了自然语言处理、计算机视觉等任务,实现更接近实际需求的产业级应用。 ### 近期更新 -- 👑 2023.03.09: 新增 [Wav2vec2ASR-zh](./examples/aishell/asr3). +- 👑 2023.03.09: 新增 [Wav2vec2ASR-zh](./examples/aishell/asr3)。 - 🎉 2023.03.07: 新增 [TTS ARM Linux C++ 部署示例](./demos/TTSArmLinux)。 +- 🔥 2023.03.03: 新增声音转换模型 [StarGANv2-VC 合成流程](./examples/vctk/vc3)。 - 🎉 2023.02.16: 新增[粤语语音合成](./examples/canton/tts3)。 - 🔥 2023.01.10: 新增[中英混合 ASR CLI 和 Demos](./demos/speech_recognition)。 - 👑 2023.01.06: 新增 [ASR 中英混合 tal_cs 训练推理流程](./examples/tal_cs/asr1/)。 From 319c805968916cf5cb9146e73fd15ce4cfbd574a Mon Sep 17 00:00:00 2001 From: MistEO Date: Fri, 10 Mar 2023 21:25:07 +0800 Subject: [PATCH 07/37] [TTS] Support set device id for tts prediction, test=tts (#3019) --- paddlespeech/t2s/exps/syn_utils.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/paddlespeech/t2s/exps/syn_utils.py b/paddlespeech/t2s/exps/syn_utils.py index 12b75615e..79f6d567b 100644 --- a/paddlespeech/t2s/exps/syn_utils.py +++ b/paddlespeech/t2s/exps/syn_utils.py @@ -490,6 +490,7 @@ def get_predictor( device: str='cpu', # for gpu use_trt: bool=False, + device_id: int=0, # for trt use_dynamic_shape: bool=True, min_subgraph_size: int=5, @@ -505,6 +506,7 @@ def get_predictor( params_file (os.PathLike): name of params_file. device (str): Choose the device you want to run, it can be: cpu/gpu, default is cpu. use_trt (bool): whether to use TensorRT or not in GPU. + device_id (int): Choose your device id, only valid when the device is gpu, default 0. use_dynamic_shape (bool): use dynamic shape or not in TensorRT. use_mkldnn (bool): whether to use MKLDNN or not in CPU. cpu_threads (int): num of thread when use CPU. 
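The hunk below threads the new argument through to Paddle Inference. As a rough sketch of the resulting call path (the 100 MB memory-pool size and the config calls mirror the code shown in this patch, but the standalone helper and the model file names are illustrative, not part of PaddleSpeech):

```python
# Sketch of what get_predictor() now does with device_id, assuming a static
# model exported as inference.pdmodel / inference.pdiparams.
from paddle import inference

def build_gpu_predictor(model_file: str, params_file: str, device_id: int = 0):
    config = inference.Config(model_file, params_file)
    config.enable_memory_optim()
    config.switch_ir_optim(True)
    # Previously the card index was hard-coded to 0; now the caller chooses it.
    config.enable_use_gpu(100, device_id)  # 100 MB initial GPU memory pool
    return inference.create_predictor(config)

# e.g. run TTS prediction on the second GPU
predictor = build_gpu_predictor("inference.pdmodel", "inference.pdiparams", device_id=1)
```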
@@ -521,7 +523,7 @@ def get_predictor( config.enable_memory_optim() config.switch_ir_optim(True) if device == "gpu": - config.enable_use_gpu(100, 0) + config.enable_use_gpu(100, device_id) else: config.disable_gpu() config.set_cpu_math_library_num_threads(cpu_threads) From 33190ac4cbdd09aa5c8bc548c3db1c7c9dc31ac6 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Mon, 13 Mar 2023 15:21:58 +0800 Subject: [PATCH 08/37] Update setup.py --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 970cb984a..ffe4d5f39 100644 --- a/setup.py +++ b/setup.py @@ -64,7 +64,7 @@ base = [ "sacrebleu", "textgrid", "timer", - "ToJyutping", + "ToJyutping==0.2.1", "typeguard", "webrtcvad", "yacs~=0.1.8", From acf943007e420cf791936daf3f065961d0bf1640 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Mon, 13 Mar 2023 15:32:53 +0800 Subject: [PATCH 09/37] Update requirements.txt --- docs/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/requirements.txt b/docs/requirements.txt index c2d56bf91..e40204228 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -38,7 +38,7 @@ sphinx-markdown-tables sphinx_rtd_theme textgrid timer -ToJyutping +ToJyutping==0.2.1 typeguard webrtcvad websockets From 1afd14acd936b3adf87a9a0410faf3d055102afd Mon Sep 17 00:00:00 2001 From: liangym <34430015+lym0302@users.noreply.github.com> Date: Mon, 13 Mar 2023 18:20:20 +0800 Subject: [PATCH 10/37] [TTS]add Diffsinger with opencpop dataset (#3005) --- examples/opencpop/svs1/README.md | 174 +++++ examples/opencpop/svs1/README_cn.md | 179 +++++ examples/opencpop/svs1/conf/default.yaml | 159 +++++ examples/opencpop/svs1/local/preprocess.sh | 74 ++ examples/opencpop/svs1/local/synthesize.sh | 27 + examples/opencpop/svs1/local/train.sh | 13 + examples/opencpop/svs1/path.sh | 13 + examples/opencpop/svs1/run.sh | 32 + paddlespeech/t2s/datasets/am_batch_fn.py | 123 ++++ paddlespeech/t2s/datasets/get_feats.py | 15 +- paddlespeech/t2s/datasets/preprocess_utils.py | 91 +++ paddlespeech/t2s/exps/diffsinger/__init__.py | 13 + .../t2s/exps/diffsinger/get_minmax.py | 82 +++ paddlespeech/t2s/exps/diffsinger/normalize.py | 189 +++++ .../t2s/exps/diffsinger/preprocess.py | 376 ++++++++++ paddlespeech/t2s/exps/diffsinger/train.py | 257 +++++++ paddlespeech/t2s/exps/syn_utils.py | 40 +- paddlespeech/t2s/exps/synthesize.py | 54 +- .../t2s/models/diffsinger/__init__.py | 15 + .../t2s/models/diffsinger/diffsinger.py | 399 +++++++++++ .../models/diffsinger/diffsinger_updater.py | 302 ++++++++ .../t2s/models/diffsinger/fastspeech2midi.py | 654 ++++++++++++++++++ .../t2s/models/fastspeech2/fastspeech2.py | 13 +- paddlespeech/t2s/modules/activation.py | 3 +- paddlespeech/t2s/modules/diffnet.py | 245 +++++++ paddlespeech/t2s/modules/diffusion.py | 261 ++----- paddlespeech/t2s/modules/masked_fill.py | 2 - .../modules/predictor/variance_predictor.py | 2 +- .../t2s/modules/transformer/encoder.py | 17 +- paddlespeech/t2s/modules/wavenet_denoiser.py | 191 +++++ 30 files changed, 3778 insertions(+), 237 deletions(-) create mode 100644 examples/opencpop/svs1/README.md create mode 100644 examples/opencpop/svs1/README_cn.md create mode 100644 examples/opencpop/svs1/conf/default.yaml create mode 100755 examples/opencpop/svs1/local/preprocess.sh create mode 100755 examples/opencpop/svs1/local/synthesize.sh create mode 100755 examples/opencpop/svs1/local/train.sh create mode 100755 examples/opencpop/svs1/path.sh create mode 100755 examples/opencpop/svs1/run.sh create mode 100644 
paddlespeech/t2s/exps/diffsinger/__init__.py
 create mode 100644 paddlespeech/t2s/exps/diffsinger/get_minmax.py
 create mode 100644 paddlespeech/t2s/exps/diffsinger/normalize.py
 create mode 100644 paddlespeech/t2s/exps/diffsinger/preprocess.py
 create mode 100644 paddlespeech/t2s/exps/diffsinger/train.py
 create mode 100644 paddlespeech/t2s/models/diffsinger/__init__.py
 create mode 100644 paddlespeech/t2s/models/diffsinger/diffsinger.py
 create mode 100644 paddlespeech/t2s/models/diffsinger/diffsinger_updater.py
 create mode 100644 paddlespeech/t2s/models/diffsinger/fastspeech2midi.py
 create mode 100644 paddlespeech/t2s/modules/diffnet.py
 create mode 100644 paddlespeech/t2s/modules/wavenet_denoiser.py

diff --git a/examples/opencpop/svs1/README.md b/examples/opencpop/svs1/README.md
new file mode 100644
index 000000000..2e28a6e61
--- /dev/null
+++ b/examples/opencpop/svs1/README.md
@@ -0,0 +1,174 @@
+([简体中文](./README_cn.md)|English)
+# DiffSinger with Opencpop
+This example contains code used to train a [DiffSinger](https://arxiv.org/abs/2105.02446) model with [Mandarin singing corpus](https://wenet.org.cn/opencpop/).
+
+## Dataset
+### Download and Extract
+Download Opencpop from its [Official Website](https://wenet.org.cn/opencpop/download/) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/Opencpop`.
+
+## Get Started
+Assume the path to the dataset is `~/datasets/Opencpop`.
+Run the command below to
+1. **source path**.
+2. preprocess the dataset.
+3. train the model.
+4. synthesize wavs.
+    - synthesize waveform from `metadata.jsonl`.
+    - (not supported yet) synthesize waveform from a text file.
+5. (not supported yet) inference using the static model.
+```bash
+./run.sh
+```
+You can choose a range of stages you want to run, or set `stage` equal to `stop-stage` to use only one stage. For example, running the following command will only preprocess the dataset.
+```bash
+./run.sh --stage 0 --stop-stage 0
+```
+### Data Preprocessing
+```bash
+./local/preprocess.sh ${conf_path}
+```
+When it is done, a `dump` folder is created in the current directory. The structure of the dump folder is listed below.
+
+```text
+dump
+├── dev
+│   ├── norm
+│   └── raw
+├── phone_id_map.txt
+├── speaker_id_map.txt
+├── test
+│   ├── norm
+│   └── raw
+└── train
+    ├── energy_stats.npy
+    ├── norm
+    ├── pitch_stats.npy
+    ├── raw
+    ├── speech_stats.npy
+    └── speech_stretchs.npy
+```
+The dataset is split into 3 parts, namely `train`, `dev`, and `test`, each of which contains a `norm` and `raw` subfolder. The raw folder contains the speech, pitch, and energy features of each utterance, while the norm folder contains the normalized ones. The statistics used to normalize features are computed from the training set, which is located in `dump/train/*_stats.npy`. `speech_stretchs.npy` contains the minimum and maximum values of each dimension of the mel spectrum, which are used for linear stretching before training/inference of the diffusion module.
+Note: since training on un-normalized features turns out to work better than training on normalized ones, the features saved under `norm` have in fact not been normalized.
+
+Also, there is a `metadata.jsonl` in each subfolder. It is a table-like file that contains the utterance id, speaker id, phones, text lengths, speech lengths, phone durations, the paths of the speech, pitch, and energy features, and the notes, note durations, and slur flags.
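A quick way to sanity-check the dump is to read one record back. The sketch below is illustrative: the field names follow the DiffSinger batch functions in `paddlespeech/t2s/datasets/am_batch_fn.py`, while the exact metadata layout (e.g. a `utt_id` key, or `speech` being stored as a feature path) is an assumption rather than something this patch guarantees.

```python
# Minimal sketch: peek at one utterance of the dumped metadata.
import jsonlines

with jsonlines.open("dump/train/norm/metadata.jsonl") as reader:
    item = next(iter(reader))  # first record as a plain dict

print(item["text_lengths"], item["speech_lengths"])                 # phone / frame counts
print(item["note"][:5], item["note_dur"][:5], item["is_slur"][:5])  # music score fields
print(item["speech"])  # assumed: path to this utterance's mel feature (*.npy)
```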
+ +### Model Training +```bash +CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path} +``` +`./local/train.sh` calls `${BIN_DIR}/train.py`. +Here's the complete help message. +```text +usage: train.py [-h] [--config CONFIG] [--train-metadata TRAIN_METADATA] + [--dev-metadata DEV_METADATA] [--output-dir OUTPUT_DIR] + [--ngpu NGPU] [--phones-dict PHONES_DICT] + [--speaker-dict SPEAKER_DICT] [--speech-stretchs SPEECH_STRETCHS] + +Train a FastSpeech2 model. + +optional arguments: + -h, --help show this help message and exit + --config CONFIG fastspeech2 config file. + --train-metadata TRAIN_METADATA + training data. + --dev-metadata DEV_METADATA + dev data. + --output-dir OUTPUT_DIR + output dir. + --ngpu NGPU if ngpu=0, use cpu. + --phones-dict PHONES_DICT + phone vocabulary file. + --speaker-dict SPEAKER_DICT + speaker id map file for multiple speaker model. + --speech-stretchs SPEECH_STRETCHS + min amd max mel for stretching. +``` +1. `--config` is a config file in yaml format to overwrite the default config, which can be found at `conf/default.yaml`. +2. `--train-metadata` and `--dev-metadata` should be the metadata file in the normalized subfolder of `train` and `dev` in the `dump` folder. +3. `--output-dir` is the directory to save the results of the experiment. Checkpoints are saved in `checkpoints/` inside this directory. +4. `--ngpu` is the number of gpus to use, if ngpu == 0, use cpu. +5. `--phones-dict` is the path of the phone vocabulary file. +6. `--speech-stretchs` is the path of mel's min-max data file. + +### Synthesizing +We use parallel wavegan as the neural vocoder. +Download pretrained parallel wavegan model from [pwgan_opencpop_ckpt_1.4.0.zip](https://paddlespeech.bj.bcebos.com/t2s/svs/opencpop/pwgan_opencpop_ckpt_1.4.0.zip) and unzip it. +```bash +unzip pwgan_opencpop_ckpt_1.4.0.zip +``` +Parallel WaveGAN checkpoint contains files listed below. +```text +pwgan_opencpop_ckpt_1.4.0.zip +├── default.yaml # default config used to train parallel wavegan +├── snapshot_iter_100000.pdz # model parameters of parallel wavegan +└── feats_stats.npy # statistics used to normalize spectrogram when training parallel wavegan +``` +`./local/synthesize.sh` calls `${BIN_DIR}/../synthesize.py`, which can synthesize waveform from `metadata.jsonl`. +```bash +CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name} +``` +```text +usage: synthesize.py [-h] + [--am {diffsinger_opencpop}] + [--am_config AM_CONFIG] [--am_ckpt AM_CKPT] + [--am_stat AM_STAT] [--phones_dict PHONES_DICT] + [--voc {pwgan_opencpop}] + [--voc_config VOC_CONFIG] [--voc_ckpt VOC_CKPT] + [--voc_stat VOC_STAT] [--ngpu NGPU] + [--test_metadata TEST_METADATA] [--output_dir OUTPUT_DIR] + [--speech_stretchs SPEECH_STRETCHS] + +Synthesize with acoustic model & vocoder + +optional arguments: + -h, --help show this help message and exit + --am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech,tacotron2_aishell3} + Choose acoustic model type of tts task. + --am_config AM_CONFIG + Config of acoustic model. + --am_ckpt AM_CKPT Checkpoint file of acoustic model. + --am_stat AM_STAT mean and standard deviation used to normalize + spectrogram when training acoustic model. + --phones_dict PHONES_DICT + phone vocabulary file. + --tones_dict TONES_DICT + tone vocabulary file. + --speaker_dict SPEAKER_DICT + speaker id map file. 
+ --voice-cloning VOICE_CLONING + whether training voice cloning model. + --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,wavernn_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,style_melgan_csmsc} + Choose vocoder type of tts task. + --voc_config VOC_CONFIG + Config of voc. + --voc_ckpt VOC_CKPT Checkpoint file of voc. + --voc_stat VOC_STAT mean and standard deviation used to normalize + spectrogram when training voc. + --ngpu NGPU if ngpu == 0, use cpu. + --test_metadata TEST_METADATA + test metadata. + --output_dir OUTPUT_DIR + output dir. + --speech-stretchs mel min and max values file. +``` + + +## Pretrained Model +Pretrained DiffSinger model: +- [diffsinger_opencpop_ckpt_1.4.0.zip](https://paddlespeech.bj.bcebos.com/t2s/svs/opencpop/diffsinger_opencpop_ckpt_1.4.0.zip) + +DiffSinger checkpoint contains files listed below. +```text +diffsinger_opencpop_ckpt_1.4.0.zip +├── default.yaml # default config used to train diffsinger +├── energy_stats.npy # statistics used to normalize energy when training diffsinger if norm is needed +├── phone_id_map.txt # phone vocabulary file when training diffsinger +├── pitch_stats.npy # statistics used to normalize pitch when training diffsinger if norm is needed +├── snapshot_iter_160000.pdz # model parameters of diffsinger +├── speech_stats.npy # statistics used to normalize mel when training diffsinger if norm is needed +└── speech_stretchs.npy # Min and max values to use for mel spectral stretching before training diffusion + +``` +At present, the text frontend is not perfect, and the method of `synthesize_e2e` is not supported for synthesizing audio. Try using `synthesize` first. \ No newline at end of file diff --git a/examples/opencpop/svs1/README_cn.md b/examples/opencpop/svs1/README_cn.md new file mode 100644 index 000000000..19908fd60 --- /dev/null +++ b/examples/opencpop/svs1/README_cn.md @@ -0,0 +1,179 @@ +(简体中文|[English](./README.md)) +# 用 Opencpop 数据集训练 DiffSinger 模型 + +本用例包含用于训练 [DiffSinger](https://arxiv.org/abs/2105.02446) 模型的代码,使用 [Mandarin singing corpus](https://wenet.org.cn/opencpop/) 数据集。 + +## 数据集 +### 下载并解压 +从 [官方网站](https://wenet.org.cn/opencpop/download/) 下载数据集 + +## 开始 +假设数据集的路径是 `~/datasets/Opencpop`. +运行下面的命令会进行如下操作: + +1. **设置原路径**。 +2. 对数据集进行预处理。 +3. 训练模型 +4. 合成波形 + - 从 `metadata.jsonl` 合成波形。 + - (支持中)从文本文件合成波形。 +5. 
(支持中)使用静态模型进行推理。
+```bash
+./run.sh
+```
+您可以选择要运行的一系列阶段,或者将 `stage` 设置为 `stop-stage` 以仅使用一个阶段,例如,运行以下命令只会预处理数据集。
+```bash
+./run.sh --stage 0 --stop-stage 0
+```
+### 数据预处理
+```bash
+./local/preprocess.sh ${conf_path}
+```
+当它完成时,将在当前目录中创建 `dump` 文件夹。转储文件夹的结构如下所示。
+
+```text
+dump
+├── dev
+│   ├── norm
+│   └── raw
+├── phone_id_map.txt
+├── speaker_id_map.txt
+├── test
+│   ├── norm
+│   └── raw
+└── train
+    ├── energy_stats.npy
+    ├── norm
+    ├── pitch_stats.npy
+    ├── raw
+    ├── speech_stats.npy
+    └── speech_stretchs.npy
+```
+
+数据集分为三个部分,即 `train`、`dev` 和 `test`,每个部分都包含一个 `norm` 和 `raw` 子文件夹。原始文件夹包含每个话语的语音、音调和能量特征,而 `norm` 文件夹包含规范化的特征。用于规范化特征的统计数据是从 `dump/train/*_stats.npy` 中的训练集计算出来的。`speech_stretchs.npy` 中包含 mel 谱每个维度上的最小值和最大值,用于 diffusion 模块训练/推理前的线性拉伸。
+注意:由于非 norm 特征训练效果优于 norm,因此 `norm` 下保存的特征是未经过 norm 的特征。
+
+此外,还有一个 `metadata.jsonl` 在每个子文件夹中。它是一个类似表格的文件,包含话语 id、音色 id、音素、文本长度、语音长度、音素持续时间、语音特征路径、音调特征路径、能量特征路径、音调、音调持续时间、是否为转音。
+
+### 模型训练
+```bash
+CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path}
+```
+`./local/train.sh` 调用 `${BIN_DIR}/train.py` 。
+以下是完整的帮助信息。
+
+```text
+usage: train.py [-h] [--config CONFIG] [--train-metadata TRAIN_METADATA]
+                [--dev-metadata DEV_METADATA] [--output-dir OUTPUT_DIR]
+                [--ngpu NGPU] [--phones-dict PHONES_DICT]
+                [--speaker-dict SPEAKER_DICT] [--speech-stretchs SPEECH_STRETCHS]
+
+Train a DiffSinger model.
+
+optional arguments:
+  -h, --help            show this help message and exit
+  --config CONFIG       fastspeech2 config file.
+  --train-metadata TRAIN_METADATA
+                        training data.
+  --dev-metadata DEV_METADATA
+                        dev data.
+  --output-dir OUTPUT_DIR
+                        output dir.
+  --ngpu NGPU           if ngpu=0, use cpu.
+  --phones-dict PHONES_DICT
+                        phone vocabulary file.
+  --speaker-dict SPEAKER_DICT
+                        speaker id map file for multiple speaker model.
+  --speech-stretchs SPEECH_STRETCHS
+                        min amd max mel for stretching.
+```
+1. `--config` 是一个 yaml 格式的配置文件,用于覆盖默认配置,位于 `conf/default.yaml`。
+2. `--train-metadata` 和 `--dev-metadata` 应为 `dump` 文件夹中 `train` 和 `dev` 下的规范化元数据文件。
+3. `--output-dir` 是保存结果的目录。检查点保存在此目录中的 `checkpoints/` 目录下。
+4. `--ngpu` 要使用的 GPU 数,如果 ngpu==0,则使用 cpu。
+5. `--phones-dict` 是音素词汇表文件的路径。
+6. `--speech-stretchs` mel 的最小最大值数据的文件路径。
+
+### 合成
+我们使用 parallel wavegan 作为神经声码器(vocoder)。
+从 [pwgan_opencpop_ckpt_1.4.0.zip](https://paddlespeech.bj.bcebos.com/t2s/svs/opencpop/pwgan_opencpop_ckpt_1.4.0.zip) 下载预训练的 parallel wavegan 模型并将其解压。
+
+```bash
+unzip pwgan_opencpop_ckpt_1.4.0.zip
+```
+Parallel WaveGAN 检查点包含如下文件。
+```text
+pwgan_opencpop_ckpt_1.4.0.zip
+├── default.yaml               # 用于训练 parallel wavegan 的默认配置
+├── snapshot_iter_100000.pdz   # parallel wavegan 的模型参数
+└── feats_stats.npy            # 训练 parallel wavegan 时用于规范化谱图的统计数据
+```
+`./local/synthesize.sh` 调用 `${BIN_DIR}/../synthesize.py` 即可从 `metadata.jsonl` 中合成波形。
+
+```bash
+CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name}
+```
+```text
+usage: synthesize.py [-h]
+                     [--am {diffsinger_opencpop}]
+                     [--am_config AM_CONFIG] [--am_ckpt AM_CKPT]
+                     [--am_stat AM_STAT] [--phones_dict PHONES_DICT]
+                     [--voc {pwgan_opencpop}]
+                     [--voc_config VOC_CONFIG] [--voc_ckpt VOC_CKPT]
+                     [--voc_stat VOC_STAT] [--ngpu NGPU]
+                     [--test_metadata TEST_METADATA] [--output_dir OUTPUT_DIR]
+                     [--speech_stretchs SPEECH_STRETCHS]
+
+Synthesize with acoustic model & vocoder
+
+optional arguments:
+  -h, --help            show this help message and exit
+  --am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech,tacotron2_aishell3}
+                        Choose acoustic model type of tts task.
+  --am_config AM_CONFIG
+                        Config of acoustic model.
+  --am_ckpt AM_CKPT     Checkpoint file of acoustic model.
+  --am_stat AM_STAT     mean and standard deviation used to normalize
+                        spectrogram when training acoustic model.
+  --phones_dict PHONES_DICT
+                        phone vocabulary file.
+  --tones_dict TONES_DICT
+                        tone vocabulary file.
+  --speaker_dict SPEAKER_DICT
+                        speaker id map file.
+  --voice-cloning VOICE_CLONING
+                        whether training voice cloning model.
+  --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,wavernn_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,style_melgan_csmsc}
+                        Choose vocoder type of tts task.
+  --voc_config VOC_CONFIG
+                        Config of voc.
+  --voc_ckpt VOC_CKPT   Checkpoint file of voc.
+  --voc_stat VOC_STAT   mean and standard deviation used to normalize
+                        spectrogram when training voc.
+  --ngpu NGPU           if ngpu == 0, use cpu.
+  --test_metadata TEST_METADATA
+                        test metadata.
+  --output_dir OUTPUT_DIR
+                        output dir.
+  --speech-stretchs     mel min and max values file.
+```
+
+## 预训练模型
+预先训练的 DiffSinger 模型:
+- [diffsinger_opencpop_ckpt_1.4.0.zip](https://paddlespeech.bj.bcebos.com/t2s/svs/opencpop/diffsinger_opencpop_ckpt_1.4.0.zip)
+
+DiffSinger 检查点包含下列文件。
+```text
+diffsinger_opencpop_ckpt_1.4.0.zip
+├── default.yaml             # 用于训练 diffsinger 的默认配置
+├── energy_stats.npy         # 训练 diffsinger 时如若需要 norm energy 会使用到的统计数据
+├── phone_id_map.txt         # 训练 diffsinger 时的音素词汇文件
+├── pitch_stats.npy          # 训练 diffsinger 时如若需要 norm pitch 会使用到的统计数据
+├── snapshot_iter_160000.pdz # 模型参数和优化器状态
+├── speech_stats.npy         # 训练 diffsinger 时用于规范化频谱图的统计数据
+└── speech_stretchs.npy      # 训练 diffusion 前用于 mel 谱拉伸的最小及最大值
+```
+目前文本前端未完善,暂不支持 `synthesize_e2e` 的方式合成音频。尝试效果可先使用 `synthesize`。
diff --git a/examples/opencpop/svs1/conf/default.yaml b/examples/opencpop/svs1/conf/default.yaml
new file mode 100644
index 000000000..5d8060630
--- /dev/null
+++ b/examples/opencpop/svs1/conf/default.yaml
@@ -0,0 +1,159 @@
+###########################################################
+#                FEATURE EXTRACTION SETTING               #
+###########################################################
+
+fs: 24000          # sr
+n_fft: 512         # FFT size (samples).
+n_shift: 128       # Hop size (samples). ~5.3ms
+win_length: 512    # Window length (samples). ~21.3ms
+                   # If set to null, it will be the same as fft_size.
+window: "hann"     # Window function.
+
+# Only used for feats_type != raw
+
+fmin: 30           # Minimum frequency of Mel basis.
+fmax: 12000        # Maximum frequency of Mel basis.
+n_mels: 80         # The number of mel basis.
+
+# Only used for the model using pitch features (e.g. FastSpeech2)
+f0min: 80          # Minimum f0 for pitch extraction.
+f0max: 750         # Maximum f0 for pitch extraction.
+
+
+###########################################################
+#                       DATA SETTING                      #
+###########################################################
+batch_size: 48     # batch size
+num_workers: 1     # number of dataloader workers
+
+
+###########################################################
+#                      MODEL SETTING                      #
+###########################################################
+model:
+    # music score related
+    note_num: 300                              # number of note
+    is_slur_num: 2                             # number of slur
+    # fastspeech2 module options
+    use_energy_pred: False                     # whether use energy predictor
+    use_postnet: False                         # whether use postnet
+
+    # fastspeech2 module
+    fastspeech2_params:
+        adim: 256                                    # attention dimension
+        aheads: 2                                    # number of attention heads
+        elayers: 4                                   # number of encoder layers
+        eunits: 1024                                 # number of encoder ff units
+        dlayers: 4                                   # number of decoder layers
+        dunits: 1024                                 # number of decoder ff units
+        positionwise_layer_type: conv1d-linear       # type of position-wise layer
+        positionwise_conv_kernel_size: 9             # kernel size of position wise conv layer
+        transformer_enc_dropout_rate: 0.1            # dropout rate for transformer encoder layer
+        transformer_enc_positional_dropout_rate: 0.1 # dropout rate for transformer encoder positional encoding
+        transformer_enc_attn_dropout_rate: 0.0       # dropout rate for transformer encoder attention layer
+        transformer_activation_type: "gelu"          # Activation function type in transformer.
+ encoder_normalize_before: True # whether to perform layer normalization before the input + decoder_normalize_before: True # whether to perform layer normalization before the input + reduction_factor: 1 # reduction factor + init_type: xavier_uniform # initialization type + init_enc_alpha: 1.0 # initial value of alpha of encoder scaled position encoding + init_dec_alpha: 1.0 # initial value of alpha of decoder scaled position encoding + use_scaled_pos_enc: True # whether to use scaled positional encoding + transformer_dec_dropout_rate: 0.1 # dropout rate for transformer decoder layer + transformer_dec_positional_dropout_rate: 0.1 # dropout rate for transformer decoder positional encoding + transformer_dec_attn_dropout_rate: 0.0 # dropout rate for transformer decoder attention layer + duration_predictor_layers: 5 # number of layers of duration predictor + duration_predictor_chans: 256 # number of channels of duration predictor + duration_predictor_kernel_size: 3 # filter size of duration predictor + duration_predictor_dropout_rate: 0.5 # dropout rate in energy predictor + pitch_predictor_layers: 5 # number of conv layers in pitch predictor + pitch_predictor_chans: 256 # number of channels of conv layers in pitch predictor + pitch_predictor_kernel_size: 5 # kernel size of conv leyers in pitch predictor + pitch_predictor_dropout: 0.5 # dropout rate in pitch predictor + pitch_embed_kernel_size: 1 # kernel size of conv embedding layer for pitch + pitch_embed_dropout: 0.0 # dropout rate after conv embedding layer for pitch + stop_gradient_from_pitch_predictor: True # whether to stop the gradient from pitch predictor to encoder + energy_predictor_layers: 2 # number of conv layers in energy predictor + energy_predictor_chans: 256 # number of channels of conv layers in energy predictor + energy_predictor_kernel_size: 3 # kernel size of conv leyers in energy predictor + energy_predictor_dropout: 0.5 # dropout rate in energy predictor + energy_embed_kernel_size: 1 # kernel size of conv embedding layer for energy + energy_embed_dropout: 0.0 # dropout rate after conv embedding layer for energy + stop_gradient_from_energy_predictor: False # whether to stop the gradient from energy predictor to encoder + postnet_layers: 5 # number of layers of postnet + postnet_filts: 5 # filter size of conv layers in postnet + postnet_chans: 256 # number of channels of conv layers in postnet + postnet_dropout_rate: 0.5 # dropout rate for postnet + + # denoiser module + denoiser_params: + in_channels: 80 # Number of channels of the input mel-spectrogram + out_channels: 80 # Number of channels of the output mel-spectrogram + kernel_size: 3 # Kernel size of the residual blocks inside + layers: 20 # Number of residual blocks inside + stacks: 5 # The number of groups to split the residual blocks into + residual_channels: 256 # Residual channel of the residual blocks + gate_channels: 512 # Gate channel of the residual blocks + skip_channels: 256 # Skip channel of the residual blocks + aux_channels: 256 # Auxiliary channel of the residual blocks + dropout: 0.1 # Dropout of the residual blocks + bias: True # Whether to use bias in residual blocks + use_weight_norm: False # Whether to use weight norm in all convolutions + init_type: "kaiming_normal" # Type of initialize weights of a neural network module + + + diffusion_params: + num_train_timesteps: 100 # The number of timesteps between the noise and the real during training + beta_start: 0.0001 # beta start parameter for the scheduler + beta_end: 0.06 # beta end parameter for 
the scheduler + beta_schedule: "linear" # beta schedule parameter for the scheduler + num_max_timesteps: 100 # The max timestep transition from real to noise + stretch: True # whether to stretch before diffusion + + +########################################################### +# UPDATER SETTING # +########################################################### +fs2_updater: + use_masking: True # whether to apply masking for padded part in loss calculation + +ds_updater: + use_masking: True # whether to apply masking for padded part in loss calculation + + +########################################################### +# OPTIMIZER SETTING # +########################################################### +# fastspeech2 optimizer +fs2_optimizer: + optim: adam # optimizer type + learning_rate: 0.001 # learning rate + +# diffusion optimizer +ds_optimizer_params: + beta1: 0.9 + beta2: 0.98 + weight_decay: 0.0 + +ds_scheduler_params: + learning_rate: 0.001 + gamma: 0.5 + step_size: 50000 +ds_grad_norm: 1 + + +########################################################### +# INTERVAL SETTING # +########################################################### +only_train_diffusion: True # Whether to freeze fastspeech2 parameters when training diffusion +ds_train_start_steps: 160000 # Number of steps to start to train diffusion module. +train_max_steps: 320000 # Number of training steps. +save_interval_steps: 2000 # Interval steps to save checkpoint. +eval_interval_steps: 2000 # Interval steps to evaluate the network. +num_snapshots: 5 + + +########################################################### +# OTHER SETTING # +########################################################### +seed: 10086 diff --git a/examples/opencpop/svs1/local/preprocess.sh b/examples/opencpop/svs1/local/preprocess.sh new file mode 100755 index 000000000..26fd44689 --- /dev/null +++ b/examples/opencpop/svs1/local/preprocess.sh @@ -0,0 +1,74 @@ +#!/bin/bash + +stage=0 +stop_stage=100 + +config_path=$1 + +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + # extract features + echo "Extract features ..." + python3 ${BIN_DIR}/preprocess.py \ + --dataset=opencpop \ + --rootdir=~/datasets/Opencpop/segments \ + --dumpdir=dump \ + --label-file=~/datasets/Opencpop/segments/transcriptions.txt \ + --config=${config_path} \ + --num-cpu=20 \ + --cut-sil=True +fi + +if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then + # get features' stats(mean and std) + echo "Get features' stats ..." + python3 ${MAIN_ROOT}/utils/compute_statistics.py \ + --metadata=dump/train/raw/metadata.jsonl \ + --field-name="speech" + + python3 ${MAIN_ROOT}/utils/compute_statistics.py \ + --metadata=dump/train/raw/metadata.jsonl \ + --field-name="pitch" + + python3 ${MAIN_ROOT}/utils/compute_statistics.py \ + --metadata=dump/train/raw/metadata.jsonl \ + --field-name="energy" +fi + +if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then + # normalize and covert phone/speaker to id, dev and test should use train's stats + echo "Normalize ..." 
+ python3 ${BIN_DIR}/normalize.py \ + --metadata=dump/train/raw/metadata.jsonl \ + --dumpdir=dump/train/norm \ + --speech-stats=dump/train/speech_stats.npy \ + --pitch-stats=dump/train/pitch_stats.npy \ + --energy-stats=dump/train/energy_stats.npy \ + --phones-dict=dump/phone_id_map.txt \ + --speaker-dict=dump/speaker_id_map.txt + + python3 ${BIN_DIR}/normalize.py \ + --metadata=dump/dev/raw/metadata.jsonl \ + --dumpdir=dump/dev/norm \ + --speech-stats=dump/train/speech_stats.npy \ + --pitch-stats=dump/train/pitch_stats.npy \ + --energy-stats=dump/train/energy_stats.npy \ + --phones-dict=dump/phone_id_map.txt \ + --speaker-dict=dump/speaker_id_map.txt + + python3 ${BIN_DIR}/normalize.py \ + --metadata=dump/test/raw/metadata.jsonl \ + --dumpdir=dump/test/norm \ + --speech-stats=dump/train/speech_stats.npy \ + --pitch-stats=dump/train/pitch_stats.npy \ + --energy-stats=dump/train/energy_stats.npy \ + --phones-dict=dump/phone_id_map.txt \ + --speaker-dict=dump/speaker_id_map.txt +fi + +if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then + # Get feature(mel) extremum for diffusion stretch + echo "Get feature(mel) extremum ..." + python3 ${BIN_DIR}/get_minmax.py \ + --metadata=dump/train/norm/metadata.jsonl \ + --speech-stretchs=dump/train/speech_stretchs.npy +fi diff --git a/examples/opencpop/svs1/local/synthesize.sh b/examples/opencpop/svs1/local/synthesize.sh new file mode 100755 index 000000000..1159e0074 --- /dev/null +++ b/examples/opencpop/svs1/local/synthesize.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +config_path=$1 +train_output_path=$2 +ckpt_name=$3 +stage=0 +stop_stage=0 + +# pwgan +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/../synthesize.py \ + --am=diffsinger_opencpop \ + --am_config=${config_path} \ + --am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --am_stat=dump/train/speech_stats.npy \ + --voc=pwgan_opencpop \ + --voc_config=pwgan_opencpop_ckpt_1.4.0/default.yaml \ + --voc_ckpt=pwgan_opencpop_ckpt_1.4.0/snapshot_iter_100000.pdz \ + --voc_stat=pwgan_opencpop_ckpt_1.4.0/feats_stats.npy \ + --test_metadata=dump/test/norm/metadata.jsonl \ + --output_dir=${train_output_path}/test \ + --phones_dict=dump/phone_id_map.txt \ + --speech_stretchs=dump/train/speech_stretchs.npy +fi + diff --git a/examples/opencpop/svs1/local/train.sh b/examples/opencpop/svs1/local/train.sh new file mode 100755 index 000000000..5be624fc4 --- /dev/null +++ b/examples/opencpop/svs1/local/train.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +config_path=$1 +train_output_path=$2 + +python3 ${BIN_DIR}/train.py \ + --train-metadata=dump/train/norm/metadata.jsonl \ + --dev-metadata=dump/dev/norm/metadata.jsonl \ + --config=${config_path} \ + --output-dir=${train_output_path} \ + --ngpu=1 \ + --phones-dict=dump/phone_id_map.txt \ + --speech-stretchs=dump/train/speech_stretchs.npy diff --git a/examples/opencpop/svs1/path.sh b/examples/opencpop/svs1/path.sh new file mode 100755 index 000000000..8bda5dce6 --- /dev/null +++ b/examples/opencpop/svs1/path.sh @@ -0,0 +1,13 @@ +#!/bin/bash +export MAIN_ROOT=`realpath ${PWD}/../../../` + +export PATH=${MAIN_ROOT}:${MAIN_ROOT}/utils:${PATH} +export LC_ALL=C + +export PYTHONDONTWRITEBYTECODE=1 +# Use UTF-8 in Python to avoid UnicodeDecodeError when LC_ALL=C +export PYTHONIOENCODING=UTF-8 +export PYTHONPATH=${MAIN_ROOT}:${PYTHONPATH} + +MODEL=diffsinger +export BIN_DIR=${MAIN_ROOT}/paddlespeech/t2s/exps/${MODEL} diff --git a/examples/opencpop/svs1/run.sh 
b/examples/opencpop/svs1/run.sh new file mode 100755 index 000000000..7bde38518 --- /dev/null +++ b/examples/opencpop/svs1/run.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +set -e +source path.sh + +gpus=0 +stage=0 +stop_stage=100 + +conf_path=conf/default.yaml +train_output_path=exp/default +ckpt_name=snapshot_iter_320000.pdz + +# with the following command, you can choose the stage range you want to run +# such as `./run.sh --stage 0 --stop-stage 0` +# this can not be mixed use with `$1`, `$2` ... +source ${MAIN_ROOT}/utils/parse_options.sh || exit 1 + +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + # prepare data + ./local/preprocess.sh ${conf_path} || exit -1 +fi + +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + # train model, all `ckpt` under `train_output_path/checkpoints/` dir + CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path} || exit -1 +fi + +if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then + # synthesize, vocoder is pwgan by default + CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1 +fi diff --git a/paddlespeech/t2s/datasets/am_batch_fn.py b/paddlespeech/t2s/datasets/am_batch_fn.py index c95d908dc..9ae791b48 100644 --- a/paddlespeech/t2s/datasets/am_batch_fn.py +++ b/paddlespeech/t2s/datasets/am_batch_fn.py @@ -414,6 +414,129 @@ def fastspeech2_multi_spk_batch_fn(examples): return batch +def diffsinger_single_spk_batch_fn(examples): + # fields = ["text", "note", "note_dur", "is_slur", "text_lengths", "speech", "speech_lengths", "durations", "pitch", "energy"] + text = [np.array(item["text"], dtype=np.int64) for item in examples] + note = [np.array(item["note"], dtype=np.int64) for item in examples] + note_dur = [np.array(item["note_dur"], dtype=np.float32) for item in examples] + is_slur = [np.array(item["is_slur"], dtype=np.int64) for item in examples] + speech = [np.array(item["speech"], dtype=np.float32) for item in examples] + pitch = [np.array(item["pitch"], dtype=np.float32) for item in examples] + energy = [np.array(item["energy"], dtype=np.float32) for item in examples] + durations = [ + np.array(item["durations"], dtype=np.int64) for item in examples + ] + + text_lengths = [ + np.array(item["text_lengths"], dtype=np.int64) for item in examples + ] + speech_lengths = [ + np.array(item["speech_lengths"], dtype=np.int64) for item in examples + ] + + text = batch_sequences(text) + note = batch_sequences(note) + note_dur = batch_sequences(note_dur) + is_slur = batch_sequences(is_slur) + pitch = batch_sequences(pitch) + speech = batch_sequences(speech) + durations = batch_sequences(durations) + energy = batch_sequences(energy) + + # convert each batch to paddle.Tensor + text = paddle.to_tensor(text) + note = paddle.to_tensor(note) + note_dur = paddle.to_tensor(note_dur) + is_slur = paddle.to_tensor(is_slur) + pitch = paddle.to_tensor(pitch) + speech = paddle.to_tensor(speech) + durations = paddle.to_tensor(durations) + energy = paddle.to_tensor(energy) + text_lengths = paddle.to_tensor(text_lengths) + speech_lengths = paddle.to_tensor(speech_lengths) + + batch = { + "text": text, + "note": note, + "note_dur": note_dur, + "is_slur": is_slur, + "text_lengths": text_lengths, + "durations": durations, + "speech": speech, + "speech_lengths": speech_lengths, + "pitch": pitch, + "energy": energy + } + return batch + + +def diffsinger_multi_spk_batch_fn(examples): + # fields = ["text", "note", "note_dur", "is_slur", "text_lengths", "speech", "speech_lengths", "durations", 
"pitch", "energy", "spk_id"/"spk_emb"] + text = [np.array(item["text"], dtype=np.int64) for item in examples] + note = [np.array(item["note"], dtype=np.int64) for item in examples] + note_dur = [np.array(item["note_dur"], dtype=np.float32) for item in examples] + is_slur = [np.array(item["is_slur"], dtype=np.int64) for item in examples] + speech = [np.array(item["speech"], dtype=np.float32) for item in examples] + pitch = [np.array(item["pitch"], dtype=np.float32) for item in examples] + energy = [np.array(item["energy"], dtype=np.float32) for item in examples] + durations = [ + np.array(item["durations"], dtype=np.int64) for item in examples + ] + text_lengths = [ + np.array(item["text_lengths"], dtype=np.int64) for item in examples + ] + speech_lengths = [ + np.array(item["speech_lengths"], dtype=np.int64) for item in examples + ] + + text = batch_sequences(text) + note = batch_sequences(note) + note_dur = batch_sequences(note_dur) + is_slur = batch_sequences(is_slur) + pitch = batch_sequences(pitch) + speech = batch_sequences(speech) + durations = batch_sequences(durations) + energy = batch_sequences(energy) + + # convert each batch to paddle.Tensor + text = paddle.to_tensor(text) + note = paddle.to_tensor(note) + note_dur = paddle.to_tensor(note_dur) + is_slur = paddle.to_tensor(is_slur) + pitch = paddle.to_tensor(pitch) + speech = paddle.to_tensor(speech) + durations = paddle.to_tensor(durations) + energy = paddle.to_tensor(energy) + text_lengths = paddle.to_tensor(text_lengths) + speech_lengths = paddle.to_tensor(speech_lengths) + + batch = { + "text": text, + "note": note, + "note_dur": note_dur, + "is_slur": is_slur, + "text_lengths": text_lengths, + "durations": durations, + "speech": speech, + "speech_lengths": speech_lengths, + "pitch": pitch, + "energy": energy + } + # spk_emb has a higher priority than spk_id + if "spk_emb" in examples[0]: + spk_emb = [ + np.array(item["spk_emb"], dtype=np.float32) for item in examples + ] + spk_emb = batch_sequences(spk_emb) + spk_emb = paddle.to_tensor(spk_emb) + batch["spk_emb"] = spk_emb + elif "spk_id" in examples[0]: + spk_id = [np.array(item["spk_id"], dtype=np.int64) for item in examples] + spk_id = paddle.to_tensor(spk_id) + batch["spk_id"] = spk_id + return batch + + def transformer_single_spk_batch_fn(examples): # fields = ["text", "text_lengths", "speech", "speech_lengths"] text = [np.array(item["text"], dtype=np.int64) for item in examples] diff --git a/paddlespeech/t2s/datasets/get_feats.py b/paddlespeech/t2s/datasets/get_feats.py index 5ec97b810..ea273e245 100644 --- a/paddlespeech/t2s/datasets/get_feats.py +++ b/paddlespeech/t2s/datasets/get_feats.py @@ -12,17 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# Modified from espnet(https://github.com/espnet/espnet) +from typing import List +from typing import Optional +from typing import Union + import librosa import numpy as np import pyworld from scipy.interpolate import interp1d - -from typing import Optional -from typing import Union from typing_extensions import Literal - class LogMelFBank(): def __init__(self, sr: int=24000, @@ -79,7 +79,7 @@ class LogMelFBank(): def _spectrogram(self, wav: np.ndarray): D = self._stft(wav) - return np.abs(D) ** self.power + return np.abs(D)**self.power def _mel_spectrogram(self, wav: np.ndarray): S = self._spectrogram(wav) @@ -117,7 +117,6 @@ class Pitch(): if (f0 == 0).all(): print("All frames seems to be unvoiced, this utt will be removed.") return f0 - # padding start and end of f0 sequence start_f0 = f0[f0 != 0][0] end_f0 = f0[f0 != 0][-1] @@ -179,6 +178,8 @@ class Pitch(): f0 = self._calculate_f0(wav, use_continuous_f0, use_log_f0) if use_token_averaged_f0 and duration is not None: f0 = self._average_by_duration(f0, duration) + else: + f0 = np.expand_dims(np.array(f0), 0).T return f0 @@ -237,6 +238,8 @@ class Energy(): energy = self._calculate_energy(wav) if use_token_averaged_energy and duration is not None: energy = self._average_by_duration(energy, duration) + else: + energy = np.expand_dims(np.array(energy), 0).T return energy diff --git a/paddlespeech/t2s/datasets/preprocess_utils.py b/paddlespeech/t2s/datasets/preprocess_utils.py index 445b69bda..bf813b22a 100644 --- a/paddlespeech/t2s/datasets/preprocess_utils.py +++ b/paddlespeech/t2s/datasets/preprocess_utils.py @@ -12,6 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. import re +from typing import List + +import librosa +import numpy as np # speaker|utt_id|phn dur phn dur ... @@ -41,6 +45,90 @@ def get_phn_dur(file_name): return sentence, speaker_set +def note2midi(notes: List[str]) -> List[str]: + """Covert note string to note id, for example: ["C1"] -> [24] + + Args: + notes (List[str]): the list of note string + + Returns: + List[str]: the list of note id + """ + midis = [] + for note in notes: + if note == 'rest': + midi = 0 + else: + midi = librosa.note_to_midi(note.split("/")[0]) + midis.append(midi) + + return midis + + +def time2frame( + times: List[float], + sample_rate: int=24000, + n_shift: int=128, ) -> List[int]: + """Convert the phoneme duration of time(s) into frames + + Args: + times (List[float]): phoneme duration of time(s) + sample_rate (int, optional): sample rate. Defaults to 24000. + n_shift (int, optional): frame shift. Defaults to 128. 
+
+    Returns:
+        List[int]: phoneme durations in frames
+    """
+    end = 0.0
+    ends = []
+    for t in times:
+        end += t
+        ends.append(end)
+    frame_pos = librosa.time_to_frames(ends, sr=sample_rate, hop_length=n_shift)
+    durations = np.diff(frame_pos, prepend=0)
+    return durations
+
+
+def get_sentences_svs(
+        file_name,
+        dataset: str='opencpop',
+        sample_rate: int=24000,
+        n_shift: int=128, ):
+    '''
+    Read the label file of a singing dataset.
+    Args:
+        file_name (str or Path): path of the dataset's label file
+        dataset (str): dataset name
+    Returns:
+        Dict: the information of each sentence: [phone id (int)], [phone duration in frames (int)], [note id (int)], [note duration (float)], [is slur (int)], text (str), speaker name (str)
+        set: speaker set
+    '''
+    f = open(file_name, 'r')
+    sentence = {}
+    speaker_set = set()
+    if dataset == 'opencpop':
+        speaker_set.add("opencpop")
+        for line in f:
+            line_list = line.strip().split('|')
+            utt = line_list[0]
+            text = line_list[1]
+            ph = line_list[2].split()
+            midi = note2midi(line_list[3].split())
+            midi_dur = line_list[4].split()
+            ph_dur = time2frame(
+                [float(t) for t in line_list[5].split()],
+                sample_rate=sample_rate,
+                n_shift=n_shift)
+            is_slur = line_list[6].split()
+            assert len(ph) == len(midi) == len(midi_dur) == len(is_slur)
+            sentence[utt] = (ph, [int(i) for i in ph_dur],
+                             [int(i) for i in midi],
+                             [float(i) for i in midi_dur],
+                             [int(i) for i in is_slur], text, "opencpop")
+    else:
+        print("dataset should be in {opencpop} now!")
+
+    f.close()
+    return sentence, speaker_set
+
+
 def merge_silence(sentence):
     '''
     merge silences
@@ -88,6 +176,9 @@ def get_input_token(sentence, output_path, dataset="baker"):
     phn_token = ["<pad>", "<unk>"] + phn_token
     if dataset in {"baker", "aishell3"}:
         phn_token += [",", "。", "?", "!"]
+    # svs dataset
+    elif dataset in {"opencpop"}:
+        pass
     else:
         phn_token += [",", ".", "?", "!"]
     phn_token += ["<eos>"]
diff --git a/paddlespeech/t2s/exps/diffsinger/__init__.py b/paddlespeech/t2s/exps/diffsinger/__init__.py
new file mode 100644
index 000000000..595add0ae
--- /dev/null
+++ b/paddlespeech/t2s/exps/diffsinger/__init__.py
@@ -0,0 +1,13 @@
+# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/paddlespeech/t2s/exps/diffsinger/get_minmax.py b/paddlespeech/t2s/exps/diffsinger/get_minmax.py
new file mode 100644
index 000000000..5457f1e24
--- /dev/null
+++ b/paddlespeech/t2s/exps/diffsinger/get_minmax.py
@@ -0,0 +1,82 @@
+# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import argparse
+import logging
+
+import jsonlines
+import numpy as np
+from tqdm import tqdm
+
+from paddlespeech.t2s.datasets.data_table import DataTable
+
+
+def get_minmax(spec, min_spec, max_spec):
+    # spec: [T, 80]
+    for i in range(spec.shape[1]):
+        min_value = np.min(spec[:, i])
+        max_value = np.max(spec[:, i])
+        min_spec[i] = min(min_value, min_spec[i])
+        max_spec[i] = max(max_value, max_spec[i])
+
+    return min_spec, max_spec
+
+
+def main():
+    """Run preprocessing process."""
+    parser = argparse.ArgumentParser(
+        description="Compute the per-bin min and max of the dumped mel features."
+    )
+    parser.add_argument(
+        "--metadata",
+        type=str,
+        required=True,
+        help="directory including feature files to be normalized. "
+        "you need to specify either *-scp or rootdir.")
+
+    parser.add_argument(
+        "--speech-stretchs",
+        type=str,
+        required=True,
+        help="min max spec file, only computed on train data")
+
+    args = parser.parse_args()
+
+    # get dataset
+    with jsonlines.open(args.metadata, 'r') as reader:
+        metadata = list(reader)
+    dataset = DataTable(
+        metadata, converters={
+            "speech": np.load,
+        })
+    logging.info(f"The number of files = {len(dataset)}.")
+
+    n_mel = 80
+    min_spec = 100.0 * np.ones(shape=(n_mel), dtype=np.float32)
+    max_spec = -100.0 * np.ones(shape=(n_mel), dtype=np.float32)
+
+    for item in tqdm(dataset):
+        spec = item['speech']
+        min_spec, max_spec = get_minmax(spec, min_spec, max_spec)
+
+    # Using min_spec=-6.0 has given better training results so far
+    min_spec = -6.0 * np.ones(shape=(n_mel), dtype=np.float32)
+    min_max_spec = np.stack([min_spec, max_spec], axis=0)
+    np.save(
+        str(args.speech_stretchs),
+        min_max_spec.astype(np.float32),
+        allow_pickle=False)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/paddlespeech/t2s/exps/diffsinger/normalize.py b/paddlespeech/t2s/exps/diffsinger/normalize.py
new file mode 100644
index 000000000..d3e611621
--- /dev/null
+++ b/paddlespeech/t2s/exps/diffsinger/normalize.py
@@ -0,0 +1,189 @@
+# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Normalize feature files and dump them."""
+import argparse
+import logging
+from operator import itemgetter
+from pathlib import Path
+
+import jsonlines
+import numpy as np
+from sklearn.preprocessing import StandardScaler
+from tqdm import tqdm
+
+from paddlespeech.t2s.datasets.data_table import DataTable
+from paddlespeech.t2s.utils import str2bool
+
+
+def main():
+    """Run preprocessing process."""
+    parser = argparse.ArgumentParser(
+        description="Normalize dumped raw features (See detail in parallel_wavegan/bin/normalize.py)."
+    )
+    parser.add_argument(
+        "--metadata",
+        type=str,
+        required=True,
+        help="directory including feature files to be normalized. 
" + "you need to specify either *-scp or rootdir.") + + parser.add_argument( + "--dumpdir", + type=str, + required=True, + help="directory to dump normalized feature files.") + parser.add_argument( + "--speech-stats", + type=str, + required=True, + help="speech statistics file.") + parser.add_argument( + "--pitch-stats", type=str, required=True, help="pitch statistics file.") + parser.add_argument( + "--energy-stats", + type=str, + required=True, + help="energy statistics file.") + parser.add_argument( + "--phones-dict", type=str, default=None, help="phone vocabulary file.") + parser.add_argument( + "--speaker-dict", type=str, default=None, help="speaker id map file.") + parser.add_argument( + "--norm-feats", + type=str2bool, + default=False, + help="whether to norm features") + + args = parser.parse_args() + + dumpdir = Path(args.dumpdir).expanduser() + # use absolute path + dumpdir = dumpdir.resolve() + dumpdir.mkdir(parents=True, exist_ok=True) + + # get dataset + with jsonlines.open(args.metadata, 'r') as reader: + metadata = list(reader) + dataset = DataTable( + metadata, + converters={ + "speech": np.load, + "pitch": np.load, + "energy": np.load, + }) + logging.info(f"The number of files = {len(dataset)}.") + + # restore scaler + speech_scaler = StandardScaler() + if args.norm_feats: + speech_scaler.mean_ = np.load(args.speech_stats)[0] + speech_scaler.scale_ = np.load(args.speech_stats)[1] + else: + speech_scaler.mean_ = np.zeros( + np.load(args.speech_stats)[0].shape, dtype="float32") + speech_scaler.scale_ = np.ones( + np.load(args.speech_stats)[1].shape, dtype="float32") + speech_scaler.n_features_in_ = speech_scaler.mean_.shape[0] + + pitch_scaler = StandardScaler() + if args.norm_feats: + pitch_scaler.mean_ = np.load(args.pitch_stats)[0] + pitch_scaler.scale_ = np.load(args.pitch_stats)[1] + else: + pitch_scaler.mean_ = np.zeros( + np.load(args.pitch_stats)[0].shape, dtype="float32") + pitch_scaler.scale_ = np.ones( + np.load(args.pitch_stats)[1].shape, dtype="float32") + pitch_scaler.n_features_in_ = pitch_scaler.mean_.shape[0] + + energy_scaler = StandardScaler() + if args.norm_feats: + energy_scaler.mean_ = np.load(args.energy_stats)[0] + energy_scaler.scale_ = np.load(args.energy_stats)[1] + else: + energy_scaler.mean_ = np.zeros( + np.load(args.energy_stats)[0].shape, dtype="float32") + energy_scaler.scale_ = np.ones( + np.load(args.energy_stats)[1].shape, dtype="float32") + energy_scaler.n_features_in_ = energy_scaler.mean_.shape[0] + + vocab_phones = {} + with open(args.phones_dict, 'rt') as f: + phn_id = [line.strip().split() for line in f.readlines()] + for phn, id in phn_id: + vocab_phones[phn] = int(id) + + vocab_speaker = {} + with open(args.speaker_dict, 'rt') as f: + spk_id = [line.strip().split() for line in f.readlines()] + for spk, id in spk_id: + vocab_speaker[spk] = int(id) + + # process each file + output_metadata = [] + + for item in tqdm(dataset): + utt_id = item['utt_id'] + speech = item['speech'] + pitch = item['pitch'] + energy = item['energy'] + # normalize + speech = speech_scaler.transform(speech) + speech_dir = dumpdir / "data_speech" + speech_dir.mkdir(parents=True, exist_ok=True) + speech_path = speech_dir / f"{utt_id}_speech.npy" + np.save(speech_path, speech.astype(np.float32), allow_pickle=False) + + pitch = pitch_scaler.transform(pitch) + pitch_dir = dumpdir / "data_pitch" + pitch_dir.mkdir(parents=True, exist_ok=True) + pitch_path = pitch_dir / f"{utt_id}_pitch.npy" + np.save(pitch_path, pitch.astype(np.float32), allow_pickle=False) + + 
energy = energy_scaler.transform(energy) + energy_dir = dumpdir / "data_energy" + energy_dir.mkdir(parents=True, exist_ok=True) + energy_path = energy_dir / f"{utt_id}_energy.npy" + np.save(energy_path, energy.astype(np.float32), allow_pickle=False) + phone_ids = [vocab_phones[p] for p in item['phones']] + spk_id = vocab_speaker[item["speaker"]] + record = { + "utt_id": item['utt_id'], + "spk_id": spk_id, + "text": phone_ids, + "text_lengths": item['text_lengths'], + "speech_lengths": item['speech_lengths'], + "durations": item['durations'], + "speech": str(speech_path), + "pitch": str(pitch_path), + "energy": str(energy_path), + "note": item['note'], + "note_dur": item['note_dur'], + "is_slur": item['is_slur'], + } + # add spk_emb for voice cloning + if "spk_emb" in item: + record["spk_emb"] = str(item["spk_emb"]) + + output_metadata.append(record) + output_metadata.sort(key=itemgetter('utt_id')) + output_metadata_path = Path(args.dumpdir) / "metadata.jsonl" + with jsonlines.open(output_metadata_path, 'w') as writer: + for item in output_metadata: + writer.write(item) + logging.info(f"metadata dumped into {output_metadata_path}") + + +if __name__ == "__main__": + main() diff --git a/paddlespeech/t2s/exps/diffsinger/preprocess.py b/paddlespeech/t2s/exps/diffsinger/preprocess.py new file mode 100644 index 000000000..be526eff1 --- /dev/null +++ b/paddlespeech/t2s/exps/diffsinger/preprocess.py @@ -0,0 +1,376 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
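Note how normalize.py above keeps a single code path whether or not features are normalized: with --norm-feats false the scalers get zero mean and unit scale (only the shape comes from the stats file), so transform() is an identity map. A minimal standalone sketch of that trick; the 80-bin shape is illustrative:

import numpy as np
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
scaler.mean_ = np.zeros(80, dtype="float32")   # identity: zero mean
scaler.scale_ = np.ones(80, dtype="float32")   # identity: unit scale
scaler.n_features_in_ = scaler.mean_.shape[0]

mel = np.random.randn(100, 80).astype("float32")  # dummy (T, n_mels) feature
assert np.allclose(scaler.transform(mel), mel)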
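The preprocessing script below builds its sentence table with get_sentences_svs, which relies on the note2midi and time2frame helpers added to preprocess_utils.py above. A quick sketch of those two conversions using librosa directly; sr=24000 and hop 128 mirror the opencpop defaults:

import librosa
import numpy as np

# note name -> MIDI id; "rest" maps to 0, and spellings like "A#4/Bb4" keep the first form
notes = ["C4", "rest", "A#4/Bb4"]
print([0 if n == "rest" else librosa.note_to_midi(n.split("/")[0]) for n in notes])
# [60, 0, 70]

# cumulative end times -> frame indices -> per-phone frame counts, as in time2frame
ends = np.cumsum([0.5, 0.25, 0.25])
frame_pos = librosa.time_to_frames(ends, sr=24000, hop_length=128)
print(np.diff(frame_pos, prepend=0))  # [93 47 47] at this sr/hop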
+import argparse
+import os
+from concurrent.futures import ThreadPoolExecutor
+from operator import itemgetter
+from pathlib import Path
+from typing import Any
+from typing import Dict
+from typing import List
+
+import jsonlines
+import librosa
+import numpy as np
+import tqdm
+import yaml
+from yacs.config import CfgNode
+
+from paddlespeech.t2s.datasets.get_feats import Energy
+from paddlespeech.t2s.datasets.get_feats import LogMelFBank
+from paddlespeech.t2s.datasets.get_feats import Pitch
+from paddlespeech.t2s.datasets.preprocess_utils import compare_duration_and_mel_length
+from paddlespeech.t2s.datasets.preprocess_utils import get_input_token
+from paddlespeech.t2s.datasets.preprocess_utils import get_sentences_svs
+from paddlespeech.t2s.datasets.preprocess_utils import get_spk_id_map
+from paddlespeech.t2s.utils import str2bool
+
+ALL_INITIALS = [
+    'zh', 'ch', 'sh', 'b', 'p', 'm', 'f', 'd', 't', 'n', 'l', 'g', 'k', 'h',
+    'j', 'q', 'x', 'r', 'z', 'c', 's', 'y', 'w'
+]
+ALL_FINALS = [
+    'a', 'ai', 'an', 'ang', 'ao', 'e', 'ei', 'en', 'eng', 'er', 'i', 'ia',
+    'ian', 'iang', 'iao', 'ie', 'in', 'ing', 'iong', 'iu', 'ng', 'o', 'ong',
+    'ou', 'u', 'ua', 'uai', 'uan', 'uang', 'ui', 'un', 'uo', 'v', 'van', 've',
+    'vn'
+]
+
+
+def process_sentence(
+        config: Dict[str, Any],
+        fp: Path,
+        sentences: Dict,
+        output_dir: Path,
+        mel_extractor=None,
+        pitch_extractor=None,
+        energy_extractor=None,
+        cut_sil: bool=True,
+        spk_emb_dir: Path=None, ):
+    utt_id = fp.stem
+    record = None
+    if utt_id in sentences:
+        # reading, resampling may occur
+        wav, _ = librosa.load(str(fp), sr=config.fs)
+        if len(wav.shape) != 1:
+            return record
+        max_value = np.abs(wav).max()
+        if max_value > 1.0:
+            wav = wav / max_value
+        assert len(wav.shape) == 1, f"{utt_id} is not a mono-channel audio."
+        assert np.abs(wav).max(
+        ) <= 1.0, f"{utt_id} seems to be different from 16 bit PCM."
+        phones = sentences[utt_id][0]
+        durations = sentences[utt_id][1]
+        note = sentences[utt_id][2]
+        note_dur = sentences[utt_id][3]
+        is_slur = sentences[utt_id][4]
+        speaker = sentences[utt_id][-1]
+
+        # extract mel feats
+        logmel = mel_extractor.get_log_mel_fbank(wav)
+        # change duration according to mel_length
+        compare_duration_and_mel_length(sentences, utt_id, logmel)
+        # utt_id may be popped in compare_duration_and_mel_length
+        if utt_id not in sentences:
+            return None
+        phones = sentences[utt_id][0]
+        durations = sentences[utt_id][1]
+        num_frames = logmel.shape[0]
+
+        assert sum(
+            durations
+        ) == num_frames, "the sum of durations doesn't equal the number of mel frames. 
" + speech_dir = output_dir / "data_speech" + speech_dir.mkdir(parents=True, exist_ok=True) + speech_path = speech_dir / (utt_id + "_speech.npy") + np.save(speech_path, logmel) + # extract pitch and energy + pitch = pitch_extractor.get_pitch(wav) + assert pitch.shape[0] == num_frames + pitch_dir = output_dir / "data_pitch" + pitch_dir.mkdir(parents=True, exist_ok=True) + pitch_path = pitch_dir / (utt_id + "_pitch.npy") + np.save(pitch_path, pitch) + energy = energy_extractor.get_energy(wav) + assert energy.shape[0] == num_frames + energy_dir = output_dir / "data_energy" + energy_dir.mkdir(parents=True, exist_ok=True) + energy_path = energy_dir / (utt_id + "_energy.npy") + np.save(energy_path, energy) + + record = { + "utt_id": utt_id, + "phones": phones, + "text_lengths": len(phones), + "speech_lengths": num_frames, + "durations": durations, + "speech": str(speech_path), + "pitch": str(pitch_path), + "energy": str(energy_path), + "speaker": speaker, + "note": note, + "note_dur": note_dur, + "is_slur": is_slur, + } + if spk_emb_dir: + if speaker in os.listdir(spk_emb_dir): + embed_name = utt_id + ".npy" + embed_path = spk_emb_dir / speaker / embed_name + if embed_path.is_file(): + record["spk_emb"] = str(embed_path) + else: + return None + return record + + +def process_sentences( + config, + fps: List[Path], + sentences: Dict, + output_dir: Path, + mel_extractor=None, + pitch_extractor=None, + energy_extractor=None, + nprocs: int=1, + cut_sil: bool=True, + spk_emb_dir: Path=None, + write_metadata_method: str='w', ): + if nprocs == 1: + results = [] + for fp in tqdm.tqdm(fps, total=len(fps)): + record = process_sentence( + config=config, + fp=fp, + sentences=sentences, + output_dir=output_dir, + mel_extractor=mel_extractor, + pitch_extractor=pitch_extractor, + energy_extractor=energy_extractor, + cut_sil=cut_sil, + spk_emb_dir=spk_emb_dir, ) + if record: + results.append(record) + else: + with ThreadPoolExecutor(nprocs) as pool: + futures = [] + with tqdm.tqdm(total=len(fps)) as progress: + for fp in fps: + future = pool.submit( + process_sentence, + config, + fp, + sentences, + output_dir, + mel_extractor, + pitch_extractor, + energy_extractor, + cut_sil, + spk_emb_dir, ) + future.add_done_callback(lambda p: progress.update()) + futures.append(future) + + results = [] + for ft in futures: + record = ft.result() + if record: + results.append(record) + + results.sort(key=itemgetter("utt_id")) + with jsonlines.open(output_dir / "metadata.jsonl", + write_metadata_method) as writer: + for item in results: + writer.write(item) + print("Done") + + +def main(): + # parse config and args + parser = argparse.ArgumentParser( + description="Preprocess audio and then extract features.") + + parser.add_argument( + "--dataset", + default="opencpop", + type=str, + help="name of dataset, should in {opencpop} now") + + parser.add_argument( + "--rootdir", default=None, type=str, help="directory to dataset.") + + parser.add_argument( + "--dumpdir", + type=str, + required=True, + help="directory to dump feature files.") + + parser.add_argument( + "--label-file", default=None, type=str, help="path to label file.") + + parser.add_argument("--config", type=str, help="diffsinger config file.") + + parser.add_argument( + "--num-cpu", type=int, default=1, help="number of process.") + + parser.add_argument( + "--cut-sil", + type=str2bool, + default=True, + help="whether cut sil in the edge of audio") + + parser.add_argument( + "--spk_emb_dir", + default=None, + type=str, + help="directory to speaker embedding 
files.") + + parser.add_argument( + "--write_metadata_method", + default="w", + type=str, + choices=["w", "a"], + help="How the metadata.jsonl file is written.") + args = parser.parse_args() + + rootdir = Path(args.rootdir).expanduser() + dumpdir = Path(args.dumpdir).expanduser() + # use absolute path + dumpdir = dumpdir.resolve() + dumpdir.mkdir(parents=True, exist_ok=True) + label_file = Path(args.label_file).expanduser() + + if args.spk_emb_dir: + spk_emb_dir = Path(args.spk_emb_dir).expanduser().resolve() + else: + spk_emb_dir = None + + assert rootdir.is_dir() + assert label_file.is_file() + + with open(args.config, 'rt') as f: + config = CfgNode(yaml.safe_load(f)) + + sentences, speaker_set = get_sentences_svs( + label_file, + dataset=args.dataset, + sample_rate=config.fs, + n_shift=config.n_shift, ) + + phone_id_map_path = dumpdir / "phone_id_map.txt" + speaker_id_map_path = dumpdir / "speaker_id_map.txt" + get_input_token(sentences, phone_id_map_path, args.dataset) + get_spk_id_map(speaker_set, speaker_id_map_path) + + if args.dataset == "opencpop": + wavdir = rootdir / "wavs" + # split data into 3 sections + train_file = rootdir / "train.txt" + train_wav_files = [] + with open(train_file, "r") as f_train: + for line in f_train.readlines(): + utt = line.split("|")[0] + wav_name = utt + ".wav" + wav_path = wavdir / wav_name + train_wav_files.append(wav_path) + + test_file = rootdir / "test.txt" + dev_wav_files = [] + test_wav_files = [] + num_dev = 106 + count = 0 + with open(test_file, "r") as f_test: + for line in f_test.readlines(): + count += 1 + utt = line.split("|")[0] + wav_name = utt + ".wav" + wav_path = wavdir / wav_name + if count > num_dev: + test_wav_files.append(wav_path) + else: + dev_wav_files.append(wav_path) + + else: + print("dataset should in {opencpop} now!") + + train_dump_dir = dumpdir / "train" / "raw" + train_dump_dir.mkdir(parents=True, exist_ok=True) + dev_dump_dir = dumpdir / "dev" / "raw" + dev_dump_dir.mkdir(parents=True, exist_ok=True) + test_dump_dir = dumpdir / "test" / "raw" + test_dump_dir.mkdir(parents=True, exist_ok=True) + + # Extractor + mel_extractor = LogMelFBank( + sr=config.fs, + n_fft=config.n_fft, + hop_length=config.n_shift, + win_length=config.win_length, + window=config.window, + n_mels=config.n_mels, + fmin=config.fmin, + fmax=config.fmax) + pitch_extractor = Pitch( + sr=config.fs, + hop_length=config.n_shift, + f0min=config.f0min, + f0max=config.f0max) + energy_extractor = Energy( + n_fft=config.n_fft, + hop_length=config.n_shift, + win_length=config.win_length, + window=config.window) + + # process for the 3 sections + if train_wav_files: + process_sentences( + config=config, + fps=train_wav_files, + sentences=sentences, + output_dir=train_dump_dir, + mel_extractor=mel_extractor, + pitch_extractor=pitch_extractor, + energy_extractor=energy_extractor, + nprocs=args.num_cpu, + cut_sil=args.cut_sil, + spk_emb_dir=spk_emb_dir, + write_metadata_method=args.write_metadata_method) + if dev_wav_files: + process_sentences( + config=config, + fps=dev_wav_files, + sentences=sentences, + output_dir=dev_dump_dir, + mel_extractor=mel_extractor, + pitch_extractor=pitch_extractor, + energy_extractor=energy_extractor, + cut_sil=args.cut_sil, + spk_emb_dir=spk_emb_dir, + write_metadata_method=args.write_metadata_method) + if test_wav_files: + process_sentences( + config=config, + fps=test_wav_files, + sentences=sentences, + output_dir=test_dump_dir, + mel_extractor=mel_extractor, + pitch_extractor=pitch_extractor, + energy_extractor=energy_extractor, 
+ nprocs=args.num_cpu, + cut_sil=args.cut_sil, + spk_emb_dir=spk_emb_dir, + write_metadata_method=args.write_metadata_method) + + +if __name__ == "__main__": + main() diff --git a/paddlespeech/t2s/exps/diffsinger/train.py b/paddlespeech/t2s/exps/diffsinger/train.py new file mode 100644 index 000000000..e79104c4a --- /dev/null +++ b/paddlespeech/t2s/exps/diffsinger/train.py @@ -0,0 +1,257 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse +import logging +import os +import shutil +from pathlib import Path + +import jsonlines +import numpy as np +import paddle +import yaml +from paddle import DataParallel +from paddle import distributed as dist +from paddle import nn +from paddle.io import DataLoader +from paddle.io import DistributedBatchSampler +from paddle.optimizer import AdamW +from paddle.optimizer.lr import StepDecay +from yacs.config import CfgNode + +from paddlespeech.t2s.datasets.am_batch_fn import diffsinger_multi_spk_batch_fn +from paddlespeech.t2s.datasets.am_batch_fn import diffsinger_single_spk_batch_fn +from paddlespeech.t2s.datasets.data_table import DataTable +from paddlespeech.t2s.models.diffsinger import DiffSinger +from paddlespeech.t2s.models.diffsinger import DiffSingerEvaluator +from paddlespeech.t2s.models.diffsinger import DiffSingerUpdater +from paddlespeech.t2s.models.diffsinger import DiffusionLoss +from paddlespeech.t2s.models.diffsinger.fastspeech2midi import FastSpeech2MIDILoss +from paddlespeech.t2s.training.extensions.snapshot import Snapshot +from paddlespeech.t2s.training.extensions.visualizer import VisualDL +from paddlespeech.t2s.training.optimizer import build_optimizers +from paddlespeech.t2s.training.seeding import seed_everything +from paddlespeech.t2s.training.trainer import Trainer + + +def train_sp(args, config): + # decides device type and whether to run in parallel + # setup running environment correctly + if (not paddle.is_compiled_with_cuda()) or args.ngpu == 0: + paddle.set_device("cpu") + else: + paddle.set_device("gpu") + world_size = paddle.distributed.get_world_size() + if world_size > 1: + paddle.distributed.init_parallel_env() + + # set the random seed, it is a must for multiprocess training + seed_everything(config.seed) + + print( + f"rank: {dist.get_rank()}, pid: {os.getpid()}, parent_pid: {os.getppid()}", + ) + fields = [ + "text", "text_lengths", "speech", "speech_lengths", "durations", + "pitch", "energy", "note", "note_dur", "is_slur" + ] + converters = {"speech": np.load, "pitch": np.load, "energy": np.load} + spk_num = None + if args.speaker_dict is not None: + print("multiple speaker diffsinger!") + collate_fn = diffsinger_multi_spk_batch_fn + with open(args.speaker_dict, 'rt') as f: + spk_id = [line.strip().split() for line in f.readlines()] + spk_num = len(spk_id) + fields += ["spk_id"] + else: + collate_fn = diffsinger_single_spk_batch_fn + print("single speaker diffsinger!") + + print("spk_num:", spk_num) + + # dataloader has been too verbose + 
logging.getLogger("DataLoader").disabled = True + + # construct dataset for training and validation + with jsonlines.open(args.train_metadata, 'r') as reader: + train_metadata = list(reader) + train_dataset = DataTable( + data=train_metadata, + fields=fields, + converters=converters, ) + with jsonlines.open(args.dev_metadata, 'r') as reader: + dev_metadata = list(reader) + dev_dataset = DataTable( + data=dev_metadata, + fields=fields, + converters=converters, ) + + # collate function and dataloader + train_sampler = DistributedBatchSampler( + train_dataset, + batch_size=config.batch_size, + shuffle=True, + drop_last=True) + + print("samplers done!") + + train_dataloader = DataLoader( + train_dataset, + batch_sampler=train_sampler, + collate_fn=collate_fn, + num_workers=config.num_workers) + + dev_dataloader = DataLoader( + dev_dataset, + shuffle=False, + drop_last=False, + batch_size=config.batch_size, + collate_fn=collate_fn, + num_workers=config.num_workers) + print("dataloaders done!") + + with open(args.phones_dict, "r") as f: + phn_id = [line.strip().split() for line in f.readlines()] + vocab_size = len(phn_id) + print("vocab_size:", vocab_size) + + with open(args.speech_stretchs, "r") as f: + spec_min = np.load(args.speech_stretchs)[0] + spec_max = np.load(args.speech_stretchs)[1] + spec_min = paddle.to_tensor(spec_min) + spec_max = paddle.to_tensor(spec_max) + print("min and max spec done!") + + odim = config.n_mels + config["model"]["fastspeech2_params"]["spk_num"] = spk_num + model = DiffSinger( + spec_min=spec_min, + spec_max=spec_max, + idim=vocab_size, + odim=odim, + **config["model"], ) + model_fs2 = model.fs2 + model_ds = model.diffusion + if world_size > 1: + model = DataParallel(model) + model_fs2 = model._layers.fs2 + model_ds = model._layers.diffusion + print("models done!") + + criterion_fs2 = FastSpeech2MIDILoss(**config["fs2_updater"]) + criterion_ds = DiffusionLoss(**config["ds_updater"]) + print("criterions done!") + + optimizer_fs2 = build_optimizers(model_fs2, **config["fs2_optimizer"]) + lr_schedule_ds = StepDecay(**config["ds_scheduler_params"]) + gradient_clip_ds = nn.ClipGradByGlobalNorm(config["ds_grad_norm"]) + optimizer_ds = AdamW( + learning_rate=lr_schedule_ds, + grad_clip=gradient_clip_ds, + parameters=model_ds.parameters(), + **config["ds_optimizer_params"]) + print("optimizer done!") + + output_dir = Path(args.output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + if dist.get_rank() == 0: + config_name = args.config.split("/")[-1] + # copy conf to output_dir + shutil.copyfile(args.config, output_dir / config_name) + + updater = DiffSingerUpdater( + model=model, + optimizers={ + "fs2": optimizer_fs2, + "ds": optimizer_ds, + }, + criterions={ + "fs2": criterion_fs2, + "ds": criterion_ds, + }, + dataloader=train_dataloader, + ds_train_start_steps=config.ds_train_start_steps, + output_dir=output_dir, + only_train_diffusion=config["only_train_diffusion"]) + + evaluator = DiffSingerEvaluator( + model=model, + criterions={ + "fs2": criterion_fs2, + "ds": criterion_ds, + }, + dataloader=dev_dataloader, + output_dir=output_dir, ) + + trainer = Trainer( + updater, + stop_trigger=(config.train_max_steps, "iteration"), + out=output_dir, ) + + if dist.get_rank() == 0: + trainer.extend( + evaluator, trigger=(config.eval_interval_steps, 'iteration')) + trainer.extend(VisualDL(output_dir), trigger=(1, 'iteration')) + trainer.extend( + Snapshot(max_size=config.num_snapshots), + trigger=(config.save_interval_steps, 'iteration')) + + print("Trainer Done!") + 
trainer.run() + + +def main(): + # parse args and config and redirect to train_sp + parser = argparse.ArgumentParser(description="Train a DiffSinger model.") + parser.add_argument("--config", type=str, help="diffsinger config file.") + parser.add_argument("--train-metadata", type=str, help="training data.") + parser.add_argument("--dev-metadata", type=str, help="dev data.") + parser.add_argument("--output-dir", type=str, help="output dir.") + parser.add_argument( + "--ngpu", type=int, default=1, help="if ngpu=0, use cpu.") + parser.add_argument( + "--phones-dict", type=str, default=None, help="phone vocabulary file.") + parser.add_argument( + "--speaker-dict", + type=str, + default=None, + help="speaker id map file for multiple speaker model.") + parser.add_argument( + "--speech-stretchs", + type=str, + help="The min and max values of the mel spectrum.") + + args = parser.parse_args() + + with open(args.config) as f: + config = CfgNode(yaml.safe_load(f)) + + print("========Args========") + print(yaml.safe_dump(vars(args))) + print("========Config========") + print(config) + print( + f"master see the word size: {dist.get_world_size()}, from pid: {os.getpid()}" + ) + + # dispatch + if args.ngpu > 1: + dist.spawn(train_sp, (args, config), nprocs=args.ngpu) + else: + train_sp(args, config) + + +if __name__ == "__main__": + main() diff --git a/paddlespeech/t2s/exps/syn_utils.py b/paddlespeech/t2s/exps/syn_utils.py index 79f6d567b..60608ee5b 100644 --- a/paddlespeech/t2s/exps/syn_utils.py +++ b/paddlespeech/t2s/exps/syn_utils.py @@ -56,6 +56,11 @@ model_alias = { "paddlespeech.t2s.models.tacotron2:Tacotron2", "tacotron2_inference": "paddlespeech.t2s.models.tacotron2:Tacotron2Inference", + "diffsinger": + "paddlespeech.t2s.models.diffsinger:DiffSinger", + "diffsinger_inference": + "paddlespeech.t2s.models.diffsinger:DiffSingerInference", + # voc "pwgan": "paddlespeech.t2s.models.parallel_wavegan:PWGGenerator", @@ -142,6 +147,8 @@ def get_test_dataset(test_metadata: List[Dict[str, Any]], fields += ["spk_emb"] else: print("single speaker fastspeech2!") + elif am_name == 'diffsinger': + fields = ["utt_id", "text", "note", "note_dur", "is_slur"] elif am_name == 'speedyspeech': fields = ["utt_id", "phones", "tones"] elif am_name == 'tacotron2': @@ -326,14 +333,16 @@ def run_frontend(frontend: object, # dygraph -def get_am_inference(am: str='fastspeech2_csmsc', - am_config: CfgNode=None, - am_ckpt: Optional[os.PathLike]=None, - am_stat: Optional[os.PathLike]=None, - phones_dict: Optional[os.PathLike]=None, - tones_dict: Optional[os.PathLike]=None, - speaker_dict: Optional[os.PathLike]=None, - return_am: bool=False): +def get_am_inference( + am: str='fastspeech2_csmsc', + am_config: CfgNode=None, + am_ckpt: Optional[os.PathLike]=None, + am_stat: Optional[os.PathLike]=None, + phones_dict: Optional[os.PathLike]=None, + tones_dict: Optional[os.PathLike]=None, + speaker_dict: Optional[os.PathLike]=None, + return_am: bool=False, + speech_stretchs: Optional[os.PathLike]=None, ): with open(phones_dict, 'rt', encoding='utf-8') as f: phn_id = [line.strip().split() for line in f.readlines()] vocab_size = len(phn_id) @@ -356,6 +365,19 @@ def get_am_inference(am: str='fastspeech2_csmsc', if am_name == 'fastspeech2': am = am_class( idim=vocab_size, odim=odim, spk_num=spk_num, **am_config["model"]) + elif am_name == 'diffsinger': + with open(speech_stretchs, "r") as f: + spec_min = np.load(speech_stretchs)[0] + spec_max = np.load(speech_stretchs)[1] + spec_min = paddle.to_tensor(spec_min) + spec_max = 
paddle.to_tensor(spec_max) + am_config["model"]["fastspeech2_params"]["spk_num"] = spk_num + am = am_class( + spec_min=spec_min, + spec_max=spec_max, + idim=vocab_size, + odim=odim, + **am_config["model"], ) elif am_name == 'speedyspeech': am = am_class( vocab_size=vocab_size, @@ -366,8 +388,6 @@ def get_am_inference(am: str='fastspeech2_csmsc', am = am_class(idim=vocab_size, odim=odim, **am_config["model"]) elif am_name == 'erniesat': am = am_class(idim=vocab_size, odim=odim, **am_config["model"]) - else: - print("wrong am, please input right am!!!") am.set_state_dict(paddle.load(am_ckpt)["main_params"]) am.eval() diff --git a/paddlespeech/t2s/exps/synthesize.py b/paddlespeech/t2s/exps/synthesize.py index 70e52244f..6189522db 100644 --- a/paddlespeech/t2s/exps/synthesize.py +++ b/paddlespeech/t2s/exps/synthesize.py @@ -60,7 +60,8 @@ def evaluate(args): am_stat=args.am_stat, phones_dict=args.phones_dict, tones_dict=args.tones_dict, - speaker_dict=args.speaker_dict) + speaker_dict=args.speaker_dict, + speech_stretchs=args.speech_stretchs, ) test_dataset = get_test_dataset( test_metadata=test_metadata, am=args.am, @@ -107,6 +108,20 @@ def evaluate(args): if args.voice_cloning and "spk_emb" in datum: spk_emb = paddle.to_tensor(np.load(datum["spk_emb"])) mel = am_inference(phone_ids, spk_emb=spk_emb) + elif am_name == 'diffsinger': + phone_ids = paddle.to_tensor(datum["text"]) + note = paddle.to_tensor(datum["note"]) + note_dur = paddle.to_tensor(datum["note_dur"]) + is_slur = paddle.to_tensor(datum["is_slur"]) + # get_mel_fs2 = False, means mel from diffusion, get_mel_fs2 = True, means mel from fastspeech2. + get_mel_fs2 = False + # mel: [T, mel_bin] + mel = am_inference( + phone_ids, + note=note, + note_dur=note_dur, + is_slur=is_slur, + get_mel_fs2=get_mel_fs2) # vocoder wav = voc_inference(mel) @@ -134,10 +149,17 @@ def parse_args(): type=str, default='fastspeech2_csmsc', choices=[ - 'speedyspeech_csmsc', 'fastspeech2_csmsc', 'fastspeech2_ljspeech', - 'fastspeech2_aishell3', 'fastspeech2_vctk', 'tacotron2_csmsc', - 'tacotron2_ljspeech', 'tacotron2_aishell3', 'fastspeech2_mix', - 'fastspeech2_canton' + 'speedyspeech_csmsc', + 'fastspeech2_csmsc', + 'fastspeech2_ljspeech', + 'fastspeech2_aishell3', + 'fastspeech2_vctk', + 'tacotron2_csmsc', + 'tacotron2_ljspeech', + 'tacotron2_aishell3', + 'fastspeech2_mix', + 'fastspeech2_canton', + 'diffsinger_opencpop', ], help='Choose acoustic model type of tts task.') parser.add_argument( @@ -170,10 +192,19 @@ def parse_args(): type=str, default='pwgan_csmsc', choices=[ - 'pwgan_csmsc', 'pwgan_ljspeech', 'pwgan_aishell3', 'pwgan_vctk', - 'mb_melgan_csmsc', 'wavernn_csmsc', 'hifigan_csmsc', - 'hifigan_ljspeech', 'hifigan_aishell3', 'hifigan_vctk', - 'style_melgan_csmsc' + 'pwgan_csmsc', + 'pwgan_ljspeech', + 'pwgan_aishell3', + 'pwgan_vctk', + 'mb_melgan_csmsc', + 'wavernn_csmsc', + 'hifigan_csmsc', + 'hifigan_ljspeech', + 'hifigan_aishell3', + 'hifigan_vctk', + 'style_melgan_csmsc', + "pwgan_opencpop", + "hifigan_opencpop", ], help='Choose vocoder type of tts task.') parser.add_argument( @@ -191,6 +222,11 @@ def parse_args(): "--ngpu", type=int, default=1, help="if ngpu == 0, use cpu.") parser.add_argument("--test_metadata", type=str, help="test metadata.") parser.add_argument("--output_dir", type=str, help="output dir.") + parser.add_argument( + "--speech_stretchs", + type=str, + default=None, + help="The min and max values of the mel spectrum.") args = parser.parse_args() return args diff --git a/paddlespeech/t2s/models/diffsinger/__init__.py 
b/paddlespeech/t2s/models/diffsinger/__init__.py new file mode 100644 index 000000000..785293ee2 --- /dev/null +++ b/paddlespeech/t2s/models/diffsinger/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from .diffsinger import * +from .diffsinger_updater import * diff --git a/paddlespeech/t2s/models/diffsinger/diffsinger.py b/paddlespeech/t2s/models/diffsinger/diffsinger.py new file mode 100644 index 000000000..990cfc56a --- /dev/null +++ b/paddlespeech/t2s/models/diffsinger/diffsinger.py @@ -0,0 +1,399 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# Modified from espnet(https://github.com/espnet/espnet) +"""DiffSinger related modules for paddle""" +from typing import Any +from typing import Dict +from typing import Tuple + +import numpy as np +import paddle +from paddle import nn +from typeguard import check_argument_types + +from paddlespeech.t2s.models.diffsinger.fastspeech2midi import FastSpeech2MIDI +from paddlespeech.t2s.modules.diffnet import DiffNet +from paddlespeech.t2s.modules.diffusion import GaussianDiffusion + + +class DiffSinger(nn.Layer): + """DiffSinger module. + + This is a module of DiffSinger described in `DiffSinger: Singing Voice Synthesis via Shallow Diffusion Mechanism`._ + .. 
_`DiffSinger: Singing Voice Synthesis via Shallow Diffusion Mechanism`: + https://arxiv.org/pdf/2105.02446.pdf + + Args: + + Returns: + + """ + + def __init__( + self, + # min and max spec for stretching before diffusion + spec_min: paddle.Tensor, + spec_max: paddle.Tensor, + # fastspeech2midi config + idim: int, + odim: int, + use_energy_pred: bool=False, + use_postnet: bool=False, + # music score related + note_num: int=300, + is_slur_num: int=2, + fastspeech2_params: Dict[str, Any]={ + "adim": 256, + "aheads": 2, + "elayers": 4, + "eunits": 1024, + "dlayers": 4, + "dunits": 1024, + "positionwise_layer_type": "conv1d", + "positionwise_conv_kernel_size": 1, + "use_scaled_pos_enc": True, + "use_batch_norm": True, + "encoder_normalize_before": True, + "decoder_normalize_before": True, + "encoder_concat_after": False, + "decoder_concat_after": False, + "reduction_factor": 1, + # for transformer + "transformer_enc_dropout_rate": 0.1, + "transformer_enc_positional_dropout_rate": 0.1, + "transformer_enc_attn_dropout_rate": 0.1, + "transformer_dec_dropout_rate": 0.1, + "transformer_dec_positional_dropout_rate": 0.1, + "transformer_dec_attn_dropout_rate": 0.1, + "transformer_activation_type": "gelu", + # duration predictor + "duration_predictor_layers": 2, + "duration_predictor_chans": 384, + "duration_predictor_kernel_size": 3, + "duration_predictor_dropout_rate": 0.1, + # pitch predictor + "use_pitch_embed": True, + "pitch_predictor_layers": 2, + "pitch_predictor_chans": 384, + "pitch_predictor_kernel_size": 3, + "pitch_predictor_dropout": 0.5, + "pitch_embed_kernel_size": 9, + "pitch_embed_dropout": 0.5, + "stop_gradient_from_pitch_predictor": False, + # energy predictor + "use_energy_embed": False, + "energy_predictor_layers": 2, + "energy_predictor_chans": 384, + "energy_predictor_kernel_size": 3, + "energy_predictor_dropout": 0.5, + "energy_embed_kernel_size": 9, + "energy_embed_dropout": 0.5, + "stop_gradient_from_energy_predictor": False, + # postnet + "postnet_layers": 5, + "postnet_chans": 512, + "postnet_filts": 5, + "postnet_dropout_rate": 0.5, + # spk emb + "spk_num": None, + "spk_embed_dim": None, + "spk_embed_integration_type": "add", + # training related + "init_type": "xavier_uniform", + "init_enc_alpha": 1.0, + "init_dec_alpha": 1.0, + # speaker classifier + "enable_speaker_classifier": False, + "hidden_sc_dim": 256, + }, + # denoiser config + denoiser_params: Dict[str, Any]={ + "in_channels": 80, + "out_channels": 80, + "kernel_size": 3, + "layers": 20, + "stacks": 5, + "residual_channels": 256, + "gate_channels": 512, + "skip_channels": 256, + "aux_channels": 256, + "dropout": 0., + "bias": True, + "use_weight_norm": False, + "init_type": "kaiming_normal", + }, + # diffusion config + diffusion_params: Dict[str, Any]={ + "num_train_timesteps": 100, + "beta_start": 0.0001, + "beta_end": 0.06, + "beta_schedule": "squaredcos_cap_v2", + "num_max_timesteps": 60, + "stretch": True, + }, ): + """Initialize DiffSinger module. + + Args: + spec_min (paddle.Tensor): The minimum value of the feature(mel) to stretch before diffusion. + spec_max (paddle.Tensor): The maximum value of the feature(mel) to stretch before diffusion. + idim (int): Dimension of the inputs (Input vocabrary size.). + odim (int): Dimension of the outputs (Acoustic feature dimension.). + use_energy_pred (bool, optional): whether use energy predictor. Defaults False. + use_postnet (bool, optional): whether use postnet. Defaults False. + note_num (int, optional): The number of note. Defaults to 300. 
+ is_slur_num (int, optional): The number of slur. Defaults to 2. + fastspeech2_params (Dict[str, Any]): Parameter dict for fastspeech2 module. + denoiser_params (Dict[str, Any]): Parameter dict for dinoiser module. + diffusion_params (Dict[str, Any]): Parameter dict for diffusion module. + """ + assert check_argument_types() + super().__init__() + self.fs2 = FastSpeech2MIDI( + idim=idim, + odim=odim, + fastspeech2_params=fastspeech2_params, + note_num=note_num, + is_slur_num=is_slur_num, + use_energy_pred=use_energy_pred, + use_postnet=use_postnet, ) + denoiser = DiffNet(**denoiser_params) + self.diffusion = GaussianDiffusion( + denoiser, + **diffusion_params, + min_values=spec_min, + max_values=spec_max, ) + + def forward( + self, + text: paddle.Tensor, + note: paddle.Tensor, + note_dur: paddle.Tensor, + is_slur: paddle.Tensor, + text_lengths: paddle.Tensor, + speech: paddle.Tensor, + speech_lengths: paddle.Tensor, + durations: paddle.Tensor, + pitch: paddle.Tensor, + energy: paddle.Tensor, + spk_emb: paddle.Tensor=None, + spk_id: paddle.Tensor=None, + only_train_fs2: bool=True, + ) -> Tuple[paddle.Tensor, Dict[str, paddle.Tensor], paddle.Tensor]: + """Calculate forward propagation. + + Args: + text(Tensor(int64)): + Batch of padded token (phone) ids (B, Tmax). + note(Tensor(int64)): + Batch of padded note (element in music score) ids (B, Tmax). + note_dur(Tensor(float32)): + Batch of padded note durations in seconds (element in music score) (B, Tmax). + is_slur(Tensor(int64)): + Batch of padded slur (element in music score) ids (B, Tmax). + text_lengths(Tensor(int64)): + Batch of phone lengths of each input (B,). + speech(Tensor[float32]): + Batch of padded target features (e.g. mel) (B, Lmax, odim). + speech_lengths(Tensor(int64)): + Batch of the lengths of each target features (B,). + durations(Tensor(int64)): + Batch of padded token durations in frame (B, Tmax). + pitch(Tensor[float32]): + Batch of padded frame-averaged pitch (B, Lmax, 1). + energy(Tensor[float32]): + Batch of padded frame-averaged energy (B, Lmax, 1). + spk_emb(Tensor[float32], optional): + Batch of speaker embeddings (B, spk_embed_dim). 
+            spk_id(Tensor[int64], optional):
+                Batch of speaker ids (B,)
+            only_train_fs2(bool):
+                Whether to train only the fastspeech2 module
+
+        Returns:
+            FastSpeech2 outputs when only_train_fs2 is True, otherwise the predicted noise, the target noise and the mel masks from the diffusion module.
+
+        """
+        # only train fastspeech2 module firstly
+        before_outs, after_outs, d_outs, p_outs, e_outs, ys, olens, spk_logits = self.fs2(
+            text=text,
+            note=note,
+            note_dur=note_dur,
+            is_slur=is_slur,
+            text_lengths=text_lengths,
+            speech=speech,
+            speech_lengths=speech_lengths,
+            durations=durations,
+            pitch=pitch,
+            energy=energy,
+            spk_id=spk_id,
+            spk_emb=spk_emb)
+        if only_train_fs2:
+            return before_outs, after_outs, d_outs, p_outs, e_outs, ys, olens, spk_logits
+
+        # get the encoder output from fastspeech2 as the condition of denoiser module
+        cond_fs2, mel_masks = self.fs2.encoder_infer_batch(
+            text=text,
+            note=note,
+            note_dur=note_dur,
+            is_slur=is_slur,
+            text_lengths=text_lengths,
+            speech_lengths=speech_lengths,
+            ds=durations,
+            ps=pitch,
+            es=energy)
+        cond_fs2 = cond_fs2.transpose((0, 2, 1))
+
+        # get the output(final mel) from diffusion module
+        noise_pred, noise_target = self.diffusion(
+            speech.transpose((0, 2, 1)), cond_fs2)
+        return noise_pred, noise_target, mel_masks
+
+    def inference(
+            self,
+            text: paddle.Tensor,
+            note: paddle.Tensor,
+            note_dur: paddle.Tensor,
+            is_slur: paddle.Tensor,
+            get_mel_fs2: bool=False, ):
+        """Run inference
+
+        Args:
+            text(Tensor(int64)):
+                Batch of padded token (phone) ids (B, Tmax).
+            note(Tensor(int64)):
+                Batch of padded note (element in music score) ids (B, Tmax).
+            note_dur(Tensor(float32)):
+                Batch of padded note durations in seconds (element in music score) (B, Tmax).
+            is_slur(Tensor(int64)):
+                Batch of padded slur (element in music score) ids (B, Tmax).
+            get_mel_fs2 (bool, optional):
+                Whether to get mel from the fastspeech2 module. Defaults to False.
+
+        Returns:
+            Tensor: the predicted mel spectrogram; from fastspeech2 when get_mel_fs2 is True, otherwise denoised by the diffusion module.
+
+        """
+        mel_fs2, _, _, _ = self.fs2.inference(text, note, note_dur, is_slur)
+        if get_mel_fs2:
+            return mel_fs2
+        mel_fs2 = mel_fs2.unsqueeze(0).transpose((0, 2, 1))
+        cond_fs2 = self.fs2.encoder_infer(text, note, note_dur, is_slur)
+        cond_fs2 = cond_fs2.transpose((0, 2, 1))
+        noise = paddle.randn(mel_fs2.shape)
+        mel = self.diffusion.inference(
+            noise=noise,
+            cond=cond_fs2,
+            ref_x=mel_fs2,
+            scheduler_type="ddpm",
+            num_inference_steps=60)
+        mel = mel.transpose((0, 2, 1))
+        return mel[0]
+
+
+class DiffSingerInference(nn.Layer):
+    def __init__(self, normalizer, model):
+        super().__init__()
+        self.normalizer = normalizer
+        self.acoustic_model = model
+
+    def forward(self, text, note, note_dur, is_slur, get_mel_fs2: bool=False):
+        """Calculate forward propagation.
+
+        Args:
+            text(Tensor(int64)):
+                Batch of padded token (phone) ids (B, Tmax).
+            note(Tensor(int64)):
+                Batch of padded note (element in music score) ids (B, Tmax).
+            note_dur(Tensor(float32)):
+                Batch of padded note durations in seconds (element in music score) (B, Tmax).
+            is_slur(Tensor(int64)):
+                Batch of padded slur (element in music score) ids (B, Tmax).
+            get_mel_fs2 (bool, optional):
+                Whether to get mel from the fastspeech2 module. Defaults to False.
+
+        Returns:
+            logmel(Tensor(float32)): denorm logmel, [T, mel_bin]
+        """
+        normalized_mel = self.acoustic_model.inference(
+            text=text,
+            note=note,
+            note_dur=note_dur,
+            is_slur=is_slur,
+            get_mel_fs2=get_mel_fs2)
+        logmel = normalized_mel
+        return logmel
+
+
+class DiffusionLoss(nn.Layer):
+    """Loss function module for the diffusion module of DiffSinger."""
+
+    def __init__(self, use_masking: bool=True,
+                 use_weighted_masking: bool=False):
+        """Initialize DiffusionLoss module.
+ Args: + use_masking (bool): + Whether to apply masking for padded part in loss calculation. + use_weighted_masking (bool): + Whether to weighted masking in loss calculation. + """ + assert check_argument_types() + super().__init__() + + assert (use_masking != use_weighted_masking) or not use_masking + self.use_masking = use_masking + self.use_weighted_masking = use_weighted_masking + + # define criterions + reduction = "none" if self.use_weighted_masking else "mean" + self.l1_criterion = nn.L1Loss(reduction=reduction) + + def forward( + self, + noise_pred: paddle.Tensor, + noise_target: paddle.Tensor, + mel_masks: paddle.Tensor, ) -> paddle.Tensor: + """Calculate forward propagation. + + Args: + noise_pred(Tensor): + Batch of outputs predict noise (B, Lmax, odim). + noise_target(Tensor): + Batch of target noise (B, Lmax, odim). + mel_masks(Tensor): + Batch of mask of real mel (B, Lmax, 1). + Returns: + + """ + # apply mask to remove padded part + if self.use_masking: + noise_pred = noise_pred.masked_select( + mel_masks.broadcast_to(noise_pred.shape)) + noise_target = noise_target.masked_select( + mel_masks.broadcast_to(noise_target.shape)) + + # calculate loss + l1_loss = self.l1_criterion(noise_pred, noise_target) + + # make weighted mask and apply it + if self.use_weighted_masking: + mel_masks = mel_masks.unsqueeze(-1) + out_weights = mel_masks.cast(dtype=paddle.float32) / mel_masks.cast( + dtype=paddle.float32).sum( + axis=1, keepdim=True) + out_weights /= noise_target.shape[0] * noise_target.shape[2] + + # apply weight + l1_loss = l1_loss.multiply(out_weights) + l1_loss = l1_loss.masked_select( + mel_masks.broadcast_to(l1_loss.shape)).sum() + + return l1_loss diff --git a/paddlespeech/t2s/models/diffsinger/diffsinger_updater.py b/paddlespeech/t2s/models/diffsinger/diffsinger_updater.py new file mode 100644 index 000000000..d89b09b2a --- /dev/null +++ b/paddlespeech/t2s/models/diffsinger/diffsinger_updater.py @@ -0,0 +1,302 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
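The masking path of DiffusionLoss.forward above reduces to selecting the unmasked positions before taking the mean L1; a minimal sketch with dummy tensors (shapes illustrative):

import paddle

# dummy predicted / target noise; the mask marks the first 7 of 10 frames as real
noise_pred = paddle.randn([1, 10, 80])
noise_target = paddle.randn([1, 10, 80])
mel_masks = (paddle.arange(10) < 7).reshape([1, 10, 1])

# masked_select flattens, so the mean L1 runs only over unmasked values
pred = noise_pred.masked_select(mel_masks.broadcast_to(noise_pred.shape))
target = noise_target.masked_select(mel_masks.broadcast_to(noise_target.shape))
l1 = paddle.nn.L1Loss()(pred, target)  # averages over 7 * 80 real values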
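diffsinger_updater.py below alternates the two optimizers on a step schedule: update_core trains FastSpeech2 alone while iteration < ds_train_start_steps and the diffusion denoiser once iteration > ds_train_start_steps (strict inequalities, so the boundary step itself updates neither; with only_train_diffusion=True the fs2 parameters are frozen in the second phase). A distilled sketch of that gate:

def active_module(iteration: int, ds_train_start_steps: int=160000) -> str:
    # mirrors DiffSingerUpdater.update_core: fs2 phase first, diffusion phase later
    if iteration < ds_train_start_steps:
        return "fs2"
    if iteration > ds_train_start_steps:
        return "diffusion"
    return "none"  # the boundary step matches neither branch in update_core

assert active_module(159999) == "fs2"
assert active_module(160001) == "diffusion"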
+import logging +from pathlib import Path +from typing import Dict + +import paddle +from paddle import distributed as dist +from paddle.io import DataLoader +from paddle.nn import Layer +from paddle.optimizer import Optimizer + +from paddlespeech.t2s.training.extensions.evaluator import StandardEvaluator +from paddlespeech.t2s.training.reporter import report +from paddlespeech.t2s.training.updaters.standard_updater import StandardUpdater +from paddlespeech.t2s.training.updaters.standard_updater import UpdaterState + +logging.basicConfig( + format='%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s', + datefmt='[%Y-%m-%d %H:%M:%S]') +logger = logging.getLogger(__name__) +logger.setLevel(logging.INFO) + + +class DiffSingerUpdater(StandardUpdater): + def __init__(self, + model: Layer, + optimizers: Dict[str, Optimizer], + criterions: Dict[str, Layer], + dataloader: DataLoader, + ds_train_start_steps: int=160000, + output_dir: Path=None, + only_train_diffusion: bool=True): + super().__init__(model, optimizers, dataloader, init_state=None) + self.model = model._layers if isinstance(model, + paddle.DataParallel) else model + self.only_train_diffusion = only_train_diffusion + + self.optimizers = optimizers + self.optimizer_fs2: Optimizer = optimizers['fs2'] + self.optimizer_ds: Optimizer = optimizers['ds'] + + self.criterions = criterions + self.criterion_fs2 = criterions['fs2'] + self.criterion_ds = criterions['ds'] + + self.dataloader = dataloader + + self.ds_train_start_steps = ds_train_start_steps + + self.state = UpdaterState(iteration=0, epoch=0) + self.train_iterator = iter(self.dataloader) + + log_file = output_dir / 'worker_{}.log'.format(dist.get_rank()) + self.filehandler = logging.FileHandler(str(log_file)) + logger.addHandler(self.filehandler) + self.logger = logger + self.msg = "" + + def update_core(self, batch): + self.msg = "Rank: {}, ".format(dist.get_rank()) + losses_dict = {} + # spk_id!=None in multiple spk diffsinger + spk_id = batch["spk_id"] if "spk_id" in batch else None + spk_emb = batch["spk_emb"] if "spk_emb" in batch else None + # No explicit speaker identifier labels are used during voice cloning training. 
+ if spk_emb is not None: + spk_id = None + + # only train fastspeech2 module firstly + if self.state.iteration < self.ds_train_start_steps: + before_outs, after_outs, d_outs, p_outs, e_outs, ys, olens, spk_logits = self.model( + text=batch["text"], + note=batch["note"], + note_dur=batch["note_dur"], + is_slur=batch["is_slur"], + text_lengths=batch["text_lengths"], + speech=batch["speech"], + speech_lengths=batch["speech_lengths"], + durations=batch["durations"], + pitch=batch["pitch"], + energy=batch["energy"], + spk_id=spk_id, + spk_emb=spk_emb, + only_train_fs2=True, ) + + l1_loss_fs2, ssim_loss_fs2, duration_loss, pitch_loss, energy_loss, speaker_loss = self.criterion_fs2( + after_outs=after_outs, + before_outs=before_outs, + d_outs=d_outs, + p_outs=p_outs, + e_outs=e_outs, + ys=ys, + ds=batch["durations"], + ps=batch["pitch"], + es=batch["energy"], + ilens=batch["text_lengths"], + olens=olens, + spk_logits=spk_logits, + spk_ids=spk_id, ) + + loss_fs2 = l1_loss_fs2 + ssim_loss_fs2 + duration_loss + pitch_loss + energy_loss + speaker_loss + + self.optimizer_fs2.clear_grad() + loss_fs2.backward() + self.optimizer_fs2.step() + + report("train/loss_fs2", float(loss_fs2)) + report("train/l1_loss_fs2", float(l1_loss_fs2)) + report("train/ssim_loss_fs2", float(ssim_loss_fs2)) + report("train/duration_loss", float(duration_loss)) + report("train/pitch_loss", float(pitch_loss)) + + losses_dict["l1_loss_fs2"] = float(l1_loss_fs2) + losses_dict["ssim_loss_fs2"] = float(ssim_loss_fs2) + losses_dict["duration_loss"] = float(duration_loss) + losses_dict["pitch_loss"] = float(pitch_loss) + + if speaker_loss != 0.: + report("train/speaker_loss", float(speaker_loss)) + losses_dict["speaker_loss"] = float(speaker_loss) + if energy_loss != 0.: + report("train/energy_loss", float(energy_loss)) + losses_dict["energy_loss"] = float(energy_loss) + + losses_dict["loss_fs2"] = float(loss_fs2) + self.msg += ', '.join('{}: {:>.6f}'.format(k, v) + for k, v in losses_dict.items()) + + # Then only train diffusion module, freeze fastspeech2 parameters. 
+ if self.state.iteration > self.ds_train_start_steps: + for param in self.model.fs2.parameters(): + param.trainable = False if self.only_train_diffusion else True + + noise_pred, noise_target, mel_masks = self.model( + text=batch["text"], + note=batch["note"], + note_dur=batch["note_dur"], + is_slur=batch["is_slur"], + text_lengths=batch["text_lengths"], + speech=batch["speech"], + speech_lengths=batch["speech_lengths"], + durations=batch["durations"], + pitch=batch["pitch"], + energy=batch["energy"], + spk_id=spk_id, + spk_emb=spk_emb, + only_train_fs2=False, ) + + noise_pred = noise_pred.transpose((0, 2, 1)) + noise_target = noise_target.transpose((0, 2, 1)) + mel_masks = mel_masks.transpose((0, 2, 1)) + l1_loss_ds = self.criterion_ds( + noise_pred=noise_pred, + noise_target=noise_target, + mel_masks=mel_masks, ) + + loss_ds = l1_loss_ds + + self.optimizer_ds.clear_grad() + loss_ds.backward() + self.optimizer_ds.step() + + report("train/loss_ds", float(loss_ds)) + report("train/l1_loss_ds", float(l1_loss_ds)) + losses_dict["l1_loss_ds"] = float(l1_loss_ds) + losses_dict["loss_ds"] = float(loss_ds) + self.msg += ', '.join('{}: {:>.6f}'.format(k, v) + for k, v in losses_dict.items()) + + self.logger.info(self.msg) + + +class DiffSingerEvaluator(StandardEvaluator): + def __init__( + self, + model: Layer, + criterions: Dict[str, Layer], + dataloader: DataLoader, + output_dir: Path=None, ): + super().__init__(model, dataloader) + self.model = model._layers if isinstance(model, + paddle.DataParallel) else model + + self.criterions = criterions + self.criterion_fs2 = criterions['fs2'] + self.criterion_ds = criterions['ds'] + self.dataloader = dataloader + + log_file = output_dir / 'worker_{}.log'.format(dist.get_rank()) + self.filehandler = logging.FileHandler(str(log_file)) + logger.addHandler(self.filehandler) + self.logger = logger + self.msg = "" + + def evaluate_core(self, batch): + self.msg = "Evaluate: " + losses_dict = {} + # spk_id!=None in multiple spk diffsinger + spk_id = batch["spk_id"] if "spk_id" in batch else None + spk_emb = batch["spk_emb"] if "spk_emb" in batch else None + if spk_emb is not None: + spk_id = None + + # Here show fastspeech2 eval + before_outs, after_outs, d_outs, p_outs, e_outs, ys, olens, spk_logits = self.model( + text=batch["text"], + note=batch["note"], + note_dur=batch["note_dur"], + is_slur=batch["is_slur"], + text_lengths=batch["text_lengths"], + speech=batch["speech"], + speech_lengths=batch["speech_lengths"], + durations=batch["durations"], + pitch=batch["pitch"], + energy=batch["energy"], + spk_id=spk_id, + spk_emb=spk_emb, + only_train_fs2=True, ) + + l1_loss_fs2, ssim_loss_fs2, duration_loss, pitch_loss, energy_loss, speaker_loss = self.criterion_fs2( + after_outs=after_outs, + before_outs=before_outs, + d_outs=d_outs, + p_outs=p_outs, + e_outs=e_outs, + ys=ys, + ds=batch["durations"], + ps=batch["pitch"], + es=batch["energy"], + ilens=batch["text_lengths"], + olens=olens, + spk_logits=spk_logits, + spk_ids=spk_id, ) + + loss_fs2 = l1_loss_fs2 + ssim_loss_fs2 + duration_loss + pitch_loss + energy_loss + speaker_loss + + report("eval/loss_fs2", float(loss_fs2)) + report("eval/l1_loss_fs2", float(l1_loss_fs2)) + report("eval/ssim_loss_fs2", float(ssim_loss_fs2)) + report("eval/duration_loss", float(duration_loss)) + report("eval/pitch_loss", float(pitch_loss)) + + losses_dict["l1_loss_fs2"] = float(l1_loss_fs2) + losses_dict["ssim_loss_fs2"] = float(ssim_loss_fs2) + losses_dict["duration_loss"] = float(duration_loss) + losses_dict["pitch_loss"] = 
float(pitch_loss)
+
+        if speaker_loss != 0.:
+            report("eval/speaker_loss", float(speaker_loss))
+            losses_dict["speaker_loss"] = float(speaker_loss)
+        if energy_loss != 0.:
+            report("eval/energy_loss", float(energy_loss))
+            losses_dict["energy_loss"] = float(energy_loss)
+
+        losses_dict["loss_fs2"] = float(loss_fs2)
+
+        # Here show diffusion eval
+        noise_pred, noise_target, mel_masks = self.model(
+            text=batch["text"],
+            note=batch["note"],
+            note_dur=batch["note_dur"],
+            is_slur=batch["is_slur"],
+            text_lengths=batch["text_lengths"],
+            speech=batch["speech"],
+            speech_lengths=batch["speech_lengths"],
+            durations=batch["durations"],
+            pitch=batch["pitch"],
+            energy=batch["energy"],
+            spk_id=spk_id,
+            spk_emb=spk_emb,
+            only_train_fs2=False, )
+
+        noise_pred = noise_pred.transpose((0, 2, 1))
+        noise_target = noise_target.transpose((0, 2, 1))
+        mel_masks = mel_masks.transpose((0, 2, 1))
+        l1_loss_ds = self.criterion_ds(
+            noise_pred=noise_pred,
+            noise_target=noise_target,
+            mel_masks=mel_masks, )
+
+        loss_ds = l1_loss_ds
+
+        report("eval/loss_ds", float(loss_ds))
+        report("eval/l1_loss_ds", float(l1_loss_ds))
+        losses_dict["l1_loss_ds"] = float(l1_loss_ds)
+        losses_dict["loss_ds"] = float(loss_ds)
+        self.msg += ', '.join('{}: {:>.6f}'.format(k, v)
+                              for k, v in losses_dict.items())
+
+        self.logger.info(self.msg)
diff --git a/paddlespeech/t2s/models/diffsinger/fastspeech2midi.py b/paddlespeech/t2s/models/diffsinger/fastspeech2midi.py
new file mode 100644
index 000000000..cce88d8a0
--- /dev/null
+++ b/paddlespeech/t2s/models/diffsinger/fastspeech2midi.py
@@ -0,0 +1,654 @@
+# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# Modified from espnet(https://github.com/espnet/espnet)
+from typing import Any
+from typing import Dict
+from typing import Sequence
+from typing import Tuple
+
+import paddle
+from paddle import nn
+from typeguard import check_argument_types
+
+from paddlespeech.t2s.models.fastspeech2 import FastSpeech2
+from paddlespeech.t2s.models.fastspeech2 import FastSpeech2Loss
+from paddlespeech.t2s.modules.losses import ssim
+from paddlespeech.t2s.modules.masked_fill import masked_fill
+from paddlespeech.t2s.modules.nets_utils import make_non_pad_mask
+from paddlespeech.t2s.modules.nets_utils import make_pad_mask
+
+
+class FastSpeech2MIDI(FastSpeech2):
+    """The FastSpeech2 module of DiffSinger.
+    """
+
+    def __init__(
+            self,
+            # fastspeech2 network structure related
+            idim: int,
+            odim: int,
+            fastspeech2_params: Dict[str, Any],
+            # note emb
+            note_num: int=300,
+            # is_slur emb
+            is_slur_num: int=2,
+            use_energy_pred: bool=False,
+            use_postnet: bool=False, ):
+        """Initialize FastSpeech2 module for SVS (singing voice synthesis).
+        Args:
+            fastspeech2_params (Dict):
+                The config of the FastSpeech2 module in the DiffSinger model.
+            note_num (Optional[int]):
+                Number of notes. If not None, assume that the
+                note_ids will be provided as the input and use note_embedding_table.
+            is_slur_num (Optional[int]):
+                Number of slur states.
If not None, assume that the + is_slur_ids will be provided as the input + + """ + assert check_argument_types() + super().__init__(idim=idim, odim=odim, **fastspeech2_params) + self.use_energy_pred = use_energy_pred + self.use_postnet = use_postnet + if not self.use_postnet: + self.postnet = None + + self.note_embed_dim = self.is_slur_embed_dim = fastspeech2_params[ + "adim"] + + # note_ embed + self.note_embedding_table = nn.Embedding( + num_embeddings=note_num, + embedding_dim=self.note_embed_dim, + padding_idx=self.padding_idx) + self.note_dur_layer = nn.Linear(1, self.note_embed_dim) + + # slur embed + self.is_slur_embedding_table = nn.Embedding( + num_embeddings=is_slur_num, + embedding_dim=self.is_slur_embed_dim, + padding_idx=self.padding_idx) + + def forward( + self, + text: paddle.Tensor, + note: paddle.Tensor, + note_dur: paddle.Tensor, + is_slur: paddle.Tensor, + text_lengths: paddle.Tensor, + speech: paddle.Tensor, + speech_lengths: paddle.Tensor, + durations: paddle.Tensor, + pitch: paddle.Tensor, + energy: paddle.Tensor, + spk_emb: paddle.Tensor=None, + spk_id: paddle.Tensor=None, + ) -> Tuple[paddle.Tensor, Dict[str, paddle.Tensor], paddle.Tensor]: + """Calculate forward propagation. + + Args: + text(Tensor(int64)): + Batch of padded token (phone) ids (B, Tmax). + note(Tensor(int64)): + Batch of padded note (element in music score) ids (B, Tmax). + note_dur(Tensor(float32)): + Batch of padded note durations in seconds (element in music score) (B, Tmax). + is_slur(Tensor(int64)): + Batch of padded slur (element in music score) ids (B, Tmax). + text_lengths(Tensor(int64)): + Batch of phone lengths of each input (B,). + speech(Tensor[float32]): + Batch of padded target features (e.g. mel) (B, Lmax, odim). + speech_lengths(Tensor(int64)): + Batch of the lengths of each target features (B,). + durations(Tensor(int64)): + Batch of padded token durations in frame (B, Tmax). + pitch(Tensor[float32]): + Batch of padded frame-averaged pitch (B, Lmax, 1). + energy(Tensor[float32]): + Batch of padded frame-averaged energy (B, Lmax, 1). + spk_emb(Tensor[float32], optional): + Batch of speaker embeddings (B, spk_embed_dim). 
+            spk_id(Tensor(int64), optional):
+                Batch of speaker ids (B,)
+
+        Returns:
+            Tuple: before_outs, after_outs, d_outs, p_outs, e_outs,
+                the trimmed target ys, olens, and spk_logits.
+
+        """
+        xs = paddle.cast(text, 'int64')
+        note = paddle.cast(note, 'int64')
+        note_dur = paddle.cast(note_dur, 'float32')
+        is_slur = paddle.cast(is_slur, 'int64')
+        ilens = paddle.cast(text_lengths, 'int64')
+        olens = paddle.cast(speech_lengths, 'int64')
+        ds = paddle.cast(durations, 'int64')
+        ps = pitch
+        es = energy
+        ys = speech
+        olens = speech_lengths
+        if spk_id is not None:
+            spk_id = paddle.cast(spk_id, 'int64')
+        # forward propagation
+        before_outs, after_outs, d_outs, p_outs, e_outs, spk_logits = self._forward(
+            xs=xs,
+            note=note,
+            note_dur=note_dur,
+            is_slur=is_slur,
+            ilens=ilens,
+            olens=olens,
+            ds=ds,
+            ps=ps,
+            es=es,
+            is_inference=False,
+            spk_emb=spk_emb,
+            spk_id=spk_id, )
+        # modify mod part of groundtruth
+        if self.reduction_factor > 1:
+            olens = olens - olens % self.reduction_factor
+            max_olen = max(olens)
+            ys = ys[:, :max_olen]
+
+        return before_outs, after_outs, d_outs, p_outs, e_outs, ys, olens, spk_logits
+
+    def _forward(
+            self,
+            xs: paddle.Tensor,
+            note: paddle.Tensor,
+            note_dur: paddle.Tensor,
+            is_slur: paddle.Tensor,
+            ilens: paddle.Tensor,
+            olens: paddle.Tensor=None,
+            ds: paddle.Tensor=None,
+            ps: paddle.Tensor=None,
+            es: paddle.Tensor=None,
+            is_inference: bool=False,
+            is_train_diffusion: bool=False,
+            return_after_enc=False,
+            alpha: float=1.0,
+            spk_emb=None,
+            spk_id=None, ) -> Sequence[paddle.Tensor]:
+
+        before_outs = after_outs = d_outs = p_outs = e_outs = spk_logits = None
+        # forward encoder
+        masks = self._source_mask(ilens)
+        note_emb = self.note_embedding_table(note)
+        note_dur_emb = self.note_dur_layer(paddle.unsqueeze(note_dur, axis=-1))
+        is_slur_emb = self.is_slur_embedding_table(is_slur)
+
+        # (B, Tmax, adim)
+        hs, _ = self.encoder(
+            xs=xs,
+            masks=masks,
+            note_emb=note_emb,
+            note_dur_emb=note_dur_emb,
+            is_slur_emb=is_slur_emb, )
+
+        if self.spk_num and self.enable_speaker_classifier and not is_inference:
+            hs_for_spk_cls = self.grad_reverse(hs)
+            spk_logits = self.speaker_classifier(hs_for_spk_cls, ilens)
+        else:
+            spk_logits = None
+
+        # integrate speaker embedding
+        if self.spk_embed_dim is not None:
+            # spk_emb has a higher priority than spk_id
+            if spk_emb is not None:
+                hs = self._integrate_with_spk_embed(hs, spk_emb)
+            elif spk_id is not None:
+                spk_emb = self.spk_embedding_table(spk_id)
+                hs = self._integrate_with_spk_embed(hs, spk_emb)
+
+        # forward duration predictor (phone-level) and variance predictors (frame-level)
+        d_masks = make_pad_mask(ilens)
+        if olens is not None:
+            pitch_masks = make_pad_mask(olens).unsqueeze(-1)
+        else:
+            pitch_masks = None
+
+        # inference for decoder input for diffusion
+        if is_train_diffusion:
+            hs = self.length_regulator(hs, ds, is_inference=False)
+            p_outs = self.pitch_predictor(hs.detach(), pitch_masks)
+            p_embs = self.pitch_embed(p_outs.transpose((0, 2, 1))).transpose(
+                (0, 2, 1))
+            hs += p_embs
+            if self.use_energy_pred:
+                e_outs = self.energy_predictor(hs.detach(), pitch_masks)
+                e_embs = self.energy_embed(
+                    e_outs.transpose((0, 2, 1))).transpose((0, 2, 1))
+                hs += e_embs
+
+        elif is_inference:
+            # (B, Tmax)
+            if ds is not None:
+                d_outs = ds
+            else:
+                d_outs = self.duration_predictor.inference(hs, d_masks)
+
+            # (B, Lmax, adim)
+            hs = self.length_regulator(hs, d_outs, alpha, is_inference=True)
+
+            if ps is not None:
+                p_outs = ps
+            else:
+                if self.stop_gradient_from_pitch_predictor:
+                    p_outs = self.pitch_predictor(hs.detach(), pitch_masks)
+                else:
+                    p_outs = self.pitch_predictor(hs,
pitch_masks) + p_embs = self.pitch_embed(p_outs.transpose((0, 2, 1))).transpose( + (0, 2, 1)) + hs += p_embs + + if self.use_energy_pred: + if es is not None: + e_outs = es + else: + if self.stop_gradient_from_energy_predictor: + e_outs = self.energy_predictor(hs.detach(), pitch_masks) + else: + e_outs = self.energy_predictor(hs, pitch_masks) + e_embs = self.energy_embed( + e_outs.transpose((0, 2, 1))).transpose((0, 2, 1)) + hs += e_embs + + # training + else: + d_outs = self.duration_predictor(hs, d_masks) + # (B, Lmax, adim) + hs = self.length_regulator(hs, ds, is_inference=False) + if self.stop_gradient_from_pitch_predictor: + p_outs = self.pitch_predictor(hs.detach(), pitch_masks) + else: + p_outs = self.pitch_predictor(hs, pitch_masks) + p_embs = self.pitch_embed(ps.transpose((0, 2, 1))).transpose( + (0, 2, 1)) + hs += p_embs + + if self.use_energy_pred: + if self.stop_gradient_from_energy_predictor: + e_outs = self.energy_predictor(hs.detach(), pitch_masks) + else: + e_outs = self.energy_predictor(hs, pitch_masks) + e_embs = self.energy_embed(es.transpose((0, 2, 1))).transpose( + (0, 2, 1)) + hs += e_embs + + # forward decoder + if olens is not None and not is_inference: + if self.reduction_factor > 1: + olens_in = paddle.to_tensor( + [olen // self.reduction_factor for olen in olens.numpy()]) + else: + olens_in = olens + # (B, 1, T) + h_masks = self._source_mask(olens_in) + else: + h_masks = None + + if return_after_enc: + return hs, h_masks + + if self.decoder_type == 'cnndecoder': + # remove output masks for dygraph to static graph + zs = self.decoder(hs, h_masks) + before_outs = zs + else: + # (B, Lmax, adim) + zs, _ = self.decoder(hs, h_masks) + # (B, Lmax, odim) + before_outs = self.feat_out(zs).reshape( + (paddle.shape(zs)[0], -1, self.odim)) + + # postnet -> (B, Lmax//r * r, odim) + if self.postnet is None: + after_outs = before_outs + else: + after_outs = before_outs + self.postnet( + before_outs.transpose((0, 2, 1))).transpose((0, 2, 1)) + + return before_outs, after_outs, d_outs, p_outs, e_outs, spk_logits + + def encoder_infer( + self, + text: paddle.Tensor, + note: paddle.Tensor, + note_dur: paddle.Tensor, + is_slur: paddle.Tensor, + alpha: float=1.0, + spk_emb=None, + spk_id=None, + ) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor]: + xs = paddle.cast(text, 'int64').unsqueeze(0) + note = paddle.cast(note, 'int64').unsqueeze(0) + note_dur = paddle.cast(note_dur, 'float32').unsqueeze(0) + is_slur = paddle.cast(is_slur, 'int64').unsqueeze(0) + # setup batch axis + ilens = paddle.shape(xs)[1] + + if spk_emb is not None: + spk_emb = spk_emb.unsqueeze(0) + + # (1, L, odim) + # use *_ to avoid bug in dygraph to static graph + hs, _ = self._forward( + xs=xs, + note=note, + note_dur=note_dur, + is_slur=is_slur, + ilens=ilens, + is_inference=True, + return_after_enc=True, + alpha=alpha, + spk_emb=spk_emb, + spk_id=spk_id, ) + return hs + + # get encoder output for diffusion training + def encoder_infer_batch( + self, + text: paddle.Tensor, + note: paddle.Tensor, + note_dur: paddle.Tensor, + is_slur: paddle.Tensor, + text_lengths: paddle.Tensor, + speech_lengths: paddle.Tensor, + ds: paddle.Tensor=None, + ps: paddle.Tensor=None, + es: paddle.Tensor=None, + alpha: float=1.0, + spk_emb=None, + spk_id=None, ) -> Tuple[paddle.Tensor, paddle.Tensor]: + + xs = paddle.cast(text, 'int64') + note = paddle.cast(note, 'int64') + note_dur = paddle.cast(note_dur, 'float32') + is_slur = paddle.cast(is_slur, 'int64') + ilens = paddle.cast(text_lengths, 'int64') + olens = 
paddle.cast(speech_lengths, 'int64')
+
+        if spk_emb is not None:
+            spk_emb = spk_emb.unsqueeze(0)
+
+        # (1, L, odim)
+        # use *_ to avoid bug in dygraph to static graph
+        hs, h_masks = self._forward(
+            xs=xs,
+            note=note,
+            note_dur=note_dur,
+            is_slur=is_slur,
+            ilens=ilens,
+            olens=olens,
+            ds=ds,
+            ps=ps,
+            es=es,
+            return_after_enc=True,
+            is_train_diffusion=True,
+            alpha=alpha,
+            spk_emb=spk_emb,
+            spk_id=spk_id, )
+        return hs, h_masks
+
+    def inference(
+            self,
+            text: paddle.Tensor,
+            note: paddle.Tensor,
+            note_dur: paddle.Tensor,
+            is_slur: paddle.Tensor,
+            durations: paddle.Tensor=None,
+            pitch: paddle.Tensor=None,
+            energy: paddle.Tensor=None,
+            alpha: float=1.0,
+            use_teacher_forcing: bool=False,
+            spk_emb=None,
+            spk_id=None,
+    ) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor]:
+        """Generate the sequence of features given the sequences of characters.
+
+        Args:
+            text(Tensor(int64)):
+                Input sequence of characters (T,).
+            note(Tensor(int64)):
+                Input note (element in music score) ids (T,).
+            note_dur(Tensor(float32)):
+                Input note durations in seconds (element in music score) (T,).
+            is_slur(Tensor(int64)):
+                Input slur (element in music score) ids (T,).
+            durations(Tensor(int64), optional):
+                Groundtruth of duration (T,).
+            pitch(Tensor, optional):
+                Groundtruth of token-averaged pitch (T, 1).
+            energy(Tensor, optional):
+                Groundtruth of token-averaged energy (T, 1).
+            alpha(float, optional):
+                Alpha to control the speed.
+            use_teacher_forcing(bool, optional):
+                Whether to use teacher forcing.
+                If true, groundtruth of duration, pitch and energy will be used.
+            spk_emb(Tensor, optional):
+                Speaker embedding vector (spk_embed_dim,). (Default value = None)
+            spk_id(Tensor(int64), optional):
+                Speaker id (1,). (Default value = None)
+
+        Returns:
+            Tuple[Tensor, Tensor, Tensor, Tensor]:
+                Output mel-spectrogram, durations, pitch and energy
+                (energy is None when the energy predictor is disabled).
+
+        """
+        xs = paddle.cast(text, 'int64').unsqueeze(0)
+        note = paddle.cast(note, 'int64').unsqueeze(0)
+        note_dur = paddle.cast(note_dur, 'float32').unsqueeze(0)
+        is_slur = paddle.cast(is_slur, 'int64').unsqueeze(0)
+        d, p, e = durations, pitch, energy
+        # setup batch axis
+        ilens = paddle.shape(xs)[1]
+
+        if spk_emb is not None:
+            spk_emb = spk_emb.unsqueeze(0)
+
+        if use_teacher_forcing:
+            # use groundtruth of duration, pitch, and energy
+            ds = d.unsqueeze(0) if d is not None else None
+            ps = p.unsqueeze(0) if p is not None else None
+            es = e.unsqueeze(0) if e is not None else None
+
+            # (1, L, odim)
+            _, outs, d_outs, p_outs, e_outs, _ = self._forward(
+                xs=xs,
+                note=note,
+                note_dur=note_dur,
+                is_slur=is_slur,
+                ilens=ilens,
+                ds=ds,
+                ps=ps,
+                es=es,
+                spk_emb=spk_emb,
+                spk_id=spk_id,
+                is_inference=True)
+        else:
+            # (1, L, odim)
+            _, outs, d_outs, p_outs, e_outs, _ = self._forward(
+                xs=xs,
+                note=note,
+                note_dur=note_dur,
+                is_slur=is_slur,
+                ilens=ilens,
+                is_inference=True,
+                alpha=alpha,
+                spk_emb=spk_emb,
+                spk_id=spk_id, )
+
+        if e_outs is None:
+            e_outs = [None]
+
+        return outs[0], d_outs[0], p_outs[0], e_outs[0]
+
+
+class FastSpeech2MIDILoss(FastSpeech2Loss):
+    """Loss function module for DiffSinger."""
+
+    def __init__(self, use_masking: bool=True,
+                 use_weighted_masking: bool=False):
+        """Initialize feed-forward Transformer loss module.
+        Args:
+            use_masking (bool):
+                Whether to apply masking for padded part in loss calculation.
+            use_weighted_masking (bool):
+                Whether to apply weighted masking in loss calculation.
+ """ + assert check_argument_types() + super().__init__(use_masking, use_weighted_masking) + + def forward( + self, + after_outs: paddle.Tensor, + before_outs: paddle.Tensor, + d_outs: paddle.Tensor, + p_outs: paddle.Tensor, + e_outs: paddle.Tensor, + ys: paddle.Tensor, + ds: paddle.Tensor, + ps: paddle.Tensor, + es: paddle.Tensor, + ilens: paddle.Tensor, + olens: paddle.Tensor, + spk_logits: paddle.Tensor=None, + spk_ids: paddle.Tensor=None, + ) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor, paddle.Tensor, + paddle.Tensor, ]: + """Calculate forward propagation. + + Args: + after_outs(Tensor): + Batch of outputs after postnets (B, Lmax, odim). + before_outs(Tensor): + Batch of outputs before postnets (B, Lmax, odim). + d_outs(Tensor): + Batch of outputs of duration predictor (B, Tmax). + p_outs(Tensor): + Batch of outputs of pitch predictor (B, Lmax, 1). + e_outs(Tensor): + Batch of outputs of energy predictor (B, Lmax, 1). + ys(Tensor): + Batch of target features (B, Lmax, odim). + ds(Tensor): + Batch of durations (B, Tmax). + ps(Tensor): + Batch of target frame-averaged pitch (B, Lmax, 1). + es(Tensor): + Batch of target frame-averaged energy (B, Lmax, 1). + ilens(Tensor): + Batch of the lengths of each input (B,). + olens(Tensor): + Batch of the lengths of each target (B,). + spk_logits(Option[Tensor]): + Batch of outputs after speaker classifier (B, Lmax, num_spk) + spk_ids(Option[Tensor]): + Batch of target spk_id (B,) + + + Returns: + + + """ + l1_loss = duration_loss = pitch_loss = energy_loss = speaker_loss = ssim_loss = 0.0 + + # apply mask to remove padded part + if self.use_masking: + # make feature for ssim loss + out_pad_masks = make_pad_mask(olens).unsqueeze(-1) + before_outs_ssim = masked_fill(before_outs, out_pad_masks, 0.0) + if not paddle.equal_all(after_outs, before_outs): + after_outs_ssim = masked_fill(after_outs, out_pad_masks, 0.0) + ys_ssim = masked_fill(ys, out_pad_masks, 0.0) + + out_masks = make_non_pad_mask(olens).unsqueeze(-1) + before_outs = before_outs.masked_select( + out_masks.broadcast_to(before_outs.shape)) + if not paddle.equal_all(after_outs, before_outs): + after_outs = after_outs.masked_select( + out_masks.broadcast_to(after_outs.shape)) + ys = ys.masked_select(out_masks.broadcast_to(ys.shape)) + duration_masks = make_non_pad_mask(ilens) + d_outs = d_outs.masked_select( + duration_masks.broadcast_to(d_outs.shape)) + ds = ds.masked_select(duration_masks.broadcast_to(ds.shape)) + pitch_masks = out_masks + p_outs = p_outs.masked_select( + pitch_masks.broadcast_to(p_outs.shape)) + ps = ps.masked_select(pitch_masks.broadcast_to(ps.shape)) + if e_outs is not None: + e_outs = e_outs.masked_select( + pitch_masks.broadcast_to(e_outs.shape)) + es = es.masked_select(pitch_masks.broadcast_to(es.shape)) + + if spk_logits is not None and spk_ids is not None: + batch_size = spk_ids.shape[0] + spk_ids = paddle.repeat_interleave(spk_ids, spk_logits.shape[1], + None) + spk_logits = paddle.reshape(spk_logits, + [-1, spk_logits.shape[-1]]) + mask_index = spk_logits.abs().sum(axis=1) != 0 + spk_ids = spk_ids[mask_index] + spk_logits = spk_logits[mask_index] + + # calculate loss + l1_loss = self.l1_criterion(before_outs, ys) + ssim_loss = 1.0 - ssim( + before_outs_ssim.unsqueeze(1), ys_ssim.unsqueeze(1)) + if not paddle.equal_all(after_outs, before_outs): + l1_loss += self.l1_criterion(after_outs, ys) + ssim_loss += ( + 1.0 - ssim(after_outs_ssim.unsqueeze(1), ys_ssim.unsqueeze(1))) + l1_loss = l1_loss * 0.5 + ssim_loss = ssim_loss * 0.5 + + duration_loss = 
self.duration_criterion(d_outs, ds) + pitch_loss = self.l1_criterion(p_outs, ps) + if e_outs is not None: + energy_loss = self.l1_criterion(e_outs, es) + + if spk_logits is not None and spk_ids is not None: + speaker_loss = self.ce_criterion(spk_logits, spk_ids) / batch_size + + # make weighted mask and apply it + if self.use_weighted_masking: + out_masks = make_non_pad_mask(olens).unsqueeze(-1) + out_weights = out_masks.cast(dtype=paddle.float32) / out_masks.cast( + dtype=paddle.float32).sum( + axis=1, keepdim=True) + out_weights /= ys.shape[0] * ys.shape[2] + duration_masks = make_non_pad_mask(ilens) + duration_weights = (duration_masks.cast(dtype=paddle.float32) / + duration_masks.cast(dtype=paddle.float32).sum( + axis=1, keepdim=True)) + duration_weights /= ds.shape[0] + + # apply weight + l1_loss = l1_loss.multiply(out_weights) + l1_loss = l1_loss.masked_select( + out_masks.broadcast_to(l1_loss.shape)).sum() + ssim_loss = ssim_loss.multiply(out_weights) + ssim_loss = ssim_loss.masked_select( + out_masks.broadcast_to(ssim_loss.shape)).sum() + duration_loss = (duration_loss.multiply(duration_weights) + .masked_select(duration_masks).sum()) + pitch_masks = out_masks + pitch_weights = out_weights + pitch_loss = pitch_loss.multiply(pitch_weights) + pitch_loss = pitch_loss.masked_select( + pitch_masks.broadcast_to(pitch_loss.shape)).sum() + if e_outs is not None: + energy_loss = energy_loss.multiply(pitch_weights) + energy_loss = energy_loss.masked_select( + pitch_masks.broadcast_to(energy_loss.shape)).sum() + + return l1_loss, ssim_loss, duration_loss, pitch_loss, energy_loss, speaker_loss diff --git a/paddlespeech/t2s/models/fastspeech2/fastspeech2.py b/paddlespeech/t2s/models/fastspeech2/fastspeech2.py index c790c8cb2..8ce19795e 100644 --- a/paddlespeech/t2s/models/fastspeech2/fastspeech2.py +++ b/paddlespeech/t2s/models/fastspeech2/fastspeech2.py @@ -93,6 +93,7 @@ class FastSpeech2(nn.Layer): transformer_dec_dropout_rate: float=0.1, transformer_dec_positional_dropout_rate: float=0.1, transformer_dec_attn_dropout_rate: float=0.1, + transformer_activation_type: str="relu", # for conformer conformer_pos_enc_layer_type: str="rel_pos", conformer_self_attn_layer_type: str="rel_selfattn", @@ -200,6 +201,8 @@ class FastSpeech2(nn.Layer): Dropout rate after decoder positional encoding. transformer_dec_attn_dropout_rate (float): Dropout rate in decoder self-attention module. + transformer_activation_type (str): + Activation function type in transformer. conformer_pos_enc_layer_type (str): Pos encoding layer type in conformer. conformer_self_attn_layer_type (str): @@ -250,7 +253,7 @@ class FastSpeech2(nn.Layer): Kernel size of energy embedding. energy_embed_dropout_rate (float): Dropout rate for energy embedding. - stop_gradient_from_energy_predictor(bool): + stop_gradient_from_energy_predictor (bool): Whether to stop gradient from energy predictor to encoder. spk_num (Optional[int]): Number of speakers. If not None, assume that the spk_embed_dim is not None, @@ -269,7 +272,7 @@ class FastSpeech2(nn.Layer): How to integrate tone embedding. init_type (str): How to initialize transformer parameters. - init_enc_alpha (float): + init_enc_alpha (float): Initial value of alpha in scaled pos encoding of the encoder. init_dec_alpha (float): Initial value of alpha in scaled pos encoding of the decoder. 
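The next hunk threads the new `transformer_activation_type` argument into the encoder construction. As a hedged illustration, a model config could opt into the newly registered activation like this (a hypothetical fragment only; every other FastSpeech2 argument is omitted):

```
# Hypothetical config fragment; "gelu" is the registry key this patch adds
# in paddlespeech/t2s/modules/activation.py (see the hunks that follow).
fastspeech2_params = {
    "adim": 384,                            # attention dimension
    "transformer_activation_type": "gelu",  # new knob, default "relu"
}
```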
@@ -344,7 +347,8 @@ class FastSpeech2(nn.Layer): normalize_before=encoder_normalize_before, concat_after=encoder_concat_after, positionwise_layer_type=positionwise_layer_type, - positionwise_conv_kernel_size=positionwise_conv_kernel_size, ) + positionwise_conv_kernel_size=positionwise_conv_kernel_size, + activation_type=transformer_activation_type) elif encoder_type == "conformer": self.encoder = ConformerEncoder( idim=idim, @@ -453,7 +457,8 @@ class FastSpeech2(nn.Layer): normalize_before=decoder_normalize_before, concat_after=decoder_concat_after, positionwise_layer_type=positionwise_layer_type, - positionwise_conv_kernel_size=positionwise_conv_kernel_size, ) + positionwise_conv_kernel_size=positionwise_conv_kernel_size, + activation_type=conformer_activation_type, ) elif decoder_type == "conformer": self.decoder = ConformerEncoder( idim=0, diff --git a/paddlespeech/t2s/modules/activation.py b/paddlespeech/t2s/modules/activation.py index 8d8cd62ef..f1c099b76 100644 --- a/paddlespeech/t2s/modules/activation.py +++ b/paddlespeech/t2s/modules/activation.py @@ -37,7 +37,8 @@ def get_activation(act, **kwargs): "selu": paddle.nn.SELU, "leakyrelu": paddle.nn.LeakyReLU, "swish": paddle.nn.Swish, - "glu": GLU + "glu": GLU, + "gelu": paddle.nn.GELU, } return activation_funcs[act](**kwargs) diff --git a/paddlespeech/t2s/modules/diffnet.py b/paddlespeech/t2s/modules/diffnet.py new file mode 100644 index 000000000..25339daea --- /dev/null +++ b/paddlespeech/t2s/modules/diffnet.py @@ -0,0 +1,245 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
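On the `get_activation` change above: the helper simply instantiates the class stored under the requested key, so the new entry is used as follows (a minimal sketch assuming a working paddlespeech install):

```
from paddlespeech.t2s.modules.activation import get_activation

act = get_activation("gelu")  # instantiates paddle.nn.GELU()
```

The new `diffnet.py` below then defines the DiffNet denoiser used by DiffSinger.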
+import math + +import paddle +import paddle.nn.functional as F +from paddle import nn + +from paddlespeech.t2s.modules.nets_utils import initialize +from paddlespeech.utils.initialize import _calculate_fan_in_and_fan_out +from paddlespeech.utils.initialize import kaiming_normal_ +from paddlespeech.utils.initialize import kaiming_uniform_ +from paddlespeech.utils.initialize import uniform_ +from paddlespeech.utils.initialize import zeros_ + + +def Conv1D(*args, **kwargs): + layer = nn.Conv1D(*args, **kwargs) + # Initialize the weight to be consistent with the official + kaiming_normal_(layer.weight) + + # Initialization is consistent with torch + if layer.bias is not None: + fan_in, _ = _calculate_fan_in_and_fan_out(layer.weight) + if fan_in != 0: + bound = 1 / math.sqrt(fan_in) + uniform_(layer.bias, -bound, bound) + return layer + + +# Initialization is consistent with torch +def Linear(*args, **kwargs): + layer = nn.Linear(*args, **kwargs) + kaiming_uniform_(layer.weight, a=math.sqrt(5)) + if layer.bias is not None: + fan_in, _ = _calculate_fan_in_and_fan_out(layer.weight) + bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0 + uniform_(layer.bias, -bound, bound) + return layer + + +class ResidualBlock(nn.Layer): + """ResidualBlock + + Args: + encoder_hidden (int, optional): + Input feature size of the 1D convolution, by default 256 + residual_channels (int, optional): + Feature size of the residual output(and also the input), by default 256 + gate_channels (int, optional): + Output feature size of the 1D convolution, by default 512 + kernel_size (int, optional): + Kernel size of the 1D convolution, by default 3 + dilation (int, optional): + Dilation of the 1D convolution, by default 4 + """ + + def __init__(self, + encoder_hidden: int=256, + residual_channels: int=256, + gate_channels: int=512, + kernel_size: int=3, + dilation: int=4): + super().__init__() + self.dilated_conv = Conv1D( + residual_channels, + gate_channels, + kernel_size, + padding=dilation, + dilation=dilation) + self.diffusion_projection = Linear(residual_channels, residual_channels) + self.conditioner_projection = Conv1D(encoder_hidden, gate_channels, 1) + self.output_projection = Conv1D(residual_channels, gate_channels, 1) + + def forward( + self, + x: paddle.Tensor, + diffusion_step: paddle.Tensor, + cond: paddle.Tensor, ): + """Calculate forward propagation. + Args: + spec (Tensor(float32)): input feature. (B, residual_channels, T) + diffusion_step (Tensor(int64)): The timestep input (adding noise step). (B,) + cond (Tensor(float32)): The auxiliary input (e.g. fastspeech2 encoder output). 
(B, residual_channels, T) + + Returns: + x (Tensor(float32)): output (B, residual_channels, T) + + """ + diffusion_step = self.diffusion_projection(diffusion_step).unsqueeze(-1) + cond = self.conditioner_projection(cond) + y = x + diffusion_step + + y = self.dilated_conv(y) + cond + + gate, filter = paddle.chunk(y, 2, axis=1) + y = F.sigmoid(gate) * paddle.tanh(filter) + + y = self.output_projection(y) + residual, skip = paddle.chunk(y, 2, axis=1) + return (x + residual) / math.sqrt(2.0), skip + + +class SinusoidalPosEmb(nn.Layer): + """Positional embedding + """ + + def __init__(self, dim: int=256): + super().__init__() + self.dim = dim + + def forward(self, x: paddle.Tensor): + x = paddle.cast(x, 'float32') + half_dim = self.dim // 2 + emb = math.log(10000) / (half_dim - 1) + emb = paddle.exp(paddle.arange(half_dim) * -emb) + emb = x[:, None] * emb[None, :] + emb = paddle.concat([emb.sin(), emb.cos()], axis=-1) + return emb + + +class DiffNet(nn.Layer): + """A Mel-Spectrogram Denoiser + + Args: + in_channels (int, optional): + Number of channels of the input mel-spectrogram, by default 80 + out_channels (int, optional): + Number of channels of the output mel-spectrogram, by default 80 + kernel_size (int, optional): + Kernel size of the residual blocks inside, by default 3 + layers (int, optional): + Number of residual blocks inside, by default 20 + stacks (int, optional): + The number of groups to split the residual blocks into, by default 5 + Within each group, the dilation of the residual block grows exponentially. + residual_channels (int, optional): + Residual channel of the residual blocks, by default 256 + gate_channels (int, optional): + Gate channel of the residual blocks, by default 512 + skip_channels (int, optional): + Skip channel of the residual blocks, by default 256 + aux_channels (int, optional): + Auxiliary channel of the residual blocks, by default 256 + dropout (float, optional): + Dropout of the residual blocks, by default 0. 
+ bias (bool, optional): + Whether to use bias in residual blocks, by default True + use_weight_norm (bool, optional): + Whether to use weight norm in all convolutions, by default False + """ + + def __init__( + self, + in_channels: int=80, + out_channels: int=80, + kernel_size: int=3, + layers: int=20, + stacks: int=5, + residual_channels: int=256, + gate_channels: int=512, + skip_channels: int=256, + aux_channels: int=256, + dropout: float=0., + bias: bool=True, + use_weight_norm: bool=False, + init_type: str="kaiming_normal", ): + super().__init__() + + self.in_channels = in_channels + self.out_channels = out_channels + self.layers = layers + self.aux_channels = aux_channels + self.residual_channels = residual_channels + self.gate_channels = gate_channels + self.kernel_size = kernel_size + self.dilation_cycle_length = layers // stacks + self.skip_channels = skip_channels + + self.input_projection = Conv1D(self.in_channels, self.residual_channels, + 1) + self.diffusion_embedding = SinusoidalPosEmb(self.residual_channels) + dim = self.residual_channels + self.mlp = nn.Sequential( + Linear(dim, dim * 4), nn.Mish(), Linear(dim * 4, dim)) + self.residual_layers = nn.LayerList([ + ResidualBlock( + encoder_hidden=self.aux_channels, + residual_channels=self.residual_channels, + gate_channels=self.gate_channels, + kernel_size=self.kernel_size, + dilation=2**(i % self.dilation_cycle_length)) + for i in range(self.layers) + ]) + self.skip_projection = Conv1D(self.residual_channels, + self.skip_channels, 1) + self.output_projection = Conv1D(self.residual_channels, + self.out_channels, 1) + zeros_(self.output_projection.weight) + + def forward( + self, + spec: paddle.Tensor, + diffusion_step: paddle.Tensor, + cond: paddle.Tensor, ): + """Calculate forward propagation. + Args: + spec (Tensor(float32)): The input mel-spectrogram. (B, n_mel, T) + diffusion_step (Tensor(int64)): The timestep input (adding noise step). (B,) + cond (Tensor(float32)): The auxiliary input (e.g. fastspeech2 encoder output). 
(B, D_enc_out, T) + + Returns: + x (Tensor(float32)): pred noise (B, n_mel, T) + + """ + x = spec + x = self.input_projection(x) # x [B, residual_channel, T] + + x = F.relu(x) + diffusion_step = self.diffusion_embedding(diffusion_step) + diffusion_step = self.mlp(diffusion_step) + skip = [] + for layer_id, layer in enumerate(self.residual_layers): + x, skip_connection = layer( + x=x, + diffusion_step=diffusion_step, + cond=cond, ) + skip.append(skip_connection) + x = paddle.sum( + paddle.stack(skip), axis=0) / math.sqrt(len(self.residual_layers)) + x = self.skip_projection(x) + x = F.relu(x) + x = self.output_projection(x) # [B, 80, T] + return x diff --git a/paddlespeech/t2s/modules/diffusion.py b/paddlespeech/t2s/modules/diffusion.py index be684ce38..3222a8032 100644 --- a/paddlespeech/t2s/modules/diffusion.py +++ b/paddlespeech/t2s/modules/diffusion.py @@ -17,6 +17,7 @@ from typing import Callable from typing import Optional from typing import Tuple +import numpy as np import paddle import ppdiffusers from paddle import nn @@ -27,170 +28,6 @@ from paddlespeech.t2s.modules.nets_utils import initialize from paddlespeech.t2s.modules.residual_block import WaveNetResidualBlock -class WaveNetDenoiser(nn.Layer): - """A Mel-Spectrogram Denoiser modified from WaveNet - - Args: - in_channels (int, optional): - Number of channels of the input mel-spectrogram, by default 80 - out_channels (int, optional): - Number of channels of the output mel-spectrogram, by default 80 - kernel_size (int, optional): - Kernel size of the residual blocks inside, by default 3 - layers (int, optional): - Number of residual blocks inside, by default 20 - stacks (int, optional): - The number of groups to split the residual blocks into, by default 5 - Within each group, the dilation of the residual block grows exponentially. - residual_channels (int, optional): - Residual channel of the residual blocks, by default 256 - gate_channels (int, optional): - Gate channel of the residual blocks, by default 512 - skip_channels (int, optional): - Skip channel of the residual blocks, by default 256 - aux_channels (int, optional): - Auxiliary channel of the residual blocks, by default 256 - dropout (float, optional): - Dropout of the residual blocks, by default 0. 
- bias (bool, optional): - Whether to use bias in residual blocks, by default True - use_weight_norm (bool, optional): - Whether to use weight norm in all convolutions, by default False - """ - - def __init__( - self, - in_channels: int=80, - out_channels: int=80, - kernel_size: int=3, - layers: int=20, - stacks: int=5, - residual_channels: int=256, - gate_channels: int=512, - skip_channels: int=256, - aux_channels: int=256, - dropout: float=0., - bias: bool=True, - use_weight_norm: bool=False, - init_type: str="kaiming_normal", ): - super().__init__() - - # initialize parameters - initialize(self, init_type) - - self.in_channels = in_channels - self.out_channels = out_channels - self.aux_channels = aux_channels - self.layers = layers - self.stacks = stacks - self.kernel_size = kernel_size - - assert layers % stacks == 0 - layers_per_stack = layers // stacks - - self.first_t_emb = nn.Sequential( - Timesteps( - residual_channels, - flip_sin_to_cos=False, - downscale_freq_shift=1), - nn.Linear(residual_channels, residual_channels * 4), - nn.Mish(), nn.Linear(residual_channels * 4, residual_channels)) - self.t_emb_layers = nn.LayerList([ - nn.Linear(residual_channels, residual_channels) - for _ in range(layers) - ]) - - self.first_conv = nn.Conv1D( - in_channels, residual_channels, 1, bias_attr=True) - self.first_act = nn.ReLU() - - self.conv_layers = nn.LayerList() - for layer in range(layers): - dilation = 2**(layer % layers_per_stack) - conv = WaveNetResidualBlock( - kernel_size=kernel_size, - residual_channels=residual_channels, - gate_channels=gate_channels, - skip_channels=skip_channels, - aux_channels=aux_channels, - dilation=dilation, - dropout=dropout, - bias=bias) - self.conv_layers.append(conv) - - final_conv = nn.Conv1D(skip_channels, out_channels, 1, bias_attr=True) - nn.initializer.Constant(0.0)(final_conv.weight) - self.last_conv_layers = nn.Sequential(nn.ReLU(), - nn.Conv1D( - skip_channels, - skip_channels, - 1, - bias_attr=True), - nn.ReLU(), final_conv) - - if use_weight_norm: - self.apply_weight_norm() - - def forward(self, x, t, c): - """Denoise mel-spectrogram. - - Args: - x(Tensor): - Shape (N, C_in, T), The input mel-spectrogram. - t(Tensor): - Shape (N), The timestep input. - c(Tensor): - Shape (N, C_aux, T'). The auxiliary input (e.g. fastspeech2 encoder output). - - Returns: - Tensor: Shape (N, C_out, T), the denoised mel-spectrogram. - """ - assert c.shape[-1] == x.shape[-1] - - if t.shape[0] != x.shape[0]: - t = t.tile([x.shape[0]]) - t_emb = self.first_t_emb(t) - t_embs = [ - t_emb_layer(t_emb)[..., None] for t_emb_layer in self.t_emb_layers - ] - - x = self.first_conv(x) - x = self.first_act(x) - skips = 0 - for f, t in zip(self.conv_layers, t_embs): - x = x + t - x, s = f(x, c) - skips += s - skips *= math.sqrt(1.0 / len(self.conv_layers)) - - x = self.last_conv_layers(skips) - return x - - def apply_weight_norm(self): - """Recursively apply weight normalization to all the Convolution layers - in the sublayers. - """ - - def _apply_weight_norm(layer): - if isinstance(layer, (nn.Conv1D, nn.Conv2D)): - nn.utils.weight_norm(layer) - - self.apply(_apply_weight_norm) - - def remove_weight_norm(self): - """Recursively remove weight normalization from all the Convolution - layers in the sublayers. 
- """ - - def _remove_weight_norm(layer): - try: - nn.utils.remove_weight_norm(layer) - except ValueError: - pass - - self.apply(_remove_weight_norm) - - class GaussianDiffusion(nn.Layer): """Common Gaussian Diffusion Denoising Model Module @@ -207,6 +44,13 @@ class GaussianDiffusion(nn.Layer): beta schedule parameter for the scheduler, by default 'squaredcos_cap_v2' (cosine schedule). num_max_timesteps (int, optional): The max timestep transition from real to noise, by default None. + stretch (bool, optional): + Whether to stretch before diffusion, by defalut True. + min_values: (paddle.Tensor): + The minimum value of the feature to stretch. + max_values: (paddle.Tensor): + The maximum value of the feature to stretch. + Examples: >>> import paddle @@ -294,13 +138,17 @@ class GaussianDiffusion(nn.Layer): """ - def __init__(self, - denoiser: nn.Layer, - num_train_timesteps: Optional[int]=1000, - beta_start: Optional[float]=0.0001, - beta_end: Optional[float]=0.02, - beta_schedule: Optional[str]="squaredcos_cap_v2", - num_max_timesteps: Optional[int]=None): + def __init__( + self, + denoiser: nn.Layer, + num_train_timesteps: Optional[int]=1000, + beta_start: Optional[float]=0.0001, + beta_end: Optional[float]=0.02, + beta_schedule: Optional[str]="squaredcos_cap_v2", + num_max_timesteps: Optional[int]=None, + stretch: bool=True, + min_values: paddle.Tensor=None, + max_values: paddle.Tensor=None, ): super().__init__() self.num_train_timesteps = num_train_timesteps @@ -315,6 +163,22 @@ class GaussianDiffusion(nn.Layer): beta_end=beta_end, beta_schedule=beta_schedule) self.num_max_timesteps = num_max_timesteps + self.stretch = stretch + self.min_values = min_values + self.max_values = max_values + + def norm_spec(self, x): + """ + Linearly map x to [-1, 1] + Args: + x: [B, T, N] + """ + return (x - self.min_values) / (self.max_values - self.min_values + ) * 2 - 1 + + def denorm_spec(self, x): + return (x + 1) / 2 * (self.max_values - self.min_values + ) + self.min_values def forward(self, x: paddle.Tensor, cond: Optional[paddle.Tensor]=None ) -> Tuple[paddle.Tensor, paddle.Tensor]: @@ -333,6 +197,11 @@ class GaussianDiffusion(nn.Layer): The noises which is added to the input. """ + if self.stretch: + x = x.transpose((0, 2, 1)) + x = self.norm_spec(x) + x = x.transpose((0, 2, 1)) + noise_scheduler = self.noise_scheduler # Sample noise that we'll add to the mel-spectrograms @@ -360,7 +229,7 @@ class GaussianDiffusion(nn.Layer): num_inference_steps: Optional[int]=1000, strength: Optional[float]=None, scheduler_type: Optional[str]="ddpm", - clip_noise: Optional[bool]=True, + clip_noise: Optional[bool]=False, clip_noise_range: Optional[Tuple[float, float]]=(-1, 1), callback: Optional[Callable[[int, int, int, paddle.Tensor], None]]=None, @@ -369,9 +238,9 @@ class GaussianDiffusion(nn.Layer): Args: noise (Tensor): - The input tensor as a starting point for denoising. + The input tensor as a starting point for denoising. cond (Tensor, optional): - Conditional input for compute noises. + Conditional input for compute noises. (N, C_aux, T) ref_x (Tensor, optional): The real output for the denoising process to refer. num_inference_steps (int, optional): @@ -382,6 +251,7 @@ class GaussianDiffusion(nn.Layer): scheduler_type (str, optional): Noise scheduler for generate noises. Choose a great scheduler can skip many denoising step, by default 'ddpm'. + only support 'ddpm' now ! clip_noise (bool, optional): Whether to clip each denoised output, by default True. 
clip_noise_range (tuple, optional): @@ -425,48 +295,33 @@ class GaussianDiffusion(nn.Layer): # set timesteps scheduler.set_timesteps(num_inference_steps) - # prepare first noise variables noisy_input = noise - timesteps = scheduler.timesteps - if ref_x is not None: - init_timestep = None - if strength is None or strength < 0. or strength > 1.: - strength = None - if self.num_max_timesteps is not None: - strength = self.num_max_timesteps / self.num_train_timesteps - if strength is not None: - # get the original timestep using init_timestep - init_timestep = min( - int(num_inference_steps * strength), num_inference_steps) - t_start = max(num_inference_steps - init_timestep, 0) - timesteps = scheduler.timesteps[t_start:] - num_inference_steps = num_inference_steps - t_start - noisy_input = scheduler.add_noise( - ref_x, noise, timesteps[:1].tile([noise.shape[0]])) - - # denoising loop + if self.stretch and ref_x is not None: + ref_x = ref_x.transpose((0, 2, 1)) + ref_x = self.norm_spec(ref_x) + ref_x = ref_x.transpose((0, 2, 1)) + + # for ddpm + timesteps = paddle.to_tensor( + np.flipud(np.arange(num_inference_steps))) + noisy_input = scheduler.add_noise(ref_x, noise, timesteps[0]) + denoised_output = noisy_input if clip_noise: n_min, n_max = clip_noise_range denoised_output = paddle.clip(denoised_output, n_min, n_max) - num_warmup_steps = len( - timesteps) - num_inference_steps * scheduler.order for i, t in enumerate(timesteps): denoised_output = scheduler.scale_model_input(denoised_output, t) - - # predict the noise residual noise_pred = self.denoiser(denoised_output, t, cond) - # compute the previous noisy sample x_t -> x_t-1 denoised_output = scheduler.step(noise_pred, t, denoised_output).prev_sample if clip_noise: denoised_output = paddle.clip(denoised_output, n_min, n_max) - # call the callback, if provided - if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and - (i + 1) % scheduler.order == 0): - if callback is not None and i % callback_steps == 0: - callback(i, t, len(timesteps), denoised_output) + if self.stretch: + denoised_output = denoised_output.transpose((0, 2, 1)) + denoised_output = self.denorm_spec(denoised_output) + denoised_output = denoised_output.transpose((0, 2, 1)) return denoised_output diff --git a/paddlespeech/t2s/modules/masked_fill.py b/paddlespeech/t2s/modules/masked_fill.py index b32222547..1445a926a 100644 --- a/paddlespeech/t2s/modules/masked_fill.py +++ b/paddlespeech/t2s/modules/masked_fill.py @@ -38,11 +38,9 @@ def masked_fill(xs: paddle.Tensor, value: Union[float, int]): # comment following line for converting dygraph to static graph. 
# assert is_broadcastable(xs.shape, mask.shape) is True - # bshape = paddle.broadcast_shape(xs.shape, mask.shape) bshape = broadcast_shape(xs.shape, mask.shape) mask.stop_gradient = True mask = mask.broadcast_to(bshape) - trues = paddle.ones_like(xs) * value mask = mask.cast(dtype=paddle.bool) xs = paddle.where(mask, trues, xs) diff --git a/paddlespeech/t2s/modules/predictor/variance_predictor.py b/paddlespeech/t2s/modules/predictor/variance_predictor.py index 4c2a67cc4..197f73595 100644 --- a/paddlespeech/t2s/modules/predictor/variance_predictor.py +++ b/paddlespeech/t2s/modules/predictor/variance_predictor.py @@ -96,7 +96,7 @@ class VariancePredictor(nn.Layer): xs = f(xs) # (B, Tmax, 1) xs = self.linear(xs.transpose([0, 2, 1])) - + if x_masks is not None: xs = masked_fill(xs, x_masks, 0.0) return xs diff --git a/paddlespeech/t2s/modules/transformer/encoder.py b/paddlespeech/t2s/modules/transformer/encoder.py index f2aed5892..0fd94689d 100644 --- a/paddlespeech/t2s/modules/transformer/encoder.py +++ b/paddlespeech/t2s/modules/transformer/encoder.py @@ -15,6 +15,7 @@ from typing import List from typing import Union +import paddle from paddle import nn from paddlespeech.t2s.modules.activation import get_activation @@ -390,7 +391,13 @@ class TransformerEncoder(BaseEncoder): padding_idx=padding_idx, encoder_type="transformer") - def forward(self, xs, masks): + def forward(self, + xs: paddle.Tensor, + masks: paddle.Tensor, + note_emb: paddle.Tensor=None, + note_dur_emb: paddle.Tensor=None, + is_slur_emb: paddle.Tensor=None, + scale: int=16): """Encoder input sequence. Args: @@ -398,6 +405,12 @@ class TransformerEncoder(BaseEncoder): Input tensor (#batch, time, idim). masks(Tensor): Mask tensor (#batch, 1, time). + note_emb(Tensor): + Input tensor (#batch, time, attention_dim). + note_dur_emb(Tensor): + Input tensor (#batch, time, attention_dim). + is_slur_emb(Tensor): + Input tensor (#batch, time, attention_dim). Returns: Tensor: @@ -406,6 +419,8 @@ class TransformerEncoder(BaseEncoder): Mask tensor (#batch, 1, time). """ xs = self.embed(xs) + if note_emb is not None: + xs = scale * xs + note_emb + note_dur_emb + is_slur_emb xs, masks = self.encoders(xs, masks) if self.normalize_before: xs = self.after_norm(xs) diff --git a/paddlespeech/t2s/modules/wavenet_denoiser.py b/paddlespeech/t2s/modules/wavenet_denoiser.py new file mode 100644 index 000000000..f84a0893d --- /dev/null +++ b/paddlespeech/t2s/modules/wavenet_denoiser.py @@ -0,0 +1,191 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
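On the `TransformerEncoder.forward` change above: the music-score embeddings are fused additively with the phone embeddings, which are scaled by `scale` (default 16). A shape-level sketch with arbitrary sizes (all tensors share `(B, T, adim)`; the random values are placeholders):

```
import paddle

B, T, adim, scale = 2, 7, 256, 16
xs = paddle.randn([B, T, adim])            # embedded phone ids
note_emb = paddle.randn([B, T, adim])      # note embedding
note_dur_emb = paddle.randn([B, T, adim])  # note-duration embedding
is_slur_emb = paddle.randn([B, T, adim])   # slur embedding
xs = scale * xs + note_emb + note_dur_emb + is_slur_emb
assert xs.shape == [B, T, adim]
```

The new `wavenet_denoiser.py` below then restores the `WaveNetDenoiser` implementation that was removed from `diffusion.py` earlier in this patch.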
+import math +from typing import Callable +from typing import Optional +from typing import Tuple + +import numpy as np +import paddle +import ppdiffusers +from paddle import nn +from ppdiffusers.models.embeddings import Timesteps +from ppdiffusers.schedulers import DDPMScheduler + +from paddlespeech.t2s.modules.nets_utils import initialize +from paddlespeech.t2s.modules.residual_block import WaveNetResidualBlock + + +class WaveNetDenoiser(nn.Layer): + """A Mel-Spectrogram Denoiser modified from WaveNet + + Args: + in_channels (int, optional): + Number of channels of the input mel-spectrogram, by default 80 + out_channels (int, optional): + Number of channels of the output mel-spectrogram, by default 80 + kernel_size (int, optional): + Kernel size of the residual blocks inside, by default 3 + layers (int, optional): + Number of residual blocks inside, by default 20 + stacks (int, optional): + The number of groups to split the residual blocks into, by default 5 + Within each group, the dilation of the residual block grows exponentially. + residual_channels (int, optional): + Residual channel of the residual blocks, by default 256 + gate_channels (int, optional): + Gate channel of the residual blocks, by default 512 + skip_channels (int, optional): + Skip channel of the residual blocks, by default 256 + aux_channels (int, optional): + Auxiliary channel of the residual blocks, by default 256 + dropout (float, optional): + Dropout of the residual blocks, by default 0. + bias (bool, optional): + Whether to use bias in residual blocks, by default True + use_weight_norm (bool, optional): + Whether to use weight norm in all convolutions, by default False + """ + + def __init__( + self, + in_channels: int=80, + out_channels: int=80, + kernel_size: int=3, + layers: int=20, + stacks: int=5, + residual_channels: int=256, + gate_channels: int=512, + skip_channels: int=256, + aux_channels: int=256, + dropout: float=0., + bias: bool=True, + use_weight_norm: bool=False, + init_type: str="kaiming_normal", ): + super().__init__() + + # initialize parameters + initialize(self, init_type) + + self.in_channels = in_channels + self.out_channels = out_channels + self.aux_channels = aux_channels + self.layers = layers + self.stacks = stacks + self.kernel_size = kernel_size + + assert layers % stacks == 0 + layers_per_stack = layers // stacks + + self.first_t_emb = nn.Sequential( + Timesteps( + residual_channels, + flip_sin_to_cos=False, + downscale_freq_shift=1), + nn.Linear(residual_channels, residual_channels * 4), + nn.Mish(), nn.Linear(residual_channels * 4, residual_channels)) + self.t_emb_layers = nn.LayerList([ + nn.Linear(residual_channels, residual_channels) + for _ in range(layers) + ]) + + self.first_conv = nn.Conv1D( + in_channels, residual_channels, 1, bias_attr=True) + self.first_act = nn.ReLU() + + self.conv_layers = nn.LayerList() + for layer in range(layers): + dilation = 2**(layer % layers_per_stack) + conv = WaveNetResidualBlock( + kernel_size=kernel_size, + residual_channels=residual_channels, + gate_channels=gate_channels, + skip_channels=skip_channels, + aux_channels=aux_channels, + dilation=dilation, + dropout=dropout, + bias=bias) + self.conv_layers.append(conv) + + final_conv = nn.Conv1D(skip_channels, out_channels, 1, bias_attr=True) + nn.initializer.Constant(0.0)(final_conv.weight) + self.last_conv_layers = nn.Sequential(nn.ReLU(), + nn.Conv1D( + skip_channels, + skip_channels, + 1, + bias_attr=True), + nn.ReLU(), final_conv) + + if use_weight_norm: + self.apply_weight_norm() + + def 
forward(self, x: paddle.Tensor, t: paddle.Tensor, c: paddle.Tensor): + """Denoise mel-spectrogram. + + Args: + x(Tensor): + Shape (B, C_in, T), The input mel-spectrogram. + t(Tensor): + Shape (B), The timestep input. + c(Tensor): + Shape (B, C_aux, T'). The auxiliary input (e.g. fastspeech2 encoder output). + + Returns: + Tensor: Shape (B, C_out, T), the pred noise. + """ + assert c.shape[-1] == x.shape[-1] + + if t.shape[0] != x.shape[0]: + t = t.tile([x.shape[0]]) + t_emb = self.first_t_emb(t) + t_embs = [ + t_emb_layer(t_emb)[..., None] for t_emb_layer in self.t_emb_layers + ] + + x = self.first_conv(x) + x = self.first_act(x) + skips = 0 + for f, t in zip(self.conv_layers, t_embs): + x = x + t + x, s = f(x, c) + skips += s + skips *= math.sqrt(1.0 / len(self.conv_layers)) + + x = self.last_conv_layers(skips) + return x + + def apply_weight_norm(self): + """Recursively apply weight normalization to all the Convolution layers + in the sublayers. + """ + + def _apply_weight_norm(layer): + if isinstance(layer, (nn.Conv1D, nn.Conv2D)): + nn.utils.weight_norm(layer) + + self.apply(_apply_weight_norm) + + def remove_weight_norm(self): + """Recursively remove weight normalization from all the Convolution + layers in the sublayers. + """ + + def _remove_weight_norm(layer): + try: + nn.utils.remove_weight_norm(layer) + except ValueError: + pass + + self.apply(_remove_weight_norm) From 34f2995bcf823f20d3ffe0bb602fbdfe1ca90040 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=80=81=E8=99=8E=E4=BC=9A=E6=B8=B8=E6=B3=B3?= Date: Mon, 13 Mar 2023 18:35:49 +0800 Subject: [PATCH 11/37] [TTS][Paddle-Lite] Add Chinese C++ TTS Frontend, let TTS Arm Linux demo support the synthesis of arbitrary Chinese sentences (#3018) * [TTS] add a TTS CPP frontend demo --- demos/TTSArmLinux/.gitignore | 4 + demos/TTSArmLinux/README.md | 8 +- demos/TTSArmLinux/build-depends.sh | 1 + demos/TTSArmLinux/build.sh | 17 +- demos/TTSArmLinux/clean.sh | 9 + demos/TTSArmLinux/config.sh | 5 +- demos/TTSArmLinux/download.sh | 14 + demos/TTSArmLinux/front.conf | 21 + demos/TTSArmLinux/run.sh | 13 +- demos/TTSArmLinux/src/CMakeLists.txt | 39 +- demos/TTSArmLinux/src/Predictor.hpp | 268 +++-- demos/TTSArmLinux/src/TTSCppFrontend | 1 + demos/TTSArmLinux/src/main.cc | 146 ++- demos/TTSArmLinux/src/third-party | 1 + demos/TTSCppFrontend/.gitignore | 2 + demos/TTSCppFrontend/CMakeLists.txt | 63 ++ demos/TTSCppFrontend/README.md | 55 ++ demos/TTSCppFrontend/build-depends.sh | 20 + demos/TTSCppFrontend/build.sh | 21 + demos/TTSCppFrontend/clean.sh | 10 + demos/TTSCppFrontend/download.sh | 62 ++ demos/TTSCppFrontend/front_demo/front.conf | 21 + .../TTSCppFrontend/front_demo/front_demo.cpp | 65 ++ .../gentools/gen_dict_paddlespeech.py | 87 ++ .../front_demo/gentools/genid.py | 22 + .../front_demo/gentools/word2phones.py | 37 + demos/TTSCppFrontend/run_front_demo.sh | 7 + demos/TTSCppFrontend/src/base/type_conv.cpp | 18 + demos/TTSCppFrontend/src/base/type_conv.h | 18 + .../src/front/front_interface.cpp | 933 ++++++++++++++++++ .../src/front/front_interface.h | 156 +++ .../src/front/text_normalize.cpp | 462 +++++++++ .../TTSCppFrontend/src/front/text_normalize.h | 62 ++ .../TTSCppFrontend/third-party/CMakeLists.txt | 64 ++ 34 files changed, 2573 insertions(+), 159 deletions(-) create mode 120000 demos/TTSArmLinux/build-depends.sh create mode 100644 demos/TTSArmLinux/front.conf create mode 120000 demos/TTSArmLinux/src/TTSCppFrontend create mode 120000 demos/TTSArmLinux/src/third-party create mode 100644 demos/TTSCppFrontend/.gitignore create mode 
100644 demos/TTSCppFrontend/CMakeLists.txt
 create mode 100644 demos/TTSCppFrontend/README.md
 create mode 100755 demos/TTSCppFrontend/build-depends.sh
 create mode 100755 demos/TTSCppFrontend/build.sh
 create mode 100755 demos/TTSCppFrontend/clean.sh
 create mode 100755 demos/TTSCppFrontend/download.sh
 create mode 100644 demos/TTSCppFrontend/front_demo/front.conf
 create mode 100644 demos/TTSCppFrontend/front_demo/front_demo.cpp
 create mode 100644 demos/TTSCppFrontend/front_demo/gentools/gen_dict_paddlespeech.py
 create mode 100644 demos/TTSCppFrontend/front_demo/gentools/genid.py
 create mode 100644 demos/TTSCppFrontend/front_demo/gentools/word2phones.py
 create mode 100755 demos/TTSCppFrontend/run_front_demo.sh
 create mode 100644 demos/TTSCppFrontend/src/base/type_conv.cpp
 create mode 100644 demos/TTSCppFrontend/src/base/type_conv.h
 create mode 100644 demos/TTSCppFrontend/src/front/front_interface.cpp
 create mode 100644 demos/TTSCppFrontend/src/front/front_interface.h
 create mode 100644 demos/TTSCppFrontend/src/front/text_normalize.cpp
 create mode 100644 demos/TTSCppFrontend/src/front/text_normalize.h
 create mode 100644 demos/TTSCppFrontend/third-party/CMakeLists.txt

diff --git a/demos/TTSArmLinux/.gitignore b/demos/TTSArmLinux/.gitignore
index 13135e376..f18480d7a 100644
--- a/demos/TTSArmLinux/.gitignore
+++ b/demos/TTSArmLinux/.gitignore
@@ -1,4 +1,8 @@
+# directories
 build/
 output/
 libs/
 models/
+
+# symlink
+dict
diff --git a/demos/TTSArmLinux/README.md b/demos/TTSArmLinux/README.md
index 32b85e0a4..50ae1e4bf 100644
--- a/demos/TTSArmLinux/README.md
+++ b/demos/TTSArmLinux/README.md
@@ -12,7 +12,7 @@

 ```
 # Ubuntu
-sudo apt install build-essential cmake wget tar unzip
+sudo apt install build-essential cmake pkg-config wget tar unzip

 # CentOS
 sudo yum groupinstall "Development Tools"
@@ -45,10 +45,14 @@ cd PaddleSpeech/demos/TTSArmLinux

 ```
 ./run.sh
+./run.sh --sentence "语音合成测试"
+./run.sh --sentence "输出到指定的音频文件" --output_wav ./output/test.wav
+./run.sh --help
 ```

-This converts the ten sentences defined in the `sentencesToChoose` array in [src/main.cpp](src/main.cpp) into `wav` files saved in the `output` folder.
+Only Chinese synthesis is supported at the moment; any English in the input will crash the program.

+If `--wav_file` is not specified, the audio is written to `./output/tts.wav` by default.

 ## Manually build the Paddle Lite library
diff --git a/demos/TTSArmLinux/build-depends.sh b/demos/TTSArmLinux/build-depends.sh
new file mode 120000
index 000000000..fd3aec9c8
--- /dev/null
+++ b/demos/TTSArmLinux/build-depends.sh
@@ -0,0 +1 @@
+src/TTSCppFrontend/build-depends.sh
\ No newline at end of file
diff --git a/demos/TTSArmLinux/build.sh b/demos/TTSArmLinux/build.sh
index c872e5749..5d31173ef 100755
--- a/demos/TTSArmLinux/build.sh
+++ b/demos/TTSArmLinux/build.sh
@@ -1,8 +1,11 @@
 #!/bin/bash
 set -e
+set -x

 cd "$(dirname "$(realpath "$0")")"

+BASE_DIR="$PWD"
+
 # load configure
 . ./config.sh

@@ -10,11 +13,17 @@ cd "$(dirname "$(realpath "$0")")"
 echo "ARM_ABI is ${ARM_ABI}"
 echo "PADDLE_LITE_DIR is ${PADDLE_LITE_DIR}"

-rm -rf build
-mkdir -p build
-cd build
+echo "Build depends..."
+./build-depends.sh "$@"

+mkdir -p "$BASE_DIR/build"
+cd "$BASE_DIR/build"
 cmake -DPADDLE_LITE_DIR="${PADDLE_LITE_DIR}" -DARM_ABI="${ARM_ABI}" ../src
-make
+
+if [ "$*" = "" ]; then
+    make -j$(nproc)
+else
+    make "$@"
+fi

 echo "make successful!"
diff --git a/demos/TTSArmLinux/clean.sh b/demos/TTSArmLinux/clean.sh
index 1ea365566..2743801c3 100755
--- a/demos/TTSArmLinux/clean.sh
+++ b/demos/TTSArmLinux/clean.sh
@@ -1,8 +1,11 @@
 #!/bin/bash
 set -e
+set -x

 cd "$(dirname "$(realpath "$0")")"

+BASE_DIR="$PWD"
+
 # load configure
 .
diff --git a/demos/TTSArmLinux/build-depends.sh b/demos/TTSArmLinux/build-depends.sh
new file mode 120000
index 000000000..fd3aec9c8
--- /dev/null
+++ b/demos/TTSArmLinux/build-depends.sh
@@ -0,0 +1 @@
+src/TTSCppFrontend/build-depends.sh
\ No newline at end of file
diff --git a/demos/TTSArmLinux/build.sh b/demos/TTSArmLinux/build.sh
index c872e5749..5d31173ef 100755
--- a/demos/TTSArmLinux/build.sh
+++ b/demos/TTSArmLinux/build.sh
@@ -1,8 +1,11 @@
 #!/bin/bash
 set -e
+set -x
 
 cd "$(dirname "$(realpath "$0")")"
 
+BASE_DIR="$PWD"
+
 # load configure
 . ./config.sh
 
@@ -10,11 +13,17 @@ cd "$(dirname "$(realpath "$0")")"
 echo "ARM_ABI is ${ARM_ABI}"
 echo "PADDLE_LITE_DIR is ${PADDLE_LITE_DIR}"
 
-rm -rf build
-mkdir -p build
-cd build
+echo "Build depends..."
+./build-depends.sh "$@"
 
+mkdir -p "$BASE_DIR/build"
+cd "$BASE_DIR/build"
 cmake -DPADDLE_LITE_DIR="${PADDLE_LITE_DIR}" -DARM_ABI="${ARM_ABI}" ../src
-make
+
+if [ "$*" = "" ]; then
+    make -j$(nproc)
+else
+    make "$@"
+fi
 
 echo "make successful!"
diff --git a/demos/TTSArmLinux/clean.sh b/demos/TTSArmLinux/clean.sh
index 1ea365566..2743801c3 100755
--- a/demos/TTSArmLinux/clean.sh
+++ b/demos/TTSArmLinux/clean.sh
@@ -1,8 +1,11 @@
 #!/bin/bash
 set -e
+set -x
 
 cd "$(dirname "$(realpath "$0")")"
 
+BASE_DIR="$PWD"
+
 # load configure
 . ./config.sh
@@ -12,3 +15,9 @@ set -x
 rm -rf "$OUTPUT_DIR"
 rm -rf "$LIBS_DIR"
 rm -rf "$MODELS_DIR"
+rm -rf "$BASE_DIR/build"
+
+"$BASE_DIR/src/TTSCppFrontend/clean.sh"
+
+# remove the dict symlink
+rm "$BASE_DIR/dict"
diff --git a/demos/TTSArmLinux/config.sh b/demos/TTSArmLinux/config.sh
index 0a04f18ee..bf38d7d6d 100644
--- a/demos/TTSArmLinux/config.sh
+++ b/demos/TTSArmLinux/config.sh
@@ -10,5 +10,6 @@ OUTPUT_DIR="${PWD}/output"
 PADDLE_LITE_DIR="${LIBS_DIR}/inference_lite_lib.armlinux.${ARM_ABI}.gcc.with_extra.with_cv/cxx"
 #PADDLE_LITE_DIR="/path/to/Paddle-Lite/build.lite.linux.${ARM_ABI}.gcc/inference_lite_lib.armlinux.${ARM_ABI}/cxx"
 
-AM_MODEL_PATH="${MODELS_DIR}/cpu/fastspeech2_csmsc_arm.nb"
-VOC_MODEL_PATH="${MODELS_DIR}/cpu/mb_melgan_csmsc_arm.nb"
+ACOUSTIC_MODEL_PATH="${MODELS_DIR}/cpu/fastspeech2_csmsc_arm.nb"
+VOCODER_PATH="${MODELS_DIR}/cpu/mb_melgan_csmsc_arm.nb"
+FRONT_CONF="${PWD}/front.conf"
diff --git a/demos/TTSArmLinux/download.sh b/demos/TTSArmLinux/download.sh
index 560374bc9..7eaa836a5 100755
--- a/demos/TTSArmLinux/download.sh
+++ b/demos/TTSArmLinux/download.sh
@@ -3,6 +3,8 @@ set -e
 
 cd "$(dirname "$(realpath "$0")")"
 
+BASE_DIR="$PWD"
+
 # load configure
 . ./config.sh
 
@@ -38,6 +40,10 @@ download() {
     echo '======================='
 }
 
+########################################
+
+echo "Download models..."
+
 download 'inference_lite_lib.armlinux.armv8.gcc.with_extra.with_cv.tar.gz' \
     'https://paddlespeech.bj.bcebos.com/demos/TTSArmLinux/inference_lite_lib.armlinux.armv8.gcc.with_extra.with_cv.tar.gz' \
     '39e0c6604f97c70f5d13c573d7e709b9' \
@@ -54,3 +60,11 @@ download 'fs2cnn_mbmelgan_cpu_v1.3.0.tar.gz' \
     "$MODELS_DIR"
 
 echo "Done."
+
+########################################
+
+echo "Download dictionary files..."
+
+ln -s src/TTSCppFrontend/front_demo/dict "$BASE_DIR/"
+
+"$BASE_DIR/src/TTSCppFrontend/download.sh"
diff --git a/demos/TTSArmLinux/front.conf b/demos/TTSArmLinux/front.conf
new file mode 100644
index 000000000..04bd2d97f
--- /dev/null
+++ b/demos/TTSArmLinux/front.conf
@@ -0,0 +1,21 @@
+# jieba conf
+--jieba_dict_path=./dict/jieba/jieba.dict.utf8
+--jieba_hmm_path=./dict/jieba/hmm_model.utf8
+--jieba_user_dict_path=./dict/jieba/user.dict.utf8
+--jieba_idf_path=./dict/jieba/idf.utf8
+--jieba_stop_word_path=./dict/jieba/stop_words.utf8
+
+# dict conf fastspeech2_0.4
+--seperate_tone=false
+--word2phone_path=./dict/fastspeech2_nosil_baker_ckpt_0.4/word2phone_fs2.dict
+--phone2id_path=./dict/fastspeech2_nosil_baker_ckpt_0.4/phone_id_map.txt
+--tone2id_path=./dict/fastspeech2_nosil_baker_ckpt_0.4/word2phone_fs2.dict
+
+# dict conf speedyspeech_0.5
+#--seperate_tone=true
+#--word2phone_path=./dict/speedyspeech_nosil_baker_ckpt_0.5/word2phone.dict
+#--phone2id_path=./dict/speedyspeech_nosil_baker_ckpt_0.5/phone_id_map.txt
+#--tone2id_path=./dict/speedyspeech_nosil_baker_ckpt_0.5/tone_id_map.txt
+
+# dict for traditional-to-simplified conversion
+--trand2simpd_path=./dict/tranditional_to_simplified/trand2simp.txt
diff --git a/demos/TTSArmLinux/run.sh b/demos/TTSArmLinux/run.sh
index efcb61b5b..d0860f044 100755
--- a/demos/TTSArmLinux/run.sh
+++ b/demos/TTSArmLinux/run.sh
@@ -7,12 +7,13 @@ cd "$(dirname "$(realpath "$0")")"
 . ./config.sh
 
 # create dir
-rm -rf "$OUTPUT_DIR"
 mkdir -p "$OUTPUT_DIR"
 
 # run
-for i in {1..10}; do
-    (set -x; ./build/paddlespeech_tts_demo "$AM_MODEL_PATH" "$VOC_MODEL_PATH" $i "$OUTPUT_DIR/$i.wav")
-done
-
-ls -lh "$OUTPUT_DIR"/*.wav
+set -x
+./build/paddlespeech_tts_demo \
+    --front_conf "$FRONT_CONF" \
+    --acoustic_model "$ACOUSTIC_MODEL_PATH" \
+    --vocoder "$VOCODER_PATH" \
+    "$@"
+# end
diff --git a/demos/TTSArmLinux/src/CMakeLists.txt b/demos/TTSArmLinux/src/CMakeLists.txt
index e1076af92..f8240d0ce 100644
--- a/demos/TTSArmLinux/src/CMakeLists.txt
+++ b/demos/TTSArmLinux/src/CMakeLists.txt
@@ -1,4 +1,18 @@
 cmake_minimum_required(VERSION 3.10)
+project(paddlespeech_tts_demo)
+
+
+########## Global Options ##########
+
+option(WITH_FRONT_DEMO "Build front demo" OFF)
+
+set(CMAKE_CXX_STANDARD 17)
+set(CMAKE_POSITION_INDEPENDENT_CODE ON)
+set(ABSL_PROPAGATE_CXX_STD ON)
+
+
+########## ARM Options ##########
+
 set(CMAKE_SYSTEM_NAME Linux)
 if(ARM_ABI STREQUAL "armv8")
     set(CMAKE_SYSTEM_PROCESSOR aarch64)
@@ -13,14 +27,16 @@ else()
     return()
 endif()
 
-project(paddlespeech_tts_demo)
+
+########## Paddle Lite Options ##########
+
 message(STATUS "TARGET ARCH ABI: ${ARM_ABI}")
 message(STATUS "PADDLE LITE DIR: ${PADDLE_LITE_DIR}")
 include_directories(${PADDLE_LITE_DIR}/include)
 link_directories(${PADDLE_LITE_DIR}/libs/${ARM_ABI})
 link_directories(${PADDLE_LITE_DIR}/lib)
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
+
 if(ARM_ABI STREQUAL "armv8")
     set(CMAKE_CXX_FLAGS "-march=armv8-a ${CMAKE_CXX_FLAGS}")
     set(CMAKE_C_FLAGS "-march=armv8-a ${CMAKE_C_FLAGS}")
@@ -29,6 +45,9 @@ elseif(ARM_ABI STREQUAL "armv7hf")
     set(CMAKE_C_FLAGS "-march=armv7-a -mfloat-abi=hard -mfpu=neon-vfpv4 ${CMAKE_C_FLAGS}" )
 endif()
 
+
+########## Dependencies ##########
+
 find_package(OpenMP REQUIRED)
 if(OpenMP_FOUND OR OpenMP_CXX_FOUND)
     set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
@@ -43,5 +62,19 @@ else()
     return()
 endif()
 
+
+############### tts cpp frontend ###############
+
+add_subdirectory(TTSCppFrontend)
+
+include_directories(
+    TTSCppFrontend/src
+    third-party/build/src/cppjieba/include
+    third-party/build/src/limonp/include
+)
+
+
+############### paddlespeech_tts_demo ###############
+
 add_executable(paddlespeech_tts_demo main.cc)
-target_link_libraries(paddlespeech_tts_demo paddle_light_api_shared)
+target_link_libraries(paddlespeech_tts_demo paddle_light_api_shared paddlespeech_tts_front)
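[Editor's note] Predictor.hpp below declares `virtual ~PredictorInterface() = 0;` and then defines the destructor out of line. That is deliberate C++, not an oversight; a sketch of why both parts are needed:

```cpp
struct Base {
    virtual ~Base() = 0;  // pure virtual: Base cannot be instantiated directly
};
// A definition is still required: every derived destructor
// implicitly calls ~Base() when a derived object is destroyed.
Base::~Base() {}
```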
diff --git a/demos/TTSArmLinux/src/Predictor.hpp b/demos/TTSArmLinux/src/Predictor.hpp
index 221d51fc1..985d01158 100644
--- a/demos/TTSArmLinux/src/Predictor.hpp
+++ b/demos/TTSArmLinux/src/Predictor.hpp
@@ -9,32 +9,78 @@
 using namespace paddle::lite_api;
 
-typedef int16_t WavDataType;
+class PredictorInterface {
+public:
+    virtual ~PredictorInterface() = 0;
+    virtual bool Init(
+        const std::string &AcousticModelPath,
+        const std::string &VocoderPath,
+        PowerMode cpuPowerMode,
+        int cpuThreadNum,
+        // WAV sample rate (must match the model output);
+        // if playback speed or pitch sounds wrong, adjust it.
+        // Common rates: 16000, 24000, 32000, 44100, 48000, 96000
+        uint32_t wavSampleRate
+    ) = 0;
+    virtual std::shared_ptr<PaddlePredictor> LoadModel(const std::string &modelPath, int cpuThreadNum, PowerMode cpuPowerMode) = 0;
+    virtual void ReleaseModel() = 0;
+    virtual bool RunModel(const std::vector<int64_t> &phones) = 0;
+    virtual std::unique_ptr<const Tensor> GetAcousticModelOutput(const std::vector<int64_t> &phones) = 0;
+    virtual std::unique_ptr<const Tensor> GetVocoderOutput(std::unique_ptr<const Tensor> &&amOutput) = 0;
+    virtual void VocoderOutputToWav(std::unique_ptr<const Tensor> &&vocOutput) = 0;
+    virtual void SaveFloatWav(float *floatWav, int64_t size) = 0;
+    virtual bool IsLoaded() = 0;
+    virtual float GetInferenceTime() = 0;
+    virtual int GetWavSize() = 0;
+    // get the WAV duration in milliseconds
+    virtual float GetWavDuration() = 0;
+    // get the RTF (synthesis time / audio duration)
+    virtual float GetRTF() = 0;
+    virtual void ReleaseWav() = 0;
+    virtual bool WriteWavToFile(const std::string &wavPath) = 0;
+};
+
+PredictorInterface::~PredictorInterface() {}
 
-class Predictor {
+// WavDataType: the WAV sample type.
+// Switch between int16_t and float to produce
+// 16-bit PCM or 32-bit IEEE float WAV files.
+template <typename WavDataType>
+class Predictor : public PredictorInterface {
 public:
-    bool Init(const std::string &AMModelPath, const std::string &VOCModelPath, int cpuThreadNum, const std::string &cpuPowerMode) {
+    virtual bool Init(
+        const std::string &AcousticModelPath,
+        const std::string &VocoderPath,
+        PowerMode cpuPowerMode,
+        int cpuThreadNum,
+        // WAV sample rate (must match the model output);
+        // if playback speed or pitch sounds wrong, adjust it.
+        // Common rates: 16000, 24000, 32000, 44100, 48000, 96000
+        uint32_t wavSampleRate
+    ) override {
         // Release model if exists
         ReleaseModel();
 
-        AM_predictor_ = LoadModel(AMModelPath, cpuThreadNum, cpuPowerMode);
-        if (AM_predictor_ == nullptr) {
+        acoustic_model_predictor_ = LoadModel(AcousticModelPath, cpuThreadNum, cpuPowerMode);
+        if (acoustic_model_predictor_ == nullptr) {
             return false;
         }
-        VOC_predictor_ = LoadModel(VOCModelPath, cpuThreadNum, cpuPowerMode);
-        if (VOC_predictor_ == nullptr) {
+        vocoder_predictor_ = LoadModel(VocoderPath, cpuThreadNum, cpuPowerMode);
+        if (vocoder_predictor_ == nullptr) {
             return false;
         }
 
+        wav_sample_rate_ = wavSampleRate;
+
         return true;
     }
 
-    ~Predictor() {
+    virtual ~Predictor() {
         ReleaseModel();
         ReleaseWav();
     }
 
-    std::shared_ptr<PaddlePredictor> LoadModel(const std::string &modelPath, int cpuThreadNum, const std::string &cpuPowerMode) {
+    virtual std::shared_ptr<PaddlePredictor> LoadModel(const std::string &modelPath, int cpuThreadNum, PowerMode cpuPowerMode) override {
         if (modelPath.empty()) {
             return nullptr;
         }
@@ -43,33 +89,17 @@ public:
         MobileConfig config;
         config.set_model_from_file(modelPath);
         config.set_threads(cpuThreadNum);
-
-        if (cpuPowerMode == "LITE_POWER_HIGH") {
-            config.set_power_mode(PowerMode::LITE_POWER_HIGH);
-        } else if (cpuPowerMode == "LITE_POWER_LOW") {
-            config.set_power_mode(PowerMode::LITE_POWER_LOW);
-        } else if (cpuPowerMode == "LITE_POWER_FULL") {
-            config.set_power_mode(PowerMode::LITE_POWER_FULL);
-        } else if (cpuPowerMode == "LITE_POWER_NO_BIND") {
-            config.set_power_mode(PowerMode::LITE_POWER_NO_BIND);
-        } else if (cpuPowerMode == "LITE_POWER_RAND_HIGH") {
-            config.set_power_mode(PowerMode::LITE_POWER_RAND_HIGH);
-        } else if (cpuPowerMode == "LITE_POWER_RAND_LOW") {
-            config.set_power_mode(PowerMode::LITE_POWER_RAND_LOW);
-        } else {
-            std::cerr << "Unknown cpu power mode!" << std::endl;
-            return nullptr;
-        }
+        config.set_power_mode(cpuPowerMode);
 
         return CreatePaddlePredictor<MobileConfig>(config);
     }
 
-    void ReleaseModel() {
-        AM_predictor_ = nullptr;
-        VOC_predictor_ = nullptr;
+    virtual void ReleaseModel() override {
+        acoustic_model_predictor_ = nullptr;
+        vocoder_predictor_ = nullptr;
     }
 
-    bool RunModel(const std::vector<int64_t> &phones) {
+    virtual bool RunModel(const std::vector<int64_t> &phones) override {
         if (!IsLoaded()) {
             return false;
         }
@@ -78,7 +108,7 @@ public:
         auto start = std::chrono::system_clock::now();
 
         // run inference
-        VOCOutputToWav(GetAMOutput(phones));
+        VocoderOutputToWav(GetVocoderOutput(GetAcousticModelOutput(phones)));
 
         // timing ends
         auto end = std::chrono::system_clock::now();
@@ -90,16 +120,16 @@
         return true;
     }
 
-    std::unique_ptr<const Tensor> GetAMOutput(const std::vector<int64_t> &phones) {
-        auto phones_handle = AM_predictor_->GetInput(0);
+    virtual std::unique_ptr<const Tensor> GetAcousticModelOutput(const std::vector<int64_t> &phones) override {
+        auto phones_handle = acoustic_model_predictor_->GetInput(0);
         phones_handle->Resize({static_cast<int64_t>(phones.size())});
         phones_handle->CopyFromCpu(phones.data());
-        AM_predictor_->Run();
+        acoustic_model_predictor_->Run();
 
         // fetch the output tensor
-        auto am_output_handle = AM_predictor_->GetOutput(0);
+        auto am_output_handle = acoustic_model_predictor_->GetOutput(0);
         // print the output tensor's shape
-        std::cout << "AM Output shape: ";
+        std::cout << "Acoustic Model Output shape: ";
         auto shape = am_output_handle->shape();
         for (auto s : shape) {
             std::cout << s << ", ";
@@ -109,60 +139,46 @@ public:
         return am_output_handle;
     }
 
-    void VOCOutputToWav(std::unique_ptr<const Tensor> &&input) {
-        auto mel_handle = VOC_predictor_->GetInput(0);
+    virtual std::unique_ptr<const Tensor> GetVocoderOutput(std::unique_ptr<const Tensor> &&amOutput) override {
+        auto mel_handle = vocoder_predictor_->GetInput(0);
         // [?, 80]
-        auto dims = input->shape();
+        auto dims = amOutput->shape();
         mel_handle->Resize(dims);
-        auto am_output_data = input->mutable_data<float>();
+        auto am_output_data = amOutput->mutable_data<float>();
         mel_handle->CopyFromCpu(am_output_data);
-        VOC_predictor_->Run();
+        vocoder_predictor_->Run();
 
         // fetch the output tensor
-        auto voc_output_handle = VOC_predictor_->GetOutput(0);
+        auto voc_output_handle = vocoder_predictor_->GetOutput(0);
         // print the output tensor's shape
-        std::cout << "VOC Output shape: ";
+        std::cout << "Vocoder Output shape: ";
         auto shape = voc_output_handle->shape();
         for (auto s : shape) {
             std::cout << s << ", ";
         }
         std::cout << std::endl;
 
+        return voc_output_handle;
+    }
+
+    virtual void VocoderOutputToWav(std::unique_ptr<const Tensor> &&vocOutput) override {
         // read out the output tensor's data
         int64_t output_size = 1;
-        for (auto dim : voc_output_handle->shape()) {
+        for (auto dim : vocOutput->shape()) {
             output_size *= dim;
         }
-        auto output_data = voc_output_handle->mutable_data<float>();
+        auto output_data = vocOutput->mutable_data<float>();
 
         SaveFloatWav(output_data, output_size);
     }
 
-    inline float Abs(float number) {
-        return (number < 0) ? -number : number;
-    }
-
-    void SaveFloatWav(float *floatWav, int64_t size) {
-        wav_.resize(size);
-        float maxSample = 0.01;
-        // 寻找最大采样值
-        for (int64_t i = 0; i < size; i++) {
-            float sample = Abs(floatWav[i]);
-            if (sample > maxSample) {
-                maxSample = sample;
-            }
-        }
-        // 把采样值缩放到 int_16 范围
-        for (int64_t i = 0; i < size; i++) {
-            wav_[i] = (int16_t)(floatWav[i] * (32767 / maxSample));
-        }
-    }
-
-    bool IsLoaded() {
-        return AM_predictor_ != nullptr && VOC_predictor_ != nullptr;
+    virtual void SaveFloatWav(float *floatWav, int64_t size) override;
+
+    virtual bool IsLoaded() override {
+        return acoustic_model_predictor_ != nullptr && vocoder_predictor_ != nullptr;
     }
 
-    float GetInferenceTime() {
+    virtual float GetInferenceTime() override {
         return inference_time_;
     }
 
-    int GetWavSize() {
+    virtual int GetWavSize() override {
         return wav_.size() * sizeof(WavDataType);
     }
 
-    struct WavHeader {
-        // RIFF 头
-        char riff[4] = {'R', 'I', 'F', 'F'};
-        uint32_t size = 0;
-        char wave[4] = {'W', 'A', 'V', 'E'};
-
-        // FMT 头
-        char fmt[4] = {'f', 'm', 't', ' '};
-        uint32_t fmt_size = 16;
-        uint16_t audio_format = 1; // 1为整数编码,3为浮点编码
-        uint16_t num_channels = 1;
-
-        // 如果播放速度和音调异常,请修改采样率
-        // 常见采样率:16000, 24000, 32000, 44100, 48000, 96000
-        uint32_t sample_rate = 24000;
-
-        uint32_t byte_rate = 64000;
-        uint16_t block_align = 2;
-        uint16_t bits_per_sample = sizeof(WavDataType) * 8;
+    // get the WAV duration in milliseconds
+    virtual float GetWavDuration() override {
+        return static_cast<float>(GetWavSize()) / sizeof(WavDataType) / static_cast<float>(wav_sample_rate_) * 1000;
+    }
 
-        // DATA 头
-        char data[4] = {'d', 'a', 't', 'a'};
-        uint32_t data_size = 0;
-    };
+    // get the RTF (synthesis time / audio duration)
+    virtual float GetRTF() override {
+        return GetInferenceTime() / GetWavDuration();
+    }
 
-    bool WriteWavToFile(const std::string &wavPath) {
+    virtual void ReleaseWav() override {
+        wav_.clear();
+    }
+
+    virtual bool WriteWavToFile(const std::string &wavPath) override {
         std::ofstream fout(wavPath, std::ios::binary);
         if (!fout.is_open()) {
             return false;
@@ -211,8 +212,10 @@ public:
 
         // write the header
         WavHeader header;
+        header.audio_format = GetWavAudioFormat();
         header.data_size = GetWavSize();
         header.size = sizeof(header) - 8 + header.data_size;
+        header.sample_rate = wav_sample_rate_;
         header.byte_rate = header.sample_rate * header.num_channels * header.bits_per_sample / 8;
         header.block_align = header.num_channels * header.bits_per_sample / 8;
         fout.write(reinterpret_cast<const char *>(&header), sizeof(header));
@@ -224,9 +227,80 @@ public:
         return true;
     }
 
-private:
+protected:
+    struct WavHeader {
+        // RIFF chunk
+        char riff[4] = {'R', 'I', 'F', 'F'};
+        uint32_t size = 0;
+        char wave[4] = {'W', 'A', 'V', 'E'};
+
+        // fmt chunk
+        char fmt[4] = {'f', 'm', 't', ' '};
+        uint32_t fmt_size = 16;
+        uint16_t audio_format = 0;
+        uint16_t num_channels = 1;
+        uint32_t sample_rate = 0;
+        uint32_t byte_rate = 0;
+        uint16_t block_align = 0;
+        uint16_t bits_per_sample = sizeof(WavDataType) * 8;
+
+        // data chunk
+        char data[4] = {'d', 'a', 't', 'a'};
+        uint32_t data_size = 0;
+    };
+
+    enum WavAudioFormat {
+        WAV_FORMAT_16BIT_PCM = 1,   // 16-bit PCM
+        WAV_FORMAT_32BIT_FLOAT = 3  // 32-bit IEEE float
+    };
+
+protected:
+    // the return value is selected via template specialization on WavDataType
+    inline uint16_t GetWavAudioFormat();
+
+    inline float Abs(float number) {
+        return (number < 0) ? -number : number;
+    }
+
+protected:
     float inference_time_ = 0;
-    std::shared_ptr<PaddlePredictor> AM_predictor_ = nullptr;
-    std::shared_ptr<PaddlePredictor> VOC_predictor_ = nullptr;
+    uint32_t wav_sample_rate_ = 0;
     std::vector<WavDataType> wav_;
+    std::shared_ptr<PaddlePredictor> acoustic_model_predictor_ = nullptr;
+    std::shared_ptr<PaddlePredictor> vocoder_predictor_ = nullptr;
 };
+
+template <>
+uint16_t Predictor<int16_t>::GetWavAudioFormat() {
+    return Predictor::WAV_FORMAT_16BIT_PCM;
+}
+
+template <>
+uint16_t Predictor<float>::GetWavAudioFormat() {
+    return Predictor::WAV_FORMAT_32BIT_FLOAT;
+}
+
+// save as 16-bit PCM WAV
+template <>
+void Predictor<int16_t>::SaveFloatWav(float *floatWav, int64_t size) {
+    wav_.resize(size);
+    float maxSample = 0.01;
+    // find the peak sample
+    for (int64_t i = 0; i < size; i++) {
+        float sample = Abs(floatWav[i]);
+        if (sample > maxSample) {
+            maxSample = sample;
+        }
+    }
+    // scale samples into the int16 range
+    for (int64_t i = 0; i < size; i++) {
+        wav_[i] = (int16_t)(floatWav[i] * (32767 / maxSample));
+    }
+}
+
+// save as 32-bit IEEE float WAV
+template <>
+void Predictor<float>::SaveFloatWav(float *floatWav, int64_t size) {
+    wav_.resize(size);
+    std::copy_n(floatWav, size, wav_.data());
+}
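[Editor's note] A quick sanity check of the header arithmetic above, using the demo's defaults (24000 Hz, mono, 16-bit): byte_rate = 24000 * 1 * 16 / 8 = 48000 and block_align = 1 * 16 / 8 = 2. And a standalone sketch of the 16-bit scaling rule, for readers who want to try it outside Paddle Lite:

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

// Mirrors Predictor<int16_t>::SaveFloatWav: find the peak (with a 0.01
// floor so near-silence is not amplified) and scale into the int16 range.
std::vector<int16_t> FloatToPcm16(const std::vector<float> &in) {
    float maxSample = 0.01f;
    for (float s : in) maxSample = std::max(maxSample, std::fabs(s));
    std::vector<int16_t> out(in.size());
    for (size_t i = 0; i < in.size(); i++) {
        out[i] = static_cast<int16_t>(in[i] * (32767 / maxSample));
    }
    return out;
}
```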
diff --git a/demos/TTSArmLinux/src/TTSCppFrontend b/demos/TTSArmLinux/src/TTSCppFrontend
new file mode 120000
index 000000000..25953976d
--- /dev/null
+++ b/demos/TTSArmLinux/src/TTSCppFrontend
@@ -0,0 +1 @@
+../../TTSCppFrontend/
\ No newline at end of file
diff --git a/demos/TTSArmLinux/src/main.cc b/demos/TTSArmLinux/src/main.cc
index 0bf78a7de..f3bd0f7b0 100644
--- a/demos/TTSArmLinux/src/main.cc
+++ b/demos/TTSArmLinux/src/main.cc
@@ -1,72 +1,128 @@
 #include <iostream>
 #include <string>
 #include <vector>
-#include "paddle_api.h"
+#include <algorithm>
+#include <memory>
+#include <gflags/gflags.h>
+#include <glog/logging.h>
+#include <limonp/StringUtil.hpp>
+#include <front/front_interface.h>
 #include "Predictor.hpp"
 
 using namespace paddle::lite_api;
 
-std::vector<std::vector<int64_t>> sentencesToChoose = {
-    // 009901 昨日,这名“伤者”与医生全部被警方依法刑事拘留。
-    {261, 231, 175, 116, 179, 262, 44, 154, 126, 177, 19, 262, 42, 241, 72, 177, 56, 174, 245, 37, 186, 37, 49, 151, 127, 69, 19, 179, 72, 69, 4, 260, 126, 177, 116, 151, 239, 153, 141},
-    // 009902 钱伟长想到上海来办学校是经过深思熟虑的。
-    {174, 83, 213, 39, 20, 260, 89, 40, 30, 177, 22, 71, 9, 153, 8, 37, 17, 260, 251, 260, 99, 179, 177, 116, 151, 125, 70, 233, 177, 51, 176, 108, 177, 184, 153, 242, 40, 45},
-    // 009903 她见我一进门就骂,吃饭时也骂,骂得我抬不起头。
-    {182, 2, 151, 85, 232, 73, 151, 123, 154, 52, 151, 143, 154, 5, 179, 39, 113, 69, 17, 177, 114, 105, 154, 5, 179, 154, 5, 40, 45, 232, 182, 8, 37, 186, 174, 74, 182, 168},
-    // 009904 李述德在离开之前,只说了一句“柱驼杀父亲了”。
-    {153, 74, 177, 186, 40, 42, 261, 10, 153, 73, 152, 7, 262, 113, 174, 83, 179, 262, 115, 177, 230, 153, 45, 73, 151, 242, 180, 262, 186, 182, 231, 177, 2, 69, 186, 174, 124, 153, 45},
-    // 009905 这种车票和保险单捆绑出售属于重复性购买。
-    {262, 44, 262, 163, 39, 41, 173, 99, 71, 42, 37, 28, 260, 84, 40, 14, 179, 152, 220, 37, 21, 39, 183, 177, 170, 179, 177, 185, 240, 39, 162, 69, 186, 260, 128, 70, 170, 154, 9},
-    // 009906 戴佩妮的男友西米露接唱情歌,让她非常开心。
-    {40, 10, 173, 49, 155, 72, 40, 45, 155, 15, 142, 260, 72, 154, 74, 153, 186, 179, 151, 103, 39, 22, 174, 126, 70, 41, 179, 175, 22, 182, 2, 69, 46, 39, 20, 152, 7, 260, 120},
-    // 009907 观大势、谋大局、出大策始终是该院的办院方针。
-    {70, 199, 40, 5, 177, 116, 154, 168, 40, 5, 151, 240, 179, 39, 183, 40, 5, 38, 44, 179, 177, 115, 262, 161, 177, 116, 70, 7, 247, 40, 45, 37, 17, 247, 69, 19, 262, 51},
-    // 009908 他们骑着摩托回家,正好为农忙时的父母帮忙。
-    {182, 2, 154, 55, 174, 73, 262, 45, 154, 157, 182, 230, 71, 212, 151, 77, 180, 262, 59, 71, 29, 214, 155, 162, 154, 20, 177, 114, 40, 45, 69, 186, 154, 185, 37, 19, 154, 20},
-    // 009909 但是因为还没到退休年龄,只能掰着指头捱日子。
-    {40, 17, 177, 116, 120, 214, 71, 8, 154, 47, 40, 30, 182, 214, 260, 140, 155, 83, 153, 126, 180, 262, 115, 155, 57, 37, 7, 262, 45, 262, 115, 182, 171, 8, 175, 116, 261, 112},
-    // 009910 这几天雨水不断,人们恨不得待在家里不出门。
-    {262, 44, 151, 74, 182, 82, 240, 177, 213, 37, 184, 40, 202, 180, 175, 52, 154, 55, 71, 54, 37, 186, 40, 42, 40, 7, 261, 10, 151, 77, 153, 74, 37, 186, 39, 183, 154, 52},
-};
-
-void usage(const char *binName) {
-    std::cerr << "Usage:" << std::endl
-              << "\t" << binName << " <AM-model-path> <VOC-model-path> <sentences-index:1-10> <output-wav-path>" << std::endl;
-}
+DEFINE_string(sentence, "你好,欢迎使用语音合成服务", "Text to be synthesized (Chinese only; English input will crash the program)");
+DEFINE_string(front_conf, "./front.conf", "Front configuration file");
+DEFINE_string(acoustic_model, "./models/cpu/fastspeech2_csmsc_arm.nb", "Acoustic model .nb file");
+DEFINE_string(vocoder, "./models/cpu/mb_melgan_csmsc_arm.nb", "Vocoder .nb file");
+DEFINE_string(output_wav, "./output/tts.wav", "Output WAV file");
+DEFINE_string(wav_bit_depth, "16", "WAV bit depth, 16 (16-bit PCM) or 32 (32-bit IEEE float)");
+DEFINE_string(wav_sample_rate, "24000", "WAV sample rate, should match the output of the vocoder");
+DEFINE_string(cpu_thread, "1", "CPU thread number");
 
 int main(int argc, char *argv[]) {
-    if (argc < 5) {
-        usage(argv[0]);
+    gflags::ParseCommandLineFlags(&argc, &argv, true);
+
+    PredictorInterface *predictor;
+
+    if (FLAGS_wav_bit_depth == "16") {
+        predictor = new Predictor<int16_t>();
+    } else if (FLAGS_wav_bit_depth == "32") {
+        predictor = new Predictor<float>();
+    } else {
+        LOG(ERROR) << "Unsupported WAV bit depth: " << FLAGS_wav_bit_depth;
         return -1;
     }
-    const char *AMModelPath = argv[1];
-    const char *VOCModelPath = argv[2];
-    int sentencesIndex = atoi(argv[3]) - 1;
-    const char *outputWavPath = argv[4];
-    if (sentencesIndex < 0 || sentencesIndex >= sentencesToChoose.size()) {
-        std::cerr << "sentences-index out of range" << std::endl;
+
+
+    /////////////////////////// frontend: text -> phonemes ///////////////////////////
+
+    // instantiate the text frontend engine
+    ppspeech::FrontEngineInterface *front_inst = nullptr;
+    front_inst = new ppspeech::FrontEngineInterface(FLAGS_front_conf);
+    if ((!front_inst) || (front_inst->init())) {
+        LOG(ERROR) << "Create TTS engine failed!";
+        if (front_inst != nullptr) {
+            delete front_inst;
+        }
+        front_inst = nullptr;
         return -1;
     }
 
-    Predictor predictor;
-    if (!predictor.Init(AMModelPath, VOCModelPath, 1, "LITE_POWER_HIGH")) {
-        std::cerr << "predictor init failed" << std::endl;
+    std::wstring ws_sentence = ppspeech::utf8string2wstring(FLAGS_sentence);
+
+    // traditional -> simplified Chinese
+    std::wstring sentence_simp;
+    front_inst->Trand2Simp(ws_sentence, sentence_simp);
+    ws_sentence = sentence_simp;
+
+    std::string s_sentence;
+    std::vector<std::wstring> sentence_part;
+    std::vector<int> phoneids = {};
+    std::vector<int> toneids = {};
+
+    // split the sentence at punctuation
+    LOG(INFO) << "Start to segment sentences by punctuation";
+    front_inst->SplitByPunc(ws_sentence, sentence_part);
+    LOG(INFO) << "Segment sentences through punctuation successfully";
+
+    // get the phoneme ids of each sub-sentence
+    LOG(INFO) << "Start to get the phoneme and tone id sequence of each sentence";
+    for (int i = 0; i < sentence_part.size(); i++) {
+        LOG(INFO) << "Raw sentence is: " << ppspeech::wstring2utf8string(sentence_part[i]);
+        front_inst->SentenceNormalize(sentence_part[i]);
+        s_sentence = ppspeech::wstring2utf8string(sentence_part[i]);
+        LOG(INFO) << "After normalization sentence is: " << s_sentence;
+
+        if (0 != front_inst->GetSentenceIds(s_sentence, phoneids, toneids)) {
+            LOG(ERROR) << "TTS inst get sentence phoneids and toneids failed";
+            return -1;
+        }
+    }
+    LOG(INFO) << "The phoneids of the sentence is: " << limonp::Join(phoneids.begin(), phoneids.end(), " ");
+    LOG(INFO) << "The toneids of the sentence is: " << limonp::Join(toneids.begin(), toneids.end(), " ");
+    LOG(INFO) << "Get the phoneme id sequence of each sentence successfully";
+
+
+    /////////////////////////// backend: phonemes -> audio ///////////////////////////
+
+    // WAV sample rate (must match the model output);
+    // if playback speed or pitch sounds wrong, adjust it.
+    // Common rates: 16000, 24000, 32000, 44100, 48000, 96000
+    const uint32_t wavSampleRate = std::stoul(FLAGS_wav_sample_rate);
+
+    // number of CPU threads
+    const int cpuThreadNum = std::stol(FLAGS_cpu_thread);
+
+    // CPU power mode
+    const PowerMode cpuPowerMode = PowerMode::LITE_POWER_HIGH;
+
+    if (!predictor->Init(FLAGS_acoustic_model, FLAGS_vocoder, cpuPowerMode, cpuThreadNum, wavSampleRate)) {
+        LOG(ERROR) << "predictor init failed" << std::endl;
         return -1;
     }
 
-    if (!predictor.RunModel(sentencesToChoose[sentencesIndex])) {
-        std::cerr << "predictor run model failed" << std::endl;
+    std::vector<int64_t> phones(phoneids.size());
+    std::transform(phoneids.begin(), phoneids.end(), phones.begin(), [](int x) { return static_cast<int64_t>(x); });
+
+    if (!predictor->RunModel(phones)) {
+        LOG(ERROR) << "predictor run model failed" << std::endl;
         return -1;
     }
 
-    std::cout << "Inference time: " << predictor.GetInferenceTime() << " ms, "
-              << "WAV size (without header): " << predictor.GetWavSize() << " bytes" << std::endl;
+    LOG(INFO) << "Inference time: " << predictor->GetInferenceTime() << " ms, "
+              << "WAV size (without header): " << predictor->GetWavSize() << " bytes, "
+              << "WAV duration: " << predictor->GetWavDuration() << " ms, "
+              << "RTF: " << predictor->GetRTF() << std::endl;
 
-    if (!predictor.WriteWavToFile(outputWavPath)) {
-        std::cerr << "write wav file failed" << std::endl;
+    if (!predictor->WriteWavToFile(FLAGS_output_wav)) {
+        LOG(ERROR) << "write wav file failed" << std::endl;
         return -1;
     }
 
+    delete predictor;
+
     return 0;
 }
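[Editor's note] main.cc releases the predictor manually and returns early on several error paths, so the raw `predictor` and `front_inst` pointers can leak. Not part of this patch, but a possible hardening with the same behavior, sketched under the assumption that only the ownership style changes:

```cpp
#include <memory>

std::unique_ptr<PredictorInterface> predictor;
if (FLAGS_wav_bit_depth == "16") {
    predictor = std::make_unique<Predictor<int16_t>>();
} else if (FLAGS_wav_bit_depth == "32") {
    predictor = std::make_unique<Predictor<float>>();
}
// ... use predictor->Init(...) as before; no delete needed on any return path
```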
diff --git a/demos/TTSArmLinux/src/third-party b/demos/TTSArmLinux/src/third-party
new file mode 120000
index 000000000..851b2c1ec
--- /dev/null
+++ b/demos/TTSArmLinux/src/third-party
@@ -0,0 +1 @@
+TTSCppFrontend/third-party
\ No newline at end of file
diff --git a/demos/TTSCppFrontend/.gitignore b/demos/TTSCppFrontend/.gitignore
new file mode 100644
index 000000000..0075a9011
--- /dev/null
+++ b/demos/TTSCppFrontend/.gitignore
@@ -0,0 +1,2 @@
+build/
+dict/
diff --git a/demos/TTSCppFrontend/CMakeLists.txt b/demos/TTSCppFrontend/CMakeLists.txt
new file mode 100644
index 000000000..14245372b
--- /dev/null
+++ b/demos/TTSCppFrontend/CMakeLists.txt
@@ -0,0 +1,63 @@
+cmake_minimum_required(VERSION 3.10)
+project(paddlespeech_tts_cpp)
+
+
+########## Global Options ##########
+
+option(WITH_FRONT_DEMO "Build front demo" ON)
+
+set(CMAKE_CXX_STANDARD 17)
+set(CMAKE_POSITION_INDEPENDENT_CODE ON)
+set(ABSL_PROPAGATE_CXX_STD ON)
+
+
+########## Dependencies ##########
+
+set(ENV{PKG_CONFIG_PATH} "${CMAKE_SOURCE_DIR}/third-party/build/lib/pkgconfig:${CMAKE_SOURCE_DIR}/third-party/build/lib64/pkgconfig")
+find_package(PkgConfig REQUIRED)
+
+# It is hard to load xxx-config.cmake in a custom location, so use pkgconfig instead.
+pkg_check_modules(ABSL REQUIRED absl_strings IMPORTED_TARGET)
+pkg_check_modules(GFLAGS REQUIRED gflags IMPORTED_TARGET)
+pkg_check_modules(GLOG REQUIRED libglog IMPORTED_TARGET)
+
+# load header-only libraries
+include_directories(
+    ${CMAKE_SOURCE_DIR}/third-party/build/src/cppjieba/include
+    ${CMAKE_SOURCE_DIR}/third-party/build/src/limonp/include
+)
+
+find_package(Threads REQUIRED)
+
+
+########## paddlespeech_tts_front ##########
+
+include_directories(src)
+
+file(GLOB FRONT_SOURCES
+    ./src/base/*.cpp
+    ./src/front/*.cpp
+)
+add_library(paddlespeech_tts_front STATIC ${FRONT_SOURCES})
+
+target_link_libraries(
+    paddlespeech_tts_front
+    PUBLIC
+    PkgConfig::GFLAGS
+    PkgConfig::GLOG
+    PkgConfig::ABSL
+    Threads::Threads
+)
+
+
+########## tts_front_demo ##########
+
+if (WITH_FRONT_DEMO)
+
+    file(GLOB FRONT_DEMO_SOURCES front_demo/*.cpp)
+    add_executable(tts_front_demo ${FRONT_DEMO_SOURCES})
+
+    target_include_directories(tts_front_demo PRIVATE ./front_demo)
+    target_link_libraries(tts_front_demo PRIVATE paddlespeech_tts_front)
+
+endif (WITH_FRONT_DEMO)
diff --git a/demos/TTSCppFrontend/README.md b/demos/TTSCppFrontend/README.md
new file mode 100644
index 000000000..592140ae1
--- /dev/null
+++ b/demos/TTSCppFrontend/README.md
@@ -0,0 +1,55 @@
+# PaddleSpeech TTS CPP Frontend
+
+A TTS frontend that implements text-to-phoneme conversion.
+
+Currently it only supports Chinese; any English word will crash the demo.
+
+## Install Build Tools
+
+```
+# Ubuntu
+sudo apt install build-essential cmake pkg-config
+
+# CentOS
+sudo yum groupinstall "Development Tools"
+sudo yum install cmake
+```
+
+If your cmake version is too old, you can download a newer precompiled version here: https://cmake.org/download/
+
+## Build
+
+```
+# Build with all CPU cores
+./build.sh
+
+# Build with 1 core
+./build.sh -j1
+```
+
+Dependent libraries will be automatically downloaded to the `third-party/build` folder.
+
+If the download speed is too slow, you can open [third-party/CMakeLists.txt](third-party/CMakeLists.txt) and modify the `GIT_REPOSITORY` URLs.
+
+## Download dictionary files
+
+```
+./download.sh
+```
+
+## Run
+
+```
+./run_front_demo.sh
+./run_front_demo.sh --help
+./run_front_demo.sh --sentence "这是语音合成服务的文本前端,用于将文本转换为音素序号数组。"
+./run_front_demo.sh --front_conf ./front_demo/front.conf --sentence "你还需要一个语音合成后端才能将其转换为实际的声音。"
+```
+
+## Clean
+
+```
+./clean.sh
+```
+
+The folders `front_demo/dict`, `build` and `third-party/build` will be deleted.
diff --git a/demos/TTSCppFrontend/build-depends.sh b/demos/TTSCppFrontend/build-depends.sh
new file mode 100755
index 000000000..c5f2ca125
--- /dev/null
+++ b/demos/TTSCppFrontend/build-depends.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+set -e
+set -x
+
+cd "$(dirname "$(realpath "$0")")"
+
+cd ./third-party
+
+mkdir -p build
+cd build
+
+cmake ..
+
+if [ "$*" = "" ]; then
+    make -j$(nproc)
+else
+    make "$@"
+fi
+
+echo "Done."
diff --git a/demos/TTSCppFrontend/build.sh b/demos/TTSCppFrontend/build.sh
new file mode 100755
index 000000000..a136cb936
--- /dev/null
+++ b/demos/TTSCppFrontend/build.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+set -e
+set -x
+
+cd "$(dirname "$(realpath "$0")")"
+
+echo "************* Download & Build Dependencies *************"
+./build-depends.sh "$@"
+
+echo "************* Build Front Lib and Demo *************"
+mkdir -p ./build
+cd ./build
+cmake ..
+
+if [ "$*" = "" ]; then
+    make -j$(nproc)
+else
+    make "$@"
+fi
+
+echo "Done."
diff --git a/demos/TTSCppFrontend/clean.sh b/demos/TTSCppFrontend/clean.sh new file mode 100755 index 000000000..efbb28871 --- /dev/null +++ b/demos/TTSCppFrontend/clean.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -e +set -x + +cd "$(dirname "$(realpath "$0")")" +rm -rf "./front_demo/dict" +rm -rf "./build" +rm -rf "./third-party/build" + +echo "Done." diff --git a/demos/TTSCppFrontend/download.sh b/demos/TTSCppFrontend/download.sh new file mode 100755 index 000000000..0953e3a59 --- /dev/null +++ b/demos/TTSCppFrontend/download.sh @@ -0,0 +1,62 @@ +#!/bin/bash +set -e + +cd "$(dirname "$(realpath "$0")")" + +download() { + file="$1" + url="$2" + md5="$3" + dir="$4" + + cd "$dir" + + if [ -f "$file" ] && [ "$(md5sum "$file" | awk '{ print $1 }')" = "$md5" ]; then + echo "File $file (MD5: $md5) has been downloaded." + else + echo "Downloading $file..." + wget -O "$file" "$url" + + # MD5 verify + fileMd5="$(md5sum "$file" | awk '{ print $1 }')" + if [ "$fileMd5" == "$md5" ]; then + echo "File $file (MD5: $md5) has been downloaded." + else + echo "MD5 mismatch, file may be corrupt" + echo "$file MD5: $fileMd5, it should be $md5" + fi + fi + + echo "Extracting $file..." + echo '-----------------------' + tar -vxf "$file" + echo '=======================' +} + +######################################## + +DIST_DIR="$PWD/front_demo/dict" + +mkdir -p "$DIST_DIR" + +download 'fastspeech2_nosil_baker_ckpt_0.4.tar.gz' \ + 'https://paddlespeech.bj.bcebos.com/t2s/text_frontend/fastspeech2_nosil_baker_ckpt_0.4.tar.gz' \ + '7bf1bab1737375fa123c413eb429c573' \ + "$DIST_DIR" + +download 'speedyspeech_nosil_baker_ckpt_0.5.tar.gz' \ + 'https://paddlespeech.bj.bcebos.com/t2s/text_frontend/speedyspeech_nosil_baker_ckpt_0.5.tar.gz' \ + '0b7754b21f324789aef469c61f4d5b8f' \ + "$DIST_DIR" + +download 'jieba.tar.gz' \ + 'https://paddlespeech.bj.bcebos.com/t2s/text_frontend/jieba.tar.gz' \ + '6d30f426bd8c0025110a483f051315ca' \ + "$DIST_DIR" + +download 'tranditional_to_simplified.tar.gz' \ + 'https://paddlespeech.bj.bcebos.com/t2s/text_frontend/tranditional_to_simplified.tar.gz' \ + '258f5b59d5ebfe96d02007ca1d274a7f' \ + "$DIST_DIR" + +echo "Done." 
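[Editor's note] front.conf below is a plain flag file: one `--key=value` entry per line, with `#` lines ignored. For reference, a condensed sketch of the consuming logic; it mirrors FrontEngineInterface::ReadConfFile later in this patch:

```cpp
#include <fstream>
#include <map>
#include <string>

std::map<std::string, std::string> LoadFlagFile(const std::string &path) {
    std::map<std::string, std::string> conf;
    std::ifstream is(path);
    std::string line;
    while (std::getline(is, line)) {
        if (line.rfind("--", 0) == 0) {  // only "--key=value" lines
            size_t eq = line.find('=');
            conf[line.substr(2, eq - 2)] = line.substr(eq + 1);
        }
    }
    return conf;
}
```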
diff --git a/demos/TTSCppFrontend/front_demo/front.conf b/demos/TTSCppFrontend/front_demo/front.conf
new file mode 100644
index 000000000..e9ce1c94d
--- /dev/null
+++ b/demos/TTSCppFrontend/front_demo/front.conf
@@ -0,0 +1,21 @@
+# jieba conf
+--jieba_dict_path=./front_demo/dict/jieba/jieba.dict.utf8
+--jieba_hmm_path=./front_demo/dict/jieba/hmm_model.utf8
+--jieba_user_dict_path=./front_demo/dict/jieba/user.dict.utf8
+--jieba_idf_path=./front_demo/dict/jieba/idf.utf8
+--jieba_stop_word_path=./front_demo/dict/jieba/stop_words.utf8
+
+# dict conf fastspeech2_0.4
+--seperate_tone=false
+--word2phone_path=./front_demo/dict/fastspeech2_nosil_baker_ckpt_0.4/word2phone_fs2.dict
+--phone2id_path=./front_demo/dict/fastspeech2_nosil_baker_ckpt_0.4/phone_id_map.txt
+--tone2id_path=./front_demo/dict/fastspeech2_nosil_baker_ckpt_0.4/word2phone_fs2.dict
+
+# dict conf speedyspeech_0.5
+#--seperate_tone=true
+#--word2phone_path=./front_demo/dict/speedyspeech_nosil_baker_ckpt_0.5/word2phone.dict
+#--phone2id_path=./front_demo/dict/speedyspeech_nosil_baker_ckpt_0.5/phone_id_map.txt
+#--tone2id_path=./front_demo/dict/speedyspeech_nosil_baker_ckpt_0.5/tone_id_map.txt
+
+# dict for traditional-to-simplified conversion
+--trand2simpd_path=./front_demo/dict/tranditional_to_simplified/trand2simp.txt
diff --git a/demos/TTSCppFrontend/front_demo/front_demo.cpp b/demos/TTSCppFrontend/front_demo/front_demo.cpp
new file mode 100644
index 000000000..e943fd6f7
--- /dev/null
+++ b/demos/TTSCppFrontend/front_demo/front_demo.cpp
@@ -0,0 +1,65 @@
+#include <iostream>
+//#include "utils/dir_utils.h"
+#include "front/front_interface.h"
+#include <gflags/gflags.h>
+#include <glog/logging.h>
+#include <vector>
+
+DEFINE_string(sentence, "你好,欢迎使用语音合成服务", "Text to be synthesized");
+DEFINE_string(front_conf, "./front_demo/front.conf", "Front conf file");
+//DEFINE_string(seperate_tone, "true", "If true, get phoneids and toneids");
+
+
+int main(int argc, char** argv) {
+    gflags::ParseCommandLineFlags(&argc, &argv, true);
+    // instantiate the text frontend engine
+    ppspeech::FrontEngineInterface *front_inst = nullptr;
+    front_inst = new ppspeech::FrontEngineInterface(FLAGS_front_conf);
+    if ((!front_inst) || (front_inst->init())) {
+        LOG(ERROR) << "Create TTS engine failed!";
+        if (front_inst != nullptr) {
+            delete front_inst;
+        }
+        front_inst = nullptr;
+        return -1;
+    }
+
+    std::wstring ws_sentence = ppspeech::utf8string2wstring(FLAGS_sentence);
+
+    // traditional -> simplified Chinese
+    std::wstring sentence_simp;
+    front_inst->Trand2Simp(ws_sentence, sentence_simp);
+    ws_sentence = sentence_simp;
+
+    std::string s_sentence;
+    std::vector<std::wstring> sentence_part;
+    std::vector<int> phoneids = {};
+    std::vector<int> toneids = {};
+
+    // split the sentence at punctuation
+    LOG(INFO) << "Start to segment sentences by punctuation";
+    front_inst->SplitByPunc(ws_sentence, sentence_part);
+    LOG(INFO) << "Segment sentences through punctuation successfully";
+
+    // get the phoneme ids of each sub-sentence
+    LOG(INFO) << "Start to get the phoneme and tone id sequence of each sentence";
+    for (int i = 0; i < sentence_part.size(); i++) {
+        LOG(INFO) << "Raw sentence is: " << ppspeech::wstring2utf8string(sentence_part[i]);
+        front_inst->SentenceNormalize(sentence_part[i]);
+        s_sentence = ppspeech::wstring2utf8string(sentence_part[i]);
+        LOG(INFO) << "After normalization sentence is: " << s_sentence;
+
+        if (0 != front_inst->GetSentenceIds(s_sentence, phoneids, toneids)) {
+            LOG(ERROR) << "TTS inst get sentence phoneids and toneids failed";
+            return -1;
+        }
+    }
+    LOG(INFO) << "The phoneids of the sentence is: " << limonp::Join(phoneids.begin(), phoneids.end(), " ");
+    LOG(INFO) << "The toneids of the sentence is: " <<
limonp::Join(toneids.begin(), toneids.end(), " "); + LOG(INFO) << "Get the phoneme id sequence of each sentence successfully"; + + return EXIT_SUCCESS; +} + diff --git a/demos/TTSCppFrontend/front_demo/gentools/gen_dict_paddlespeech.py b/demos/TTSCppFrontend/front_demo/gentools/gen_dict_paddlespeech.py new file mode 100644 index 000000000..e9a2c96f6 --- /dev/null +++ b/demos/TTSCppFrontend/front_demo/gentools/gen_dict_paddlespeech.py @@ -0,0 +1,87 @@ +# !/usr/bin/env python3 +# -*- coding: utf-8 -*- +######################################################################## +# +# Copyright 2021 liangyunming(liangyunming@baidu.com) +# +# Execute the script when PaddleSpeech has been installed +# PaddleSpeech: https://github.com/PaddlePaddle/PaddleSpeech + +######################################################################## + +import argparse +import configparser +from paddlespeech.t2s.frontend.zh_frontend import Frontend + +def get_phone(frontend, word, merge_sentences=True, print_info=False, robot=False, get_tone_ids=False): + phonemes = frontend.get_phonemes(word, merge_sentences, print_info, robot) + # Some optimizations + phones, tones = frontend._get_phone_tone(phonemes[0], get_tone_ids) + #print(type(phones), phones) + #print(type(tones), tones) + return phones, tones + + +def gen_word2phone_dict(frontend, jieba_words_dict, word2phone_dict, get_tone=False): + with open(jieba_words_dict, "r") as f1, open(word2phone_dict, "w+") as f2: + for line in f1.readlines(): + word = line.split(" ")[0] + phone, tone = get_phone(frontend, word, get_tone_ids=get_tone) + phone_str = "" + + if tone: + assert(len(phone) == len(tone)) + for i in range(len(tone)): + phone_tone = phone[i] + tone[i] + phone_str += (" " + phone_tone) + phone_str = phone_str.strip("sp0").strip(" ") + else: + for x in phone: + phone_str += (" " + x) + phone_str = phone_str.strip("sp").strip(" ") + print(phone_str) + f2.write(word + " " + phone_str + "\n") + print("Generate word2phone dict successfully.") + + +def main(): + parser = argparse.ArgumentParser( + description="Generate dictionary") + parser.add_argument( + "--config", type=str, default="./config.ini", help="config file.") + parser.add_argument( + "--am_type", type=str, default="fastspeech2", help="fastspeech2 or speedyspeech") + args = parser.parse_args() + + # Read config + cf = configparser.ConfigParser() + cf.read(args.config) + jieba_words_dict_file = cf.get("jieba", "jieba_words_dict") # get words dict + + am_type = args.am_type + if(am_type == "fastspeech2"): + phone2id_dict_file = cf.get(am_type, "phone2id_dict") + word2phone_dict_file = cf.get(am_type, "word2phone_dict") + + frontend = Frontend(phone_vocab_path=phone2id_dict_file) + print("frontend done!") + + gen_word2phone_dict(frontend, jieba_words_dict_file, word2phone_dict_file, get_tone=False) + + elif(am_type == "speedyspeech"): + phone2id_dict_file = cf.get(am_type, "phone2id_dict") + tone2id_dict_file = cf.get(am_type, "tone2id_dict") + word2phone_dict_file = cf.get(am_type, "word2phone_dict") + + frontend = Frontend(phone_vocab_path=phone2id_dict_file, tone_vocab_path=tone2id_dict_file) + print("frontend done!") + + gen_word2phone_dict(frontend, jieba_words_dict_file, word2phone_dict_file, get_tone=True) + + + else: + print("Please set correct am type, fastspeech2 or speedyspeech.") + + +if __name__ == "__main__": + main() diff --git a/demos/TTSCppFrontend/front_demo/gentools/genid.py b/demos/TTSCppFrontend/front_demo/gentools/genid.py new file mode 100644 index 000000000..e2866bb0e --- 
/dev/null
+++ b/demos/TTSCppFrontend/front_demo/gentools/genid.py
@@ -0,0 +1,22 @@
+#from parakeet.frontend.vocab import Vocab
+
+PHONESFILE = "./dict/phones.txt"
+PHONES_ID_FILE = "./dict/phonesid.dict"
+TONESFILE = "./dict/tones.txt"
+TONES_ID_FILE = "./dict/tonesid.dict"
+
+def GenIdFile(file, idfile):
+    id = 2
+    with open(file, 'r') as f1, open(idfile, "w+") as f2:
+        f2.write("<pad> 0\n")
+        f2.write("<unk> 1\n")
+        for line in f1.readlines():
+            phone = line.strip()
+            print(phone + " " + str(id) + "\n")
+            f2.write(phone + " " + str(id) + "\n")
+            id += 1
+
+if __name__ == "__main__":
+    GenIdFile(PHONESFILE, PHONES_ID_FILE)
+    GenIdFile(TONESFILE, TONES_ID_FILE)
+
diff --git a/demos/TTSCppFrontend/front_demo/gentools/word2phones.py b/demos/TTSCppFrontend/front_demo/gentools/word2phones.py
new file mode 100644
index 000000000..6a1822023
--- /dev/null
+++ b/demos/TTSCppFrontend/front_demo/gentools/word2phones.py
@@ -0,0 +1,37 @@
+from pypinyin import lazy_pinyin, Style
+import re
+
+worddict = "./dict/jieba_part.dict.utf8"
+newdict = "./dict/word_phones.dict"
+
+def GenPhones(initials, finals, seperate=True):
+
+    phones = []
+    for c, v in zip(initials, finals):
+        if re.match(r'i\d', v):
+            if c in ['z', 'c', 's']:
+                v = re.sub('i', 'ii', v)
+            elif c in ['zh', 'ch', 'sh', 'r']:
+                v = re.sub('i', 'iii', v)
+        if c:
+            if seperate == True:
+                phones.append(c + '0')
+            elif seperate == False:
+                phones.append(c)
+            else:
+                print("Not sure whether phone and tone need to be separated")
+        if v:
+            phones.append(v)
+    return phones
+
+
+with open(worddict, "r") as f1, open(newdict, "w+") as f2:
+    for line in f1.readlines():
+        word = line.split(" ")[0]
+        initials = lazy_pinyin(word, neutral_tone_with_five=True, style=Style.INITIALS)
+        finals = lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
+
+        phones = GenPhones(initials, finals, True)
+
+        temp = " ".join(phones)
+        f2.write(word + " " + temp + "\n")
diff --git a/demos/TTSCppFrontend/run_front_demo.sh b/demos/TTSCppFrontend/run_front_demo.sh
new file mode 100755
index 000000000..4dcded5c1
--- /dev/null
+++ b/demos/TTSCppFrontend/run_front_demo.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+set -e
+set -x
+
+cd "$(dirname "$(realpath "$0")")"
+
+./build/tts_front_demo "$@"
diff --git a/demos/TTSCppFrontend/src/base/type_conv.cpp b/demos/TTSCppFrontend/src/base/type_conv.cpp
new file mode 100644
index 000000000..5d5de43c5
--- /dev/null
+++ b/demos/TTSCppFrontend/src/base/type_conv.cpp
@@ -0,0 +1,18 @@
+#include "base/type_conv.h"
+
+namespace ppspeech {
+// wstring to string
+std::string wstring2utf8string(const std::wstring& str)
+{
+    static std::wstring_convert<std::codecvt_utf8<wchar_t>> strCnv;
+    return strCnv.to_bytes(str);
+}
+
+// string to wstring
+std::wstring utf8string2wstring(const std::string& str)
+{
+    static std::wstring_convert<std::codecvt_utf8<wchar_t>> strCnv;
+    return strCnv.from_bytes(str);
+}
+
+}
diff --git a/demos/TTSCppFrontend/src/base/type_conv.h b/demos/TTSCppFrontend/src/base/type_conv.h
new file mode 100644
index 000000000..9acb7a6d2
--- /dev/null
+++ b/demos/TTSCppFrontend/src/base/type_conv.h
@@ -0,0 +1,18 @@
+#ifndef BASE_TYPE_CONVC_H
+#define BASE_TYPE_CONVC_H
+
+#include <string>
+#include <locale>
+#include <codecvt>
+
+
+namespace ppspeech {
+// wstring to string
+std::string wstring2utf8string(const std::wstring& str);
+
+// string to wstring
+std::wstring utf8string2wstring(const std::string& str);
+
+}
+
+#endif  // BASE_TYPE_CONVC_H
\ No newline at end of file
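[Editor's note] One caveat about type_conv above: std::wstring_convert and std::codecvt_utf8 are deprecated since C++17, the standard these CMake files select, though they still work in practice. Usage is a simple round trip:

```cpp
#include "base/type_conv.h"
#include <cassert>

int main() {
    std::wstring ws = ppspeech::utf8string2wstring("你好");
    assert(ppspeech::wstring2utf8string(ws) == "你好");  // round trip
    return 0;
}
```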
diff --git a/demos/TTSCppFrontend/src/front/front_interface.cpp b/demos/TTSCppFrontend/src/front/front_interface.cpp
new file mode 100644
index 000000000..5b828ac1b
--- /dev/null
+++ b/demos/TTSCppFrontend/src/front/front_interface.cpp
@@ -0,0 +1,933 @@
+#include "front/front_interface.h"
+
+namespace ppspeech {
+
+int FrontEngineInterface::init() {
+    if (_initialed) {
+        return 0;
+    }
+    if (0 != ReadConfFile()) {
+        LOG(ERROR) << "Read front conf file failed";
+        return -1;
+    }
+
+    _jieba = new cppjieba::Jieba(_jieba_dict_path, _jieba_hmm_path, _jieba_user_dict_path,
+                                 _jieba_idf_path, _jieba_stop_word_path);
+
+    _punc = {",", "。", "、", "?", ":", ";", "~", "!",
+             ",", ".", "?", "!", ":", ";", "/", "\\"};
+    _punc_omit = {"“", "”", "\"", "\""};
+
+    // words that require erhua (r-coloring) handling
+    must_erhua = {"小院儿", "胡同儿", "范儿", "老汉儿", "撒欢儿", "寻老礼儿", "妥妥儿"};
+    not_erhua = {
+        "虐儿", "为儿", "护儿", "瞒儿", "救儿", "替儿", "有儿", "一儿", "我儿", "俺儿", "妻儿",
+        "拐儿", "聋儿", "乞儿", "患儿", "幼儿", "孤儿", "婴儿", "婴幼儿", "连体儿", "脑瘫儿",
+        "流浪儿", "体弱儿", "混血儿", "蜜雪儿", "舫儿", "祖儿", "美儿", "应采儿", "可儿", "侄儿",
+        "孙儿", "侄孙儿", "女儿", "男儿", "红孩儿", "花儿", "虫儿", "马儿", "鸟儿", "猪儿", "猫儿",
+        "狗儿"
+    };
+
+    must_not_neural_tone_words = {"男子", "女子", "分子", "原子", "量子", "莲子", "石子", "瓜子", "电子"};
+    // words that require neutral-tone handling
+    must_neural_tone_words = {
+        "麻烦", "麻利", "鸳鸯", "高粱", "骨头", "骆驼", "马虎", "首饰", "馒头", "馄饨", "风筝",
+        "难为", "队伍", "阔气", "闺女", "门道", "锄头", "铺盖", "铃铛", "铁匠", "钥匙", "里脊",
+        "里头", "部分", "那么", "道士", "造化", "迷糊", "连累", "这么", "这个", "运气", "过去",
+        "软和", "转悠", "踏实", "跳蚤", "跟头", "趔趄", "财主", "豆腐", "讲究", "记性", "记号",
+        "认识", "规矩", "见识", "裁缝", "补丁", "衣裳", "衣服", "衙门", "街坊", "行李", "行当",
+        "蛤蟆", "蘑菇", "薄荷", "葫芦", "葡萄", "萝卜", "荸荠", "苗条", "苗头", "苍蝇", "芝麻",
+        "舒服", "舒坦", "舌头", "自在", "膏药", "脾气", "脑袋", "脊梁", "能耐", "胳膊", "胭脂",
+        "胡萝", "胡琴", "胡同", "聪明", "耽误", "耽搁", "耷拉", "耳朵", "老爷", "老实", "老婆",
+        "老头", "老太", "翻腾", "罗嗦", "罐头", "编辑", "结实", "红火", "累赘", "糨糊", "糊涂",
+        "精神", "粮食", "簸箕", "篱笆", "算计", "算盘", "答应", "笤帚", "笑语", "笑话", "窟窿",
+        "窝囊", "窗户", "稳当", "稀罕", "称呼", "秧歌", "秀气", "秀才", "福气", "祖宗", "砚台",
+        "码头", "石榴", "石头", "石匠", "知识", "眼睛", "眯缝", "眨巴", "眉毛", "相声", "盘算",
+        "白净", "痢疾", "痛快", "疟疾", "疙瘩", "疏忽", "畜生", "生意", "甘蔗", "琵琶", "琢磨",
+        "琉璃", "玻璃", "玫瑰", "玄乎", "狐狸", "状元", "特务", "牲口", "牙碜", "牌楼", "爽快",
+        "爱人", "热闹", "烧饼", "烟筒", "烂糊", "点心", "炊帚", "灯笼", "火候", "漂亮", "滑溜",
+        "溜达", "温和", "清楚", "消息", "浪头", "活泼", "比方", "正经", "欺负", "模糊", "槟榔",
+        "棺材", "棒槌", "棉花", "核桃", "栅栏", "柴火", "架势", "枕头", "枇杷", "机灵", "本事",
+        "木头", "木匠", "朋友", "月饼", "月亮", "暖和", "明白", "时候", "新鲜", "故事", "收拾",
+        "收成", "提防", "挖苦", "挑剔", "指甲", "指头", "拾掇", "拳头", "拨弄", "招牌", "招呼",
+        "抬举", "护士", "折腾", "扫帚", "打量", "打算", "打点", "打扮", "打听", "打发", "扎实",
+        "扁担", "戒指", "懒得", "意识", "意思", "情形", "悟性", "怪物", "思量", "怎么", "念头",
+        "念叨", "快活", "忙活", "志气", "心思", "得罪", "张罗", "弟兄", "开通", "应酬", "庄稼",
+        "干事", "帮手", "帐篷", "希罕", "师父", "师傅", "巴结", "巴掌", "差事", "工夫", "岁数",
+        "屁股", "尾巴", "少爷", "小气", "小伙", "将就", "对头", "对付", "寡妇", "家伙", "客气",
+        "实在", "官司", "学问", "学生", "字号", "嫁妆", "媳妇", "媒人", "婆家", "娘家", "委屈",
+        "姑娘", "姐夫", "妯娌", "妥当", "妖精", "奴才", "女婿", "头发", "太阳", "大爷", "大方",
+        "大意", "大夫", "多少", "多么", "外甥", "壮实", "地道", "地方", "在乎", "困难", "嘴巴",
+        "嘱咐", "嘟囔", "嘀咕", "喜欢", "喇嘛", "喇叭", "商量", "唾沫", "哑巴", "哈欠", "哆嗦",
+        "咳嗽", "和尚", "告诉", "告示", "含糊", "吓唬", "后头", "名字", "名堂", "合同", "吆喝",
+        "叫唤", "口袋", "厚道", "厉害", "千斤", "包袱", "包涵", "匀称", "勤快", "动静", "动弹",
+        "功夫", "力气", "前头", "刺猬", "刺激", "别扭", "利落", "利索", "利害", "分析", "出息",
+        "凑合", "凉快", "冷战", "冤枉", "冒失", "养活", "关系", "先生", "兄弟", "便宜", "使唤",
+        "佩服", "作坊", "体面", "位置", "似的", "伙计", "休息", "什么", "人家", "亲戚", "亲家",
+        "交情", "云彩", "事情", "买卖", "主意", "丫头", "丧气", "两口", "东西", "东家", "世故",
+        "不由", "不在", "下水", "下巴", "上头", "上司", "丈夫", "丈人", "一辈", "那个", "菩萨",
+        "父亲", "母亲", "咕噜", "邋遢", "费用", "冤家", "甜头", "介绍", "荒唐", "大人", "泥鳅",
+        "幸福", "熟悉", "计划", "扑腾", "蜡烛", "姥爷", "照顾", "喉咙", "吉他", "弄堂", "蚂蚱",
+        "凤凰", "拖沓", "寒碜", "糟蹋", "倒腾", "报复", "逻辑", "盘缠", "喽啰", "牢骚", "咖喱",
+        "扫把", "惦记"
+    };
+
+
+    // build the word -> phoneme dict
+    if (0 != GenDict(_word2phone_path, word_phone_map)) {
+        LOG(ERROR) << "Generate word2phone dict failed";
+        return -1;
+    }
+
+    // build the phoneme -> phoneme-id dict
+    if (0 != GenDict(_phone2id_path, phone_id_map)) {
+        LOG(ERROR) << "Generate phone2id dict failed";
+        return -1;
+    }
+
+    // build the tone -> tone-id dict
+    if (_seperate_tone == "true") {
+        if (0 != GenDict(_tone2id_path, tone_id_map)) {
+            LOG(ERROR) << "Generate tone2id dict failed";
+            return -1;
+        }
+    }
+
+    // build the traditional -> simplified dict
+    if (0 != GenDict(_trand2simp_path, trand_simp_map)) {
+        LOG(ERROR) << "Generate trand2simp dict failed";
+        return -1;
+    }
+
+    _initialed = true;
+    return 0;
+}
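[Editor's note] init() above wires up cppjieba with the five dictionary files listed in front.conf. For orientation, a minimal standalone sketch of the same cppjieba calls the frontend relies on (Tag for POS-tagged segmentation); the dictionary paths are the ones front.conf points at, so adjust them to your checkout:

```cpp
#include "cppjieba/Jieba.hpp"
#include <iostream>
#include <string>
#include <utility>
#include <vector>

int main() {
    cppjieba::Jieba jieba("./dict/jieba/jieba.dict.utf8",
                          "./dict/jieba/hmm_model.utf8",
                          "./dict/jieba/user.dict.utf8",
                          "./dict/jieba/idf.utf8",
                          "./dict/jieba/stop_words.utf8");
    std::vector<std::pair<std::string, std::string>> tags;  // (word, POS)
    jieba.Tag("你好,欢迎使用语音合成服务", tags);
    for (const auto &t : tags) std::cout << t.first << "/" << t.second << " ";
    return 0;
}
```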
"费用", "冤家", "甜头", "介绍", "荒唐", "大人", "泥鳅", + "幸福", "熟悉", "计划", "扑腾", "蜡烛", "姥爷", "照顾", "喉咙", "吉他", "弄堂", "蚂蚱", + "凤凰", "拖沓", "寒碜", "糟蹋", "倒腾", "报复", "逻辑", "盘缠", "喽啰", "牢骚", "咖喱", + "扫把", "惦记" + }; + + + // 生成词典(词到音素的映射) + if (0 != GenDict(_word2phone_path, word_phone_map)) { + LOG(ERROR) << "Genarate word2phone dict failed"; + return -1; + } + + // 生成音素字典(音素到音素id的映射) + if (0 != GenDict(_phone2id_path, phone_id_map)) { + LOG(ERROR) << "Genarate phone2id dict failed"; + return -1; + } + + // 生成音调字典(音调到音调id的映射) + if (_seperate_tone == "true") { + if (0 != GenDict(_tone2id_path, tone_id_map)) { + LOG(ERROR) << "Genarate tone2id dict failed"; + return -1; + } + } + + // 生成繁简字典(繁体到简体id的映射) + if (0 != GenDict(_trand2simp_path, trand_simp_map)) { + LOG(ERROR) << "Genarate trand2simp dict failed"; + return -1; + } + + _initialed = true; + return 0; +} + +int FrontEngineInterface::ReadConfFile() { + std::ifstream is(_conf_file.c_str(), std::ifstream::in); + if (!is.good()) { + LOG(ERROR) << "Cannot open config file: " << _conf_file; + return -1; + } + std::string line, key, value; + while (std::getline(is, line)) { + if (line.substr(0, 2) == "--") { + size_t pos = line.find_first_of("=", 0); + std::string key = line.substr(2, pos-2); + std::string value = line.substr(pos + 1); + conf_map[key] = value; + LOG(INFO) << "Key: " << key << "; Value: " << value; + } + } + + // jieba conf path + _jieba_dict_path = conf_map["jieba_dict_path"]; + _jieba_hmm_path = conf_map["jieba_hmm_path"]; + _jieba_user_dict_path = conf_map["jieba_user_dict_path"]; + _jieba_idf_path = conf_map["jieba_idf_path"]; + _jieba_stop_word_path = conf_map["jieba_stop_word_path"]; + + // dict path + _seperate_tone = conf_map["seperate_tone"]; + _word2phone_path = conf_map["word2phone_path"]; + _phone2id_path = conf_map["phone2id_path"]; + _tone2id_path = conf_map["tone2id_path"]; + _trand2simp_path = conf_map["trand2simpd_path"]; + + return 0; +} + +int FrontEngineInterface::Trand2Simp(const std::wstring &sentence, std::wstring &sentence_simp) { + //sentence_simp = sentence; + for(int i = 0; i < sentence.length(); i++) { + std::wstring temp(1, sentence[i]); + std::string sigle_word = ppspeech::wstring2utf8string(temp); + // 单个字是否在繁转简的字典里 + if(trand_simp_map.find(sigle_word) == trand_simp_map.end()) { + sentence_simp += temp; + } else { + sentence_simp += (ppspeech::utf8string2wstring(trand_simp_map[sigle_word])); + } + } + + return 0; +} + +int FrontEngineInterface::GenDict(const std::string &dict_file, std::map &map) { + std::ifstream is(dict_file.c_str(), std::ifstream::in); + if (!is.good()) { + LOG(ERROR) << "Cannot open dict file: " << dict_file; + return -1; + } + std::string line, key, value; + while (std::getline(is, line)) { + size_t pos = line.find_first_of(" ", 0); + key = line.substr(0, pos); + value = line.substr(pos + 1); + map[key] = value; + } + return 0; +} + +int FrontEngineInterface::GetSegResult(std::vector> &seg, + std::vector &seg_words) { + std::vector> ::iterator iter; + for(iter=seg.begin(); iter!=seg.end(); iter++) { + seg_words.push_back((*iter).first); + } + return 0; +} + +int FrontEngineInterface::GetSentenceIds(const std::string &sentence, std::vector &phoneids, std::vector &toneids) { + std::vector> cut_result; //分词结果包含词和词性 + if (0 != Cut(sentence, cut_result)) { + LOG(ERROR) << "Cut sentence: \"" << sentence << "\" failed"; + return -1; + } + + if (0 != GetWordsIds(cut_result, phoneids, toneids)) { + LOG(ERROR) << "Get words phoneids failed"; + return -1; + } + return 0; +} + +int 
+
+int FrontEngineInterface::GetWordsIds(const std::vector<std::pair<std::string, std::string>> &cut_result, std::vector<int> &phoneids,
+                                      std::vector<int> &toneids) {
+    std::string word;
+    std::string pos;
+    std::vector<std::string> word_initials;
+    std::vector<std::string> word_finals;
+    std::string phone;
+    for (int i = 0; i < cut_result.size(); i++) {
+        word = cut_result[i].first;
+        pos = cut_result[i].second;
+        if (std::find(_punc_omit.begin(), _punc_omit.end(), word) == _punc_omit.end()) {  // not an ignorable punctuation mark
+            word_initials = {};
+            word_finals = {};
+            phone = "";
+            // check whether the word is in the punctuation set
+            if (std::find(_punc.begin(), _punc.end(), word) == _punc.end()) {  // text
+                // get the word's initials and finals
+                if (0 != GetInitialsFinals(word, word_initials, word_finals)) {
+                    LOG(ERROR) << "Generate the word_initials and word_finals of " << word << " failed";
+                    return -1;
+                }
+
+                // apply tone modifications
+                if (0 != ModifyTone(word, pos, word_finals)) {
+                    LOG(ERROR) << "Failed to modify tone.";
+                }
+
+                // apply erhua modifications
+                std::vector<std::vector<std::string>> new_initals_finals = MergeErhua(word_initials, word_finals, word, pos);
+                word_initials = new_initals_finals[0];
+                word_finals = new_initals_finals[1];
+
+                // merge initials and finals into phonemes
+                assert(word_initials.size() == word_finals.size());
+                std::string temp_phone;
+                for (int j = 0; j < word_initials.size(); j++) {
+                    if (word_initials[j] != "") {
+                        temp_phone = word_initials[j] + " " + word_finals[j];
+                    } else {
+                        temp_phone = word_finals[j];
+                    }
+                    if (j == 0) {
+                        phone += temp_phone;
+                    } else {
+                        phone += (" " + temp_phone);
+                    }
+                }
+            } else {  // punctuation
+                if (_seperate_tone == "true") {
+                    phone = "sp0";  // speedyspeech
+                } else {
+                    phone = "sp";  // fastspeech2
+                }
+            }
+
+            // phoneme -> phoneme id
+            if (0 != Phone2Phoneid(phone, phoneids, toneids)) {
+                LOG(ERROR) << "Generate the phone id of " << word << " failed";
+                return -1;
+            }
+        }
+    }
+
+    return 0;
+}
+
+int FrontEngineInterface::Cut(const std::string &sentence, std::vector<std::pair<std::string, std::string>> &cut_result) {
+    std::vector<std::pair<std::string, std::string>> cut_result_jieba;
+
+    // jieba word segmentation
+    _jieba->Tag(sentence, cut_result_jieba);
+
+    // post-process the segmentation result
+    if (0 != MergeforModify(cut_result_jieba, cut_result)) {
+        LOG(ERROR) << "Failed to modify for word segmentation result.";
+        return -1;
+    }
+
+    return 0;
+}
+
+int FrontEngineInterface::GetPhone(const std::string &word, std::string &phone) {
+    // if the word is not in the dict, re-segment it with CutAll
+    if (word_phone_map.find(word) == word_phone_map.end()) {
+        std::vector<std::string> wordcut;
+        _jieba->CutAll(word, wordcut);
+        phone = word_phone_map[wordcut[0]];
+        for (int i = 1; i < wordcut.size(); i++) {
+            phone += (" " + word_phone_map[wordcut[i]]);
+        }
+    } else {
+        phone = word_phone_map[word];
+    }
+
+    return 0;
+}
+
+int FrontEngineInterface::Phone2Phoneid(const std::string &phone, std::vector<int> &phoneid, std::vector<int> &toneid) {
+    std::vector<std::string> phone_vec;
+    phone_vec = absl::StrSplit(phone, " ");
+    std::string temp_phone;
+    for (int i = 0; i < phone_vec.size(); i++) {
+        temp_phone = phone_vec[i];
+        if (_seperate_tone == "true") {
+            phoneid.push_back(atoi((phone_id_map[temp_phone.substr(0, temp_phone.length() - 1)]).c_str()));
+            toneid.push_back(atoi((tone_id_map[temp_phone.substr(temp_phone.length() - 1, temp_phone.length())]).c_str()));
+        } else {
+            phoneid.push_back(atoi((phone_id_map[temp_phone]).c_str()));
+        }
+    }
+    return 0;
+}
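[Editor's note] Phone2Phoneid above relies on a convention used throughout this file: a final carries its tone as a trailing digit ("ao3" is third tone), and some checks compare the raw character code (ASCII 51 is '3', 48 is '0'). A tiny helper equivalent to the per-syllable test that AllToneThree performs below:

```cpp
#include <string>

// "ao3" -> true; "ao4" or "sp" -> false. Same test as `back() == 51`.
bool IsToneThree(const std::string &final_) {
    return !final_.empty() && final_.back() == '3';
}
```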
+
+// check via the finals whether every syllable of the word is read in the
+// third tone; true means the whole word is third tone
+bool FrontEngineInterface::AllToneThree(const std::vector<std::string> &finals) {
+    bool flags = true;
+    for (int i = 0; i < finals.size(); i++) {
+        if ((int)finals[i].back() != 51) {  // ASCII 51 is '3': not third tone
+            flags = false;
+        }
+    }
+    return flags;
+}
+
+// check whether the word is a reduplication (two identical characters)
+bool FrontEngineInterface::IsReduplication(const std::string &word) {
+    bool flags = false;
+    std::wstring word_wstr = ppspeech::utf8string2wstring(word);
+    int len = word_wstr.length();
+    if (len == 2 && word_wstr[0] == word_wstr[1]) {
+        flags = true;
+    }
+    return flags;
+}
+
+// get the initials and finals of a word; word_initials holds the initials,
+// word_finals holds the finals
+int FrontEngineInterface::GetInitialsFinals(const std::string &word, std::vector<std::string> &word_initials, std::vector<std::string> &word_finals) {
+    std::string phone;
+    GetPhone(word, phone);  // phonemes of the word
+    std::vector<std::string> phone_vec = absl::StrSplit(phone, " ");
+    // each character maps to 1 or 2 phonemes; start is the index of the
+    // current character's first phoneme
+    int start = 0;
+    while (start < phone_vec.size()) {
+        if (phone_vec[start] == "sp" || phone_vec[start] == "sp0") {
+            start += 1;
+        }
+        // if the last char is not a digit, or the digit is 0 (ASCII 48),
+        // this phoneme is an initial and the next one is the final
+        else if (isdigit(phone_vec[start].back()) == 0 || (int)phone_vec[start].back() == 48) {
+            word_initials.push_back(phone_vec[start]);
+            word_finals.push_back(phone_vec[start + 1]);
+            start += 2;
+        } else {
+            word_initials.push_back("");
+            word_finals.push_back(phone_vec[start]);
+            start += 1;
+        }
+    }
+
+    assert(word_finals.size() == ppspeech::utf8string2wstring(word).length() && word_finals.size() == word_initials.size());
+
+    return 0;
+}
+
+// get the finals of a word
+int FrontEngineInterface::GetFinals(const std::string &word, std::vector<std::string> &word_finals) {
+    std::vector<std::string> word_initials;
+    if (0 != GetInitialsFinals(word, word_initials, word_finals)) {
+        LOG(ERROR) << "Failed to get word finals";
+        return -1;
+    }
+
+    return 0;
+}
+
+int FrontEngineInterface::Word2WordVec(const std::string &word, std::vector<std::wstring> &wordvec) {
+    std::wstring word_wstr = ppspeech::utf8string2wstring(word);
+    for (int i = 0; i < word_wstr.length(); i++) {
+        std::wstring word_sigle(1, word_wstr[i]);
+        wordvec.push_back(word_sigle);
+    }
+    return 0;
+}
+
+// note by yuantian01: re-segment a word into two sub-words,
+// e.g. 小雨伞 --> 小 雨伞 or 小雨 伞
+int FrontEngineInterface::SplitWord(const std::string &word, std::vector<std::string> &new_word_vec) {
+    std::vector<std::string> word_vec;
+    std::string second_subword;
+    _jieba->CutForSearch(word, word_vec);
+    // sort sub-words by length
+    std::sort(word_vec.begin(), word_vec.end(), [](std::string a, std::string b) { return a.size() > b.size(); });
+    std::string first_subword = word_vec[0];  // take the first sub-word after sorting
+    int first_begin_idx = word.find_first_of(first_subword);
+    if (first_begin_idx == 0) {
+        second_subword = word.substr(first_subword.length());
+        new_word_vec.push_back(first_subword);
+        new_word_vec.push_back(second_subword);
+    } else {
+        second_subword = word.substr(0, word.length() - first_subword.length());
+        new_word_vec.push_back(second_subword);
+        new_word_vec.push_back(first_subword);
+    }
+
+    return 0;
+}
+
+
+// example: 不 一起 --> 不一起
+std::vector<std::pair<std::string, std::string>> FrontEngineInterface::MergeBu(std::vector<std::pair<std::string, std::string>> &seg_result) {
+    std::vector<std::pair<std::string, std::string>> result;
+    std::string word;
+    std::string pos;
+    std::string last_word = "";
+
+    for (int i = 0; i < seg_result.size(); i++) {
+        word = seg_result[i].first;
+        pos = seg_result[i].second;
+        if (last_word == "不") {
+            word = last_word + word;
+        }
+        if (word != "不") {
+            result.push_back(make_pair(word, pos));
+        }
+        last_word = word;
+    }
+
+    if (last_word == "不") {
+        result.push_back(make_pair(last_word, "d"));
+        last_word = "";
+    }
+
+    return result;
+}
+
+std::vector<std::pair<std::string, std::string>> FrontEngineInterface::Mergeyi(std::vector<std::pair<std::string, std::string>> &seg_result) {
+    std::vector<std::pair<std::string, std::string>> result_temp;
+    std::string word;
+    std::string pos;
+
+    // function 1 example: 听 一 听 --> 听一听
+    for (int i = 0; i < seg_result.size(); i++) {
+        word = seg_result[i].first;
+        pos = seg_result[i].second;
+        if ((i - 1 >= 0) && (word == "一") && (i + 1 < seg_result.size()) &&
+            (seg_result[i - 1].first == seg_result[i + 1].first) && seg_result[i - 1].second == "v") {
+            result_temp[i - 1].first = result_temp[i - 1].first + "一" + result_temp[i - 1].first;
+        } else {
+            if ((i - 2 >= 0) && (seg_result[i - 1].first == "一") && (seg_result[i - 2].first == word) && (pos == "v")) {
+                continue;
+            } else {
+                result_temp.push_back(make_pair(word, pos));
+            }
+        }
+    }
+
+    // function 2 example: 一 你 --> 一你
+    std::vector<std::pair<std::string, std::string>> result = {};
+    for (int j = 0; j < result_temp.size(); j++) {
+        word = result_temp[j].first;
+        pos = result_temp[j].second;
+        if ((result.size() != 0) && (result.back().first == "一")) {
+            result.back().first = result.back().first + word;
+        } else {
+            result.push_back(make_pair(word, pos));
+        }
+    }
+
+    return result;
+}
+
+// example: 你 你 --> 你你
+std::vector<std::pair<std::string, std::string>> FrontEngineInterface::MergeReduplication(std::vector<std::pair<std::string, std::string>> &seg_result) {
+    std::vector<std::pair<std::string, std::string>> result;
+    std::string word;
+    std::string pos;
+
+    for (int i = 0; i < seg_result.size(); i++) {
+        word = seg_result[i].first;
+        pos = seg_result[i].second;
+        if ((result.size() != 0) && (word == result.back().first)) {
+            result.back().first = result.back().first + seg_result[i].first;
+        } else {
+            result.push_back(make_pair(word, pos));
+        }
+    }
+
+    return result;
+}
+
+// the first and the second words are all_tone_three
+std::vector<std::pair<std::string, std::string>> FrontEngineInterface::MergeThreeTones(std::vector<std::pair<std::string, std::string>> &seg_result) {
+    std::vector<std::pair<std::string, std::string>> result;
+    std::string word;
+    std::string pos;
+    std::vector<std::vector<std::string>> finals;  // finals of each word
+    std::vector<std::string> word_final;
+    std::vector<bool> merge_last(seg_result.size(), false);
+
+    // check whether the last segment is punctuation
+    // (punctuation has no initials or finals)
+    int word_num = seg_result.size() - 1;
+    if (std::find(_punc.begin(), _punc.end(), seg_result[word_num].first) == _punc.end()) {  // the last segment is not punctuation
+        word_num += 1;
+    }
+
+    // collect the finals
+    for (int i = 0; i < word_num; i++) {
+        word_final = {};
+        word = seg_result[i].first;
+        pos = seg_result[i].second;
+        if (std::find(_punc_omit.begin(), _punc_omit.end(), word) == _punc_omit.end()) {  // not ignorable punctuation, i.e. text
+            if (0 != GetFinals(word, word_final)) {
+                LOG(ERROR) << "Failed to get the final of word.";
+            }
+        }
+
+        finals.push_back(word_final);
+    }
+    assert(word_num == finals.size());
+
+    // merge adjacent words that are read entirely in the third tone
+    for (int i = 0; i < word_num; i++) {
+        word = seg_result[i].first;
+        pos = seg_result[i].second;
+        if (i - 1 >= 0 && AllToneThree(finals[i - 1]) && AllToneThree(finals[i]) && !merge_last[i - 1]) {
+            // if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi
+            if (!IsReduplication(seg_result[i - 1].first) &&
+                (ppspeech::utf8string2wstring(seg_result[i - 1].first)).length() + (ppspeech::utf8string2wstring(word)).length() <= 3) {
+                result.back().first = result.back().first + seg_result[i].first;
+                merge_last[i] = true;
+            } else {
+                result.push_back(make_pair(word, pos));
+            }
+        } else {
+            result.push_back(make_pair(word, pos));
+        }
+    }
+
+    // append the punctuation segment back
+    if (word_num < seg_result.size()) {
+        result.push_back(make_pair(seg_result[word_num].first, seg_result[word_num].second));
+    }
+
+    return result;
+}
+
+// the last char of first word and the first char of second word is tone_three
+std::vector<std::pair<std::string, std::string>> FrontEngineInterface::MergeThreeTones2(std::vector<std::pair<std::string, std::string>> &seg_result) {
+    std::vector<std::pair<std::string, std::string>> result;
+    std::string word;
+    std::string pos;
+    std::vector<std::vector<std::string>> finals;  // finals of each word
+    std::vector<std::string> word_final;
+    std::vector<bool> merge_last(seg_result.size(), false);
+
+    // check whether the last segment is punctuation
+    int word_num = seg_result.size() - 1;
+    if (std::find(_punc.begin(), _punc.end(), seg_result[word_num].first) == _punc.end()) {  // the last segment is not punctuation
+        word_num += 1;
+    }
+
+    // collect the finals
+    for (int i = 0; i < word_num; i++) {
+        word_final = {};
+        word = seg_result[i].first;
+        pos = seg_result[i].second;
+        // for text, get the finals; skip ignorable punctuation such as quotes
+        if (std::find(_punc_omit.begin(), _punc_omit.end(), word) == _punc_omit.end()) {
+            if (0 != GetFinals(word, word_final)) {
+                LOG(ERROR) << "Failed to get the final of word.";
+            }
+        }
+
+        finals.push_back(word_final);
+    }
+    assert(word_num == finals.size());
+
+    // merge at third-tone boundaries
+    for (int i = 0; i < word_num; i++) {
+        word = seg_result[i].first;
+        pos = seg_result[i].second;
+        if (i - 1 >= 0 && !finals[i - 1].empty() && absl::EndsWith(finals[i - 1].back(), "3") == true &&
+            !finals[i].empty() && absl::EndsWith(finals[i].front(), "3") == true && !merge_last[i - 1]) {
+            // if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi
+            if (!IsReduplication(seg_result[i - 1].first) &&
+                (ppspeech::utf8string2wstring(seg_result[i - 1].first)).length() + ppspeech::utf8string2wstring(word).length() <= 3) {
+                result.back().first = result.back().first + seg_result[i].first;
+                merge_last[i] = true;
+            } else {
+                result.push_back(make_pair(word, pos));
+            }
+        } else {
+            result.push_back(make_pair(word, pos));
+        }
+    }
+
+    // append the punctuation segment back
+    if (word_num < seg_result.size()) {
+        result.push_back(make_pair(seg_result[word_num].first, seg_result[word_num].second));
+    }
+
+    return result;
+}
+
+// example: 吃饭 儿 --> 吃饭儿
+std::vector<std::pair<std::string, std::string>> FrontEngineInterface::MergeEr(std::vector<std::pair<std::string, std::string>> &seg_result) {
+    std::vector<std::pair<std::string, std::string>> result;
+    std::string word;
+    std::string pos;
+
+    for (int i = 0; i < seg_result.size(); i++) {
+        word = seg_result[i].first;
+        pos = seg_result[i].second;
+        if ((i - 1 >= 0) && (word == "儿")) {
+            result.back().first = result.back().first + seg_result[i].first;
+        } else {
+            result.push_back(make_pair(word, pos));
+        }
+    }
+
+    return result;
+}
+
+int FrontEngineInterface::MergeforModify(std::vector<std::pair<std::string, std::string>> &seg_word_type,
+                                         std::vector<std::pair<std::string, std::string>> &modify_seg_word_type) {
+    std::vector<std::string> seg_result;
+    GetSegResult(seg_word_type, seg_result);
+    LOG(INFO) << "Before merge, seg result is: " << limonp::Join(seg_result.begin(), seg_result.end(), "/");
+
+    modify_seg_word_type = MergeBu(seg_word_type);
+    modify_seg_word_type = Mergeyi(modify_seg_word_type);
+    modify_seg_word_type = MergeReduplication(modify_seg_word_type);
+    modify_seg_word_type = MergeThreeTones(modify_seg_word_type);
+    modify_seg_word_type = MergeThreeTones2(modify_seg_word_type);
+    modify_seg_word_type = MergeEr(modify_seg_word_type);
+
+    seg_result = {};
+    GetSegResult(modify_seg_word_type, seg_result);
+    LOG(INFO) << "After merge, seg result is: " << limonp::Join(seg_result.begin(), seg_result.end(), "/");
+
+    return 0;
+}
不怕 b u4 --> b u2: replace the last character of the final with 2
+        for(int i = 0; i < wordvec.size(); i++) {
+            if(wordvec[i] == bu && i + 1 < wordvec.size() &&
+               absl::EndsWith(finals[i + 1], "4") == true) {
+                finals[i] = finals[i].replace(finals[i].length() - 1, 1, "2");
+            }
+        }
+    }
+
+    return 0;
+}
+
+
+int FrontEngineInterface::YiSandhi(const std::string &word, std::vector<std::string> &finals) {
+    std::wstring yi = L"一";
+    std::vector<std::wstring> wordvec;
+    // convert the word into a vector of single characters
+    if(0 != Word2WordVec(word, wordvec)) {
+        LOG(ERROR) << "Failed to get word vector";
+        return -1;
+    }
+
+    // case 1: 一 in number sequences, e.g. 一零零, 二一零
+    std::wstring num_wstr = L"零一二三四五六七八九";
+    std::wstring word_wstr = ppspeech::utf8string2wstring(word);
+    if(word_wstr.find(yi) != word_wstr.npos && wordvec.back() != yi) {
+        int flags = 0;
+        for(int j = 0; j < wordvec.size(); j++) {
+            if(num_wstr.find(wordvec[j]) == num_wstr.npos) {
+                flags = -1;
+                break;
+            }
+        }
+        if(flags == 0) {
+            return 0;
+        }
+    } else if(wordvec.size() == 3 && wordvec[1] == yi && wordvec[0] == wordvec[2]) {
+        // "一" between reduplicated words should be yi5, e.g. 看一看
+        finals[1] = finals[1].replace(finals[1].length() - 1, 1, "5");
+    } else if(wordvec.size() >= 2 && wordvec[0] == L"第" && wordvec[1] == yi) {    // ordinal prefix, e.g. 第一
+        finals[1] = finals[1].replace(finals[1].length() - 1, 1, "1");
+    } else {
+        for(int i = 0; i < wordvec.size(); i++) {
+            if(wordvec[i] == yi && i + 1 < wordvec.size()) {
+                if(absl::EndsWith(finals[i + 1], "4") == true) {
+                    // "一" before tone4 should be yi2, e.g. 一段
+                    finals[i] = finals[i].replace(finals[i].length() - 1, 1, "2");
+                } else {
+                    // "一" before non-tone4 should be yi4, e.g. 一天
+                    finals[i] = finals[i].replace(finals[i].length() - 1, 1, "4");
+                }
+            }
+        }
+    }
+
+    return 0;
+}
+
+int FrontEngineInterface::NeuralSandhi(const std::string &word, const std::string &pos, std::vector<std::string> &finals) {
+    std::wstring word_wstr = ppspeech::utf8string2wstring(word);
+    std::vector<std::wstring> wordvec;
+    // convert the word into a vector of single characters
+    if(0 != Word2WordVec(word, wordvec)) {
+        LOG(ERROR) << "Failed to get word vector";
+        return -1;
+    }
+    int word_num = wordvec.size();
+    assert(word_num == word_wstr.length());
+
+    // case 1: reduplication words for n. and v., e.g. 奶奶, 试试, 旺旺
+    for(int j = 0; j < wordvec.size(); j++) {
+        std::string inits = "nva";    // noun / verb / adjective POS tags
+        if(j - 1 >= 0 && wordvec[j] == wordvec[j - 1] && inits.find(pos[0]) != inits.npos) {
+            finals[j] = finals[j].replace(finals[j].length() - 1, 1, "5");
+        }
+    }
+
+    // case 2: words ending with the particles and suffixes below
+    std::wstring yuqici = L"吧呢哈啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶";    // modal particles
+    std::wstring de = L"的地得";
+    std::wstring le = L"了着过";
+    std::vector<std::string> le_pos = {"ul", "uz", "ug"};
+    std::wstring men = L"们子";
+    std::vector<std::string> men_pos = {"r", "n"};
+    std::wstring weizhi = L"上下里";    // locatives
+    std::vector<std::string> weizhi_pos = {"s", "l", "f"};
+    std::wstring dong = L"来去";
+    std::wstring fangxiang = L"上下进出回过起开";    // directional complements
+    std::wstring ge = L"个";
+    std::wstring xiushi = L"几有两半多各整每做是零一二三四五六七八九";    // modifiers before 个
+    auto ge_idx = word_wstr.find_first_of(ge);    // first position of 个
+
+    if(word_num >= 1 && yuqici.find(wordvec.back()) != yuqici.npos) {
+        finals.back() = finals.back().replace(finals.back().length() - 1, 1, "5");
+    } else if(word_num >= 1 && de.find(wordvec.back()) != de.npos) {
+        finals.back() = finals.back().replace(finals.back().length() - 1, 1, "5");
+    } else if(word_num == 1 && le.find(wordvec[0]) != le.npos && find(le_pos.begin(), le_pos.end(), pos) != le_pos.end()) {
+        finals.back() = finals.back().replace(finals.back().length() - 1, 1, "5");
+    } else if(word_num > 1 && men.find(wordvec.back()) != men.npos && find(men_pos.begin(), men_pos.end(), pos) != men_pos.end()
+              && find(must_not_neural_tone_words.begin(), must_not_neural_tone_words.end(), word) == must_not_neural_tone_words.end()) {
+        finals.back() = finals.back().replace(finals.back().length() - 1, 1, "5");
+    } else if(word_num > 1 && weizhi.find(wordvec.back()) != weizhi.npos && find(weizhi_pos.begin(), weizhi_pos.end(), pos) != weizhi_pos.end()) {
+        finals.back() = finals.back().replace(finals.back().length() - 1, 1, "5");
+    } else if(word_num > 1 && dong.find(wordvec.back()) != dong.npos && fangxiang.find(wordvec[word_num - 2]) != fangxiang.npos) {
+        finals.back() = finals.back().replace(finals.back().length() - 1, 1, "5");
+    }
+    // case 3: neutral tone for 个 preceded by a modifier, or the single word 个
+    else if((ge_idx != word_wstr.npos && ge_idx >= 1 && xiushi.find(wordvec[ge_idx - 1]) != xiushi.npos)
+            || word_wstr == ge) {
+        finals.back() = finals.back().replace(finals.back().length() - 1, 1, "5");
+    } else {
+        if(find(must_neural_tone_words.begin(), must_neural_tone_words.end(), word) != must_neural_tone_words.end()
+           || (word_num >= 2 && find(must_neural_tone_words.begin(), must_neural_tone_words.end(), ppspeech::wstring2utf8string(word_wstr.substr(word_num - 2))) != must_neural_tone_words.end())) {
+            finals.back() = finals.back().replace(finals.back().length() - 1, 1, "5");
+        }
+    }
+
+    // re-segment the word into shorter sub-words
+    std::vector<std::string> word_list;
+    if(0 != SplitWord(word, word_list)) {
+        LOG(ERROR) << "Failed to split word.";
+        return -1;
+    }
+    // build the corresponding finals list for each sub-word
+    std::vector<std::vector<std::string>> finals_list;
+    std::vector<std::string> finals_temp;
+    finals_temp.assign(finals.begin(), finals.begin() + ppspeech::utf8string2wstring(word_list[0]).length());
+    finals_list.push_back(finals_temp);
+    finals_temp.assign(finals.begin() + ppspeech::utf8string2wstring(word_list[0]).length(), finals.end());
+    finals_list.push_back(finals_temp);
+
+    finals = {};
+    for(int i = 0; i < word_list.size(); i++) {
+        std::wstring temp_wstr = ppspeech::utf8string2wstring(word_list[i]);
+        if((find(must_neural_tone_words.begin(), must_neural_tone_words.end(), word_list[i]) != must_neural_tone_words.end())
+           || (temp_wstr.length() >= 2 && find(must_neural_tone_words.begin(), must_neural_tone_words.end(), 
ppspeech::wstring2utf8string(temp_wstr.substr(temp_wstr.length() - 2))) != must_neural_tone_words.end())) { + finals_list[i].back() = finals_list[i].back().replace(finals_list[i].back().length() - 1, 1, "5"); + } + finals.insert(finals.end(), finals_list[i].begin(), finals_list[i].end()); + } + + return 0; +} + +int FrontEngineInterface::ThreeSandhi(const std::string &word, std::vector &finals) { + std::wstring word_wstr = ppspeech::utf8string2wstring(word); + std::vector> finals_list; + std::vector finals_temp; + std::vector wordvec; + // 一个词转成向量形式 + if(0 != Word2WordVec(word, wordvec)) { + LOG(ERROR) << "Failed to get word vector"; + return -1; + } + int word_num = wordvec.size(); + assert(word_num == word_wstr.length()); + + if(word_num == 2 && AllToneThree(finals)) { + finals[0] = finals[0].replace(finals[0].length() - 1, 1, "2"); + } else if(word_num == 3) { + // 进行进一步分词,把长词切分更短些 + std::vector word_list; + if(0 != SplitWord(word, word_list)) { + LOG(ERROR) << "Failed to split word."; + return -1; + } + if(AllToneThree(finals)) { + std::wstring temp_wstr = ppspeech::utf8string2wstring(word_list[0]); + //disyllabic + monosyllabic, e.g. 蒙古/包 + if(temp_wstr.length() == 2) { + finals[0] = finals[0].replace(finals[0].length() - 1, 1, "2"); + finals[1] = finals[1].replace(finals[1].length() - 1, 1, "2"); + } else if(temp_wstr.length() == 1) { //monosyllabic + disyllabic, e.g. 纸/老虎 + finals[1] = finals[1].replace(finals[1].length() - 1, 1, "2"); + } + } else { + // 创建对应的 韵母列表 + finals_temp = {}; + finals_list = {}; + finals_temp.assign(finals.begin(), finals.begin() + ppspeech::utf8string2wstring(word_list[0]).length()); + finals_list.push_back(finals_temp); + finals_temp.assign(finals.begin() + ppspeech::utf8string2wstring(word_list[0]).length(), finals.end()); + finals_list.push_back(finals_temp); + + finals = {}; + for(int i = 0; i < finals_list.size(); i++) { + // e.g. 
所有/人 + if(AllToneThree(finals_list[i]) && finals_list[i].size() == 2) { + finals_list[i][0] = finals_list[i][0].replace(finals_list[i][0].length() - 1, 1, "2"); + } else if(i == 1 && !(AllToneThree(finals_list[i])) && absl::EndsWith(finals_list[i][0], "3") == true + && absl::EndsWith(finals_list[0].back(), "3") == true) { + finals_list[0].back() = finals_list[0].back().replace(finals_list[0].back().length() - 1, 1, "2"); + } + + } + finals.insert(finals.end(), finals_list[0].begin(), finals_list[0].end()); + finals.insert(finals.end(), finals_list[1].begin(), finals_list[1].end()); + } + + } else if(word_num == 4) { //将成语拆分为两个长度为 2 的单词 + // 创建对应的 韵母列表 + finals_temp = {}; + finals_list = {}; + finals_temp.assign(finals.begin(), finals.begin() + 2); + finals_list.push_back(finals_temp); + finals_temp.assign(finals.begin() + 2, finals.end()); + finals_list.push_back(finals_temp); + + finals = {}; + for(int j = 0; j < finals_list.size(); j++){ + if(AllToneThree(finals_list[j])) { + finals_list[j][0] = finals_list[j][0].replace(finals_list[j][0].length() - 1, 1, "2"); + } + finals.insert(finals.end(), finals_list[j].begin(), finals_list[j].end()); + } + + } + + return 0; +} + +int FrontEngineInterface::ModifyTone(const std::string &word, const std::string &pos, std::vector &finals) { + if((0 != BuSandi(word, finals)) || (0 != YiSandhi(word, finals)) || + (0 != NeuralSandhi(word, pos, finals)) || (0 != ThreeSandhi(word,finals))) { + LOG(ERROR) << "Failed to modify tone of the word: " << word; + return -1; + } + + return 0; +} + +std::vector> FrontEngineInterface::MergeErhua(const std::vector &initials, const std::vector &finals, const std::string &word, const std::string &pos) { + std::vector new_initials = {}; + std::vector new_finals = {}; + std::vector> new_initials_finals; + std::vector specified_pos = {"a", "j", "nr"}; + std::wstring word_wstr = ppspeech::utf8string2wstring(word); + std::vector wordvec; + // 一个词转成向量形式 + if(0 != Word2WordVec(word, wordvec)) { + LOG(ERROR) << "Failed to get word vector"; + } + int word_num = wordvec.size(); + + if((find(must_erhua.begin(), must_erhua.end(), word) == must_erhua.end()) && + ((find(not_erhua.begin(), not_erhua.end(), word) != not_erhua.end()) || (find(specified_pos.begin(), specified_pos.end(), pos) != specified_pos.end()))) { + new_initials_finals.push_back(initials); + new_initials_finals.push_back(finals); + return new_initials_finals; + } + if(finals.size() != word_num) { + new_initials_finals.push_back(initials); + new_initials_finals.push_back(finals); + return new_initials_finals; + } + + assert(finals.size() == word_num); + for(int i = 0; i < finals.size(); i++) { + if(i == finals.size() - 1 && wordvec[i] == L"儿" && (finals[i] == "er2" || finals[i] == "er5") && word_num >= 2 && + find(not_erhua.begin(), not_erhua.end(), ppspeech::wstring2utf8string(word_wstr.substr(word_wstr.length() - 2))) == not_erhua.end() && !new_finals.empty()) { + new_finals.back() = new_finals.back().substr(0, new_finals.back().length()-1) + "r" + new_finals.back().substr(new_finals.back().length()-1); + } else { + new_initials.push_back(initials[i]); + new_finals.push_back(finals[i]); + } + } + new_initials_finals.push_back(new_initials); + new_initials_finals.push_back(new_finals); + + return new_initials_finals; + +} + + +} diff --git a/demos/TTSCppFrontend/src/front/front_interface.h b/demos/TTSCppFrontend/src/front/front_interface.h new file mode 100644 index 000000000..8df026c8d --- /dev/null +++ b/demos/TTSCppFrontend/src/front/front_interface.h @@ -0,0 
+1,156 @@ +#ifndef PADDLE_TTS_SERVING_FRONT_FRONT_INTERFACE_H +#define PADDLE_TTS_SERVING_FRONT_FRONT_INTERFACE_H + +#include +#include +#include +#include +#include +//#include "utils/dir_utils.h" +#include +#include "front/text_normalize.h" +#include "absl/strings/str_split.h" + + +namespace ppspeech { + + class FrontEngineInterface : public TextNormalizer{ + public: + FrontEngineInterface(std::string conf) : _conf_file(conf) { + TextNormalizer(); + _jieba = nullptr; + _initialed = false; + init(); + } + + int init(); + ~FrontEngineInterface() { + + } + + // 读取配置文件 + int ReadConfFile(); + + // 简体转繁体 + int Trand2Simp(const std::wstring &sentence, std::wstring &sentence_simp); + + // 生成字典 + int GenDict(const std::string &file, std::map &map); + + // 由 词+词性的分词结果转为仅包含词的结果 + int GetSegResult(std::vector> &seg, std::vector &seg_words); + + // 生成句子的音素,音调id。如果音素和音调未分开,则 toneids 为空(fastspeech2),反之则不为空(speedyspeech) + int GetSentenceIds(const std::string &sentence, std::vector &phoneids, std::vector &toneids); + + // 根据分词结果获取词的音素,音调id,并对读音进行适当修改 (ModifyTone)。如果音素和音调未分开,则 toneids 为空(fastspeech2),反之则不为空(speedyspeech) + int GetWordsIds(const std::vector> &cut_result, std::vector &phoneids, std::vector &toneids); + + // 结巴分词生成包含词和词性的分词结果,再对分词结果进行适当修改 (MergeforModify) + int Cut(const std::string &sentence, std::vector> &cut_result); + + // 字词到音素的映射,查找字典 + int GetPhone(const std::string &word, std::string &phone); + + // 音素到音素id + int Phone2Phoneid(const std::string &phone, std::vector &phoneid, std::vector &toneids); + + + // 根据韵母判断该词中每个字的读音都为第三声。true表示词中每个字都是第三声 + bool AllToneThree(const std::vector &finals); + + // 判断词是否是叠词 + bool IsReduplication(const std::string &word); + + // 获取每个字词的声母韵母列表 + int GetInitialsFinals(const std::string &word, std::vector &word_initials, std::vector &word_finals); + + // 获取每个字词的韵母列表 + int GetFinals(const std::string &word, std::vector &word_finals); + + // 整个词转成向量形式,向量的每个元素对应词的一个字 + int Word2WordVec(const std::string &word, std::vector &wordvec); + + // 将整个词重新进行 full cut,分词后,各个词会在词典中 + int SplitWord(const std::string &word, std::vector &fullcut_word); + + // 对分词结果进行处理:对包含“不”字的分词结果进行整理 + std::vector> MergeBu(std::vector> &seg_result); + + // 对分词结果进行处理:对包含“一”字的分词结果进行整理 + std::vector> Mergeyi(std::vector> &seg_result); + + // 对分词结果进行处理:对前后相同的两个字进行合并 + std::vector> MergeReduplication(std::vector> &seg_result); + + // 对一个词和后一个词他们的读音均为第三声的两个词进行合并 + std::vector> MergeThreeTones(std::vector> &seg_result); + + // 对一个词的最后一个读音和后一个词的第一个读音为第三声的两个词进行合并 + std::vector> MergeThreeTones2(std::vector> &seg_result); + + // 对分词结果进行处理:对包含“儿”字的分词结果进行整理 + std::vector> MergeEr(std::vector> &seg_result); + + // 对分词结果进行处理、修改 + int MergeforModify(std::vector> &seg_result, std::vector> &merge_seg_result); + + + // 对包含“不”字的相关词音调进行修改 + int BuSandi(const std::string &word, std::vector &finals); + + // 对包含“一”字的相关词音调进行修改 + int YiSandhi(const std::string &word, std::vector &finals); + + // 对一些特殊词(包括量词,语助词等)的相关词音调进行修改 + int NeuralSandhi(const std::string &word, const std::string &pos, std::vector &finals); + + // 对包含第三声的相关词音调进行修改 + int ThreeSandhi(const std::string &word, std::vector &finals); + + // 对字词音调进行处理、修改 + int ModifyTone(const std::string &word, const std::string &pos, std::vector &finals); + + + // 对儿化音进行处理 + std::vector> MergeErhua(const std::vector &initials, const std::vector &finals, const std::string &word, const std::string &pos); + + + + private: + bool _initialed; + cppjieba::Jieba *_jieba; + std::vector _punc; + std::vector _punc_omit; + + std::string _conf_file; + std::map conf_map; + 
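+        // Lookup tables built from the config file (see ReadConfFile/GenDict):
+        // word -> phone sequence, phone -> id, tone -> id, and
+        // traditional -> simplified character mapping.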
std::map word_phone_map; + std::map phone_id_map; + std::map tone_id_map; + std::map trand_simp_map; + + + std::string _jieba_dict_path; + std::string _jieba_hmm_path; + std::string _jieba_user_dict_path; + std::string _jieba_idf_path; + std::string _jieba_stop_word_path; + + std::string _seperate_tone; + std::string _word2phone_path; + std::string _phone2id_path; + std::string _tone2id_path; + std::string _trand2simp_path; + + std::vector must_erhua; + std::vector not_erhua; + + std::vector must_not_neural_tone_words; + std::vector must_neural_tone_words; + + + + }; +} +#endif \ No newline at end of file diff --git a/demos/TTSCppFrontend/src/front/text_normalize.cpp b/demos/TTSCppFrontend/src/front/text_normalize.cpp new file mode 100644 index 000000000..11a493ba9 --- /dev/null +++ b/demos/TTSCppFrontend/src/front/text_normalize.cpp @@ -0,0 +1,462 @@ +#include "front/text_normalize.h" + +namespace ppspeech { + +// 初始化 digits_map and unit_map +int TextNormalizer::InitMap() { + + digits_map["0"] = "零"; + digits_map["1"] = "一"; + digits_map["2"] = "二"; + digits_map["3"] = "三"; + digits_map["4"] = "四"; + digits_map["5"] = "五"; + digits_map["6"] = "六"; + digits_map["7"] = "七"; + digits_map["8"] = "八"; + digits_map["9"] = "九"; + + units_map[1] = "十"; + units_map[2] = "百"; + units_map[3] = "千"; + units_map[4] = "万"; + units_map[8] = "亿"; + + return 0; +} + +// 替换 +int TextNormalizer::Replace(std::wstring &sentence, const int &pos, const int &len, const std::wstring &repstr) { + // 删除原来的 + sentence.erase(pos, len); + // 插入新的 + sentence.insert(pos, repstr); + return 0; + +} + +// 根据标点符号切分句子 +int TextNormalizer::SplitByPunc(const std::wstring &sentence, std::vector &sentence_part) { + std::wstring temp = sentence; + std::wregex reg(L"[:,;。?!,;?!]"); + std::wsmatch match; + + while (std::regex_search (temp, match, reg)) { + sentence_part.push_back(temp.substr(0, match.position(0) + match.length(0))); + Replace(temp, 0, match.position(0) + match.length(0), L""); + } + // 如果最后没有标点符号 + if(temp != L"") { + sentence_part.push_back(temp); + } + return 0; +} + +//数字转文本,10200 - > 一万零二百 +std::string TextNormalizer::CreateTextValue(const std::string &num_str, bool use_zero) { + + std::string num_lstrip = std::string(absl::StripPrefix(num_str, "0")).data(); + int len = num_lstrip.length(); + + if(len == 0) { + return ""; + } else if (len == 1) { + if(use_zero && (len < num_str.length())) { + return digits_map["0"] + digits_map[num_lstrip]; + } else { + return digits_map[num_lstrip]; + } + } else { + int largest_unit = 0; // 最大单位 + std::string first_part; + std::string second_part; + + if (len > 1 and len <= 2) { + largest_unit = 1; + } else if (len > 2 and len <= 3) { + largest_unit = 2; + } else if (len > 3 and len <= 4) { + largest_unit = 3; + } else if (len > 4 and len <= 8) { + largest_unit = 4; + } else if (len > 8) { + largest_unit = 8; + } + + first_part = num_str.substr(0, num_str.length() - largest_unit); + second_part = num_str.substr(num_str.length() - largest_unit); + + return CreateTextValue(first_part, use_zero) + units_map[largest_unit] + CreateTextValue(second_part, use_zero); + } +} + +// 数字一个一个对应,可直接用于年份,电话,手机, +std::string TextNormalizer::SingleDigit2Text(const std::string &num_str, bool alt_one) { + std::string text = ""; + if (alt_one) { + digits_map["1"] = "幺"; + } else { + digits_map["1"] = "一"; + } + + for (size_t i = 0; i < num_str.size(); i++) { + std::string num_int(1, num_str[i]); + if (digits_map.find(num_int) == digits_map.end()) { + LOG(ERROR) << "digits_map doesn't have key: " 
<< num_int;
+        }
+        text += digits_map[num_int];
+    }
+
+    return text;
+}
+
+std::string TextNormalizer::SingleDigit2Text(const std::wstring &num, bool alt_one) {
+    std::string num_str = wstring2utf8string(num);
+    return SingleDigit2Text(num_str, alt_one);
+}
+
+// Convert the number as a whole; usable directly for months, dates and the integer part of a value
+std::string TextNormalizer::MultiDigit2Text(const std::string &num_str, bool alt_one, bool use_zero) {
+    LOG(INFO) << "MultiDigit2Text: alt_one = " << alt_one << ", use_zero = " << use_zero;
+    if (alt_one) {
+        digits_map["1"] = "幺";
+    } else {
+        digits_map["1"] = "一";
+    }
+
+    std::wstring result = utf8string2wstring(CreateTextValue(num_str, use_zero));
+    if (result.length() >= 2) {
+        std::wstring result_0(1, result[0]);
+        std::wstring result_1(1, result[1]);
+        // 一十八 --> 十八: drop the leading 一 before 十
+        if ((result_0 == utf8string2wstring(digits_map["1"])) && (result_1 == utf8string2wstring(units_map[1]))) {
+            return wstring2utf8string(result.substr(1, result.length()));
+        }
+    }
+    return wstring2utf8string(result);
+}
+
+std::string TextNormalizer::MultiDigit2Text(const std::wstring &num, bool alt_one, bool use_zero) {
+    std::string num_str = wstring2utf8string(num);
+    return MultiDigit2Text(num_str, alt_one, use_zero);
+}
+
+// Convert a number, integer or decimal, to text
+std::string TextNormalizer::Digits2Text(const std::string &num_str) {
+    std::string text;
+    std::vector<std::string> integer_decimal;
+    integer_decimal = absl::StrSplit(num_str, ".");
+
+    if(integer_decimal.size() == 1) {    // integer
+        text = MultiDigit2Text(integer_decimal[0]);
+    } else if(integer_decimal.size() == 2) {    // decimal
+        if(integer_decimal[0] == "") {    // decimal without an integer part, e.g. .22
+            text = "点" + SingleDigit2Text(std::string(absl::StripSuffix(integer_decimal[1], "0")));
+        } else {    // regular decimal, e.g. 12.34
+            text = MultiDigit2Text(integer_decimal[0]) + "点" +
+                   SingleDigit2Text(std::string(absl::StripSuffix(integer_decimal[1], "0")));
+        }
+    } else {
+        return "The value does not conform to the numeric format";
+    }
+
+    return text;
+}
+
+std::string TextNormalizer::Digits2Text(const std::wstring &num) {
+    std::string num_str = wstring2utf8string(num);
+    return Digits2Text(num_str);
+}
+
+// date, e.g. 2021年8月18日 --> 二零二一年八月十八日
+int TextNormalizer::ReData(std::wstring &sentence) {
+    std::wregex reg(L"(\\d{4}|\\d{2})年((0?[1-9]|1[0-2])月)?(((0?[1-9])|((1|2)[0-9])|30|31)([日号]))?");
+    std::wsmatch match;
+    std::string rep;
+
+    while (std::regex_search(sentence, match, reg)) {
+        rep = "";
+        rep += SingleDigit2Text(match[1]) + "年";
+        if(match[3] != L"") {
+            rep += MultiDigit2Text(match[3], false, false) + "月";
+        }
+        if(match[5] != L"") {
+            rep += MultiDigit2Text(match[5], false, false) + wstring2utf8string(match[9]);
+        }
+
+        Replace(sentence, match.position(0), match.length(0), utf8string2wstring(rep));
+    }
+
+    return 0;
+}
+
+
+// XX-XX-XX or XX/XX/XX, e.g. 2021/08/18 --> 二零二一年八月十八日
+int TextNormalizer::ReData2(std::wstring &sentence) {
+    std::wregex reg(L"(\\d{4})([- /.])(0[1-9]|1[012])\\2(0[1-9]|[12][0-9]|3[01])");
+    std::wsmatch match;
+    std::string rep;
+
+    while (std::regex_search(sentence, match, reg)) {
+        rep = "";
+        rep += (SingleDigit2Text(match[1]) + "年");
+        rep += (MultiDigit2Text(match[3], false, false) + "月");
+        rep += (MultiDigit2Text(match[4], false, false) + "日");
+        Replace(sentence, match.position(0), match.length(0), utf8string2wstring(rep));
+    }
+
+    return 0;
+}
+
+// XX:XX:XX, e.g. 09:09:02 --> 九点零九分零二秒
+int TextNormalizer::ReTime(std::wstring &sentence) {
+    std::wregex reg(L"([0-1]?[0-9]|2[0-3]):([0-5][0-9])(:([0-5][0-9]))?");
+    std::wsmatch match;
+    std::string rep;
+
+    while (std::regex_search(sentence, match, reg)) {
+        rep = "";
+        rep += 
(MultiDigit2Text(match[1], false, false) + "点"); + if(absl::StartsWith(wstring2utf8string(match[2]), "0")) { + rep += "零"; + } + rep += (MultiDigit2Text(match[2]) + "分"); + if(absl::StartsWith(wstring2utf8string(match[4]), "0")) { + rep += "零"; + } + rep += (MultiDigit2Text(match[4]) + "秒"); + + Replace(sentence, match.position(0), match.length(0), utf8string2wstring(rep)); + } + + return 0; +} + +// 温度,例如:-24.3℃ --> 零下二十四点三度 +int TextNormalizer::ReTemperature(std::wstring &sentence) { + std::wregex reg(L"(-?)(\\d+(\\.\\d+)?)(°C|℃|度|摄氏度)"); + std::wsmatch match; + std::string rep; + std::string sign; + std::vector integer_decimal; + std::string unit; + + while (std::regex_search (sentence, match, reg)) { + match[1] == L"-" ? sign = "负" : sign = ""; + match[4] == L"摄氏度"? unit = "摄氏度" : unit = "度"; + rep = sign + Digits2Text(match[2]) + unit; + + Replace(sentence, match.position(0), match.length(0), utf8string2wstring(rep)); + + } + + return 0; + +} + +// 分数,例如: 1/3 --> 三分之一 +int TextNormalizer::ReFrac(std::wstring &sentence) { + std::wregex reg(L"(-?)(\\d+)/(\\d+)"); + std::wsmatch match; + std::string sign; + std::string rep; + while (std::regex_search (sentence, match, reg)) { + match[1] == L"-" ? sign = "负" : sign = ""; + rep = sign + MultiDigit2Text(match[3]) + "分之" + MultiDigit2Text(match[2]); + Replace(sentence, match.position(0), match.length(0), utf8string2wstring(rep)); + } + + return 0; +} + +// 百分数,例如:45.5% --> 百分之四十五点五 +int TextNormalizer::RePercentage(std::wstring &sentence) { + std::wregex reg(L"(-?)(\\d+(\\.\\d+)?)%"); + std::wsmatch match; + std::string sign; + std::string rep; + std::vector integer_decimal; + + while (std::regex_search (sentence, match, reg)) { + match[1] == L"-" ? sign = "负" : sign = ""; + rep = sign + "百分之" + Digits2Text(match[2]); + + Replace(sentence, match.position(0), match.length(0), utf8string2wstring(rep)); + } + + return 0; +} + +// 手机号码,例如:+86 18883862235 --> 八六幺八八八三八六二二三五 +int TextNormalizer::ReMobilePhone(std::wstring &sentence) { + std::wregex reg(L"(\\d)?((\\+?86 ?)?1([38]\\d|5[0-35-9]|7[678]|9[89])\\d{8})(\\d)?"); + std::wsmatch match; + std::string rep; + std::vector country_phonenum; + + while (std::regex_search (sentence, match, reg)) { + country_phonenum = absl::StrSplit(wstring2utf8string(match[0]), "+"); + rep = ""; + for(int i = 0; i < country_phonenum.size(); i++) { + LOG(INFO) << country_phonenum[i]; + rep += SingleDigit2Text(country_phonenum[i], true); + } + Replace(sentence, match.position(0), match.length(0), utf8string2wstring(rep)); + + } + + return 0; +} + +// 座机号码,例如:010-51093154 --> 零幺零五幺零九三幺五四 +int TextNormalizer::RePhone(std::wstring &sentence) { + std::wregex reg(L"(\\d)?((0(10|2[1-3]|[3-9]\\d{2})-?)?[1-9]\\d{6,7})(\\d)?"); + std::wsmatch match; + std::vector zone_phonenum; + std::string rep; + + while (std::regex_search (sentence, match, reg)) { + rep = ""; + zone_phonenum = absl::StrSplit(wstring2utf8string(match[0]), "-"); + for(int i = 0; i < zone_phonenum.size(); i ++) { + rep += SingleDigit2Text(zone_phonenum[i], true); + } + Replace(sentence, match.position(0), match.length(0), utf8string2wstring(rep)); + } + + return 0; +} + +// 范围,例如:60~90 --> 六十到九十 +int TextNormalizer::ReRange(std::wstring &sentence) { + std::wregex reg(L"((-?)((\\d+)(\\.\\d+)?)|(\\.(\\d+)))[-~]((-?)((\\d+)(\\.\\d+)?)|(\\.(\\d+)))"); + std::wsmatch match; + std::string rep; + std::string sign1; + std::string sign2; + + while (std::regex_search (sentence, match, reg)) { + rep = ""; + match[2] == L"-" ? 
sign1 = "负" : sign1 = ""; + if(match[6] != L"") { + rep += sign1 + Digits2Text(match[6]) + "到"; + } else { + rep += sign1 + Digits2Text(match[3]) + "到"; + } + match[9] == L"-" ? sign2 = "负" : sign2 = ""; + if(match[13] != L"") { + rep += sign2 + Digits2Text(match[13]); + } else { + rep += sign2 + Digits2Text(match[10]); + } + + Replace(sentence, match.position(0), match.length(0), utf8string2wstring(rep)); + } + + return 0; +} + +// 带负号的整数,例如:-10 --> 负十 +int TextNormalizer::ReInterger(std::wstring &sentence) { + std::wregex reg(L"(-)(\\d+)"); + std::wsmatch match; + std::string rep; + while (std::regex_search (sentence, match, reg)) { + rep = "负" + MultiDigit2Text(match[2]); + Replace(sentence, match.position(0), match.length(0), utf8string2wstring(rep)); + } + + return 0; +} + +// 纯小数 +int TextNormalizer::ReDecimalNum(std::wstring &sentence) { + std::wregex reg(L"(-?)((\\d+)(\\.\\d+))|(\\.(\\d+))"); + std::wsmatch match; + std::string sign; + std::string rep; + //std::vector integer_decimal; + while (std::regex_search (sentence, match, reg)) { + match[1] == L"-" ? sign = "负" : sign = ""; + if(match[5] != L"") { + rep = sign + Digits2Text(match[5]); + } else { + rep = sign + Digits2Text(match[2]); + } + + Replace(sentence, match.position(0), match.length(0), utf8string2wstring(rep)); + } + + return 0; +} + +// 正整数 + 量词 +int TextNormalizer::RePositiveQuantifiers(std::wstring &sentence) { + std::wstring common_quantifiers = L"(朵|匹|张|座|回|场|尾|条|个|首|阙|阵|网|炮|顶|丘|棵|只|支|袭|辆|挑|担|颗|壳|窠|曲| \ + 墙|群|腔|砣|座|客|贯|扎|捆|刀|令|打|手|罗|坡|山|岭|江|溪|钟|队|单|双|对|出|口|头|脚|板|跳|枝|件|贴|针|线|管|名|位|身|堂| \ + 课|本|页|家|户|层|丝|毫|厘|分|钱|两|斤|担|铢|石|钧|锱|忽|(千|毫|微)克|毫|厘|(公)分|分|寸|尺|丈|里|寻|常|铺|程|(千|分|厘| \ + 毫|微)米|米|撮|勺|合|升|斗|石|盘|碗|碟|叠|桶|笼|盆|盒|杯|钟|斛|锅|簋|篮|盘|桶|罐|瓶|壶|卮|盏|箩|箱|煲|啖|袋|钵|年|月|日| \ + 季|刻|时|周|天|秒|分|旬|纪|岁|世|更|夜|春|夏|秋|冬|代|伏|辈|丸|泡|粒|颗|幢|堆|条|根|支|道|面|片|张|颗|块|元|(亿|千万|百万| \ + 万|千|百)|(亿|千万|百万|万|千|百|美|)元|(亿|千万|百万|万|千|百|)块|角|毛|分)"; + std::wregex reg(L"(\\d+)([多余几])?" + common_quantifiers); + std::wsmatch match; + std::string rep; + while (std::regex_search (sentence, match, reg)) { + rep = MultiDigit2Text(match[1]); + Replace(sentence, match.position(1), match.length(1), utf8string2wstring(rep)); + } + + return 0; +} + +// 编号类数字,例如: 89757 --> 八九七五七 +int TextNormalizer::ReDefalutNum(std::wstring &sentence) { + std::wregex reg(L"\\d{3}\\d*"); + std::wsmatch match; + while (std::regex_search (sentence, match, reg)) { + Replace(sentence, match.position(0), match.length(0), utf8string2wstring(SingleDigit2Text(match[0]))); + } + + return 0; +} + +int TextNormalizer::ReNumber(std::wstring &sentence) { + std::wregex reg(L"(-?)((\\d+)(\\.\\d+)?)|(\\.(\\d+))"); + std::wsmatch match; + std::string sign; + std::string rep; + while (std::regex_search (sentence, match, reg)) { + match[1] == L"-" ? 
sign = "负" : sign = ""; + if(match[5] != L"") { + rep = sign + Digits2Text(match[5]); + } else { + rep = sign + Digits2Text(match[2]); + } + + Replace(sentence, match.position(0), match.length(0), utf8string2wstring(rep)); + } + return 0; +} + +// 整体正则,按顺序 +int TextNormalizer::SentenceNormalize(std::wstring &sentence) { + ReData(sentence); + ReData2(sentence); + ReTime(sentence); + ReTemperature(sentence); + ReFrac(sentence); + RePercentage(sentence); + ReMobilePhone(sentence); + RePhone(sentence); + ReRange(sentence); + ReInterger(sentence); + ReDecimalNum(sentence); + RePositiveQuantifiers(sentence); + ReDefalutNum(sentence); + ReNumber(sentence); + return 0; +} + + +} \ No newline at end of file diff --git a/demos/TTSCppFrontend/src/front/text_normalize.h b/demos/TTSCppFrontend/src/front/text_normalize.h new file mode 100644 index 000000000..20d502b82 --- /dev/null +++ b/demos/TTSCppFrontend/src/front/text_normalize.h @@ -0,0 +1,62 @@ +#ifndef PADDLE_TTS_SERVING_FRONT_TEXT_NORMALIZE_H +#define PADDLE_TTS_SERVING_FRONT_TEXT_NORMALIZE_H + +#include +#include +#include +#include +#include +#include "absl/strings/str_split.h" +#include "absl/strings/strip.h" +#include "base/type_conv.h" + +namespace ppspeech { + +class TextNormalizer { +public: + TextNormalizer() { + InitMap(); + } + ~TextNormalizer() { + + } + + int InitMap(); + int Replace(std::wstring &sentence, const int &pos, const int &len, const std::wstring &repstr); + int SplitByPunc(const std::wstring &sentence, std::vector &sentence_part); + + std::string CreateTextValue(const std::string &num, bool use_zero=true); + std::string SingleDigit2Text(const std::string &num_str, bool alt_one = false); + std::string SingleDigit2Text(const std::wstring &num, bool alt_one = false); + std::string MultiDigit2Text(const std::string &num_str, bool alt_one = false, bool use_zero = true); + std::string MultiDigit2Text(const std::wstring &num, bool alt_one = false, bool use_zero = true); + std::string Digits2Text(const std::string &num_str); + std::string Digits2Text(const std::wstring &num); + + int ReData(std::wstring &sentence); + int ReData2(std::wstring &sentence); + int ReTime(std::wstring &sentence); + int ReTemperature(std::wstring &sentence); + int ReFrac(std::wstring &sentence); + int RePercentage(std::wstring &sentence); + int ReMobilePhone(std::wstring &sentence); + int RePhone(std::wstring &sentence); + int ReRange(std::wstring &sentence); + int ReInterger(std::wstring &sentence); + int ReDecimalNum(std::wstring &sentence); + int RePositiveQuantifiers(std::wstring &sentence); + int ReDefalutNum(std::wstring &sentence); + int ReNumber(std::wstring &sentence); + int SentenceNormalize(std::wstring &sentence); + + +private: + std::map digits_map; + std::map units_map; + + +}; + +} + +#endif \ No newline at end of file diff --git a/demos/TTSCppFrontend/third-party/CMakeLists.txt b/demos/TTSCppFrontend/third-party/CMakeLists.txt new file mode 100644 index 000000000..0579b8f24 --- /dev/null +++ b/demos/TTSCppFrontend/third-party/CMakeLists.txt @@ -0,0 +1,64 @@ +cmake_minimum_required(VERSION 3.10) +project(tts_third_party_libs) + +include(ExternalProject) + +# gflags +ExternalProject_Add(gflags + GIT_REPOSITORY https://github.com/gflags/gflags.git + GIT_TAG v2.2.2 + PREFIX ${CMAKE_CURRENT_BINARY_DIR} + INSTALL_DIR ${CMAKE_CURRENT_BINARY_DIR} + CMAKE_ARGS -DCMAKE_INSTALL_PREFIX= + -DCMAKE_POSITION_INDEPENDENT_CODE=ON + -DBUILD_STATIC_LIBS=OFF + -DBUILD_SHARED_LIBS=ON +) + +# glog +ExternalProject_Add( + glog + GIT_REPOSITORY 
https://github.com/google/glog.git + GIT_TAG v0.6.0 + PREFIX ${CMAKE_CURRENT_BINARY_DIR} + INSTALL_DIR ${CMAKE_CURRENT_BINARY_DIR} + CMAKE_ARGS -DCMAKE_INSTALL_PREFIX= + -DCMAKE_POSITION_INDEPENDENT_CODE=ON + DEPENDS gflags +) + +# abseil +ExternalProject_Add( + abseil + GIT_REPOSITORY https://github.com/abseil/abseil-cpp.git + GIT_TAG 20230125.1 + PREFIX ${CMAKE_CURRENT_BINARY_DIR} + INSTALL_DIR ${CMAKE_CURRENT_BINARY_DIR} + CMAKE_ARGS -DCMAKE_INSTALL_PREFIX= + -DCMAKE_POSITION_INDEPENDENT_CODE=ON + -DABSL_PROPAGATE_CXX_STD=ON +) + +# cppjieba (header-only) +ExternalProject_Add( + cppjieba + GIT_REPOSITORY https://github.com/yanyiwu/cppjieba.git + GIT_TAG v5.0.3 + PREFIX ${CMAKE_CURRENT_BINARY_DIR} + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" +) + +# limonp (header-only) +ExternalProject_Add( + limonp + GIT_REPOSITORY https://github.com/yanyiwu/limonp.git + GIT_TAG v0.6.6 + PREFIX ${CMAKE_CURRENT_BINARY_DIR} + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" +) From 271112ca69a8a73500c4fec0f83cda53672f620b Mon Sep 17 00:00:00 2001 From: TianYuan Date: Mon, 13 Mar 2023 21:12:45 +0800 Subject: [PATCH 12/37] fix vits reduce_sum's input/output dtype, test=tts (#3028) --- paddlespeech/t2s/models/vits/duration_predictor.py | 14 ++++++-------- paddlespeech/t2s/models/vits/generator.py | 13 ++++++++----- paddlespeech/t2s/models/vits/transform.py | 11 +++++++++-- 3 files changed, 23 insertions(+), 15 deletions(-) diff --git a/paddlespeech/t2s/models/vits/duration_predictor.py b/paddlespeech/t2s/models/vits/duration_predictor.py index b0bb68d0f..12177fbc2 100644 --- a/paddlespeech/t2s/models/vits/duration_predictor.py +++ b/paddlespeech/t2s/models/vits/duration_predictor.py @@ -155,12 +155,10 @@ class StochasticDurationPredictor(nn.Layer): z_u, z1 = paddle.split(z_q, [1, 1], 1) u = F.sigmoid(z_u) * x_mask z0 = (w - u) * x_mask - logdet_tot_q += paddle.sum( - (F.log_sigmoid(z_u) + F.log_sigmoid(-z_u)) * x_mask, [1, 2]) - logq = (paddle.sum(-0.5 * - (math.log(2 * math.pi) + - (e_q**2)) * x_mask, [1, 2]) - logdet_tot_q) - + tmp1 = (F.log_sigmoid(z_u) + F.log_sigmoid(-z_u)) * x_mask + logdet_tot_q += paddle.sum(tmp1, [1, 2]) + tmp2 = -0.5 * (math.log(2 * math.pi) + (e_q**2)) * x_mask + logq = (paddle.sum(tmp2, [1, 2]) - logdet_tot_q) logdet_tot = 0 z0, logdet = self.log_flow(z0, x_mask) logdet_tot += logdet @@ -168,8 +166,8 @@ class StochasticDurationPredictor(nn.Layer): for flow in self.flows: z, logdet = flow(z, x_mask, g=x, inverse=inverse) logdet_tot = logdet_tot + logdet - nll = (paddle.sum(0.5 * (math.log(2 * math.pi) + - (z**2)) * x_mask, [1, 2]) - logdet_tot) + tmp3 = 0.5 * (math.log(2 * math.pi) + (z**2)) * x_mask + nll = (paddle.sum(tmp3, [1, 2]) - logdet_tot) # (B,) return nll + logq else: diff --git a/paddlespeech/t2s/models/vits/generator.py b/paddlespeech/t2s/models/vits/generator.py index fbd2d6653..44bd78984 100644 --- a/paddlespeech/t2s/models/vits/generator.py +++ b/paddlespeech/t2s/models/vits/generator.py @@ -371,8 +371,9 @@ class VITSGenerator(nn.Layer): # (B, H, T_text) s_p_sq_r = paddle.exp(-2 * logs_p) # (B, 1, T_text) + tmp1 = -0.5 * math.log(2 * math.pi) - logs_p neg_x_ent_1 = paddle.sum( - -0.5 * math.log(2 * math.pi) - logs_p, + tmp1, [1], keepdim=True, ) # (B, T_feats, H) x (B, H, T_text) = (B, T_feats, T_text) @@ -384,8 +385,9 @@ class VITSGenerator(nn.Layer): z_p.transpose([0, 2, 1]), (m_p * s_p_sq_r), ) # (B, 1, T_text) + tmp2 = -0.5 * (m_p**2) * s_p_sq_r neg_x_ent_4 = paddle.sum( - -0.5 * (m_p**2) * 
s_p_sq_r, + tmp2, [1], keepdim=True, ) # (B, T_feats, T_text) @@ -403,7 +405,6 @@ class VITSGenerator(nn.Layer): w = attn.sum(2) dur_nll = self.duration_predictor(x, x_mask, w=w, g=g) dur_nll = dur_nll / paddle.sum(x_mask) - # expand the length to match with the feature sequence # (B, T_feats, T_text) x (B, T_text, H) -> (B, H, T_feats) m_p = paddle.matmul(attn.squeeze(1), @@ -511,8 +512,9 @@ class VITSGenerator(nn.Layer): # (B, H, T_text) s_p_sq_r = paddle.exp(-2 * logs_p) # (B, 1, T_text) + tmp3 = -0.5 * math.log(2 * math.pi) - logs_p neg_x_ent_1 = paddle.sum( - -0.5 * math.log(2 * math.pi) - logs_p, + tmp3, [1], keepdim=True, ) # (B, T_feats, H) x (B, H, T_text) = (B, T_feats, T_text) @@ -524,8 +526,9 @@ class VITSGenerator(nn.Layer): z_p.transpose([0, 2, 1]), (m_p * s_p_sq_r), ) # (B, 1, T_text) + tmp4 = -0.5 * (m_p**2) * s_p_sq_r neg_x_ent_4 = paddle.sum( - -0.5 * (m_p**2) * s_p_sq_r, + tmp4, [1], keepdim=True, ) # (B, T_feats, T_text) diff --git a/paddlespeech/t2s/models/vits/transform.py b/paddlespeech/t2s/models/vits/transform.py index 61bd5ee2b..0edc1d09d 100644 --- a/paddlespeech/t2s/models/vits/transform.py +++ b/paddlespeech/t2s/models/vits/transform.py @@ -61,8 +61,12 @@ def piecewise_rational_quadratic_transform( def mask_preprocess(x, mask): + # bins.dtype = int32 B, C, T, bins = paddle.shape(x) - new_x = paddle.zeros([mask.sum(), bins]) + mask_int = paddle.cast(mask, dtype='int64') + # paddle.sum 输入是 int32 或 bool 的时候,输出是 int64 + # paddle.zeros (fill_constant) 的 shape 会被强制转成 int32 类型 + new_x = paddle.zeros([paddle.sum(mask_int), bins]) for i in range(bins): new_x[:, i] = x[:, :, :, i][mask] return new_x @@ -240,4 +244,7 @@ def rational_quadratic_spline( def _searchsorted(bin_locations, inputs, eps=1e-6): bin_locations[..., -1] += eps - return paddle.sum(inputs[..., None] >= bin_locations, axis=-1) - 1 + mask = inputs[..., None] >= bin_locations + mask_int = paddle.cast(mask, 'int64') + out = paddle.sum(mask_int, axis=-1) - 1 + return out From f7567e479593ce3f23acfd91bfe4b309c603fb59 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Tue, 14 Mar 2023 10:38:36 +0800 Subject: [PATCH 13/37] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3c60db650..f71d0562a 100644 --- a/README.md +++ b/README.md @@ -179,7 +179,7 @@ Via the easy-to-use, efficient, flexible and scalable implementation, our vision ### Recent Update - 👑 2023.03.09: Add [Wav2vec2ASR-zh](./examples/aishell/asr3). -- 🎉 2023.03.07: Add [TTS ARM Linux C++ Demo](./demos/TTSArmLinux). +- 🎉 2023.03.07: Add [TTS ARM Linux C++ Demo (with C++ Chinese Text Frontend)](./demos/TTSArmLinux). - 🔥 2023.03.03 Add Voice Conversion [StarGANv2-VC synthesize pipeline](./examples/vctk/vc3). - 🎉 2023.02.16: Add [Cantonese TTS](./examples/canton/tts3). - 🔥 2023.01.10: Add [code-switch asr CLI and Demos](./demos/speech_recognition). 
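The reduce_sum changes in the VITS patch above work around two Paddle dtype rules that matter for dygraph-to-static export: paddle.sum over a bool (or int32) tensor returns int64, while the shape fed to paddle.zeros/fill_constant is coerced to int32. A minimal dygraph sketch of the pattern used in mask_preprocess; the tensor values are illustrative, and it assumes a Paddle release that, like the patched code, accepts a scalar Tensor inside the shape list:

import paddle

mask = paddle.to_tensor([[True, False], [True, True]])  # bool mask
mask_int = paddle.cast(mask, dtype='int64')              # cast before reduce_sum
num_true = paddle.sum(mask_int)                          # int64 scalar tensor
bins = 4
new_x = paddle.zeros([num_true, bins])                   # shape mixes Tensor and int
print(new_x.shape)                                       # [3, 4]

The same cast-then-sum idiom appears in _searchsorted, where summing the int64 mask keeps the bin index dtype stable across the static graph.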
From 259f4936ee3fd9696488ba69e7e25d3a89a4ef3d Mon Sep 17 00:00:00 2001 From: TianYuan Date: Tue, 14 Mar 2023 10:39:12 +0800 Subject: [PATCH 14/37] Update README_cn.md --- README_cn.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README_cn.md b/README_cn.md index 29ee387c0..5771d766b 100644 --- a/README_cn.md +++ b/README_cn.md @@ -184,7 +184,7 @@ ### 近期更新 - 👑 2023.03.09: 新增 [Wav2vec2ASR-zh](./examples/aishell/asr3)。 -- 🎉 2023.03.07: 新增 [TTS ARM Linux C++ 部署示例](./demos/TTSArmLinux)。 +- 🎉 2023.03.07: 新增 [TTS ARM Linux C++ 部署示例 (包含 C++ 中文文本前端模块)](./demos/TTSArmLinux)。 - 🔥 2023.03.03: 新增声音转换模型 [StarGANv2-VC 合成流程](./examples/vctk/vc3)。 - 🎉 2023.02.16: 新增[粤语语音合成](./examples/canton/tts3)。 - 🔥 2023.01.10: 新增[中英混合 ASR CLI 和 Demos](./demos/speech_recognition)。 From 1aa7495dabb439f62d89a72163b5d19fa5f4c290 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Tue, 14 Mar 2023 14:20:47 +0800 Subject: [PATCH 15/37] [TTS]Add license and reformat for TTSCppFrontend (#3030) --- demos/TTSArmLinux/src/Predictor.hpp | 160 +-- demos/TTSArmLinux/src/main.cc | 86 +- demos/TTSCppFrontend/README.md | 1 + .../TTSCppFrontend/front_demo/front_demo.cpp | 54 +- .../gentools/gen_dict_paddlespeech.py | 76 +- .../front_demo/gentools/genid.py | 17 +- .../front_demo/gentools/word2phones.py | 28 +- demos/TTSCppFrontend/src/base/type_conv.cpp | 30 +- demos/TTSCppFrontend/src/base/type_conv.h | 23 +- .../src/front/front_interface.cpp | 975 +++++++++++------- .../src/front/front_interface.h | 276 ++--- .../src/front/text_normalize.cpp | 356 ++++--- .../TTSCppFrontend/src/front/text_normalize.h | 89 +- 13 files changed, 1323 insertions(+), 848 deletions(-) diff --git a/demos/TTSArmLinux/src/Predictor.hpp b/demos/TTSArmLinux/src/Predictor.hpp index 985d01158..f173abb5c 100644 --- a/demos/TTSArmLinux/src/Predictor.hpp +++ b/demos/TTSArmLinux/src/Predictor.hpp @@ -1,7 +1,20 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
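+//
+// Predictor.hpp implements Paddle Lite based TTS inference: a Predictor
+// loads two .nb models (acoustic model and vocoder), runs them in sequence
+// on the phone id sequence, and writes the result as a 16-bit PCM or
+// 32-bit IEEE float WAV file (see WavDataType).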
#include #include -#include #include +#include #include #include #include @@ -10,24 +23,28 @@ using namespace paddle::lite_api; class PredictorInterface { -public: + public: virtual ~PredictorInterface() = 0; - virtual bool Init( - const std::string &AcousticModelPath, - const std::string &VocoderPath, - PowerMode cpuPowerMode, - int cpuThreadNum, - // WAV采样率(必须与模型输出匹配) - // 如果播放速度和音调异常,请修改采样率 - // 常见采样率:16000, 24000, 32000, 44100, 48000, 96000 - uint32_t wavSampleRate - ) = 0; - virtual std::shared_ptr LoadModel(const std::string &modelPath, int cpuThreadNum, PowerMode cpuPowerMode) = 0; + virtual bool Init(const std::string &AcousticModelPath, + const std::string &VocoderPath, + PowerMode cpuPowerMode, + int cpuThreadNum, + // WAV采样率(必须与模型输出匹配) + // 如果播放速度和音调异常,请修改采样率 + // 常见采样率:16000, 24000, 32000, 44100, 48000, 96000 + uint32_t wavSampleRate) = 0; + virtual std::shared_ptr LoadModel( + const std::string &modelPath, + int cpuThreadNum, + PowerMode cpuPowerMode) = 0; virtual void ReleaseModel() = 0; virtual bool RunModel(const std::vector &phones) = 0; - virtual std::unique_ptr GetAcousticModelOutput(const std::vector &phones) = 0; - virtual std::unique_ptr GetVocoderOutput(std::unique_ptr &&amOutput) = 0; - virtual void VocoderOutputToWav(std::unique_ptr &&vocOutput) = 0; + virtual std::unique_ptr GetAcousticModelOutput( + const std::vector &phones) = 0; + virtual std::unique_ptr GetVocoderOutput( + std::unique_ptr &&amOutput) = 0; + virtual void VocoderOutputToWav( + std::unique_ptr &&vocOutput) = 0; virtual void SaveFloatWav(float *floatWav, int64_t size) = 0; virtual bool IsLoaded() = 0; virtual float GetInferenceTime() = 0; @@ -45,23 +62,22 @@ PredictorInterface::~PredictorInterface() {} // WavDataType: WAV数据类型 // 可在 int16_t 和 float 之间切换, // 用于生成 16-bit PCM 或 32-bit IEEE float 格式的 WAV -template +template class Predictor : public PredictorInterface { -public: - virtual bool Init( - const std::string &AcousticModelPath, - const std::string &VocoderPath, - PowerMode cpuPowerMode, - int cpuThreadNum, - // WAV采样率(必须与模型输出匹配) - // 如果播放速度和音调异常,请修改采样率 - // 常见采样率:16000, 24000, 32000, 44100, 48000, 96000 - uint32_t wavSampleRate - ) override { + public: + bool Init(const std::string &AcousticModelPath, + const std::string &VocoderPath, + PowerMode cpuPowerMode, + int cpuThreadNum, + // WAV采样率(必须与模型输出匹配) + // 如果播放速度和音调异常,请修改采样率 + // 常见采样率:16000, 24000, 32000, 44100, 48000, 96000 + uint32_t wavSampleRate) override { // Release model if exists ReleaseModel(); - acoustic_model_predictor_ = LoadModel(AcousticModelPath, cpuThreadNum, cpuPowerMode); + acoustic_model_predictor_ = + LoadModel(AcousticModelPath, cpuThreadNum, cpuPowerMode); if (acoustic_model_predictor_ == nullptr) { return false; } @@ -80,7 +96,10 @@ public: ReleaseWav(); } - virtual std::shared_ptr LoadModel(const std::string &modelPath, int cpuThreadNum, PowerMode cpuPowerMode) override { + std::shared_ptr LoadModel( + const std::string &modelPath, + int cpuThreadNum, + PowerMode cpuPowerMode) override { if (modelPath.empty()) { return nullptr; } @@ -94,12 +113,12 @@ public: return CreatePaddlePredictor(config); } - virtual void ReleaseModel() override { + void ReleaseModel() override { acoustic_model_predictor_ = nullptr; vocoder_predictor_ = nullptr; } - virtual bool RunModel(const std::vector &phones) override { + bool RunModel(const std::vector &phones) override { if (!IsLoaded()) { return false; } @@ -115,12 +134,13 @@ public: // 计算用时 std::chrono::duration duration = end - start; - inference_time_ = duration.count() * 1000; // 
单位:毫秒 + inference_time_ = duration.count() * 1000; // 单位:毫秒 return true; } - virtual std::unique_ptr GetAcousticModelOutput(const std::vector &phones) override { + std::unique_ptr GetAcousticModelOutput( + const std::vector &phones) override { auto phones_handle = acoustic_model_predictor_->GetInput(0); phones_handle->Resize({static_cast(phones.size())}); phones_handle->CopyFromCpu(phones.data()); @@ -139,7 +159,8 @@ public: return am_output_handle; } - virtual std::unique_ptr GetVocoderOutput(std::unique_ptr &&amOutput) override { + std::unique_ptr GetVocoderOutput( + std::unique_ptr &&amOutput) override { auto mel_handle = vocoder_predictor_->GetInput(0); // [?, 80] auto dims = amOutput->shape(); @@ -161,7 +182,8 @@ public: return voc_output_handle; } - virtual void VocoderOutputToWav(std::unique_ptr &&vocOutput) override { + void VocoderOutputToWav( + std::unique_ptr &&vocOutput) override { // 获取输出Tensor的数据 int64_t output_size = 1; for (auto dim : vocOutput->shape()) { @@ -172,39 +194,31 @@ public: SaveFloatWav(output_data, output_size); } - virtual void SaveFloatWav(float *floatWav, int64_t size) override; + void SaveFloatWav(float *floatWav, int64_t size) override; - virtual bool IsLoaded() override { - return acoustic_model_predictor_ != nullptr && vocoder_predictor_ != nullptr; + bool IsLoaded() override { + return acoustic_model_predictor_ != nullptr && + vocoder_predictor_ != nullptr; } - virtual float GetInferenceTime() override { - return inference_time_; - } + float GetInferenceTime() override { return inference_time_; } - const std::vector & GetWav() { - return wav_; - } + const std::vector &GetWav() { return wav_; } - virtual int GetWavSize() override { - return wav_.size() * sizeof(WavDataType); - } + int GetWavSize() override { return wav_.size() * sizeof(WavDataType); } // 获取WAV持续时间(单位:毫秒) - virtual float GetWavDuration() override { - return static_cast(GetWavSize()) / sizeof(WavDataType) / static_cast(wav_sample_rate_) * 1000; + float GetWavDuration() override { + return static_cast(GetWavSize()) / sizeof(WavDataType) / + static_cast(wav_sample_rate_) * 1000; } // 获取RTF(合成时间 / 音频时长) - virtual float GetRTF() override { - return GetInferenceTime() / GetWavDuration(); - } + float GetRTF() override { return GetInferenceTime() / GetWavDuration(); } - virtual void ReleaseWav() override { - wav_.clear(); - } + void ReleaseWav() override { wav_.clear(); } - virtual bool WriteWavToFile(const std::string &wavPath) override { + bool WriteWavToFile(const std::string &wavPath) override { std::ofstream fout(wavPath, std::ios::binary); if (!fout.is_open()) { return false; @@ -216,18 +230,20 @@ public: header.data_size = GetWavSize(); header.size = sizeof(header) - 8 + header.data_size; header.sample_rate = wav_sample_rate_; - header.byte_rate = header.sample_rate * header.num_channels * header.bits_per_sample / 8; + header.byte_rate = header.sample_rate * header.num_channels * + header.bits_per_sample / 8; header.block_align = header.num_channels * header.bits_per_sample / 8; - fout.write(reinterpret_cast(&header), sizeof(header)); + fout.write(reinterpret_cast(&header), sizeof(header)); // 写入wav数据 - fout.write(reinterpret_cast(wav_.data()), header.data_size); + fout.write(reinterpret_cast(wav_.data()), + header.data_size); fout.close(); return true; } -protected: + protected: struct WavHeader { // RIFF 头 char riff[4] = {'R', 'I', 'F', 'F'}; @@ -250,19 +266,17 @@ protected: }; enum WavAudioFormat { - WAV_FORMAT_16BIT_PCM = 1, // 16-bit PCM 格式 + WAV_FORMAT_16BIT_PCM = 1, // 16-bit PCM 格式 
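+        // Enumerator values follow the WAV fmt-chunk AudioFormat codes
+        // (1 = integer PCM, 3 = IEEE float), so GetWavAudioFormat() can be
+        // written straight into WavHeader::audio_format.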
WAV_FORMAT_32BIT_FLOAT = 3 // 32-bit IEEE float 格式 }; -protected: + protected: // 返回值通过模板特化由 WavDataType 决定 inline uint16_t GetWavAudioFormat(); - inline float Abs(float number) { - return (number < 0) ? -number : number; - } + inline float Abs(float number) { return (number < 0) ? -number : number; } -protected: + protected: float inference_time_ = 0; uint32_t wav_sample_rate_ = 0; std::vector wav_; @@ -270,36 +284,36 @@ protected: std::shared_ptr vocoder_predictor_ = nullptr; }; -template<> +template <> uint16_t Predictor::GetWavAudioFormat() { return Predictor::WAV_FORMAT_16BIT_PCM; } -template<> +template <> uint16_t Predictor::GetWavAudioFormat() { return Predictor::WAV_FORMAT_32BIT_FLOAT; } // 保存 16-bit PCM 格式 WAV -template<> +template <> void Predictor::SaveFloatWav(float *floatWav, int64_t size) { wav_.resize(size); float maxSample = 0.01; // 寻找最大采样值 - for (int64_t i=0; i maxSample) { maxSample = sample; } } // 把采样值缩放到 int_16 范围 - for (int64_t i=0; i +template <> void Predictor::SaveFloatWav(float *floatWav, int64_t size) { wav_.resize(size); std::copy_n(floatWav, size, wav_.data()); diff --git a/demos/TTSArmLinux/src/main.cc b/demos/TTSArmLinux/src/main.cc index f3bd0f7b0..0b8e26bc4 100644 --- a/demos/TTSArmLinux/src/main.cc +++ b/demos/TTSArmLinux/src/main.cc @@ -1,23 +1,48 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include +#include #include #include +#include #include #include -#include -#include -#include -#include -#include #include "Predictor.hpp" using namespace paddle::lite_api; -DEFINE_string(sentence, "你好,欢迎使用语音合成服务", "Text to be synthesized (Chinese only. English will crash the program.)"); +DEFINE_string( + sentence, + "你好,欢迎使用语音合成服务", + "Text to be synthesized (Chinese only. 
English will crash the program.)"); DEFINE_string(front_conf, "./front.conf", "Front configuration file"); -DEFINE_string(acoustic_model, "./models/cpu/fastspeech2_csmsc_arm.nb", "Acoustic model .nb file"); -DEFINE_string(vocoder, "./models/cpu/fastspeech2_csmsc_arm.nb", "vocoder .nb file"); +DEFINE_string(acoustic_model, + "./models/cpu/fastspeech2_csmsc_arm.nb", + "Acoustic model .nb file"); +DEFINE_string(vocoder, + "./models/cpu/fastspeech2_csmsc_arm.nb", + "vocoder .nb file"); DEFINE_string(output_wav, "./output/tts.wav", "Output WAV file"); -DEFINE_string(wav_bit_depth, "16", "WAV bit depth, 16 (16-bit PCM) or 32 (32-bit IEEE float)"); -DEFINE_string(wav_sample_rate, "24000", "WAV sample rate, should match the output of the vocoder"); +DEFINE_string(wav_bit_depth, + "16", + "WAV bit depth, 16 (16-bit PCM) or 32 (32-bit IEEE float)"); +DEFINE_string(wav_sample_rate, + "24000", + "WAV sample rate, should match the output of the vocoder"); DEFINE_string(cpu_thread, "1", "CPU thread numbers"); int main(int argc, char *argv[]) { @@ -53,7 +78,7 @@ int main(int argc, char *argv[]) { // 繁体转简体 std::wstring sentence_simp; - front_inst->Trand2Simp(ws_sentence, sentence_simp); + front_inst->Trand2Simp(ws_sentence, &sentence_simp); ws_sentence = sentence_simp; std::string s_sentence; @@ -63,28 +88,30 @@ int main(int argc, char *argv[]) { // 根据标点进行分句 LOG(INFO) << "Start to segment sentences by punctuation"; - front_inst->SplitByPunc(ws_sentence, sentence_part); + front_inst->SplitByPunc(ws_sentence, &sentence_part); LOG(INFO) << "Segment sentences through punctuation successfully"; // 分句后获取音素id - LOG(INFO) << "Start to get the phoneme and tone id sequence of each sentence"; - for(int i = 0; i < sentence_part.size(); i++) { - - LOG(INFO) << "Raw sentence is: " << ppspeech::wstring2utf8string(sentence_part[i]); - front_inst->SentenceNormalize(sentence_part[i]); + LOG(INFO) + << "Start to get the phoneme and tone id sequence of each sentence"; + for (int i = 0; i < sentence_part.size(); i++) { + LOG(INFO) << "Raw sentence is: " + << ppspeech::wstring2utf8string(sentence_part[i]); + front_inst->SentenceNormalize(&sentence_part[i]); s_sentence = ppspeech::wstring2utf8string(sentence_part[i]); LOG(INFO) << "After normalization sentence is: " << s_sentence; - - if (0 != front_inst->GetSentenceIds(s_sentence, phoneids, toneids)) { + + if (0 != front_inst->GetSentenceIds(s_sentence, &phoneids, &toneids)) { LOG(ERROR) << "TTS inst get sentence phoneids and toneids failed"; return -1; } - } - LOG(INFO) << "The phoneids of the sentence is: " << limonp::Join(phoneids.begin(), phoneids.end(), " "); - LOG(INFO) << "The toneids of the sentence is: " << limonp::Join(toneids.begin(), toneids.end(), " "); + LOG(INFO) << "The phoneids of the sentence is: " + << limonp::Join(phoneids.begin(), phoneids.end(), " "); + LOG(INFO) << "The toneids of the sentence is: " + << limonp::Join(toneids.begin(), toneids.end(), " "); LOG(INFO) << "Get the phoneme id sequence of each sentence successfully"; - + /////////////////////////// 后端:音素转音频 /////////////////////////// @@ -99,13 +126,19 @@ int main(int argc, char *argv[]) { // CPU电源模式 const PowerMode cpuPowerMode = PowerMode::LITE_POWER_HIGH; - if (!predictor->Init(FLAGS_acoustic_model, FLAGS_vocoder, cpuPowerMode, cpuThreadNum, wavSampleRate)) { + if (!predictor->Init(FLAGS_acoustic_model, + FLAGS_vocoder, + cpuPowerMode, + cpuThreadNum, + wavSampleRate)) { LOG(ERROR) << "predictor init failed" << std::endl; return -1; } std::vector phones(phoneids.size()); - 
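+    // The frontend outputs phone ids as int; static_cast them to the element
+    // type of `phones` so they match the input dtype expected by the exported
+    // .nb acoustic model (see Predictor::GetAcousticModelOutput).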
std::transform(phoneids.begin(), phoneids.end(), phones.begin(), [](int x) { return static_cast(x); }); + std::transform(phoneids.begin(), phoneids.end(), phones.begin(), [](int x) { + return static_cast(x); + }); if (!predictor->RunModel(phones)) { LOG(ERROR) << "predictor run model failed" << std::endl; @@ -113,7 +146,8 @@ int main(int argc, char *argv[]) { } LOG(INFO) << "Inference time: " << predictor->GetInferenceTime() << " ms, " - << "WAV size (without header): " << predictor->GetWavSize() << " bytes, " + << "WAV size (without header): " << predictor->GetWavSize() + << " bytes, " << "WAV duration: " << predictor->GetWavDuration() << " ms, " << "RTF: " << predictor->GetRTF() << std::endl; diff --git a/demos/TTSCppFrontend/README.md b/demos/TTSCppFrontend/README.md index 592140ae1..552858de3 100644 --- a/demos/TTSCppFrontend/README.md +++ b/demos/TTSCppFrontend/README.md @@ -38,6 +38,7 @@ If the download speed is too slow, you can open [third-party/CMakeLists.txt](thi ``` ## Run +You can change `--phone2id_path` in `./front_demo/front.conf` to the `phone_id_map.txt` of your own acoustic model. ``` ./run_front_demo.sh diff --git a/demos/TTSCppFrontend/front_demo/front_demo.cpp b/demos/TTSCppFrontend/front_demo/front_demo.cpp index e943fd6f7..19f16758b 100644 --- a/demos/TTSCppFrontend/front_demo/front_demo.cpp +++ b/demos/TTSCppFrontend/front_demo/front_demo.cpp @@ -1,19 +1,32 @@ -#include -//#include "utils/dir_utils.h" -#include "front/front_interface.h" -#include +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
diff --git a/demos/TTSCppFrontend/front_demo/front_demo.cpp b/demos/TTSCppFrontend/front_demo/front_demo.cpp
index e943fd6f7..19f16758b 100644
--- a/demos/TTSCppFrontend/front_demo/front_demo.cpp
+++ b/demos/TTSCppFrontend/front_demo/front_demo.cpp
@@ -1,19 +1,32 @@
-#include <gflags/gflags.h>
-//#include "utils/dir_utils.h"
-#include "front/front_interface.h"
-#include <glog/logging.h>
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gflags/gflags.h>
+#include <glog/logging.h>
+#include "front/front_interface.h"
 
 DEFINE_string(sentence, "你好,欢迎使用语音合成服务", "Text to be synthesized");
 DEFINE_string(front_conf, "./front_demo/front.conf", "Front conf file");
-//DEFINE_string(seperate_tone, "true", "If true, get phoneids and tonesid");
+// DEFINE_string(seperate_tone, "true", "If true, get phoneids and tonesid");
 
 int main(int argc, char** argv) {
     gflags::ParseCommandLineFlags(&argc, &argv, true);
     // Instantiate the text frontend engine
-    ppspeech::FrontEngineInterface *front_inst = nullptr;
+    ppspeech::FrontEngineInterface* front_inst = nullptr;
     front_inst = new ppspeech::FrontEngineInterface(FLAGS_front_conf);
     if ((!front_inst) || (front_inst->init())) {
         LOG(ERROR) << "Create tts engine failed!";
@@ -28,7 +41,7 @@ int main(int argc, char** argv) {
 
     // Convert traditional Chinese text to simplified Chinese
     std::wstring sentence_simp;
-    front_inst->Trand2Simp(ws_sentence, sentence_simp);
+    front_inst->Trand2Simp(ws_sentence, &sentence_simp);
     ws_sentence = sentence_simp;
 
     std::string s_sentence;
@@ -38,28 +51,29 @@ int main(int argc, char** argv) {
 
     // Split the sentence into clauses by punctuation
     LOG(INFO) << "Start to segment sentences by punctuation";
-    front_inst->SplitByPunc(ws_sentence, sentence_part);
+    front_inst->SplitByPunc(ws_sentence, &sentence_part);
     LOG(INFO) << "Segment sentences through punctuation successfully";
 
     // Get the phoneme ids of each clause
-    LOG(INFO) << "Start to get the phoneme and tone id sequence of each sentence";
-    for(int i = 0; i < sentence_part.size(); i++) {
-
-        LOG(INFO) << "Raw sentence is: " << ppspeech::wstring2utf8string(sentence_part[i]);
-        front_inst->SentenceNormalize(sentence_part[i]);
+    LOG(INFO)
+        << "Start to get the phoneme and tone id sequence of each sentence";
+    for (int i = 0; i < sentence_part.size(); i++) {
+        LOG(INFO) << "Raw sentence is: "
+                  << ppspeech::wstring2utf8string(sentence_part[i]);
+        front_inst->SentenceNormalize(&sentence_part[i]);
         s_sentence = ppspeech::wstring2utf8string(sentence_part[i]);
         LOG(INFO) << "After normalization sentence is: " << s_sentence;
-
-        if (0 != front_inst->GetSentenceIds(s_sentence, phoneids, toneids)) {
+
+        if (0 != front_inst->GetSentenceIds(s_sentence, &phoneids, &toneids)) {
             LOG(ERROR) << "TTS inst get sentence phoneids and toneids failed";
             return -1;
         }
-
     }
-    LOG(INFO) << "The phoneids of the sentence is: " << limonp::Join(phoneids.begin(), phoneids.end(), " ");
-    LOG(INFO) << "The toneids of the sentence is: " << limonp::Join(toneids.begin(), toneids.end(), " ");
+    LOG(INFO) << "The phoneids of the sentence is: "
+              << limonp::Join(phoneids.begin(), phoneids.end(), " ");
+    LOG(INFO) << "The toneids of the sentence is: "
+              << limonp::Join(toneids.begin(), toneids.end(), " ");
     LOG(INFO) << "Get the phoneme id sequence of each sentence successfully";
-
+
     return EXIT_SUCCESS;
 }
-
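The recurring change in the two demo files above is that every writable reference parameter (`sentence_simp`, `sentence_part`, `phoneids`, `toneids`) becomes a pointer parameter, so call sites now read `&phoneids` instead of `phoneids`. This matches the Google C++ style rule that output parameters are passed by pointer. A minimal sketch of the convention with a hypothetical function, not an interface from this patch:

```
#include <string>
#include <vector>

// Before: the output was a non-const reference, so the call site
// GetIds(sentence, ids) looked identical to a read-only use.
// int GetIds(const std::string &sentence, std::vector<int> &ids);

// After: the output is a pointer; the call site GetIds(sentence, &ids)
// makes the mutation visible, and the callee can reject a null output.
int GetIds(const std::string &sentence, std::vector<int> *ids) {
    if (ids == nullptr) return -1;
    ids->push_back(static_cast<int>(sentence.size()));  // placeholder work
    return 0;  // 0 on success, -1 on failure, mirroring the patch's style
}
```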
diff --git a/demos/TTSCppFrontend/front_demo/gentools/gen_dict_paddlespeech.py b/demos/TTSCppFrontend/front_demo/gentools/gen_dict_paddlespeech.py
index e9a2c96f6..5aaa6e345 100644
--- a/demos/TTSCppFrontend/front_demo/gentools/gen_dict_paddlespeech.py
+++ b/demos/TTSCppFrontend/front_demo/gentools/gen_dict_paddlespeech.py
@@ -1,19 +1,28 @@
-# !/usr/bin/env python3
-# -*- coding: utf-8 -*-
-########################################################################
+# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
 #
-# Copyright 2021 liangyunming(liangyunming@baidu.com)
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
 #
-# Execute the script when PaddleSpeech has been installed
-# PaddleSpeech: https://github.com/PaddlePaddle/PaddleSpeech
-
-########################################################################
-
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 import argparse
 import configparser
+
 from paddlespeech.t2s.frontend.zh_frontend import Frontend
 
-def get_phone(frontend, word, merge_sentences=True, print_info=False, robot=False, get_tone_ids=False):
+
+def get_phone(frontend,
+              word,
+              merge_sentences=True,
+              print_info=False,
+              robot=False,
+              get_tone_ids=False):
     phonemes = frontend.get_phonemes(word, merge_sentences, print_info, robot)
     # Some optimizations
     phones, tones = frontend._get_phone_tone(phonemes[0], get_tone_ids)
@@ -22,7 +31,10 @@ def get_phone(frontend, word, merge_sentences=True, print_info=False, robot=Fals
     return phones, tones
 
-def gen_word2phone_dict(frontend, jieba_words_dict, word2phone_dict, get_tone=False):
+def gen_word2phone_dict(frontend,
+                        jieba_words_dict,
+                        word2phone_dict,
+                        get_tone=False):
     with open(jieba_words_dict, "r") as f1, open(word2phone_dict, "w+") as f2:
         for line in f1.readlines():
             word = line.split(" ")[0]
@@ -30,9 +42,9 @@ def gen_word2phone_dict(frontend, jieba_words_dict, word2phone_dict, get_tone=Fa
             phone_str = ""
 
             if tone:
-                assert(len(phone) == len(tone))
+                assert (len(phone) == len(tone))
                 for i in range(len(tone)):
-                    phone_tone = phone[i] + tone[i] 
+                    phone_tone = phone[i] + tone[i]
                     phone_str += (" " + phone_tone)
                 phone_str = phone_str.strip("sp0").strip(" ")
             else:
@@ -45,43 +57,55 @@ def main():
-    parser = argparse.ArgumentParser(
-        description="Generate dictionary")
+    parser = argparse.ArgumentParser(description="Generate dictionary")
     parser.add_argument(
         "--config", type=str, default="./config.ini", help="config file.")
     parser.add_argument(
-        "--am_type", type=str, default="fastspeech2", help="fastspeech2 or speedyspeech")
+        "--am_type",
+        type=str,
+        default="fastspeech2",
+        help="fastspeech2 or speedyspeech")
     args = parser.parse_args()
 
     # Read config
     cf = configparser.ConfigParser()
     cf.read(args.config)
-    jieba_words_dict_file = cf.get("jieba", "jieba_words_dict")  # get words dict
+    jieba_words_dict_file = cf.get("jieba",
+                                   "jieba_words_dict")  # get words dict
 
     am_type = args.am_type
-    if(am_type == "fastspeech2"):
+    if (am_type == "fastspeech2"):
         phone2id_dict_file = cf.get(am_type, "phone2id_dict")
         word2phone_dict_file = cf.get(am_type, "word2phone_dict")
 
         frontend = Frontend(phone_vocab_path=phone2id_dict_file)
         print("frontend done!")
 
-        gen_word2phone_dict(frontend, jieba_words_dict_file, word2phone_dict_file, get_tone=False)
-
-    elif(am_type == "speedyspeech"):
+        gen_word2phone_dict(
+            frontend,
+            jieba_words_dict_file,
+            word2phone_dict_file,
+            get_tone=False)
+
+    elif (am_type == "speedyspeech"):
         phone2id_dict_file = cf.get(am_type, "phone2id_dict")
         tone2id_dict_file = cf.get(am_type, "tone2id_dict")
         word2phone_dict_file = cf.get(am_type, "word2phone_dict")
 
-        frontend = Frontend(phone_vocab_path=phone2id_dict_file, tone_vocab_path=tone2id_dict_file)
+        frontend = Frontend(
+            phone_vocab_path=phone2id_dict_file,
+            tone_vocab_path=tone2id_dict_file)
         print("frontend done!")
 
-        gen_word2phone_dict(frontend, jieba_words_dict_file, word2phone_dict_file, get_tone=True)
-
+        gen_word2phone_dict(
+            frontend,
+            jieba_words_dict_file,
+            word2phone_dict_file,
+            get_tone=True)
     else:
         print("Please set correct am type, fastspeech2 or speedyspeech.")
-
-
+
+
 if __name__ == "__main__":
     main()
diff --git a/demos/TTSCppFrontend/front_demo/gentools/genid.py b/demos/TTSCppFrontend/front_demo/gentools/genid.py
index e2866bb0e..cf83623f0 100644
--- a/demos/TTSCppFrontend/front_demo/gentools/genid.py
+++ b/demos/TTSCppFrontend/front_demo/gentools/genid.py
@@ -1,10 +1,23 @@
-#from parakeet.frontend.vocab import Vocab
+# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 PHONESFILE = "./dict/phones.txt"
 PHONES_ID_FILE = "./dict/phonesid.dict"
 TONESFILE = "./dict/tones.txt"
 TONES_ID_FILE = "./dict/tonesid.dict"
+
 def GenIdFile(file, idfile):
     id = 2
     with open(file, 'r') as f1, open(idfile, "w+") as f2:
@@ -16,7 +29,7 @@ def GenIdFile(file, idfile):
             f2.write(phone + " " + str(id) + "\n")
             id += 1
+
 if __name__ == "__main__":
     GenIdFile(PHONESFILE, PHONES_ID_FILE)
     GenIdFile(TONESFILE, TONES_ID_FILE)
-
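For reference, `GenIdFile` above emits one `phone id` pair per line and counts up from 2; the script does not say why ids 0 and 1 are skipped, but they are presumably reserved for special symbols (an assumption, not stated in the source). A hypothetical excerpt of the resulting `phonesid.dict`, with illustrative phone names:

```
sp 2
a1 3
ai1 4
```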
diff --git a/demos/TTSCppFrontend/front_demo/gentools/word2phones.py b/demos/TTSCppFrontend/front_demo/gentools/word2phones.py
index 6a1822023..8726ee89c 100644
--- a/demos/TTSCppFrontend/front_demo/gentools/word2phones.py
+++ b/demos/TTSCppFrontend/front_demo/gentools/word2phones.py
@@ -1,9 +1,25 @@
-from pypinyin import lazy_pinyin, Style
+# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 import re
 
+from pypinyin import lazy_pinyin
+from pypinyin import Style
+
 worddict = "./dict/jieba_part.dict.utf8"
 newdict = "./dict/word_phones.dict"
 
+
 def GenPhones(initials, finals, seperate=True):
 
     phones = []
         elif c in ['zh', 'ch', 'sh', 'r']:
             v = re.sub('i', 'iii', v)
         if c:
-            if seperate == True:
+            if seperate is True:
                 phones.append(c + '0')
-            elif seperate == False:
+            elif seperate is False:
                 phones.append(c)
             else:
                 print("Not sure whether phone and tone need to be separated")
@@ -28,8 +44,10 @@
 with open(worddict, "r") as f1, open(newdict, "w+") as f2:
     for line in f1.readlines():
         word = line.split(" ")[0]
-        initials = lazy_pinyin(word, neutral_tone_with_five=True, style=Style.INITIALS)
-        finals = lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
+        initials = lazy_pinyin(
+            word, neutral_tone_with_five=True, style=Style.INITIALS)
+        finals = lazy_pinyin(
+            word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
 
         phones = GenPhones(initials, finals, True)
 
diff --git a/demos/TTSCppFrontend/src/base/type_conv.cpp b/demos/TTSCppFrontend/src/base/type_conv.cpp
index 5d5de43c5..b7ff63642 100644
--- a/demos/TTSCppFrontend/src/base/type_conv.cpp
+++ b/demos/TTSCppFrontend/src/base/type_conv.cpp
@@ -1,18 +1,28 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 #include "base/type_conv.h"
 
 namespace ppspeech {
 // wstring to string
-std::string wstring2utf8string(const std::wstring& str)
-{
-    static std::wstring_convert<std::codecvt_utf8<wchar_t> > strCnv;
+std::string wstring2utf8string(const std::wstring& str) {
+    static std::wstring_convert<std::codecvt_utf8<wchar_t>> strCnv;
     return strCnv.to_bytes(str);
 }
-
-// string to wstring
-std::wstring utf8string2wstring(const std::string& str)
-{
-    static std::wstring_convert< std::codecvt_utf8<wchar_t> > strCnv;
-    return strCnv.from_bytes(str);
-}
+// string to wstring
+std::wstring utf8string2wstring(const std::string& str) {
+    static std::wstring_convert<std::codecvt_utf8<wchar_t>> strCnv;
+    return strCnv.from_bytes(str);
 }
+}  // namespace ppspeech
diff --git a/demos/TTSCppFrontend/src/base/type_conv.h b/demos/TTSCppFrontend/src/base/type_conv.h
index 9acb7a6d2..6aecfc438 100644
--- a/demos/TTSCppFrontend/src/base/type_conv.h
+++ b/demos/TTSCppFrontend/src/base/type_conv.h
@@ -1,18 +1,31 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
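The pair of converters in `type_conv.cpp` above is used throughout the frontend to hop between UTF-8 `std::string` and `std::wstring`. A minimal usage sketch; note that `std::wstring_convert` and `std::codecvt_utf8` are deprecated since C++17, though they still compile and are exactly what this code relies on:

```
#include <codecvt>
#include <locale>
#include <string>

int main() {
    // Same idiom as wstring2utf8string / utf8string2wstring above.
    static std::wstring_convert<std::codecvt_utf8<wchar_t>> cnv;
    std::wstring ws = cnv.from_bytes("你好");  // UTF-8 bytes -> wide chars
    std::string s = cnv.to_bytes(ws);          // wide chars  -> UTF-8 bytes
    return s == "你好" ? 0 : 1;                // round trip should be lossless
}
```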
+ #ifndef BASE_TYPE_CONVC_H #define BASE_TYPE_CONVC_H -#include -#include #include +#include +#include namespace ppspeech { // wstring to string std::string wstring2utf8string(const std::wstring& str); - -// string to wstring -std::wstring utf8string2wstring(const std::string& str); +// string to wstring +std::wstring utf8string2wstring(const std::string& str); } #endif // BASE_TYPE_CONVC_H \ No newline at end of file diff --git a/demos/TTSCppFrontend/src/front/front_interface.cpp b/demos/TTSCppFrontend/src/front/front_interface.cpp index 5b828ac1b..8bd466d28 100644 --- a/demos/TTSCppFrontend/src/front/front_interface.cpp +++ b/demos/TTSCppFrontend/src/front/front_interface.cpp @@ -1,3 +1,16 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. #include "front/front_interface.h" namespace ppspeech { @@ -5,96 +18,123 @@ namespace ppspeech { int FrontEngineInterface::init() { if (_initialed) { return 0; - } + } if (0 != ReadConfFile()) { LOG(ERROR) << "Read front conf file failed"; return -1; } - _jieba = new cppjieba::Jieba(_jieba_dict_path, _jieba_hmm_path, _jieba_user_dict_path, - _jieba_idf_path, _jieba_stop_word_path); - - _punc = {",", "。", "、", "?", ":", ";", "~", "!", - ",", ".", "?", "!", ":", ";", "/", "\\"}; - _punc_omit = {"“", "”", "\"", "\""}; + _jieba = new cppjieba::Jieba(_jieba_dict_path, + _jieba_hmm_path, + _jieba_user_dict_path, + _jieba_idf_path, + _jieba_stop_word_path); + + _punc = {",", + "。", + "、", + "?", + ":", + ";", + "~", + "!", + ",", + ".", + "?", + "!", + ":", + ";", + "/", + "\\"}; + _punc_omit = {"“", "”", "\"", "\""}; // 需要儿化音处理的词语 - must_erhua = {"小院儿", "胡同儿", "范儿", "老汉儿", "撒欢儿", "寻老礼儿", "妥妥儿"}; - not_erhua = { - "虐儿", "为儿", "护儿", "瞒儿", "救儿", "替儿", "有儿", "一儿", "我儿", "俺儿", "妻儿", - "拐儿", "聋儿", "乞儿", "患儿", "幼儿", "孤儿", "婴儿", "婴幼儿", "连体儿", "脑瘫儿", - "流浪儿", "体弱儿", "混血儿", "蜜雪儿", "舫儿", "祖儿", "美儿", "应采儿", "可儿", "侄儿", - "孙儿", "侄孙儿", "女儿", "男儿", "红孩儿", "花儿", "虫儿", "马儿", "鸟儿", "猪儿", "猫儿", - "狗儿" - }; - - must_not_neural_tone_words = {"男子", "女子", "分子", "原子", "量子", "莲子", "石子", "瓜子", "电子"}; + must_erhua = { + "小院儿", "胡同儿", "范儿", "老汉儿", "撒欢儿", "寻老礼儿", "妥妥儿"}; + not_erhua = {"虐儿", "为儿", "护儿", "瞒儿", "救儿", "替儿", + "有儿", "一儿", "我儿", "俺儿", "妻儿", "拐儿", + "聋儿", "乞儿", "患儿", "幼儿", "孤儿", "婴儿", + "婴幼儿", "连体儿", "脑瘫儿", "流浪儿", "体弱儿", "混血儿", + "蜜雪儿", "舫儿", "祖儿", "美儿", "应采儿", "可儿", + "侄儿", "孙儿", "侄孙儿", "女儿", "男儿", "红孩儿", + "花儿", "虫儿", "马儿", "鸟儿", "猪儿", "猫儿", + "狗儿"}; + + must_not_neural_tone_words = { + "男子", "女子", "分子", "原子", "量子", "莲子", "石子", "瓜子", "电子"}; // 需要轻声处理的词语 must_neural_tone_words = { - "麻烦", "麻利", "鸳鸯", "高粱", "骨头", "骆驼", "马虎", "首饰", "馒头", "馄饨", "风筝", - "难为", "队伍", "阔气", "闺女", "门道", "锄头", "铺盖", "铃铛", "铁匠", "钥匙", "里脊", - "里头", "部分", "那么", "道士", "造化", "迷糊", "连累", "这么", "这个", "运气", "过去", - "软和", "转悠", "踏实", "跳蚤", "跟头", "趔趄", "财主", "豆腐", "讲究", "记性", "记号", - "认识", "规矩", "见识", "裁缝", "补丁", "衣裳", "衣服", "衙门", "街坊", "行李", "行当", - "蛤蟆", "蘑菇", "薄荷", "葫芦", "葡萄", "萝卜", "荸荠", "苗条", "苗头", "苍蝇", "芝麻", - "舒服", "舒坦", "舌头", 
"自在", "膏药", "脾气", "脑袋", "脊梁", "能耐", "胳膊", "胭脂", - "胡萝", "胡琴", "胡同", "聪明", "耽误", "耽搁", "耷拉", "耳朵", "老爷", "老实", "老婆", - "老头", "老太", "翻腾", "罗嗦", "罐头", "编辑", "结实", "红火", "累赘", "糨糊", "糊涂", - "精神", "粮食", "簸箕", "篱笆", "算计", "算盘", "答应", "笤帚", "笑语", "笑话", "窟窿", - "窝囊", "窗户", "稳当", "稀罕", "称呼", "秧歌", "秀气", "秀才", "福气", "祖宗", "砚台", - "码头", "石榴", "石头", "石匠", "知识", "眼睛", "眯缝", "眨巴", "眉毛", "相声", "盘算", - "白净", "痢疾", "痛快", "疟疾", "疙瘩", "疏忽", "畜生", "生意", "甘蔗", "琵琶", "琢磨", - "琉璃", "玻璃", "玫瑰", "玄乎", "狐狸", "状元", "特务", "牲口", "牙碜", "牌楼", "爽快", - "爱人", "热闹", "烧饼", "烟筒", "烂糊", "点心", "炊帚", "灯笼", "火候", "漂亮", "滑溜", - "溜达", "温和", "清楚", "消息", "浪头", "活泼", "比方", "正经", "欺负", "模糊", "槟榔", - "棺材", "棒槌", "棉花", "核桃", "栅栏", "柴火", "架势", "枕头", "枇杷", "机灵", "本事", - "木头", "木匠", "朋友", "月饼", "月亮", "暖和", "明白", "时候", "新鲜", "故事", "收拾", - "收成", "提防", "挖苦", "挑剔", "指甲", "指头", "拾掇", "拳头", "拨弄", "招牌", "招呼", - "抬举", "护士", "折腾", "扫帚", "打量", "打算", "打点", "打扮", "打听", "打发", "扎实", - "扁担", "戒指", "懒得", "意识", "意思", "情形", "悟性", "怪物", "思量", "怎么", "念头", - "念叨", "快活", "忙活", "志气", "心思", "得罪", "张罗", "弟兄", "开通", "应酬", "庄稼", - "干事", "帮手", "帐篷", "希罕", "师父", "师傅", "巴结", "巴掌", "差事", "工夫", "岁数", - "屁股", "尾巴", "少爷", "小气", "小伙", "将就", "对头", "对付", "寡妇", "家伙", "客气", - "实在", "官司", "学问", "学生", "字号", "嫁妆", "媳妇", "媒人", "婆家", "娘家", "委屈", - "姑娘", "姐夫", "妯娌", "妥当", "妖精", "奴才", "女婿", "头发", "太阳", "大爷", "大方", - "大意", "大夫", "多少", "多么", "外甥", "壮实", "地道", "地方", "在乎", "困难", "嘴巴", - "嘱咐", "嘟囔", "嘀咕", "喜欢", "喇嘛", "喇叭", "商量", "唾沫", "哑巴", "哈欠", "哆嗦", - "咳嗽", "和尚", "告诉", "告示", "含糊", "吓唬", "后头", "名字", "名堂", "合同", "吆喝", - "叫唤", "口袋", "厚道", "厉害", "千斤", "包袱", "包涵", "匀称", "勤快", "动静", "动弹", - "功夫", "力气", "前头", "刺猬", "刺激", "别扭", "利落", "利索", "利害", "分析", "出息", - "凑合", "凉快", "冷战", "冤枉", "冒失", "养活", "关系", "先生", "兄弟", "便宜", "使唤", - "佩服", "作坊", "体面", "位置", "似的", "伙计", "休息", "什么", "人家", "亲戚", "亲家", - "交情", "云彩", "事情", "买卖", "主意", "丫头", "丧气", "两口", "东西", "东家", "世故", - "不由", "不在", "下水", "下巴", "上头", "上司", "丈夫", "丈人", "一辈", "那个", "菩萨", - "父亲", "母亲", "咕噜", "邋遢", "费用", "冤家", "甜头", "介绍", "荒唐", "大人", "泥鳅", - "幸福", "熟悉", "计划", "扑腾", "蜡烛", "姥爷", "照顾", "喉咙", "吉他", "弄堂", "蚂蚱", - "凤凰", "拖沓", "寒碜", "糟蹋", "倒腾", "报复", "逻辑", "盘缠", "喽啰", "牢骚", "咖喱", - "扫把", "惦记" - }; - - + "麻烦", "麻利", "鸳鸯", "高粱", "骨头", "骆驼", "马虎", "首饰", "馒头", + "馄饨", "风筝", "难为", "队伍", "阔气", "闺女", "门道", "锄头", "铺盖", + "铃铛", "铁匠", "钥匙", "里脊", "里头", "部分", "那么", "道士", "造化", + "迷糊", "连累", "这么", "这个", "运气", "过去", "软和", "转悠", "踏实", + "跳蚤", "跟头", "趔趄", "财主", "豆腐", "讲究", "记性", "记号", "认识", + "规矩", "见识", "裁缝", "补丁", "衣裳", "衣服", "衙门", "街坊", "行李", + "行当", "蛤蟆", "蘑菇", "薄荷", "葫芦", "葡萄", "萝卜", "荸荠", "苗条", + "苗头", "苍蝇", "芝麻", "舒服", "舒坦", "舌头", "自在", "膏药", "脾气", + "脑袋", "脊梁", "能耐", "胳膊", "胭脂", "胡萝", "胡琴", "胡同", "聪明", + "耽误", "耽搁", "耷拉", "耳朵", "老爷", "老实", "老婆", "老头", "老太", + "翻腾", "罗嗦", "罐头", "编辑", "结实", "红火", "累赘", "糨糊", "糊涂", + "精神", "粮食", "簸箕", "篱笆", "算计", "算盘", "答应", "笤帚", "笑语", + "笑话", "窟窿", "窝囊", "窗户", "稳当", "稀罕", "称呼", "秧歌", "秀气", + "秀才", "福气", "祖宗", "砚台", "码头", "石榴", "石头", "石匠", "知识", + "眼睛", "眯缝", "眨巴", "眉毛", "相声", "盘算", "白净", "痢疾", "痛快", + "疟疾", "疙瘩", "疏忽", "畜生", "生意", "甘蔗", "琵琶", "琢磨", "琉璃", + "玻璃", "玫瑰", "玄乎", "狐狸", "状元", "特务", "牲口", "牙碜", "牌楼", + "爽快", "爱人", "热闹", "烧饼", "烟筒", "烂糊", "点心", "炊帚", "灯笼", + "火候", "漂亮", "滑溜", "溜达", "温和", "清楚", "消息", "浪头", "活泼", + "比方", "正经", "欺负", "模糊", "槟榔", "棺材", "棒槌", "棉花", "核桃", + "栅栏", "柴火", "架势", "枕头", "枇杷", "机灵", "本事", "木头", "木匠", + "朋友", "月饼", "月亮", "暖和", "明白", "时候", "新鲜", "故事", "收拾", + "收成", "提防", "挖苦", "挑剔", "指甲", "指头", "拾掇", "拳头", "拨弄", + "招牌", "招呼", "抬举", "护士", "折腾", "扫帚", "打量", "打算", "打点", + "打扮", "打听", "打发", "扎实", "扁担", 
"戒指", "懒得", "意识", "意思", + "情形", "悟性", "怪物", "思量", "怎么", "念头", "念叨", "快活", "忙活", + "志气", "心思", "得罪", "张罗", "弟兄", "开通", "应酬", "庄稼", "干事", + "帮手", "帐篷", "希罕", "师父", "师傅", "巴结", "巴掌", "差事", "工夫", + "岁数", "屁股", "尾巴", "少爷", "小气", "小伙", "将就", "对头", "对付", + "寡妇", "家伙", "客气", "实在", "官司", "学问", "学生", "字号", "嫁妆", + "媳妇", "媒人", "婆家", "娘家", "委屈", "姑娘", "姐夫", "妯娌", "妥当", + "妖精", "奴才", "女婿", "头发", "太阳", "大爷", "大方", "大意", "大夫", + "多少", "多么", "外甥", "壮实", "地道", "地方", "在乎", "困难", "嘴巴", + "嘱咐", "嘟囔", "嘀咕", "喜欢", "喇嘛", "喇叭", "商量", "唾沫", "哑巴", + "哈欠", "哆嗦", "咳嗽", "和尚", "告诉", "告示", "含糊", "吓唬", "后头", + "名字", "名堂", "合同", "吆喝", "叫唤", "口袋", "厚道", "厉害", "千斤", + "包袱", "包涵", "匀称", "勤快", "动静", "动弹", "功夫", "力气", "前头", + "刺猬", "刺激", "别扭", "利落", "利索", "利害", "分析", "出息", "凑合", + "凉快", "冷战", "冤枉", "冒失", "养活", "关系", "先生", "兄弟", "便宜", + "使唤", "佩服", "作坊", "体面", "位置", "似的", "伙计", "休息", "什么", + "人家", "亲戚", "亲家", "交情", "云彩", "事情", "买卖", "主意", "丫头", + "丧气", "两口", "东西", "东家", "世故", "不由", "不在", "下水", "下巴", + "上头", "上司", "丈夫", "丈人", "一辈", "那个", "菩萨", "父亲", "母亲", + "咕噜", "邋遢", "费用", "冤家", "甜头", "介绍", "荒唐", "大人", "泥鳅", + "幸福", "熟悉", "计划", "扑腾", "蜡烛", "姥爷", "照顾", "喉咙", "吉他", + "弄堂", "蚂蚱", "凤凰", "拖沓", "寒碜", "糟蹋", "倒腾", "报复", "逻辑", + "盘缠", "喽啰", "牢骚", "咖喱", "扫把", "惦记"}; + + // 生成词典(词到音素的映射) - if (0 != GenDict(_word2phone_path, word_phone_map)) { + if (0 != GenDict(_word2phone_path, &word_phone_map)) { LOG(ERROR) << "Genarate word2phone dict failed"; return -1; } // 生成音素字典(音素到音素id的映射) - if (0 != GenDict(_phone2id_path, phone_id_map)) { + if (0 != GenDict(_phone2id_path, &phone_id_map)) { LOG(ERROR) << "Genarate phone2id dict failed"; return -1; } // 生成音调字典(音调到音调id的映射) if (_seperate_tone == "true") { - if (0 != GenDict(_tone2id_path, tone_id_map)) { + if (0 != GenDict(_tone2id_path, &tone_id_map)) { LOG(ERROR) << "Genarate tone2id dict failed"; return -1; - } + } } // 生成繁简字典(繁体到简体id的映射) - if (0 != GenDict(_trand2simp_path, trand_simp_map)) { + if (0 != GenDict(_trand2simp_path, &trand_simp_map)) { LOG(ERROR) << "Genarate trand2simp dict failed"; return -1; } @@ -113,14 +153,14 @@ int FrontEngineInterface::ReadConfFile() { while (std::getline(is, line)) { if (line.substr(0, 2) == "--") { size_t pos = line.find_first_of("=", 0); - std::string key = line.substr(2, pos-2); + std::string key = line.substr(2, pos - 2); std::string value = line.substr(pos + 1); conf_map[key] = value; LOG(INFO) << "Key: " << key << "; Value: " << value; } } - // jieba conf path + // jieba conf path _jieba_dict_path = conf_map["jieba_dict_path"]; _jieba_hmm_path = conf_map["jieba_hmm_path"]; _jieba_user_dict_path = conf_map["jieba_user_dict_path"]; @@ -137,23 +177,26 @@ int FrontEngineInterface::ReadConfFile() { return 0; } -int FrontEngineInterface::Trand2Simp(const std::wstring &sentence, std::wstring &sentence_simp) { - //sentence_simp = sentence; - for(int i = 0; i < sentence.length(); i++) { +int FrontEngineInterface::Trand2Simp(const std::wstring &sentence, + std::wstring *sentence_simp) { + // sentence_simp = sentence; + for (int i = 0; i < sentence.length(); i++) { std::wstring temp(1, sentence[i]); std::string sigle_word = ppspeech::wstring2utf8string(temp); // 单个字是否在繁转简的字典里 - if(trand_simp_map.find(sigle_word) == trand_simp_map.end()) { - sentence_simp += temp; + if (trand_simp_map.find(sigle_word) == trand_simp_map.end()) { + sentence_simp->append(temp); } else { - sentence_simp += (ppspeech::utf8string2wstring(trand_simp_map[sigle_word])); + sentence_simp->append( + (ppspeech::utf8string2wstring(trand_simp_map[sigle_word]))); } } return 0; } -int 
FrontEngineInterface::GenDict(const std::string &dict_file, std::map &map) { +int FrontEngineInterface::GenDict(const std::string &dict_file, + std::map *map) { std::ifstream is(dict_file.c_str(), std::ifstream::in); if (!is.good()) { LOG(ERROR) << "Cannot open dict file: " << dict_file; @@ -163,28 +206,32 @@ int FrontEngineInterface::GenDict(const std::string &dict_file, std::map> &seg, - std::vector &seg_words) { - std::vector> ::iterator iter; - for(iter=seg.begin(); iter!=seg.end(); iter++) { - seg_words.push_back((*iter).first); +int FrontEngineInterface::GetSegResult( + std::vector> *seg, + std::vector *seg_words) { + std::vector>::iterator iter; + for (iter = seg->begin(); iter != seg->end(); iter++) { + seg_words->push_back((*iter).first); } return 0; } -int FrontEngineInterface::GetSentenceIds(const std::string &sentence, std::vector &phoneids, std::vector &toneids) { - std::vector> cut_result; //分词结果包含词和词性 - if (0 != Cut(sentence, cut_result)) { +int FrontEngineInterface::GetSentenceIds(const std::string &sentence, + std::vector *phoneids, + std::vector *toneids) { + std::vector> + cut_result; //分词结果包含词和词性 + if (0 != Cut(sentence, &cut_result)) { LOG(ERROR) << "Cut sentence: \"" << sentence << "\" failed"; return -1; } - + if (0 != GetWordsIds(cut_result, phoneids, toneids)) { LOG(ERROR) << "Get words phoneids failed"; return -1; @@ -192,81 +239,89 @@ int FrontEngineInterface::GetSentenceIds(const std::string &sentence, std::vecto return 0; } -int FrontEngineInterface::GetWordsIds(const std::vector> &cut_result, std::vector &phoneids, - std::vector &toneids) { +int FrontEngineInterface::GetWordsIds( + const std::vector> &cut_result, + std::vector *phoneids, + std::vector *toneids) { std::string word; std::string pos; std::vector word_initials; std::vector word_finals; std::string phone; - for(int i = 0; i < cut_result.size(); i++) { + for (int i = 0; i < cut_result.size(); i++) { word = cut_result[i].first; pos = cut_result[i].second; - if (std::find(_punc_omit.begin(), _punc_omit.end(), word) == _punc_omit.end()) { // 非可忽略的标点 + if (std::find(_punc_omit.begin(), _punc_omit.end(), word) == + _punc_omit.end()) { // 非可忽略的标点 word_initials = {}; word_finals = {}; phone = ""; // 判断是否在标点符号集合中 - if (std::find(_punc.begin(), _punc.end(), word) == _punc.end()) { // 文字 + if (std::find(_punc.begin(), _punc.end(), word) == + _punc.end()) { // 文字 // 获取字词的声母韵母列表 - if(0 != GetInitialsFinals(word, word_initials, word_finals)) { - LOG(ERROR) << "Genarate the word_initials and word_finals of " << word << " failed"; + if (0 != + GetInitialsFinals(word, &word_initials, &word_finals)) { + LOG(ERROR) + << "Genarate the word_initials and word_finals of " + << word << " failed"; return -1; } - + // 对读音进行修改 - if(0 != ModifyTone(word, pos, word_finals)) { + if (0 != ModifyTone(word, pos, &word_finals)) { LOG(ERROR) << "Failed to modify tone."; } // 对儿化音进行修改 - std::vector> new_initals_finals = MergeErhua(word_initials, word_finals, word, pos); + std::vector> new_initals_finals = + MergeErhua(word_initials, word_finals, word, pos); word_initials = new_initals_finals[0]; word_finals = new_initals_finals[1]; - + // 将声母和韵母合并成音素 assert(word_initials.size() == word_finals.size()); std::string temp_phone; - for(int j = 0; j < word_initials.size(); j++) { - if(word_initials[j] != "") { + for (int j = 0; j < word_initials.size(); j++) { + if (word_initials[j] != "") { temp_phone = word_initials[j] + " " + word_finals[j]; } else { temp_phone = word_finals[j]; } - if(j == 0) { + if (j == 0) { phone += temp_phone; } 
else { phone += (" " + temp_phone); } } - } else { // 标点符号 - if(_seperate_tone == "true") { - phone = "sp0"; // speedyspeech + } else { // 标点符号 + if (_seperate_tone == "true") { + phone = "sp0"; // speedyspeech } else { - phone = "sp"; // fastspeech2 - } + phone = "sp"; // fastspeech2 + } } // 音素到音素id - if(0 != Phone2Phoneid(phone, phoneids, toneids)) { - LOG(ERROR) << "Genarate the phone id of " << word << " failed"; + if (0 != Phone2Phoneid(phone, phoneids, toneids)) { + LOG(ERROR) << "Genarate the phone id of " << word << " failed"; return -1; } } } - return 0; - } -int FrontEngineInterface::Cut(const std::string &sentence, std::vector> &cut_result) { +int FrontEngineInterface::Cut( + const std::string &sentence, + std::vector> *cut_result) { std::vector> cut_result_jieba; - + // 结巴分词 _jieba->Tag(sentence, cut_result_jieba); // 对分词后结果进行整合 - if (0 != MergeforModify(cut_result_jieba, cut_result)) { + if (0 != MergeforModify(&cut_result_jieba, cut_result)) { LOG(ERROR) << "Failed to modify for word segmentation result."; return -1; } @@ -274,50 +329,57 @@ int FrontEngineInterface::Cut(const std::string &sentence, std::vector wordcut; _jieba->CutAll(word, wordcut); - phone = word_phone_map[wordcut[0]]; + phone->assign(word_phone_map[wordcut[0]]); for (int i = 1; i < wordcut.size(); i++) { - phone += (" " + word_phone_map[wordcut[i]]); + phone->assign((*phone) + (" " + word_phone_map[wordcut[i]])); } } else { - phone = word_phone_map[word]; + phone->assign(word_phone_map[word]); } return 0; } -int FrontEngineInterface::Phone2Phoneid(const std::string &phone, std::vector &phoneid, std::vector &toneid) { +int FrontEngineInterface::Phone2Phoneid(const std::string &phone, + std::vector *phoneid, + std::vector *toneid) { std::vector phone_vec; phone_vec = absl::StrSplit(phone, " "); std::string temp_phone; - for(int i = 0; i < phone_vec.size(); i++) { + for (int i = 0; i < phone_vec.size(); i++) { temp_phone = phone_vec[i]; - if(_seperate_tone == "true") { - phoneid.push_back(atoi((phone_id_map[temp_phone.substr(0, temp_phone.length()-1)]).c_str())); - toneid.push_back(atoi((tone_id_map[temp_phone.substr(temp_phone.length()-1, temp_phone.length())]).c_str())); - }else { - phoneid.push_back(atoi((phone_id_map[temp_phone]).c_str())); + if (_seperate_tone == "true") { + phoneid->push_back(atoi( + (phone_id_map[temp_phone.substr(0, temp_phone.length() - 1)]) + .c_str())); + toneid->push_back( + atoi((tone_id_map[temp_phone.substr(temp_phone.length() - 1, + temp_phone.length())]) + .c_str())); + } else { + phoneid->push_back(atoi((phone_id_map[temp_phone]).c_str())); } - } return 0; } // 根据韵母判断该词中每个字的读音都为第三声。true表示词中每个字都是第三声 -bool FrontEngineInterface::AllToneThree(const std::vector &finals) { +bool FrontEngineInterface::AllToneThree( + const std::vector &finals) { bool flags = true; - for(int i = 0; i < finals.size(); i++) { - if((int)finals[i].back() != 51) { //如果读音不为第三声 + for (int i = 0; i < finals.size(); i++) { + if (static_cast(finals[i].back()) != 51) { //如果读音不为第三声 flags = false; - } + } } return flags; - } // 判断词是否是叠词 @@ -325,45 +387,49 @@ bool FrontEngineInterface::IsReduplication(const std::string &word) { bool flags = false; std::wstring word_wstr = ppspeech::utf8string2wstring(word); int len = word_wstr.length(); - if(len == 2 && word_wstr[0] == word_wstr[1]){ + if (len == 2 && word_wstr[0] == word_wstr[1]) { flags = true; } return flags; - } -// 获取每个字词的声母和韵母列表, word_initials 为声母列表,word_finals 为韵母列表 -int FrontEngineInterface::GetInitialsFinals(const std::string &word, std::vector 
&word_initials, std::vector &word_finals) { - std::string phone; - GetPhone(word, phone); //获取字词对应的音素 +// 获取每个字词的声母和韵母列表, word_initials 为声母列表,word_finals +// 为韵母列表 +int FrontEngineInterface::GetInitialsFinals( + const std::string &word, + std::vector *word_initials, + std::vector *word_finals) { + std::string phone; + GetPhone(word, &phone); //获取字词对应的音素 std::vector phone_vec = absl::StrSplit(phone, " "); //获取韵母,每个字的音素有1或者2个,start为单个字音素的起始位置。 - int start = 0; - while(start < phone_vec.size()) { - if(phone_vec[start] == "sp" || phone_vec[start] == "sp0") { + int start = 0; + while (start < phone_vec.size()) { + if (phone_vec[start] == "sp" || phone_vec[start] == "sp0") { start += 1; - } - // 最后一位不是数字或者最后一位的数字是0,均表示声母,第二个是韵母 - else if(isdigit(phone_vec[start].back()) == 0 || (int)phone_vec[start].back() == 48) { - word_initials.push_back(phone_vec[start]); - word_finals.push_back(phone_vec[start + 1]); + } else if (isdigit(phone_vec[start].back()) == 0 || + static_cast(phone_vec[start].back()) == 48) { + word_initials->push_back(phone_vec[start]); + word_finals->push_back(phone_vec[start + 1]); start += 2; } else { - word_initials.push_back(""); - word_finals.push_back(phone_vec[start]); + word_initials->push_back(""); + word_finals->push_back(phone_vec[start]); start += 1; } } - - assert(word_finals.size() == ppspeech::utf8string2wstring(word).length() && word_finals.size() == word_initials.size()); + + assert(word_finals->size() == ppspeech::utf8string2wstring(word).length() && + word_finals->size() == word_initials->size()); return 0; } // 获取每个字词的韵母列表 -int FrontEngineInterface::GetFinals(const std::string &word, std::vector &word_finals) { +int FrontEngineInterface::GetFinals(const std::string &word, + std::vector *word_finals) { std::vector word_initials; - if(0 != GetInitialsFinals(word, word_initials, word_finals)) { + if (0 != GetInitialsFinals(word, &word_initials, word_finals)) { LOG(ERROR) << "Failed to get word finals"; return -1; } @@ -371,162 +437,189 @@ int FrontEngineInterface::GetFinals(const std::string &word, std::vector &wordvec) { +int FrontEngineInterface::Word2WordVec(const std::string &word, + std::vector *wordvec) { std::wstring word_wstr = ppspeech::utf8string2wstring(word); - for(int i = 0; i < word_wstr.length(); i++) { + for (int i = 0; i < word_wstr.length(); i++) { std::wstring word_sigle(1, word_wstr[i]); - wordvec.push_back(word_sigle); + wordvec->push_back(word_sigle); } return 0; - } // yuantian01解释:把一个词再进行分词找到。例子:小雨伞 --> 小 雨伞 或者 小雨 伞 -int FrontEngineInterface::SplitWord(const std::string &word, std::vector &new_word_vec) { +int FrontEngineInterface::SplitWord(const std::string &word, + std::vector *new_word_vec) { std::vector word_vec; std::string second_subword; _jieba->CutForSearch(word, word_vec); // 升序 - std::sort(word_vec.begin(), word_vec.end(), [](std::string a, std::string b ) {return a.size() > b.size();}); + std::sort(word_vec.begin(), + word_vec.end(), + [](std::string a, std::string b) { return a.size() > b.size(); }); std::string first_subword = word_vec[0]; // 提取长度最短的字符串 int first_begin_idx = word.find_first_of(first_subword); - if(first_begin_idx == 0) { + if (first_begin_idx == 0) { second_subword = word.substr(first_subword.length()); - new_word_vec.push_back(first_subword); - new_word_vec.push_back(second_subword); + new_word_vec->push_back(first_subword); + new_word_vec->push_back(second_subword); } else { second_subword = word.substr(0, word.length() - first_subword.length()); - new_word_vec.push_back(second_subword); - 
new_word_vec.push_back(first_subword); + new_word_vec->push_back(second_subword); + new_word_vec->push_back(first_subword); } return 0; - } -//example: 不 一起 --> 不一起 -std::vector> FrontEngineInterface::MergeBu(std::vector> &seg_result) { +// example: 不 一起 --> 不一起 +std::vector> FrontEngineInterface::MergeBu( + std::vector> *seg_result) { std::vector> result; std::string word; std::string pos; std::string last_word = ""; - - for(int i = 0; i < seg_result.size(); i++) { - word = seg_result[i].first; - pos = seg_result[i].second; - if(last_word == "不") { + + for (int i = 0; i < seg_result->size(); i++) { + word = std::get<0>((*seg_result)[i]); + pos = std::get<1>((*seg_result)[i]); + if (last_word == "不") { word = last_word + word; - } - if(word != "不") { + } + if (word != "不") { result.push_back(make_pair(word, pos)); - } + } last_word = word; } - if(last_word == "不") { + if (last_word == "不") { result.push_back(make_pair(last_word, "d")); last_word = ""; } - + return result; } -std::vector> FrontEngineInterface::Mergeyi(std::vector> &seg_result) { - std::vector> result_temp; +std::vector> FrontEngineInterface::Mergeyi( + std::vector> *seg_result) { + std::vector> *result_temp = + new std::vector>(); std::string word; std::string pos; - // function 1 example: 听 一 听 --> 听一听 - for(int i = 0; i < seg_result.size(); i++) { - word = seg_result[i].first; - pos = seg_result[i].second; - if((i - 1 >= 0) && (word == "一") && (i + 1 < seg_result.size()) && - (seg_result[i - 1].first == seg_result[i + 1].first) && seg_result[i - 1].second == "v") { - result_temp[i - 1].first = result_temp[i - 1].first + "一" + result_temp[i - 1].first; + for (int i = 0; i < seg_result->size(); i++) { + word = std::get<0>((*seg_result)[i]); + pos = std::get<1>((*seg_result)[i]); + + if ((i - 1 >= 0) && (word == "一") && (i + 1 < seg_result->size()) && + (std::get<0>((*seg_result)[i - 1]) == + std::get<0>((*seg_result)[i + 1])) && + std::get<1>((*seg_result)[i - 1]) == "v") { + std::get<0>((*result_temp)[i - 1]) = + std::get<0>((*result_temp)[i - 1]) + "一" + + std::get<0>((*result_temp)[i - 1]); + } else { + if ((i - 2 >= 0) && (std::get<0>((*seg_result)[i - 1]) == "一") && + (std::get<0>((*seg_result)[i - 2]) == word) && (pos == "v")) { + continue; } else { - if((i - 2 >= 0) && (seg_result[i - 1].first == "一") && (seg_result[i - 2].first == word) && (pos == "v")) { - continue; - } else{ - result_temp.push_back(make_pair(word, pos)); - } - } + result_temp->push_back(make_pair(word, pos)); + } + } } // function 2 example: 一 你 --> 一你 std::vector> result = {}; - for(int j = 0; j < result_temp.size(); j++) { - word = result_temp[j].first; - pos = result_temp[j].second; - if((result.size() != 0) && (result.back().first == "一")) { + for (int j = 0; j < result_temp->size(); j++) { + word = std::get<0>((*result_temp)[j]); + pos = std::get<1>((*result_temp)[j]); + if ((result.size() != 0) && (result.back().first == "一")) { result.back().first = result.back().first + word; } else { result.push_back(make_pair(word, pos)); - } - + } } - + return result; } // example: 你 你 --> 你你 -std::vector> FrontEngineInterface::MergeReduplication(std::vector> &seg_result) { +std::vector> +FrontEngineInterface::MergeReduplication( + std::vector> *seg_result) { std::vector> result; std::string word; std::string pos; - for(int i = 0; i < seg_result.size(); i++) { - word = seg_result[i].first; - pos = seg_result[i].second; - if((result.size() != 0) && (word == result.back().first)) { - result.back().first = result.back().first + seg_result[i].first; + for 
(int i = 0; i < seg_result->size(); i++) { + word = std::get<0>((*seg_result)[i]); + pos = std::get<1>((*seg_result)[i]); + if ((result.size() != 0) && (word == result.back().first)) { + result.back().first = + result.back().first + std::get<0>((*seg_result)[i]); } else { result.push_back(make_pair(word, pos)); } } - + return result; } -// the first and the second words are all_tone_three -std::vector> FrontEngineInterface::MergeThreeTones(std::vector> &seg_result) { +// the first and the second words are all_tone_three +std::vector> +FrontEngineInterface::MergeThreeTones( + std::vector> *seg_result) { std::vector> result; std::string word; - std::string pos; - std::vector> finals; //韵母数组 + std::string pos; + std::vector> finals; //韵母数组 std::vector word_final; - std::vector merge_last(seg_result.size(), false); + std::vector merge_last(seg_result->size(), false); // 判断最后一个分词结果是不是标点,不看标点的声母韵母 - int word_num = seg_result.size() - 1; - if(std::find(_punc.begin(), _punc.end(), seg_result[word_num].first) == _punc.end()){ // 最后一个分词结果不是标点 + int word_num = seg_result->size() - 1; + + // seg_result[word_num].first + if (std::find( + _punc.begin(), _punc.end(), std::get<0>((*seg_result)[word_num])) == + _punc.end()) { // 最后一个分词结果不是标点 word_num += 1; } // 获取韵母数组 - for(int i = 0; i < word_num; i++) { + for (int i = 0; i < word_num; i++) { word_final = {}; - word = seg_result[i].first; - pos = seg_result[i].second; - if(std::find(_punc_omit.begin(), _punc_omit.end(), word) == _punc_omit.end()) { // 非可忽略的标点,即文字 - if(0 != GetFinals(word, word_final)) { + word = std::get<0>((*seg_result)[i]); + pos = std::get<1>((*seg_result)[i]); + if (std::find(_punc_omit.begin(), _punc_omit.end(), word) == + _punc_omit.end()) { // 非可忽略的标点,即文字 + if (0 != GetFinals(word, &word_final)) { LOG(ERROR) << "Failed to get the final of word."; } - } + } - finals.push_back(word_final); + finals.push_back(word_final); } assert(word_num == finals.size()); // 对第三声读音的字词分词结果进行处理 - for(int i = 0; i < word_num; i++) { - word = seg_result[i].first; - pos = seg_result[i].second; - if(i - 1 >= 0 && AllToneThree(finals[i - 1]) && AllToneThree(finals[i]) && !merge_last[i - 1]) { - // if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi - if(!IsReduplication(seg_result[i - 1].first) && - (ppspeech::utf8string2wstring(seg_result[i - 1].first)).length() + (ppspeech::utf8string2wstring(word)).length() <= 3) { - result.back().first = result.back().first + seg_result[i].first; + for (int i = 0; i < word_num; i++) { + word = std::get<0>((*seg_result)[i]); + pos = std::get<1>((*seg_result)[i]); + if (i - 1 >= 0 && AllToneThree(finals[i - 1]) && + AllToneThree(finals[i]) && !merge_last[i - 1]) { + // if the last word is reduplication, not merge, because + // reduplication need to be _neural_sandhi + // seg_result[i - 1].first + if (!IsReduplication(std::get<0>((*seg_result)[i - 1])) && + (ppspeech::utf8string2wstring( + std::get<0>((*seg_result)[i - 1]))) + .length() + + (ppspeech::utf8string2wstring(word)).length() <= + 3) { + result.back().first = + result.back().first + std::get<0>((*seg_result)[i]); merge_last[i] = true; } else { result.push_back(make_pair(word, pos)); @@ -537,54 +630,73 @@ std::vector> FrontEngineInterface::MergeThre } //把标点的分词结果补上 - if(word_num < seg_result.size()) { - result.push_back(make_pair(seg_result[word_num].first, seg_result[word_num].second)); + if (word_num < seg_result->size()) { + result.push_back( + // seg_result[word_num].first seg_result[word_num].second + // 
std::get<0>((*seg_result)[word_num]) + make_pair(std::get<0>((*seg_result)[word_num]), + std::get<1>((*seg_result)[word_num]))); } return result; } -// the last char of first word and the first char of second word is tone_three -std::vector> FrontEngineInterface::MergeThreeTones2(std::vector> &seg_result) { +// the last char of first word and the first char of second word is tone_three +std::vector> +FrontEngineInterface::MergeThreeTones2( + std::vector> *seg_result) { std::vector> result; std::string word; - std::string pos; - std::vector> finals; //韵母数组 + std::string pos; + std::vector> finals; //韵母数组 std::vector word_final; - std::vector merge_last(seg_result.size(), false); + std::vector merge_last(seg_result->size(), false); // 判断最后一个分词结果是不是标点 - int word_num = seg_result.size() - 1; - if(std::find(_punc.begin(), _punc.end(), seg_result[word_num].first) == _punc.end()){ // 最后一个分词结果不是标点 + int word_num = seg_result->size() - 1; + if (std::find( + _punc.begin(), _punc.end(), std::get<0>((*seg_result)[word_num])) == + _punc.end()) { // 最后一个分词结果不是标点 word_num += 1; } // 获取韵母数组 - for(int i = 0; i < word_num; i++) { + for (int i = 0; i < word_num; i++) { word_final = {}; - word = seg_result[i].first; - pos = seg_result[i].second; + word = std::get<0>((*seg_result)[i]); + pos = std::get<1>((*seg_result)[i]); // 如果是文字,则获取韵母,如果是可忽略的标点,例如引号,则跳过 - if(std::find(_punc_omit.begin(), _punc_omit.end(), word) == _punc_omit.end()) { - if(0 != GetFinals(word, word_final)) { + if (std::find(_punc_omit.begin(), _punc_omit.end(), word) == + _punc_omit.end()) { + if (0 != GetFinals(word, &word_final)) { LOG(ERROR) << "Failed to get the final of word."; } - } + } - finals.push_back(word_final); + finals.push_back(word_final); } assert(word_num == finals.size()); // 对第三声读音的字词分词结果进行处理 - for(int i = 0; i < word_num; i++) { - word = seg_result[i].first; - pos = seg_result[i].second; - if(i - 1 >= 0 && !finals[i - 1].empty() && absl::EndsWith(finals[i - 1].back(), "3") == true && - !finals[i].empty() && absl::EndsWith(finals[i].front(), "3") == true && !merge_last[i - 1]) { - // if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi - if(!IsReduplication(seg_result[i - 1].first) && - (ppspeech::utf8string2wstring(seg_result[i - 1].first)).length() + ppspeech::utf8string2wstring(word).length() <= 3) { - result.back().first = result.back().first + seg_result[i].first; + for (int i = 0; i < word_num; i++) { + word = std::get<0>((*seg_result)[i]); + pos = std::get<1>((*seg_result)[i]); + if (i - 1 >= 0 && !finals[i - 1].empty() && + absl::EndsWith(finals[i - 1].back(), "3") == true && + !finals[i].empty() && + absl::EndsWith(finals[i].front(), "3") == true && + !merge_last[i - 1]) { + // if the last word is reduplication, not merge, because + // reduplication need to be _neural_sandhi + // seg_result[i - 1].first + if (!IsReduplication(std::get<0>((*seg_result)[i - 1])) && + (ppspeech::utf8string2wstring( + std::get<0>((*seg_result)[i - 1]))) + .length() + + ppspeech::utf8string2wstring(word).length() <= + 3) { + result.back().first = + result.back().first + std::get<0>((*seg_result)[i]); merge_last[i] = true; } else { result.push_back(make_pair(word, pos)); @@ -595,73 +707,86 @@ std::vector> FrontEngineInterface::MergeThre } //把标点的分词结果补上 - if(word_num < seg_result.size()) { - result.push_back(make_pair(seg_result[word_num].first, seg_result[word_num].second)); + if (word_num < seg_result->size()) { + result.push_back(make_pair(std::get<0>((*seg_result)[word_num]), + 
std::get<1>((*seg_result)[word_num]))); } return result; } // example: 吃饭 儿 --> 吃饭儿 -std::vector> FrontEngineInterface::MergeEr(std::vector> &seg_result) { +std::vector> FrontEngineInterface::MergeEr( + std::vector> *seg_result) { std::vector> result; std::string word; std::string pos; - for(int i = 0; i < seg_result.size(); i++) { - word = seg_result[i].first; - pos = seg_result[i].second; - if((i - 1 >= 0) && (word == "儿")){ - result.back().first = result.back().first + seg_result[i].first; + for (int i = 0; i < seg_result->size(); i++) { + word = std::get<0>((*seg_result)[i]); + pos = std::get<1>((*seg_result)[i]); + if ((i - 1 >= 0) && (word == "儿")) { + result.back().first = + result.back().first + std::get<0>((*seg_result)[i]); } else { - result.push_back(make_pair(word, pos)); + result.push_back(make_pair(word, pos)); } } return result; } -int FrontEngineInterface::MergeforModify(std::vector> &seg_word_type, - std::vector> &modify_seg_word_type) { - +int FrontEngineInterface::MergeforModify( + std::vector> *seg_word_type, + std::vector> *modify_seg_word_type) { std::vector seg_result; - GetSegResult(seg_word_type, seg_result); - LOG(INFO) << "Before merge, seg result is: " << limonp::Join(seg_result.begin(), seg_result.end(), "/"); - - modify_seg_word_type = MergeBu(seg_word_type); - modify_seg_word_type = Mergeyi(modify_seg_word_type); - modify_seg_word_type = MergeReduplication(modify_seg_word_type); - modify_seg_word_type = MergeThreeTones(modify_seg_word_type); - modify_seg_word_type = MergeThreeTones2(modify_seg_word_type); - modify_seg_word_type = MergeEr(modify_seg_word_type); - + GetSegResult(seg_word_type, &seg_result); + LOG(INFO) << "Before merge, seg result is: " + << limonp::Join(seg_result.begin(), seg_result.end(), "/"); + std::vector> tmp; + tmp = MergeBu(seg_word_type); + *modify_seg_word_type = tmp; + tmp = Mergeyi(modify_seg_word_type); + *modify_seg_word_type = tmp; + tmp = MergeReduplication(modify_seg_word_type); + *modify_seg_word_type = tmp; + tmp = MergeThreeTones(modify_seg_word_type); + *modify_seg_word_type = tmp; + tmp = MergeThreeTones2(modify_seg_word_type); + *modify_seg_word_type = tmp; + tmp = MergeEr(modify_seg_word_type); + *modify_seg_word_type = tmp; seg_result = {}; - GetSegResult(modify_seg_word_type, seg_result); - LOG(INFO) << "After merge, seg result is: " << limonp::Join(seg_result.begin(), seg_result.end(), "/"); + + GetSegResult(modify_seg_word_type, &seg_result); + LOG(INFO) << "After merge, seg result is: " + << limonp::Join(seg_result.begin(), seg_result.end(), "/"); return 0; } -int FrontEngineInterface::BuSandi(const std::string &word, std::vector &finals) { +int FrontEngineInterface::BuSandi(const std::string &word, + std::vector *finals) { std::wstring bu = L"不"; std::vector wordvec; // 一个词转成向量形式 - if(0 != Word2WordVec(word, wordvec)) { + if (0 != Word2WordVec(word, &wordvec)) { LOG(ERROR) << "Failed to get word vector"; return -1; } // e.g. 看不懂 b u4 --> b u5, 将韵母的最后一位替换成 5 - if(wordvec.size() == 3 && wordvec[1] == bu) { - finals[1] = finals[1].replace(finals[1].length() - 1, 1, "5"); + if (wordvec.size() == 3 && wordvec[1] == bu) { + (*finals)[1] = (*finals)[1].replace((*finals)[1].length() - 1, 1, "5"); } else { // e.g. 
不怕 b u4 --> b u2, 将韵母的最后一位替换成 2 - for(int i = 0; i < wordvec.size(); i++) { - if(wordvec[i] == bu && i + 1 < wordvec.size() && - absl::EndsWith(finals[i + 1], "4") == true) { - finals[i] = finals[i].replace(finals[i].length() - 1, 1, "2"); - } + for (int i = 0; i < wordvec.size(); i++) { + if (wordvec[i] == bu && i + 1 < wordvec.size() && + absl::EndsWith((*finals)[i + 1], "4") == true) { + (*finals)[i] = + (*finals)[i].replace((*finals)[i].length() - 1, 1, "2"); + } } } @@ -669,11 +794,12 @@ int FrontEngineInterface::BuSandi(const std::string &word, std::vector &finals) { +int FrontEngineInterface::YiSandhi(const std::string &word, + std::vector *finals) { std::wstring yi = L"一"; std::vector wordvec; // 一个词转成向量形式 - if(0 != Word2WordVec(word, wordvec)) { + if (0 != Word2WordVec(word, &wordvec)) { LOG(ERROR) << "Failed to get word vector"; return -1; } @@ -681,44 +807,49 @@ int FrontEngineInterface::YiSandhi(const std::string &word, std::vector &finals) { +int FrontEngineInterface::NeuralSandhi(const std::string &word, + const std::string &pos, + std::vector *finals) { std::wstring word_wstr = ppspeech::utf8string2wstring(word); std::vector wordvec; // 一个词转成向量形式 - if(0 != Word2WordVec(word, wordvec)) { + if (0 != Word2WordVec(word, &wordvec)) { LOG(ERROR) << "Failed to get word vector"; return -1; } @@ -726,10 +857,12 @@ int FrontEngineInterface::NeuralSandhi(const std::string &word, const std::strin assert(word_num == word_wstr.length()); // 情况1:reduplication words for n. and v. e.g. 奶奶, 试试, 旺旺 - for(int j = 0; j < wordvec.size(); j++) { + for (int j = 0; j < wordvec.size(); j++) { std::string inits = "nva"; - if(j - 1 >= 0 && wordvec[j] == wordvec[j - 1] && inits.find(pos[0]) != inits.npos) { - finals[j] = finals[j].replace(finals[j].length() - 1, 1, "5"); + if (j - 1 >= 0 && wordvec[j] == wordvec[j - 1] && + inits.find(pos[0]) != inits.npos) { + (*finals)[j] = + (*finals)[j].replace((*finals)[j].length() - 1, 1, "5"); } } @@ -747,147 +880,204 @@ int FrontEngineInterface::NeuralSandhi(const std::string &word, const std::strin std::wstring ge = L"个"; std::wstring xiushi = L"几有两半多各整每做是零一二三四六七八九"; auto ge_idx = word_wstr.find_first_of(ge); // 出现“个”的第一个位置 - - if(word_num >= 1 && yuqici.find(wordvec.back()) != yuqici.npos) { - finals.back() = finals.back().replace(finals.back().length() - 1, 1, "5"); - } else if(word_num >= 1 && de.find(wordvec.back()) != de.npos) { - finals.back() = finals.back().replace(finals.back().length() - 1, 1, "5"); - } else if(word_num == 1 && le.find(wordvec[0]) != le.npos && find(le_pos.begin(), le_pos.end(), pos) != le_pos.end()) { - finals.back() = finals.back().replace(finals.back().length() - 1, 1, "5"); - } else if(word_num > 1 && men.find(wordvec.back()) != men.npos && find(men_pos.begin(), men_pos.end(), pos) != men_pos.end() - && find(must_not_neural_tone_words.begin(), must_not_neural_tone_words.end(), word) != must_not_neural_tone_words.end()) { - finals.back() = finals.back().replace(finals.back().length() - 1, 1, "5"); - } else if(word_num > 1 && weizhi.find(wordvec.back()) != weizhi.npos && find(weizhi_pos.begin(), weizhi_pos.end(), pos) != weizhi_pos.end()) { - finals.back() = finals.back().replace(finals.back().length() - 1, 1, "5"); - } else if(word_num > 1 && dong.find(wordvec.back()) != dong.npos && fangxiang.find(wordvec[word_num - 2]) != fangxiang.npos) { - finals.back() = finals.back().replace(finals.back().length() - 1, 1, "5"); - } - // 情况3:对“个”字前面带有修饰词的字词读音处理 - else if((ge_idx != word_wstr.npos && ge_idx >= 1 && xiushi.find(wordvec[ge_idx - 
1]) != xiushi.npos)
-        || word_wstr == ge) {
-        finals.back() = finals.back().replace(finals.back().length() - 1, 1, "5");
+
+    if (word_num >= 1 && yuqici.find(wordvec.back()) != yuqici.npos) {
+        (*finals).back() =
+            (*finals).back().replace((*finals).back().length() - 1, 1, "5");
+    } else if (word_num >= 1 && de.find(wordvec.back()) != de.npos) {
+        (*finals).back() =
+            (*finals).back().replace((*finals).back().length() - 1, 1, "5");
+    } else if (word_num == 1 && le.find(wordvec[0]) != le.npos &&
+               find(le_pos.begin(), le_pos.end(), pos) != le_pos.end()) {
+        (*finals).back() =
+            (*finals).back().replace((*finals).back().length() - 1, 1, "5");
+    } else if (word_num > 1 && men.find(wordvec.back()) != men.npos &&
+               find(men_pos.begin(), men_pos.end(), pos) != men_pos.end() &&
+               // neutralize only when the word is NOT in the must-not list
+               find(must_not_neural_tone_words.begin(),
+                    must_not_neural_tone_words.end(),
+                    word) == must_not_neural_tone_words.end()) {
+        (*finals).back() =
+            (*finals).back().replace((*finals).back().length() - 1, 1, "5");
+    } else if (word_num > 1 && weizhi.find(wordvec.back()) != weizhi.npos &&
+               find(weizhi_pos.begin(), weizhi_pos.end(), pos) !=
+                   weizhi_pos.end()) {
+        (*finals).back() =
+            (*finals).back().replace((*finals).back().length() - 1, 1, "5");
+    } else if (word_num > 1 && dong.find(wordvec.back()) != dong.npos &&
+               fangxiang.find(wordvec[word_num - 2]) != fangxiang.npos) {
+        (*finals).back() =
+            (*finals).back().replace((*finals).back().length() - 1, 1, "5");
+    } else if ((ge_idx != word_wstr.npos && ge_idx >= 1 &&
+                xiushi.find(wordvec[ge_idx - 1]) != xiushi.npos) ||
+               word_wstr == ge) {
+        (*finals).back() =
+            (*finals).back().replace((*finals).back().length() - 1, 1, "5");
     } else {
-        if(find(must_neural_tone_words.begin(), must_neural_tone_words.end(), word) != must_neural_tone_words.end()
-            || (word_num >= 2 && find(must_neural_tone_words.begin(), must_neural_tone_words.end(), ppspeech::wstring2utf8string(word_wstr.substr(word_num - 2))) != must_neural_tone_words.end())) {
-            finals.back() = finals.back().replace(finals.back().length() - 1, 1, "5");
-        }
+        if (find(must_neural_tone_words.begin(),
+                 must_neural_tone_words.end(),
+                 word) != must_neural_tone_words.end() ||
+            (word_num >= 2 &&
+             find(must_neural_tone_words.begin(),
+                  must_neural_tone_words.end(),
+                  ppspeech::wstring2utf8string(word_wstr.substr(
+                      word_num - 2))) != must_neural_tone_words.end())) {
+            (*finals).back() =
+                (*finals).back().replace((*finals).back().length() - 1, 1, "5");
+        }
     }

     // Further segment long words into shorter pieces
     std::vector<std::string> word_list;
-    if(0 != SplitWord(word, word_list)) {
+    if (0 != SplitWord(word, &word_list)) {
         LOG(ERROR) << "Failed to split word.";
         return -1;
     }
     // Build the finals list for each sub-word
     std::vector<std::vector<std::string>> finals_list;
     std::vector<std::string> finals_temp;
-    finals_temp.assign(finals.begin(), finals.begin() + ppspeech::utf8string2wstring(word_list[0]).length());
+    finals_temp.assign((*finals).begin(),
+                       (*finals).begin() +
+                           ppspeech::utf8string2wstring(word_list[0]).length());
     finals_list.push_back(finals_temp);
-    finals_temp.assign(finals.begin() + ppspeech::utf8string2wstring(word_list[0]).length(), finals.end());
+    finals_temp.assign(
+        (*finals).begin() + ppspeech::utf8string2wstring(word_list[0]).length(),
+        (*finals).end());
     finals_list.push_back(finals_temp);

-    finals = {};
-    for(int i = 0; i < word_list.size(); i++) {
+    // clear in place so the caller's vector receives the re-split finals;
+    // re-seating the pointer would leak and drop the result
+    finals->clear();
+    for (int i = 0; i < word_list.size(); i++) {
         std::wstring temp_wstr = ppspeech::utf8string2wstring(word_list[i]);
-        if((find(must_neural_tone_words.begin(), must_neural_tone_words.end(), word_list[i]) != must_neural_tone_words.end())
-            || (temp_wstr.length() >= 2 && find(must_neural_tone_words.begin(), must_neural_tone_words.end(), ppspeech::wstring2utf8string(temp_wstr.substr(temp_wstr.length() - 2))) != must_neural_tone_words.end())) {
-            finals_list[i].back() = finals_list[i].back().replace(finals_list[i].back().length() - 1, 1, "5");
-        }
-        finals.insert(finals.end(), finals_list[i].begin(), finals_list[i].end());
+        if ((find(must_neural_tone_words.begin(),
+                  must_neural_tone_words.end(),
+                  word_list[i]) != must_neural_tone_words.end()) ||
+            (temp_wstr.length() >= 2 &&
+             find(must_neural_tone_words.begin(),
+                  must_neural_tone_words.end(),
+                  ppspeech::wstring2utf8string(
+                      temp_wstr.substr(temp_wstr.length() - 2))) !=
+                 must_neural_tone_words.end())) {
+            finals_list[i].back() = finals_list[i].back().replace(
+                finals_list[i].back().length() - 1, 1, "5");
+        }
+        (*finals).insert(
+            (*finals).end(), finals_list[i].begin(), finals_list[i].end());
     }

     return 0;
 }

-int FrontEngineInterface::ThreeSandhi(const std::string &word, std::vector<std::string> &finals) {
+int FrontEngineInterface::ThreeSandhi(const std::string &word,
+                                      std::vector<std::string> *finals) {
     std::wstring word_wstr = ppspeech::utf8string2wstring(word);
     std::vector<std::vector<std::string>> finals_list;
     std::vector<std::string> finals_temp;
     std::vector<std::wstring> wordvec;
     // Convert the word into a per-character vector
-    if(0 != Word2WordVec(word, wordvec)) {
+    if (0 != Word2WordVec(word, &wordvec)) {
         LOG(ERROR) << "Failed to get word vector";
         return -1;
     }
     int word_num = wordvec.size();
     assert(word_num == word_wstr.length());

-    if(word_num == 2 && AllToneThree(finals)) {
-        finals[0] = finals[0].replace(finals[0].length() - 1, 1, "2");
-    } else if(word_num == 3) {
+    if (word_num == 2 && AllToneThree((*finals))) {
+        (*finals)[0] = (*finals)[0].replace((*finals)[0].length() - 1, 1, "2");
+    } else if (word_num == 3) {
         // Further segment the long word into shorter pieces
         std::vector<std::string> word_list;
-        if(0 != SplitWord(word, word_list)) {
+        if (0 != SplitWord(word, &word_list)) {
             LOG(ERROR) << "Failed to split word.";
             return -1;
         }
-        if(AllToneThree(finals)) {
+        if (AllToneThree((*finals))) {
             std::wstring temp_wstr = ppspeech::utf8string2wstring(word_list[0]);
-            //disyllabic + monosyllabic, e.g. 蒙古/包
-            if(temp_wstr.length() == 2) {
-                finals[0] = finals[0].replace(finals[0].length() - 1, 1, "2");
-                finals[1] = finals[1].replace(finals[1].length() - 1, 1, "2");
-            } else if(temp_wstr.length() == 1) { //monosyllabic + disyllabic, e.g. 纸/老虎
-                finals[1] = finals[1].replace(finals[1].length() - 1, 1, "2");
+            // disyllabic + monosyllabic, e.g. 蒙古/包
+            if (temp_wstr.length() == 2) {
+                (*finals)[0] =
+                    (*finals)[0].replace((*finals)[0].length() - 1, 1, "2");
+                (*finals)[1] =
+                    (*finals)[1].replace((*finals)[1].length() - 1, 1, "2");
+            } else if (temp_wstr.length() ==
+                       1) {  // monosyllabic + disyllabic, e.g. 纸/老虎
+                (*finals)[1] =
+                    (*finals)[1].replace((*finals)[1].length() - 1, 1, "2");
             }
         } else {
             // Build the finals lists for the two sub-words
             finals_temp = {};
             finals_list = {};
-            finals_temp.assign(finals.begin(), finals.begin() + ppspeech::utf8string2wstring(word_list[0]).length());
+            finals_temp.assign(
+                (*finals).begin(),
+                (*finals).begin() +
+                    ppspeech::utf8string2wstring(word_list[0]).length());
             finals_list.push_back(finals_temp);
-            finals_temp.assign(finals.begin() + ppspeech::utf8string2wstring(word_list[0]).length(), finals.end());
+            finals_temp.assign(
+                (*finals).begin() +
+                    ppspeech::utf8string2wstring(word_list[0]).length(),
+                (*finals).end());
             finals_list.push_back(finals_temp);
-
-            finals = {};
-            for(int i = 0; i < finals_list.size(); i++) {
+
+            // clear in place; the caller's vector is the output
+            finals->clear();
+            for (int i = 0; i < finals_list.size(); i++) {
                 // e.g. 所有/人
-                if(AllToneThree(finals_list[i]) && finals_list[i].size() == 2) {
-                    finals_list[i][0] = finals_list[i][0].replace(finals_list[i][0].length() - 1, 1, "2");
-                } else if(i == 1 && !(AllToneThree(finals_list[i])) && absl::EndsWith(finals_list[i][0], "3") == true
-                    && absl::EndsWith(finals_list[0].back(), "3") == true) {
-                    finals_list[0].back() = finals_list[0].back().replace(finals_list[0].back().length() - 1, 1, "2");
-                }
-
+                if (AllToneThree(finals_list[i]) &&
+                    finals_list[i].size() == 2) {
+                    finals_list[i][0] = finals_list[i][0].replace(
+                        finals_list[i][0].length() - 1, 1, "2");
+                } else if (i == 1 && !(AllToneThree(finals_list[i])) &&
+                           absl::EndsWith(finals_list[i][0], "3") == true &&
+                           absl::EndsWith(finals_list[0].back(), "3") == true) {
+                    finals_list[0].back() = finals_list[0].back().replace(
+                        finals_list[0].back().length() - 1, 1, "2");
+                }
             }
-            finals.insert(finals.end(), finals_list[0].begin(), finals_list[0].end());
-            finals.insert(finals.end(), finals_list[1].begin(), finals_list[1].end());
+            (*finals).insert(
+                (*finals).end(), finals_list[0].begin(), finals_list[0].end());
+            (*finals).insert(
+                (*finals).end(), finals_list[1].begin(), finals_list[1].end());
         }
-    } else if(word_num == 4) { //将成语拆分为两个长度为 2 的单词
+    } else if (word_num == 4) {  // split the idiom into two 2-character words
         // Build the finals lists for the two halves
         finals_temp = {};
         finals_list = {};
-        finals_temp.assign(finals.begin(), finals.begin() + 2);
+        finals_temp.assign((*finals).begin(), (*finals).begin() + 2);
         finals_list.push_back(finals_temp);
-        finals_temp.assign(finals.begin() + 2, finals.end());
+        finals_temp.assign((*finals).begin() + 2, (*finals).end());
         finals_list.push_back(finals_temp);

-        finals = {};
-        for(int j = 0; j < finals_list.size(); j++){
-            if(AllToneThree(finals_list[j])) {
-                finals_list[j][0] = finals_list[j][0].replace(finals_list[j][0].length() - 1, 1, "2");
+        finals->clear();
+        for (int j = 0; j < finals_list.size(); j++) {
+            if (AllToneThree(finals_list[j])) {
+                finals_list[j][0] = finals_list[j][0].replace(
+                    finals_list[j][0].length() - 1, 1, "2");
             }
-            finals.insert(finals.end(), finals_list[j].begin(), finals_list[j].end());
+            (*finals).insert(
+                (*finals).end(), finals_list[j].begin(), finals_list[j].end());
         }
-
     }

     return 0;
 }

-int FrontEngineInterface::ModifyTone(const std::string &word, const std::string &pos, std::vector<std::string> &finals) {
-    if((0 != BuSandi(word, finals)) || (0 != YiSandhi(word, finals)) ||
-        (0 != NeuralSandhi(word, pos, finals)) || (0 != ThreeSandhi(word,finals))) {
-        LOG(ERROR) << "Failed to modify tone of the word: " << word;
-        return -1;
-    }
+int FrontEngineInterface::ModifyTone(const std::string &word,
+                                     const std::string &pos,
+                                     std::vector<std::string> *finals) {
+    if ((0 != BuSandi(word, finals)) || (0 != YiSandhi(word, finals)) ||
+        (0 != NeuralSandhi(word, pos, finals)) ||
+        (0 != ThreeSandhi(word, finals))) {
+        LOG(ERROR) << "Failed to modify tone of the word: " << word;
+        return -1;
+    }

     return 0;
 }

-std::vector<std::vector<std::string>> FrontEngineInterface::MergeErhua(const std::vector<std::string> &initials, const std::vector<std::string> &finals, const std::string &word, const std::string &pos) {
+std::vector<std::vector<std::string>> FrontEngineInterface::MergeErhua(
+    const std::vector<std::string> &initials,
+    const std::vector<std::string> &finals,
+    const std::string &word,
+    const std::string &pos) {
     std::vector<std::string> new_initials = {};
     std::vector<std::string> new_finals = {};
     std::vector<std::vector<std::string>> new_initials_finals;
@@ -895,28 +1085,38 @@ std::vector<std::vector<std::string>> FrontEngineInterface::MergeErhua(const std
     std::wstring word_wstr = ppspeech::utf8string2wstring(word);
     std::vector<std::wstring> wordvec;
     // Convert the word into a per-character vector
-    if(0 != Word2WordVec(word, wordvec)) {
+    if (0 != Word2WordVec(word, &wordvec)) {
         LOG(ERROR) << "Failed to get word vector";
     }
     int word_num = wordvec.size();

-    if((find(must_erhua.begin(), must_erhua.end(), word) == must_erhua.end()) &&
-        ((find(not_erhua.begin(), not_erhua.end(), word) != not_erhua.end()) || (find(specified_pos.begin(), specified_pos.end(), pos) != specified_pos.end()))) {
+    if ((find(must_erhua.begin(), must_erhua.end(), word) ==
+         must_erhua.end()) &&
+        ((find(not_erhua.begin(), not_erhua.end(), word) != not_erhua.end()) ||
+         (find(specified_pos.begin(), specified_pos.end(), pos) !=
+          specified_pos.end()))) {
         new_initials_finals.push_back(initials);
         new_initials_finals.push_back(finals);
         return new_initials_finals;
     }

-    if(finals.size() != word_num) {
+    if (finals.size() != word_num) {
         new_initials_finals.push_back(initials);
         new_initials_finals.push_back(finals);
         return new_initials_finals;
     }

     assert(finals.size() == word_num);
-    for(int i = 0; i < finals.size(); i++) {
-        if(i == finals.size() - 1 && wordvec[i] == L"儿" && (finals[i] == "er2" || finals[i] == "er5") && word_num >= 2 &&
-            find(not_erhua.begin(), not_erhua.end(), ppspeech::wstring2utf8string(word_wstr.substr(word_wstr.length() - 2))) == not_erhua.end() && !new_finals.empty()) {
-            new_finals.back() = new_finals.back().substr(0, new_finals.back().length()-1) + "r" + new_finals.back().substr(new_finals.back().length()-1);
+    for (int i = 0; i < finals.size(); i++) {
+        if (i == finals.size() - 1 && wordvec[i] == L"儿" &&
+            (finals[i] == "er2" || finals[i] == "er5") && word_num >= 2 &&
+            find(not_erhua.begin(),
+                 not_erhua.end(),
+                 ppspeech::wstring2utf8string(word_wstr.substr(
+                     word_wstr.length() - 2))) == not_erhua.end() &&
+            !new_finals.empty()) {
+            new_finals.back() =
+                new_finals.back().substr(0, new_finals.back().length() - 1) +
+                "r" + new_finals.back().substr(new_finals.back().length() - 1);
         } else {
             new_initials.push_back(initials[i]);
             new_finals.push_back(finals[i]);
@@ -926,8 +1126,5 @@ std::vector<std::vector<std::string>> FrontEngineInterface::MergeErhua(const std
     new_initials_finals.push_back(new_finals);

     return new_initials_finals;
-
-}
-
-
 }
+}  // namespace ppspeech
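The tone rewrites above are plain string surgery on pinyin finals: a final such as `ma1` carries its tone as a trailing digit, so neutralization overwrites that digit with `5`, and erhua merging splices an `r` in front of the previous final's tone digit. A minimal Python sketch of the two operations (the helper names are illustrative, not part of the C++ API):

```python
def to_neutral_tone(final: str) -> str:
    # "ma1" -> "ma5": overwrite the trailing tone digit with 5 (neutral tone),
    # mirroring finals.back().replace(length - 1, 1, "5") in the C++ above
    return final[:-1] + "5"


def merge_erhua(prev_final: str) -> str:
    # "hua1" -> "huar1": splice an "r" before the trailing tone digit,
    # mirroring the substr(0, n - 1) + "r" + substr(n - 1) concatenation above
    return prev_final[:-1] + "r" + prev_final[-1]


assert to_neutral_tone("ma1") == "ma5"
assert merge_erhua("hua1") == "huar1"
```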
diff --git a/demos/TTSCppFrontend/src/front/front_interface.h b/demos/TTSCppFrontend/src/front/front_interface.h
index 8df026c8d..fc33a4de6 100644
--- a/demos/TTSCppFrontend/src/front/front_interface.h
+++ b/demos/TTSCppFrontend/src/front/front_interface.h
@@ -1,156 +1,198 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 #ifndef PADDLE_TTS_SERVING_FRONT_FRONT_INTERFACE_H
 #define PADDLE_TTS_SERVING_FRONT_FRONT_INTERFACE_H
+#include 
+#include 
 #include 
-#include 
 #include 
-#include 
-#include 
 //#include "utils/dir_utils.h"
 #include 
-#include "front/text_normalize.h"
 #include "absl/strings/str_split.h"
+#include "front/text_normalize.h"

 namespace ppspeech {
-
-    class FrontEngineInterface : public TextNormalizer{
-    public:
-        FrontEngineInterface(std::string conf) : _conf_file(conf) {
-            TextNormalizer();
-            _jieba = nullptr;
-            _initialed = false;
-            init();
-        }
-
-        int init();
-        ~FrontEngineInterface() {
-
-        }
-
-        // Read the config file
-        int ReadConfFile();
-
-        // Convert traditional Chinese to simplified
-        int Trand2Simp(const std::wstring &sentence, std::wstring &sentence_simp);
-
-        // Build a dictionary from file
-        int GenDict(const std::string &file, std::map<std::string, std::string> &map);
-
-        // Strip POS tags from the (word, POS) segmentation result, keeping only words
-        int GetSegResult(std::vector<std::pair<std::string, std::string>> &seg, std::vector<std::string> &seg_words);
-
-        // Generate phone and tone ids for a sentence. If phones and tones are not separated, toneids is empty (fastspeech2); otherwise it is not (speedyspeech)
-        int GetSentenceIds(const std::string &sentence, std::vector<int> &phoneids, std::vector<int> &toneids);
-
-        // Get phone/tone ids for each word from the segmentation result and adjust pronunciations (ModifyTone). If phones and tones are not separated, toneids is empty (fastspeech2); otherwise it is not (speedyspeech)
-        int GetWordsIds(const std::vector<std::pair<std::string, std::string>> &cut_result, std::vector<int> &phoneids, std::vector<int> &toneids);

-        // Run jieba to get (word, POS) segments, then post-process them (MergeforModify)
-        int Cut(const std::string &sentence, std::vector<std::pair<std::string, std::string>> &cut_result);
-
-        // Map a word to its phones via dictionary lookup
-        int GetPhone(const std::string &word, std::string &phone);
-
-        // Map phones to phone ids
-        int Phone2Phoneid(const std::string &phone, std::vector<int> &phoneid, std::vector<int> &toneids);
-
-
-        // Check from the finals whether every character in the word is third tone; true means all are
-        bool AllToneThree(const std::vector<std::string> &finals);
-
-        // Check whether the word is a reduplication
-        bool IsReduplication(const std::string &word);
-
-        // Get the initials and finals of each character in the word
-        int GetInitialsFinals(const std::string &word, std::vector<std::string> &word_initials, std::vector<std::string> &word_finals);
-
-        // Get the finals of each character in the word
-        int GetFinals(const std::string &word, std::vector<std::string> &word_finals);
-        // Convert the word into a vector with one element per character
-        int Word2WordVec(const std::string &word, std::vector<std::wstring> &wordvec);
+class FrontEngineInterface : public TextNormalizer {
+  public:
+    explicit FrontEngineInterface(std::string conf) : _conf_file(conf) {
+        TextNormalizer();
+        _jieba = nullptr;
+        _initialed = false;
+        init();
+    }

-        // Re-segment the word with a full cut so that every piece is in the dictionary
-        int SplitWord(const std::string &word, std::vector<std::string> &fullcut_word);
-
-        // Post-process segmentation: merge segments containing "不"
-        std::vector<std::pair<std::string, std::string>> MergeBu(std::vector<std::pair<std::string, std::string>> &seg_result);
+    int init();
+    ~FrontEngineInterface() {}

-        // Post-process segmentation: merge segments containing "一"
-        std::vector<std::pair<std::string, std::string>> Mergeyi(std::vector<std::pair<std::string, std::string>> &seg_result);
+    // Read the config file
+    int ReadConfFile();

-        // Post-process segmentation: merge adjacent identical characters
-        std::vector<std::pair<std::string, std::string>> MergeReduplication(std::vector<std::pair<std::string, std::string>> &seg_result);
+    // Convert traditional Chinese to simplified
+    int Trand2Simp(const std::wstring &sentence, std::wstring *sentence_simp);

-        // Merge two adjacent words that are both entirely third tone
-        std::vector<std::pair<std::string, std::string>> MergeThreeTones(std::vector<std::pair<std::string, std::string>> &seg_result);
+    // Build a dictionary from file
+    int GenDict(const std::string &file,
+                std::map<std::string, std::string> *map);

-        // Merge two words when the last syllable of one and the first of the next are both third tone
-        std::vector<std::pair<std::string, std::string>> MergeThreeTones2(std::vector<std::pair<std::string, std::string>> &seg_result);
+    // Strip POS tags from the (word, POS) segmentation result, keeping only
+    // words
+    int GetSegResult(std::vector<std::pair<std::string, std::string>> *seg,
+                     std::vector<std::string> *seg_words);

-        // Post-process segmentation: merge segments containing "儿"
-        std::vector<std::pair<std::string, std::string>> MergeEr(std::vector<std::pair<std::string, std::string>> &seg_result);
+    // Generate phone and tone ids for a sentence. If phones and tones are not
+    // separated, toneids is empty (fastspeech2); otherwise it is not
+    // (speedyspeech)
+    int GetSentenceIds(const std::string &sentence,
+                       std::vector<int> *phoneids,
+                       std::vector<int> *toneids);

-        // Post-process and modify the segmentation result
-        int MergeforModify(std::vector<std::pair<std::string, std::string>> &seg_result, std::vector<std::pair<std::string, std::string>> &merge_seg_result);
+    // Get phone/tone ids for each word from the segmentation result and
+    // adjust pronunciations (ModifyTone). If phones and tones are not
+    // separated, toneids is empty (fastspeech2); otherwise it is not
+    // (speedyspeech)
+    int GetWordsIds(
+        const std::vector<std::pair<std::string, std::string>> &cut_result,
+        std::vector<int> *phoneids,
+        std::vector<int> *toneids);

+    // Run jieba to get (word, POS) segments, then post-process them
+    // (MergeforModify)
+    int Cut(const std::string &sentence,
+            std::vector<std::pair<std::string, std::string>> *cut_result);

-        // Tone sandhi for words containing "不"
-        int BuSandi(const std::string &word, std::vector<std::string> &finals);
+    // Map a word to its phones via dictionary lookup
+    int GetPhone(const std::string &word, std::string *phone);

-        // Tone sandhi for words containing "一"
-        int YiSandhi(const std::string &word, std::vector<std::string> &finals);
+    // Map phones to phone ids
+    int Phone2Phoneid(const std::string &phone,
+                      std::vector<int> *phoneid,
+                      std::vector<int> *toneids);

-        // Neutral-tone sandhi for special words (measure words, particles, etc.)
-        int NeuralSandhi(const std::string &word, const std::string &pos, std::vector<std::string> &finals);
-        // Third-tone sandhi
-        int ThreeSandhi(const std::string &word, std::vector<std::string> &finals);
+    // Check from the finals whether every character in the word is third
+    // tone; true means all are
+    bool AllToneThree(const std::vector<std::string> &finals);

-        // Apply all tone modifications to a word
-        int ModifyTone(const std::string &word, const std::string &pos, std::vector<std::string> &finals);
+    // Check whether the word is a reduplication
+    bool IsReduplication(const std::string &word);
+
+    // Get the initials and finals of each character in the word
+    int GetInitialsFinals(const std::string &word,
+                          std::vector<std::string> *word_initials,
+                          std::vector<std::string> *word_finals);
+    // Get the finals of each character in the word
+    int GetFinals(const std::string &word,
+                  std::vector<std::string> *word_finals);
+
+    // Convert the word into a vector with one element per character
+    int Word2WordVec(const std::string &word,
+                     std::vector<std::wstring> *wordvec);
+
+    // Re-segment the word with a full cut so that every piece is in the
+    // dictionary
+    int SplitWord(const std::string &word,
+                  std::vector<std::string> *fullcut_word);
+
+    // Post-process segmentation: merge segments containing "不"
+    std::vector<std::pair<std::string, std::string>> MergeBu(
+        std::vector<std::pair<std::string, std::string>> *seg_result);
+
+    // Post-process segmentation: merge segments containing "一"
+    std::vector<std::pair<std::string, std::string>> Mergeyi(
+        std::vector<std::pair<std::string, std::string>> *seg_result);
+
+    // Post-process segmentation: merge adjacent identical characters
+    std::vector<std::pair<std::string, std::string>> MergeReduplication(
+        std::vector<std::pair<std::string, std::string>> *seg_result);
+
+    // Merge two adjacent words that are both entirely third tone
+    std::vector<std::pair<std::string, std::string>> MergeThreeTones(
+        std::vector<std::pair<std::string, std::string>> *seg_result);
+
+    // Merge two words when the last syllable of one and the first of the
+    // next are both third tone
+    std::vector<std::pair<std::string, std::string>> MergeThreeTones2(
+        std::vector<std::pair<std::string, std::string>> *seg_result);
+
+    // Post-process segmentation: merge segments containing "儿"
+    std::vector<std::pair<std::string, std::string>> MergeEr(
+        std::vector<std::pair<std::string, std::string>> *seg_result);
+
+    // Post-process and modify the segmentation result
+    int MergeforModify(
+        std::vector<std::pair<std::string, std::string>> *seg_result,
+        std::vector<std::pair<std::string, std::string>> *merge_seg_result);

-        // Handle erhua
-        std::vector<std::vector<std::string>> MergeErhua(const std::vector<std::string> &initials, const std::vector<std::string> &finals, const std::string &word, const std::string &pos);
-
+    // Tone sandhi for words containing "不"
+    int BuSandi(const std::string &word, std::vector<std::string> *finals);

-    private:
-        bool _initialed;
-        cppjieba::Jieba *_jieba;
-        std::vector<std::string> _punc;
-        std::vector<std::string> _punc_omit;
+    // Tone sandhi for words containing "一"
+    int YiSandhi(const std::string &word, std::vector<std::string> *finals);
+
+    // Neutral-tone sandhi for special words (measure words, particles, etc.)
+    int NeuralSandhi(const std::string &word,
+                     const std::string &pos,
+                     std::vector<std::string> *finals);

-        std::string _conf_file;
-        std::map<std::string, std::string> conf_map;
-        std::map<std::string, std::string> word_phone_map;
-        std::map<std::string, int> phone_id_map;
-        std::map<std::string, int> tone_id_map;
-        std::map<std::string, std::string> trand_simp_map;
+    // Third-tone sandhi
+    int ThreeSandhi(const std::string &word, std::vector<std::string> *finals);
+
+    // Apply all tone modifications to a word
+    int ModifyTone(const std::string &word,
+                   const std::string &pos,
+                   std::vector<std::string> *finals);

-        std::string _jieba_dict_path;
-        std::string _jieba_hmm_path;
-        std::string _jieba_user_dict_path;
-        std::string _jieba_idf_path;
-        std::string _jieba_stop_word_path;
+    // Handle erhua
+    std::vector<std::vector<std::string>> MergeErhua(
+        const std::vector<std::string> &initials,
+        const std::vector<std::string> &finals,
+        const std::string &word,
+        const std::string &pos);
+

-        std::string _seperate_tone;
-        std::string _word2phone_path;
-        std::string _phone2id_path;
-        std::string _tone2id_path;
-        std::string _trand2simp_path;
+  private:
+    bool _initialed;
+    cppjieba::Jieba *_jieba;
+    std::vector<std::string> _punc;
+    std::vector<std::string> _punc_omit;

-        std::vector<std::string> must_erhua;
-        std::vector<std::string> not_erhua;
+    std::string _conf_file;
+    std::map<std::string, std::string> conf_map;
+    std::map<std::string, std::string> word_phone_map;
+    std::map<std::string, int> phone_id_map;
+    std::map<std::string, int> tone_id_map;
+    std::map<std::string, std::string> trand_simp_map;

-        std::vector<std::string> must_not_neural_tone_words;
-        std::vector<std::string> must_neural_tone_words;
+    std::string _jieba_dict_path;
+    std::string _jieba_hmm_path;
+    std::string _jieba_user_dict_path;
+    std::string _jieba_idf_path;
+    std::string _jieba_stop_word_path;

+    std::string _seperate_tone;
+    std::string _word2phone_path;
+    std::string _phone2id_path;
+    std::string _tone2id_path;
+    std::string _trand2simp_path;
+
+    std::vector<std::string> must_erhua;
+    std::vector<std::string> not_erhua;

-    };
-}
+    std::vector<std::string> must_not_neural_tone_words;
+    std::vector<std::string> must_neural_tone_words;
+};
+}  // namespace ppspeech
 #endif
\ No newline at end of file
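The class above encodes a single pipeline: normalize the sentence, cut it with jieba, merge and sandhi-adjust the segments, then map words to phone/tone ids. A rough Python sketch of that control flow may help map the declarations to the data flow; every helper name here is hypothetical shorthand for the corresponding C++ method, not a real API:

```python
def sentence_to_ids(front, sentence: str):
    # hypothetical mirror of GetSentenceIds/GetWordsIds; names are illustrative
    cut_result = front.cut(sentence)                 # jieba -> [(word, pos), ...]
    cut_result = front.merge_for_modify(cut_result)  # merge 不/一/reduplication/third-tone runs
    phone_ids, tone_ids = [], []
    for word, pos in cut_result:
        finals = front.get_finals(word)              # per-character finals
        front.modify_tone(word, pos, finals)         # Bu/Yi/Neural/Three sandhi, in place
        phones = front.get_phone(word)               # dictionary lookup
        pids, tids = front.phone_to_id(phones)       # tone ids empty for fastspeech2
        phone_ids.extend(pids)
        tone_ids.extend(tids)
    return phone_ids, tone_ids
```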
#include "front/text_normalize.h" namespace ppspeech { // 初始化 digits_map and unit_map int TextNormalizer::InitMap() { - digits_map["0"] = "零"; digits_map["1"] = "一"; digits_map["2"] = "二"; @@ -21,77 +33,84 @@ int TextNormalizer::InitMap() { units_map[3] = "千"; units_map[4] = "万"; units_map[8] = "亿"; - + return 0; } // 替换 -int TextNormalizer::Replace(std::wstring &sentence, const int &pos, const int &len, const std::wstring &repstr) { +int TextNormalizer::Replace(std::wstring *sentence, + const int &pos, + const int &len, + const std::wstring &repstr) { // 删除原来的 - sentence.erase(pos, len); + sentence->erase(pos, len); // 插入新的 - sentence.insert(pos, repstr); + sentence->insert(pos, repstr); return 0; - } // 根据标点符号切分句子 -int TextNormalizer::SplitByPunc(const std::wstring &sentence, std::vector &sentence_part) { +int TextNormalizer::SplitByPunc(const std::wstring &sentence, + std::vector *sentence_part) { std::wstring temp = sentence; std::wregex reg(L"[:,;。?!,;?!]"); std::wsmatch match; - while (std::regex_search (temp, match, reg)) { - sentence_part.push_back(temp.substr(0, match.position(0) + match.length(0))); - Replace(temp, 0, match.position(0) + match.length(0), L""); + while (std::regex_search(temp, match, reg)) { + sentence_part->push_back( + temp.substr(0, match.position(0) + match.length(0))); + Replace(&temp, 0, match.position(0) + match.length(0), L""); } // 如果最后没有标点符号 - if(temp != L"") { - sentence_part.push_back(temp); + if (temp != L"") { + sentence_part->push_back(temp); } return 0; } -//数字转文本,10200 - > 一万零二百 -std::string TextNormalizer::CreateTextValue(const std::string &num_str, bool use_zero) { - - std::string num_lstrip = std::string(absl::StripPrefix(num_str, "0")).data(); +// 数字转文本,10200 - > 一万零二百 +std::string TextNormalizer::CreateTextValue(const std::string &num_str, + bool use_zero) { + std::string num_lstrip = + std::string(absl::StripPrefix(num_str, "0")).data(); int len = num_lstrip.length(); - - if(len == 0) { + + if (len == 0) { return ""; } else if (len == 1) { - if(use_zero && (len < num_str.length())) { + if (use_zero && (len < num_str.length())) { return digits_map["0"] + digits_map[num_lstrip]; } else { return digits_map[num_lstrip]; } } else { - int largest_unit = 0; // 最大单位 + int largest_unit = 0; // 最大单位 std::string first_part; std::string second_part; - if (len > 1 and len <= 2) { + if (len > 1 && len <= 2) { largest_unit = 1; - } else if (len > 2 and len <= 3) { + } else if (len > 2 && len <= 3) { largest_unit = 2; - } else if (len > 3 and len <= 4) { + } else if (len > 3 && len <= 4) { largest_unit = 3; - } else if (len > 4 and len <= 8) { + } else if (len > 4 && len <= 8) { largest_unit = 4; } else if (len > 8) { - largest_unit = 8; - } + largest_unit = 8; + } first_part = num_str.substr(0, num_str.length() - largest_unit); second_part = num_str.substr(num_str.length() - largest_unit); - - return CreateTextValue(first_part, use_zero) + units_map[largest_unit] + CreateTextValue(second_part, use_zero); + + return CreateTextValue(first_part, use_zero) + units_map[largest_unit] + + CreateTextValue(second_part, use_zero); } } -// 数字一个一个对应,可直接用于年份,电话,手机, -std::string TextNormalizer::SingleDigit2Text(const std::string &num_str, bool alt_one) { +// 数字一个一个对应,可直接用于年份,电话,手机, +std::string TextNormalizer::SingleDigit2Text(const std::string &num_str, + bool alt_one) { std::string text = ""; if (alt_one) { digits_map["1"] = "幺"; @@ -110,13 +129,16 @@ std::string TextNormalizer::SingleDigit2Text(const std::string &num_str, bool al return text; } -std::string 
TextNormalizer::SingleDigit2Text(const std::wstring &num, bool alt_one) { +std::string TextNormalizer::SingleDigit2Text(const std::wstring &num, + bool alt_one) { std::string num_str = wstring2utf8string(num); return SingleDigit2Text(num_str, alt_one); } // 数字整体对应,可直接用于月份,日期,数值整数部分 -std::string TextNormalizer::MultiDigit2Text(const std::string &num_str, bool alt_one, bool use_zero) { +std::string TextNormalizer::MultiDigit2Text(const std::string &num_str, + bool alt_one, + bool use_zero) { LOG(INFO) << "aaaaaaaaaaaaaaaa: " << alt_one << use_zero; if (alt_one) { digits_map["1"] = "幺"; @@ -124,18 +146,22 @@ std::string TextNormalizer::MultiDigit2Text(const std::string &num_str, bool alt digits_map["1"] = "一"; } - std::wstring result = utf8string2wstring(CreateTextValue(num_str, use_zero)); + std::wstring result = + utf8string2wstring(CreateTextValue(num_str, use_zero)); std::wstring result_0(1, result[0]); std::wstring result_1(1, result[1]); // 一十八 --> 十八 - if ((result_0 == utf8string2wstring(digits_map["1"])) && (result_1 == utf8string2wstring(units_map[1]))) { - return wstring2utf8string(result.substr(1,result.length())); + if ((result_0 == utf8string2wstring(digits_map["1"])) && + (result_1 == utf8string2wstring(units_map[1]))) { + return wstring2utf8string(result.substr(1, result.length())); } else { return wstring2utf8string(result); } } -std::string TextNormalizer::MultiDigit2Text(const std::wstring &num, bool alt_one, bool use_zero) { +std::string TextNormalizer::MultiDigit2Text(const std::wstring &num, + bool alt_one, + bool use_zero) { std::string num_str = wstring2utf8string(num); return MultiDigit2Text(num_str, alt_one, use_zero); } @@ -145,15 +171,20 @@ std::string TextNormalizer::Digits2Text(const std::string &num_str) { std::string text; std::vector integer_decimal; integer_decimal = absl::StrSplit(num_str, "."); - - if(integer_decimal.size() == 1) { // 整数 + + if (integer_decimal.size() == 1) { // 整数 text = MultiDigit2Text(integer_decimal[0]); - } else if(integer_decimal.size() == 2) { // 小数 - if(integer_decimal[0] == "") { // 无整数的小数类型,例如:.22 - text = "点" + SingleDigit2Text(std::string(absl::StripSuffix(integer_decimal[1], "0")).data()); + } else if (integer_decimal.size() == 2) { // 小数 + if (integer_decimal[0] == "") { // 无整数的小数类型,例如:.22 + text = "点" + + SingleDigit2Text( + std::string(absl::StripSuffix(integer_decimal[1], "0")) + .data()); } else { // 常规小数类型,例如:12.34 - text = MultiDigit2Text(integer_decimal[0]) + "点" + \ - SingleDigit2Text(std::string(absl::StripSuffix(integer_decimal[1], "0")).data()); + text = MultiDigit2Text(integer_decimal[0]) + "点" + + SingleDigit2Text( + std::string(absl::StripSuffix(integer_decimal[1], "0")) + .data()); } } else { return "The value does not conform to the numeric format"; @@ -168,23 +199,28 @@ std::string TextNormalizer::Digits2Text(const std::wstring &num) { } // 日期,2021年8月18日 --> 二零二一年八月十八日 -int TextNormalizer::ReData(std::wstring &sentence) { - std::wregex reg(L"(\\d{4}|\\d{2})年((0?[1-9]|1[0-2])月)?(((0?[1-9])|((1|2)[0-9])|30|31)([日号]))?"); +int TextNormalizer::ReData(std::wstring *sentence) { + std::wregex reg( + L"(\\d{4}|\\d{2})年((0?[1-9]|1[0-2])月)?(((0?[1-9])|((1|2)[0-9])|30|31)" + L"([日号]))?"); std::wsmatch match; std::string rep; - while (std::regex_search (sentence, match, reg)) { + while (std::regex_search(*sentence, match, reg)) { rep = ""; rep += SingleDigit2Text(match[1]) + "年"; - if(match[3] != L"") { + if (match[3] != L"") { rep += MultiDigit2Text(match[3], false, false) + "月"; } - if(match[5] != L"") { - rep += 
MultiDigit2Text(match[5], false, false) + wstring2utf8string(match[9]); + if (match[5] != L"") { + rep += MultiDigit2Text(match[5], false, false) + + wstring2utf8string(match[9]); } - Replace(sentence, match.position(0), match.length(0), utf8string2wstring(rep)); - + Replace(sentence, + match.position(0), + match.length(0), + utf8string2wstring(rep)); } return 0; @@ -192,255 +228,301 @@ int TextNormalizer::ReData(std::wstring &sentence) { // XX-XX-XX or XX/XX/XX 例如:2021/08/18 --> 二零二一年八月十八日 -int TextNormalizer::ReData2(std::wstring &sentence) { - std::wregex reg(L"(\\d{4})([- /.])(0[1-9]|1[012])\\2(0[1-9]|[12][0-9]|3[01])"); +int TextNormalizer::ReData2(std::wstring *sentence) { + std::wregex reg( + L"(\\d{4})([- /.])(0[1-9]|1[012])\\2(0[1-9]|[12][0-9]|3[01])"); std::wsmatch match; std::string rep; - - while (std::regex_search (sentence, match, reg)) { + + while (std::regex_search(*sentence, match, reg)) { rep = ""; rep += (SingleDigit2Text(match[1]) + "年"); rep += (MultiDigit2Text(match[3], false, false) + "月"); rep += (MultiDigit2Text(match[4], false, false) + "日"); - Replace(sentence, match.position(0), match.length(0), utf8string2wstring(rep)); - + Replace(sentence, + match.position(0), + match.length(0), + utf8string2wstring(rep)); } - + return 0; } // XX:XX:XX 09:09:02 --> 九点零九分零二秒 -int TextNormalizer::ReTime(std::wstring &sentence) { +int TextNormalizer::ReTime(std::wstring *sentence) { std::wregex reg(L"([0-1]?[0-9]|2[0-3]):([0-5][0-9])(:([0-5][0-9]))?"); std::wsmatch match; std::string rep; - - while (std::regex_search (sentence, match, reg)) { + + while (std::regex_search(*sentence, match, reg)) { rep = ""; rep += (MultiDigit2Text(match[1], false, false) + "点"); - if(absl::StartsWith(wstring2utf8string(match[2]), "0")) { + if (absl::StartsWith(wstring2utf8string(match[2]), "0")) { rep += "零"; } rep += (MultiDigit2Text(match[2]) + "分"); - if(absl::StartsWith(wstring2utf8string(match[4]), "0")) { + if (absl::StartsWith(wstring2utf8string(match[4]), "0")) { rep += "零"; } rep += (MultiDigit2Text(match[4]) + "秒"); - Replace(sentence, match.position(0), match.length(0), utf8string2wstring(rep)); + Replace(sentence, + match.position(0), + match.length(0), + utf8string2wstring(rep)); } return 0; } // 温度,例如:-24.3℃ --> 零下二十四点三度 -int TextNormalizer::ReTemperature(std::wstring &sentence) { - std::wregex reg(L"(-?)(\\d+(\\.\\d+)?)(°C|℃|度|摄氏度)"); +int TextNormalizer::ReTemperature(std::wstring *sentence) { + std::wregex reg(L"(-?)(\\d+(\\.\\d+)?)(°C|℃|度|摄氏度)"); std::wsmatch match; std::string rep; std::string sign; std::vector integer_decimal; std::string unit; - while (std::regex_search (sentence, match, reg)) { + while (std::regex_search(*sentence, match, reg)) { match[1] == L"-" ? sign = "负" : sign = ""; - match[4] == L"摄氏度"? unit = "摄氏度" : unit = "度"; + match[4] == L"摄氏度" ? unit = "摄氏度" : unit = "度"; rep = sign + Digits2Text(match[2]) + unit; - - Replace(sentence, match.position(0), match.length(0), utf8string2wstring(rep)); + Replace(sentence, + match.position(0), + match.length(0), + utf8string2wstring(rep)); } return 0; - } // 分数,例如: 1/3 --> 三分之一 -int TextNormalizer::ReFrac(std::wstring &sentence) { - std::wregex reg(L"(-?)(\\d+)/(\\d+)"); +int TextNormalizer::ReFrac(std::wstring *sentence) { + std::wregex reg(L"(-?)(\\d+)/(\\d+)"); std::wsmatch match; std::string sign; std::string rep; - while (std::regex_search (sentence, match, reg)) { + while (std::regex_search(*sentence, match, reg)) { match[1] == L"-" ? 
sign = "负" : sign = ""; - rep = sign + MultiDigit2Text(match[3]) + "分之" + MultiDigit2Text(match[2]); - Replace(sentence, match.position(0), match.length(0), utf8string2wstring(rep)); + rep = sign + MultiDigit2Text(match[3]) + "分之" + + MultiDigit2Text(match[2]); + Replace(sentence, + match.position(0), + match.length(0), + utf8string2wstring(rep)); } return 0; } // 百分数,例如:45.5% --> 百分之四十五点五 -int TextNormalizer::RePercentage(std::wstring &sentence) { - std::wregex reg(L"(-?)(\\d+(\\.\\d+)?)%"); +int TextNormalizer::RePercentage(std::wstring *sentence) { + std::wregex reg(L"(-?)(\\d+(\\.\\d+)?)%"); std::wsmatch match; std::string sign; std::string rep; std::vector integer_decimal; - - while (std::regex_search (sentence, match, reg)) { + + while (std::regex_search(*sentence, match, reg)) { match[1] == L"-" ? sign = "负" : sign = ""; rep = sign + "百分之" + Digits2Text(match[2]); - - Replace(sentence, match.position(0), match.length(0), utf8string2wstring(rep)); + + Replace(sentence, + match.position(0), + match.length(0), + utf8string2wstring(rep)); } - + return 0; } // 手机号码,例如:+86 18883862235 --> 八六幺八八八三八六二二三五 -int TextNormalizer::ReMobilePhone(std::wstring &sentence) { - std::wregex reg(L"(\\d)?((\\+?86 ?)?1([38]\\d|5[0-35-9]|7[678]|9[89])\\d{8})(\\d)?"); +int TextNormalizer::ReMobilePhone(std::wstring *sentence) { + std::wregex reg( + L"(\\d)?((\\+?86 ?)?1([38]\\d|5[0-35-9]|7[678]|9[89])\\d{8})(\\d)?"); std::wsmatch match; std::string rep; std::vector country_phonenum; - while (std::regex_search (sentence, match, reg)) { + while (std::regex_search(*sentence, match, reg)) { country_phonenum = absl::StrSplit(wstring2utf8string(match[0]), "+"); rep = ""; - for(int i = 0; i < country_phonenum.size(); i++) { + for (int i = 0; i < country_phonenum.size(); i++) { LOG(INFO) << country_phonenum[i]; rep += SingleDigit2Text(country_phonenum[i], true); } - Replace(sentence, match.position(0), match.length(0), utf8string2wstring(rep)); - + Replace(sentence, + match.position(0), + match.length(0), + utf8string2wstring(rep)); } - + return 0; } // 座机号码,例如:010-51093154 --> 零幺零五幺零九三幺五四 -int TextNormalizer::RePhone(std::wstring &sentence) { - std::wregex reg(L"(\\d)?((0(10|2[1-3]|[3-9]\\d{2})-?)?[1-9]\\d{6,7})(\\d)?"); +int TextNormalizer::RePhone(std::wstring *sentence) { + std::wregex reg( + L"(\\d)?((0(10|2[1-3]|[3-9]\\d{2})-?)?[1-9]\\d{6,7})(\\d)?"); std::wsmatch match; std::vector zone_phonenum; std::string rep; - while (std::regex_search (sentence, match, reg)) { + while (std::regex_search(*sentence, match, reg)) { rep = ""; zone_phonenum = absl::StrSplit(wstring2utf8string(match[0]), "-"); - for(int i = 0; i < zone_phonenum.size(); i ++) { + for (int i = 0; i < zone_phonenum.size(); i++) { rep += SingleDigit2Text(zone_phonenum[i], true); } - Replace(sentence, match.position(0), match.length(0), utf8string2wstring(rep)); + Replace(sentence, + match.position(0), + match.length(0), + utf8string2wstring(rep)); } return 0; } // 范围,例如:60~90 --> 六十到九十 -int TextNormalizer::ReRange(std::wstring &sentence) { - std::wregex reg(L"((-?)((\\d+)(\\.\\d+)?)|(\\.(\\d+)))[-~]((-?)((\\d+)(\\.\\d+)?)|(\\.(\\d+)))"); +int TextNormalizer::ReRange(std::wstring *sentence) { + std::wregex reg( + L"((-?)((\\d+)(\\.\\d+)?)|(\\.(\\d+)))[-~]((-?)((\\d+)(\\.\\d+)?)|(\\.(" + L"\\d+)))"); std::wsmatch match; std::string rep; std::string sign1; std::string sign2; - while (std::regex_search (sentence, match, reg)) { + while (std::regex_search(*sentence, match, reg)) { rep = ""; match[2] == L"-" ? 
sign1 = "负" : sign1 = ""; - if(match[6] != L"") { + if (match[6] != L"") { rep += sign1 + Digits2Text(match[6]) + "到"; } else { rep += sign1 + Digits2Text(match[3]) + "到"; } match[9] == L"-" ? sign2 = "负" : sign2 = ""; - if(match[13] != L"") { + if (match[13] != L"") { rep += sign2 + Digits2Text(match[13]); } else { rep += sign2 + Digits2Text(match[10]); } - Replace(sentence, match.position(0), match.length(0), utf8string2wstring(rep)); + Replace(sentence, + match.position(0), + match.length(0), + utf8string2wstring(rep)); } return 0; } // 带负号的整数,例如:-10 --> 负十 -int TextNormalizer::ReInterger(std::wstring &sentence) { - std::wregex reg(L"(-)(\\d+)"); +int TextNormalizer::ReInterger(std::wstring *sentence) { + std::wregex reg(L"(-)(\\d+)"); std::wsmatch match; std::string rep; - while (std::regex_search (sentence, match, reg)) { + while (std::regex_search(*sentence, match, reg)) { rep = "负" + MultiDigit2Text(match[2]); - Replace(sentence, match.position(0), match.length(0), utf8string2wstring(rep)); + Replace(sentence, + match.position(0), + match.length(0), + utf8string2wstring(rep)); } - + return 0; } // 纯小数 -int TextNormalizer::ReDecimalNum(std::wstring &sentence) { - std::wregex reg(L"(-?)((\\d+)(\\.\\d+))|(\\.(\\d+))"); +int TextNormalizer::ReDecimalNum(std::wstring *sentence) { + std::wregex reg(L"(-?)((\\d+)(\\.\\d+))|(\\.(\\d+))"); std::wsmatch match; std::string sign; std::string rep; - //std::vector integer_decimal; - while (std::regex_search (sentence, match, reg)) { + // std::vector integer_decimal; + while (std::regex_search(*sentence, match, reg)) { match[1] == L"-" ? sign = "负" : sign = ""; - if(match[5] != L"") { + if (match[5] != L"") { rep = sign + Digits2Text(match[5]); } else { rep = sign + Digits2Text(match[2]); } - Replace(sentence, match.position(0), match.length(0), utf8string2wstring(rep)); + Replace(sentence, + match.position(0), + match.length(0), + utf8string2wstring(rep)); } return 0; } // 正整数 + 量词 -int TextNormalizer::RePositiveQuantifiers(std::wstring &sentence) { - std::wstring common_quantifiers = L"(朵|匹|张|座|回|场|尾|条|个|首|阙|阵|网|炮|顶|丘|棵|只|支|袭|辆|挑|担|颗|壳|窠|曲| \ - 墙|群|腔|砣|座|客|贯|扎|捆|刀|令|打|手|罗|坡|山|岭|江|溪|钟|队|单|双|对|出|口|头|脚|板|跳|枝|件|贴|针|线|管|名|位|身|堂| \ - 课|本|页|家|户|层|丝|毫|厘|分|钱|两|斤|担|铢|石|钧|锱|忽|(千|毫|微)克|毫|厘|(公)分|分|寸|尺|丈|里|寻|常|铺|程|(千|分|厘| \ - 毫|微)米|米|撮|勺|合|升|斗|石|盘|碗|碟|叠|桶|笼|盆|盒|杯|钟|斛|锅|簋|篮|盘|桶|罐|瓶|壶|卮|盏|箩|箱|煲|啖|袋|钵|年|月|日| \ - 季|刻|时|周|天|秒|分|旬|纪|岁|世|更|夜|春|夏|秋|冬|代|伏|辈|丸|泡|粒|颗|幢|堆|条|根|支|道|面|片|张|颗|块|元|(亿|千万|百万| \ - 万|千|百)|(亿|千万|百万|万|千|百|美|)元|(亿|千万|百万|万|千|百|)块|角|毛|分)"; - std::wregex reg(L"(\\d+)([多余几])?" + common_quantifiers); +int TextNormalizer::RePositiveQuantifiers(std::wstring *sentence) { + std::wstring common_quantifiers = + L"(朵|匹|张|座|回|场|尾|条|个|首|阙|阵|网|炮|顶|丘|棵|只|支|袭|辆|挑|" + L"担|颗|壳|窠|曲|墙|群|腔|砣|座|客|贯|扎|捆|刀|令|打|手|罗|坡|山|岭|江|" + L"溪|钟|队|单|双|对|出|口|头|脚|板|跳|枝|件|贴|针|线|管|名|位|身|堂|课|" + L"本|页|家|户|层|丝|毫|厘|分|钱|两|斤|担|铢|石|钧|锱|忽|(千|毫|微)克|" + L"毫|厘|(公)分|分|寸|尺|丈|里|寻|常|铺|程|(千|分|厘|毫|微)米|米|撮|勺|" + L"合|升|斗|石|盘|碗|碟|叠|桶|笼|盆|盒|杯|钟|斛|锅|簋|篮|盘|桶|罐|瓶|壶|" + L"卮|盏|箩|箱|煲|啖|袋|钵|年|月|日|季|刻|时|周|天|秒|分|旬|纪|岁|世|更|" + L"夜|春|夏|秋|冬|代|伏|辈|丸|泡|粒|颗|幢|堆|条|根|支|道|面|片|张|颗|块|" + L"元|(亿|千万|百万|万|千|百)|(亿|千万|百万|万|千|百|美|)元|(亿|千万|" + L"百万|万|千|百|)块|角|毛|分)"; + std::wregex reg(L"(\\d+)([多余几])?" 
+ common_quantifiers); std::wsmatch match; std::string rep; - while (std::regex_search (sentence, match, reg)) { + while (std::regex_search(*sentence, match, reg)) { rep = MultiDigit2Text(match[1]); - Replace(sentence, match.position(1), match.length(1), utf8string2wstring(rep)); + Replace(sentence, + match.position(1), + match.length(1), + utf8string2wstring(rep)); } return 0; } // 编号类数字,例如: 89757 --> 八九七五七 -int TextNormalizer::ReDefalutNum(std::wstring &sentence) { - std::wregex reg(L"\\d{3}\\d*"); +int TextNormalizer::ReDefalutNum(std::wstring *sentence) { + std::wregex reg(L"\\d{3}\\d*"); std::wsmatch match; - while (std::regex_search (sentence, match, reg)) { - Replace(sentence, match.position(0), match.length(0), utf8string2wstring(SingleDigit2Text(match[0]))); + while (std::regex_search(*sentence, match, reg)) { + Replace(sentence, + match.position(0), + match.length(0), + utf8string2wstring(SingleDigit2Text(match[0]))); } return 0; } -int TextNormalizer::ReNumber(std::wstring &sentence) { - std::wregex reg(L"(-?)((\\d+)(\\.\\d+)?)|(\\.(\\d+))"); +int TextNormalizer::ReNumber(std::wstring *sentence) { + std::wregex reg(L"(-?)((\\d+)(\\.\\d+)?)|(\\.(\\d+))"); std::wsmatch match; std::string sign; std::string rep; - while (std::regex_search (sentence, match, reg)) { + while (std::regex_search(*sentence, match, reg)) { match[1] == L"-" ? sign = "负" : sign = ""; - if(match[5] != L"") { + if (match[5] != L"") { rep = sign + Digits2Text(match[5]); } else { rep = sign + Digits2Text(match[2]); } - - Replace(sentence, match.position(0), match.length(0), utf8string2wstring(rep)); + + Replace(sentence, + match.position(0), + match.length(0), + utf8string2wstring(rep)); } return 0; } // 整体正则,按顺序 -int TextNormalizer::SentenceNormalize(std::wstring &sentence) { +int TextNormalizer::SentenceNormalize(std::wstring *sentence) { ReData(sentence); ReData2(sentence); ReTime(sentence); @@ -452,11 +534,9 @@ int TextNormalizer::SentenceNormalize(std::wstring &sentence) { ReRange(sentence); ReInterger(sentence); ReDecimalNum(sentence); - RePositiveQuantifiers(sentence); + RePositiveQuantifiers(sentence); ReDefalutNum(sentence); ReNumber(sentence); - return 0; + return 0; } - - -} \ No newline at end of file +} // namespace ppspeech \ No newline at end of file diff --git a/demos/TTSCppFrontend/src/front/text_normalize.h b/demos/TTSCppFrontend/src/front/text_normalize.h index 20d502b82..4383fa1b4 100644 --- a/demos/TTSCppFrontend/src/front/text_normalize.h +++ b/demos/TTSCppFrontend/src/front/text_normalize.h @@ -1,11 +1,24 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
#ifndef PADDLE_TTS_SERVING_FRONT_TEXT_NORMALIZE_H #define PADDLE_TTS_SERVING_FRONT_TEXT_NORMALIZE_H +#include +#include #include #include #include -#include -#include #include "absl/strings/str_split.h" #include "absl/strings/strip.h" #include "base/type_conv.h" @@ -13,50 +26,52 @@ namespace ppspeech { class TextNormalizer { -public: - TextNormalizer() { - InitMap(); - } - ~TextNormalizer() { - - } + public: + TextNormalizer() { InitMap(); } + ~TextNormalizer() {} int InitMap(); - int Replace(std::wstring &sentence, const int &pos, const int &len, const std::wstring &repstr); - int SplitByPunc(const std::wstring &sentence, std::vector &sentence_part); + int Replace(std::wstring *sentence, + const int &pos, + const int &len, + const std::wstring &repstr); + int SplitByPunc(const std::wstring &sentence, + std::vector *sentence_part); - std::string CreateTextValue(const std::string &num, bool use_zero=true); - std::string SingleDigit2Text(const std::string &num_str, bool alt_one = false); + std::string CreateTextValue(const std::string &num, bool use_zero = true); + std::string SingleDigit2Text(const std::string &num_str, + bool alt_one = false); std::string SingleDigit2Text(const std::wstring &num, bool alt_one = false); - std::string MultiDigit2Text(const std::string &num_str, bool alt_one = false, bool use_zero = true); - std::string MultiDigit2Text(const std::wstring &num, bool alt_one = false, bool use_zero = true); + std::string MultiDigit2Text(const std::string &num_str, + bool alt_one = false, + bool use_zero = true); + std::string MultiDigit2Text(const std::wstring &num, + bool alt_one = false, + bool use_zero = true); std::string Digits2Text(const std::string &num_str); std::string Digits2Text(const std::wstring &num); - int ReData(std::wstring &sentence); - int ReData2(std::wstring &sentence); - int ReTime(std::wstring &sentence); - int ReTemperature(std::wstring &sentence); - int ReFrac(std::wstring &sentence); - int RePercentage(std::wstring &sentence); - int ReMobilePhone(std::wstring &sentence); - int RePhone(std::wstring &sentence); - int ReRange(std::wstring &sentence); - int ReInterger(std::wstring &sentence); - int ReDecimalNum(std::wstring &sentence); - int RePositiveQuantifiers(std::wstring &sentence); - int ReDefalutNum(std::wstring &sentence); - int ReNumber(std::wstring &sentence); - int SentenceNormalize(std::wstring &sentence); - - -private: - std::map digits_map; - std::map units_map; + int ReData(std::wstring *sentence); + int ReData2(std::wstring *sentence); + int ReTime(std::wstring *sentence); + int ReTemperature(std::wstring *sentence); + int ReFrac(std::wstring *sentence); + int RePercentage(std::wstring *sentence); + int ReMobilePhone(std::wstring *sentence); + int RePhone(std::wstring *sentence); + int ReRange(std::wstring *sentence); + int ReInterger(std::wstring *sentence); + int ReDecimalNum(std::wstring *sentence); + int RePositiveQuantifiers(std::wstring *sentence); + int ReDefalutNum(std::wstring *sentence); + int ReNumber(std::wstring *sentence); + int SentenceNormalize(std::wstring *sentence); + private: + std::map digits_map; + std::map units_map; }; - -} +} // namespace ppspeech #endif \ No newline at end of file From 435fc5cc19be943872ef2897cb77d4979e6709df Mon Sep 17 00:00:00 2001 From: liangym <34430015+lym0302@users.noreply.github.com> Date: Tue, 14 Mar 2023 15:30:44 +0800 Subject: [PATCH 16/37] [TTS] add opencpop PWGAN example (#3031) * add opencpop voc, test=tts * soft link --- examples/opencpop/voc1/README.md | 139 ++++++++++++++ 
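For readers comparing the C++ normalizer above with the Python frontend it ports, the rules are ordered regex-replace passes over the sentence. A simplified Python sketch of the date rule (`ReData`), with the digit table abbreviated; it only verbalizes the year, whereas the C++ rule also handles the month and day groups:

```python
import re

DIGITS = dict(zip("0123456789", "零一二三四五六七八九"))


def verbalize_digits(num: str) -> str:
    # digit-by-digit reading, as used for years: "2021" -> "二零二一"
    return "".join(DIGITS[d] for d in num)


def re_data(sentence: str) -> str:
    # simplified counterpart of ReData: year part only
    return re.sub(r"(\d{4}|\d{2})年",
                  lambda m: verbalize_digits(m.group(1)) + "年", sentence)


print(re_data("2021年8月18日"))  # 二零二一年8月18日 (month/day rules omitted)
```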
From 435fc5cc19be943872ef2897cb77d4979e6709df Mon Sep 17 00:00:00 2001
From: liangym <34430015+lym0302@users.noreply.github.com>
Date: Tue, 14 Mar 2023 15:30:44 +0800
Subject: [PATCH 16/37] [TTS] add opencpop PWGAN example (#3031)

* add opencpop voc, test=tts

* soft link
---
 examples/opencpop/voc1/README.md              | 139 ++++++++++++++
 examples/opencpop/voc1/conf/default.yaml      | 119 ++++++++++++
 examples/opencpop/voc1/local/PTQ_static.sh    |   1 +
 .../opencpop/voc1/local/dygraph_to_static.sh  |  15 ++
 examples/opencpop/voc1/local/preprocess.sh    |  47 +++++
 examples/opencpop/voc1/local/synthesize.sh    |   1 +
 examples/opencpop/voc1/local/train.sh         |   1 +
 examples/opencpop/voc1/path.sh                |   1 +
 examples/opencpop/voc1/run.sh                 |  42 +++++
 paddlespeech/t2s/exps/PTQ_static.py           |   1 +
 paddlespeech/t2s/exps/dygraph_to_static.py    | 169 ++++++++++++++++++
 .../t2s/exps/gan_vocoder/preprocess.py        |  39 +++-
 12 files changed, 573 insertions(+), 2 deletions(-)
 create mode 100644 examples/opencpop/voc1/README.md
 create mode 100644 examples/opencpop/voc1/conf/default.yaml
 create mode 120000 examples/opencpop/voc1/local/PTQ_static.sh
 create mode 100755 examples/opencpop/voc1/local/dygraph_to_static.sh
 create mode 100755 examples/opencpop/voc1/local/preprocess.sh
 create mode 120000 examples/opencpop/voc1/local/synthesize.sh
 create mode 120000 examples/opencpop/voc1/local/train.sh
 create mode 120000 examples/opencpop/voc1/path.sh
 create mode 100755 examples/opencpop/voc1/run.sh
 create mode 100644 paddlespeech/t2s/exps/dygraph_to_static.py

diff --git a/examples/opencpop/voc1/README.md b/examples/opencpop/voc1/README.md
new file mode 100644
index 000000000..37570a648
--- /dev/null
+++ b/examples/opencpop/voc1/README.md
@@ -0,0 +1,139 @@
+# Parallel WaveGAN with Opencpop
+This example contains code used to train a [parallel wavegan](http://arxiv.org/abs/1910.11480) model with a [Mandarin singing corpus](https://wenet.org.cn/opencpop/).
+
+## Dataset
+### Download and Extract
+Download Opencpop from its [Official Website](https://wenet.org.cn/opencpop/download/) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/Opencpop`.
+
+## Get Started
+Assume the path to the dataset is `~/datasets/Opencpop`.
+Run the command below to
+1. **source path**.
+2. preprocess the dataset.
+3. train the model.
+4. synthesize wavs.
+    - synthesize waveform from `metadata.jsonl`.
+```bash
+./run.sh
+```
+You can choose a range of stages you want to run, or set `stage` equal to `stop-stage` to use only one stage, for example, running the following command will only preprocess the dataset.
+```bash
+./run.sh --stage 0 --stop-stage 0
+```
+### Data Preprocessing
+```bash
+./local/preprocess.sh ${conf_path}
+```
+When it is done, a `dump` folder is created in the current directory. The structure of the dump folder is listed below.
+
+```text
+dump
+├── dev
+│   ├── norm
+│   └── raw
+├── test
+│   ├── norm
+│   └── raw
+└── train
+    ├── norm
+    ├── raw
+    └── feats_stats.npy
+```
+The dataset is split into 3 parts, namely `train`, `dev`, and `test`, each of which contains a `norm` and `raw` subfolder. The `raw` folder contains the log magnitude of the mel spectrogram of each utterance, while the `norm` folder contains the normalized spectrogram. The statistics used to normalize the spectrogram are computed from the training set, which is located in `dump/train/feats_stats.npy`.
+
+Also, there is a `metadata.jsonl` in each subfolder. It is a table-like file that contains the id of and the path to the spectrogram of each utterance.
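The `metadata.jsonl` files are easy to inspect from Python. A short sketch using the `jsonlines` package; the `utt_id`/`feats` field names follow the `--field-name="feats"` convention used by `compute_statistics.py` in this recipe, but treat them as assumptions:

```python
import jsonlines

# peek at the normalized training metadata produced by preprocess.sh
with jsonlines.open("dump/train/norm/metadata.jsonl") as reader:
    for item in reader:
        # each record is expected to carry an utterance id and a path to
        # its normalized mel features; exact keys may differ by recipe
        print(item.get("utt_id"), item.get("feats"))
```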
+
+### Model Training
+```bash
+CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path}
+```
+`./local/train.sh` calls `${BIN_DIR}/train.py`.
+Here's the complete help message.

+```text
+usage: train.py [-h] [--config CONFIG] [--train-metadata TRAIN_METADATA]
+                [--dev-metadata DEV_METADATA] [--output-dir OUTPUT_DIR]
+                [--ngpu NGPU] [--batch-size BATCH_SIZE] [--max-iter MAX_ITER]
+                [--run-benchmark RUN_BENCHMARK]
+                [--profiler_options PROFILER_OPTIONS]
+
+Train a ParallelWaveGAN model.
+
+optional arguments:
+  -h, --help            show this help message and exit
+  --config CONFIG       ParallelWaveGAN config file.
+  --train-metadata TRAIN_METADATA
+                        training data.
+  --dev-metadata DEV_METADATA
+                        dev data.
+  --output-dir OUTPUT_DIR
+                        output dir.
+  --ngpu NGPU           if ngpu == 0, use cpu.
+
+benchmark:
+  arguments related to benchmark.
+
+  --batch-size BATCH_SIZE
+                        batch size.
+  --max-iter MAX_ITER   train max steps.
+  --run-benchmark RUN_BENCHMARK
+                        running benchmark or not, if True, use the --batch-size
+                        and --max-iter.
+  --profiler_options PROFILER_OPTIONS
+                        The option of profiler, which should be in format
+                        "key1=value1;key2=value2;key3=value3".
+```
+
+1. `--config` is a config file in yaml format to overwrite the default config, which can be found at `conf/default.yaml`.
+2. `--train-metadata` and `--dev-metadata` should be the metadata file in the normalized subfolder of `train` and `dev` in the `dump` folder.
+3. `--output-dir` is the directory to save the results of the experiment. Checkpoints are saved in `checkpoints/` inside this directory.
+4. `--ngpu` is the number of gpus to use, if ngpu == 0, use cpu.
+
+### Synthesizing
+`./local/synthesize.sh` calls `${BIN_DIR}/../synthesize.py`, which can synthesize waveform from `metadata.jsonl`.
+```bash
+CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name}
+```
+```text
+usage: synthesize.py [-h] [--generator-type GENERATOR_TYPE] [--config CONFIG]
+                     [--checkpoint CHECKPOINT] [--test-metadata TEST_METADATA]
+                     [--output-dir OUTPUT_DIR] [--ngpu NGPU]
+
+Synthesize with GANVocoder.
+
+optional arguments:
+  -h, --help            show this help message and exit
+  --generator-type GENERATOR_TYPE
+                        type of GANVocoder, should in {pwgan, mb_melgan,
+                        style_melgan, } now
+  --config CONFIG       GANVocoder config file.
+  --checkpoint CHECKPOINT
+                        snapshot to load.
+  --test-metadata TEST_METADATA
+                        dev data.
+  --output-dir OUTPUT_DIR
+                        output dir.
+  --ngpu NGPU           if ngpu == 0, use cpu.
+```
+
+1. `--config` parallel wavegan config file. You should use the same config with which the model is trained.
+2. `--checkpoint` is the checkpoint to load. Pick one of the checkpoints from `checkpoints` inside the training output directory.
+3. `--test-metadata` is the metadata of the test dataset. Use the `metadata.jsonl` in the `dev/norm` subfolder from the processed directory.
+4. `--output-dir` is the directory to save the synthesized audio files.
+5. `--ngpu` is the number of gpus to use, if ngpu == 0, use cpu.
+
+## Pretrained Models
+The pretrained model can be downloaded here:
+- [pwgan_opencpop_ckpt_1.4.0](https://paddlespeech.bj.bcebos.com/t2s/svs/opencpop/pwgan_opencpop_ckpt_1.4.0.zip)
+
+
+The Parallel WaveGAN checkpoint contains the files listed below.
+
+```text
+pwgan_opencpop_ckpt_1.4.0
+├── default.yaml             # default config used to train parallel wavegan
+├── snapshot_iter_100000.pdz # generator parameters of parallel wavegan
+└── feats_stats.npy          # statistics used to normalize spectrogram when training parallel wavegan
+```
+## Acknowledgement
+We adapted some code from https://github.com/kan-bayashi/ParallelWaveGAN.
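As the README notes, `feats_stats.npy` holds the statistics used to normalize the mels. Judging from how `compute_statistics.py` is invoked in `preprocess.sh` below, it stacks a per-bin mean and scale; a hedged sketch of applying it, where the `(2, n_mels)` layout is an assumption:

```python
import numpy as np

stats = np.load("dump/train/feats_stats.npy")  # assumed shape: (2, n_mels)
mean, scale = stats[0], stats[1]


def normalize(mel: np.ndarray) -> np.ndarray:
    # per-bin z-score, the same transform normalize.py applies before training
    return (mel - mean) / scale


def denormalize(mel: np.ndarray) -> np.ndarray:
    # inverse transform, useful when feeding raw model output to the vocoder
    return mel * scale + mean
```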
diff --git a/examples/opencpop/voc1/conf/default.yaml b/examples/opencpop/voc1/conf/default.yaml
new file mode 100644
index 000000000..ee99719dc
--- /dev/null
+++ b/examples/opencpop/voc1/conf/default.yaml
@@ -0,0 +1,119 @@
+# This is the hyperparameter configuration file for Parallel WaveGAN.
+# Please make sure this is adjusted for the Opencpop dataset. If you want to
+# apply it to another dataset, you might need to carefully change some parameters.
+# This configuration requires 12 GB GPU memory and takes ~3 days on RTX TITAN.
+
+###########################################################
+#                FEATURE EXTRACTION SETTING               #
+###########################################################
+fs: 24000                # Sampling rate.
+n_fft: 512               # FFT size (samples).
+n_shift: 128             # Hop size (samples). ~5.3 ms at 24 kHz.
+win_length: 512          # Window length (samples). ~21.3 ms at 24 kHz.
+                         # If set to null, it will be the same as fft_size.
+window: "hann"           # Window function.
+n_mels: 80               # Number of mel basis.
+fmin: 30                 # Minimum freq in mel basis calculation. (Hz)
+fmax: 12000              # Maximum frequency in mel basis calculation. (Hz)
+
+
+###########################################################
+#         GENERATOR NETWORK ARCHITECTURE SETTING          #
+###########################################################
+generator_params:
+    in_channels: 1        # Number of input channels.
+    out_channels: 1       # Number of output channels.
+    kernel_size: 3        # Kernel size of dilated convolution.
+    layers: 30            # Number of residual block layers.
+    stacks: 3             # Number of stacks i.e., dilation cycles.
+    residual_channels: 64 # Number of channels in residual conv.
+    gate_channels: 128    # Number of channels in gated conv.
+    skip_channels: 64     # Number of channels in skip conv.
+    aux_channels: 80      # Number of channels for auxiliary feature conv.
+                          # Must be the same as num_mels.
+    aux_context_window: 2 # Context window size for auxiliary feature.
+                          # If set to 2, previous 2 and future 2 frames will be considered.
+    dropout: 0.0          # Dropout rate. 0.0 means no dropout applied.
+    bias: True            # use bias in residual blocks
+    use_weight_norm: True # Whether to use weight norm.
+                          # If set to true, it will be applied to all of the conv layers.
+    use_causal_conv: False # use causal conv in residual blocks and upsample layers
+    upsample_scales: [8, 4, 2, 2] # Upsampling scales. Product of these must be the same as hop size.
+    interpolate_mode: "nearest" # upsample net interpolate mode
+    freq_axis_kernel_size: 1 # upsampling net: convolution kernel size on the frequency axis
+    nonlinear_activation: null
+    nonlinear_activation_params: {}
+
+###########################################################
+#       DISCRIMINATOR NETWORK ARCHITECTURE SETTING        #
+###########################################################
+discriminator_params:
+    in_channels: 1        # Number of input channels.
+    out_channels: 1       # Number of output channels.
+    kernel_size: 3        # Kernel size of conv layers.
+    layers: 10            # Number of conv layers.
+    conv_channels: 64     # Number of channels in conv layers.
+    bias: True            # Whether to use bias parameter in conv.
+    use_weight_norm: True # Whether to use weight norm.
+                          # If set to true, it will be applied to all of the conv layers.
+    nonlinear_activation: "leakyrelu" # Nonlinear function after each conv.
+    nonlinear_activation_params:      # Nonlinear function parameters
+        negative_slope: 0.2           # Alpha in leakyrelu.
+
+###########################################################
+#                   STFT LOSS SETTING                     #
+###########################################################
+stft_loss_params:
+    fft_sizes: [1024, 2048, 512]  # List of FFT size for STFT-based loss.
+    hop_sizes: [120, 240, 50]     # List of hop size for STFT-based loss.
+    win_lengths: [600, 1200, 240] # List of window length for STFT-based loss.
+    window: "hann"                # Window function for STFT-based loss.
+
+###########################################################
+#               ADVERSARIAL LOSS SETTING                  #
+###########################################################
+lambda_adv: 4.0  # Loss balancing coefficient.
+
+###########################################################
+#                  DATA LOADER SETTING                    #
+###########################################################
+batch_size: 8          # Batch size.
+batch_max_steps: 25500 # Length of each audio in batch. Make sure divisible by n_shift.
+num_workers: 1         # Number of workers in DataLoader.
+
+###########################################################
+#             OPTIMIZER & SCHEDULER SETTING               #
+###########################################################
+generator_optimizer_params:
+    epsilon: 1.0e-6          # Generator's epsilon.
+    weight_decay: 0.0        # Generator's weight decay coefficient.
+generator_scheduler_params:
+    learning_rate: 0.0001    # Generator's learning rate.
+    step_size: 200000        # Generator's scheduler step size.
+    gamma: 0.5               # Generator's scheduler gamma.
+                             # At each step size, lr will be multiplied by this parameter.
+generator_grad_norm: 10      # Generator's gradient norm.
+discriminator_optimizer_params:
+    epsilon: 1.0e-6          # Discriminator's epsilon.
+    weight_decay: 0.0        # Discriminator's weight decay coefficient.
+discriminator_scheduler_params:
+    learning_rate: 0.00005   # Discriminator's learning rate.
+    step_size: 200000        # Discriminator's scheduler step size.
+    gamma: 0.5               # Discriminator's scheduler gamma.
+                             # At each step size, lr will be multiplied by this parameter.
+discriminator_grad_norm: 1   # Discriminator's gradient norm.
+
+###########################################################
+#                    INTERVAL SETTING                     #
+###########################################################
+discriminator_train_start_steps: 100000 # Number of steps to start to train discriminator.
+train_max_steps: 400000                 # Number of training steps.
+save_interval_steps: 5000               # Interval steps to save checkpoint.
+eval_interval_steps: 1000               # Interval steps to evaluate the network.
+
+###########################################################
+#                     OTHER SETTING                       #
+###########################################################
+num_save_intermediate_results: 4  # Number of results to be saved as intermediate results.
+num_snapshots: 10 # max number of snapshots to keep while training +seed: 42 # random seed for paddle, random, and np.random diff --git a/examples/opencpop/voc1/local/PTQ_static.sh b/examples/opencpop/voc1/local/PTQ_static.sh new file mode 120000 index 000000000..247ce5c74 --- /dev/null +++ b/examples/opencpop/voc1/local/PTQ_static.sh @@ -0,0 +1 @@ +../../../csmsc/voc1/local/PTQ_static.sh \ No newline at end of file diff --git a/examples/opencpop/voc1/local/dygraph_to_static.sh b/examples/opencpop/voc1/local/dygraph_to_static.sh new file mode 100755 index 000000000..40a2c51ba --- /dev/null +++ b/examples/opencpop/voc1/local/dygraph_to_static.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +config_path=$1 +train_output_path=$2 +ckpt_name=$3 + +FLAGS_allocator_strategy=naive_best_fit \ +FLAGS_fraction_of_gpu_memory_to_use=0.01 \ +python3 ${BIN_DIR}/../../dygraph_to_static.py \ + --type=voc \ + --voc=pwgan_opencpop \ + --voc_config=${config_path} \ + --voc_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --voc_stat=dump/train/feats_stats.npy \ + --inference_dir=exp/default/inference/ diff --git a/examples/opencpop/voc1/local/preprocess.sh b/examples/opencpop/voc1/local/preprocess.sh new file mode 100755 index 000000000..edab4d0d5 --- /dev/null +++ b/examples/opencpop/voc1/local/preprocess.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +stage=0 +stop_stage=100 + +config_path=$1 + + +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + # extract features + echo "Extract features ..." + python3 ${BIN_DIR}/../preprocess.py \ + --rootdir=~/datasets/Opencpop/segments/ \ + --dataset=opencpop \ + --dumpdir=dump \ + --dur-file=~/datasets/Opencpop/segments/transcriptions.txt \ + --config=${config_path} \ + --cut-sil=False \ + --num-cpu=20 +fi + +if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then + # get features' stats(mean and std) + echo "Get features' stats ..." + python3 ${MAIN_ROOT}/utils/compute_statistics.py \ + --metadata=dump/train/raw/metadata.jsonl \ + --field-name="feats" +fi + +if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then + # normalize, dev and test should use train's stats + echo "Normalize ..." 
+ + python3 ${BIN_DIR}/../normalize.py \ + --metadata=dump/train/raw/metadata.jsonl \ + --dumpdir=dump/train/norm \ + --stats=dump/train/feats_stats.npy + python3 ${BIN_DIR}/../normalize.py \ + --metadata=dump/dev/raw/metadata.jsonl \ + --dumpdir=dump/dev/norm \ + --stats=dump/train/feats_stats.npy + + python3 ${BIN_DIR}/../normalize.py \ + --metadata=dump/test/raw/metadata.jsonl \ + --dumpdir=dump/test/norm \ + --stats=dump/train/feats_stats.npy +fi diff --git a/examples/opencpop/voc1/local/synthesize.sh b/examples/opencpop/voc1/local/synthesize.sh new file mode 120000 index 000000000..d6aecd8d1 --- /dev/null +++ b/examples/opencpop/voc1/local/synthesize.sh @@ -0,0 +1 @@ +../../../csmsc/voc1/local/synthesize.sh \ No newline at end of file diff --git a/examples/opencpop/voc1/local/train.sh b/examples/opencpop/voc1/local/train.sh new file mode 120000 index 000000000..2942893d2 --- /dev/null +++ b/examples/opencpop/voc1/local/train.sh @@ -0,0 +1 @@ +../../../csmsc/voc1/local/train.sh \ No newline at end of file diff --git a/examples/opencpop/voc1/path.sh b/examples/opencpop/voc1/path.sh new file mode 120000 index 000000000..b7ed4fb8f --- /dev/null +++ b/examples/opencpop/voc1/path.sh @@ -0,0 +1 @@ +../../csmsc/voc1/path.sh \ No newline at end of file diff --git a/examples/opencpop/voc1/run.sh b/examples/opencpop/voc1/run.sh new file mode 100755 index 000000000..1f87425f4 --- /dev/null +++ b/examples/opencpop/voc1/run.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +set -e +source path.sh + +gpus=0 +stage=0 +stop_stage=100 + +conf_path=conf/default.yaml +train_output_path=exp/default +ckpt_name=snapshot_iter_100000.pdz + +# with the following command, you can choose the stage range you want to run +# such as `./run.sh --stage 0 --stop-stage 0` +# this can not be mixed use with `$1`, `$2` ... +source ${MAIN_ROOT}/utils/parse_options.sh || exit 1 + +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + # prepare data + ./local/preprocess.sh ${conf_path} || exit -1 +fi + +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + # train model, all `ckpt` under `train_output_path/checkpoints/` dir + CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path} || exit -1 +fi + +if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then + # synthesize + CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1 +fi + +# dygraph to static +if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then + CUDA_VISIBLE_DEVICES=${gpus} ./local/dygraph_to_static.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1 +fi + +# PTQ_static +if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then + CUDA_VISIBLE_DEVICES=${gpus} ./local/PTQ_static.sh ${train_output_path} pwgan_opencpop || exit -1 +fi diff --git a/paddlespeech/t2s/exps/PTQ_static.py b/paddlespeech/t2s/exps/PTQ_static.py index 16b3ae983..8849abe58 100644 --- a/paddlespeech/t2s/exps/PTQ_static.py +++ b/paddlespeech/t2s/exps/PTQ_static.py @@ -42,6 +42,7 @@ def parse_args(): 'hifigan_aishell3', 'hifigan_ljspeech', 'hifigan_vctk', + 'pwgan_opencpop', ], help='Choose model type of tts task.') diff --git a/paddlespeech/t2s/exps/dygraph_to_static.py b/paddlespeech/t2s/exps/dygraph_to_static.py new file mode 100644 index 000000000..3e6e94857 --- /dev/null +++ b/paddlespeech/t2s/exps/dygraph_to_static.py @@ -0,0 +1,169 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse + +import yaml +from yacs.config import CfgNode + +from paddlespeech.t2s.exps.syn_utils import am_to_static +from paddlespeech.t2s.exps.syn_utils import get_am_inference +from paddlespeech.t2s.exps.syn_utils import get_voc_inference +from paddlespeech.t2s.exps.syn_utils import voc_to_static + + +def am_dygraph_to_static(args): + with open(args.am_config) as f: + am_config = CfgNode(yaml.safe_load(f)) + am_inference = get_am_inference( + am=args.am, + am_config=am_config, + am_ckpt=args.am_ckpt, + am_stat=args.am_stat, + phones_dict=args.phones_dict, + tones_dict=args.tones_dict, + speaker_dict=args.speaker_dict) + print("acoustic model done!") + + # dygraph to static + am_inference = am_to_static( + am_inference=am_inference, + am=args.am, + inference_dir=args.inference_dir, + speaker_dict=args.speaker_dict) + print("Finished converting the dygraph acoustic model to a static one!") + + +def voc_dygraph_to_static(args): + with open(args.voc_config) as f: + voc_config = CfgNode(yaml.safe_load(f)) + voc_inference = get_voc_inference( + voc=args.voc, + voc_config=voc_config, + voc_ckpt=args.voc_ckpt, + voc_stat=args.voc_stat) + print("voc done!") + + # dygraph to static + voc_inference = voc_to_static( + voc_inference=voc_inference, + voc=args.voc, + inference_dir=args.inference_dir) + print("Finished converting the dygraph vocoder model to a static one!") + + +def parse_args(): + # parse args and config + parser = argparse.ArgumentParser( + description="Convert the acoustic model & vocoder from dygraph to static graph") + parser.add_argument( + '--type', + type=str, + required=True, + choices=["am", "voc"], + help='Choose the model type to convert from dygraph to static: am or voc') + # acoustic model + parser.add_argument( + '--am', + type=str, + default='fastspeech2_csmsc', + choices=[ + 'speedyspeech_csmsc', + 'speedyspeech_aishell3', + 'fastspeech2_csmsc', + 'fastspeech2_ljspeech', + 'fastspeech2_aishell3', + 'fastspeech2_vctk', + 'tacotron2_csmsc', + 'tacotron2_ljspeech', + 'fastspeech2_mix', + 'fastspeech2_canton', + 'fastspeech2_male-zh', + 'fastspeech2_male-en', + 'fastspeech2_male-mix', + ], + help='Choose acoustic model type of tts task.') + parser.add_argument( + '--am_config', type=str, default=None, help='Config of acoustic model.') + parser.add_argument( + '--am_ckpt', + type=str, + default=None, + help='Checkpoint file of acoustic model.') + parser.add_argument( + "--am_stat", + type=str, + default=None, + help="mean and standard deviation used to normalize spectrogram when training acoustic model."
+ ) + parser.add_argument( + "--phones_dict", type=str, default=None, help="phone vocabulary file.") + parser.add_argument( + "--tones_dict", type=str, default=None, help="tone vocabulary file.") + parser.add_argument( + "--speaker_dict", type=str, default=None, help="speaker id map file.") + # vocoder + parser.add_argument( + '--voc', + type=str, + default='pwgan_csmsc', + choices=[ + 'pwgan_csmsc', + 'pwgan_ljspeech', + 'pwgan_aishell3', + 'pwgan_vctk', + 'mb_melgan_csmsc', + 'style_melgan_csmsc', + 'hifigan_csmsc', + 'hifigan_ljspeech', + 'hifigan_aishell3', + 'hifigan_vctk', + 'wavernn_csmsc', + 'pwgan_male', + 'hifigan_male', + 'pwgan_opencpop', + ], + help='Choose vocoder type of tts task.') + parser.add_argument( + '--voc_config', type=str, default=None, help='Config of voc.') + parser.add_argument( + '--voc_ckpt', type=str, default=None, help='Checkpoint file of voc.') + parser.add_argument( + "--voc_stat", + type=str, + default=None, + help="mean and standard deviation used to normalize spectrogram when training voc." + ) + # other + parser.add_argument( + "--inference_dir", + type=str, + default=None, + help="dir to save inference models") + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + if args.type == "am": + am_dygraph_to_static(args) + elif args.type == "voc": + voc_dygraph_to_static(args) + else: + print("type should be in ['am', 'voc'] !") + + +if __name__ == "__main__": + main() diff --git a/paddlespeech/t2s/exps/gan_vocoder/preprocess.py b/paddlespeech/t2s/exps/gan_vocoder/preprocess.py index 05c657682..a2629a900 100644 --- a/paddlespeech/t2s/exps/gan_vocoder/preprocess.py +++ b/paddlespeech/t2s/exps/gan_vocoder/preprocess.py @@ -29,6 +29,7 @@ from yacs.config import CfgNode from paddlespeech.t2s.datasets.get_feats import LogMelFBank from paddlespeech.t2s.datasets.preprocess_utils import get_phn_dur +from paddlespeech.t2s.datasets.preprocess_utils import get_sentences_svs from paddlespeech.t2s.datasets.preprocess_utils import merge_silence from paddlespeech.t2s.utils import str2bool @@ -192,8 +193,15 @@ def main(): with open(args.config, 'rt') as f: config = CfgNode(yaml.safe_load(f)) - sentences, speaker_set = get_phn_dur(dur_file) - merge_silence(sentences) + if args.dataset == "opencpop": + sentences, speaker_set = get_sentences_svs( + dur_file, + dataset=args.dataset, + sample_rate=config.fs, + n_shift=config.n_shift, ) + else: + sentences, speaker_set = get_phn_dur(dur_file) + merge_silence(sentences) # split data into 3 sections if args.dataset == "baker": @@ -240,6 +248,33 @@ def main(): test_wav_files += wav_files[-sub_num_dev:] else: train_wav_files += wav_files + elif args.dataset == "opencpop": + wavdir = rootdir / "wavs" + # split data into 3 sections + train_file = rootdir / "train.txt" + train_wav_files = [] + with open(train_file, "r") as f_train: + for line in f_train.readlines(): + utt = line.split("|")[0] + wav_name = utt + ".wav" + wav_path = wavdir / wav_name + train_wav_files.append(wav_path) + + test_file = rootdir / "test.txt" + dev_wav_files = [] + test_wav_files = [] + num_dev = 106 + count = 0 + with open(test_file, "r") as f_test: + for line in f_test.readlines(): + count += 1 + utt = line.split("|")[0] + wav_name = utt + ".wav" + wav_path = wavdir / wav_name + if count > num_dev: + test_wav_files.append(wav_path) + else: + dev_wav_files.append(wav_path) else: print("dataset should in {baker, ljspeech, vctk, aishell3} now!") From 65c3217b8bc0eb7245d5219250d3310bc4e0d607 Mon Sep 17 00:00:00 2001 From: TianYuan 
Date: Tue, 14 Mar 2023 17:15:35 +0800 Subject: [PATCH 17/37] Update textnorm_test_cases.txt --- examples/other/tn/data/textnorm_test_cases.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/other/tn/data/textnorm_test_cases.txt b/examples/other/tn/data/textnorm_test_cases.txt index 17e90d0b6..ba9e6529a 100644 --- a/examples/other/tn/data/textnorm_test_cases.txt +++ b/examples/other/tn/data/textnorm_test_cases.txt @@ -32,7 +32,7 @@ iPad Pro的秒控键盘这次也推出白色版本。|iPad Pro的秒控键盘这 明天有62%的概率降雨|明天有百分之六十二的概率降雨 这是固话0421-33441122|这是固话零四二一三三四四一一二二 这是手机+86 18544139121|这是手机八六一八五四四一三九一二一 -小王的身高是153.5cm,梦想是打篮球!我觉得有0.1%的可能性。|小王的身高是一百五十三点五cm,梦想是打篮球!我觉得有百分之零点一的可能性。 +小王的身高是153.5cm,梦想是打篮球!我觉得有0.1%的可能性。|小王的身高是一百五十三点五厘米,梦想是打篮球!我觉得有百分之零点一的可能性。 不管三七二十一|不管三七二十一 九九八十一难|九九八十一难 2018年5月23号上午10点10分|二零一八年五月二十三号上午十点十分 @@ -124,4 +124,4 @@ iPad Pro的秒控键盘这次也推出白色版本。|iPad Pro的秒控键盘这 12~23|十二到二十三 12-23|十二到二十三 25cm²|二十五平方厘米 -25m|米 \ No newline at end of file +25m|米 From 4e9bca177a16eedc00950d7128cb707b35317d08 Mon Sep 17 00:00:00 2001 From: zxcd <228587199@qq.com> Date: Tue, 14 Mar 2023 19:01:00 +0800 Subject: [PATCH 18/37] [ASR] change optimizer and fix import error, test=asr (#3023) * mv dataio.py to s2t.io.speechbrain.dataio mv dataio.py to paddlespeech.s2t.io.speechbrain.dataio * remove transformers import. * change optimizer same with released model * add paddlenlp version in RESULT.md. * fix run.sh * fix data.sh step_num. * add adadelta optimizer config. * fix wav2vec2 test_wav.sh run error. * add tokenizer config. --- examples/aishell/asr3/README.md | 4 +- examples/aishell/asr3/RESULT.md | 1 + .../aishell/asr3/conf/train_with_wav2vec.yaml | 2 +- examples/aishell/asr3/conf/wav2vec2ASR.yaml | 6 +- .../asr3/conf/wav2vec2ASR_adadelta.yaml | 168 ++++++++++++++++++ .../aishell/asr3/local/aishell_prepare.py | 2 +- examples/aishell/asr3/local/data.sh | 2 +- examples/aishell/asr3/local/test.sh | 6 +- examples/aishell/asr3/local/test_wav.sh | 2 +- examples/aishell/asr3/run.sh | 4 +- paddlespeech/resource/pretrained_models.py | 4 +- .../s2t/exps/wav2vec2/bin/test_wav.py | 14 +- 12 files changed, 193 insertions(+), 22 deletions(-) mode change 100755 => 100644 examples/aishell/asr3/conf/wav2vec2ASR.yaml create mode 100755 examples/aishell/asr3/conf/wav2vec2ASR_adadelta.yaml diff --git a/examples/aishell/asr3/README.md b/examples/aishell/asr3/README.md index f6fa60d7f..6b587e12f 100644 --- a/examples/aishell/asr3/README.md +++ b/examples/aishell/asr3/README.md @@ -190,9 +190,9 @@ tar xzvf wav2vec2ASR-large-aishell1_ckpt_1.4.0.model.tar.gz ``` You can download the audio demo: ```bash -wget -nc https://paddlespeech.bj.bcebos.com/datasets/single_wav/en/demo_002_en.wav -P data/ +wget -nc https://paddlespeech.bj.bcebos.com/datasets/single_wav/zh/demo_01_03.wav -P data/ ``` You need to prepare an audio file or use the audio demo above, please confirm the sample rate of the audio is 16K. You can get the result of the audio demo by running the script below. 
```bash -CUDA_VISIBLE_DEVICES= ./local/test_wav.sh conf/wav2vec2ASR.yaml conf/tuning/decode.yaml exp/wav2vec2ASR/checkpoints/avg_1 data/demo_002_en.wav +CUDA_VISIBLE_DEVICES= ./local/test_wav.sh conf/wav2vec2ASR.yaml conf/tuning/decode.yaml exp/wav2vec2ASR/checkpoints/avg_1 data/demo_01_03.wav ``` diff --git a/examples/aishell/asr3/RESULT.md b/examples/aishell/asr3/RESULT.md index 1291ef15c..42edeac11 100644 --- a/examples/aishell/asr3/RESULT.md +++ b/examples/aishell/asr3/RESULT.md @@ -4,6 +4,7 @@ * paddle version: develop (commit id: daea892c67e85da91906864de40ce9f6f1b893ae) * paddlespeech version: develop (commit id: c14b4238b256693281e59605abff7c9435b3e2b2) +* paddlenlp version: 2.5.2 ## Device * python: 3.7 diff --git a/examples/aishell/asr3/conf/train_with_wav2vec.yaml b/examples/aishell/asr3/conf/train_with_wav2vec.yaml index 77b3762ef..273175d27 100755 --- a/examples/aishell/asr3/conf/train_with_wav2vec.yaml +++ b/examples/aishell/asr3/conf/train_with_wav2vec.yaml @@ -83,7 +83,7 @@ dnn_neurons: 1024 freeze_wav2vec: False dropout: 0.15 -tokenizer: !apply:transformers.BertTokenizer.from_pretrained +tokenizer: !apply:paddlenlp.transformers.AutoTokenizer.from_pretrained pretrained_model_name_or_path: bert-base-chinese # bert-base-chinese tokens length output_neurons: 21128 diff --git a/examples/aishell/asr3/conf/wav2vec2ASR.yaml b/examples/aishell/asr3/conf/wav2vec2ASR.yaml old mode 100755 new mode 100644 index cdb04f8c1..4a1274688 --- a/examples/aishell/asr3/conf/wav2vec2ASR.yaml +++ b/examples/aishell/asr3/conf/wav2vec2ASR.yaml @@ -107,6 +107,7 @@ vocab_filepath: data/lang_char/vocab.txt ########################################### unit_type: 'char' +tokenizer: bert-base-chinese mean_std_filepath: preprocess_config: conf/preprocess.yaml sortagrad: -1 # Feed samples from shortest to longest ; -1: enabled for all epochs, 0: disabled, other: enabled for 'other' epochs @@ -139,12 +140,10 @@ n_epoch: 80 accum_grad: 1 global_grad_clip: 5.0 -model_optim: adadelta +model_optim: sgd model_optim_conf: lr: 1.0 weight_decay: 0.0 - rho: 0.95 - epsilon: 1.0e-8 wav2vec2_optim: adam wav2vec2_optim_conf: @@ -165,3 +164,4 @@ log_interval: 1 checkpoint: kbest_n: 50 latest_n: 5 + diff --git a/examples/aishell/asr3/conf/wav2vec2ASR_adadelta.yaml b/examples/aishell/asr3/conf/wav2vec2ASR_adadelta.yaml new file mode 100755 index 000000000..ec287f0c6 --- /dev/null +++ b/examples/aishell/asr3/conf/wav2vec2ASR_adadelta.yaml @@ -0,0 +1,168 @@ +############################################ +# Network Architecture # +############################################ +freeze_wav2vec2: False +normalize_wav: True +output_norm: True +init_type: 'kaiming_uniform' # !Warning: need to convergence +enc: + input_shape: 1024 + dnn_blocks: 3 + dnn_neurons: 1024 + activation: True + normalization: True + dropout_rate: [0.15, 0.15, 0.0] +ctc: + enc_n_units: 1024 + blank_id: 0 + dropout_rate: 0.0 + +audio_augment: + speeds: [90, 100, 110] + +spec_augment: + time_warp: True + time_warp_window: 5 + time_warp_mode: bicubic + freq_mask: True + n_freq_mask: 2 + time_mask: True + n_time_mask: 2 + replace_with_zero: False + freq_mask_width: 30 + time_mask_width: 40 +wav2vec2_params_path: exp/wav2vec2/chinese-wav2vec2-large.pdparams + + +############################################ +# Wav2Vec2.0 # +############################################ +# vocab_size: 1000000 +hidden_size: 1024 +num_hidden_layers: 24 +num_attention_heads: 16 +intermediate_size: 4096 +hidden_act: gelu +hidden_dropout: 0.1 +activation_dropout: 0.0 +attention_dropout: 0.1 
+feat_proj_dropout: 0.1 +feat_quantizer_dropout: 0.0 +final_dropout: 0.0 +layerdrop: 0.1 +initializer_range: 0.02 +layer_norm_eps: 1e-5 +feat_extract_norm: layer +feat_extract_activation: gelu +conv_dim: [512, 512, 512, 512, 512, 512, 512] +conv_stride: [5, 2, 2, 2, 2, 2, 2] +conv_kernel: [10, 3, 3, 3, 3, 2, 2] +conv_bias: True +num_conv_pos_embeddings: 128 +num_conv_pos_embedding_groups: 16 +do_stable_layer_norm: True +apply_spec_augment: False +mask_channel_length: 10 +mask_channel_min_space: 1 +mask_channel_other: 0.0 +mask_channel_prob: 0.0 +mask_channel_selection: static +mask_feature_length: 10 +mask_feature_min_masks: 0 +mask_feature_prob: 0.0 +mask_time_length: 10 +mask_time_min_masks: 2 +mask_time_min_space: 1 +mask_time_other: 0.0 +mask_time_prob: 0.075 +mask_time_selection: static +num_codevectors_per_group: 320 +num_codevector_groups: 2 +contrastive_logits_temperature: 0.1 +num_negatives: 100 +codevector_dim: 256 +proj_codevector_dim: 256 +diversity_loss_weight: 0.1 +use_weighted_layer_sum: False +# pad_token_id: 0 +# bos_token_id: 1 +# eos_token_id: 2 +add_adapter: False +adapter_kernel_size: 3 +adapter_stride: 2 +num_adapter_layers: 3 +output_hidden_size: None + +########################################### +# Data # +########################################### + +train_manifest: data/manifest.train +dev_manifest: data/manifest.dev +test_manifest: data/manifest.test +vocab_filepath: data/lang_char/vocab.txt + +########################################### +# Dataloader # +########################################### + +unit_type: 'char' +tokenizer: bert-base-chinese +mean_std_filepath: +preprocess_config: conf/preprocess.yaml +sortagrad: -1 # Feed samples from shortest to longest ; -1: enabled for all epochs, 0: disabled, other: enabled for 'other' epochs +batch_size: 5 # Different batch_size may cause large differences in results +maxlen_in: 51200000000 # if input length > maxlen-in batchsize is automatically reduced +maxlen_out: 1500000 # if output length > maxlen-out batchsize is automatically reduced +minibatches: 0 # for debug +batch_count: auto +batch_bins: 0 +batch_frames_in: 0 +batch_frames_out: 0 +batch_frames_inout: 0 +num_workers: 6 +subsampling_factor: 1 +num_encs: 1 +dist_sampler: True +shortest_first: True +return_lens_rate: True + +########################################### +# use speechbrain dataloader # +########################################### +use_sb_pipeline: True # whether use speechbrain pipeline. Default is True. 
+sb_pipeline_conf: conf/train_with_wav2vec.yaml + +########################################### +# Training # +########################################### +n_epoch: 80 +accum_grad: 1 +global_grad_clip: 5.0 + +model_optim: adadelta +model_optim_conf: + lr: 1.0 + weight_decay: 0.0 + rho: 0.95 + epsilon: 1.0e-8 + +wav2vec2_optim: adam +wav2vec2_optim_conf: + lr: 0.0001 + weight_decay: 0.0 + +model_scheduler: newbobscheduler +model_scheduler_conf: + improvement_threshold: 0.0025 + annealing_factor: 0.8 + patient: 0 +wav2vec2_scheduler: newbobscheduler +wav2vec2_scheduler_conf: + improvement_threshold: 0.0025 + annealing_factor: 0.9 + patient: 0 +log_interval: 1 +checkpoint: + kbest_n: 50 + latest_n: 5 diff --git a/examples/aishell/asr3/local/aishell_prepare.py b/examples/aishell/asr3/local/aishell_prepare.py index a25735791..2a7ba5c6c 100644 --- a/examples/aishell/asr3/local/aishell_prepare.py +++ b/examples/aishell/asr3/local/aishell_prepare.py @@ -21,7 +21,7 @@ import glob import logging import os -from paddlespeech.s2t.models.wav2vec2.io.dataio import read_audio +from paddlespeech.s2t.io.speechbrain.dataio import read_audio logger = logging.getLogger(__name__) diff --git a/examples/aishell/asr3/local/data.sh b/examples/aishell/asr3/local/data.sh index 1a468f546..bd26c1e78 100755 --- a/examples/aishell/asr3/local/data.sh +++ b/examples/aishell/asr3/local/data.sh @@ -1,7 +1,7 @@ #!/bin/bash stage=-1 -stop_stage=-1 +stop_stage=3 dict_dir=data/lang_char . ${MAIN_ROOT}/utils/parse_options.sh || exit -1; diff --git a/examples/aishell/asr3/local/test.sh b/examples/aishell/asr3/local/test.sh index 9d4b84291..91e1c5457 100755 --- a/examples/aishell/asr3/local/test.sh +++ b/examples/aishell/asr3/local/test.sh @@ -8,9 +8,7 @@ echo "using $ngpu gpus..." expdir=exp datadir=data -train_set=train_960 -recog_set="test-clean test-other dev-clean dev-other" -recog_set="test-clean" +train_set=train config_path=$1 decode_config_path=$2 @@ -75,7 +73,7 @@ for type in ctc_prefix_beam_search; do --trans_hyp ${ckpt_prefix}.${type}.rsl.text python3 utils/compute-wer.py --char=1 --v=1 \ - data/manifest.test-clean.text ${ckpt_prefix}.${type}.rsl.text > ${ckpt_prefix}.${type}.error + data/manifest.test.text ${ckpt_prefix}.${type}.rsl.text > ${ckpt_prefix}.${type}.error echo "decoding ${type} done." done diff --git a/examples/aishell/asr3/local/test_wav.sh b/examples/aishell/asr3/local/test_wav.sh index fdf3589f4..7ccef6945 100755 --- a/examples/aishell/asr3/local/test_wav.sh +++ b/examples/aishell/asr3/local/test_wav.sh @@ -14,7 +14,7 @@ ckpt_prefix=$3 audio_file=$4 mkdir -p data -wget -nc https://paddlespeech.bj.bcebos.com/datasets/single_wav/en/demo_002_en.wav -P data/ +wget -nc https://paddlespeech.bj.bcebos.com/datasets/single_wav/zh/demo_01_03.wav -P data/ if [ $? -ne 0 ]; then exit 1 fi diff --git a/examples/aishell/asr3/run.sh b/examples/aishell/asr3/run.sh index 9b0a3c472..557ca0fcd 100755 --- a/examples/aishell/asr3/run.sh +++ b/examples/aishell/asr3/run.sh @@ -15,11 +15,11 @@ resume= # xx e.g. 30 export FLAGS_cudnn_deterministic=1 . ${MAIN_ROOT}/utils/parse_options.sh || exit 1; -audio_file=data/demo_002_en.wav +audio_file=data/demo_01_03.wav avg_ckpt=avg_${avg_num} ckpt=$(basename ${conf_path} | awk -F'.' 
'{print $1}') -echo "checkpoint name ${ckpt}"git revert -v +echo "checkpoint name ${ckpt}" if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then # prepare data diff --git a/paddlespeech/resource/pretrained_models.py b/paddlespeech/resource/pretrained_models.py index 04df18623..3c5db64bb 100644 --- a/paddlespeech/resource/pretrained_models.py +++ b/paddlespeech/resource/pretrained_models.py @@ -102,13 +102,11 @@ ssl_dynamic_pretrained_models = { 'params': 'exp/wav2vec2ASR/checkpoints/avg_1.pdparams', }, - }, - "wav2vec2ASR_aishell1-zh-16k": { '1.4': { 'url': 'https://paddlespeech.bj.bcebos.com/s2t/aishell/asr3/wav2vec2ASR-large-aishell1_ckpt_1.4.0.model.tar.gz', 'md5': - '9f0bc943adb822789bf61e674b229d17', + '150e51b8ea5d255ccce6b395de8d916a', 'cfg_path': 'model.yaml', 'ckpt_path': diff --git a/paddlespeech/s2t/exps/wav2vec2/bin/test_wav.py b/paddlespeech/s2t/exps/wav2vec2/bin/test_wav.py index 0d66ac410..2416db7ee 100644 --- a/paddlespeech/s2t/exps/wav2vec2/bin/test_wav.py +++ b/paddlespeech/s2t/exps/wav2vec2/bin/test_wav.py @@ -18,6 +18,7 @@ from pathlib import Path import paddle import soundfile +from paddlenlp.transformers import AutoTokenizer from yacs.config import CfgNode from paddlespeech.s2t.frontend.featurizer.text_featurizer import TextFeaturizer @@ -34,8 +35,13 @@ class Wav2vec2Infer(): self.config = config self.audio_file = args.audio_file - self.text_feature = TextFeaturizer( - unit_type=config.unit_type, vocab=config.vocab_filepath) + if self.config.tokenizer: + self.text_feature = AutoTokenizer.from_pretrained( + self.config.tokenizer) + else: + self.text_feature = TextFeaturizer( + unit_type=config.unit_type, vocab=config.vocab_filepath) + paddle.set_device('gpu' if self.args.ngpu > 0 else 'cpu') # model @@ -59,14 +65,14 @@ class Wav2vec2Infer(): audio, _ = soundfile.read( self.audio_file, dtype="int16", always_2d=True) logger.info(f"audio shape: {audio.shape}") - xs = paddle.to_tensor(audio, dtype='float32').unsqueeze(axis=0) decode_config = self.config.decode result_transcripts, result_tokenids = self.model.decode( xs, text_feature=self.text_feature, decoding_method=decode_config.decoding_method, - beam_size=decode_config.beam_size) + beam_size=decode_config.beam_size, + tokenizer=self.config.tokenizer, ) rsl = result_transcripts[0] utt = Path(self.audio_file).name logger.info(f"hyp: {utt} {rsl}") From 348064de0d1b17b67486fbcc3287e9b90f26ea24 Mon Sep 17 00:00:00 2001 From: liangym <34430015+lym0302@users.noreply.github.com> Date: Tue, 14 Mar 2023 20:17:23 +0800 Subject: [PATCH 19/37] [TTS] add opencpop HIFIGAN example (#3038) * add opencpop voc, test=tts * soft link * add opencpop hifigan, test=tts * update --- examples/opencpop/README.md | 6 + examples/opencpop/voc5/conf/default.yaml | 167 ++++++++++++ examples/opencpop/voc5/conf/finetune.yaml | 168 ++++++++++++ examples/opencpop/voc5/finetune.sh | 74 ++++++ examples/opencpop/voc5/local/PTQ_static.sh | 1 + .../opencpop/voc5/local/dygraph_to_static.sh | 15 ++ examples/opencpop/voc5/local/prepare_env.py | 1 + examples/opencpop/voc5/local/preprocess.sh | 1 + examples/opencpop/voc5/local/synthesize.sh | 1 + examples/opencpop/voc5/local/train.sh | 1 + examples/opencpop/voc5/path.sh | 1 + examples/opencpop/voc5/run.sh | 42 +++ paddlespeech/t2s/exps/PTQ_static.py | 1 + .../t2s/exps/diffsinger/gen_gta_mel.py | 240 ++++++++++++++++++ paddlespeech/t2s/exps/dygraph_to_static.py | 1 + 15 files changed, 720 insertions(+) create mode 100644 examples/opencpop/README.md create mode 100644 examples/opencpop/voc5/conf/default.yaml create 
mode 100644 examples/opencpop/voc5/conf/finetune.yaml create mode 100755 examples/opencpop/voc5/finetune.sh create mode 120000 examples/opencpop/voc5/local/PTQ_static.sh create mode 100755 examples/opencpop/voc5/local/dygraph_to_static.sh create mode 120000 examples/opencpop/voc5/local/prepare_env.py create mode 120000 examples/opencpop/voc5/local/preprocess.sh create mode 120000 examples/opencpop/voc5/local/synthesize.sh create mode 120000 examples/opencpop/voc5/local/train.sh create mode 120000 examples/opencpop/voc5/path.sh create mode 100755 examples/opencpop/voc5/run.sh create mode 100644 paddlespeech/t2s/exps/diffsinger/gen_gta_mel.py diff --git a/examples/opencpop/README.md b/examples/opencpop/README.md new file mode 100644 index 000000000..5a574dc80 --- /dev/null +++ b/examples/opencpop/README.md @@ -0,0 +1,6 @@ + +# Opencpop + +* svs1 - DiffSinger +* voc1 - Parallel WaveGAN +* voc5 - HiFiGAN diff --git a/examples/opencpop/voc5/conf/default.yaml b/examples/opencpop/voc5/conf/default.yaml new file mode 100644 index 000000000..10449f860 --- /dev/null +++ b/examples/opencpop/voc5/conf/default.yaml @@ -0,0 +1,167 @@ +# This is the configuration file for the Opencpop dataset. +# This configuration is based on HiFiGAN V1, which is an official configuration. +# But I found that the optimizer setting does not work well with my implementation. +# So I changed optimizer settings as follows: +# - AdamW -> Adam +# - betas: [0.8, 0.99] -> betas: [0.5, 0.9] +# - Scheduler: ExponentialLR -> MultiStepLR +# To match the shift size difference, the upsample scales are also modified from the original 256 shift setting. + +########################################################### +# FEATURE EXTRACTION SETTING # +########################################################### +fs: 24000 # Sampling rate. +n_fft: 512 # FFT size (samples). +n_shift: 128 # Hop size (samples). ~5.3ms +win_length: 512 # Window length (samples). ~21.3ms + # If set to null, it will be the same as fft_size. +window: "hann" # Window function. +n_mels: 80 # Number of mel basis. +fmin: 80 # Minimum freq in mel basis calculation. (Hz) +fmax: 12000 # Maximum frequency in mel basis calculation. (Hz) + +########################################################### +# GENERATOR NETWORK ARCHITECTURE SETTING # +########################################################### +generator_params: + in_channels: 80 # Number of input channels. + out_channels: 1 # Number of output channels. + channels: 512 # Number of initial channels. + kernel_size: 7 # Kernel size of initial and final conv layers. + upsample_scales: [8, 4, 2, 2] # Upsampling scales. + upsample_kernel_sizes: [16, 8, 4, 4] # Kernel size for upsampling layers. + resblock_kernel_sizes: [3, 7, 11] # Kernel size for residual blocks. + resblock_dilations: # Dilations for residual blocks. + - [1, 3, 5] + - [1, 3, 5] + - [1, 3, 5] + use_additional_convs: True # Whether to use additional conv layer in residual blocks. + bias: True # Whether to use bias parameter in conv. + nonlinear_activation: "leakyrelu" # Nonlinear activation type. + nonlinear_activation_params: # Nonlinear activation parameters. + negative_slope: 0.1 + use_weight_norm: True # Whether to apply weight normalization. + + +########################################################### +# DISCRIMINATOR NETWORK ARCHITECTURE SETTING # +########################################################### +discriminator_params: + scales: 3 # Number of multi-scale discriminators.
+ scale_downsample_pooling: "AvgPool1D" # Pooling operation for scale discriminator. + scale_downsample_pooling_params: + kernel_size: 4 # Pooling kernel size. + stride: 2 # Pooling stride. + padding: 2 # Padding size. + scale_discriminator_params: + in_channels: 1 # Number of input channels. + out_channels: 1 # Number of output channels. + kernel_sizes: [15, 41, 5, 3] # List of kernel sizes. + channels: 128 # Initial number of channels. + max_downsample_channels: 1024 # Maximum number of channels in downsampling conv layers. + max_groups: 16 # Maximum number of groups in downsampling conv layers. + bias: True + downsample_scales: [4, 4, 4, 4, 1] # Downsampling scales. + nonlinear_activation: "leakyrelu" # Nonlinear activation. + nonlinear_activation_params: + negative_slope: 0.1 + follow_official_norm: True # Whether to follow the official norm setting. + periods: [2, 3, 5, 7, 11] # List of periods for the multi-period discriminator. + period_discriminator_params: + in_channels: 1 # Number of input channels. + out_channels: 1 # Number of output channels. + kernel_sizes: [5, 3] # List of kernel sizes. + channels: 32 # Initial number of channels. + downsample_scales: [3, 3, 3, 3, 1] # Downsampling scales. + max_downsample_channels: 1024 # Maximum number of channels in downsampling conv layers. + bias: True # Whether to use bias parameter in conv layer. + nonlinear_activation: "leakyrelu" # Nonlinear activation. + nonlinear_activation_params: # Nonlinear activation parameters. + negative_slope: 0.1 + use_weight_norm: True # Whether to apply weight normalization. + use_spectral_norm: False # Whether to apply spectral normalization. + + +########################################################### +# STFT LOSS SETTING # +########################################################### +use_stft_loss: False # Whether to use multi-resolution STFT loss. +use_mel_loss: True # Whether to use Mel-spectrogram loss. +mel_loss_params: + fs: 24000 + fft_size: 512 + hop_size: 128 + win_length: 512 + window: "hann" + num_mels: 80 + fmin: 30 + fmax: 12000 + log_base: null +generator_adv_loss_params: + average_by_discriminators: False # Whether to average loss by #discriminators. +discriminator_adv_loss_params: + average_by_discriminators: False # Whether to average loss by #discriminators. +use_feat_match_loss: True +feat_match_loss_params: + average_by_discriminators: False # Whether to average loss by #discriminators. + average_by_layers: False # Whether to average loss by #layers in each discriminator. + include_final_outputs: False # Whether to include final outputs in feat match loss calculation. + +########################################################### +# ADVERSARIAL LOSS SETTING # +########################################################### +lambda_aux: 45.0 # Loss balancing coefficient for STFT loss. +lambda_adv: 1.0 # Loss balancing coefficient for adversarial loss. +lambda_feat_match: 2.0 # Loss balancing coefficient for feat match loss. + +########################################################### +# DATA LOADER SETTING # +########################################################### +batch_size: 16 # Batch size. +batch_max_steps: 8400 # Length of each audio in batch. Make sure it is divisible by hop_size. +num_workers: 1 # Number of workers in DataLoader.
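A quick sanity check on the timing comments in the feature-extraction block above: the hop and window durations follow directly from `fs`, `n_shift` and `win_length`, and `batch_max_steps` is documented as a multiple of the hop size. A minimal sketch in plain Python, with all values copied from this config:

```python
fs = 24000        # sampling rate (Hz), from the config above
n_shift = 128     # hop size (samples)
win_length = 512  # window length (samples)
batch_max_steps = 8400

# frame timings implied by these settings
print(f"hop:    {1000 * n_shift / fs:.2f} ms")     # 5.33 ms
print(f"window: {1000 * win_length / fs:.2f} ms")  # 21.33 ms

# batch_max_steps is documented as divisible by the hop size;
# 8400 % 128 == 80, so this value is not an exact multiple
print(f"batch_max_steps % n_shift = {batch_max_steps % n_shift}")
```

Note also that the product of the generator's `upsample_scales` (8 * 4 * 2 * 2 = 128) equals `n_shift`, which is what the header comment about moving away from the original 256-shift setting refers to.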
+ +########################################################### +# OPTIMIZER & SCHEDULER SETTING # +########################################################### +generator_optimizer_params: + beta1: 0.5 + beta2: 0.9 + weight_decay: 0.0 # Generator's weight decay coefficient. +generator_scheduler_params: + learning_rate: 2.0e-4 # Generator's learning rate. + gamma: 0.5 # Generator's scheduler gamma. + milestones: # At each milestone, lr will be multiplied by gamma. + - 200000 + - 400000 + - 600000 + - 800000 +generator_grad_norm: -1 # Generator's gradient norm. +discriminator_optimizer_params: + beta1: 0.5 + beta2: 0.9 + weight_decay: 0.0 # Discriminator's weight decay coefficient. +discriminator_scheduler_params: + learning_rate: 2.0e-4 # Discriminator's learning rate. + gamma: 0.5 # Discriminator's scheduler gamma. + milestones: # At each milestone, lr will be multiplied by gamma. + - 200000 + - 400000 + - 600000 + - 800000 +discriminator_grad_norm: -1 # Discriminator's gradient norm. + +########################################################### +# INTERVAL SETTING # +########################################################### +generator_train_start_steps: 1 # Number of steps to start to train generator. +discriminator_train_start_steps: 0 # Number of steps to start to train discriminator. +train_max_steps: 2500000 # Number of training steps. +save_interval_steps: 5000 # Interval steps to save checkpoint. +eval_interval_steps: 1000 # Interval steps to evaluate the network. + +########################################################### +# OTHER SETTING # +########################################################### +num_snapshots: 4 # max number of snapshots to keep while training +seed: 42 # random seed for paddle, random, and np.random diff --git a/examples/opencpop/voc5/conf/finetune.yaml b/examples/opencpop/voc5/conf/finetune.yaml new file mode 100644 index 000000000..0022a67aa --- /dev/null +++ b/examples/opencpop/voc5/conf/finetune.yaml @@ -0,0 +1,168 @@ +# This is the configuration file for the Opencpop dataset. +# This configuration is based on HiFiGAN V1, which is an official configuration. +# But I found that the optimizer setting does not work well with my implementation. +# So I changed optimizer settings as follows: +# - AdamW -> Adam +# - betas: [0.8, 0.99] -> betas: [0.5, 0.9] +# - Scheduler: ExponentialLR -> MultiStepLR +# To match the shift size difference, the upsample scales are also modified from the original 256 shift setting. + +########################################################### +# FEATURE EXTRACTION SETTING # +########################################################### +fs: 24000 # Sampling rate. +n_fft: 512 # FFT size (samples). +n_shift: 128 # Hop size (samples). ~5.3ms +win_length: 512 # Window length (samples). ~21.3ms + # If set to null, it will be the same as fft_size. +window: "hann" # Window function. +n_mels: 80 # Number of mel basis. +fmin: 80 # Minimum freq in mel basis calculation. (Hz) +fmax: 12000 # Maximum frequency in mel basis calculation. (Hz) + +########################################################### +# GENERATOR NETWORK ARCHITECTURE SETTING # +########################################################### +generator_params: + in_channels: 80 # Number of input channels. + out_channels: 1 # Number of output channels. + channels: 512 # Number of initial channels. + kernel_size: 7 # Kernel size of initial and final conv layers. + upsample_scales: [8, 4, 2, 2] # Upsampling scales.
+ upsample_kernel_sizes: [16, 8, 4, 4] # Kernel size for upsampling layers. + resblock_kernel_sizes: [3, 7, 11] # Kernel size for residual blocks. + resblock_dilations: # Dilations for residual blocks. + - [1, 3, 5] + - [1, 3, 5] + - [1, 3, 5] + use_additional_convs: True # Whether to use additional conv layer in residual blocks. + bias: True # Whether to use bias parameter in conv. + nonlinear_activation: "leakyrelu" # Nonlinear activation type. + nonlinear_activation_params: # Nonlinear activation parameters. + negative_slope: 0.1 + use_weight_norm: True # Whether to apply weight normalization. + + +########################################################### +# DISCRIMINATOR NETWORK ARCHITECTURE SETTING # +########################################################### +discriminator_params: + scales: 3 # Number of multi-scale discriminators. + scale_downsample_pooling: "AvgPool1D" # Pooling operation for scale discriminator. + scale_downsample_pooling_params: + kernel_size: 4 # Pooling kernel size. + stride: 2 # Pooling stride. + padding: 2 # Padding size. + scale_discriminator_params: + in_channels: 1 # Number of input channels. + out_channels: 1 # Number of output channels. + kernel_sizes: [15, 41, 5, 3] # List of kernel sizes. + channels: 128 # Initial number of channels. + max_downsample_channels: 1024 # Maximum number of channels in downsampling conv layers. + max_groups: 16 # Maximum number of groups in downsampling conv layers. + bias: True + downsample_scales: [4, 4, 4, 4, 1] # Downsampling scales. + nonlinear_activation: "leakyrelu" # Nonlinear activation. + nonlinear_activation_params: + negative_slope: 0.1 + follow_official_norm: True # Whether to follow the official norm setting. + periods: [2, 3, 5, 7, 11] # List of periods for the multi-period discriminator. + period_discriminator_params: + in_channels: 1 # Number of input channels. + out_channels: 1 # Number of output channels. + kernel_sizes: [5, 3] # List of kernel sizes. + channels: 32 # Initial number of channels. + downsample_scales: [3, 3, 3, 3, 1] # Downsampling scales. + max_downsample_channels: 1024 # Maximum number of channels in downsampling conv layers. + bias: True # Whether to use bias parameter in conv layer. + nonlinear_activation: "leakyrelu" # Nonlinear activation. + nonlinear_activation_params: # Nonlinear activation parameters. + negative_slope: 0.1 + use_weight_norm: True # Whether to apply weight normalization. + use_spectral_norm: False # Whether to apply spectral normalization. + + +########################################################### +# STFT LOSS SETTING # +########################################################### +use_stft_loss: False # Whether to use multi-resolution STFT loss. +use_mel_loss: True # Whether to use Mel-spectrogram loss. +mel_loss_params: + fs: 24000 + fft_size: 512 + hop_size: 128 + win_length: 512 + window: "hann" + num_mels: 80 + fmin: 30 + fmax: 12000 + log_base: null +generator_adv_loss_params: + average_by_discriminators: False # Whether to average loss by #discriminators. +discriminator_adv_loss_params: + average_by_discriminators: False # Whether to average loss by #discriminators. +use_feat_match_loss: True +feat_match_loss_params: + average_by_discriminators: False # Whether to average loss by #discriminators. + average_by_layers: False # Whether to average loss by #layers in each discriminator. + include_final_outputs: False # Whether to include final outputs in feat match loss calculation.
+ +########################################################### +# ADVERSARIAL LOSS SETTING # +########################################################### +lambda_aux: 45.0 # Loss balancing coefficient for STFT loss. +lambda_adv: 1.0 # Loss balancing coefficient for adversarial loss. +lambda_feat_match: 2.0 # Loss balancing coefficient for feat match loss. + +########################################################### +# DATA LOADER SETTING # +########################################################### +#batch_size: 16 # Batch size. +batch_size: 1 # Batch size. +batch_max_steps: 8400 # Length of each audio in batch. Make sure it is divisible by hop_size. +num_workers: 1 # Number of workers in DataLoader. + +########################################################### +# OPTIMIZER & SCHEDULER SETTING # +########################################################### +generator_optimizer_params: + beta1: 0.5 + beta2: 0.9 + weight_decay: 0.0 # Generator's weight decay coefficient. +generator_scheduler_params: + learning_rate: 2.0e-4 # Generator's learning rate. + gamma: 0.5 # Generator's scheduler gamma. + milestones: # At each milestone, lr will be multiplied by gamma. + - 200000 + - 400000 + - 600000 + - 800000 +generator_grad_norm: -1 # Generator's gradient norm. +discriminator_optimizer_params: + beta1: 0.5 + beta2: 0.9 + weight_decay: 0.0 # Discriminator's weight decay coefficient. +discriminator_scheduler_params: + learning_rate: 2.0e-4 # Discriminator's learning rate. + gamma: 0.5 # Discriminator's scheduler gamma. + milestones: # At each milestone, lr will be multiplied by gamma. + - 200000 + - 400000 + - 600000 + - 800000 +discriminator_grad_norm: -1 # Discriminator's gradient norm. + +########################################################### +# INTERVAL SETTING # +########################################################### +generator_train_start_steps: 1 # Number of steps to start to train generator. +discriminator_train_start_steps: 0 # Number of steps to start to train discriminator. +train_max_steps: 2600000 # Number of training steps. +save_interval_steps: 5000 # Interval steps to save checkpoint. +eval_interval_steps: 1000 # Interval steps to evaluate the network.
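The `*_scheduler_params` entries above describe a step decay: the learning rate starts at 2.0e-4 and is multiplied by `gamma` (0.5) at each milestone. A small sketch of the resulting schedule (plain Python; `lr_at` is a helper written here only for illustration, not part of the codebase):

```python
learning_rate = 2.0e-4
gamma = 0.5
milestones = [200000, 400000, 600000, 800000]

def lr_at(step: int) -> float:
    """Learning rate in effect after `step` training steps."""
    n_passed = sum(1 for m in milestones if step >= m)
    return learning_rate * gamma**n_passed

for step in (0, 200000, 400000, 600000, 800000, 2600000):
    print(step, lr_at(step))
# 0.0002, 0.0001, 5e-05, 2.5e-05, 1.25e-05, 1.25e-05
```

Past the last milestone the rate stays at 1.25e-5 for the remainder of the 2600000 training steps.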
+ +########################################################### +# OTHER SETTING # +########################################################### +num_snapshots: 4 # max number of snapshots to keep while training +seed: 42 # random seed for paddle, random, and np.random diff --git a/examples/opencpop/voc5/finetune.sh b/examples/opencpop/voc5/finetune.sh new file mode 100755 index 000000000..76f363295 --- /dev/null +++ b/examples/opencpop/voc5/finetune.sh @@ -0,0 +1,74 @@ +#!/bin/bash + +source path.sh + +gpus=0 +stage=0 +stop_stage=100 + +source ${MAIN_ROOT}/utils/parse_options.sh || exit 1 + +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + python3 ${MAIN_ROOT}/paddlespeech/t2s/exps/diffsinger/gen_gta_mel.py \ + --diffsinger-config=diffsinger_opencpop_ckpt_1.4.0/default.yaml \ + --diffsinger-checkpoint=diffsinger_opencpop_ckpt_1.4.0/snapshot_iter_160000.pdz \ + --diffsinger-stat=diffsinger_opencpop_ckpt_1.4.0/speech_stats.npy \ + --diffsinger-stretch=diffsinger_opencpop_ckpt_1.4.0/speech_stretchs.npy \ + --dur-file=~/datasets/Opencpop/segments/transcriptions.txt \ + --output-dir=dump_finetune \ + --phones-dict=diffsinger_opencpop_ckpt_1.4.0/phone_id_map.txt \ + --dataset=opencpop \ + --rootdir=~/datasets/Opencpop/segments/ +fi + +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + python3 ${MAIN_ROOT}/utils/link_wav.py \ + --old-dump-dir=dump \ + --dump-dir=dump_finetune +fi + +if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then + # get features' stats(mean and std) + echo "Get features' stats ..." + cp dump/train/feats_stats.npy dump_finetune/train/ +fi + +if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then + # normalize, dev and test should use train's stats + echo "Normalize ..." + + python3 ${BIN_DIR}/../normalize.py \ + --metadata=dump_finetune/train/raw/metadata.jsonl \ + --dumpdir=dump_finetune/train/norm \ + --stats=dump_finetune/train/feats_stats.npy + python3 ${BIN_DIR}/../normalize.py \ + --metadata=dump_finetune/dev/raw/metadata.jsonl \ + --dumpdir=dump_finetune/dev/norm \ + --stats=dump_finetune/train/feats_stats.npy + + python3 ${BIN_DIR}/../normalize.py \ + --metadata=dump_finetune/test/raw/metadata.jsonl \ + --dumpdir=dump_finetune/test/norm \ + --stats=dump_finetune/train/feats_stats.npy +fi + +# create finetune env +if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then + echo "create finetune env" + python3 local/prepare_env.py \ + --pretrained_model_dir=exp/default/checkpoints/ \ + --output_dir=exp/finetune/ +fi + +# finetune +if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then + CUDA_VISIBLE_DEVICES=${gpus} \ + FLAGS_cudnn_exhaustive_search=true \ + FLAGS_conv_workspace_size_limit=4000 \ + python ${BIN_DIR}/train.py \ + --train-metadata=dump_finetune/train/norm/metadata.jsonl \ + --dev-metadata=dump_finetune/dev/norm/metadata.jsonl \ + --config=conf/finetune.yaml \ + --output-dir=exp/finetune \ + --ngpu=1 +fi diff --git a/examples/opencpop/voc5/local/PTQ_static.sh b/examples/opencpop/voc5/local/PTQ_static.sh new file mode 120000 index 000000000..247ce5c74 --- /dev/null +++ b/examples/opencpop/voc5/local/PTQ_static.sh @@ -0,0 +1 @@ +../../../csmsc/voc1/local/PTQ_static.sh \ No newline at end of file diff --git a/examples/opencpop/voc5/local/dygraph_to_static.sh b/examples/opencpop/voc5/local/dygraph_to_static.sh new file mode 100755 index 000000000..65077661a --- /dev/null +++ b/examples/opencpop/voc5/local/dygraph_to_static.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +config_path=$1 +train_output_path=$2 +ckpt_name=$3 + 
+FLAGS_allocator_strategy=naive_best_fit \ +FLAGS_fraction_of_gpu_memory_to_use=0.01 \ +python3 ${BIN_DIR}/../../dygraph_to_static.py \ + --type=voc \ + --voc=hifigan_opencpop \ + --voc_config=${config_path} \ + --voc_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --voc_stat=dump/train/feats_stats.npy \ + --inference_dir=exp/default/inference/ diff --git a/examples/opencpop/voc5/local/prepare_env.py b/examples/opencpop/voc5/local/prepare_env.py new file mode 120000 index 000000000..be03c86b3 --- /dev/null +++ b/examples/opencpop/voc5/local/prepare_env.py @@ -0,0 +1 @@ +../../../other/tts_finetune/tts3/local/prepare_env.py \ No newline at end of file diff --git a/examples/opencpop/voc5/local/preprocess.sh b/examples/opencpop/voc5/local/preprocess.sh new file mode 120000 index 000000000..f0cb24de9 --- /dev/null +++ b/examples/opencpop/voc5/local/preprocess.sh @@ -0,0 +1 @@ +../../voc1/local/preprocess.sh \ No newline at end of file diff --git a/examples/opencpop/voc5/local/synthesize.sh b/examples/opencpop/voc5/local/synthesize.sh new file mode 120000 index 000000000..c887112c0 --- /dev/null +++ b/examples/opencpop/voc5/local/synthesize.sh @@ -0,0 +1 @@ +../../../csmsc/voc5/local/synthesize.sh \ No newline at end of file diff --git a/examples/opencpop/voc5/local/train.sh b/examples/opencpop/voc5/local/train.sh new file mode 120000 index 000000000..2942893d2 --- /dev/null +++ b/examples/opencpop/voc5/local/train.sh @@ -0,0 +1 @@ +../../../csmsc/voc1/local/train.sh \ No newline at end of file diff --git a/examples/opencpop/voc5/path.sh b/examples/opencpop/voc5/path.sh new file mode 120000 index 000000000..b67fe2b39 --- /dev/null +++ b/examples/opencpop/voc5/path.sh @@ -0,0 +1 @@ +../../csmsc/voc5/path.sh \ No newline at end of file diff --git a/examples/opencpop/voc5/run.sh b/examples/opencpop/voc5/run.sh new file mode 100755 index 000000000..290c90d25 --- /dev/null +++ b/examples/opencpop/voc5/run.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +set -e +source path.sh + +gpus=0 +stage=0 +stop_stage=100 + +conf_path=conf/default.yaml +train_output_path=exp/default +ckpt_name=snapshot_iter_2500000.pdz + +# with the following command, you can choose the stage range you want to run +# such as `./run.sh --stage 0 --stop-stage 0` +# this can not be mixed use with `$1`, `$2` ... 
+source ${MAIN_ROOT}/utils/parse_options.sh || exit 1 + +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + # prepare data + ./local/preprocess.sh ${conf_path} || exit -1 +fi + +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + # train model, all `ckpt` under `train_output_path/checkpoints/` dir + CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path} || exit -1 +fi + +if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then + # synthesize + CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1 +fi + +# dygraph to static +if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then + CUDA_VISIBLE_DEVICES=${gpus} ./local/dygraph_to_static.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1 +fi + +# PTQ_static +if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then + CUDA_VISIBLE_DEVICES=${gpus} ./local/PTQ_static.sh ${train_output_path} hifigan_opencpop || exit -1 +fi diff --git a/paddlespeech/t2s/exps/PTQ_static.py b/paddlespeech/t2s/exps/PTQ_static.py index 8849abe58..a95786450 100644 --- a/paddlespeech/t2s/exps/PTQ_static.py +++ b/paddlespeech/t2s/exps/PTQ_static.py @@ -43,6 +43,7 @@ def parse_args(): 'hifigan_ljspeech', 'hifigan_vctk', 'pwgan_opencpop', + 'hifigan_opencpop', ], help='Choose model type of tts task.') diff --git a/paddlespeech/t2s/exps/diffsinger/gen_gta_mel.py b/paddlespeech/t2s/exps/diffsinger/gen_gta_mel.py new file mode 100644 index 000000000..519808f2a --- /dev/null +++ b/paddlespeech/t2s/exps/diffsinger/gen_gta_mel.py @@ -0,0 +1,240 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# generate mels using durations.txt +# for vocoder finetune +import argparse +import os +from pathlib import Path + +import numpy as np +import paddle +import yaml +from tqdm import tqdm +from yacs.config import CfgNode + +from paddlespeech.t2s.datasets.preprocess_utils import get_sentences_svs +from paddlespeech.t2s.models.diffsinger import DiffSinger +from paddlespeech.t2s.models.diffsinger import DiffSingerInference +from paddlespeech.t2s.modules.normalizer import ZScore +from paddlespeech.t2s.utils import str2bool + + +def evaluate(args, diffsinger_config): + rootdir = Path(args.rootdir).expanduser() + assert rootdir.is_dir() + + # construct dataset for evaluation + with open(args.phones_dict, "r") as f: + phn_id = [line.strip().split() for line in f.readlines()] + vocab_size = len(phn_id) + print("vocab_size:", vocab_size) + + phone_dict = {} + for phn, id in phn_id: + phone_dict[phn] = int(id) + + if args.speaker_dict: + with open(args.speaker_dict, 'rt') as f: + spk_id_list = [line.strip().split() for line in f.readlines()] + spk_num = len(spk_id_list) + else: + spk_num = None + + # load the stretch stats once: row 0 is spec_min, row 1 is spec_max + speech_stretchs = np.load(args.diffsinger_stretch) + spec_min = paddle.to_tensor(speech_stretchs[0]) + spec_max = paddle.to_tensor(speech_stretchs[1]) + print("min and max spec done!") + + odim = diffsinger_config.n_mels + diffsinger_config["model"]["fastspeech2_params"]["spk_num"] = spk_num + model = DiffSinger( + spec_min=spec_min, + spec_max=spec_max, + idim=vocab_size, + odim=odim, + **diffsinger_config["model"], ) + + model.set_state_dict(paddle.load(args.diffsinger_checkpoint)["main_params"]) + model.eval() + + stat = np.load(args.diffsinger_stat) + mu, std = stat + mu = paddle.to_tensor(mu) + std = paddle.to_tensor(std) + diffsinger_normalizer = ZScore(mu, std) + + diffsinger_inference = DiffSingerInference(diffsinger_normalizer, model) + diffsinger_inference.eval() + + output_dir = Path(args.output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + + sentences, speaker_set = get_sentences_svs( + args.dur_file, + dataset=args.dataset, + sample_rate=diffsinger_config.fs, + n_shift=diffsinger_config.n_shift, ) + + if args.dataset == "opencpop": + wavdir = rootdir / "wavs" + # split data into 3 sections + train_file = rootdir / "train.txt" + train_wav_files = [] + with open(train_file, "r") as f_train: + for line in f_train.readlines(): + utt = line.split("|")[0] + wav_name = utt + ".wav" + wav_path = wavdir / wav_name + train_wav_files.append(wav_path) + + test_file = rootdir / "test.txt" + dev_wav_files = [] + test_wav_files = [] + num_dev = 106 + count = 0 + with open(test_file, "r") as f_test: + for line in f_test.readlines(): + count += 1 + utt = line.split("|")[0] + wav_name = utt + ".wav" + wav_path = wavdir / wav_name + if count > num_dev: + test_wav_files.append(wav_path) + else: + dev_wav_files.append(wav_path) + else: + print("dataset should be in {opencpop} now!") + + train_wav_files = [ + os.path.basename(str(str_path)) for str_path in train_wav_files + ] + dev_wav_files = [ + os.path.basename(str(str_path)) for str_path in dev_wav_files + ] + test_wav_files = [ + os.path.basename(str(str_path)) for str_path in test_wav_files + ] + + for i, utt_id in enumerate(tqdm(sentences)): + phones = sentences[utt_id][0] + durations = sentences[utt_id][1] + note = sentences[utt_id][2] + note_dur = sentences[utt_id][3] + is_slur = sentences[utt_id][4] + speaker = sentences[utt_id][-1] + + phone_ids = [phone_dict[phn] for phn
in phones] + phone_ids = paddle.to_tensor(np.array(phone_ids)) + + if args.speaker_dict: + speaker_id = int( + [item[1] for item in spk_id_list if speaker == item[0]][0]) + speaker_id = paddle.to_tensor(speaker_id) + else: + speaker_id = None + + durations = paddle.to_tensor(np.array(durations)) + note = paddle.to_tensor(np.array(note)) + note_dur = paddle.to_tensor(np.array(note_dur)) + is_slur = paddle.to_tensor(np.array(is_slur)) + # the generated mel and the ground truth may differ by 1 or 2 frames, but batch_fn will fix that + # split data into 3 sections + + wav_path = utt_id + ".wav" + + if wav_path in train_wav_files: + sub_output_dir = output_dir / ("train/raw") + elif wav_path in dev_wav_files: + sub_output_dir = output_dir / ("dev/raw") + elif wav_path in test_wav_files: + sub_output_dir = output_dir / ("test/raw") + + sub_output_dir.mkdir(parents=True, exist_ok=True) + + with paddle.no_grad(): + mel = diffsinger_inference( + text=phone_ids, + note=note, + note_dur=note_dur, + is_slur=is_slur, + get_mel_fs2=False) + np.save(sub_output_dir / (utt_id + "_feats.npy"), mel) + + +def main(): + # parse args and config + parser = argparse.ArgumentParser( + description="Generate mel with diffsinger.") + parser.add_argument( + "--dataset", + default="opencpop", + type=str, + help="name of dataset, should be in {opencpop} now") + parser.add_argument( + "--rootdir", default=None, type=str, help="directory to dataset.") + parser.add_argument( + "--diffsinger-config", type=str, help="diffsinger config file.") + parser.add_argument( + "--diffsinger-checkpoint", + type=str, + help="diffsinger checkpoint to load.") + parser.add_argument( + "--diffsinger-stat", + type=str, + help="mean and standard deviation used to normalize spectrogram when training diffsinger." + ) + parser.add_argument( + "--diffsinger-stretch", + type=str, + help="min and max mel used to stretch before training diffusion.") + + parser.add_argument( + "--phones-dict", + type=str, + default="phone_id_map.txt", + help="phone vocabulary file.") + + parser.add_argument( + "--speaker-dict", type=str, default=None, help="speaker id map file.") + + parser.add_argument( + "--dur-file", default=None, type=str, help="path to durations.txt.") + parser.add_argument("--output-dir", type=str, help="output dir.") + parser.add_argument( + "--ngpu", type=int, default=1, help="if ngpu == 0, use cpu.") + + args = parser.parse_args() + + if args.ngpu == 0: + paddle.set_device("cpu") + elif args.ngpu > 0: + paddle.set_device("gpu") + else: + print("ngpu should be >= 0!") + + with open(args.diffsinger_config) as f: + diffsinger_config = CfgNode(yaml.safe_load(f)) + + print("========Args========") + print(yaml.safe_dump(vars(args))) + print("========Config========") + print(diffsinger_config) + + evaluate(args, diffsinger_config) + + +if __name__ == "__main__": + main() diff --git a/paddlespeech/t2s/exps/dygraph_to_static.py b/paddlespeech/t2s/exps/dygraph_to_static.py index 3e6e94857..5e15ca4ca 100644 --- a/paddlespeech/t2s/exps/dygraph_to_static.py +++ b/paddlespeech/t2s/exps/dygraph_to_static.py @@ -132,6 +132,7 @@ def parse_args(): 'pwgan_male', 'hifigan_male', 'pwgan_opencpop', + 'hifigan_opencpop', ], help='Choose vocoder type of tts task.') parser.add_argument( From 706a68bde9bbbec4688506c917caf84b575291b1 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Tue, 14 Mar 2023 20:41:23 +0800 Subject: [PATCH 20/37] fix dtype diff of last expand_v2 op of VITS (#3041) --- paddlespeech/t2s/models/vits/flow.py | 9 +++++---- paddlespeech/t2s/models/vits/transform.py | 2 +-
paddlespeech/t2s/modules/nets_utils.py | 4 ++-- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/paddlespeech/t2s/models/vits/flow.py b/paddlespeech/t2s/models/vits/flow.py index 7593eb727..94df968a0 100644 --- a/paddlespeech/t2s/models/vits/flow.py +++ b/paddlespeech/t2s/models/vits/flow.py @@ -334,11 +334,12 @@ class ConvFlow(nn.Layer): unnorm_widths = h[..., :self.bins] / denom unnorm_heights = h[..., self.bins:2 * self.bins] / denom unnorm_derivatives = h[..., 2 * self.bins:] + xb, logdet_abs = piecewise_rational_quadratic_transform( - xb, - unnorm_widths, - unnorm_heights, - unnorm_derivatives, + inputs=xb, + unnormalized_widths=unnorm_widths, + unnormalized_heights=unnorm_heights, + unnormalized_derivatives=unnorm_derivatives, inverse=inverse, tails="linear", tail_bound=self.tail_bound, ) diff --git a/paddlespeech/t2s/models/vits/transform.py b/paddlespeech/t2s/models/vits/transform.py index 0edc1d09d..917f28430 100644 --- a/paddlespeech/t2s/models/vits/transform.py +++ b/paddlespeech/t2s/models/vits/transform.py @@ -245,6 +245,6 @@ def rational_quadratic_spline( def _searchsorted(bin_locations, inputs, eps=1e-6): bin_locations[..., -1] += eps mask = inputs[..., None] >= bin_locations - mask_int = paddle.cast(mask, 'int64') + mask_int = paddle.cast(mask, dtype='int64') out = paddle.sum(mask_int, axis=-1) - 1 return out diff --git a/paddlespeech/t2s/modules/nets_utils.py b/paddlespeech/t2s/modules/nets_utils.py index 798e4dee8..99130acca 100644 --- a/paddlespeech/t2s/modules/nets_utils.py +++ b/paddlespeech/t2s/modules/nets_utils.py @@ -145,18 +145,18 @@ def make_pad_mask(lengths, xs=None, length_dim=-1): bs = paddle.shape(lengths)[0] if xs is None: - maxlen = lengths.max() + maxlen = paddle.cast(lengths.max(), dtype=bs.dtype) else: maxlen = paddle.shape(xs)[length_dim] seq_range = paddle.arange(0, maxlen, dtype=paddle.int64) + # position of the last expand op in VITS seq_range_expand = seq_range.unsqueeze(0).expand([bs, maxlen]) seq_length_expand = lengths.unsqueeze(-1) mask = seq_range_expand >= seq_length_expand.cast(seq_range_expand.dtype) if xs is not None: assert paddle.shape(xs)[0] == bs, (paddle.shape(xs)[0], bs) - if length_dim < 0: length_dim = len(paddle.shape(xs)) + length_dim # ind = (:, None, ..., None, :, , None, ..., None) From 5270fda5bf6d151faf96412504db3dad06a4ff14 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Tue, 14 Mar 2023 20:52:55 +0800 Subject: [PATCH 21/37] [docs]update readme (#3043) --- demos/TTSArmLinux/README.md | 12 ++++++------ demos/TTSCppFrontend/README.md | 10 +++++----- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/demos/TTSArmLinux/README.md b/demos/TTSArmLinux/README.md index 50ae1e4bf..a4ccba6c8 100644 --- a/demos/TTSArmLinux/README.md +++ b/demos/TTSArmLinux/README.md @@ -10,7 +10,7 @@ ### 安装依赖 -``` +```bash # Ubuntu sudo apt install build-essential cmake pkg-config wget tar unzip @@ -25,15 +25,13 @@ sudo yum install cmake wget tar unzip 可用以下命令下载: -``` -git clone https://github.com/PaddlePaddle/PaddleSpeech.git -cd PaddleSpeech/demos/TTSArmLinux +```bash ./download.sh ``` ### 编译 Demo -``` +```bash ./build.sh ``` @@ -43,7 +41,9 @@ cd PaddleSpeech/demos/TTSArmLinux ### 运行 -``` +你可以修改 `./front.conf` 中 `--phone2id_path` 参数为你自己的声学模型的 `phone_id_map.txt` 。 + +```bash ./run.sh ./run.sh --sentence "语音合成测试" ./run.sh --sentence "输出到指定的音频文件" --output_wav ./output/test.wav diff --git a/demos/TTSCppFrontend/README.md b/demos/TTSCppFrontend/README.md index 552858de3..c179fdd04 100644 --- a/demos/TTSCppFrontend/README.md +++
b/demos/TTSCppFrontend/README.md @@ -6,7 +6,7 @@ Currently it only supports Chinese, any English word will crash the demo. ## Install Build Tools -``` +```bash # Ubuntu sudo apt install build-essential cmake pkg-config @@ -19,7 +19,7 @@ If your cmake version is too old, you can go here to download a precompiled new ## Build -``` +```bash # Build with all CPU cores ./build.sh @@ -33,14 +33,14 @@ If the download speed is too slow, you can open [third-party/CMakeLists.txt](thi ## Download dictionary files -``` +```bash ./download.sh ``` ## Run You can change `--phone2id_path` in `./front_demo/front.conf` to the `phone_id_map.txt` of your own acoustic model. -``` +```bash ./run_front_demo.sh ./run_front_demo.sh --help ./run_front_demo.sh --sentence "这是语音合成服务的文本前端,用于将文本转换为音素序号数组。" @@ -49,7 +49,7 @@ You can change `--phone2id_path` in `./front_demo/front.conf` to the `phone_id_m ## Clean -``` +```bash ./clean.sh ``` From 03ebbc335bc02914adfc19bf2bc69aa1c536c25d Mon Sep 17 00:00:00 2001 From: TianYuan Date: Wed, 15 Mar 2023 10:19:02 +0800 Subject: [PATCH 22/37] Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index f71d0562a..c86ada9ac 100644 --- a/README.md +++ b/README.md @@ -178,6 +178,7 @@ Via the easy-to-use, efficient, flexible and scalable implementation, our vision - 🧩 *Cascaded models application*: as an extension of the typical traditional audio tasks, we combine the workflows of the aforementioned tasks with other fields like Natural language processing (NLP) and Computer Vision (CV). ### Recent Update +- 🔥 2023.03.14: Add SVS(Singing Voice Synthesis) examples with Opencpop dataset, including [DiffSinger](./examples/opencpop/svs1)、[PWGAN](./examples/opencpop/voc1) and [HiFiGAN](./examples/opencpop/voc5), the effect is continuously optimized. - 👑 2023.03.09: Add [Wav2vec2ASR-zh](./examples/aishell/asr3). - 🎉 2023.03.07: Add [TTS ARM Linux C++ Demo (with C++ Chinese Text Frontend)](./demos/TTSArmLinux). - 🔥 2023.03.03 Add Voice Conversion [StarGANv2-VC synthesize pipeline](./examples/vctk/vc3). From b32b00503747d4768f52abe4b634a4de1a658ee0 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Wed, 15 Mar 2023 10:21:33 +0800 Subject: [PATCH 23/37] Update README_cn.md --- README_cn.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README_cn.md b/README_cn.md index 5771d766b..741513bc3 100644 --- a/README_cn.md +++ b/README_cn.md @@ -183,6 +183,7 @@ - 🧩 级联模型应用: 作为传统语音任务的扩展,我们结合了自然语言处理、计算机视觉等任务,实现更接近实际需求的产业级应用。 ### 近期更新 +- 🔥 2023.03.14: 新增基于 Opencpop 数据集的 SVS (歌唱合成) 示例,包含 [DiffSinger](./examples/opencpop/svs1)、[PWGAN](./examples/opencpop/voc1) 和 [HiFiGAN](./examples/opencpop/voc5),效果持续优化中。 - 👑 2023.03.09: 新增 [Wav2vec2ASR-zh](./examples/aishell/asr3)。 - 🎉 2023.03.07: 新增 [TTS ARM Linux C++ 部署示例 (包含 C++ 中文文本前端模块)](./demos/TTSArmLinux)。 - 🔥 2023.03.03: 新增声音转换模型 [StarGANv2-VC 合成流程](./examples/vctk/vc3)。 From 596f5c28f273cd13ad9db584962ea8409a1b9a54 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Wed, 15 Mar 2023 10:31:49 +0800 Subject: [PATCH 24/37] Update README.md --- README.md | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index c86ada9ac..b4a8be0fa 100644 --- a/README.md +++ b/README.md @@ -585,7 +585,7 @@ PaddleSpeech supports a series of most popular models. They are summarized in [r - Acoustic Model + Acoustic Model Tacotron2 LJSpeech / CSMSC @@ -620,6 +620,13 @@ PaddleSpeech supports a series of most popular models. 
They are summarized in [r ERNIE-SAT-vctk / ERNIE-SAT-aishell3 / ERNIE-SAT-zh_en + + DiffSinger + Opencpop + + DiffSinger-opencpop + + Vocoder WaveFlow @@ -632,7 +639,7 @@ PaddleSpeech supports a series of most popular models. They are summarized in [r Parallel WaveGAN LJSpeech / VCTK / CSMSC / AISHELL-3 - PWGAN-ljspeech / PWGAN-vctk / PWGAN-csmsc / PWGAN-aishell3 + PWGAN-ljspeech / PWGAN-vctk / PWGAN-csmsc / PWGAN-aishell3 / PWGAN-opencpop @@ -653,7 +660,7 @@ PaddleSpeech supports a series of most popular models. They are summarized in [r HiFiGAN LJSpeech / VCTK / CSMSC / AISHELL-3 - HiFiGAN-ljspeech / HiFiGAN-vctk / HiFiGAN-csmsc / HiFiGAN-aishell3 + HiFiGAN-ljspeech / HiFiGAN-vctk / HiFiGAN-csmsc / HiFiGAN-aishell3 / HiFiGAN-opencpop From 78a0ded1b23e469fc212c42094f1dc29a3e301f2 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Wed, 15 Mar 2023 10:41:58 +0800 Subject: [PATCH 25/37] Update README_cn.md --- README_cn.md | 40 ++++++++++++++++++++++++---------------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/README_cn.md b/README_cn.md index 741513bc3..4d991f3e8 100644 --- a/README_cn.md +++ b/README_cn.md @@ -577,43 +577,50 @@ PaddleSpeech 的 **语音合成** 主要包含三个模块:文本前端、声 tn / g2p - - - 声学模型 + + + 声学模型 Tacotron2 LJSpeech / CSMSC tacotron2-ljspeech / tacotron2-csmsc - - + + Transformer TTS LJSpeech transformer-ljspeech - - + + SpeedySpeech CSMSC speedyspeech-csmsc - - + + FastSpeech2 LJSpeech / VCTK / CSMSC / AISHELL-3 / ZH_EN / finetune fastspeech2-ljspeech / fastspeech2-vctk / fastspeech2-csmsc / fastspeech2-aishell3 / fastspeech2-zh_en / fastspeech2-finetune - - + + ERNIE-SAT VCTK / AISHELL-3 / ZH_EN ERNIE-SAT-vctk / ERNIE-SAT-aishell3 / ERNIE-SAT-zh_en - + + + DiffSinger + Opencpop + + DiffSinger-opencpop + + 声码器 WaveFlow @@ -624,9 +631,9 @@ PaddleSpeech 的 **语音合成** 主要包含三个模块:文本前端、声 Parallel WaveGAN - LJSpeech / VCTK / CSMSC / AISHELL-3 + LJSpeech / VCTK / CSMSC / AISHELL-3 / Opencpop - PWGAN-ljspeech / PWGAN-vctk / PWGAN-csmsc / PWGAN-aishell3 + PWGAN-ljspeech / PWGAN-vctk / PWGAN-csmsc / PWGAN-aishell3 / PWGAN-opencpop @@ -645,9 +652,9 @@ PaddleSpeech 的 **语音合成** 主要包含三个模块:文本前端、声 HiFiGAN - LJSpeech / VCTK / CSMSC / AISHELL-3 + LJSpeech / VCTK / CSMSC / AISHELL-3 / Opencpop - HiFiGAN-ljspeech / HiFiGAN-vctk / HiFiGAN-csmsc / HiFiGAN-aishell3 + HiFiGAN-ljspeech / HiFiGAN-vctk / HiFiGAN-csmsc / HiFiGAN-aishell3 / HiFiGAN-opencpop @@ -704,6 +711,7 @@ PaddleSpeech 的 **语音合成** 主要包含三个模块:文本前端、声 + **声音分类** From b07f87b42e878e3b6eab621eab4a0edec615ab1d Mon Sep 17 00:00:00 2001 From: TianYuan Date: Wed, 15 Mar 2023 10:48:56 +0800 Subject: [PATCH 26/37] Update README.md --- README.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index b4a8be0fa..fbbb1480f 100644 --- a/README.md +++ b/README.md @@ -578,11 +578,11 @@ PaddleSpeech supports a series of most popular models. They are summarized in [r - Text Frontend -   - - tn / g2p - + Text Frontend +   + + tn / g2p + Acoustic Model @@ -621,12 +621,12 @@ PaddleSpeech supports a series of most popular models. They are summarized in [r - DiffSinger + DiffSinger Opencpop DiffSinger-opencpop - + Vocoder WaveFlow @@ -637,7 +637,7 @@ PaddleSpeech supports a series of most popular models. They are summarized in [r Parallel WaveGAN - LJSpeech / VCTK / CSMSC / AISHELL-3 + LJSpeech / VCTK / CSMSC / AISHELL-3 / Opencpop PWGAN-ljspeech / PWGAN-vctk / PWGAN-csmsc / PWGAN-aishell3 / PWGAN-opencpop @@ -658,7 +658,7 @@ PaddleSpeech supports a series of most popular models. 
They are summarized in [r HiFiGAN - LJSpeech / VCTK / CSMSC / AISHELL-3 + LJSpeech / VCTK / CSMSC / AISHELL-3 / Opencpop HiFiGAN-ljspeech / HiFiGAN-vctk / HiFiGAN-csmsc / HiFiGAN-aishell3 / HiFiGAN-opencpop From 9bf54716138d323f3479efd4daee2355e5d71509 Mon Sep 17 00:00:00 2001 From: zxcd <228587199@qq.com> Date: Wed, 15 Mar 2023 11:15:02 +0800 Subject: [PATCH 27/37] optional tokenizer and fix some doc. (#3042) --- examples/librispeech/asr3/local/data.sh | 0 examples/librispeech/asr3/local/test.sh | 0 examples/librispeech/asr3/local/test_wav.sh | 0 examples/librispeech/asr3/local/train.sh | 0 examples/librispeech/asr3/run.sh | 2 +- paddlespeech/s2t/exps/wav2vec2/bin/test_wav.py | 5 +++-- 6 files changed, 4 insertions(+), 3 deletions(-) mode change 100644 => 100755 examples/librispeech/asr3/local/data.sh mode change 100644 => 100755 examples/librispeech/asr3/local/test.sh mode change 100644 => 100755 examples/librispeech/asr3/local/test_wav.sh mode change 100644 => 100755 examples/librispeech/asr3/local/train.sh diff --git a/examples/librispeech/asr3/local/data.sh b/examples/librispeech/asr3/local/data.sh old mode 100644 new mode 100755 diff --git a/examples/librispeech/asr3/local/test.sh b/examples/librispeech/asr3/local/test.sh old mode 100644 new mode 100755 diff --git a/examples/librispeech/asr3/local/test_wav.sh b/examples/librispeech/asr3/local/test_wav.sh old mode 100644 new mode 100755 diff --git a/examples/librispeech/asr3/local/train.sh b/examples/librispeech/asr3/local/train.sh old mode 100644 new mode 100755 diff --git a/examples/librispeech/asr3/run.sh b/examples/librispeech/asr3/run.sh index 05ad505c7..f52266a1a 100644 --- a/examples/librispeech/asr3/run.sh +++ b/examples/librispeech/asr3/run.sh @@ -6,7 +6,7 @@ set -e gpus=0 stage=0 -stop_stage=0 +stop_stage=4 conf_path=conf/wav2vec2ASR.yaml ips= #xx.xx.xx.xx,xx.xx.xx.xx decode_conf_path=conf/tuning/decode.yaml diff --git a/paddlespeech/s2t/exps/wav2vec2/bin/test_wav.py b/paddlespeech/s2t/exps/wav2vec2/bin/test_wav.py index 2416db7ee..0295713ff 100644 --- a/paddlespeech/s2t/exps/wav2vec2/bin/test_wav.py +++ b/paddlespeech/s2t/exps/wav2vec2/bin/test_wav.py @@ -34,8 +34,9 @@ class Wav2vec2Infer(): self.args = args self.config = config self.audio_file = args.audio_file + self.tokenizer = config.get("tokenizer", None) - if self.config.tokenizer: + if self.tokenizer: self.text_feature = AutoTokenizer.from_pretrained( self.config.tokenizer) else: @@ -72,7 +73,7 @@ class Wav2vec2Infer(): text_feature=self.text_feature, decoding_method=decode_config.decoding_method, beam_size=decode_config.beam_size, - tokenizer=self.config.tokenizer, ) + tokenizer=self.tokenizer, ) rsl = result_transcripts[0] utt = Path(self.audio_file).name logger.info(f"hyp: {utt} {rsl}") From 31a4562ae81b288699772cbaf2117eb4481fc736 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=A4=9C=E9=9B=A8=E9=A3=98=E9=9B=B6?= Date: Wed, 15 Mar 2023 16:56:09 +0800 Subject: [PATCH 28/37] [ASR]add squeezeformer model (#2755) * add squeezeformer model * change CodeStyle, test=asr * change CodeStyle, test=asr * fix subsample rate error, test=asr * merge classes as required, test=asr * change CodeStyle, test=asr * fix missing code, test=asr * split code to new file, test=asr * remove rel_shift, test=asr --- .../asr1/conf/chunk_squeezeformer.yaml | 98 +++++ examples/aishell/asr1/conf/squeezeformer.yaml | 93 +++++ paddlespeech/s2t/models/u2/u2.py | 4 + paddlespeech/s2t/modules/attention.py | 45 ++- .../s2t/modules/conformer_convolution.py | 42 +- paddlespeech/s2t/modules/conv2d.py | 62 
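The `test_wav.py` fix above replaces attribute access with `dict.get`, so a config that simply omits `tokenizer` falls back to the character/word featurizer instead of raising. The same pattern in isolation, using a yacs `CfgNode` as the recipes do; the keys shown are illustrative.

```python
from yacs.config import CfgNode

config = CfgNode({"beam_size": 10})      # no 'tokenizer' key at all

# config.tokenizer would raise AttributeError on this config.
tokenizer = config.get("tokenizer", None)

if tokenizer:
    print("use the configured tokenizer:", tokenizer)
else:
    print("fall back to the char/word text featurizer")
```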
+++ paddlespeech/s2t/modules/encoder.py | 377 +++++++++++++++++- paddlespeech/s2t/modules/encoder_layer.py | 127 +++++- .../s2t/modules/positionwise_feed_forward.py | 32 +- paddlespeech/s2t/modules/subsampling.py | 66 ++- paddlespeech/s2t/modules/time_reduction.py | 263 ++++++++++++ paddlespeech/s2t/utils/utility.py | 7 +- 12 files changed, 1208 insertions(+), 8 deletions(-) create mode 100644 examples/aishell/asr1/conf/chunk_squeezeformer.yaml create mode 100644 examples/aishell/asr1/conf/squeezeformer.yaml create mode 100644 paddlespeech/s2t/modules/conv2d.py create mode 100644 paddlespeech/s2t/modules/time_reduction.py diff --git a/examples/aishell/asr1/conf/chunk_squeezeformer.yaml b/examples/aishell/asr1/conf/chunk_squeezeformer.yaml new file mode 100644 index 000000000..35a90b7d6 --- /dev/null +++ b/examples/aishell/asr1/conf/chunk_squeezeformer.yaml @@ -0,0 +1,98 @@ +############################################ +# Network Architecture # +############################################ +cmvn_file: +cmvn_file_type: "json" +# encoder related +encoder: squeezeformer +encoder_conf: + encoder_dim: 256 # dimension of attention + output_size: 256 # dimension of output + attention_heads: 4 + num_blocks: 12 # the number of encoder blocks + reduce_idx: 5 + recover_idx: 11 + feed_forward_expansion_factor: 8 + input_dropout_rate: 0.1 + feed_forward_dropout_rate: 0.1 + attention_dropout_rate: 0.1 + adaptive_scale: true + cnn_module_kernel: 31 + normalize_before: false + activation_type: 'swish' + pos_enc_layer_type: 'rel_pos' + time_reduction_layer_type: 'stream' + causal: true + use_dynamic_chunk: true + use_dynamic_left_chunk: false + +# decoder related +decoder: transformer +decoder_conf: + attention_heads: 4 + linear_units: 2048 + num_blocks: 6 + dropout_rate: 0.1 # sublayer output dropout + positional_dropout_rate: 0.1 + self_attention_dropout_rate: 0.0 + src_attention_dropout_rate: 0.0 +# hybrid CTC/attention +model_conf: + ctc_weight: 0.3 + lsm_weight: 0.1 # label smoothing option + length_normalized_loss: false + init_type: 'kaiming_uniform' # !Warning: need to convergence + +########################################### +# Data # +########################################### + +train_manifest: data/manifest.train +dev_manifest: data/manifest.dev +test_manifest: data/manifest.test + + +########################################### +# Dataloader # +########################################### + +vocab_filepath: data/lang_char/vocab.txt +spm_model_prefix: '' +unit_type: 'char' +preprocess_config: conf/preprocess.yaml +feat_dim: 80 +stride_ms: 10.0 +window_ms: 25.0 +sortagrad: 0 # Feed samples from shortest to longest ; -1: enabled for all epochs, 0: disabled, other: enabled for 'other' epochs +batch_size: 32 +maxlen_in: 512 # if input length > maxlen-in, batchsize is automatically reduced +maxlen_out: 150 # if output length > maxlen-out, batchsize is automatically reduced +minibatches: 0 # for debug +batch_count: auto +batch_bins: 0 +batch_frames_in: 0 +batch_frames_out: 0 +batch_frames_inout: 0 +num_workers: 2 +subsampling_factor: 1 +num_encs: 1 + +########################################### +# Training # +########################################### +n_epoch: 240 +accum_grad: 1 +global_grad_clip: 5.0 +dist_sampler: True +optim: adam +optim_conf: + lr: 0.001 + weight_decay: 1.0e-6 +scheduler: warmuplr +scheduler_conf: + warmup_steps: 25000 + lr_decay: 1.0 +log_interval: 100 +checkpoint: + kbest_n: 50 + latest_n: 5 diff --git a/examples/aishell/asr1/conf/squeezeformer.yaml 
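In the streaming config above, `reduce_idx: 5` halves the frame rate before encoder block 5 and `recover_idx: 11` restores it before block 11, on top of the 4x convolutional subsampling; this is what the "from 40ms to 80ms per frame" comments in the encoder refer to. A small sketch of the frame period each block sees, assuming the config's 10 ms hop (`stride_ms`):

```python
def frame_period_ms(block: int, reduce_idx: int = 5, recover_idx: int = 11,
                    subsample: int = 4, stride_ms: float = 10.0) -> float:
    """Frame period (ms) seen by a given Squeezeformer block (illustrative)."""
    factor = subsample            # Conv2d front end: 10 ms -> 40 ms
    if block >= reduce_idx:
        factor *= 2               # time reduction: 40 ms -> 80 ms
    if block >= recover_idx:
        factor //= 2              # recovery: back to 40 ms
    return stride_ms * factor

for b in (0, 5, 11):
    print(b, frame_period_ms(b))  # 0 40.0 / 5 80.0 / 11 40.0
```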
b/examples/aishell/asr1/conf/squeezeformer.yaml new file mode 100644 index 000000000..b7841aca5 --- /dev/null +++ b/examples/aishell/asr1/conf/squeezeformer.yaml @@ -0,0 +1,93 @@ +############################################ +# Network Architecture # +############################################ +cmvn_file: +cmvn_file_type: "json" +# encoder related +encoder: squeezeformer +encoder_conf: + encoder_dim: 256 # dimension of attention + output_size: 256 # dimension of output + attention_heads: 4 + num_blocks: 12 # the number of encoder blocks + reduce_idx: 5 + recover_idx: 11 + feed_forward_expansion_factor: 8 + input_dropout_rate: 0.1 + feed_forward_dropout_rate: 0.1 + attention_dropout_rate: 0.1 + adaptive_scale: true + cnn_module_kernel: 31 + normalize_before: false + activation_type: 'swish' + pos_enc_layer_type: 'rel_pos' + time_reduction_layer_type: 'conv1d' + +# decoder related +decoder: transformer +decoder_conf: + attention_heads: 4 + linear_units: 2048 + num_blocks: 6 + dropout_rate: 0.1 + positional_dropout_rate: 0.1 + self_attention_dropout_rate: 0.0 + src_attention_dropout_rate: 0.0 + +# hybrid CTC/attention +model_conf: + ctc_weight: 0.3 + lsm_weight: 0.1 # label smoothing option + length_normalized_loss: false + init_type: 'kaiming_uniform' # !Warning: need to convergence + +########################################### +# Data # +########################################### +train_manifest: data/manifest.train +dev_manifest: data/manifest.dev +test_manifest: data/manifest.test + +########################################### +# Dataloader # +########################################### +vocab_filepath: data/lang_char/vocab.txt +spm_model_prefix: '' +unit_type: 'char' +preprocess_config: conf/preprocess.yaml +feat_dim: 80 +stride_ms: 10.0 +window_ms: 25.0 +sortagrad: 0 # Feed samples from shortest to longest ; -1: enabled for all epochs, 0: disabled, other: enabled for 'other' epochs +batch_size: 32 +maxlen_in: 512 # if input length > maxlen-in, batchsize is automatically reduced +maxlen_out: 150 # if output length > maxlen-out, batchsize is automatically reduced +minibatches: 0 # for debug +batch_count: auto +batch_bins: 0 +batch_frames_in: 0 +batch_frames_out: 0 +batch_frames_inout: 0 +num_workers: 2 +subsampling_factor: 1 +num_encs: 1 + +########################################### +# Training # +########################################### +n_epoch: 150 +accum_grad: 8 +global_grad_clip: 5.0 +dist_sampler: False +optim: adam +optim_conf: + lr: 0.002 + weight_decay: 1.0e-6 +scheduler: warmuplr +scheduler_conf: + warmup_steps: 25000 + lr_decay: 1.0 +log_interval: 100 +checkpoint: + kbest_n: 50 + latest_n: 5 diff --git a/paddlespeech/s2t/models/u2/u2.py b/paddlespeech/s2t/models/u2/u2.py index 544c1e836..6494b5304 100644 --- a/paddlespeech/s2t/models/u2/u2.py +++ b/paddlespeech/s2t/models/u2/u2.py @@ -43,6 +43,7 @@ from paddlespeech.s2t.modules.ctc import CTCDecoderBase from paddlespeech.s2t.modules.decoder import BiTransformerDecoder from paddlespeech.s2t.modules.decoder import TransformerDecoder from paddlespeech.s2t.modules.encoder import ConformerEncoder +from paddlespeech.s2t.modules.encoder import SqueezeformerEncoder from paddlespeech.s2t.modules.encoder import TransformerEncoder from paddlespeech.s2t.modules.initializer import DefaultInitializerContext from paddlespeech.s2t.modules.loss import LabelSmoothingLoss @@ -905,6 +906,9 @@ class U2Model(U2DecodeModel): elif encoder_type == 'conformer': encoder = ConformerEncoder( input_dim, global_cmvn=global_cmvn, 
**configs['encoder_conf']) + elif encoder_type == 'squeezeformer': + encoder = SqueezeformerEncoder( + input_dim, global_cmvn=global_cmvn, **configs['encoder_conf']) else: raise ValueError(f"not support encoder type:{encoder_type}") diff --git a/paddlespeech/s2t/modules/attention.py b/paddlespeech/s2t/modules/attention.py index d9568dcc9..14336c03d 100644 --- a/paddlespeech/s2t/modules/attention.py +++ b/paddlespeech/s2t/modules/attention.py @@ -200,7 +200,12 @@ class MultiHeadedAttention(nn.Layer): class RelPositionMultiHeadedAttention(MultiHeadedAttention): """Multi-Head Attention layer with relative position encoding.""" - def __init__(self, n_head, n_feat, dropout_rate): + def __init__(self, + n_head, + n_feat, + dropout_rate, + adaptive_scale=False, + init_weights=False): """Construct an RelPositionMultiHeadedAttention object. Paper: https://arxiv.org/abs/1901.02860 Args: @@ -223,6 +228,39 @@ class RelPositionMultiHeadedAttention(MultiHeadedAttention): pos_bias_v = self.create_parameter( (self.h, self.d_k), default_initializer=I.XavierUniform()) self.add_parameter('pos_bias_v', pos_bias_v) + self.adaptive_scale = adaptive_scale + if self.adaptive_scale: + ada_scale = self.create_parameter( + [1, 1, n_feat], default_initializer=I.Constant(1.0)) + self.add_parameter('ada_scale', ada_scale) + ada_bias = self.create_parameter( + [1, 1, n_feat], default_initializer=I.Constant(0.0)) + self.add_parameter('ada_bias', ada_bias) + if init_weights: + self.init_weights() + + def init_weights(self): + input_max = (self.h * self.d_k)**-0.5 + self.linear_q._param_attr = paddle.nn.initializer.Uniform( + low=-input_max, high=input_max) + self.linear_q._bias_attr = paddle.nn.initializer.Uniform( + low=-input_max, high=input_max) + self.linear_k._param_attr = paddle.nn.initializer.Uniform( + low=-input_max, high=input_max) + self.linear_k._bias_attr = paddle.nn.initializer.Uniform( + low=-input_max, high=input_max) + self.linear_v._param_attr = paddle.nn.initializer.Uniform( + low=-input_max, high=input_max) + self.linear_v._bias_attr = paddle.nn.initializer.Uniform( + low=-input_max, high=input_max) + self.linear_pos._param_attr = paddle.nn.initializer.Uniform( + low=-input_max, high=input_max) + self.linear_pos._bias_attr = paddle.nn.initializer.Uniform( + low=-input_max, high=input_max) + self.linear_out._param_attr = paddle.nn.initializer.Uniform( + low=-input_max, high=input_max) + self.linear_out._bias_attr = paddle.nn.initializer.Uniform( + low=-input_max, high=input_max) def rel_shift(self, x, zero_triu: bool=False): """Compute relative positinal encoding. 
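The `adaptive_scale` option added above gives the attention module a learned per-channel affine on its inputs, `x * ada_scale + ada_bias`, initialized to the identity (scale 1, bias 0) so training starts as if the rescaling were absent. The operation in isolation, with toy sizes; the class name here is mine.

```python
import paddle
from paddle.nn import initializer as I

class AdaScale(paddle.nn.Layer):
    """Learned per-feature affine, as used by the Squeezeformer sub-modules."""

    def __init__(self, n_feat: int):
        super().__init__()
        # shape (1, 1, n_feat) broadcasts over batch and time
        self.ada_scale = self.create_parameter(
            [1, 1, n_feat], default_initializer=I.Constant(1.0))
        self.ada_bias = self.create_parameter(
            [1, 1, n_feat], default_initializer=I.Constant(0.0))

    def forward(self, x: paddle.Tensor) -> paddle.Tensor:
        return self.ada_scale * x + self.ada_bias

x = paddle.randn([2, 7, 4])                      # (batch, time, feature)
print(bool(paddle.allclose(AdaScale(4)(x), x)))  # identity at init: True
```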
@@ -273,6 +311,11 @@ class RelPositionMultiHeadedAttention(MultiHeadedAttention): where `cache_t == chunk_size * num_decoding_left_chunks` and `head * d_k == size` """ + if self.adaptive_scale: + query = self.ada_scale * query + self.ada_bias + key = self.ada_scale * key + self.ada_bias + value = self.ada_scale * value + self.ada_bias + q, k, v = self.forward_qkv(query, key, value) # q = q.transpose([0, 2, 1, 3]) # (batch, time1, head, d_k) diff --git a/paddlespeech/s2t/modules/conformer_convolution.py b/paddlespeech/s2t/modules/conformer_convolution.py index 09d903eee..7a0c72f3b 100644 --- a/paddlespeech/s2t/modules/conformer_convolution.py +++ b/paddlespeech/s2t/modules/conformer_convolution.py @@ -18,6 +18,7 @@ from typing import Tuple import paddle from paddle import nn +from paddle.nn import initializer as I from typeguard import check_argument_types from paddlespeech.s2t.modules.align import BatchNorm1D @@ -39,7 +40,9 @@ class ConvolutionModule(nn.Layer): activation: nn.Layer=nn.ReLU(), norm: str="batch_norm", causal: bool=False, - bias: bool=True): + bias: bool=True, + adaptive_scale: bool=False, + init_weights: bool=False): """Construct an ConvolutionModule object. Args: channels (int): The number of channels of conv layers. @@ -51,6 +54,18 @@ class ConvolutionModule(nn.Layer): """ assert check_argument_types() super().__init__() + self.bias = bias + self.channels = channels + self.kernel_size = kernel_size + self.adaptive_scale = adaptive_scale + if self.adaptive_scale: + ada_scale = self.create_parameter( + [1, 1, channels], default_initializer=I.Constant(1.0)) + self.add_parameter('ada_scale', ada_scale) + ada_bias = self.create_parameter( + [1, 1, channels], default_initializer=I.Constant(0.0)) + self.add_parameter('ada_bias', ada_bias) + self.pointwise_conv1 = Conv1D( channels, 2 * channels, @@ -105,6 +120,28 @@ class ConvolutionModule(nn.Layer): ) self.activation = activation + if init_weights: + self.init_weights() + + def init_weights(self): + pw_max = self.channels**-0.5 + dw_max = self.kernel_size**-0.5 + self.pointwise_conv1._param_attr = paddle.nn.initializer.Uniform( + low=-pw_max, high=pw_max) + if self.bias: + self.pointwise_conv1._bias_attr = paddle.nn.initializer.Uniform( + low=-pw_max, high=pw_max) + self.depthwise_conv._param_attr = paddle.nn.initializer.Uniform( + low=-dw_max, high=dw_max) + if self.bias: + self.depthwise_conv._bias_attr = paddle.nn.initializer.Uniform( + low=-dw_max, high=dw_max) + self.pointwise_conv2._param_attr = paddle.nn.initializer.Uniform( + low=-pw_max, high=pw_max) + if self.bias: + self.pointwise_conv2._bias_attr = paddle.nn.initializer.Uniform( + low=-pw_max, high=pw_max) + def forward( self, x: paddle.Tensor, @@ -123,6 +160,9 @@ class ConvolutionModule(nn.Layer): paddle.Tensor: Output tensor (#batch, time, channels). paddle.Tensor: Output cache tensor (#batch, channels, time') """ + if self.adaptive_scale: + x = self.ada_scale * x + self.ada_bias + # exchange the temporal dimension and the feature dimension x = x.transpose([0, 2, 1]) # [B, C, T] diff --git a/paddlespeech/s2t/modules/conv2d.py b/paddlespeech/s2t/modules/conv2d.py new file mode 100644 index 000000000..ca6e136ad --- /dev/null +++ b/paddlespeech/s2t/modules/conv2d.py @@ -0,0 +1,62 @@ +from typing import Optional +from typing import Union + +import paddle +import paddle.nn.functional as F +from paddle.nn.layer.conv import _ConvNd + +__all__ = ['Conv2DValid'] + + +class Conv2DValid(_ConvNd): + """ + Conv2d operator for VALID mode padding. 
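The various `init_weights` helpers sample uniformly within plus or minus `fan_in ** -0.5`: the pointwise convolutions use a bound of `channels ** -0.5` and the depthwise convolution `kernel_size ** -0.5`. A small sketch of those bounds, passing the initializer at construction time (the layer choice is illustrative). Note that `_param_attr` is ordinarily consumed when a layer first creates its parameters, so assigning it afterwards, as the helpers above do, may depend on Paddle re-reading the attribute; supplying `weight_attr` at construction is the unambiguous route.

```python
import paddle

channels, kernel_size = 256, 31
pw_max = channels ** -0.5      # about +/- 0.0625 for the pointwise convs
dw_max = kernel_size ** -0.5   # about +/- 0.1796 for the depthwise conv

# Passing the bound at construction time guarantees it is applied.
conv = paddle.nn.Conv1D(
    in_channels=channels,
    out_channels=channels,
    kernel_size=kernel_size,
    groups=channels,
    weight_attr=paddle.nn.initializer.Uniform(low=-dw_max, high=dw_max))
w = conv.weight
print(float(w.min()) >= -dw_max and float(w.max()) <= dw_max)  # True
```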
+ """ + + def __init__(self, + in_channels: int, + out_channels: int, + kernel_size: int, + stride: int=1, + padding: Union[str, int]=0, + dilation: int=1, + groups: int=1, + padding_mode: str='zeros', + weight_attr=None, + bias_attr=None, + data_format="NCHW", + valid_trigx: bool=False, + valid_trigy: bool=False) -> None: + super(Conv2DValid, self).__init__( + in_channels, + out_channels, + kernel_size, + False, + 2, + stride=stride, + padding=padding, + padding_mode=padding_mode, + dilation=dilation, + groups=groups, + weight_attr=weight_attr, + bias_attr=bias_attr, + data_format=data_format) + self.valid_trigx = valid_trigx + self.valid_trigy = valid_trigy + + def _conv_forward(self, + input: paddle.Tensor, + weight: paddle.Tensor, + bias: Optional[paddle.Tensor]): + validx, validy = 0, 0 + if self.valid_trigx: + validx = (input.shape[-2] * + (self._stride[-2] - 1) - 1 + self._kernel_size[-2]) // 2 + if self.valid_trigy: + validy = (input.shape[-1] * + (self._stride[-1] - 1) - 1 + self._kernel_size[-1]) // 2 + return F.conv2d(input, weight, bias, self._stride, (validx, validy), + self._dilation, self._groups) + + def forward(self, input: paddle.Tensor) -> paddle.Tensor: + return self._conv_forward(input, self.weight, self.bias) diff --git a/paddlespeech/s2t/modules/encoder.py b/paddlespeech/s2t/modules/encoder.py index fd7bd7b9a..d90d69d77 100644 --- a/paddlespeech/s2t/modules/encoder.py +++ b/paddlespeech/s2t/modules/encoder.py @@ -14,7 +14,10 @@ # limitations under the License. # Modified from wenet(https://github.com/wenet-e2e/wenet) """Encoder definition.""" +from typing import List +from typing import Optional from typing import Tuple +from typing import Union import paddle from paddle import nn @@ -22,6 +25,7 @@ from typeguard import check_argument_types from paddlespeech.s2t.modules.activation import get_activation from paddlespeech.s2t.modules.align import LayerNorm +from paddlespeech.s2t.modules.align import Linear from paddlespeech.s2t.modules.attention import MultiHeadedAttention from paddlespeech.s2t.modules.attention import RelPositionMultiHeadedAttention from paddlespeech.s2t.modules.conformer_convolution import ConvolutionModule @@ -29,6 +33,7 @@ from paddlespeech.s2t.modules.embedding import NoPositionalEncoding from paddlespeech.s2t.modules.embedding import PositionalEncoding from paddlespeech.s2t.modules.embedding import RelPositionalEncoding from paddlespeech.s2t.modules.encoder_layer import ConformerEncoderLayer +from paddlespeech.s2t.modules.encoder_layer import SqueezeformerEncoderLayer from paddlespeech.s2t.modules.encoder_layer import TransformerEncoderLayer from paddlespeech.s2t.modules.mask import add_optional_chunk_mask from paddlespeech.s2t.modules.mask import make_non_pad_mask @@ -36,12 +41,19 @@ from paddlespeech.s2t.modules.positionwise_feed_forward import PositionwiseFeedF from paddlespeech.s2t.modules.subsampling import Conv2dSubsampling4 from paddlespeech.s2t.modules.subsampling import Conv2dSubsampling6 from paddlespeech.s2t.modules.subsampling import Conv2dSubsampling8 +from paddlespeech.s2t.modules.subsampling import DepthwiseConv2DSubsampling4 from paddlespeech.s2t.modules.subsampling import LinearNoSubsampling +from paddlespeech.s2t.modules.time_reduction import TimeReductionLayer1D +from paddlespeech.s2t.modules.time_reduction import TimeReductionLayer2D +from paddlespeech.s2t.modules.time_reduction import TimeReductionLayerStream from paddlespeech.s2t.utils.log import Log logger = Log(__name__).getlog() -__all__ = ["BaseEncoder", 
'TransformerEncoder', "ConformerEncoder"] +__all__ = [ + "BaseEncoder", 'TransformerEncoder', "ConformerEncoder", + "SqueezeformerEncoder" +] class BaseEncoder(nn.Layer): @@ -487,3 +499,366 @@ class ConformerEncoder(BaseEncoder): normalize_before=normalize_before, concat_after=concat_after) for _ in range(num_blocks) ]) + + +class SqueezeformerEncoder(nn.Layer): + def __init__(self, + input_size: int, + encoder_dim: int=256, + output_size: int=256, + attention_heads: int=4, + num_blocks: int=12, + reduce_idx: Optional[Union[int, List[int]]]=5, + recover_idx: Optional[Union[int, List[int]]]=11, + feed_forward_expansion_factor: int=4, + dw_stride: bool=False, + input_dropout_rate: float=0.1, + pos_enc_layer_type: str="rel_pos", + time_reduction_layer_type: str="conv1d", + feed_forward_dropout_rate: float=0.1, + attention_dropout_rate: float=0.1, + cnn_module_kernel: int=31, + cnn_norm_type: str="layer_norm", + dropout: float=0.1, + causal: bool=False, + adaptive_scale: bool=True, + activation_type: str="swish", + init_weights: bool=True, + global_cmvn: paddle.nn.Layer=None, + normalize_before: bool=False, + use_dynamic_chunk: bool=False, + concat_after: bool=False, + static_chunk_size: int=0, + use_dynamic_left_chunk: bool=False): + """Construct SqueezeformerEncoder + + Args: + input_size to use_dynamic_chunk, see in Transformer BaseEncoder. + encoder_dim (int): The hidden dimension of encoder layer. + output_size (int): The output dimension of final projection layer. + attention_heads (int): Num of attention head in attention module. + num_blocks (int): Num of encoder layers. + reduce_idx Optional[Union[int, List[int]]]: + reduce layer index, from 40ms to 80ms per frame. + recover_idx Optional[Union[int, List[int]]]: + recover layer index, from 80ms to 40ms per frame. + feed_forward_expansion_factor (int): Enlarge coefficient of FFN. + dw_stride (bool): Whether do depthwise convolution + on subsampling module. + input_dropout_rate (float): Dropout rate of input projection layer. + pos_enc_layer_type (str): Self attention type. + time_reduction_layer_type (str): Conv1d or Conv2d reduction layer. + cnn_module_kernel (int): Kernel size of CNN module. + activation_type (str): Encoder activation function type. + cnn_module_kernel (int): Kernel size of convolution module. + adaptive_scale (bool): Whether to use adaptive scale. + init_weights (bool): Whether to initialize weights. + causal (bool): whether to use causal convolution or not. 
+ """ + assert check_argument_types() + super().__init__() + self.global_cmvn = global_cmvn + self.reduce_idx: Optional[Union[int, List[int]]] = [reduce_idx] \ + if type(reduce_idx) == int else reduce_idx + self.recover_idx: Optional[Union[int, List[int]]] = [recover_idx] \ + if type(recover_idx) == int else recover_idx + self.check_ascending_list() + if reduce_idx is None: + self.time_reduce = None + else: + if recover_idx is None: + self.time_reduce = 'normal' # no recovery at the end + else: + self.time_reduce = 'recover' # recovery at the end + assert len(self.reduce_idx) == len(self.recover_idx) + self.reduce_stride = 2 + self._output_size = output_size + self.normalize_before = normalize_before + self.static_chunk_size = static_chunk_size + self.use_dynamic_chunk = use_dynamic_chunk + self.use_dynamic_left_chunk = use_dynamic_left_chunk + activation = get_activation(activation_type) + + # self-attention module definition + if pos_enc_layer_type != "rel_pos": + encoder_selfattn_layer = MultiHeadedAttention + encoder_selfattn_layer_args = (attention_heads, output_size, + attention_dropout_rate) + else: + encoder_selfattn_layer = RelPositionMultiHeadedAttention + encoder_selfattn_layer_args = (attention_heads, encoder_dim, + attention_dropout_rate, + adaptive_scale, init_weights) + + # feed-forward module definition + positionwise_layer = PositionwiseFeedForward + positionwise_layer_args = ( + encoder_dim, encoder_dim * feed_forward_expansion_factor, + feed_forward_dropout_rate, activation, adaptive_scale, init_weights) + + # convolution module definition + convolution_layer = ConvolutionModule + convolution_layer_args = (encoder_dim, cnn_module_kernel, activation, + cnn_norm_type, causal, True, adaptive_scale, + init_weights) + + self.embed = DepthwiseConv2DSubsampling4( + 1, encoder_dim, + RelPositionalEncoding(encoder_dim, dropout_rate=0.1), dw_stride, + input_size, input_dropout_rate, init_weights) + + self.preln = LayerNorm(encoder_dim) + self.encoders = paddle.nn.LayerList([ + SqueezeformerEncoderLayer( + encoder_dim, + encoder_selfattn_layer(*encoder_selfattn_layer_args), + positionwise_layer(*positionwise_layer_args), + convolution_layer(*convolution_layer_args), + positionwise_layer(*positionwise_layer_args), normalize_before, + dropout, concat_after) for _ in range(num_blocks) + ]) + if time_reduction_layer_type == 'conv1d': + time_reduction_layer = TimeReductionLayer1D + time_reduction_layer_args = { + 'channel': encoder_dim, + 'out_dim': encoder_dim, + } + elif time_reduction_layer_type == 'stream': + time_reduction_layer = TimeReductionLayerStream + time_reduction_layer_args = { + 'channel': encoder_dim, + 'out_dim': encoder_dim, + } + else: + time_reduction_layer = TimeReductionLayer2D + time_reduction_layer_args = {'encoder_dim': encoder_dim} + + self.time_reduction_layer = time_reduction_layer( + **time_reduction_layer_args) + self.time_recover_layer = Linear(encoder_dim, encoder_dim) + self.final_proj = None + if output_size != encoder_dim: + self.final_proj = Linear(encoder_dim, output_size) + + def output_size(self) -> int: + return self._output_size + + def forward( + self, + xs: paddle.Tensor, + xs_lens: paddle.Tensor, + decoding_chunk_size: int=0, + num_decoding_left_chunks: int=-1, + ) -> Tuple[paddle.Tensor, paddle.Tensor]: + """Embed positions in tensor. + Args: + xs: padded input tensor (B, L, D) + xs_lens: input length (B) + decoding_chunk_size: decoding chunk size for dynamic chunk + 0: default for training, use random dynamic chunk. 
+ <0: for decoding, use full chunk. + >0: for decoding, use fixed chunk size as set. + num_decoding_left_chunks: number of left chunks, this is for decoding, + the chunk size is decoding_chunk_size. + >=0: use num_decoding_left_chunks + <0: use all left chunks + Returns: + encoder output tensor, lens and mask + """ + masks = make_non_pad_mask(xs_lens).unsqueeze(1) # (B, 1, L) + + if self.global_cmvn is not None: + xs = self.global_cmvn(xs) + xs, pos_emb, masks = self.embed(xs, masks) + mask_pad = masks + chunk_masks = add_optional_chunk_mask( + xs, masks, self.use_dynamic_chunk, self.use_dynamic_left_chunk, + decoding_chunk_size, self.static_chunk_size, + num_decoding_left_chunks) + xs_lens = chunk_masks.squeeze(1).sum(1) + xs = self.preln(xs) + recover_activations: \ + List[Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor, paddle.Tensor]] = [] + index = 0 + for i, layer in enumerate(self.encoders): + if self.reduce_idx is not None: + if self.time_reduce is not None and i in self.reduce_idx: + recover_activations.append( + (xs, chunk_masks, pos_emb, mask_pad)) + xs, xs_lens, chunk_masks, mask_pad = self.time_reduction_layer( + xs, xs_lens, chunk_masks, mask_pad) + pos_emb = pos_emb[:, ::2, :] + index += 1 + + if self.recover_idx is not None: + if self.time_reduce == 'recover' and i in self.recover_idx: + index -= 1 + recover_tensor, recover_chunk_masks, recover_pos_emb, recover_mask_pad = recover_activations[ + index] + # recover output length for ctc decode + xs = paddle.repeat_interleave(xs, repeats=2, axis=1) + xs = self.time_recover_layer(xs) + recoverd_t = recover_tensor.shape[1] + xs = recover_tensor + xs[:, :recoverd_t, :] + chunk_masks = recover_chunk_masks + pos_emb = recover_pos_emb + mask_pad = recover_mask_pad + + xs, chunk_masks, _, _ = layer(xs, chunk_masks, pos_emb, mask_pad) + + if self.final_proj is not None: + xs = self.final_proj(xs) + return xs, masks + + def check_ascending_list(self): + if self.reduce_idx is not None: + assert self.reduce_idx == sorted(self.reduce_idx), \ + "reduce_idx should be int or ascending list" + if self.recover_idx is not None: + assert self.recover_idx == sorted(self.recover_idx), \ + "recover_idx should be int or ascending list" + + def calculate_downsampling_factor(self, i: int) -> int: + if self.reduce_idx is None: + return 1 + else: + reduce_exp, recover_exp = 0, 0 + for exp, rd_idx in enumerate(self.reduce_idx): + if i >= rd_idx: + reduce_exp = exp + 1 + if self.recover_idx is not None: + for exp, rc_idx in enumerate(self.recover_idx): + if i >= rc_idx: + recover_exp = exp + 1 + return int(2**(reduce_exp - recover_exp)) + + def forward_chunk( + self, + xs: paddle.Tensor, + offset: int, + required_cache_size: int, + att_cache: paddle.Tensor=paddle.zeros([0, 0, 0, 0]), + cnn_cache: paddle.Tensor=paddle.zeros([0, 0, 0, 0]), + att_mask: paddle.Tensor=paddle.ones([0, 0, 0], dtype=paddle.bool), + ) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor]: + """ Forward just one chunk + + Args: + xs (paddle.Tensor): chunk input, with shape (b=1, time, mel-dim), + where `time == (chunk_size - 1) * subsample_rate + \ + subsample.right_context + 1` + offset (int): current offset in encoder output time stamp + required_cache_size (int): cache size required for next chunk + compuation + >=0: actual cache size + <0: means all history cache is required + att_cache (paddle.Tensor): cache tensor for KEY & VALUE in + transformer/conformer attention, with shape + (elayers, head, cache_t1, d_k * 2), where + `head * d_k == hidden-dim` and + `cache_t1 == 
chunk_size * num_decoding_left_chunks`. + cnn_cache (paddle.Tensor): cache tensor for cnn_module in conformer, + (elayers, b=1, hidden-dim, cache_t2), where + `cache_t2 == cnn.lorder - 1` + + Returns: + paddle.Tensor: output of current input xs, + with shape (b=1, chunk_size, hidden-dim). + paddle.Tensor: new attention cache required for next chunk, with + dynamic shape (elayers, head, ?, d_k * 2) + depending on required_cache_size. + paddle.Tensor: new conformer cnn cache required for next chunk, with + same shape as the original cnn_cache. + """ + assert xs.shape[0] == 1 # batch size must be one + + if self.global_cmvn is not None: + xs = self.global_cmvn(xs) + + # tmp_masks is just for interface compatibility, [B=1, C=1, T] + tmp_masks = paddle.ones([1, 1, xs.shape[1]], dtype=paddle.bool) + # before embed, xs=(B, T, D1), pos_emb=(B=1, T, D) + xs, pos_emb, _ = self.embed(xs, tmp_masks, offset=offset) + + # NOTE(xcsong): After embed, shape(xs) is (b=1, chunk_size, hidden-dim) + elayers, cache_t1 = att_cache.shape[0], att_cache.shape[2] + chunk_size = xs.shape[1] + attention_key_size = cache_t1 + chunk_size + pos_emb = self.embed.position_encoding( + offset=offset - cache_t1, size=attention_key_size) + if required_cache_size < 0: + next_cache_start = 0 + elif required_cache_size == 0: + next_cache_start = attention_key_size + else: + next_cache_start = max(attention_key_size - required_cache_size, 0) + + r_att_cache = [] + r_cnn_cache = [] + + mask_pad = paddle.ones([1, xs.shape[1]], dtype=paddle.bool) + mask_pad = mask_pad.unsqueeze(1) + max_att_len: int = 0 + recover_activations: \ + List[Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor, paddle.Tensor]] = [] + index = 0 + xs_lens = paddle.to_tensor([xs.shape[1]], dtype=paddle.int32) + xs = self.preln(xs) + for i, layer in enumerate(self.encoders): + # NOTE(xcsong): Before layer.forward + # shape(att_cache[i:i + 1]) is (1, head, cache_t1, d_k * 2), + # shape(cnn_cache[i]) is (b=1, hidden-dim, cache_t2) + if self.reduce_idx is not None: + if self.time_reduce is not None and i in self.reduce_idx: + recover_activations.append( + (xs, att_mask, pos_emb, mask_pad)) + xs, xs_lens, att_mask, mask_pad = self.time_reduction_layer( + xs, xs_lens, att_mask, mask_pad) + pos_emb = pos_emb[:, ::2, :] + index += 1 + + if self.recover_idx is not None: + if self.time_reduce == 'recover' and i in self.recover_idx: + index -= 1 + recover_tensor, recover_att_mask, recover_pos_emb, recover_mask_pad = recover_activations[ + index] + # recover output length for ctc decode + xs = paddle.repeat_interleave(xs, repeats=2, axis=1) + xs = self.time_recover_layer(xs) + recoverd_t = recover_tensor.shape[1] + xs = recover_tensor + xs[:, :recoverd_t, :] + att_mask = recover_att_mask + pos_emb = recover_pos_emb + mask_pad = recover_mask_pad + + factor = self.calculate_downsampling_factor(i) + att_cache1 = att_cache[ + i:i + 1][:, :, ::factor, :][:, :, :pos_emb.shape[1] - xs.shape[ + 1], :] + cnn_cache1 = cnn_cache[i] if cnn_cache.shape[0] > 0 else cnn_cache + xs, _, new_att_cache, new_cnn_cache = layer( + xs, + att_mask, + pos_emb, + att_cache=att_cache1, + cnn_cache=cnn_cache1) + # NOTE(xcsong): After layer.forward + # shape(new_att_cache) is (1, head, attention_key_size, d_k * 2), + # shape(new_cnn_cache) is (b=1, hidden-dim, cache_t2) + cached_att = new_att_cache[:, :, next_cache_start // factor:, :] + cached_cnn = new_cnn_cache.unsqueeze(0) + cached_att = cached_att.repeat_interleave(repeats=factor, axis=2) + if i == 0: + # record length for the first block as max 
length + max_att_len = cached_att.shape[2] + r_att_cache.append(cached_att[:, :, :max_att_len, :]) + r_cnn_cache.append(cached_cnn) + # NOTE(xcsong): shape(r_att_cache) is (elayers, head, ?, d_k * 2), + # ? may be larger than cache_t1, it depends on required_cache_size + r_att_cache = paddle.concat(r_att_cache, axis=0) + # NOTE(xcsong): shape(r_cnn_cache) is (e, b=1, hidden-dim, cache_t2) + r_cnn_cache = paddle.concat(r_cnn_cache, axis=0) + + if self.final_proj is not None: + xs = self.final_proj(xs) + return xs, r_att_cache, r_cnn_cache diff --git a/paddlespeech/s2t/modules/encoder_layer.py b/paddlespeech/s2t/modules/encoder_layer.py index dac62bce3..ecba95e85 100644 --- a/paddlespeech/s2t/modules/encoder_layer.py +++ b/paddlespeech/s2t/modules/encoder_layer.py @@ -26,7 +26,10 @@ from paddlespeech.s2t.utils.log import Log logger = Log(__name__).getlog() -__all__ = ["TransformerEncoderLayer", "ConformerEncoderLayer"] +__all__ = [ + "TransformerEncoderLayer", "ConformerEncoderLayer", + "SqueezeformerEncoderLayer" +] class TransformerEncoderLayer(nn.Layer): @@ -276,3 +279,125 @@ class ConformerEncoderLayer(nn.Layer): x = self.norm_final(x) return x, mask, new_att_cache, new_cnn_cache + + +class SqueezeformerEncoderLayer(nn.Layer): + """Encoder layer module.""" + + def __init__(self, + size: int, + self_attn: paddle.nn.Layer, + feed_forward1: Optional[nn.Layer]=None, + conv_module: Optional[nn.Layer]=None, + feed_forward2: Optional[nn.Layer]=None, + normalize_before: bool=False, + dropout_rate: float=0.1, + concat_after: bool=False): + """Construct an EncoderLayer object. + + Args: + size (int): Input dimension. + self_attn (paddle.nn.Layer): Self-attention module instance. + `MultiHeadedAttention` or `RelPositionMultiHeadedAttention` + instance can be used as the argument. + feed_forward1 (paddle.nn.Layer): Feed-forward module instance. + `PositionwiseFeedForward` instance can be used as the argument. + conv_module (paddle.nn.Layer): Convolution module instance. + `ConvlutionLayer` instance can be used as the argument. + feed_forward2 (paddle.nn.Layer): Feed-forward module instance. + `PositionwiseFeedForward` instance can be used as the argument. + dropout_rate (float): Dropout rate. + normalize_before (bool): + True: use layer_norm before each sub-block. + False: use layer_norm after each sub-block. + """ + super().__init__() + self.size = size + self.self_attn = self_attn + self.layer_norm1 = LayerNorm(size) + self.ffn1 = feed_forward1 + self.layer_norm2 = LayerNorm(size) + self.conv_module = conv_module + self.layer_norm3 = LayerNorm(size) + self.ffn2 = feed_forward2 + self.layer_norm4 = LayerNorm(size) + self.normalize_before = normalize_before + self.dropout = nn.Dropout(dropout_rate) + self.concat_after = concat_after + if concat_after: + self.concat_linear = Linear(size + size, size) + else: + self.concat_linear = nn.Identity() + + def forward( + self, + x: paddle.Tensor, + mask: paddle.Tensor, + pos_emb: paddle.Tensor, + mask_pad: paddle.Tensor=paddle.ones([0, 0, 0], dtype=paddle.bool), + att_cache: paddle.Tensor=paddle.zeros([0, 0, 0, 0]), + cnn_cache: paddle.Tensor=paddle.zeros([0, 0, 0, 0]), + ) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor, paddle.Tensor]: + """Compute encoded features. + Args: + x (paddle.Tensor): Input tensor (#batch, time, size). + mask (paddle.Tensor): Mask tensor for the input (#batch, time, time). + (0,0,0) means fake mask. 
+ pos_emb (paddle.Tensor): postional encoding, must not be None + for ConformerEncoderLayer + mask_pad (paddle.Tensor): batch padding mask used for conv module. + (#batch, 1,time), (0, 0, 0) means fake mask. + att_cache (paddle.Tensor): Cache tensor of the KEY & VALUE + (#batch=1, head, cache_t1, d_k * 2), head * d_k == size. + cnn_cache (paddle.Tensor): Convolution cache in conformer layer + (1, #batch=1, size, cache_t2). First dim will not be used, just + for dy2st. + Returns: + paddle.Tensor: Output tensor (#batch, time, size). + paddle.Tensor: Mask tensor (#batch, time, time). + paddle.Tensor: att_cache tensor, + (#batch=1, head, cache_t1 + time, d_k * 2). + paddle.Tensor: cnn_cahce tensor (#batch, size, cache_t2). + """ + # self attention module + residual = x + if self.normalize_before: + x = self.layer_norm1(x) + x_att, new_att_cache = self.self_attn(x, x, x, mask, pos_emb, att_cache) + if self.concat_after: + x_concat = paddle.concat((x, x_att), axis=-1) + x = residual + self.concat_linear(x_concat) + else: + x = residual + self.dropout(x_att) + if not self.normalize_before: + x = self.layer_norm1(x) + + # ffn module + residual = x + if self.normalize_before: + x = self.layer_norm2(x) + x = self.ffn1(x) + x = residual + self.dropout(x) + if not self.normalize_before: + x = self.layer_norm2(x) + + # conv module + residual = x + if self.normalize_before: + x = self.layer_norm3(x) + x, new_cnn_cache = self.conv_module(x, mask_pad, cnn_cache) + x = residual + self.dropout(x) + if not self.normalize_before: + x = self.layer_norm3(x) + + # ffn module + residual = x + if self.normalize_before: + x = self.layer_norm4(x) + x = self.ffn2(x) + # we do not use dropout here since it is inside feed forward function + x = residual + self.dropout(x) + if not self.normalize_before: + x = self.layer_norm4(x) + + return x, mask, new_att_cache, new_cnn_cache diff --git a/paddlespeech/s2t/modules/positionwise_feed_forward.py b/paddlespeech/s2t/modules/positionwise_feed_forward.py index c2725dc5c..9ebd5d638 100644 --- a/paddlespeech/s2t/modules/positionwise_feed_forward.py +++ b/paddlespeech/s2t/modules/positionwise_feed_forward.py @@ -16,6 +16,7 @@ """Positionwise feed forward layer definition.""" import paddle from paddle import nn +from paddle.nn import initializer as I from paddlespeech.s2t.modules.align import Linear from paddlespeech.s2t.utils.log import Log @@ -32,7 +33,9 @@ class PositionwiseFeedForward(nn.Layer): idim: int, hidden_units: int, dropout_rate: float, - activation: nn.Layer=nn.ReLU()): + activation: nn.Layer=nn.ReLU(), + adaptive_scale: bool=False, + init_weights: bool=False): """Construct a PositionwiseFeedForward object. FeedForward are appied on each position of the sequence. 
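The forward pass above makes the block an FFN-sandwiched Conformer variant: self-attention, feed-forward, convolution, feed-forward, each wrapped as `x = LayerNorm(x + Dropout(sublayer(x)))` in the default post-norm setting (`normalize_before=False`). A schematic of that control flow with the sublayers stubbed out; sizes are illustrative.

```python
import paddle
from paddle import nn

def post_norm_residual(x, sublayer, norm, dropout):
    """x -> LayerNorm(x + Dropout(sublayer(x))), the post-norm wrapper."""
    return norm(x + dropout(sublayer(x)))

size = 8
x = paddle.randn([2, 5, size])
dropout = nn.Dropout(0.1)
sublayer = lambda t: t  # stand-in for the MHSA / FFN / conv modules

# attention -> ffn1 -> conv -> ffn2, one LayerNorm per sub-block
for norm in [nn.LayerNorm(size) for _ in range(4)]:
    x = post_norm_residual(x, sublayer, norm, dropout)
print(x.shape)  # [2, 5, 8]
```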
@@ -45,10 +48,35 @@ class PositionwiseFeedForward(nn.Layer): activation (paddle.nn.Layer): Activation function """ super().__init__() + self.idim = idim + self.hidden_units = hidden_units self.w_1 = Linear(idim, hidden_units) self.activation = activation self.dropout = nn.Dropout(dropout_rate) self.w_2 = Linear(hidden_units, idim) + self.adaptive_scale = adaptive_scale + if self.adaptive_scale: + ada_scale = self.create_parameter( + [1, 1, idim], default_initializer=I.XavierUniform()) + self.add_parameter('ada_scale', ada_scale) + ada_bias = self.create_parameter( + [1, 1, idim], default_initializer=I.XavierUniform()) + self.add_parameter('ada_bias', ada_bias) + + if init_weights: + self.init_weights() + + def init_weights(self): + ffn1_max = self.idim**-0.5 + ffn2_max = self.hidden_units**-0.5 + self.w_1._param_attr = paddle.nn.initializer.Uniform( + low=-ffn1_max, high=ffn1_max) + self.w_1._bias_attr = paddle.nn.initializer.Uniform( + low=-ffn1_max, high=ffn1_max) + self.w_2._param_attr = paddle.nn.initializer.Uniform( + low=-ffn2_max, high=ffn2_max) + self.w_2._bias_attr = paddle.nn.initializer.Uniform( + low=-ffn2_max, high=ffn2_max) def forward(self, xs: paddle.Tensor) -> paddle.Tensor: """Forward function. @@ -57,4 +85,6 @@ class PositionwiseFeedForward(nn.Layer): Returns: output tensor, (B, Lmax, D) """ + if self.adaptive_scale: + xs = self.ada_scale * xs + self.ada_bias return self.w_2(self.dropout(self.activation(self.w_1(xs)))) diff --git a/paddlespeech/s2t/modules/subsampling.py b/paddlespeech/s2t/modules/subsampling.py index 782a437ee..ef60bdf0a 100644 --- a/paddlespeech/s2t/modules/subsampling.py +++ b/paddlespeech/s2t/modules/subsampling.py @@ -29,7 +29,7 @@ logger = Log(__name__).getlog() __all__ = [ "LinearNoSubsampling", "Conv2dSubsampling4", "Conv2dSubsampling6", - "Conv2dSubsampling8" + "Conv2dSubsampling8", "DepthwiseConv2DSubsampling4" ] @@ -249,3 +249,67 @@ class Conv2dSubsampling8(Conv2dSubsampling): x = self.linear(x.transpose([0, 2, 1, 3]).reshape([b, -1, c * f])) x, pos_emb = self.pos_enc(x, offset) return x, pos_emb, x_mask[:, :, :-2:2][:, :, :-2:2][:, :, :-2:2] + + +class DepthwiseConv2DSubsampling4(BaseSubsampling): + """Depthwise Convolutional 2D subsampling (to 1/4 length). + + Args: + idim (int): Input dimension. + odim (int): Output dimension. + pos_enc_class (nn.Layer): position encoding class. + dw_stride (int): Whether do depthwise convolution. + input_size (int): filter bank dimension. 
+ + """ + + def __init__(self, + idim: int, + odim: int, + pos_enc_class: nn.Layer, + dw_stride: bool=False, + input_size: int=80, + input_dropout_rate: float=0.1, + init_weights: bool=True): + super(DepthwiseConv2DSubsampling4, self).__init__() + self.idim = idim + self.odim = odim + self.pw_conv = Conv2D( + in_channels=idim, out_channels=odim, kernel_size=3, stride=2) + self.act1 = nn.ReLU() + self.dw_conv = Conv2D( + in_channels=odim, + out_channels=odim, + kernel_size=3, + stride=2, + groups=odim if dw_stride else 1) + self.act2 = nn.ReLU() + self.pos_enc = pos_enc_class + self.input_proj = nn.Sequential( + Linear(odim * (((input_size - 1) // 2 - 1) // 2), odim), + nn.Dropout(p=input_dropout_rate)) + if init_weights: + linear_max = (odim * input_size / 4)**-0.5 + self.input_proj.state_dict()[ + '0.weight'] = paddle.nn.initializer.Uniform( + low=-linear_max, high=linear_max) + self.input_proj.state_dict()[ + '0.bias'] = paddle.nn.initializer.Uniform( + low=-linear_max, high=linear_max) + + self.subsampling_rate = 4 + # 6 = (3 - 1) * 1 + (3 - 1) * 2 + self.right_context = 6 + + def forward(self, x: paddle.Tensor, x_mask: paddle.Tensor, offset: int=0 + ) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor]: + x = x.unsqueeze(1) # (b, c=1, t, f) + x = self.pw_conv(x) + x = self.act1(x) + x = self.dw_conv(x) + x = self.act2(x) + b, c, t, f = x.shape + x = x.transpose([0, 2, 1, 3]).reshape([b, -1, c * f]) + x, pos_emb = self.pos_enc(x, offset) + x = self.input_proj(x) + return x, pos_emb, x_mask[:, :, :-2:2][:, :, :-2:2] diff --git a/paddlespeech/s2t/modules/time_reduction.py b/paddlespeech/s2t/modules/time_reduction.py new file mode 100644 index 000000000..d3393f108 --- /dev/null +++ b/paddlespeech/s2t/modules/time_reduction.py @@ -0,0 +1,263 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# Copyright 2019 Mobvoi Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# Modified from wenet(https://github.com/wenet-e2e/wenet) +"""Subsampling layer definition.""" +from typing import Tuple + +import paddle +import paddle.nn.functional as F +from paddle import nn + +from paddlespeech.s2t import masked_fill +from paddlespeech.s2t.modules.align import Conv1D +from paddlespeech.s2t.modules.conv2d import Conv2DValid +from paddlespeech.s2t.utils.log import Log + +logger = Log(__name__).getlog() + +__all__ = [ + "TimeReductionLayerStream", "TimeReductionLayer1D", "TimeReductionLayer2D" +] + + +class TimeReductionLayer1D(nn.Layer): + """ + Modified NeMo, + Squeezeformer Time Reduction procedure. + Downsamples the audio by `stride` in the time dimension. + Args: + channel (int): input dimension of + MultiheadAttentionMechanism and PositionwiseFeedForward + out_dim (int): Output dimension of the module. + kernel_size (int): Conv kernel size for + depthwise convolution in convolution module + stride (int): Downsampling factor in time dimension. 
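The mask returned by `DepthwiseConv2DSubsampling4` is thinned with the slice `[:, :, :-2:2]`, applied once per stride-2 convolution; each application yields exactly the VALID-conv output length `(T - 3) // 2 + 1` for kernel 3. A quick check that the slice and the formula agree, on a plain Python list standing in for the time axis:

```python
def valid_stride2_len(t: int) -> int:
    return (t - 3) // 2 + 1          # kernel 3, stride 2, no padding

for t in (10, 11, 17):
    sliced = list(range(t))[:-2:2]   # the mask slice on the time axis
    print(t, len(sliced), valid_stride2_len(t))  # the two lengths agree
```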
+ """ + + def __init__(self, + channel: int, + out_dim: int, + kernel_size: int=5, + stride: int=2): + super(TimeReductionLayer1D, self).__init__() + + self.channel = channel + self.out_dim = out_dim + self.kernel_size = kernel_size + self.stride = stride + self.padding = max(0, self.kernel_size - self.stride) + + self.dw_conv = Conv1D( + in_channels=channel, + out_channels=channel, + kernel_size=kernel_size, + stride=stride, + padding=self.padding, + groups=channel, ) + + self.pw_conv = Conv1D( + in_channels=channel, + out_channels=out_dim, + kernel_size=1, + stride=1, + padding=0, + groups=1, ) + + self.init_weights() + + def init_weights(self): + dw_max = self.kernel_size**-0.5 + pw_max = self.channel**-0.5 + self.dw_conv._param_attr = paddle.nn.initializer.Uniform( + low=-dw_max, high=dw_max) + self.dw_conv._bias_attr = paddle.nn.initializer.Uniform( + low=-dw_max, high=dw_max) + self.pw_conv._param_attr = paddle.nn.initializer.Uniform( + low=-pw_max, high=pw_max) + self.pw_conv._bias_attr = paddle.nn.initializer.Uniform( + low=-pw_max, high=pw_max) + + def forward( + self, + xs, + xs_lens: paddle.Tensor, + mask: paddle.Tensor=paddle.ones((0, 0, 0), dtype=paddle.bool), + mask_pad: paddle.Tensor=paddle.ones((0, 0, 0), + dtype=paddle.bool), ): + xs = xs.transpose([0, 2, 1]) # [B, C, T] + xs = masked_fill(xs, mask_pad.equal(0), 0.0) + + xs = self.dw_conv(xs) + xs = self.pw_conv(xs) + + xs = xs.transpose([0, 2, 1]) # [B, T, C] + + B, T, D = xs.shape + mask = mask[:, ::self.stride, ::self.stride] + mask_pad = mask_pad[:, :, ::self.stride] + L = mask_pad.shape[-1] + # For JIT exporting, we remove F.pad operator. + if L - T < 0: + xs = xs[:, :L - T, :] + else: + dummy_pad = paddle.zeros([B, L - T, D], dtype=paddle.float32) + xs = paddle.concat([xs, dummy_pad], axis=1) + + xs_lens = (xs_lens + 1) // 2 + return xs, xs_lens, mask, mask_pad + + +class TimeReductionLayer2D(nn.Layer): + def __init__(self, kernel_size: int=5, stride: int=2, encoder_dim: int=256): + super(TimeReductionLayer2D, self).__init__() + self.encoder_dim = encoder_dim + self.kernel_size = kernel_size + self.dw_conv = Conv2DValid( + in_channels=encoder_dim, + out_channels=encoder_dim, + kernel_size=(kernel_size, 1), + stride=stride, + valid_trigy=True) + self.pw_conv = Conv2DValid( + in_channels=encoder_dim, + out_channels=encoder_dim, + kernel_size=1, + stride=1, + valid_trigx=False, + valid_trigy=False) + + self.kernel_size = kernel_size + self.stride = stride + self.init_weights() + + def init_weights(self): + dw_max = self.kernel_size**-0.5 + pw_max = self.encoder_dim**-0.5 + self.dw_conv._param_attr = paddle.nn.initializer.Uniform( + low=-dw_max, high=dw_max) + self.dw_conv._bias_attr = paddle.nn.initializer.Uniform( + low=-dw_max, high=dw_max) + self.pw_conv._param_attr = paddle.nn.initializer.Uniform( + low=-pw_max, high=pw_max) + self.pw_conv._bias_attr = paddle.nn.initializer.Uniform( + low=-pw_max, high=pw_max) + + def forward( + self, + xs: paddle.Tensor, + xs_lens: paddle.Tensor, + mask: paddle.Tensor=paddle.ones((0, 0, 0), dtype=paddle.bool), + mask_pad: paddle.Tensor=paddle.ones((0, 0, 0), dtype=paddle.bool), + ) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor, paddle.Tensor]: + xs = masked_fill(xs, mask_pad.transpose([0, 2, 1]).equal(0), 0.0) + xs = xs.unsqueeze(1) + padding1 = self.kernel_size - self.stride + xs = F.pad( + xs, (0, 0, 0, 0, 0, padding1, 0, 0), mode='constant', value=0.) 
+ xs = self.dw_conv(xs.transpose([0, 3, 2, 1])) + xs = self.pw_conv(xs).transpose([0, 3, 2, 1]).squeeze(1) + tmp_length = xs.shape[1] + xs_lens = (xs_lens + 1) // 2 + padding2 = max(0, (xs_lens.max() - tmp_length).item()) + batch_size, hidden = xs.shape[0], xs.shape[-1] + dummy_pad = paddle.zeros( + [batch_size, padding2, hidden], dtype=paddle.float32) + xs = paddle.concat([xs, dummy_pad], axis=1) + mask = mask[:, ::2, ::2] + mask_pad = mask_pad[:, :, ::2] + return xs, xs_lens, mask, mask_pad + + +class TimeReductionLayerStream(nn.Layer): + """ + Squeezeformer Time Reduction procedure. + Downsamples the audio by `stride` in the time dimension. + Args: + channel (int): input dimension of + MultiheadAttentionMechanism and PositionwiseFeedForward + out_dim (int): Output dimension of the module. + kernel_size (int): Conv kernel size for + depthwise convolution in convolution module + stride (int): Downsampling factor in time dimension. + """ + + def __init__(self, + channel: int, + out_dim: int, + kernel_size: int=1, + stride: int=2): + super(TimeReductionLayerStream, self).__init__() + + self.channel = channel + self.out_dim = out_dim + self.kernel_size = kernel_size + self.stride = stride + + self.dw_conv = Conv1D( + in_channels=channel, + out_channels=channel, + kernel_size=kernel_size, + stride=stride, + padding=0, + groups=channel) + + self.pw_conv = Conv1D( + in_channels=channel, + out_channels=out_dim, + kernel_size=1, + stride=1, + padding=0, + groups=1) + self.init_weights() + + def init_weights(self): + dw_max = self.kernel_size**-0.5 + pw_max = self.channel**-0.5 + self.dw_conv._param_attr = paddle.nn.initializer.Uniform( + low=-dw_max, high=dw_max) + self.dw_conv._bias_attr = paddle.nn.initializer.Uniform( + low=-dw_max, high=dw_max) + self.pw_conv._param_attr = paddle.nn.initializer.Uniform( + low=-pw_max, high=pw_max) + self.pw_conv._bias_attr = paddle.nn.initializer.Uniform( + low=-pw_max, high=pw_max) + + def forward( + self, + xs, + xs_lens: paddle.Tensor, + mask: paddle.Tensor=paddle.ones([0, 0, 0], dtype=paddle.bool), + mask_pad: paddle.Tensor=paddle.ones([0, 0, 0], dtype=paddle.bool)): + xs = xs.transpose([0, 2, 1]) # [B, C, T] + xs = masked_fill(xs, mask_pad.equal(0), 0.0) + + xs = self.dw_conv(xs) + xs = self.pw_conv(xs) + + xs = xs.transpose([0, 2, 1]) # [B, T, C] + + B, T, D = xs.shape + mask = mask[:, ::self.stride, ::self.stride] + mask_pad = mask_pad[:, :, ::self.stride] + L = mask_pad.shape[-1] + # For JIT exporting, we remove F.pad operator. + if L - T < 0: + xs = xs[:, :L - T, :] + else: + dummy_pad = paddle.zeros([B, L - T, D], dtype=paddle.float32) + xs = paddle.concat([xs, dummy_pad], axis=1) + + xs_lens = (xs_lens + 1) // 2 + return xs, xs_lens, mask, mask_pad diff --git a/paddlespeech/s2t/utils/utility.py b/paddlespeech/s2t/utils/utility.py index fdd8c0292..d7e7c6ca2 100644 --- a/paddlespeech/s2t/utils/utility.py +++ b/paddlespeech/s2t/utils/utility.py @@ -130,8 +130,11 @@ def get_subsample(config): Returns: int: subsample rate. 
""" - input_layer = config["encoder_conf"]["input_layer"] - assert input_layer in ["conv2d", "conv2d6", "conv2d8"] + if config['encoder'] == 'squeezeformer': + return 4 + else: + input_layer = config["encoder_conf"]["input_layer"] + assert input_layer in ["conv2d", "conv2d6", "conv2d8"] if input_layer == "conv2d": return 4 elif input_layer == "conv2d6": From d5720e4e7b8fcb4c555e931ccfe4fe45a39f3050 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Thu, 16 Mar 2023 10:10:02 +0800 Subject: [PATCH 29/37] fix input dtype of elementwise_mul op from bool to int64 (#3054) --- paddlespeech/t2s/models/vits/generator.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/paddlespeech/t2s/models/vits/generator.py b/paddlespeech/t2s/models/vits/generator.py index 44bd78984..427ae09ed 100644 --- a/paddlespeech/t2s/models/vits/generator.py +++ b/paddlespeech/t2s/models/vits/generator.py @@ -559,8 +559,9 @@ class VITSGenerator(nn.Layer): y_lengths = paddle.cast( paddle.clip(paddle.sum(dur, [1, 2]), min=1), dtype='int64') y_mask = make_non_pad_mask(y_lengths).unsqueeze(1) - attn_mask = paddle.unsqueeze(x_mask, 2) * paddle.unsqueeze(y_mask, - -1) + tmp_a = paddle.cast(paddle.unsqueeze(x_mask, 2), dtype='int64') + tmp_b = paddle.cast(paddle.unsqueeze(y_mask, -1), dtype='int64') + attn_mask = tmp_a * tmp_b attn = self._generate_path(dur, attn_mask) # expand the length to match with the feature sequence From 46334ae0450d5568224ae37cbddbe7838ef86761 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Thu, 16 Mar 2023 17:29:01 +0800 Subject: [PATCH 30/37] Update setup.py (#3056) --- docs/requirements.txt | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/requirements.txt b/docs/requirements.txt index e40204228..30622230b 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -39,7 +39,7 @@ sphinx_rtd_theme textgrid timer ToJyutping==0.2.1 -typeguard +typeguard==2.13.3 webrtcvad websockets yacs~=0.1.8 diff --git a/setup.py b/setup.py index ffe4d5f39..1545c6139 100644 --- a/setup.py +++ b/setup.py @@ -65,7 +65,7 @@ base = [ "textgrid", "timer", "ToJyutping==0.2.1", - "typeguard", + "typeguard==2.13.3", "webrtcvad", "yacs~=0.1.8", "zhon", From 880c172db7a6e4e2e0b0f2c3a0b3cdea512e5f0a Mon Sep 17 00:00:00 2001 From: liangym <34430015+lym0302@users.noreply.github.com> Date: Tue, 21 Mar 2023 13:04:28 +0800 Subject: [PATCH 31/37] [TTS] add svs frontend (#3062) --- examples/opencpop/svs1/README.md | 110 ++++- examples/opencpop/svs1/README_cn.md | 107 ++++- .../opencpop/svs1/local/pinyin_to_phone.txt | 418 ++++++++++++++++++ .../opencpop/svs1/local/synthesize_e2e.sh | 53 +++ examples/opencpop/svs1/run.sh | 5 + paddlespeech/t2s/exps/sentences_sing.txt | 2 + paddlespeech/t2s/exps/syn_utils.py | 60 ++- paddlespeech/t2s/exps/synthesize_e2e.py | 48 +- paddlespeech/t2s/frontend/sing_frontend.py | 175 ++++++++ 9 files changed, 957 insertions(+), 21 deletions(-) create mode 100644 examples/opencpop/svs1/local/pinyin_to_phone.txt create mode 100755 examples/opencpop/svs1/local/synthesize_e2e.sh create mode 100644 paddlespeech/t2s/exps/sentences_sing.txt create mode 100644 paddlespeech/t2s/frontend/sing_frontend.py diff --git a/examples/opencpop/svs1/README.md b/examples/opencpop/svs1/README.md index 2e28a6e61..1600d0c76 100644 --- a/examples/opencpop/svs1/README.md +++ b/examples/opencpop/svs1/README.md @@ -70,7 +70,7 @@ Train a FastSpeech2 model. optional arguments: -h, --help show this help message and exit - --config CONFIG fastspeech2 config file. 
+ --config CONFIG diffsinger config file. --train-metadata TRAIN_METADATA training data. --dev-metadata DEV_METADATA @@ -126,6 +126,7 @@ optional arguments: -h, --help show this help message and exit --am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech,tacotron2_aishell3} Choose acoustic model type of tts task. + {diffsinger_opencpop} Choose acoustic model type of svs task. --am_config AM_CONFIG Config of acoustic model. --am_ckpt AM_CKPT Checkpoint file of acoustic model. @@ -141,6 +142,7 @@ optional arguments: whether training voice cloning model. --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,wavernn_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,style_melgan_csmsc} Choose vocoder type of tts task. + {pwgan_opencpop, hifigan_opencpop} Choose vocoder type of svs task. --voc_config VOC_CONFIG Config of voc. --voc_ckpt VOC_CKPT Checkpoint file of voc. @@ -151,9 +153,84 @@ optional arguments: test metadata. --output_dir OUTPUT_DIR output dir. - --speech-stretchs mel min and max values file. + --speech-stretchs SPEECH_STRETCHS + The min and max values of the mel spectrum, using on diffusion of diffsinger. ``` +`./local/synthesize_e2e.sh` calls `${BIN_DIR}/../synthesize_e2e.py`, which can synthesize waveform from text file. +`local/pinyin_to_phone.txt` comes from the readme of the opencpop dataset, indicating the mapping from pinyin to phonemes in opencpop. + +```bash +CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh ${conf_path} ${train_output_path} ${ckpt_name} +``` +```text +usage: synthesize_e2e.py [-h] + [--am {speedyspeech_csmsc,speedyspeech_aishell3,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech}] + [--am_config AM_CONFIG] [--am_ckpt AM_CKPT] + [--am_stat AM_STAT] [--phones_dict PHONES_DICT] + [--speaker_dict SPEAKER_DICT] [--spk_id SPK_ID] + [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,style_melgan_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,wavernn_csmsc}] + [--voc_config VOC_CONFIG] [--voc_ckpt VOC_CKPT] + [--voc_stat VOC_STAT] [--lang LANG] + [--inference_dir INFERENCE_DIR] [--ngpu NGPU] + [--text TEXT] [--output_dir OUTPUT_DIR] + [--pinyin_phone PINYIN_PHONE] + [--speech_stretchs SPEECH_STRETCHS] + +Synthesize with acoustic model & vocoder + +optional arguments: + -h, --help show this help message and exit + --am {speedyspeech_csmsc,speedyspeech_aishell3,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech} + Choose acoustic model type of tts task. + {diffsinger_opencpop} Choose acoustic model type of svs task. + --am_config AM_CONFIG + Config of acoustic model. + --am_ckpt AM_CKPT Checkpoint file of acoustic model. + --am_stat AM_STAT mean and standard deviation used to normalize + spectrogram when training acoustic model. + --phones_dict PHONES_DICT + phone vocabulary file. + --speaker_dict SPEAKER_DICT + speaker id map file. + --spk_id SPK_ID spk id for multi speaker acoustic model + --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,style_melgan_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,wavernn_csmsc} + Choose vocoder type of tts task. + {pwgan_opencpop, hifigan_opencpop} Choose vocoder type of svs task. + --voc_config VOC_CONFIG + Config of voc. + --voc_ckpt VOC_CKPT Checkpoint file of voc. 
+  --voc_stat VOC_STAT   mean and standard deviation used to normalize
+                        spectrogram when training voc.
+  --lang LANG           {zh, en, mix, canton} Choose language type of tts task.
+                        {sing} Choose language type of svs task.
+  --inference_dir INFERENCE_DIR
+                        dir to save inference models
+  --ngpu NGPU           if ngpu == 0, use cpu.
+  --text TEXT           text to synthesize file, a 'utt_id sentence' pair per line for tts task.
+                        A '{ utt_id input_type (is word) text notes note_durs}' or '{utt_id input_type (is phoneme) phones notes note_durs is_slurs}' pair per line for svs task.
+  --output_dir OUTPUT_DIR
+                        output dir.
+  --pinyin_phone PINYIN_PHONE
+                        pinyin to phone map file, using on sing_frontend.
+  --speech_stretchs SPEECH_STRETCHS
+                        The min and max values of the mel spectrum, using on diffusion of diffsinger.
+```
+1. `--am` is acoustic model type with the format {model_name}_{dataset}
+2. `--am_config`, `--am_ckpt`, `--am_stat` and `--phones_dict` are arguments for the acoustic model, which correspond to the 4 files in the diffsinger pretrained model.
+3. `--voc` is vocoder type with the format {model_name}_{dataset}
+4. `--voc_config`, `--voc_ckpt`, `--voc_stat` are arguments for the vocoder, which correspond to the 3 files in the parallel wavegan pretrained model.
+5. `--lang` is the language: `zh`, `en`, `mix` and `canton` for the tts task; `sing` for the svs task.
+6. `--test_metadata` (used by `synthesize.py` above) should be the metadata file in the normalized subfolder of `test` in the `dump` folder.
+7. `--text` is the text file, which contains sentences to synthesize.
+8. `--output_dir` is the directory to save synthesized audio files.
+9. `--ngpu` is the number of gpus to use; if ngpu == 0, the cpu is used.
+10. `--inference_dir` is the directory to save static models. If this option is omitted, no static model is exported or saved.
+11. `--pinyin_phone` is the pinyin-to-phone mapping file used by the sing frontend.
+12. `--speech_stretchs` is the file holding the min and max values of the mel spectrum, used by the diffusion module of diffsinger.
+
+Note: At present, the diffsinger model does not support dynamic-to-static conversion, so do not add `--inference_dir`.
+
 ## Pretrained Model
 Pretrained DiffSinger model:
@@ -165,10 +242,35 @@ diffsinger_opencpop_ckpt_1.4.0.zip
 ├── default.yaml              # default config used to train diffsinger
 ├── energy_stats.npy          # statistics used to normalize energy when training diffsinger if norm is needed
 ├── phone_id_map.txt          # phone vocabulary file when training diffsinger
+├── pinyin_to_phone.txt       # pinyin-to-phoneme mapping file when training diffsinger
 ├── pitch_stats.npy           # statistics used to normalize pitch when training diffsinger if norm is needed
 ├── snapshot_iter_160000.pdz  # model parameters of diffsinger
 ├── speech_stats.npy          # statistics used to normalize mel when training diffsinger if norm is needed
-└── speech_stretchs.npy       # Min and max values to use for mel spectral stretching before training diffusion
+└── speech_stretchs.npy       # min and max values to use for mel spectral stretching before training diffusion
+
+```
+
+You can use the following script to synthesize sentences from `${BIN_DIR}/../sentences_sing.txt` with the pretrained diffsinger and parallel wavegan models.
+ +```bash +source path.sh +FLAGS_allocator_strategy=naive_best_fit \ +FLAGS_fraction_of_gpu_memory_to_use=0.01 \ +python3 ${BIN_DIR}/../synthesize_e2e.py \ + --am=diffsinger_opencpop \ + --am_config=diffsinger_opencpop_ckpt_1.4.0/default.yaml \ + --am_ckpt=diffsinger_opencpop_ckpt_1.4.0/snapshot_iter_160000.pdz \ + --am_stat=diffsinger_opencpop_ckpt_1.4.0/speech_stats.npy \ + --voc=pwgan_opencpop \ + --voc_config=pwgan_opencpop_ckpt_1.4.0/default.yaml \ + --voc_ckpt=pwgan_opencpop_ckpt_1.4.0/snapshot_iter_100000.pdz \ + --voc_stat=pwgan_opencpop_ckpt_1.4.0/feats_stats.npy \ + --lang=sing \ + --text=${BIN_DIR}/../sentences_sing.txt \ + --output_dir=exp/default/test_e2e \ + --phones_dict=diffsinger_opencpop_ckpt_1.4.0/phone_id_map.txt \ + --pinyin_phone=diffsinger_opencpop_ckpt_1.4.0/pinyin_to_phone.txt \ + --speech_stretchs=diffsinger_opencpop_ckpt_1.4.0/speech_stretchs.npy + ``` -At present, the text frontend is not perfect, and the method of `synthesize_e2e` is not supported for synthesizing audio. Try using `synthesize` first. \ No newline at end of file diff --git a/examples/opencpop/svs1/README_cn.md b/examples/opencpop/svs1/README_cn.md index 19908fd60..1435b42ec 100644 --- a/examples/opencpop/svs1/README_cn.md +++ b/examples/opencpop/svs1/README_cn.md @@ -73,7 +73,7 @@ Train a DiffSinger model. optional arguments: -h, --help show this help message and exit - --config CONFIG fastspeech2 config file. + --config CONFIG diffsinger config file. --train-metadata TRAIN_METADATA training data. --dev-metadata DEV_METADATA @@ -131,6 +131,7 @@ optional arguments: -h, --help show this help message and exit --am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech,tacotron2_aishell3} Choose acoustic model type of tts task. + {diffsinger_opencpop} Choose acoustic model type of svs task. --am_config AM_CONFIG Config of acoustic model. --am_ckpt AM_CKPT Checkpoint file of acoustic model. @@ -146,6 +147,7 @@ optional arguments: whether training voice cloning model. --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,wavernn_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,style_melgan_csmsc} Choose vocoder type of tts task. + {pwgan_opencpop, hifigan_opencpop} Choose vocoder type of svs task. --voc_config VOC_CONFIG Config of voc. --voc_ckpt VOC_CKPT Checkpoint file of voc. @@ -156,9 +158,85 @@ optional arguments: test metadata. --output_dir OUTPUT_DIR output dir. - --speech-stretchs mel min and max values file. + --speech-stretchs SPEECH_STRETCHS + The min and max values of the mel spectrum, using on diffusion of diffsinger. 
``` +`./local/synthesize_e2e.sh` 调用 `${BIN_DIR}/../synthesize_e2e.py`,即可从文本文件中合成波形。 +`local/pinyin_to_phone.txt`来源于opencpop数据集中的README,表示opencpop中拼音到音素的映射。 + +```bash +CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh ${conf_path} ${train_output_path} ${ckpt_name} +``` +```text +usage: synthesize_e2e.py [-h] + [--am {speedyspeech_csmsc,speedyspeech_aishell3,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech}] + [--am_config AM_CONFIG] [--am_ckpt AM_CKPT] + [--am_stat AM_STAT] [--phones_dict PHONES_DICT] + [--speaker_dict SPEAKER_DICT] [--spk_id SPK_ID] + [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,style_melgan_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,wavernn_csmsc}] + [--voc_config VOC_CONFIG] [--voc_ckpt VOC_CKPT] + [--voc_stat VOC_STAT] [--lang LANG] + [--inference_dir INFERENCE_DIR] [--ngpu NGPU] + [--text TEXT] [--output_dir OUTPUT_DIR] + [--pinyin_phone PINYIN_PHONE] + [--speech_stretchs SPEECH_STRETCHS] + +Synthesize with acoustic model & vocoder + +optional arguments: + -h, --help show this help message and exit + --am {speedyspeech_csmsc,speedyspeech_aishell3,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech} + Choose acoustic model type of tts task. + {diffsinger_opencpop} Choose acoustic model type of svs task. + --am_config AM_CONFIG + Config of acoustic model. + --am_ckpt AM_CKPT Checkpoint file of acoustic model. + --am_stat AM_STAT mean and standard deviation used to normalize + spectrogram when training acoustic model. + --phones_dict PHONES_DICT + phone vocabulary file. + --speaker_dict SPEAKER_DICT + speaker id map file. + --spk_id SPK_ID spk id for multi speaker acoustic model + --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,style_melgan_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,wavernn_csmsc} + Choose vocoder type of tts task. + {pwgan_opencpop, hifigan_opencpop} Choose vocoder type of svs task. + --voc_config VOC_CONFIG + Config of voc. + --voc_ckpt VOC_CKPT Checkpoint file of voc. + --voc_stat VOC_STAT mean and standard deviation used to normalize + spectrogram when training voc. + --lang LANG {zh, en, mix, canton} Choose language type of tts task. + {sing} Choose language type of svs task. + --inference_dir INFERENCE_DIR + dir to save inference models + --ngpu NGPU if ngpu == 0, use cpu. + --text TEXT text to synthesize file, a 'utt_id sentence' pair per line for tts task. + A '{ utt_id input_type (is word) text notes note_durs}' or '{utt_id input_type (is phoneme) phones notes note_durs is_slurs}' pair per line for svs task. + --output_dir OUTPUT_DIR + output dir. + --pinyin_phone PINYIN_PHONE + pinyin to phone map file, using on sing_frontend. + --speech_stretchs SPEECH_STRETCHS + The min and max values of the mel spectrum, using on diffusion of diffsinger. +``` +1. `--am` 声学模型格式是否符合 {model_name}_{dataset} +2. `--am_config`, `--am_ckpt`, `--am_stat` 和 `--phones_dict` 是声学模型的参数,对应于 diffsinger 预训练模型中的 4 个文件。 +3. `--voc` 声码器(vocoder)格式是否符合 {model_name}_{dataset} +4. `--voc_config`, `--voc_ckpt`, `--voc_stat` 是声码器的参数,对应于 parallel wavegan 预训练模型中的 3 个文件。 +5. `--lang` tts对应模型的语言可以是 `zh`、`en`、`mix`和`canton`。 svs 对应的语言是 `sing` 。 +6. `--test_metadata` 应为 `dump` 文件夹中 `test` 下的规范化元数据文件、 +7. `--text` 是文本文件,其中包含要合成的句子。 +8. `--output_dir` 是保存合成音频文件的目录。 +9. `--ngpu` 要使用的GPU数,如果 ngpu==0,则使用 cpu。 +10. 
`--inference_dir` 静态模型保存的目录。如果不加这一行,就不会生并保存成静态模型。 +11. `--pinyin_phone` 拼音到音素的映射文件。 +12. `--speech_stretchs` mel谱的最大最小值用于diffsinger中diffusion之前的线性拉伸。 + +注意: 目前 diffsinger 模型还不支持动转静,所以不要加 `--inference_dir`。 + + ## 预训练模型 预先训练的 DiffSinger 模型: - [diffsinger_opencpop_ckpt_1.4.0.zip](https://paddlespeech.bj.bcebos.com/t2s/svs/opencpop/diffsinger_opencpop_ckpt_1.4.0.zip) @@ -170,10 +248,33 @@ diffsinger_opencpop_ckpt_1.4.0.zip ├── default.yaml # 用于训练 diffsinger 的默认配置 ├── energy_stats.npy # 训练 diffsinger 时如若需要 norm energy 会使用到的统计数据 ├── phone_id_map.txt # 训练 diffsinger 时的音素词汇文件 +├── pinyin_to_phone.txt # 训练 diffsinger 时的拼音到音素映射文件 ├── pitch_stats.npy # 训练 diffsinger 时如若需要 norm pitch 会使用到的统计数据 ├── snapshot_iter_160000.pdz # 模型参数和优化器状态 ├── speech_stats.npy # 训练 diffsinger 时用于规范化频谱图的统计数据 └── speech_stretchs.npy # 训练 diffusion 前用于 mel 谱拉伸的最小及最大值 ``` -目前文本前端未完善,暂不支持 `synthesize_e2e` 的方式合成音频。尝试效果可先使用 `synthesize`。 +您可以使用以下脚本通过使用预训练的 diffsinger 和 parallel wavegan 模型为 `${BIN_DIR}/../sentences_sing.txt` 合成句子 +```bash +source path.sh + +FLAGS_allocator_strategy=naive_best_fit \ +FLAGS_fraction_of_gpu_memory_to_use=0.01 \ +python3 ${BIN_DIR}/../synthesize_e2e.py \ + --am=diffsinger_opencpop \ + --am_config=diffsinger_opencpop_ckpt_1.4.0/default.yaml \ + --am_ckpt=diffsinger_opencpop_ckpt_1.4.0/snapshot_iter_160000.pdz \ + --am_stat=diffsinger_opencpop_ckpt_1.4.0/speech_stats.npy \ + --voc=pwgan_opencpop \ + --voc_config=pwgan_opencpop_ckpt_1.4.0/default.yaml \ + --voc_ckpt=pwgan_opencpop_ckpt_1.4.0/snapshot_iter_100000.pdz \ + --voc_stat=pwgan_opencpop_ckpt_1.4.0/feats_stats.npy \ + --lang=sing \ + --text=${BIN_DIR}/../sentences_sing.txt \ + --output_dir=exp/default/test_e2e \ + --phones_dict=diffsinger_opencpop_ckpt_1.4.0/phone_id_map.txt \ + --pinyin_phone=diffsinger_opencpop_ckpt_1.4.0/pinyin_to_phone.txt \ + --speech_stretchs=diffsinger_opencpop_ckpt_1.4.0/speech_stretchs.npy + +``` diff --git a/examples/opencpop/svs1/local/pinyin_to_phone.txt b/examples/opencpop/svs1/local/pinyin_to_phone.txt new file mode 100644 index 000000000..34ed079d7 --- /dev/null +++ b/examples/opencpop/svs1/local/pinyin_to_phone.txt @@ -0,0 +1,418 @@ +a|a +ai|ai +an|an +ang|ang +ao|ao +ba|b a +bai|b ai +ban|b an +bang|b ang +bao|b ao +bei|b ei +ben|b en +beng|b eng +bi|b i +bian|b ian +biao|b iao +bie|b ie +bin|b in +bing|b ing +bo|b o +bu|b u +ca|c a +cai|c ai +can|c an +cang|c ang +cao|c ao +ce|c e +cei|c ei +cen|c en +ceng|c eng +cha|ch a +chai|ch ai +chan|ch an +chang|ch ang +chao|ch ao +che|ch e +chen|ch en +cheng|ch eng +chi|ch i +chong|ch ong +chou|ch ou +chu|ch u +chua|ch ua +chuai|ch uai +chuan|ch uan +chuang|ch uang +chui|ch ui +chun|ch un +chuo|ch uo +ci|c i +cong|c ong +cou|c ou +cu|c u +cuan|c uan +cui|c ui +cun|c un +cuo|c uo +da|d a +dai|d ai +dan|d an +dang|d ang +dao|d ao +de|d e +dei|d ei +den|d en +deng|d eng +di|d i +dia|d ia +dian|d ian +diao|d iao +die|d ie +ding|d ing +diu|d iu +dong|d ong +dou|d ou +du|d u +duan|d uan +dui|d ui +dun|d un +duo|d uo +e|e +ei|ei +en|en +eng|eng +er|er +fa|f a +fan|f an +fang|f ang +fei|f ei +fen|f en +feng|f eng +fo|f o +fou|f ou +fu|f u +ga|g a +gai|g ai +gan|g an +gang|g ang +gao|g ao +ge|g e +gei|g ei +gen|g en +geng|g eng +gong|g ong +gou|g ou +gu|g u +gua|g ua +guai|g uai +guan|g uan +guang|g uang +gui|g ui +gun|g un +guo|g uo +ha|h a +hai|h ai +han|h an +hang|h ang +hao|h ao +he|h e +hei|h ei +hen|h en +heng|h eng +hm|h m +hng|h ng +hong|h ong +hou|h ou +hu|h u +hua|h ua +huai|h uai +huan|h uan +huang|h uang +hui|h ui +hun|h un +huo|h uo +ji|j i +jia|j ia +jian|j ian 
+jiang|j iang +jiao|j iao +jie|j ie +jin|j in +jing|j ing +jiong|j iong +jiu|j iu +ju|j v +juan|j van +jue|j ve +jun|j vn +ka|k a +kai|k ai +kan|k an +kang|k ang +kao|k ao +ke|k e +kei|k ei +ken|k en +keng|k eng +kong|k ong +kou|k ou +ku|k u +kua|k ua +kuai|k uai +kuan|k uan +kuang|k uang +kui|k ui +kun|k un +kuo|k uo +la|l a +lai|l ai +lan|l an +lang|l ang +lao|l ao +le|l e +lei|l ei +leng|l eng +li|l i +lia|l ia +lian|l ian +liang|l iang +liao|l iao +lie|l ie +lin|l in +ling|l ing +liu|l iu +lo|l o +long|l ong +lou|l ou +lu|l u +luan|l uan +lun|l un +luo|l uo +lv|l v +lve|l ve +m|m +ma|m a +mai|m ai +man|m an +mang|m ang +mao|m ao +me|m e +mei|m ei +men|m en +meng|m eng +mi|m i +mian|m ian +miao|m iao +mie|m ie +min|m in +ming|m ing +miu|m iu +mo|m o +mou|m ou +mu|m u +n|n +na|n a +nai|n ai +nan|n an +nang|n ang +nao|n ao +ne|n e +nei|n ei +nen|n en +neng|n eng +ng|n g +ni|n i +nian|n ian +niang|n iang +niao|n iao +nie|n ie +nin|n in +ning|n ing +niu|n iu +nong|n ong +nou|n ou +nu|n u +nuan|n uan +nun|n un +nuo|n uo +nv|n v +nve|n ve +o|o +ou|ou +pa|p a +pai|p ai +pan|p an +pang|p ang +pao|p ao +pei|p ei +pen|p en +peng|p eng +pi|p i +pian|p ian +piao|p iao +pie|p ie +pin|p in +ping|p ing +po|p o +pou|p ou +pu|p u +qi|q i +qia|q ia +qian|q ian +qiang|q iang +qiao|q iao +qie|q ie +qin|q in +qing|q ing +qiong|q iong +qiu|q iu +qu|q v +quan|q van +que|q ve +qun|q vn +ran|r an +rang|r ang +rao|r ao +re|r e +ren|r en +reng|r eng +ri|r i +rong|r ong +rou|r ou +ru|r u +rua|r ua +ruan|r uan +rui|r ui +run|r un +ruo|r uo +sa|s a +sai|s ai +san|s an +sang|s ang +sao|s ao +se|s e +sen|s en +seng|s eng +sha|sh a +shai|sh ai +shan|sh an +shang|sh ang +shao|sh ao +she|sh e +shei|sh ei +shen|sh en +sheng|sh eng +shi|sh i +shou|sh ou +shu|sh u +shua|sh ua +shuai|sh uai +shuan|sh uan +shuang|sh uang +shui|sh ui +shun|sh un +shuo|sh uo +si|s i +song|s ong +sou|s ou +su|s u +suan|s uan +sui|s ui +sun|s un +suo|s uo +ta|t a +tai|t ai +tan|t an +tang|t ang +tao|t ao +te|t e +tei|t ei +teng|t eng +ti|t i +tian|t ian +tiao|t iao +tie|t ie +ting|t ing +tong|t ong +tou|t ou +tu|t u +tuan|t uan +tui|t ui +tun|t un +tuo|t uo +wa|w a +wai|w ai +wan|w an +wang|w ang +wei|w ei +wen|w en +weng|w eng +wo|w o +wu|w u +xi|x i +xia|x ia +xian|x ian +xiang|x iang +xiao|x iao +xie|x ie +xin|x in +xing|x ing +xiong|x iong +xiu|x iu +xu|x v +xuan|x van +xue|x ve +xun|x vn +ya|y a +yan|y an +yang|y ang +yao|y ao +ye|y e +yi|y i +yin|y in +ying|y ing +yo|y o +yong|y ong +you|y ou +yu|y v +yuan|y van +yue|y ve +yun|y vn +za|z a +zai|z ai +zan|z an +zang|z ang +zao|z ao +ze|z e +zei|z ei +zen|z en +zeng|z eng +zha|zh a +zhai|zh ai +zhan|zh an +zhang|zh ang +zhao|zh ao +zhe|zh e +zhei|zh ei +zhen|zh en +zheng|zh eng +zhi|zh i +zhong|zh ong +zhou|zh ou +zhu|zh u +zhua|zh ua +zhuai|zh uai +zhuan|zh uan +zhuang|zh uang +zhui|zh ui +zhun|zh un +zhuo|zh uo +zi|z i +zong|z ong +zou|z ou +zu|z u +zuan|z uan +zui|z ui +zun|z un +zuo|z uo \ No newline at end of file diff --git a/examples/opencpop/svs1/local/synthesize_e2e.sh b/examples/opencpop/svs1/local/synthesize_e2e.sh new file mode 100755 index 000000000..b3dc29b11 --- /dev/null +++ b/examples/opencpop/svs1/local/synthesize_e2e.sh @@ -0,0 +1,53 @@ +#!/bin/bash + +config_path=$1 +train_output_path=$2 +ckpt_name=$3 + +stage=0 +stop_stage=0 + +# pwgan +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/../synthesize_e2e.py \ + --am=diffsinger_opencpop \ + 
--am_config=${config_path} \ + --am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --am_stat=dump/train/speech_stats.npy \ + --voc=pwgan_opencpop \ + --voc_config=pwgan_opencpop_ckpt_1.4.0/default.yaml \ + --voc_ckpt=pwgan_opencpop_ckpt_1.4.0/snapshot_iter_100000.pdz \ + --voc_stat=pwgan_opencpop_ckpt_1.4.0/feats_stats.npy \ + --lang=sing \ + --text=${BIN_DIR}/../sentences_sing.txt \ + --output_dir=${train_output_path}/test_e2e \ + --phones_dict=dump/phone_id_map.txt \ + --speech_stretchs=dump/train/speech_stretchs.npy \ + --pinyin_phone=local/pinyin_to_phone.txt +fi + +# for more GAN Vocoders +# hifigan +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + echo "in hifigan syn_e2e" + FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/../synthesize_e2e.py \ + --am=diffsinger_opencpop \ + --am_config=${config_path} \ + --am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --am_stat=dump/train/speech_stats.npy \ + --voc=hifigan_opencpop \ + --voc_config=hifigan_opencpop_ckpt_1.4.0/default.yaml \ + --voc_ckpt=hifigan_opencpop_ckpt_1.4.0/snapshot_iter_625000.pdz \ + --voc_stat=hifigan_opencpop_ckpt_1.4.0/feats_stats.npy \ + --lang=sing \ + --text=${BIN_DIR}/../sentences_sing.txt \ + --output_dir=${train_output_path}/test_e2e \ + --phones_dict=dump/phone_id_map.txt \ + --speech_stretchs=dump/train/speech_stretchs.npy \ + --pinyin_phone=local/pinyin_to_phone.txt + +fi diff --git a/examples/opencpop/svs1/run.sh b/examples/opencpop/svs1/run.sh index 7bde38518..bfe5b6594 100755 --- a/examples/opencpop/svs1/run.sh +++ b/examples/opencpop/svs1/run.sh @@ -30,3 +30,8 @@ if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then # synthesize, vocoder is pwgan by default CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1 fi + +if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then + # synthesize_e2e, vocoder is pwgan by default + CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1 +fi diff --git a/paddlespeech/t2s/exps/sentences_sing.txt b/paddlespeech/t2s/exps/sentences_sing.txt new file mode 100644 index 000000000..7b9c6272d --- /dev/null +++ b/paddlespeech/t2s/exps/sentences_sing.txt @@ -0,0 +1,2 @@ +{"utt_id": "2093003457", "input_type": "word", "text": "小酒窝长睫毛AP是你最美的记号", "notes": "C#4/Db4 | F#4/Gb4 | G#4/Ab4 | A#4/Bb4 F#4/Gb4 | F#4/Gb4 C#4/Db4 | C#4/Db4 | rest | C#4/Db4 | A#4/Bb4 | G#4/Ab4 | A#4/Bb4 | G#4/Ab4 | F4 | C#4/Db4", "note_durs": "0.407140 | 0.376190 | 0.242180 | 0.509550 0.183420 | 0.315400 0.235020 | 0.361660 | 0.223070 | 0.377270 | 0.340550 | 0.299620 | 0.344510 | 0.283770 | 0.323390 | 0.360340"} +{"utt_id": "2093003458", "input_type": "phoneme", "phones": "w o m ei t ian sh ui ui b u u zh ao AP x iang n ian n i d e w ei x iao iao AP" , "notes": "C#4/Db4 C#4/Db4 D#4/Eb4 D#4/Eb4 F4 F4 F#4/Gb4 F#4/Gb4 D#4/Eb4 D#4/Eb4 D#4/Eb4 A#3/Bb3 A#3/Bb3 A#3/Bb3 rest F#4/Gb4 F#4/Gb4 F4 F4 F#4/Gb4 F#4/Gb4 F4 F4 G#4/Ab4 G#4/Ab4 D#4/Eb4 D#4/Eb4 C#4/Db4 rest", "note_durs": "0.221750 0.221750 0.414460 0.414460 0.223160 0.223160 0.430900 0.430900 0.335990 0.269270 0.269270 0.289060 0.522690 0.522690 0.355060 0.397130 0.397130 0.247690 0.247690 0.406720 0.406720 0.246830 0.246830 0.307540 0.307540 0.429910 0.429910 0.519130 0.342300", "is_slurs": "0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0"} \ No newline at end of file diff --git a/paddlespeech/t2s/exps/syn_utils.py b/paddlespeech/t2s/exps/syn_utils.py index 
60608ee5b..2b958b567 100644 --- a/paddlespeech/t2s/exps/syn_utils.py +++ b/paddlespeech/t2s/exps/syn_utils.py @@ -20,6 +20,7 @@ from typing import Dict from typing import List from typing import Optional +import jsonlines import numpy as np import onnxruntime as ort import paddle @@ -35,6 +36,7 @@ from paddlespeech.t2s.datasets.vocoder_batch_fn import Clip_static from paddlespeech.t2s.frontend import English from paddlespeech.t2s.frontend.canton_frontend import CantonFrontend from paddlespeech.t2s.frontend.mix_frontend import MixFrontend +from paddlespeech.t2s.frontend.sing_frontend import SingFrontend from paddlespeech.t2s.frontend.zh_frontend import Frontend from paddlespeech.t2s.modules.normalizer import ZScore from paddlespeech.utils.dynamic_import import dynamic_import @@ -127,6 +129,19 @@ def get_sentences(text_file: Optional[os.PathLike], lang: str='zh'): return sentences +# input for svs +def get_sentences_svs(text_file: Optional[os.PathLike]): + # construct dataset for evaluation + sentences = [] + with jsonlines.open(text_file, 'r') as reader: + svs_inputs = list(reader) + for svs_input in svs_inputs: + utt_id = svs_input['utt_id'] + sentence = svs_input + sentences.append((utt_id, sentence)) + return sentences + + # am only def get_test_dataset(test_metadata: List[Dict[str, Any]], am: str, @@ -268,6 +283,7 @@ def get_dev_dataloader(dev_metadata: List[Dict[str, Any]], def get_frontend(lang: str='zh', phones_dict: Optional[os.PathLike]=None, tones_dict: Optional[os.PathLike]=None, + pinyin_phone: Optional[os.PathLike]=None, use_rhy=False): if lang == 'zh': frontend = Frontend( @@ -281,18 +297,23 @@ def get_frontend(lang: str='zh', elif lang == 'mix': frontend = MixFrontend( phone_vocab_path=phones_dict, tone_vocab_path=tones_dict) + elif lang == 'sing': + frontend = SingFrontend( + pinyin_phone_path=pinyin_phone, phone_vocab_path=phones_dict) else: print("wrong lang!") return frontend -def run_frontend(frontend: object, - text: str, - merge_sentences: bool=False, - get_tone_ids: bool=False, - lang: str='zh', - to_tensor: bool=True, - add_blank: bool=False): +def run_frontend( + frontend: object, + text: str, + merge_sentences: bool=False, + get_tone_ids: bool=False, + lang: str='zh', + to_tensor: bool=True, + add_blank: bool=False, + svs_input: Dict[str, str]=None, ): outs = dict() if lang == 'zh': input_ids = {} @@ -326,8 +347,18 @@ def run_frontend(frontend: object, input_ids = frontend.get_input_ids( text, merge_sentences=merge_sentences, to_tensor=to_tensor) phone_ids = input_ids["phone_ids"] + elif lang == 'sing': + input_ids = frontend.get_input_ids( + svs_input=svs_input, to_tensor=to_tensor) + phone_ids = input_ids["phone_ids"] + note_ids = input_ids["note_ids"] + note_durs = input_ids["note_durs"] + is_slurs = input_ids["is_slurs"] + outs.update({'note_ids': note_ids}) + outs.update({'note_durs': note_durs}) + outs.update({'is_slurs': is_slurs}) else: - print("lang should in {'zh', 'en', 'mix', 'canton'}!") + print("lang should in {'zh', 'en', 'mix', 'canton', 'sing'}!") outs.update({'phone_ids': phone_ids}) return outs @@ -474,6 +505,7 @@ def am_to_static(am_inference, elif am_name == 'tacotron2': am_inference = jit.to_static( am_inference, input_spec=[InputSpec([-1], dtype=paddle.int64)]) + elif am_name == 'vits': if am_dataset in {"aishell3", "vctk"} and speaker_dict is not None: am_inference = jit.to_static( @@ -485,8 +517,20 @@ def am_to_static(am_inference, else: am_inference = jit.to_static( am_inference, input_spec=[InputSpec([-1], dtype=paddle.int64)]) + + elif 
am_name == 'diffsinger': + am_inference = jit.to_static( + am_inference, + input_spec=[ + InputSpec([-1], dtype=paddle.int64), # phone + InputSpec([-1], dtype=paddle.int64), # note + InputSpec([-1], dtype=paddle.float32), # note_dur + InputSpec([-1], dtype=paddle.int64), # is_slur + ]) + jit.save(am_inference, os.path.join(inference_dir, am)) am_inference = jit.load(os.path.join(inference_dir, am)) + return am_inference diff --git a/paddlespeech/t2s/exps/synthesize_e2e.py b/paddlespeech/t2s/exps/synthesize_e2e.py index db94a6e53..0c7b34b09 100644 --- a/paddlespeech/t2s/exps/synthesize_e2e.py +++ b/paddlespeech/t2s/exps/synthesize_e2e.py @@ -24,6 +24,7 @@ from paddlespeech.t2s.exps.syn_utils import am_to_static from paddlespeech.t2s.exps.syn_utils import get_am_inference from paddlespeech.t2s.exps.syn_utils import get_frontend from paddlespeech.t2s.exps.syn_utils import get_sentences +from paddlespeech.t2s.exps.syn_utils import get_sentences_svs from paddlespeech.t2s.exps.syn_utils import get_voc_inference from paddlespeech.t2s.exps.syn_utils import run_frontend from paddlespeech.t2s.exps.syn_utils import voc_to_static @@ -44,20 +45,18 @@ def evaluate(args): print(am_config) print(voc_config) - sentences = get_sentences(text_file=args.text, lang=args.lang) - # frontend frontend = get_frontend( lang=args.lang, phones_dict=args.phones_dict, tones_dict=args.tones_dict, + pinyin_phone=args.pinyin_phone, use_rhy=args.use_rhy) print("frontend done!") # acoustic model am_name = args.am[:args.am.rindex('_')] am_dataset = args.am[args.am.rindex('_') + 1:] - am_inference = get_am_inference( am=args.am, am_config=am_config, @@ -65,8 +64,10 @@ def evaluate(args): am_stat=args.am_stat, phones_dict=args.phones_dict, tones_dict=args.tones_dict, - speaker_dict=args.speaker_dict) + speaker_dict=args.speaker_dict, + speech_stretchs=args.speech_stretchs, ) print("acoustic model done!") + # vocoder voc_inference = get_voc_inference( voc=args.voc, @@ -103,14 +104,25 @@ def evaluate(args): N = 0 T = 0 + if am_name == 'diffsinger': + sentences = get_sentences_svs(text_file=args.text) + else: + sentences = get_sentences(text_file=args.text, lang=args.lang) for utt_id, sentence in sentences: with timer() as t: + if am_name == "diffsinger": + text = "" + svs_input = sentence + else: + text = sentence + svs_input = None frontend_dict = run_frontend( frontend=frontend, - text=sentence, + text=text, merge_sentences=merge_sentences, get_tone_ids=get_tone_ids, - lang=args.lang) + lang=args.lang, + svs_input=svs_input) phone_ids = frontend_dict['phone_ids'] with paddle.no_grad(): flags = 0 @@ -134,6 +146,15 @@ def evaluate(args): mel = am_inference(part_phone_ids, part_tone_ids) elif am_name == 'tacotron2': mel = am_inference(part_phone_ids) + elif am_name == 'diffsinger': + part_note_ids = frontend_dict['note_ids'][i] + part_note_durs = frontend_dict['note_durs'][i] + part_is_slurs = frontend_dict['is_slurs'][i] + mel = am_inference( + text=part_phone_ids, + note=part_note_ids, + note_dur=part_note_durs, + is_slur=part_is_slurs, ) # vocoder wav = voc_inference(mel) if flags == 0: @@ -178,6 +199,7 @@ def parse_args(): 'fastspeech2_male-zh', 'fastspeech2_male-en', 'fastspeech2_male-mix', + 'diffsinger_opencpop', ], help='Choose acoustic model type of tts task.') parser.add_argument( @@ -223,6 +245,8 @@ def parse_args(): 'wavernn_csmsc', 'pwgan_male', 'hifigan_male', + 'pwgan_opencpop', + 'hifigan_opencpop', ], help='Choose vocoder type of tts task.') parser.add_argument( @@ -240,6 +264,7 @@ def parse_args(): '--lang', 
type=str, default='zh', + choices=['zh', 'en', 'mix', 'canton', 'sing'], help='Choose model language. zh or en or mix') parser.add_argument( @@ -259,6 +284,17 @@ def parse_args(): type=str2bool, default=False, help="run rhythm frontend or not") + parser.add_argument( + "--pinyin_phone", + type=str, + default=None, + help="pinyin to phone map file, using on sing_frontend.") + parser.add_argument( + "--speech_stretchs", + type=str, + default=None, + help="The min and max values of the mel spectrum, using on diffusion of diffsinger." + ) args = parser.parse_args() return args diff --git a/paddlespeech/t2s/frontend/sing_frontend.py b/paddlespeech/t2s/frontend/sing_frontend.py new file mode 100644 index 000000000..c2aecf273 --- /dev/null +++ b/paddlespeech/t2s/frontend/sing_frontend.py @@ -0,0 +1,175 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import re +from typing import Dict +from typing import List + +import librosa +import numpy as np +import paddle +from pypinyin import lazy_pinyin + + +class SingFrontend(): + def __init__(self, pinyin_phone_path: str, phone_vocab_path: str): + """SVS Frontend + + Args: + pinyin_phone_path (str): pinyin to phone file path, a 'pinyin|phones' (like: ba|b a ) pair per line. + phone_vocab_path (str): phone to phone id file path, a 'phone phone id' (like: a 4 ) pair per line. 
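+            Note: pinyins that are missing from pinyin_phone_path are mapped
+                to 'SP' in get_phones() below.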
+        """
+        self.punc = '[:,;。?!“”‘’\':,;.?!]'
+
+        self.pinyin_phones = {'AP': 'AP', 'SP': 'SP'}
+        if pinyin_phone_path:
+            with open(pinyin_phone_path, 'rt', encoding='utf-8') as f:
+                for line in f.readlines():
+                    pinyin_phn = [
+                        x.strip() for x in line.split('|') if x.strip() != ''
+                    ]
+                    self.pinyin_phones[pinyin_phn[0]] = pinyin_phn[1]
+
+        self.vocab_phones = {}
+        if phone_vocab_path:
+            with open(phone_vocab_path, 'rt', encoding='utf-8') as f:
+                phn_id = [line.strip().split() for line in f.readlines()]
+            for phn, id in phn_id:
+                self.vocab_phones[phn] = int(id)
+
+    def get_phones(self, sentence: str) -> List[str]:
+        """get phone list
+
+        Args:
+            sentence (str): sentence
+
+        Returns:
+            List[str]: phones list
+
+        Example:
+            sentence = "你好"
+            phones = ['n i', 'h ao']
+        """
+        # remove all punc
+        sentence = re.sub(self.punc, "", sentence)
+
+        # Pypinyin can't solve polyphonic words
+        sentence = sentence.replace('最长', '最常').replace('长睫毛', '常睫毛') \
+            .replace('那么长', '那么常').replace('多长', '多常') \
+            .replace('很长', '很常')
+
+        # lyric
+        pinyins = lazy_pinyin(sentence, strict=False)
+        # replace unk word with SP
+        pinyins = [
+            pinyin if pinyin in self.pinyin_phones.keys() else "SP"
+            for pinyin in pinyins
+        ]
+        phones = [
+            self.pinyin_phones[pinyin.strip()] for pinyin in pinyins
+            if pinyin.strip() in self.pinyin_phones
+        ]
+
+        return phones
+
+    def get_note_info(self, note_info: str) -> List[str]:
+        note_info = [x.strip() for x in note_info.split('|') if x.strip() != '']
+        return note_info
+
+    def process(
+            self,
+            phones: List[str],
+            notes: List[str],
+            note_durs: List[str], ):
+        new_phones = []
+        new_notes = []
+        new_note_durs = []
+        is_slurs = []
+        assert len(phones) == len(notes) == len(
+            note_durs
+        ), "Please check the input: text, notes and note_durs should be the same length."
+        for i in range(len(phones)):
+            phone = phones[i].split()
+            note = notes[i].split()
+            note_dur = note_durs[i].split()
+
+            for phn in phone:
+                new_phones.append(phn)
+                new_notes.append(note[0])
+                new_note_durs.append(note_dur[0])
+                is_slurs.append(0)
+
+            if len(note) > 1:
+                for i in range(1, len(note)):
+                    new_phones.append(phone[-1])
+                    new_notes.append(note[i])
+                    new_note_durs.append(note_dur[i])
+                    is_slurs.append(1)
+
+        return new_phones, new_notes, new_note_durs, is_slurs
+
+    def get_input_ids(self, svs_input: Dict[str, str],
+                      to_tensor: bool=True) -> Dict[str, List[paddle.Tensor]]:
+        """convert input to int/float.
+
+        Args:
+            svs_input (Dict[str, str]): include keys: if input_type is phoneme, phones, notes, note_durs and is_slurs are needed.
+                if input_type is word, text, notes and note_durs are needed.
+            to_tensor (bool, optional): whether to convert to Tensor. Defaults to True.
+
+        Returns:
+            Dict[str, List[paddle.Tensor]]: result include phone_ids, note_ids, note_durs, is_slurs.
+        """
+        result = {}
+        input_type = svs_input['input_type']
+        if input_type == 'phoneme':
+            assert "phones" in svs_input.keys() and "notes" in svs_input.keys() and "note_durs" in svs_input.keys() and "is_slurs" in svs_input.keys(), \
+                "When input_type is phoneme, phones, notes, note_durs and is_slurs should be in the svs_input."
+            phones = svs_input["phones"].split()
+            notes = svs_input["notes"].split()
+            note_durs = svs_input["note_durs"].split()
+            is_slurs = svs_input["is_slurs"].split()
+            assert len(phones) == len(notes) == len(note_durs) == len(
+                is_slurs
+            ), "Please check the input: phones, notes, note_durs and is_slurs should be the same length."
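+        # NOTE: the phoneme branch above receives pre-aligned per-token fields,
+        # so no slur expansion is needed; the word branch below derives phones
+        # from pinyin and expands slurred notes in process().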
+ elif input_type == "word": + assert "text" in svs_input.keys() and "notes" in svs_input.keys() and "note_durs" in svs_input.keys(), \ + "When input_type is word, text, notes, note_durs, should be in the svs_input." + phones = self.get_phones(svs_input['text']) + notes = self.get_note_info(svs_input['notes']) + note_durs = self.get_note_info(svs_input['note_durs']) + phones, notes, note_durs, is_slurs = self.process( + phones=phones, notes=notes, note_durs=note_durs) + + phone_ids = [self.vocab_phones[phn] for phn in phones] + phone_ids = np.array(phone_ids, np.int64) + note_ids = [ + librosa.note_to_midi(note.split("/")[0]) if note != 'rest' else 0 + for note in notes + ] + note_ids = np.array(note_ids, np.int64) + note_durs = np.array(note_durs, np.float32) + is_slurs = np.array(is_slurs, np.int64) + + if to_tensor: + phone_ids = paddle.to_tensor(phone_ids) + note_ids = paddle.to_tensor(note_ids) + note_durs = paddle.to_tensor(note_durs) + is_slurs = paddle.to_tensor(is_slurs) + + result['phone_ids'] = [phone_ids] + result['note_ids'] = [note_ids] + result['note_durs'] = [note_durs] + result['is_slurs'] = [is_slurs] + + return result From 0a2e367ff438b5b73d181a0a0f54b1dbe73757fb Mon Sep 17 00:00:00 2001 From: TianYuan Date: Tue, 21 Mar 2023 16:25:34 +0800 Subject: [PATCH 32/37] [TTS]clean starganv2 vc model code and add docstring (#2987) * clean code * add docstring --- .../starganv2_vc/AuxiliaryASR/layers.py | 228 +--------------- .../models/starganv2_vc/AuxiliaryASR/model.py | 7 +- .../t2s/models/starganv2_vc/JDCNet/model.py | 126 ++++----- .../t2s/models/starganv2_vc/starganv2_vc.py | 248 +++++++++--------- 4 files changed, 176 insertions(+), 433 deletions(-) diff --git a/paddlespeech/t2s/models/starganv2_vc/AuxiliaryASR/layers.py b/paddlespeech/t2s/models/starganv2_vc/AuxiliaryASR/layers.py index 71b9753c8..5901c805a 100644 --- a/paddlespeech/t2s/models/starganv2_vc/AuxiliaryASR/layers.py +++ b/paddlespeech/t2s/models/starganv2_vc/AuxiliaryASR/layers.py @@ -11,8 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
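
[Editor's note] The slur expansion in `SingFrontend.process` from the previous patch is easiest to see on a tiny example: when one lyric syllable is sung over several notes, each extra note repeats the syllable's final phoneme and is flagged with `is_slur=1`, and pitch names are later mapped to MIDI ids. A minimal self-contained sketch (the helper name `expand_slurs` and the sample inputs are illustrative, not from the patch):

```python
import librosa

def expand_slurs(phones, notes, note_durs):
    """Per-syllable inputs: space-separated phones, and one pitch/duration per note."""
    out_p, out_n, out_d, out_s = [], [], [], []
    for phone, note, dur in zip(phones, notes, note_durs):
        phns, ns, ds = phone.split(), note.split(), dur.split()
        # every phoneme of the syllable takes the first note, unslurred
        for p in phns:
            out_p.append(p); out_n.append(ns[0]); out_d.append(ds[0]); out_s.append(0)
        # each extra note repeats the last phoneme and is marked as a slur
        for n, d in zip(ns[1:], ds[1:]):
            out_p.append(phns[-1]); out_n.append(n); out_d.append(d); out_s.append(1)
    return out_p, out_n, out_d, out_s

p, n, d, s = expand_slurs(["j iao"], ["G#4/Ab4 A#4/Bb4"], ["0.23 0.18"])
print(p)  # ['j', 'iao', 'iao']
print([librosa.note_to_midi(x.split('/')[0]) for x in n])  # [68, 68, 70]
print(s)  # [0, 0, 1]
```
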
-import random - import paddle import paddle.nn.functional as F import paddleaudio.functional as audio_F @@ -46,7 +44,8 @@ class LinearNorm(nn.Layer): self.linear_layer.weight, gain=_calculate_gain(w_init_gain)) def forward(self, x: paddle.Tensor): - return self.linear_layer(x) + out = self.linear_layer(x) + return out class ConvNorm(nn.Layer): @@ -82,85 +81,6 @@ class ConvNorm(nn.Layer): return conv_signal -class CausualConv(nn.Layer): - def __init__(self, - in_channels: int, - out_channels: int, - kernel_size: int=1, - stride: int=1, - padding: int=1, - dilation: int=1, - bias: bool=True, - w_init_gain: str='linear', - param=None): - super().__init__() - if padding is None: - assert (kernel_size % 2 == 1) - padding = int(dilation * (kernel_size - 1) / 2) * 2 - else: - self.padding = padding * 2 - self.conv = nn.Conv1D( - in_channels, - out_channels, - kernel_size=kernel_size, - stride=stride, - padding=self.padding, - dilation=dilation, - bias_attr=bias) - - xavier_uniform_( - self.conv.weight, gain=_calculate_gain(w_init_gain, param=param)) - - def forward(self, x: paddle.Tensor): - x = self.conv(x) - x = x[:, :, :-self.padding] - return x - - -class CausualBlock(nn.Layer): - def __init__(self, - hidden_dim: int, - n_conv: int=3, - dropout_p: float=0.2, - activ: str='lrelu'): - super().__init__() - self.blocks = nn.LayerList([ - self._get_conv( - hidden_dim=hidden_dim, - dilation=3**i, - activ=activ, - dropout_p=dropout_p) for i in range(n_conv) - ]) - - def forward(self, x): - for block in self.blocks: - res = x - x = block(x) - x += res - return x - - def _get_conv(self, - hidden_dim: int, - dilation: int, - activ: str='lrelu', - dropout_p: float=0.2): - layers = [ - CausualConv( - in_channels=hidden_dim, - out_channels=hidden_dim, - kernel_size=3, - padding=dilation, - dilation=dilation), _get_activation_fn(activ), - nn.BatchNorm1D(hidden_dim), nn.Dropout(p=dropout_p), CausualConv( - in_channels=hidden_dim, - out_channels=hidden_dim, - kernel_size=3, - padding=1, - dilation=1), _get_activation_fn(activ), nn.Dropout(p=dropout_p) - ] - return nn.Sequential(*layers) - - class ConvBlock(nn.Layer): def __init__(self, hidden_dim: int, @@ -264,13 +184,14 @@ class Attention(nn.Layer): """ Args: query: - decoder output (batch, n_mel_channels * n_frames_per_step) + decoder output (B, n_mel_channels * n_frames_per_step) processed_memory: processed encoder outputs (B, T_in, attention_dim) attention_weights_cat: cumulative and prev. 
att weights (B, 2, max_time) Returns: - Tensor: alignment (batch, max_time) + Tensor: + alignment (B, max_time) """ processed_query = self.query_layer(query.unsqueeze(1)) @@ -316,144 +237,6 @@ class Attention(nn.Layer): return attention_context, attention_weights -class ForwardAttentionV2(nn.Layer): - def __init__(self, - attention_rnn_dim: int, - embedding_dim: int, - attention_dim: int, - attention_location_n_filters: int, - attention_location_kernel_size: int): - super().__init__() - self.query_layer = LinearNorm( - in_dim=attention_rnn_dim, - out_dim=attention_dim, - bias=False, - w_init_gain='tanh') - self.memory_layer = LinearNorm( - in_dim=embedding_dim, - out_dim=attention_dim, - bias=False, - w_init_gain='tanh') - self.v = LinearNorm(in_dim=attention_dim, out_dim=1, bias=False) - self.location_layer = LocationLayer( - attention_n_filters=attention_location_n_filters, - attention_kernel_size=attention_location_kernel_size, - attention_dim=attention_dim) - self.score_mask_value = -float(1e20) - - def get_alignment_energies(self, - query: paddle.Tensor, - processed_memory: paddle.Tensor, - attention_weights_cat: paddle.Tensor): - """ - Args: - query: - decoder output (batch, n_mel_channels * n_frames_per_step) - processed_memory: - processed encoder outputs (B, T_in, attention_dim) - attention_weights_cat: - prev. and cumulative att weights (B, 2, max_time) - Returns: - Tensor: alignment (batch, max_time) - """ - - processed_query = self.query_layer(query.unsqueeze(1)) - processed_attention_weights = self.location_layer(attention_weights_cat) - energies = self.v( - paddle.tanh(processed_query + processed_attention_weights + - processed_memory)) - - energies = energies.squeeze(-1) - return energies - - def forward(self, - attention_hidden_state: paddle.Tensor, - memory: paddle.Tensor, - processed_memory: paddle.Tensor, - attention_weights_cat: paddle.Tensor, - mask: paddle.Tensor, - log_alpha: paddle.Tensor): - """ - Args: - attention_hidden_state: - attention rnn last output - memory: - encoder outputs - processed_memory: - processed encoder outputs - attention_weights_cat: - previous and cummulative attention weights - mask: - binary mask for padded data - """ - log_energy = self.get_alignment_energies( - query=attention_hidden_state, - processed_memory=processed_memory, - attention_weights_cat=attention_weights_cat) - - if mask is not None: - log_energy[:] = paddle.where( - mask, - paddle.full(log_energy.shape, self.score_mask_value, - log_energy.dtype), log_energy) - log_alpha_shift_padded = [] - max_time = log_energy.shape[1] - for sft in range(2): - shifted = log_alpha[:, :max_time - sft] - shift_padded = F.pad(shifted, (sft, 0), 'constant', - self.score_mask_value) - log_alpha_shift_padded.append(shift_padded.unsqueeze(2)) - - biased = paddle.logsumexp(paddle.conat(log_alpha_shift_padded, 2), 2) - log_alpha_new = biased + log_energy - attention_weights = F.softmax(log_alpha_new, axis=1) - attention_context = paddle.bmm(attention_weights.unsqueeze(1), memory) - attention_context = attention_context.squeeze(1) - - return attention_context, attention_weights, log_alpha_new - - -class PhaseShuffle2D(nn.Layer): - def __init__(self, n: int=2): - super().__init__() - self.n = n - self.random = random.Random(1) - - def forward(self, x: paddle.Tensor, move: int=None): - # x.size = (B, C, M, L) - if move is None: - move = self.random.randint(-self.n, self.n) - - if move == 0: - return x - else: - left = x[:, :, :, :move] - right = x[:, :, :, move:] - shuffled = paddle.concat([right, 
left], axis=3) - return shuffled - - -class PhaseShuffle1D(nn.Layer): - def __init__(self, n: int=2): - super().__init__() - self.n = n - self.random = random.Random(1) - - def forward(self, x: paddle.Tensor, move: int=None): - # x.size = (B, C, M, L) - if move is None: - move = self.random.randint(-self.n, self.n) - - if move == 0: - return x - else: - left = x[:, :, :move] - right = x[:, :, move:] - shuffled = paddle.concat([right, left], axis=2) - - return shuffled - - class MFCC(nn.Layer): def __init__(self, n_mfcc: int=40, n_mels: int=80): super().__init__() @@ -473,7 +256,6 @@ class MFCC(nn.Layer): # -> (channel, time, n_mfcc).tranpose(...) mfcc = paddle.matmul(mel_specgram.transpose([0, 2, 1]), self.dct_mat).transpose([0, 2, 1]) - # unpack batch if unsqueezed: mfcc = mfcc.squeeze(0) diff --git a/paddlespeech/t2s/models/starganv2_vc/AuxiliaryASR/model.py b/paddlespeech/t2s/models/starganv2_vc/AuxiliaryASR/model.py index 48de8af1f..251974572 100644 --- a/paddlespeech/t2s/models/starganv2_vc/AuxiliaryASR/model.py +++ b/paddlespeech/t2s/models/starganv2_vc/AuxiliaryASR/model.py @@ -99,7 +99,7 @@ class ASRCNN(nn.Layer): unmask_futre_steps (int): unmasking future step size. Return: - mask (paddle.BoolTensor): + Tensor (paddle.Tensor(bool)): mask future timesteps mask[i, j] = True if i > j + unmask_future_steps else False """ index_tensor = paddle.arange(out_length).unsqueeze(0).expand( @@ -194,9 +194,8 @@ class ASRS2S(nn.Layer): logit_outputs += [logit] alignments += [attention_weights] - hidden_outputs, logit_outputs, alignments = \ - self.parse_decoder_outputs( - hidden_outputs, logit_outputs, alignments) + hidden_outputs, logit_outputs, alignments = self.parse_decoder_outputs( + hidden_outputs, logit_outputs, alignments) return hidden_outputs, logit_outputs, alignments diff --git a/paddlespeech/t2s/models/starganv2_vc/JDCNet/model.py b/paddlespeech/t2s/models/starganv2_vc/JDCNet/model.py index 118b8f0e2..5938e6a7c 100644 --- a/paddlespeech/t2s/models/starganv2_vc/JDCNet/model.py +++ b/paddlespeech/t2s/models/starganv2_vc/JDCNet/model.py @@ -33,10 +33,9 @@ class JDCNet(nn.Layer): super().__init__() self.seq_len = seq_len self.num_class = num_class - - # input = (b, 1, 31, 513), b = batch size + # input: (B, num_class, T, n_mels) self.conv_block = nn.Sequential( - # out: (b, 64, 31, 513) + # output: (B, out_channels, T, n_mels) nn.Conv2D( in_channels=1, out_channels=64, @@ -45,127 +44,99 @@ class JDCNet(nn.Layer): bias_attr=False), nn.BatchNorm2D(num_features=64), nn.LeakyReLU(leaky_relu_slope), - # (b, 64, 31, 513) + # out: (B, out_channels, T, n_mels) nn.Conv2D(64, 64, 3, padding=1, bias_attr=False), ) - - # res blocks - # (b, 128, 31, 128) + # output: (B, out_channels, T, n_mels // 2) self.res_block1 = ResBlock(in_channels=64, out_channels=128) - # (b, 192, 31, 32) + # output: (B, out_channels, T, n_mels // 4) self.res_block2 = ResBlock(in_channels=128, out_channels=192) - # (b, 256, 31, 8) + # output: (B, out_channels, T, n_mels // 8) self.res_block3 = ResBlock(in_channels=192, out_channels=256) - # pool block self.pool_block = nn.Sequential( nn.BatchNorm2D(num_features=256), nn.LeakyReLU(leaky_relu_slope), - # (b, 256, 31, 2) + # (B, num_features, T, 2) nn.MaxPool2D(kernel_size=(1, 4)), nn.Dropout(p=0.5), ) - - # maxpool layers (for auxiliary network inputs) - # in = (b, 128, 31, 513) from conv_block, out = (b, 128, 31, 2) - self.maxpool1 = nn.MaxPool2D(kernel_size=(1, 40)) - # in = (b, 128, 31, 128) from res_block1, out = (b, 128, 31, 2) - self.maxpool2 = 
nn.MaxPool2D(kernel_size=(1, 20)) - # in = (b, 128, 31, 32) from res_block2, out = (b, 128, 31, 2) - self.maxpool3 = nn.MaxPool2D(kernel_size=(1, 10)) - - # in = (b, 640, 31, 2), out = (b, 256, 31, 2) - self.detector_conv = nn.Sequential( - nn.Conv2D( - in_channels=640, - out_channels=256, - kernel_size=1, - bias_attr=False), - nn.BatchNorm2D(256), - nn.LeakyReLU(leaky_relu_slope), - nn.Dropout(p=0.5), ) - - # input: (b, 31, 512) - resized from (b, 256, 31, 2) - # output: (b, 31, 512) + # input: (B, T, input_size), resized from (B, input_size // 2, T, 2) + # output: (B, T, input_size) self.bilstm_classifier = nn.LSTM( input_size=512, hidden_size=256, time_major=False, direction='bidirectional') - - # input: (b, 31, 512) - resized from (b, 256, 31, 2) - # output: (b, 31, 512) - self.bilstm_detector = nn.LSTM( - input_size=512, - hidden_size=256, - time_major=False, - direction='bidirectional') - - # input: (b * 31, 512) - # output: (b * 31, num_class) + # input: (B * T, in_features) + # output: (B * T, num_class) self.classifier = nn.Linear( in_features=512, out_features=self.num_class) - # input: (b * 31, 512) - # output: (b * 31, 2) - binary classifier - self.detector = nn.Linear(in_features=512, out_features=2) - # initialize weights self.apply(self.init_weights) def get_feature_GAN(self, x: paddle.Tensor): - seq_len = x.shape[-2] - x = x.astype(paddle.float32).transpose([0, 1, 3, 2] if len(x.shape) == 4 - else [0, 2, 1]) - + """Calculate feature_GAN. + Args: + x(Tensor(float32)): + Shape (B, num_class, n_mels, T). + Returns: + Tensor: + Shape (B, num_features, n_mels // 8, T). + """ + x = x.astype(paddle.float32) + x = x.transpose([0, 1, 3, 2] if len(x.shape) == 4 else [0, 2, 1]) convblock_out = self.conv_block(x) - resblock1_out = self.res_block1(convblock_out) resblock2_out = self.res_block2(resblock1_out) resblock3_out = self.res_block3(resblock2_out) poolblock_out = self.pool_block[0](resblock3_out) poolblock_out = self.pool_block[1](poolblock_out) - - return poolblock_out.transpose([0, 1, 3, 2] if len(poolblock_out.shape) - == 4 else [0, 2, 1]) + GAN_feature = poolblock_out.transpose([0, 1, 3, 2] if len( + poolblock_out.shape) == 4 else [0, 2, 1]) + return GAN_feature def forward(self, x: paddle.Tensor): - """ + """Calculate forward propagation. + Args: + x(Tensor(float32)): + Shape (B, num_class, n_mels, seq_len). Returns: - classification_prediction, detection_prediction - sizes: (b, 31, 722), (b, 31, 2) + Tensor: + classifier output consists of predicted pitch classes per frame. + Shape: (B, seq_len, num_class). + Tensor: + GAN_feature. Shape: (B, num_features, n_mels // 8, seq_len) + Tensor: + poolblock_out. 
Shape (B, seq_len, 512) """ ############################### # forward pass for classifier # ############################### + # (B, num_class, n_mels, T) -> (B, num_class, T, n_mels) x = x.transpose([0, 1, 3, 2] if len(x.shape) == 4 else [0, 2, 1]).astype(paddle.float32) convblock_out = self.conv_block(x) - resblock1_out = self.res_block1(convblock_out) resblock2_out = self.res_block2(resblock1_out) resblock3_out = self.res_block3(resblock2_out) - poolblock_out = self.pool_block[0](resblock3_out) poolblock_out = self.pool_block[1](poolblock_out) GAN_feature = poolblock_out.transpose([0, 1, 3, 2] if len( poolblock_out.shape) == 4 else [0, 2, 1]) poolblock_out = self.pool_block[2](poolblock_out) - - # (b, 256, 31, 2) => (b, 31, 256, 2) => (b, 31, 512) + # (B, 256, seq_len, 2) => (B, seq_len, 256, 2) => (B, seq_len, 512) classifier_out = poolblock_out.transpose([0, 2, 1, 3]).reshape( (-1, self.seq_len, 512)) self.bilstm_classifier.flatten_parameters() - classifier_out, _ = self.bilstm_classifier( - classifier_out) # ignore the hidden states - - classifier_out = classifier_out.reshape((-1, 512)) # (b * 31, 512) + # ignore the hidden states + classifier_out, _ = self.bilstm_classifier(classifier_out) + # (B * seq_len, 512) + classifier_out = classifier_out.reshape((-1, 512)) classifier_out = self.classifier(classifier_out) + # (B, seq_len, num_class) classifier_out = classifier_out.reshape( - (-1, self.seq_len, self.num_class)) # (b, 31, num_class) - - # sizes: (b, 31, 722), (b, 31, 2) - # classifier output consists of predicted pitch classes per frame - # detector output consists of: (isvoice, notvoice) estimates per frame + (-1, self.seq_len, self.num_class)) return paddle.abs(classifier_out.squeeze()), GAN_feature, poolblock_out @staticmethod @@ -188,10 +159,9 @@ class ResBlock(nn.Layer): def __init__(self, in_channels: int, out_channels: int, - leaky_relu_slope=0.01): + leaky_relu_slope: float=0.01): super().__init__() self.downsample = in_channels != out_channels - # BN / LReLU / MaxPool layer before the conv layer - see Figure 1b in the paper self.pre_conv = nn.Sequential( nn.BatchNorm2D(num_features=in_channels), @@ -215,7 +185,6 @@ class ResBlock(nn.Layer): kernel_size=3, padding=1, bias_attr=False), ) - # 1 x 1 convolution layer to match the feature dimensions self.conv1by1 = None if self.downsample: @@ -226,6 +195,13 @@ class ResBlock(nn.Layer): bias_attr=False) def forward(self, x: paddle.Tensor): + """Calculate forward propagation. + Args: + x(Tensor(float32)): Shape (B, in_channels, T, n_mels). + Returns: + Tensor: + The residual output, Shape (B, out_channels, T, n_mels // 2). + """ x = self.pre_conv(x) if self.downsample: x = self.conv(x) + self.conv1by1(x) diff --git a/paddlespeech/t2s/models/starganv2_vc/starganv2_vc.py b/paddlespeech/t2s/models/starganv2_vc/starganv2_vc.py index 96e9eda81..2a96b30c6 100644 --- a/paddlespeech/t2s/models/starganv2_vc/starganv2_vc.py +++ b/paddlespeech/t2s/models/starganv2_vc/starganv2_vc.py @@ -19,31 +19,36 @@ This work is licensed under the Creative Commons Attribution-NonCommercial http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. 
""" -# import copy import math import paddle import paddle.nn.functional as F from paddle import nn -from paddlespeech.utils.initialize import _calculate_gain -from paddlespeech.utils.initialize import xavier_uniform_ - -# from munch import Munch - class DownSample(nn.Layer): def __init__(self, layer_type: str): super().__init__() self.layer_type = layer_type - def forward(self, x): + def forward(self, x: paddle.Tensor): + """Calculate forward propagation. + Args: + x(Tensor(float32)): Shape (B, dim_in, n_mels, T). + Returns: + Tensor: + layer_type == 'none': Shape (B, dim_in, n_mels, T) + layer_type == 'timepreserve': Shape (B, dim_in, n_mels // 2, T) + layer_type == 'half': Shape (B, dim_in, n_mels // 2, T // 2) + """ if self.layer_type == 'none': return x elif self.layer_type == 'timepreserve': - return F.avg_pool2d(x, (2, 1)) + out = F.avg_pool2d(x, (2, 1)) + return out elif self.layer_type == 'half': - return F.avg_pool2d(x, 2) + out = F.avg_pool2d(x, 2) + return out else: raise RuntimeError( 'Got unexpected donwsampletype %s, expected is [none, timepreserve, half]' @@ -55,13 +60,24 @@ class UpSample(nn.Layer): super().__init__() self.layer_type = layer_type - def forward(self, x): + def forward(self, x: paddle.Tensor): + """Calculate forward propagation. + Args: + x(Tensor(float32)): Shape (B, dim_in, n_mels, T). + Returns: + Tensor: + layer_type == 'none': Shape (B, dim_in, n_mels, T) + layer_type == 'timepreserve': Shape (B, dim_in, n_mels * 2, T) + layer_type == 'half': Shape (B, dim_in, n_mels * 2, T * 2) + """ if self.layer_type == 'none': return x elif self.layer_type == 'timepreserve': - return F.interpolate(x, scale_factor=(2, 1), mode='nearest') + out = F.interpolate(x, scale_factor=(2, 1), mode='nearest') + return out elif self.layer_type == 'half': - return F.interpolate(x, scale_factor=2, mode='nearest') + out = F.interpolate(x, scale_factor=2, mode='nearest') + return out else: raise RuntimeError( 'Got unexpected upsampletype %s, expected is [none, timepreserve, half]' @@ -127,9 +143,19 @@ class ResBlk(nn.Layer): return x def forward(self, x: paddle.Tensor): + """Calculate forward propagation. + Args: + x(Tensor(float32)): Shape (B, dim_in, n_mels, T). + Returns: + Tensor: + downsample == 'none': Shape (B, dim_in, n_mels, T). + downsample == 'timepreserve': Shape (B, dim_out, T, n_mels // 2, T). + downsample == 'half': Shape (B, dim_out, T, n_mels // 2, T // 2). + """ x = self._shortcut(x) + self._residual(x) # unit variance - return x / math.sqrt(2) + out = x / math.sqrt(2) + return out class AdaIN(nn.Layer): @@ -140,12 +166,21 @@ class AdaIN(nn.Layer): self.fc = nn.Linear(style_dim, num_features * 2) def forward(self, x: paddle.Tensor, s: paddle.Tensor): + """Calculate forward propagation. + Args: + x(Tensor(float32)): Shape (B, style_dim, n_mels, T). + s(Tensor(float32)): Shape (style_dim, ). + Returns: + Tensor: + Shape (B, style_dim, T, n_mels, T). 
+ """ if len(s.shape) == 1: s = s[None] h = self.fc(s) h = h.reshape((h.shape[0], h.shape[1], 1, 1)) gamma, beta = paddle.split(h, 2, axis=1) - return (1 + gamma) * self.norm(x) + beta + out = (1 + gamma) * self.norm(x) + beta + return out class AdainResBlk(nn.Layer): @@ -162,6 +197,7 @@ class AdainResBlk(nn.Layer): self.upsample = UpSample(layer_type=upsample) self.learned_sc = dim_in != dim_out self._build_weights(dim_in, dim_out, style_dim) + self.layer_type = upsample def _build_weights(self, dim_in: int, dim_out: int, style_dim: int=64): self.conv1 = nn.Conv2D( @@ -204,6 +240,18 @@ class AdainResBlk(nn.Layer): return x def forward(self, x: paddle.Tensor, s: paddle.Tensor): + """Calculate forward propagation. + Args: + x(Tensor(float32)): + Shape (B, dim_in, n_mels, T). + s(Tensor(float32)): + Shape (64,). + Returns: + Tensor: + upsample == 'none': Shape (B, dim_out, T, n_mels, T). + upsample == 'timepreserve': Shape (B, dim_out, T, n_mels * 2, T). + upsample == 'half': Shape (B, dim_out, T, n_mels * 2, T * 2). + """ out = self._residual(x, s) if self.w_hpf == 0: out = (out + self._shortcut(x)) / math.sqrt(2) @@ -219,7 +267,8 @@ class HighPass(nn.Layer): def forward(self, x: paddle.Tensor): filter = self.filter.unsqueeze(0).unsqueeze(1).tile( [x.shape[1], 1, 1, 1]) - return F.conv2d(x, filter, padding=1, groups=x.shape[1]) + out = F.conv2d(x, filter, padding=1, groups=x.shape[1]) + return out class Generator(nn.Layer): @@ -276,12 +325,10 @@ class Generator(nn.Layer): w_hpf=w_hpf, upsample=_downtype)) # stack-like dim_in = dim_out - # bottleneck blocks (encoder) for _ in range(2): self.encode.append( ResBlk(dim_in=dim_out, dim_out=dim_out, normalize=True)) - # F0 blocks if F0_channel != 0: self.decode.insert(0, @@ -290,7 +337,6 @@ class Generator(nn.Layer): dim_out=dim_out, style_dim=style_dim, w_hpf=w_hpf)) - # bottleneck blocks (decoder) for _ in range(2): self.decode.insert(0, @@ -299,7 +345,6 @@ class Generator(nn.Layer): dim_out=dim_out + int(F0_channel / 2), style_dim=style_dim, w_hpf=w_hpf)) - if F0_channel != 0: self.F0_conv = nn.Sequential( ResBlk( @@ -307,7 +352,6 @@ class Generator(nn.Layer): dim_out=int(F0_channel / 2), normalize=True, downsample="half"), ) - if w_hpf > 0: self.hpf = HighPass(w_hpf) @@ -316,26 +360,44 @@ class Generator(nn.Layer): s: paddle.Tensor, masks: paddle.Tensor=None, F0: paddle.Tensor=None): + """Calculate forward propagation. + Args: + x(Tensor(float32)): + Shape (B, 1, n_mels, T). + s(Tensor(float32)): + Shape (64,). + masks: + None. + F0: + Shape (B, num_features(256), n_mels // 8, T). + Returns: + Tensor: + output of generator. 
 
 
 class Generator(nn.Layer):
@@ -276,12 +325,10 @@
                     w_hpf=w_hpf,
                     upsample=_downtype))  # stack-like
             dim_in = dim_out
-
         # bottleneck blocks (encoder)
         for _ in range(2):
             self.encode.append(
                 ResBlk(dim_in=dim_out, dim_out=dim_out, normalize=True))
-
         # F0 blocks
         if F0_channel != 0:
             self.decode.insert(0,
@@ -290,7 +337,6 @@
                         dim_out=dim_out,
                         style_dim=style_dim,
                         w_hpf=w_hpf))
-
         # bottleneck blocks (decoder)
         for _ in range(2):
             self.decode.insert(0,
@@ -299,7 +345,6 @@
                         dim_out=dim_out + int(F0_channel / 2),
                         style_dim=style_dim,
                         w_hpf=w_hpf))
-
         if F0_channel != 0:
             self.F0_conv = nn.Sequential(
                 ResBlk(
@@ -307,7 +352,6 @@
                     dim_out=int(F0_channel / 2),
                     normalize=True,
                     downsample="half"), )
-
         if w_hpf > 0:
             self.hpf = HighPass(w_hpf)
 
@@ -316,26 +360,44 @@
             s: paddle.Tensor,
             masks: paddle.Tensor=None,
             F0: paddle.Tensor=None):
+        """Calculate forward propagation.
+        Args:
+            x(Tensor(float32)):
+                Shape (B, 1, n_mels, T).
+            s(Tensor(float32)):
+                Shape (style_dim, ).
+            masks:
+                None.
+            F0:
+                Shape (B, num_features(256), n_mels // 8, T).
+        Returns:
+            Tensor:
+                output of generator. Shape (B, 1, n_mels, T // 4 * 4).
+        """
         x = self.stem(x)
         cache = {}
+        # output: (B, max_conv_dim, n_mels // 16, T // 4)
         for block in self.encode:
             if (masks is not None) and (x.shape[2] in [32, 64, 128]):
                 cache[x.shape[2]] = x
             x = block(x)
-
         if F0 is not None:
+            # input: (B, num_features(256), n_mels // 8, T)
+            # output: (B, num_features(256) // 2, n_mels // 16, T // 2)
             F0 = self.F0_conv(F0)
+            # output: (B, num_features(256) // 2, n_mels // 16, T // 4)
             F0 = F.adaptive_avg_pool2d(F0, [x.shape[-2], x.shape[-1]])
             x = paddle.concat([x, F0], axis=1)
-
+        # input: (B, max_conv_dim + num_features(256) // 2, n_mels // 16, T // 4)
+        # output: (B, dim_in, n_mels, T // 4 * 4)
         for block in self.decode:
             x = block(x, s)
             if (masks is not None) and (x.shape[2] in [32, 64, 128]):
                 mask = masks[0] if x.shape[2] in [32] else masks[1]
                 mask = F.interpolate(mask, size=x.shape[2], mode='bilinear')
                 x = x + self.hpf(mask * cache[x.shape[2]])
-
-        return self.to_out(x)
+        out = self.to_out(x)
+        return out
 
 
 class MappingNetwork(nn.Layer):
@@ -366,14 +428,25 @@
         ])
 
     def forward(self, z: paddle.Tensor, y: paddle.Tensor):
+        """Calculate forward propagation.
+        Args:
+            z(Tensor(float32)):
+                latent code. Shape (B, latent_dim).
+            y(Tensor(float32)):
+                speaker label. Shape (B, ).
+        Returns:
+            Tensor:
+                Shape (B, style_dim).
+        """
+
         h = self.shared(z)
         out = []
         for layer in self.unshared:
             out += [layer(h)]
-        # (batch, num_domains, style_dim)
+        # (B, num_domains, style_dim)
         out = paddle.stack(out, axis=1)
         idx = paddle.arange(y.shape[0])
-        # (batch, style_dim)
+        # (B, style_dim)
         s = out[idx, y]
         return s
@@ -419,15 +492,25 @@
         self.unshared.append(nn.Linear(dim_out, style_dim))
 
     def forward(self, x: paddle.Tensor, y: paddle.Tensor):
+        """Calculate forward propagation.
+        Args:
+            x(Tensor(float32)):
+                Shape (B, 1, n_mels, T).
+            y(Tensor(float32)):
+                speaker label. Shape (B, ).
+        Returns:
+            Tensor:
+                Shape (B, style_dim).
+        """
         h = self.shared(x)
         h = h.reshape((h.shape[0], -1))
         out = []
         for layer in self.unshared:
             out += [layer(h)]
-        # (batch, num_domains, style_dim)
+        # (B, num_domains, style_dim)
         out = paddle.stack(out, axis=1)
         idx = paddle.arange(y.shape[0])
-        # (batch, style_dim)
+        # (B, style_dim)
         s = out[idx, y]
         return s
@@ -454,25 +537,12 @@
         self.num_domains = num_domains
 
     def forward(self, x: paddle.Tensor, y: paddle.Tensor):
-        return self.dis(x, y)
+        out = self.dis(x, y)
+        return out
 
     def classifier(self, x: paddle.Tensor):
-        return self.cls.get_feature(x)
-
-
-class LinearNorm(nn.Layer):
-    def __init__(self,
-                 in_dim: int,
-                 out_dim: int,
-                 bias: bool=True,
-                 w_init_gain: str='linear'):
-        super().__init__()
-        self.linear_layer = nn.Linear(in_dim, out_dim, bias_attr=bias)
-        xavier_uniform_(
-            self.linear_layer.weight, gain=_calculate_gain(w_init_gain))
-
-    def forward(self, x):
-        return self.linear_layer(x)
+        out = self.cls.get_feature(x)
+        return out
 
 
 class Discriminator2D(nn.Layer):
@@ -520,97 +590,13 @@
 
     def get_feature(self, x: paddle.Tensor):
         out = self.main(x)
-        # (batch, num_domains)
+        # (B, num_domains)
         out = out.reshape((out.shape[0], -1))
         return out
 
     def forward(self, x: paddle.Tensor, y: paddle.Tensor):
         out = self.get_feature(x)
         idx = paddle.arange(y.shape[0])
-        # (batch)
+        # (B, )
         out = out[idx, y]
         return out
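MappingNetwork, StyleEncoder and Discriminator2D all end with the same gather pattern: stack one output per domain along axis 1, then select each sample's own domain column with `out[idx, y]`. A small sketch with assumed sizes:

```python
import paddle

B, num_domains, style_dim = 4, 20, 64  # assumed sizes
out = paddle.randn([B, num_domains, style_dim])
y = paddle.to_tensor([3, 0, 7, 19])    # one domain (speaker) label per sample

idx = paddle.arange(B)
s = out[idx, y]  # row i takes column y[i]: shape (B, style_dim)
print(s.shape)   # [4, 64]
```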
-
-
-'''
-def build_model(args, F0_model: nn.Layer, ASR_model: nn.Layer):
-    generator = Generator(
-        dim_in=args.dim_in,
-        style_dim=args.style_dim,
-        max_conv_dim=args.max_conv_dim,
-        w_hpf=args.w_hpf,
-        F0_channel=args.F0_channel)
-    mapping_network = MappingNetwork(
-        latent_dim=args.latent_dim,
-        style_dim=args.style_dim,
-        num_domains=args.num_domains,
-        hidden_dim=args.max_conv_dim)
-    style_encoder = StyleEncoder(
-        dim_in=args.dim_in,
-        style_dim=args.style_dim,
-        num_domains=args.num_domains,
-        max_conv_dim=args.max_conv_dim)
-    discriminator = Discriminator(
-        dim_in=args.dim_in,
-        num_domains=args.num_domains,
-        max_conv_dim=args.max_conv_dim,
-        n_repeat=args.n_repeat)
-    generator_ema = copy.deepcopy(generator)
-    mapping_network_ema = copy.deepcopy(mapping_network)
-    style_encoder_ema = copy.deepcopy(style_encoder)
-
-    nets = Munch(
-        generator=generator,
-        mapping_network=mapping_network,
-        style_encoder=style_encoder,
-        discriminator=discriminator,
-        f0_model=F0_model,
-        asr_model=ASR_model)
-
-    nets_ema = Munch(
-        generator=generator_ema,
-        mapping_network=mapping_network_ema,
-        style_encoder=style_encoder_ema)
-
-    return nets, nets_ema
-
-
-class StarGANv2VC(nn.Layer):
-    def __init__(
-            self,
-            # spk_num
-            num_domains: int=20,
-            dim_in: int=64,
-            style_dim: int=64,
-            latent_dim: int=16,
-            max_conv_dim: int=512,
-            n_repeat: int=4,
-            w_hpf: int=0,
-            F0_channel: int=256):
-        super().__init__()
-
-        self.generator = Generator(
-            dim_in=dim_in,
-            style_dim=style_dim,
-            max_conv_dim=max_conv_dim,
-            w_hpf=w_hpf,
-            F0_channel=F0_channel)
-        # MappingNetwork and StyleEncoder are used to generate reference_embeddings
-        self.mapping_network = MappingNetwork(
-            latent_dim=latent_dim,
-            style_dim=style_dim,
-            num_domains=num_domains,
-            hidden_dim=max_conv_dim)
-
-        self.style_encoder = StyleEncoder(
-            dim_in=dim_in,
-            style_dim=style_dim,
-            num_domains=num_domains,
-            max_conv_dim=max_conv_dim)
-
-        self.discriminator = Discriminator(
-            dim_in=dim_in,
-            num_domains=num_domains,
-            max_conv_dim=max_conv_dim,
-            repeat_num=n_repeat)
-'''

From ca575bdda351c634bac83fc4f75ab9abb70ec5c9 Mon Sep 17 00:00:00 2001
From: zxcd <228587199@qq.com>
Date: Tue, 21 Mar 2023 18:52:17 +0800
Subject: [PATCH 33/37] [Doc] change define asr server config to chunk asr config, test=doc (#3067)

* Update README.md

* Update README_cn.md
---
 demos/streaming_asr_server/README.md    | 6 +++---
 demos/streaming_asr_server/README_cn.md | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/demos/streaming_asr_server/README.md b/demos/streaming_asr_server/README.md
index 1d33b694b..c15d0601c 100644
--- a/demos/streaming_asr_server/README.md
+++ b/demos/streaming_asr_server/README.md
@@ -9,7 +9,7 @@ This demo is an implementation of starting the streaming speech service and acce
 
 Streaming ASR server only support `websocket` protocol, and doesn't support `http` protocol.
 
-服务接口定义请参考:
+For service interface definitions, please refer to:
 - [PaddleSpeech Streaming Server WebSocket API](https://github.com/PaddlePaddle/PaddleSpeech/wiki/PaddleSpeech-Server-WebSocket-API)
 
 ## Usage
@@ -23,7 +23,7 @@ You can choose one way from easy, meduim and hard to install paddlespeech.
 **If you install in easy mode, you need to prepare the yaml file by yourself, you can refer to
 
 ### 2. Prepare config File
-The configuration file can be found in `conf/ws_application.yaml` 和 `conf/ws_conformer_wenetspeech_application.yaml`.
+The configuration file can be found in `conf/ws_application.yaml` or `conf/ws_conformer_wenetspeech_application.yaml`.
 At present, the speech tasks integrated by the model include: DeepSpeech2 and conformer.
@@ -87,7 +87,7 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav
     server_executor = ServerExecutor()
     server_executor(
-        config_file="./conf/ws_conformer_wenetspeech_application.yaml",
+        config_file="./conf/ws_conformer_wenetspeech_application_faster.yaml",
        log_file="./log/paddlespeech.log")
     ```
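A usage note on the hunk above: with the server started through `ServerExecutor`, the same demo can be driven from Python via the streaming client. A minimal sketch, assuming the client API shipped with the demo and the default ip/port from the yaml config:

```python
from paddlespeech.server.bin.paddlespeech_client import ASROnlineClientExecutor

asrclient_executor = ASROnlineClientExecutor()
res = asrclient_executor(
    input="./zh.wav",       # the sample wav fetched above
    server_ip="127.0.0.1",  # assumed default
    port=8090,              # assumed: must match the port in the yaml config
    sample_rate=16000,
    lang="zh_cn")
print(res)
```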
diff --git a/demos/streaming_asr_server/README_cn.md b/demos/streaming_asr_server/README_cn.md
index 1902a2fa9..26a6ce404 100644
--- a/demos/streaming_asr_server/README_cn.md
+++ b/demos/streaming_asr_server/README_cn.md
@@ -90,7 +90,7 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav
     server_executor = ServerExecutor()
     server_executor(
-        config_file="./conf/ws_conformer_wenetspeech_application",
+        config_file="./conf/ws_conformer_wenetspeech_application_faster.yaml",
         log_file="./log/paddlespeech.log")
     ```

From 1a272e11de4e4d3ed970121a61904c831c53e3cf Mon Sep 17 00:00:00 2001
From: liangym <34430015+lym0302@users.noreply.github.com>
Date: Tue, 21 Mar 2023 21:45:34 +0800
Subject: [PATCH 34/37] get music score, test=doc (#3070)

---
 docs/images/note_map.png           | Bin 0 -> 301021 bytes
 docs/source/tts/svs_music_score.md | 178 +++++++++++++++++++++++++++++
 2 files changed, 178 insertions(+)
 create mode 100644 docs/images/note_map.png
 create mode 100644 docs/source/tts/svs_music_score.md

diff --git a/docs/images/note_map.png b/docs/images/note_map.png
new file mode 100644
index 0000000000000000000000000000000000000000..f280d98c4ca596fd2218cf7b9a30b50f0085c77a
GIT binary patch
[301021 bytes of base85-encoded binary data for docs/images/note_map.png omitted]
zE1jO#U_PkDf1@N;fo!iH%hw(}70XE&80Niy*J1Gp*=pS!rY8=E5m`L;dgSuu4dN!j z0rFrzID2Hfamio8V&`dVEmK&1{#Ov2uF7OJ!4M(mHIAt~a1zcE)f39uf9kiLH{;AY zq7ZcPrrX%Y<~H|@J{Ad=BKY55Td#f|n9BL>j2g*0<~mxodcmUbZB|12f_4iQR>78~ zE|A|Sco4N~(*}G>DK9HU-_L6=<#LJPJ3en(L%W#>oc$N?K{nuFDVRuK7xuC(10+A5 z-DoIDgjo5sDqnGVZBq&sjt-;QHu)T6K}2=LC$B`X4X2DH?sG{EKv(QzjPPxPT#~%o zs|`x5W`!e%FeSI?%;kox^(|~{>O{v{J;6Lfw6d#IKGem6i`8#6!QwH@Vx$*$1u=*~ zln&4vPfCXv<%QAc7uuP@?!|g5Xh6RO)jFChHwn^^;`7C;u52AUs^0Ovgw{8$=XKSmWm)mW5aBjMwp+U!HEeP~LEg#o zq6;fZLl5@^_uM)3965Jw(gsLvU^&X2_%${^QXRLE(UK9SxidXCmyx78?t{VR=vs)| z7aZwY`}M^4AW=xJ(}l1URjA0oO#wWP7hJdiQZhMw5-xcS^rjZoOIr0$ug5TUE`o4M z{?P1x&wz5Isxkqiu`~ijgC~GTwGr&8X;k9>=0f%Rx9*1EyLjYbWsT5&!(#e?Mf%_N z<%EG+IpeqU!Gx6eBX+8cjN%hxE)rx}nEM&7ZU4_nHe7%#hXz;O^iH0zth%}ct&OBH z&ai8lmv`V+ZIY}j31wO-ubH+4#pUF}W0Fh@Uf>+s`Ep zgy7>5q-<{^5wGHM*+Zur43mF{TJKRR6h#yvWaR^UE4~fSj?@VeK$apAuI|Rgg+li~N zKmYP?B=f4PqvI)Po(S_+m1S*`@l6{>_UA>DKf>2{ioVmR5>3?>`mxh~Fo8|V<;&ea z3B+w-a5w?+Tr#tqyb05=4C#ynKR1LVLs*83EcYmdLlp{nk4K$w$4ckb3n71)2nUmW z1dV3khoM03vSNcCVrUrz!xN=HW%4j`k=X~wRfIoHgd|KVqkG4-6$K$gyP`y zgk0qReW?$i;UG*gnjwf5rsvW1wSEO@^N-?{)FF_Md0Rx3B}qb$?x3MF3VRw>m`;w0 z=U2x-+$7K$XQxj>`0uo7lu_Q7rcf^PatYhjCLyf#hp3iBbEaW_)BYDPvB88H(v6A2 zTwX3|EL~n0yy*Vtxf1QPAToSdJL+S|5*uj?VM0Xt(?zT0P(41EAta=ZpkYi9fuVw) zP$1jOFd|C6$u!zrv#Thvd*pg<2yz-cYbxx(#x~zEKF=n{7}V+;gfE^5a$j4$404R< z^lnSoZQS65O0DFg+j`>1iUOIQcq8H+f>?B?nr1wbQINpDx;PZ`=EuNU8FP# zlku+5n*KmtNCO0OPD0E+%E6k%2*v@)G7u^ZC2C3Qpjc0yd4^w0-@%IKljemAoX7$m?IbF6FMsJU*A#PR1~xM2+Tx!+~f-q zp8UF-a!e|to-2NSUC-9aO^oKq$@UtugN_Z-RoG)3CX2M-nxA%&KCEyd8sWN%+Hku{ z*J3y>eMd{Ua**GOpUAP6nZ&V3Hbz|&Z(yf%wE_b%&UF8YqU+!&hq##bNmWvgHAg5H zaV-fJ-ug^y0uAqT&?JRv!YWDiN%dgr5;GZ)IsQ39NVkb-#r-#^vVBTI0zN)@s%WN0 zRJg5CCmRuz0D(hbrfJ%&x~8MS`X!{Wz`$?W8uVj!zvvQ>KuMI!Qn((8X{-71ATHPO zHMwZ}&WaA4_ynRXS-tjtc1Wi;n@N^AAR)j1eD6Ea{A+XANF7XhnBhxb1?#|u2n77I ztPIq^2pDVKRdLvi=dcMRvf@xUHwlQy_;k3|SJ=lJtSKGxce+q(FQ!qG#fWhJ{ON5Q zqh~1S9E55W&)_ZWqkX{X{bJQuN7oxC|Xda_HM%#>6S?vV(cta2n#5Apizh*soDto15O+l zM*mJuirXs+6r&Pj8b02h`}VutoG@)TDeBTx7mpV1N`fMg$enT>R2mF}L}2NXw1Z|M zwKaROzx8yiTHx%2IC&*-1h^8Rl7!;-W;#s^D2ZPW0T-e3tbIcCA*J&*Wc?V!lr%p|gP(N! 
z#-flYl?2R=K_9=8pn4Cre=j03jA)7CvfxTt)c%R|3YP7b%(il0q$;5@XJ0BEModJ& z;a^@MNf)&;UHc6v5q$`|fuCQz12Z8mO{|JmgrA#G^Y}9X{(twoWT#1cc7(>{{64!VE=RRlryDK^5*BUly_RoYA^jcy<6*f(^t z$zElg|Etn(pwElwFw)k)hg4Fo6i_HBISh$?#i!8n?}lZUDy;1@H2PshU6Gs)Jum-n zp$oIgPPcV-PeKEFVuio%#)p?HyG9)ielOn>X6rH#>l>x81}NzF5fElLE2%T- zA|gtt2-j4&Mj$Zl%%TVpLbWJrc1mbuPmf5@Q`t}7@~ZbS-;)gkz8@B_M|=l|e5|va z=k6H?zx`He`08H%`qHzj^EGXOvB2hv&legNvUx~lR>24MImAxsU{x{Gtw3hghqUD zSQX)hI?O)Nqp(yWDxEj1*?xkEVLrQ;HV@}wLoymLM%N1o(feQC@iJ7Yn(!nQQg$4O zn1mrtr%QBooUW3`P(*(Yr;tYbAmuZ1`*w(bcd&E4uVnr){QVm$CD`I)sXWg|kHqhq z)Dw@b{lE}zL?~IPZ582-713$*j8T%|%?ZM%B7`aKs$Ft>(|4EA2<^Rwp?)bF6Fx9> ziog`LkQp2fnty&WxnW^$p^R7D%H%^yAK{~wSoi!##;-6ueJD50o&!hfxRgpin)Usd zOmf|QdS`lW3V5W6rnF_8eaMWWMscRpV3Cg@h!>BzY(5%y|YE06713;;Sj?~p^%}`{naCb zQhU|J-(fNuqdo={p$L4#vu~oLYKXb{G|2O#C%%P8Uo2r9g`pDw@4G zLh4u*eypT(f?OCS($QZ?%Xajhn5Az8B-c^QsT7i##Ovd@c94m`2F0F!yjG+lDt8a|%vVG%P_< z4-1)Sp(hq}TH=I3g!ru#j@o2-!I3WlUevk!dB#nP1YRkI@teT%Atcn1oTi#PFKKBlq3G5)50{fGIcRgB_j9> zWQ-Ks-lBTf=VVFE~q4j zAZBmM?`3w;#UrzpVL<4QnM&7CJilqTq$3?kU}V|i_B~gMKA$Bx1A$N2XwveIe>=Kf zJEcD}4|3lZhGZ6C+5dHDFZfg)rKA;BjHX9@KK&g#e2dvmWp0R-VuCG<(6^NRc;5vt zoG|@Jp04^xUe#yEC#1fDfE?A+q)dsr)9k7t&iIs|Y@^vv@H1NN95r*JWbs4%Ge-etqJo;9r8ni9Bb{~S9g{oqU zJ3V|p1%^hD^EN8e%EU}h!Ym?v%ve|BI_#FSXj=ZGn8Jrasq-KGoy;W4g@;KBQZ7V& zH)KRf3i6JPRT$Z>4t7yVBpm$`qJ)I=P*t1i75D6|hb2DZOh=i~l=hn#s(9dn7z9l~ zb|@>`W{S~=j3@u4Y1)3uWAxv!$lMCkP2zq3Pc1_9GUt#f(vkYF!DpStHlN4CZu}IR zV_Bj>Fx6tpo(^QX55>EK32Wu(MX~0*U6HY)LHF!gib2l)K)pS775;~AZSR*Y%p;1k zb-XO(BrAYRD&o!_7ZPkR->{ba!gg^raC*>hBH;6>$0rMq2NaE84I7k z5ng50;LGWIN=!*yX=m=uXq9*(LVRHv-|r{S;%?I4+QVu{${6iCswRMpXYplpE4-Ka z#+9$TWN{X+)|YWvrba-9|6M2hoKt<h`gZVkhaLQ)0+Jb9WL?TzH_C26>^Baf)-$;Wg6hkH5^-R(2JM#!$o{ zs=rK(oxSVbM7}ns7|Q){(J}Y-K7M%LvfjDvuEu{$a%Jk@RfUjg za&0`d{n(4b1y9dDY=7w$awGoJV~!sWO7^aPYMO-{=U)9CQTXY^I+3c>kn6*$Xrn75 zMXmKgX76eL!*-!=&#mz{sflxtp2QYII=rWr0Y)V+?orr$-mplO`k238=x$9$&&}P! 
zqt1g9=Y06p8j7)SuPo#De$TXW@tmUyH#%!^Lq{#%a`iuLftrBCZ}#Rd{m8;xNy6aM zWvgg3-UfrSs#|p>HwsxMVj#;y0FHZ1Xwb$z6yVJbG^e#@OI%wlm!LH^9_FL&YV^6l z@&`YdDN0+l7pfU6G1`&bkoxRc6BU6T)-Y?}Dxx8LYqoEEUM{_mw)^9&%&j4Z)Z*6} zJWj6v!CuBn$S}q$00t)Z`ZndAkE(_ob9hx<7bmkHXQV#7k52V<)?k5nqSE~H(GH9S z6a&xcisbpr+dk@5uYn`ECvscN3F!wl{fKZ-3)hsYxi-S0p0vIB;;B?$v;F$n*KQ&G z(RPwg+zRA*xgJ=i+uQ756!AnVv_;?4J8N%d%*onOjJdEABb^<;Cfvln9=9%JTH8G9 z{im=KWj!t;Mb#Y9C{T^XP?L;%IeOQ^x!U@~Q zRuCGJZ_oQA_f93Zfp(VESjkj)=sV3gyH^;duHY za(UWa$4;h}2TIOA6{BO)^q9o6b)_9(h6Y^%998_rs=4lSQ`q+|=2V$to@@uKmyf+f z@`rWzzKwYW{nJIGC6u+=@zH=I8D7qv!zF+i7K#g z!~=La;oKL`+fklS&p z=)0{dKtC|)=DEmt>|&3~ap@HEQEq&^8c62m!7z1|dTMW)=bN)lO%>~#8y$N?e9Zdv zMnRVY>C>lJn4RD9u#0qN%2_zouh1MmDbH!j2^v>^++_H7%iOB)I>*bYUAUj9PSBUt7y?O=IpZiBZinn%r1jJc~g_`|l2gVmXCEI5PC^_l?|B?p z+QYHMNPvyFuPcgXwteq_=u~~I{oQE0-HtEbuKh!iXWr6#I!zUEB^8#$0uEROz7CIg z7tNeDLdFs8>cZfnTX0u_&zMiDR}C*;cdoO^=-Dc3S;qShM=ptF<7soV!QSMJ(l!*Hp_tL&#;GT1X2V0?I28m;|G$yOw1>d z!l~LKt=+RonEBtL{<&8Ie?S78jKwd63jR4ieopbjo~SpVNf*6b=DUm`Heyn z!Ci)N4EIw?-n)PJa&YUzyVpf@MF)i3^p;l=J?5WRC6(EB9t)CL*!vUK)Id^LqDfVz zk8M7x#tL||<6K%2ZPZ{EC;rw3YZ3*r#47;+RSu=A@|AyQ z*z|h}k!(S9)8^hJ#TW#fWY=D|jSf<}G52B1jQR*K!|eQ0XV`+xZ%sPIKlQxCU5m}x zA?c>MXEfY)i7(S`g^{uCM_cIci!{H($mEDTLN3jL)^v3$f6gdkr|@X~4;eSxf6=%+ zUi`hTYzU)FwknQ3Tl=CzfFNDQZ&JpCsqAmYWIhP}wEL)=ES-nbG!L#vjriAgKvMP=82P4ejzCT(`U$z;D!sBeX2@%cW|zF`L)AR@vM@d%ePnS z1BLwKrKmBz@dvL8qdmw3w~j0C1T#w@OJu&rh0|5xoV(79YwLd|*M@ENHLdR*vc@QA zEARCV{qsJcSG4x+*UO&9%Tu${kj&KI5`RcIjz`R0%=JM7_jkx|f`a?5&-R@6P$rgr z%cE-SF)12KxVHNIY$^W!-(^z^#dGR5QW3d0*|;68{y636vYjxWdPv5q`1-Fj*jY?y zpk%Hbr}I7rz2Y_1I)8Ik+2=VqlqIxpK2ch#`r*$djH^tk=bDKgQ~F&-Rub|J@>YtW za8GyQ#+P6AzdLpOu@PK}C-$IW?r${mSTo{rPM#j@VQk`CCxF<$HcexGm*J9K)X|F- za`;@1`n#-JKfJ$DMHjn44IyM!6=^XV<66_sOQ_vG`oLpEIrQTxel!<>_@=a^mGsTQ z@h#@Zn)H?8kU&DGs`eaQ8bnpf9XE4!uJ@DUrbzsStB(kAO;6DCSE3v)nBP1rI!aJ_ zLklQku;-2-8G?^qSqx8bjaFhU9}T?)?^s?o_H@^4eS@4K|Az88S`2-(oFvSQPg<|! 
zS039CS0167anV#$>|WYQqDA6LWq}&fextVGbAA<_V6-6>=V&Ii>OZ+g+kC%Kjt99H zj!KF9CIO2ttNDc|N=!!d%4+{B9%IMTNmc@pQs{vD_SNVDy|OHV5|-jWoUgDw&h8&j zTuP#?-c<4UjU|Eiq2vOLp_m5Su^c%r{K}qY89nJp5&!iyrKo+k`9NXQtE=OAA7lS} z-<;l&c2Ny*tfk{py{p;Slr`jJpG~ge-B-ClP%i3AX-1gBqwCYA`|Vx@e|Dz&8xntb zad%bsRCZzf?en^FiH^6uLi%t55f-YRwEsSM6j|^+c`RBpxkNM+xvOiN?J;}KqG1Vx zW_)+K?eAY)%GRk+j{9Nyf4uwf?#63`km;pA}X5y)1c0 z;VKi;x*SVjIY23y(3)Fu`9yzs4KM7?-FcsS&Nu(@*A(SM6TdmITU}}JxD840`6m($ zZ6l9b8zm_H@5_^+^=S368^3?uAAB)T#N2kLL-{tg0bd{ror@$@GP-38_E}lo)i^{A z4>QD}xCckDLX3CsW$VMQch2nbWs0D2c(P9WZhREf>LjZXW4%mt9FOTa&B8b`^;lpP z8Jc|ERb>yENB{F?H6u@G#kW=RbD_W$WuS-6CIW};Gl}v_m2tcr-eiLJ#Q4TqmF99( zUn95ROt3R!v7Qc2m}X07nTl*^UlWVRrJcydY=c1sSH^`46i9@XsP1Yw5F z0gTdB5Sf?(JD(vR!I5asUOU@yLgsq*k+d2|HCtG1y_H6Iuiu8Ow@hp}@?JT(eGeff z42%$xu72yhXOyfY1r#57i8s_clrK{v32HstDNk9m)e+FwDRnX?4_~z}2E8Ey9qaRi zmDENf1qaFvF2x(nNds(oRxGs?L(!!OWAYw|jNlE#;szx?k=7il@~aeHUu|p;J&g1Q zDG6&B%iDz|M$MwwC=f|0{DiHR-ioZ2U}DM17Jsla7?xXXltCB7zbRY4^?rnB|05)n z6JjWFZ;}#=ssGR*NYqBT>fh^CpnM65iS}K1q6mMb3zDX_jn^8F?o*2rN4&vsubN5BhYfUZ$|EMgOl#wdFcDNVKnkNx;&O;)z5O%ro4xe0LmE-c+O5-~xTQ8%GX*P{Pn0CC zbS8{1#DKuUG!h26o%3+isiHHJ8STp7Jg=(}Vna+cD1mh%Cs%#h_6>yqccY#-T|JD|RrBtAZj`pm;U=5Z0;m z(J~KA^DnboE=m}B>R)!YQQf44~Pk>ZT%d!Qi*p?D;OR0M^6 z*Eny2cheKFA^mHNp&$urz&q@Bac(c$#B$MMm!o zIdIv&A6$Cu^eG7F-7v~@XFt9{{<1%G8{&Rg{VBkc^X|Xvb<`;(rS$!mRklSQ=_tjd z2>!9C+}_~>qO~N*qF||{Q_4rfcnhH{QDf^hR||;~-!qdiy?1w~4h-mR-}eYoM%7!i zq_c;)b!|wh8K*^{%zsbo;P^Lv@QcWzO6Fzr#^(R9MpBa#Nt`#q)=88YILF6A=CNg9 zWQ53e^xYZ8ih)evE@ZGxzsf>LQd(LEX-8fTD_;|CARbCUL#o2RJ913>azkT3GOU!lvNC6qF`3{tNN?gnpC?8-0Sr^CZ%Sci*jy9|@X3fa(LUgN9i3XT zm@9JEtx=f~A_U*9a?#W-W-ODk?32YS5S`Zc<0Ex67n~Inu~W2&Xb_Fc*r%nYWxYip zFE%SMDc~sTnPWka0P(ebRV0{m+gLJ^2$M2RozE*{_MHAT}Z z#g2+^nf#8%g|e=sx<;2d^UBMmDow|KWu^W$N2;25*Y=@qq-~z33*=s0I!AV-Xd*50 zu|HfiQbL_ z>W2_I)x$Oemk1kZ%ddzUgO;hTwF;4hH$sVQS2Y$>(VdR$js!jmciYREAH8a1!>doN z>BDeLB{t-(WG+AR^3NYsePQtMINHR)W0K=mA4rcB8uUEv^QN@*_pYd{T>8j$s&eQ4 zt)}N>+@CPJX07JG8SPR0a5MQU$Iox(P%tf70ba8aqA_!wXL%e{SRUp-nlk2!xEEsK3JCkmmV;R!n`lBb?``Kz0tg$+=4fr>M4boYsPyZ?h*%&| z4a|f_$OM2!%tYXpV)=0FFHFX14_SaD*s+yQ>n1I< z42gwl&eUmUFR#qkd|X*tI-tRU?UyVIZl~dE!3pc@?d=U|zm$$ct~heeRsXg3 zRH|b{Mnv-W6r^|W>LNZsf1YbN()LG&XdFNqezLEDyb7r#ScwW|{fBP9KKOq=Z;7%bwkz5knYaocV^g0M zG&qhW9RY2msw#$n{ykSK($(V5Xc~*5@BBw&fU<(bJGe{3Y56^Cn3YUp|Xt_V(4lA|~v6R~7y&d$4-7dIh=UMncvVqU3DWD?6`(v4R)&mDs-{vr1f zf5`xuK_$)hjQIhZ-8^9`3<)_Z6pH{+ddHRxi3= zs2x`Di(r`TP1$HVmscbidMwj`p zd}1@H7IQqEh!;B$q;)>~g_w{xqn<6{;JeIsM~+_SJl2ZgD}^b~eJ%6=D>PTLqzFZu zWv|{-0;n=QMc?g26YiK+ey6Ks$HwQu>>5Yy!CIHNDM88<;yV4feXZ>C6@;y0Me-M@ z;G+vNMrY@va3qUOmq>0+FGt*s+vFBOA%P3yNztJB^d2{W6(9o)s6yPjuB(|5+WhZ; zi5DLqZ&tt9R$Pq`NR!>QA99-OduXd5KiEd3NW99yceVp0wvgIR{Z4_VnPxb6g`?@j zRj{$?`U?9-j^1Gufg-iRadhvyv&G$Bwlf(4!7vYR`xo{^yl5IW;Y_OyqppWqzf)Jk z+rN#I>kXyOK>7pjRlk#lm1XoJ=XTqJ6TcHIhV#K+3jhI)km9i20EIbA5S`Yjp@@B1^_PSF3Z05Aahm!KW(UYB=Pm9 zH*m;GDI<21V-4>|LOe0gy4suS`+>I(uie5d^LjJP( z80k#PlRv{%jHJ4;Gx6$tX$nuEg&L=W(;$g0^2nlLS>BK4hIgp4l;C{dFP9%PmR)aD zM2Lu$s1^|^nMPw1Js(h0n5u1{@yEq!6DDrX#yqy6fv_*}M)BrYj2)VJlni%p8IY^%6cb(`;CU zL>_5W=u(2nh+Hw6nBsWhz0%!LSs9rqZ%nv2W$p>9@L^*cr|4p<4EAB2The&_{f*Ey z1@5QN&*afG84~?}Ip)5cy?AHfAk)*8$|b@NV}64+YR!q2X1XN!)3T&}`i*5SzyQ5m zbx{U3sp=NVEw1>C?BVS56KBI3^x1rA-8R(7?ZYqWhe>T*-z~(7l38A^Dd4dS*fW4z zQ3=+xuVzJ7R%Z2?@McFeDI``JfUA4xvY_C^-46))%i5{en$KHNv<-vjc_1-cTb`c> z+{ISQ7^HqM+P?2LMt%yoS+@s^;wh%Xgn|G}7I;`e+v(@a-NZt{qShlfq+SCNN(LwpuEHH^xuf8A<_r~(N zHUi0INMC3dS>1ILC?~mOy>ytla?p>?tMv2RPHT-*FG6RnJ%TR&OF`3Q=#xPS5CzQ0 z$hXgxjL$BzMG7wUE)T9i_u<3Gu6;buI^ zwN>AV5+>J3HI1X;?ad4e@ykH`LSpy-`9;RvL*g1<0opOE|et4 
z`A~U29WL;w9`Q7c@zD}59+03iZKprYpcbJIYY-Ad+AMVtd!KAO$V4gG#FMHBpD8Dh zlY*2ve%cPn%gY;iJbg_h!p1J3Gz*|t_c!;PC$g?8s=*)jNN*NHD*iE~4~H*Y?7C3%YUdqVkGuv0#hjXy zHckmk>D{cST%NU1XLxypc&dLrpDmp12Lx8$fF#F1b}pSN+5Q3Ms-Cx?wRGC)DD8Z` z*E1Xa1xNj;U(iSwHmdbjziK1Sn6z=_(fRh|)pO|t>Mlslj&t%q4t!Vmrq(7P2L5s@ zdIkkGzO_=LqkR~T|bJxIF+h%s7A~>GIei;tQzy1ER z44oJO0ijGKx5IX4@SpSZFm?RXi4SBqCR~b&KHP-GVWnfU2o*%oM0wtWq>j$QcYb&0 z=jY-^$e4V&`U0R>{^RwH8b7;D{N?XF2I>?@{wt94WML_1atzexx|KGpgoIB%9jIPH zCSP}tTTeSDMHUR*9fN{`1X8^WZ^0;4^pVl}xOF2k|7CWT3azg5#dpX{M#e2)_!-FT zWR#YEBbPYwz&SvUul2jT{y<*6ZD#}h0{0b-1$@f+4_Yw(C~~B{Oa|3o6`ke;4QQ?T zor*qQO&$mQ#PZ|)=zUqYmE0=Q_Ho;VBzEt)*JD(S!8liJ0!Nu!x2I2^=kldU`g zLSxQr?xo20jTiFAVOfjgOd0%(|7OQ@**DO6TC!-+f{~sjMDi!UB8lQsQ~yo^xFchL z_fm8VEpGJ6YIof!Qzh}3N&+g8)2GXsFR@5f7W!pk_?)q>8PoAHw)*KYfAP$heyE4( zF(#-hl&T+~AI*~i*n<~?yCd|Hkad#Yrj8jEA}|sIl*nF zrKR;TKi|}9W5OX1aR<2>-hvGN``x2x24I5#*l?Okd1OR{fPerdXWT!=;Jy3a%fq@~ z)tntqZlZ)0Fn@;lA0PdLyaL?jb7Fw#7<1r^JZ-?AW&}(RRicV z;5hXFZJ&zZb(49#a6}Z4`owR%?6qo?sVsfk2q2bhCvvpr{xUk1b=4!pG>U%oXykLf zD;*=^f5~j!fzje{9wVJLz8n;EKj6&5-_u+*u>HB8T@!o!SUWlVD~U;v_dIRgsFSa` z*2cQ~?a2FN`_X}o*OP-D;m3=KF2BFcF~{~E&4nakA~53;!QwtN4Y6`mDoNN?|Jh9N zxK8TTZ->wC{q0g^%I=yaS@1Ff{2%#bnx$I(8k89JByJrS*uL;VNyQx0vWZ{ii^&dA zb2OTaD)HK^7WaFVtt>Aux2pWdO#wHnjoXo#nkd2K(RzeFP*YH}Gry#4h}5iuttjPO z5NeWt$`TMu=Q3-&nTm)%d}GHf8nAkf=x^~_rZm5;qO~9Q%rC#_xwXnUap$0;SPq8} z5h3wLeD1c&bRTu$MrcJG^FIl3--FNH%ym8IgCKWa+}P+j<$p}5)z8%0!FV?#KJ<0c z4;#IzhI(r;??{i-QQLmhIF0r7VS7N3b@jUA$SDJ;^#OBX(y-yr+t!z-KRBvcqCtTk zg+v#GlF|kgq3h_u_79v~CQDRkmK42?7*yW??OY^PLF8gT2xy{lru>dOhk#@;UgW_h z5j{JJurd{KE>>2aXC$F#U6R6OhqOmD`RuV&s%ztB$mj@`kd++jOq7*72T2;Kx8;}u z3FOop{(GGH?E8XjY%YM|cM4Q%w;(G6Ol(n~i$4;D2}UFP#P5~%BAE@vx zKmJ4V0K%XU8=D$S`WwE@U{?PvRKS95O;uN@0Md>#Y>+=lN=g#*ILMXHwj0E|lfivX zxd11v6X@-LV3H;MA2ZEg9v z&(kjJKdhTwvx1c9%lX%@0*7=%t`83nK#~r4FAB{qEin5hCp_|b3)P11fDkJRJ_8^; z&H+YvX^I32r4+CtD4(Ux@%{lg7u^7K&lhj5en|`l9u{^?9~)^Hl9{qc5=0*s$x%|b ztyNx9$bWcF&FyOY8?qn{kz60F*Jc6>`NmAtxUk}=`<##3Nv1TA`N0E?T=(u*TMv_Q zIt<^vV&j*Yh3BDIqrV!of7-e=(XbbcBLg8GkE@IQS8(^=+8t$vW#NvVZYu9Ppg$Iw$0@c zM9F0qA~j)fOV}Wj@(3KO@?Za<*PwZRO3Rj{=PrRZtaw*f$Vy06X@+URA+8=vcx_*3 zK**3P`wKpW)O*BLkqfq$tNI2*l5XC3-jeJK%rUCl4qm&*%$rv9WBU983WYmN!#+w&O+q zE~h%wXd(V0#w4Vl$u(QA{$V18bJuu{K0P-ba8}M8HdSZ+@I9mpj0^2DmCf$@*XqsM+cpw;+*}qx=Z~WZRD~fQ$^ZY4acz z?x_4TgU*0XSQUIj=zW{w~{4kRazKTyhk8AL#_&Gj%nSeI)2(qOPn^S28#ucEo{X{~W zKM8)A5=RU-)veH+uQLt;cI!DsP5 zZeGa3{})UD0tUbj72jb4WhDfV&wc&;dTyHQfP8!D9})?=WY(5S58&QFAbtn9a3D3_ z`6mYs?(XhBIXPiNGhEmelZG?JWE&(Mis$A>u7xgaez&bQV*T<2AT%qMgJbc&y3%rx zz~iFvqi9C$?@f?AKdWOE#xT?&t6<~QT@+S$X8IDpzV2_} zJ$1iatEZ?}IJ~AMj%bw06 zb!|63(bdf3F`bCzEbIf#4)ux>U~kiIz>u1t0_$dC@~x^7crZ*F_)lyDt3Nh^l)w4r#%t6n?f4HWHGZp%OhiBrhmV4@ zpPxSF12}iU0)d-d0YDGwS6U1ggaLkTq52~y8=HxMT8(x%@TQT(;I{w)37-G}P`ejT z_m}C+`c@z30W&d0U6$1IccjQwAwb350|L|u> z@zb_8RN4Spz~4jwwsjo5A_Qmn>b9q+&Fa_=>R*o(Kx_jxPESq(!FBQm5a#GL%Fxiz z{(U)slL8A13y26ypbZd8K|n}&aCpe@I9GD?p5zltjQFjd&P>VJDX@dz%Bl_Nw^ks|sv_0-QpiSS=@{(l#sUE;q~g!H&- zJ#+a$AcL>eTD@x|X6e}X3zuLEI4l{6c^h;-#yMJ=D8(RI(u{xY{GlIO`lsg1uXlJn zFH7|YyqWV7$x;S@Z!bZttc-?no7;6)?ZER0=@EJc^>H>xasZu&&*#FTrb#Bxoe{&4 z13?{67HzJ{r5_T_;`I}Yyc@EPNcQ-QlP};an#6s`Er!n(+5&|dufuewJU)(Off?oC zzOn=fv>hU~4uY&~jOxTJDP7t$=O}^tpDwks!a|SH$-heNo%{X7Nv-5pk*BF*jMJ62 z`qEO*`D)UkE`yT7Vx})>3XD5$yqV7%omhIDDbIHGdJI}>H=Y#DnxwhH#qHB&W%w&z z{cJ#2UIm`Ujr|y$QQiWnj#1)Y8FblZC))forzMvTb%#vlHdF03eaIvT6%<-q$~lkDY8*GrDti-H|Kz~t6a_{hVUK3cX2uu6y|F2R-`yNxqNgH8Zp>A2(MaIOkK z#<^EopqX^p>N?Ba2cqAyylf2s9r=%aIm+A{P*+%=avePo*Q%B$+pim~t<6zNh~fs) zJxQ3RuvZzd5Ko?zrW;kJpoH}9SLZ>n68UQhlgGoYCUR^$z>AlcmiTRFO8%AQF~(13 
zEa`sW{t!L?qH0q^I&>Z&nyW&LhR_=&|NdWaqXKsq5N7{qx4=(=BXjJ1x&bzvI1BIZ z)g;eci}oDWGzCPGyL!nt!oVyB($TV``+82EZtS>D7k0c}9sinZ>;m0brb<7!eTc zbQ}!;9mG_1aKnKOt^Gn^Z(+@m*^eQ}l&WA`12F5(UDxv;8?6AZ2zvi$rOoU40nmun z-k$75YC%EGmdzy2%fPby>F@jeeV{!72@eH+#QW^}{j1kmVT?$?j5r8%b%J%*^0YLT zRw_kx9cJu;-aqQ{evSq2BsSg-^b+Q+3uTJqUe1q{-;U{f-%O^jd(KV<*3W)Fc=a6= zn#g6Qey$n+XoKKl!x%#4D6X$?5J~2rfpocd@>I$i*xXrlR6+al&HKEa(dh1RCfhkp zoo%8SCb5*VK%#dB%aF&Zq`TW$D^(GvNg3eFZyBK4P|pA!awf3Y@p4H z#n<1J(uYBan%sJo3yl~3dobe`}a{WX{ znjz{FCvU(0KL@iBh!KVh&;8^tXM>{acZ0m$iApr#tV|2hGQ|@i@pZKX8s@CR^}a+w zid3ewA<-}fa^zwg594cwYyZ;mBxe?%^HbJu_vTDq^?- zPcJ8LuL7Yjp0}?bZwPsFUQdjlH&p$|XhHe%8l3T@CZ);p_Df-TIb*Jh$XRdQzW@N) zhXaovV2k9rY$KDB8qV1@>(%_Dseh6B5p3`(X1uZYqD13)D*UM?@bMVb>04WU798}Y zxDud5Cf5u@OCBkcFy+ehAe3dugxd7I0X)9DHKX&^hi z2RKXrL)&t@eTo#RCSaOEIBU%wp;cw{I-(`o4*u#Z*o|blJAnrT>UTxz?VEh@JB!`M z^5q(ZXQ`S{BtwJ*i%E_OI#s)Q>$Sip;2gwUw%j`aC@a;Q(X(n?0-OSW@CL^1?|M8Ki>@rj%Pm}Jq8M!N0N>OFBndeP#H|lxd5j>;Zn$3`&2$&mH!v{pBi(M1)I|P==Bh zI{J#=Y2-<*YEPHbF6`~AHb?zXQLb;f2LpU9EFF4bb3^ksji#==|C2eQp`YiYe0fgN zMN;?K>uz%XL`imS=iYK1vxxP{Q@b8kVrjD-R1!gOvn5P4CRVh4;?FoDHhK;P)9QDG zjw4Q|uXo_ddsekhHgn!@DT*J*PZ*~6Z^EY1s4Bzr@ zWnG>+IVyEIcHd*iYx{DE{$86s@%85t zy#oDon^PO%t)Dm&TkbJ@!2RUp^i;JJTU<9yy@PBLa@kG(9bnu5zvY)2J$KN{lM+p% zX}~Ln;9txzt)cqwVVqfSG%BPD6YQPjIcNksS9H4W4|mfIWu#ghJ#Vk*DnO;NAZR-S z{IY<Xc`b@=GN9O_8Wk`%4Q#NkXH=a^h}!W%R>1Z;lEHjG1md2xTb)S z`MW~Hx$OnPEJcB)R9)(Ov#>JLJ2H;4C@`BI_XZb`p&(Y&p|WVF4?u8%aKe@F&eN%( zdp~Dm`}{uS%Cm^!hi)#_d0Mo!>d~?=}84A>QF+DwTD1 zJ{FUUiZSLY(NzBWgfn4dDa8Q)d7PLBn;Oe#TAoaAKHPU5GG}4$VH(`w4ohf zqWy+FQF6fdCu%Ki-Q%<>y}GX9>TBRtjpQ2#yKw?Mv^Kmaf4x8J@6rEu$-;p~Xpcl@Z6M1{%NJ#j|B>$t0TwYYta8Z7c`v4hvrhL@ zblPF`C`)T*X(^oPM%UYb^Nnurq1;b_kf%!}w>LQI=2&sl)R`kj@_P1KRq^FM?P!y> zq=bJ|MKeYB6815mY=!L@-_^a{W6>pupfoT0mHdvmIgN;eAs+d@+e1@0PeX?|?#loiThHvh`DKvp zb?utzH3B#BWmJoK!CIO>RPQS+wuaYSQ^hRw%>!{2=%*|IRD75Jy*pf0W7W7zuzk$+ zzD=Pf13LPf-3a$NyG@W_lVh7Xtj=+?k2$(8bF{>xMOtcvh)k|L{odGgVMbDl^* z8Kwk^-t&Bi-$}RgPFWn3`4|SQ^0$|3V^Q$1cUU1C*)35AfLHfqS+Wo9e`j8q(nFLg zi`D6$9$t(^fl1829$6)?c6g!4)i>1~uR~jP`I=^=eoeGqDqNkHKfk6Ela9)SciX<2a$o^EJ^KOp0avP{ z8{H=@Lnm#YmWtjyEBfI;O*p2rXR{N>eujuknlJNlWv>KUHO7GL#kSYHZ!?yiZLG#L{B>> zTQ3w&4jR8#P|p=`6KeyDuR>#3j%@_}ri)Ixp*_6JT};09hnk;j{$Ng%onfv)Q>ksc z2mPLIui1!|?hAfCF?fOCZ4T>>Ta+Uqeu9yI&$USp6JMLhcLC9EYHx%J}G|D+7CIAS-@&JoOJo>ZQ zwfa?|l!pAn9ix}Hr56J7G5#L^U(LVm$f_};iCEy{{)r{M(q7w{A>D&vdVYgffRB#= z;d_lGwnxB!+1YwHMu+w>lK~G0<#jzLGEkWwD=SsJCk>a8fv;yc5{J(>kD`COd~yt> zGkISEDi0k#9S6uGE$Jp_Vf-LG9Mw2D}g7*Hvw83_wY7m{$P_m|YF2`?j{X1g`2zrC!=uhVL_|D?&-AVhd` zGd6D3VHwszj$-;Xjpmd7L)wV?)hg#vryvAbMbGy+;IiBIfE%QM-`~qI=<-0V`V7^n z2w!&v2n%xTCc#jf4urtn=-%x8c)9-%UoVgJ{zC(veXqi;H8aJQ0O7`Mc<{HMAol!w zZu(c8j76`^WUXsEJBxM4o=;vyuI@M||GrN2-z-(B+RB#@XrLq|D_{-89rmD@c)|^Y z*uyUQd{_HRWU$B$eGUk;kZD=Kv3toj`FJKY2xktH^ZC4u%)onk!0)(g`?tnMtB-$m zd6nPn*`PA9EI*Dr0;E#cj$83sBkuRbs>}6(6?s_&n$#d`25t}(P_9=Pu+(wa7(UKi z>?)Hztu{`+6%;h!rAC`-E#-JS^EZov<9Zv{Obs?K4r)Ri(NCf4G#TQ5^JBDD?>@9D z?Iess!a3+Im9bVuOa082)nK`dLHXux$7#pyC-+Z=cYFOex}685-6!oI3^{exDR(wY z)6aHRPJclOvsoK|@t^C*`)=dhJY3RrSpmt4j8g1J*E}n*I_@$1kxQGg?&U zg&+3?DOJQUtu8*oqrdN4?Gbrt1t{ucKVJWI&cxGwlQe>LF0+r?!m%NA_$`dH!<?IsY7IlAovXyFu?UiAdelFuJ;g>BA))PZ`8w%cK>JwRK-}Nk=%p!S8Qil>uX~S%j z+gSzWP7$NsH7Cyt;7w(I$y#$Af{%I2{Lk79I9j~<EB=09QW~N3;X+0Bvljf>wTYnILb&rEmpKzyX-_< z^op7KSLaN6id=1meh34L6?)ugumi&)JFw(16k)@VONEN_^K;<(@XsHQwaYtMJ=T4F zIXn!jbRhfogv_OnV9ofltU?_6a>LN2|2>#&#;GO65(ggWU1Y-j<;-jFuOBZSHp<@B)R z?cAXxZOd@!XI>Wfo1!Fg&>(Ak?F(ZgO?qMG2N5)0uJDy{1( z?LkkDOcOojl|3WoG46t&_iNjsYXX(pu5?I(v0SYrQok-5OgBZ(^MjtZ(^(&S)JQHMAB_pSN3FN>i$RKZ+y8_MXj23|1}R-m3yGpIpj@M 
zR;zY;tw5YB0_Jbp@$BcY2gkF&w5-g>;u z8Y1$^skzb(o`zZunX(s7GZwT>BB`mV)8K2q&8oOV-+97`YgWy9Z3KMkEJX=5hBeQ~ zN=h4ujEn?(XB_-+o|uScO0khpOo}e?Y*Ot8FKe)GfJvu1Y}5KT(|%1)yS0w3mzNj+ zLd+4DX%*Pi26ct@+9-3n(u?~iEuDa!mKCS2?Px}Dg9M=19n6%5OZ0gfjy8ZX)gXTm z@cVhax!Z7SuioJkkf_18yu?4;P9tBv=puF=^F8^ub_tLA7D`5*(xue1w;eSK@g&IK z#^!F^uR9yAfh176lfP2uM;!42^Qx^Y$%8^)T_o_Z>DMUbsJgL^3E_)^Yo*4cqTf=? zipA37N7J#)zqfBRpZJacRbg7Q!=q#J(i2y!d0Z-v z|LAqBeChOh?yM?!Ttg~dPJLv8-OQ$QEi4_NfLBIyzOqrHeXfGobzEJZ7vb31^Ehry z_WdXlTn1okj0lsEqQidi+P`z%dyx zt2e64_t^gO__tGMUPk#t)_An!3cXCO_{x-aF~JFbLwHu8?o^UD4_fL>4m%5FZd++zZfcx^j>2$Mmz_5Po-mW~QeBC@2}W*Wz8i+g%rFztS6rdW zD(rSzBlE*P|E|pHIYf@1$`3Y@_ z5<=#6*9Yc?r1|$_@YaF*nnM$Tr+y0?po%ix*T-i~9}Sl%G^(>_o)kQt1$oq;)1cEm zW$L4N;TxOLL4HW!lR5b-lKGe?CurG~ce+16Z2i)d*B#!=(B|77mK@#XK#GslP<<{} zQW5Gp>^Lu2EaE@)+Za);^KB^35PZm> z23w>qHDYM4Xq4YpE(~8!C407cEHBbdWtWPym=`z+o%;vTOWnS#2vhAaSTWhgio;~_ zEkq;DFE<}?WIh21vKfi_`#D3C52QvDMr}5U(fx&mai_iu?3ZuHzV5Uouv#SSLg}K3 zGAXBA4v<8_;`bK3N77>y3uX!l);7ST1*q7~w7rlpii9xeQ5)E_FEz2PD;K4=Yq!~x z2Ht(0@q!}hqfGh@f8z6>uSg@wFbcG*a8d4mV`oS~mCt!=>gB0&qSuW4j#2*J7~!GL zqC>ei?#12oZZs>4r0cwlIEM5-|K+Ou-;i8_@c3nqH|TbdI9}+26fJ6ds--5PG?Xhe zP0mzA&%FkHUy-0Wn}WG^Yoq|*!-Ef!b6Hqt?6V_J8+eed6X4ToXgkDIx%H&b#x_L? z-msE<{pC`bI(g{{16Tm7w)GmnTtEy4KXN&rKf!}V8Q1dAH~yD>h}C>&y%Mdczkkp) z>4zTtq$t?lkA0Kdu+hy}RB!^g#Qz~%a6_!I+p3YGZP%x2mojylDFL6u~|4nE$0><#nklcqH?xwJr60QRRg(P*W zQZ>}{i~~)%Fd}h|SUFovVq$vF+fPOwTab>6f&7<08%~dX%u5m{_UT3s?J=V05+n!% z->Hc$i<->TYHT}NDSXFO8t;OMWv$k&wF%7_Aqr+LNcpAN)Nx$IeqXc+ga6dhIA=u6MD=|6=TN z;`J>>UZqldEn}%iu2QL9;Mwi1Q1hYdC^GNYH*WVp`|e3j7h>qKE-p&~ zXJRtM{~;^=YcHJt-cmO?(#gEN)`}bCSa*tOEf-C!i|l98kN?s}eH7D~?(a!oP0>Qx zw~;ua>@(#k^$GULbLez1u_emBm}HNqLo4-DSrs`I61MBr$0U3d5AKG!mudpu&7ul;(DWw&=Hy99_81BDbmpf zhL^5~qUctuoZXkl-F!hNPSd0Pm+Q};gMuoX5dI@Qj}Jve6ws~rbnuktB-?TYDsGv27^BVd*XhZZIi`saPm*2+@ds5W|3btxTnfmgBV2c}xYU zI~d9ZG=VS-!Zw?`xB6su5&QypX4}4l(=+)nI)vFbi;j>X@_@y<$ki79V!9*=7Z$a= zYDy@+o)JmnR`75awztQ*!Y?EGSm+wN}yDf6HaBM4G59KEe7V< ziK0VoFY0t?{~bmU@6DK-UHed_)7gPikA@Piz!Qw;f4^t@m1H0hbGxFP zJkHXfDxJe9om>T`_FIZ(b2F=m{mNNr7G{w;v8mL(0ok_@7l#Ie=h^(%E7jMlJOyrn z%dy!DpGnVXg#`%O3DO;Hj>}lQAV!|ck1q<5nMyD9L*(a2=iQIT*Pv|LDCs~833h*} zGJdw({gpb|b2bL@PCU~ho)5#ghrf*3g2Z*4(;YjZ=;h1{c7vnMX>xo^xJPjX8kL?~ zi~C^0&^~mvODzBYE`Ttd2frQzYdWk)m-dYm?dGrxgE^$?Kg+PVw&?SU#H|k}iwQ(} zvh@+n2`SxJC}n?{p)$nKYx`2E&|{#p#HBeEl8-Nym`ePe*4Ul)weIY40Br5tAOD9OnuK z$mAWWXIf%wsp!Lvt9N2!TTpPurcflA{KAE5;p0#d5>gKwDva5?%DcSG z7x}ii=BHdsbiDB&EyKpDyd|y|5=NdG|x7D=k{yu#y-^Gs&pG}S|`Dm%m8Q>+#XIlntb+8P-v%>( zA0BPAF&NXqad>TV_Q8I%ip}-781eht4v{Ja=GX&`DO*rxKgDuJ{v3)2_=a7mG17J|2l3_|Jm|I|DdEQUZCpl%|S z)0BmYQ!9GMmv3W&vk?D1`-1}3j&RpA6q15CyVzo_x_TBR2MhMC}TJZt>u_2&9$BJZM?FzR#U z8PiV3d;!5GW*CK)4C#(oHC7}h%$US~VjNa9*oqwR;xO1Hgpo_nne#F-Z0D0yQD{~x zFmSldnqTae_RGIa)8v=WAmN&O1x*&@RZl|=H)C@4g;h9j@zbCfh-blR!X!ZHm`GtU zA$CW#lgbc%%WQzgq~&`0DW*79g+NdDuBWdFS_LPVF1DCXz5N6uY%9m-67!y7v9{QZ z916$FF1{IxN94mq$H#{Ni5(2OV0`gHlZ|w&TdMY+dJqCvjQP2x>`}Z_b@dQl zHrBWt3_W1%B4}fh{{22TSGe6FEo$yZG<`z~##{b)h(IRnx)%jhACp+S zlZ@@SQqh+ibgFz-CZq4}Kyfq9(NMbhXw-Si8}#zy#Eg}K3Xecn%{`2Zw!rk#ng}h{ zXFZ0s?({H^y87^^ZkIsBi3n7JZI&gBlJNaiLrGbO&b}Cu(mV|}d0a*@{0Ja`UY3WHivIj}qGqWOJ46bNT zokOCA-Kq>V825rH+cby$4@sODG+eTKG}OFyuo`0prlO;STjS@yhXf^qf4!rO5Bb{?qF|GPGRN!fRdG+sPnU5VJS75NpOdc#?jjCL$>zv zHipVrC7iO$cVEnVQ>dZPly`8mN&`uRVc?OP^;wn3Vx0twdY+eUn9_vbl*!RyR!Q-g zTD;L`yal6V`S5;a{zExspT@ATc)1v2nK+#K_=5^^_1WnN({soFK09Tu>=g<=#Rc?P zWZ7Q6!|K06o_lEbjCAzX*ZZ_SZ;y93)=97NIiQO3E5TBZB6G2>Z4IG0x}c9eg_L#o zMmqEk2?e^}va;sr%(U8vcjkoH>?=1++bDV?GAj8IAKvI4@Tq&^+1J3m{o&T9iK{ro zY}1gUO%eUxJO_5ugeYn|0o9aKRUFZ^pt*s$j?yl}iHjigTQxjtx_8=J$)cJ`C59>@ 
z8TI@4T=~x2c>ZSoWLyHxpCpWEhwoA_eJv~`WwAbJp=8_Z$^~VT-X6FOu@TNE2;ymIEGKck`xYMpgU0y^as$*ZmY{ z0hlRY=2m?1QDAz}<_&Rh36MT8F|U|rRmfq{Q(?$8+Lp|Qmco!j-TXjh#dG6_`#k(- zCbTXkOL0$Ys%2`r?nz{Pj93Bt9c4(8oQ3rS-a>5oSwyC1cdMlD8m9?GE>^_u2;tE9 zo|%+jX-^-J)C z)MC_097$(1{hm6i_3C8gjH&6YE2RSA67k|$PssGdTnzD8UE)yGafd%G(tKQU;w3)p zl8ky5T4{wYd6uAoVIUFnkwFqcf{MO!d|6h{@{+(u3_)oAkIpXzfyiOP}W8DTAdhg@%?> zP?`p{0)t}xSoklATk1xJYEuvFI&3Fvg4r>_|6!LDCr?jZ`@a0p3u9hoq=HPI!o_Uv(RD%0pXCrPDue z*{!q+KdiqeCvno}HvY00BvTAc8j&(Egv+jHIHIsI&^H?UiGIA=GVnvEG+tK<@)u4u z%J`XZWibUjnuK7ECH!(^OZ>8!7$dxOt_kKGTwUEh$whxXSx=2!v|br#Zc#O4b~Mcu zIEfj`u1j6^cz>uWRy2jJe1~^x

5sQexi?NXtJ_dsx52LcSz&YT5gJu95Z(nSE3m zRs7z+Sobm(b3Wx1dc=6Kwl!`OE1*|vc;-uKoUojvLq8ad@G8ISlRQg+^<}9& zBMVD{wW%2F7Wg<&wk%%{8C99uV*ZWp_xbW;o|Q=^X&EDs4l)<26;ffAPTu4&8b=Cm0@C^F4RY9s3kGZ4?!q@ z-ccxi3$!e!F;8Ma@s{k(9++y^Arezra29>UjI~{no*s+4;oWyBMeoZ-mtLfu(IZ5F zr7t>D_N&a+u@a0>jWyZ&aBeeufhWpWBN4PS=xIWgOeoe~Y5x=HvvEjOpkCkZjSNv6 zp_>nC+X-IfZ^v-zSpD;e?jZ*V)3PRm{)9i3mPJ@9NURNGeV&T$4=C0>+Z+Q$4Kteo z5-X-3NA4yNBELzzLDL+Y)$`P&@OyCS=_IC>8@cf5{McfWhZ9m6NaZ8;D}RTj1xO0g zh;u#fLU6+GNwkP2ZGIFY=Wf||TSk)5`yFLGiu)y)Q}!fVQ%QHK(#n;{-#4!>aQr`X z2uf%rT11ogP}nx_kn_=s^i4C8nZ#)V>DM#xzsd0oQnkQaHXsb_X>a1eOW;4_(o1ex z<6%KV!$5aRQV63^_%OZWr8$49!5#e17~PN(k%UBx3HJCKOSWzpWa)Q!h7}|D&*6ON zA!_{9n!o#L9@Xl&1-7xce?!hOMHJLGvNqUCBIS`7_%h1V{*lpNjY#H4JCr;lTVRiu z{$2Rlu9F!ODiayoz_Vwm(OZ44E5d@=g@`?dz0gk-vwq2##w?{-NcTCS#Ryr}4Cxy| z#t9KbGHo|8Bn)~$uP#P%Ft)BCOp;-otzVcgYFq1RCj`57AhIpomuw0|W7m4*(0yzo zSn4AZH?=Z@YYmNh=-^;d{>md1ApvIUzxD07eqSNJ3w9RtJ@}h58o27QN&Xs)PiD87 zZ2uVhMVA7j#E)8SQv-K7k^${QSau+j^iz3R68R5h5?@V{C=|WvW)2re>G%RB9+VcS zF~u<9LbSPuSnP(}M*EGUD-4|2>QbU5{);VT?^vxX9E`^$OI!`jKT>bOvJx z7W19^z7+MnzM2mLEH{2Cse@uVn(>n(bDD0p%SrJ8LR|ibNK9b|O6ZQ)o^19FHZ+lL;V#b1%d;eT4yAZa^U<&=kB_q}#dmjrX34>1Zv8}S zUY4(P@7Z4Gu>{j>LE}_ZH}hGPo#F!q;;=bM2RTzLCd777mC;F`Zx1hZK7{={y}}2ka+$qBcoRJ(T2>SnksYJ zAoEy30WIpOsKLZXJ;dQSJ09h-`R9Xmf&T2WbQ~2EmR_9F6PmB%xX`H+qa7LD zJ&U%n?U#WuASFyu+3_CB{fY|S8J&W^iK-8#CPJ?U{7{mj8(zwI!smlkw>_5eXW@C- zh+18{ZaCee(8$>K{$nw;zeKF-bH%yX$F+@x9UX;*EQd|k?i5?i2}AJ)izpLSFrBPh zeUU}@_?1&E*n%v9tK{Cp6pX-gmVK&{9FODsIx*G%+94H{etajmVYz9 z%~5)@r&X9yFZYYz`!JTry*}_%#mFcgAuZQ=EPsqH=ie`i_b=e{v7KlU!fh`ygm zmt-*vw+c>78h~dZTdIas15n4yf7`$YEDGnkW=7-DYm5>ZJXJV|>j{j}KlzBsBmgKp!cz zD}U}iy2j6xQfX+()24pyuT0DC`c}NDOJ-|r{cYrx!gNr{Du0aHSl0+4RR7$|o(FaC z<{Qv2xT(*_QEYut`f2~M-*Z)(CJfs!HteI<`y3>i96(Y!bmvHzT2Q#u;$ z!-0|V_3z$x8?Njl;}pb%sdGzrsQ|=cORR___Bdlp*+SllL#2}YF`_Qve8tbZ_JfOk zc}7dh@}F@012(mc^0e{hbjy>zZYFyb`ahHm2G%J%?ycoR+9VhchIVFKDULvv!2Nct zakiPiG)Ozn90N}3eeY77H6V4vAxYZXDz(2YLqi8crka<9(d>18C~UQ26c5YW+(i+H zPh~$QMfa;I2vKfmTw#TnV|`>@=TCO#Ve6(Wpi1@jOf6(*8_mn#1T}!Ic*0fxieUjB zkK4jxk-804oh!%6LYP~6T(HJtx5!j^B*yTz5JeOjo^Y`oUC=9f9bOflMZLuXjnXtO zUzvf#p2?g~ik=Evd*3w~2j9itThF`Nx)_(JcWr0o5o49y=1d1R%Nc21Xq8yvpHSpu z6#npg_0EP@jwiJv5ZK~bW(MY?f7^sEhx+&%wWZPwX3hJF@$%i0*K5Q$3>x#*=BONx z{oQ^)1LqCIm1E9WTFD?MqvLR8Egv#X57N$Au@ob`?ZjS(Z#hq(pBHm16y!{i8U#cr zA~UpsE;H?~yXzk=Iv%6ys-DmeE)Y^v((HSR(8`?}rTtNmS7az{p2IY!e;=Qz#&~Zs zn1X+WW`C|^m1ASES@b6jSPX%x4TM#qyBqcDXx&|E(6Og+caO_xaDtVApP?2Iq_r%6 z-l0ok(LLE?%(TcOEZQp%?=S<0?Jt&9=B73$z$e{Z(s{TLKrRkOZQZ zZJAQLil{_c^uGj<)~tsy%SpTDkdeikm<u4CrN?TQ`j z92Sr8Ya}&Q441}epp=AFtdU31Pwge&LS(F29PV`3FqWRBVgY?oW_tOWo)dp|);cH* zEsv3!lFSSztNkSi{oh=cvZRwUO)BBwF&4&OB8Qk{-ue`O#p^Lrw_y~YB&Y2K^<>6U zTg%55@qs7OonacKV9#H$u!=j-jM9SCddi8m_+z=)YGgNpHnz2>{eYv=QV!btp9yi} zc>?&{aJfGtTWGer95$N1Yzf6F>iTnFI}MZSBbBCD0EdI9Cb!}bE7C_zQL%x+G}OBMVl5o6=%C^@0k1-$5!K9I+bk|p`A zPqX>I62|9Dqzy8vKR`ktOgGFN_aCiDR(QbiSY1@J&ce-0ytG-h-UUvNkOrl0jgBK? 
zusJpERvQXaHr7f|o^@>8PiPFQ)qnX}5J` z)#p-=zQ`?ZyJgm6D2)=Mz5A7|fe}G1CPQ z%NKHN0vNK$)WJ`CxG!5X!J7vfJzOrYBv!5}1)~r=~FI0PDk|<_n z6Xd{$Nhb)i#UFf5VS%|nAH9$omBU#rtoR^GOVfjba!fCmFMuM{PfmeGQ6X_61=l76 z_nI0S3v*GgpkE{{^2oY@WZ@~hf?AeTMgFoqd%kAYcK_9ddXAEtT#lL0z;x*oEAcx4 zsnMT4Fl?i}qDo#t$o5jEB$qL={54k57q6@@N;LVAg~2ReJXHw_r*66ow40FKz7`yZ zLcSUpmE1_uYw{GI>sel!y67;HMMg$mU03~;`CP4F{_vc}A zf+WkJkwc)@?MRuq~fwAGj@4!8b(D(C2lZ0UkigpRqRV?K0 zh-4Y9^Y!B8stpDNg_db{)jKs9E^!5TKPmg9Q}z5Mf`?MIdX;FHkX9^``(~b!a6=XT z5Mwg?VtkjzRQ#aU@gDN`PSez2H}fd&vgOQG2DMk~6(v5UgJ?INrMRHk6n;;UpSCok zC+uk_E}XQh9kfD`6I7uIOn&@a1?tJSo_aqg$#E&em9lomX+qY*W>I{cpFF+T`I{YN z<+ncDPoF3G5BMtNmZEXbUbA|NJhIVz!;PxcP`W4V(y7BOm&@l5Bdeq-17_we%H?TX z3sp+E>RFJVIF+>_l`LjOapSl~>yMdG^5es4#zx1)e-lg60Oo^#8XqvA=Zvmdv^4mZ zvKW3pR87Bj3kkffMP!tzlZM%Rr-L1!CnTjPTAG_t#uLqa>y)Ne1pDHU-|5=he@Pl; z9*fk1B0>4a+`Yw@F@?%cS=Jc;U($a`65f=3CN%d`_R7e`b6XN8o!H||-pU^v+{`t- zQtC4_s75Wl9x1tonI#>V>VVVb^KXqmnH^B1q@oit@8)SQSx@Jd)D?Xdt&Yd0mhn(2 zJlUHnQAOAyOn(VHD=tRh8lVQx;KQ#tk76&aeqrkzpALdE_erS;QcnINsbK-T8K#s# z4z-%7-I%ec+u-2dEqNAG;K*U~_)}5zXE+58V0VOt0aSLIw#&zUr&aqV9(Hy(5|nPW zzTHB3YU<(~>MyP8L;K4gF@p3sSy`1T=6}qiDt@u7RsvE_isfq5@S&s9NapVOEcb)r zl8W|+ARKMmAM^EAR{wd%=@Pe57<=wzxcmiBlIdoJVx>2L8^C6=H!?!>r63+FOP2j; znX9JXTHcL}iH^2js@?DBzd!$@T!PQh1opwbpAU7?+GXZH<5KVokB;}{nN%fbH%7Y^ zC09&YvCwD=OytF(C0jAd%2~8hCVWA>Mq?2XfcXDifE8%XLcznI+cjU(oC4r+U(4;z z*iR=PaO%thON|6@ai=3$4N{pUol45=K8Y5q`?4Agl2G{+_yz^f$LXs5l;npnO&s@t z)snP;4v`u+ENDGfDQBZ5BApUOHJrgNx>m=O<>+2alf7EyZWvglp)~j!D3_>s-E|X#g~&CJ zttbv`q@cmUK{?M&Ot7N~w8}(WOiWBt&l=y#a?M8)Q-zF3n#=GY-m5y&ce6%Oyze}= zbV-n)4pkY2QJP-M9-QkKWu8)GdvD9my07A<->0(j$EI+Stzv3q)e`W)x6Jbm9mI7O z+tSWiXHaG(EIyC)F=FP?)zePUr36Y?#7a&(7Fw7r)%=?~XZQak6~AevN@C zo7L?Zzi+5M9!uYE3f<=zj$63r(>$;_ZIRbAmwIwWmL~T2@Gw`Zz13um8@#B+u;sSs zsZFO`rexnMWM)}f)ABWG7_#y9uLu8`AKKFe-bnnbxUOpf13>lq^qZ;UENpZnpBjSVh`UC$jlok8OofOCt8w>LL8x3{;q)n&p{A*URg=~(^fvq>L3 z7j6-1Up%%x!2JgwsnQ|TP+HpjI~|m8-f!xl`1fKtiWZ{W?1JMHsg{ifFKTUC8zb*Y zrMO4b&bM8|ju_~^l5qC1+feqY{n2In+(1%hh)YZ9nwNLjIL=cuRCNT{9SsG%Q++sP zY}4FPJ8=>j_jwiqii6px9yaL#eb2IHp{no8Wq1^;C`zfrRmwE3*J568f$;aN9{R7b z=1K6zHNXN$9sEB3abWDo0q`&4^-DqKLKPxcE8S$KgxL0p9odk3tfjlP|eMD>B zRL7#a%&5`vSWTtwp5JU{26Ja*t|Dby=Ce5UMy<5Q_t&+TQQn&C*WzjNezx`S4t( zAFpCQ!9jZ;&zS!i-Le0Y=;L)X>GLuX!jif7*sgQ{2(o;KWW-!IcU%3UqAic^ql0f4 zBsA}{0G4r{%U}X)POy3zK=N;YpKB-n`^}oqD-#N!HSKebfhXROr|}JObar+I&@mri zy6Wb&A0O9Vy-!w}gbio@eV|hT=OX}R64`b_mU%A%*gr60Te}5c44^jvt$^|uv$bme zjJiq2PV)&d0P?)Sn1CV;Fk=9i$}n;?i~kj@vGZ-memepAW^!^8h;N3}qLcan9D^7< zC=meKQj_&;2~e9jfLR1UYM>vrcefSebN4f(<2=eH-TPtRVQ=hB3=sYGrt#Nbuc?3k zOzSjIyDI?@sLZ3|8oaLufL#%ZOun5ubLu8#faqITSg27ZNdvZXZ|U;qO4YwpW4!~Q zElc(mkVfSI#lggDyVNR@=ToGoqM};;p5cw_boH{g*XeP?U#AzU7)&Gu82&TlrpQ1W z*ECXF{V1#$L=4t^{K`4S!=&K((xeQ78O&N(qj?Y#uqi>u9 zjG2mgJconp=B*~Xj~UNZF~=R_?KoPtf}fJax+j}HNPUy)D_0fqt{c*FnPn;A^EvPB zcs_pq{#_XYsEeC64^=-pTrnF{vQiz;*WcA+r>)>X)KV&X zwnT@k0sFFj0kJk&K-_Pee$sB4oMw~{LN7DVEYS64aqni)r=z62d5TGwtwi7)&WiZz ze}P-sV_t?DqS;{mqv^}_x!jXfm73?xqRAVlxaIVImZRf-^z9#!ELJRr$*!rn1Ozed zI`i2us!UV#fvv4A#Ubi&&$SMasJod;m1xs1oet+9)hc!U&IXa6Asf|J9dQIcEc{QNvl@NcVSkc-44*tv;aybNN5AB zisIqp1IStX70AYf;BW+d+ZBL2145OT!;Y83``Pg<$0>=-z58~X%0;J+7l22(24OCu zqeEq*?dEc8vxq5pJqU4%LgAGcO1R^r`i%buNz#o+fWz8q`0 zoxlCzcAWqeprF9vcbQWse*SN-!}d-u_{O6{kJ=s1`wmF zs;aobKx7BF|H;9!Ri^vNm{JfH!H&bOvf~b-pVvlqg|u6Yj{)${b%a?g%VUg-Hps6- zaQSmvm+vcNyg)kVuyGBo*9!C9l;?s*TMk2fY@7WgbyO?sNO zeb_#KZnWU_r0uj)aDOzGcrARQvwpsLWPEzU>(6LxG1@WzX`|VF)J2H^tx42)i{rm=zn7&mkh7o=|3w9((@#xQa!5@`5|!pi z`r`mqmgmJ43m*x+lv<+IGTbTZ(>H}5$K)`tyA9G%K|B{R#r${|)&U%<>0!#!$v39Ufz779h 
zb)dm8<)t-?##06sOczKPO@|CE>EJIFEr*paRtiM#)t{`lx_5zzYiKhX{-a`%A`NGU zVoga1y>Q!p`#hS)k>&GpU(iVz`O7=;O3!A(Sp6JpBL2^3IDp zK!8?Ff9M6S0PE}7Z{k!lV0mEUKAmPr4{GY*kPt|OPe9d)a_44iYintF#BuvVnKe#O zo<3>}AaO)Qpt=owxNGNUXLnvQD=bs5*#$8BuOQYyomL4C>>I+tRc75ExpvOyDVr0S9;V<&+d7@9Wbgcx*z5y&J`)oh8DZBh;_mS7Bt>a1lSvRZA*F_0~laeCs9t zdUz&6INW{_L`Qtzz4q68;G~K4)#r^5V5IEC{#4p_i+7@SJGU@PHi61IN&@v|F%k}o zwU4^yd(Qhb+=}MjNMBfPV)_eAv%6^>mpq$HyXw2a#bt|J^%wSO-)iN!lRXl61T`f^h1&2_-OA+y265@AbCA zh8vcvXNoHNHe@>BQY(TX_arphiqDe87=-SknF#)N2&6B#)tqac>@#qA?B74JIM{Hy z=s;nxq_OUnL03hGzHBY8-Om&vOMldH~$RuD+N z>CBQn8TLaoW4)koCfowtyJhZi7?+j%?|UoZyonU-sVSKlcB94_i?Xt^*30#%>&7Lr z$@!Pj@PNr_<@(|W4q7V^DUv*%+mh;zDkP%J=UAo#I&4`j8o;Gg!Thu zRd3LsICO5waNaZFe_TY?as4+iFhIq3ehc7Ug#cQq#6XNCbq}rwr9MxS;2x@P?!M>o z*JDyj$6YaZa-txKL!@U!4|ms|3&KGu1Dzr=FMo1&sxHdvOXbx-Hx9A=K>OO`$tXym zH?xJuMpk3JLhV-w*n48?9s|olRiVZ#5=wSOE3mPYs|d|@M>JaCi^NzixOLz1w;O7` zx)TJ$h4kbPj_^ui^y^~Vt{lVYor7}nvBv3ele}I*iyr$I`6Ti#>uKgk@)T?;H|JgZ znpy76O0pQvqk)WR%wAEuc-2dmlwVh8aKFAf>=*K zZ*>^s^EgbD>CXKI@Gys>7YP;KM)z^~0C~nK4>w`u@}K8B2z<86fAQ2&2$`x^;S3T) zK37#NW1I%7%}$_;aMJ>A{1j4&5S4Q;3`ji;zUTYw_?Q`U+uQN%yKEKSqnK&zZ+HGZ zAkX{+-%*JvCe??tg-5~0zeOc)7DRn8XY%A(ylU=!M@C$(L<^LE4QtMifRiDL9i==Z z0^fTCGhX#YU>w;_W5fI5h-B#h_8k7q0bMX{wr$U~S2~rtUbkH5=}l5aIKGWPs*$Smc+=3) zwrI)iykn1;?b7y`A(h17z4vv^4fr>-@A01Amvn$>@ZjK(KoAy?&;l)0Y- z;!+x7LAB#Sc`xg=9%S-6K-Q*CKvvc9nuULT2mt9_FPUR$M0y_3zAqV{uP8LxZ64+= zh{)oyUZ3_Qb+A0_fGBJJoaONb~Rtwb$7QShAk3DE7)c zx;H=2InPW)%2V$-4SgD9;@xvD%&BTZ-#SK8fJ zxBlB|sXe+v#r$foy_>3yNze95r~Wx`g_fOaKB|_|alVl1?^9e^o7QcaDsaOert5x) zI7;l>f%pDv`#y?M-Tt(V|MkShrt!922T1#`U%}IJqt=uZrLTy!>#*t!zh0>(z_M9e zs%Nulxp3A~$k;@lHSPn@>fmgZDx6Nq*D=~G91(rA5$9yTS$CRu=03LZ-XicQU9qq= z{b!-`xWw;y@hj`OaJ+5jvyEB(vI3U!pM_SCKlNt3Hlay}gk< zhjTHIzxdDEZ@~dPY0g$+a^A|*|CWDSKAbpjQlG&10izJ^j5&}wVg2S$STiDi^~@b! 
zGkrw<)}b_Hv(#Fp_*u0C`VGqTZ{ZBMi8#DM17Co%Z@l9c>}jC5SETXCeD{q%oZBqr z!M#MiSSh_mAwDt^<@E`NzvaYZepGrqbRe4WD`@%@#4pubEw4{xPxjqD1Kex6Ti>Fu z0{*cxSrRgSQ~`4@b$GkW>TNsVC`U6AfiU6sS@<1dQRcNKXDI}B9g3&-(G?hyq%0Lt z!XcVhopIr)+PCkd7@D@l8ZzN^-OV)!jy?2>l^)(elWKqFO-hZ{I22Y$atvaAlu#GV z7UY&+dez({=K=uA?Vbaym2@2W;ao-uE8aK)GR;!6(IK8m=FPv{7&yTR;@_>C;lqGa zhaAzL!-hC#s6|{TN$e@p?;L-Oj?IF zj@U&6Ccvu4!Zxm^4&uOpiIya^(YR}d^Vh6#Dv660rd@ZT7K|Sl5erot7`{AmQ85hF zap?{>DfB0s5scBl7H*|0iq&W`j3sjWPZ6^Ft0xNd#$l<)i-?~Nm zKN}rw&y4NRVS~;q6AES;@RK<%tr_EPxHI`mSsxP}>69yrC35ZV)qj%$dMUK;WX3IN z(6F{Rjd!nO__gVSGf(*eL7Ld}buHcT!tUCg7p0r6yvlay;9oN@oFnWLcqspiZ1D3dfYOQ;WzUD?iP=i?yznQN~qIw6~NA%Kb@Y zzX1ZB%8IaYFlA8xchQF*cC-A?j~o?`Q>uLY&89z3VSO?4OY4?!DNPM@KTja@(2<$p z!5NuysHtcavwZH`+NJfp_C&P9ft&r^mB;HgvxNEP;&i&^`KbA~^P{j>pZS>dR%q+bm^!J8w?IB4d27n+bHEwtxn9mHrR^ z&SvIw`r>4#r^~r@J|gTk1pNEG_i`4M--@3O=395sr@>g^J=G~XqyVy##rCE#5JCYzdt<33;i^8U$<;R3|uSE}2aJt6Z=i&qu8KuCTqC9C3fp z*5S$-W@u?S*o*VJ1a(L_JRgk0p($aELjp=h@jnJq7GZy@sf1O zYM$0XD+-c^&9@PZHu=%~zgk@jYl+J`9bP;Hi*t@h)1 zJ=bD#sH+t66)}D)Rw>!cSg_QE5P6$c@Tzi2;C114M(B8s%ox^a;m2E2j#Ca$BVf3p z6Ia~`ts$y)Gp<(y{dR5|htIW6x!2RVm`$U1cZ|1d|FDkp(-PT@=e|^u@DJ(!sQduP z#$VT2=Tp@ceJs(qIS42lA*UTS!@DxX;D$bQpL2tZiHdcZyi&C~9|8k60fX7d%FL!r zd&IMbgHfhbtVRo*$rPCp?4KM*-gV@Q6RQ_Y60JumC(6yalp3k@x zh3!ShX5Yq(fe4GE-me$B(EZgzSyj`-I*Wt%eW6?kudwdw2KL#4Mze%8+A1R9I#ya* zI^A;$T#OScTA$*Remw6mc3iJ;Jie4)$<=VjSzLn>&i`5E^!DPr?EP$ahgQ?=pJkhl z9~Qg-T>^TvJHtsF_)$;aXZpP7{wuAuAA=;v#M~GNjWUnM6R6WT7G4E&DeIE^o<1rp zh4$Fj)fFt?I)Im1weG$omjG6J+SuxK=E|?1-Wrc!e6eiSkTnkNJvMZSbjLApQl)(` z%Y|`*q41}cyHr5BkPtOq%#)FyceY6BKPsZNckjRh*_5yEbb_R)*0h(y_~ZK8=XI>3 zn1gCqyq=-G+PYV7r6_*lPe(4%lP48wC}Zbow;H?~5~>nF z2oAxf8w&Bht~2Ry^E`KL)4ui6q51ruhFQLHjoH`F7t$wwk3L>6_j|8*H^ecY@wtEl zjHTsUM}&^wMpPX zM+gv{xtHt;34T+$PIfgXsi-ZM_ws#zBp~*LI(V9LYur1$z8+h*^j^C^rdD`vjMQio z_P_713a!;VuJCz2#A!bi$_b*m)tqsxM(Sn$8-tOdrGOQ+Y16rZ-e8|4z!c7xd$-42-GP&q6v3|Ztu zTFzb}l-}a%`@1#o()wJd14Js*W3f6lbstv*I_%F;OaE%+uY9BR2{dp>5qlo!5&W|W zM3yeiuQf_$E!i~frr4JH{d1~IwdG?X&hc8*9_73K)iWz(eIzE%$~2qTxYExWh@uP8&g|ZkK5VV07j+pl3nkdA0!z zUs|W5YbP@cc`v5udujtGCnnG_oLW|yP~hSI*nOl5V?FpRz!g#Je^h0m<2gZSt`Y}%Pyju~8dk#Cs84gX6QIixTLHj9YYXsMxblc;blDmWVS`mmSoC!uxur5?Fe;ARZ0^y6%fA($0X3>bR@m`e?g7(y~f;^Qv`o zb*WAVpTv9Pzf}%t%*Og2QWihTREBr#t4wncg#S6Zf>t1IsmduRa9vur2$Oy|FABmF z&X~%qfFTRVU)S8zN8Uf?%loweB;4xtcmqJHM>aPRj4xN)bEA!c0nojXWDvN6r|8$X{# zYbLE5q=Gj6QTxOdY&)MCPkAnRkZ0aAe^L!?mcR{W63zLym37zCeY?xQ z$X>Peb?tskjQ^%ewf#Vu?soBUkZ@TvM9kY{uu)hY4@Iw^6Zoj<*mn8>XRt@)d_UFJ z&7C^qV=3Tb)fXk_y$($br0u*`?#kFKakeMSd1Dn;i;Uy!dIvraciwJGxJP~yD4jB;q%pHtkoa_`LiLgO}-(f?Ik zT-?yHWd%ys@6(uJ$2}4))AoYdN%VwL4lMR=r~Kv5*uH=O$t`#>TcOR%&8_h&m=Ri9 zCy^c*9WAN=P#@+<#k$~f-$_J3OFu4dZ{T>T@0==AES`1Kl?tbbT_qfd|$@v zEt^1%)yKZ@Oif=pfj{`X_c93j$LsQL%QrH;o;#aTe0E^-?L!0~1V&}IYQ=p6n%BQ? 
z`S!8|oN4CtkZ+AjFhk|{yG!G+`BBv`?$a$5Y8@JP6(>l6Q>4AGo^TS)x`l;^~hK z%X(#I!_r|!Ky}Wo+HSd=wjLG+*#l;I+IL~*8aBzc$DD}2-d)Szp-Qyr>?9?G@#Ex;0=^HODRWn_>n!!R&%PK65*Aa97>v2DyA|bv6_pugV|Duct3nXogaM-#> znR_mi{6T%LV(~i6sHlN`zc4xs>55r{7>LyF~=W}xaf(}>s8*6SG<23|zj zc;<%2`kmf`^P=Tnj>|WTtl0cd&)6L9+Akc#4ziGpm1N5a@BOe}UTA_6{6nWRKiaMz zJf1v?wDEll4-N@dJ*px-LuO(58bE^J`I%I+-ddBVeS2=X0_`b#+UjZb_0sR>PRs$SaYpEa~@y7Lyxy$<3!QZh5kXTi|Cl{q+LnXig( zpvmQVcDZ@Ad@eOK-NEiCa04DGR+bfUHq%SqhzF;Rdv)L4;NTCV7Dm4vmPomYQUQ`H zsHTF1K1swW7L$_-CZ{?cUwzBR*M8dXXxSW+(kz=B8IewPxabL{`qSwg8u4k@oez%uJ))h-KU1y z66<>(xcKeQE*j5oj?wtf2WY6;5Repd36i6ot^+M>+0t+slOwKAnmotfw)b+sbxZEw zKL!LLZH#hf83CZJddg!*RvVT;X{5(KO+U}sdPc&K0H7;>{|-zpfBl-yWT#7HL=&p3 zynQI9TfG_dG?HFPsu(%*{1E&II&-59&)b@7cjmc^y#HWBfJ=I_(n#k)CbK$5kWwkC zKS?<0KQ{~6+)1R%d)}Ls%ilVD>F@36@pQB*=J)7z6=@J17~wUJ>qT6bT=0tKjPdI< z2%(h`ju+VNW|ehZ9iO+>+V({$KYCqF0{XB|mHYxTGuRf6oI2VyF_c&%G^eDzj$~)o#kY~>53ZP`XwxRY_4BQ zKm$@(@}eod0{N#^b#=73Ff=Y9aXAN7KTZsFPS&d3<28r&BaY2u#Lr2|HfCjdEQ@Qx z_S47q+qVgve}j&IMg^a4MO81^&%?M4Uez+S_c74Al9VO|pv_mZ%;eYn@^JX(o~vOM zEY4-J#Ysd&^tW;&47h&|NYW48wEf%*`752#x0L0 z$$7qUn!KV=rc&DD^14|Leu(#rl)k*T3jq`~*a17k{mU8AGI0U#v@Z|A-C!mu>tP!c zv#|9kkcyp;o0Uha7|SH}SHk8$1&bKYN2fwiR097Or?`hL0^;xOD|Yj8_PvP9r3{#`u6lWDy)AS8;_KRc=qrUNNLW04A zTc*^7!^*j#dp%QD&`0k5DVjTbTpLQw)Muvy3#;ErBS)1kfV}WwD1T0xmwwxl{Mfh+ zAmY{P^R#VXGWVDAjIQ?m;+w6Bp{ezOdF7bgv#XwT2PgXRp`*$?6ca& z*KD$=C6M$ur%i?Jw8~UIC~Uq@_zV?}hy-Qzk*;XwmO&UwBoCm9T3KtND2fY(Nc=lv z43>|35TKa$knWvx+bnmgCwmPT`EGh8X%vnQvoY#QBNA>p)%D;ME0M&j{9x2Mv0s+W zc2*g$3Dl=vT5d;WNmN?!Z^vOlGNK><1S?IFcNSOsE~7KQSEi;zPyze)U-8C$yG#(Q zXk@bG7|mySN`wysX!a1e?U$r|&y`RbgU> zgCNqo3I3iHToYt!HH{B}#$@Xk&YqEQrF4V=G7%K4@Ne{-+Y`Aj58r$U0(VrX7~=n^ zH>ns8SG(?Cq-a3<8bAr;L2{b@Td$oSkIVtE*jxz$0o>?M@+A2T?x!Mp$z-VTU)gzR zW5ZIywsh<_*UxN^^049y%)d(ISL4Lve%zI;lJ)%lk$`}IA5GgG)R*c1N7GqGRoOjl z{LqcSLw8Gem$bBWBi-E~At2q|ozmS(NJ@80Dj_A^@NWKVy?kOV!E-qG-m|Zn+4H-= zWhKVcT05?+`Xn~qDfj6pQ1R(k8KRF5(V$hP<-0Sl^%~V%Z}K?S-OGj!PgdOx<9+!D zD8$6@=9b*Ssslfv1Fbgtag$Xg88AaJdig0Vx!b3{xFpE+>Qptl9DeUOgBG<8tibAOqD=S$gC2K;lK7TCje4#%KsC;q z|B1jBT=f`mD%3&JX6N8k3v>gw#zc{PvI}#){m-<`!yzf0PqKna+P>9BNn3#&!F!Zr zkz#U9E|EI_#fw&94KNs+(1tr@LQwbu=b__bOu`WvC*(Bg+Uz>>MbW3EBiQ+=Yp{Qd z$!22I$`A)9Uy5pvCEl*U;McJn1cwC3Ah%;csDFltv7j#Tm&8CIWgTIkeEzh1u4_zN zaJqH<*j-MuAn)to3~nl(4IF;s4*k3FQ$j4%K;i<1Qu8+&_S_BQV;>Rh2wXbc%$SVH zlPpMGIGt!Z6iPjYiu7B%0>o_-6js#4{)EtYHX2m7c2i#(qqh;{5U5C)Iz~ZJAqtaGWcN?prk|g8wenx+`(%we!=-v0U zy?AIP^VqoVFmIU~E7EWXoYGAjO5V3o1(_Q+N(idA5ncMSng)DXUBBFc9nL~Ft!4vd zqm`9i9alf5&?H?3-m~!l+0)ef2zA|iJ$2R@dWHv4$XvuwcAdor4Cp&#Rd<|ozSpVM z7uqFZg(NHT{Ts{s?)__c$|%{bS+(LOz46BX;&6k@AaBAa#G(ur6kWnnRR7#I4|QMC zu#S7fVbbr8m=tIXo@pcYc{GY}aDfZ`ekN9SOy6bb4RKZHs5~*C8clI=1&s1w-uWzB zR?hcoHXc|T(wj%qs=g)rd@ka1(rPm=2iY%;zNY^vI}!%L+ES?2rGjlKj-UY)Ae_l+ z)Xw8(C^kOkceM~JV=COadG)lm9gQR7qA2w7tscCSc9XLi$LB+LvggB_1G$3PlQwBH zCbT!`C(WnpwV$4rYd=}fBa-7DH1bLP?j;Zx%`Tan7GH;b;x(T_vqE=n%C3^wXzt z=d)ey(>uAgy z=BS-@+F)SQ=M2TZ^L|iEl_VYxgqyGiQW$fnxcNH-iWC1wWcqIIQh-^PPYjf1mE

    VQY1#FVz#V%?wmxHcv+?<5FScq&r+Q^nrFYVw?La;)5c{bA} z1DWfu3{F2VaCfWXSDAHW6r}eS<49o8nRb$RL1FcHp)jR9eRBS&#^;2R>)1(>haKE( zwhld6B9t~ZRnCGTS_a=vhe)TGXkn$)jbQP4=21{}@=O63_BQogPRw*#r5I?M^y<5l z2WC{OkCi*NopL{fjDI^t!axZPoln#)(Tt*@NO9lz_=#<=Jp_ImrobMlQjQB0TqF6Cd1$4m#rnDt%c%& zrB$mk)HsuVv_D$Mrr4zDJ$hD{xK~Br18e1;hjeGQyqpTO_{u`YEPLg(81>Y5jK103 zf|ut}S23s?6P0C-1b4m&&hPI07iCwpk89-e?3k_s&84hY5CeMZzMQTe(2Hh}E3f3QLUBby5m!-J7H~0r zF}t6BUm>6Ng;P5Y|AaR@(Mk4}JC zBXYVJbU)-j6j3Jwt7#R%Atb(1H=VE<2j1-{g)R1Q#Ym@mc-%S$1@*F&9@=4V|LI%Z z&FPVp_UQuST4jj53@prCAXHC)0BYgfb;uWEFLb97Q!`fa?@)unQ9Ke^mmd860dRUw z#2x9doA?u#Gk&T!p9HYcoJS1ow;)4C(D-&R&J2G=VR!=dtV(jEP>xry0~(cs+lT^s zc{Fe`vTz~V6oJr~R5^r75_Y)QPN?-@^UrF@d}wuoZ7_o4Yg7Ml!~cDD(DINyYq?Q> zg`?u8t#hlwqM5$&vS`yNoNRwfTJ?MjN<+Ap^zb`J<%$E_!&E;jf zDZc1%tb0@W6sU;k^}2P1v3w6=fUgrRo9B%6bRAT7TD zsu=R|L6ZuLX+SdD zi=@YQ&;Mf3ma5?Qwb(~A2wZB27&5@cvuq<`Lf#asXt5y<1aW8D|AtesZA!NNaLhzV zXX189o?9ERC%=G+Zq|#d5~j;37w|QZc&bVwKp#;G1NtE#5YechBS3sKP$*gAC_B=^ zgN1}pbVjNxytuK4X+nRh)7mI9;lDZCof2P57u%X&qV18N@U}N;zB?B-;AT}T4dbS6 znP78wD3W+44x-9=$A5_uV<};ppo|?6Yb&Fenms|!k=D=S5ktslYgn#oqYe0P)heE62FTZhDU7_Z%)Aa_E8M^6^010#3LI*P_zMFIJ0N zH-)=8pO@P|n&xo(ufMMp$#*v~4L{cTdY}``wU#B!bFA^AhJH#~ue@S{rtDzu@lRk_|9CcjSi`+W+!{IBWf&aOIOI9j*Gac ze;x;28|Np)nX-F2+|bl>(a*haJM2wWGSvygWsY;&iG&xE&rZKX4+_bkb9x;4KIS3} zuY6ri5L8e|){TK5t>vqnixq%4F*rBoLvG zh-v-E`S5oTUC0T~1N&{FhePq26*p1yr?Pp{ujm~$XeP*;n3&Oj3HyJ1f%&r9;y@Ry zf8?QMb zL6jU6{bA{sM#^}~gnxdPOLq~gCn@{$D_3xvq17L$GeG-S)WB~ z+k#7#?r@BkLKNA^BG9FfK-vh>uVTyQvA%1+LzqMM_%&6nQJrTD7Kwru)6a_JalZURLzqkD|na{p49)L3AN=7v$1doWjEY?T*p8AN*t<@TKUu zIEm;a9m6^AoDVyh?fT#d`vQNw`nlO4PWjuvvnt$$-cG6=nFNdA2x#@Aj8aS8Tx;8v zmnBoN$ecrYHrZ*?ZYuPK7Y*?b8jIIAQXET4*A%_Nc~2v6y6^2_oK;#y+8b4wmXy>{ zeg6bk!}OTR@?(@CQ3&&qMTP%2k-BKN&y*ivxzoJtd!*L0?4lqk*DElT9ozmKVtQSl zkxsFoOJQZ2tQr|UY@`I0rI0t^jPZ%d0c!ol0H=-_8+k#uBp4C;P~T(Zs7s7~W~JX5 zTw-c>busfA^MV(xT&7eRFzMyYHQgmmFs{}lszB#EJP-M%Xh%E*ZoSK5(J?bbtwAlz z6(noQA?f3XT32z^xA1T_n)y0+&R;d{`E;4utv6ZQ*%fQ%O<2(w^-zR|5TYyDtwW&x zGX4l4{ncbQ>F0lNr?(tr$AZU`)oj27Ls>1BDMZ&03YoC2-Z_cKbMsJImNu;g!1V@$ zjiq!h7<2iNw>$J7(lP4E_zmwOoA7b0C3`k|Hd~mpQ26zZ+~9e5hf+`Koxk2r{a#RI zf#^`AaIm3BKt?U&hHy~@Kaou){4F5mtOcb{~1%weZDa()#GWU9^Vvo%P}9>y(qa2FVO) zT-AJhNMmaIn$9&zT!ZsfDprWNF^6O^PQGj%d8}4|ehvN*Ph_Dma%sae#l~z5O(hSl zGoF1Z|A#oNvF*iO`W@8%-{;hQw6$e=|Gtozf7Wf&CsfcSX`*EPm2|@+ElJI$0M&=! z4S{DY5%aj$BC7b3f}q%$N>yT+U_(eO{5s=W`$~gt5Amc7!aJ&C zLC@wp!pno$v}eH?2j)B*!x_onvYX4w7)>y<$lny5D?9c`PapTg;BT~RD$L8+-@Om8 zAiMHLxxG=??YP^-BYEDR^jBfPmZ*lVhMt^zTPA*tbKy-x|1rP^8ULVGLFN!e#{$#& z!k4u(c5ay>g&kflGa?~Ex?|_9#oO4u!HluWcM}10775hN0X{^hBD6Lo%{+sAd)s_S zL83;>B}<9k9D3}~0gXI{bbPk5^pwu&oSBw+!7>CzJA9gQo&v`STVN8uw5%*FG)ypu zM;kjqHCL4(gb!JsC9iVcCNx<|(cPYyujRWR)L5oiKapCN127IaIVGla zZ6{H}{|u93BxI~y4lh}i{b!Dhbfhky9s&b{{e^G2tzMl#O+d)0R^vgvLSgMMj|)Gi zqbwF2odrh~G2b-g$cSQbt;JjUK(YI=WXP0p@ksFZ&#pnY7?VFfBWhFqKN2pgz5aza z;hW=_)SZSr3?ZaKZvFA8G`Z$S<_LbaH_+IuRj9nC;! 
zTOe-3GtQ(DxZaft&n+`(iiwm7QSGbs)G3pQ&fAcmpO+}N*c1Nhn1)^NiHePe55pQi zp~wCw?qj=4O%|br7E#s2T!9SkXJYm zfh;}4j@0i`-o{_114g?fc+32cK!fyfXf_56v2Kmzx4W(P7h>85Elq;jHr8@yHwq~4 z?7uG%8*uUPbMt3Q=0H(IcPiMl(wLm0X(; zT4EAXQoUeRg1_{xin(g%`gH-En0zjx+ne}DemxVadx<)gqOF9B+cT-`T3c zia|EsCz3}Rkwk~ikOyfJkn1p7p&9qHSAC(ROL2yjP-rZ^m-+qh&9#gnC0?5kWg~2_ zul=Z0a!NqZ@5VFV5`NojHv{@p~MBSlV8vzp)E1ZoN&Rz`9j7+xvSgChrmZ?MZTX z2Sx~Sg+_SR(3sEzXH0ZyfY~1mopegljmOEd9RCxa^$@jK39+KzMf^~|PIz>;dgPo$ znDwFF2SDEiKy&iJ>u5(sWRU6m@DQ69hvMM<@2yRW>zezLrm!rqVhdLiBH!VT94MP|ymS_<9X}8>cY5Df# z{j(C1(o@d^n|9E_nXvFYdGY!QkHh+X+9B%2blx}AcBtZJUW-m`*^0r2x0Xst>*NQp zmVCUbzuNwwAH>Fs zV7osLJya8&i)zx3`$0k9<)q!w_eQW{hy;cemcFfO7b$!%pW>iSiIdV(r#CB96nA-;_HM1K)vY$i znv5m*)MB~@0U1x^!G#CMg2)4fhD>FXNr?q)jdf0rNd?0Yg2e0$Ny+$h?0-pb-kw_s zSXOu&if=IFrO=vLSJ^BS(Ga52Fa){@!?(#)1!FAbh)olO)kU*~)gATot5R3&ElI*p z;e?D0Hke?Q;3!Kf=OUlyu#Nrsp*uk(t(6iM=|d~!&?HWiNu1ncg&pM-oso&+ifksD z{Q>pQ#j2|m964EGq9=T2aK!R0wA3(?Ekv%71Bt!w)3T;?)~EHOF{0n7=OXIq_wtql zfguCqv^Cce(0kFTzHOXoj=wKT*y6}xZNo+BziMN{Fl9ia!{uto9B^R^DOHuV;f5v| zhx^ibzhR*MgC8sN#gCG?zZep!su|UU_LJj$T5??r`nTWjM;$mO;r~4FwsPP80F#i$+;i?x`yZN1Fb3 zOmA^UvqgC3a9@M?`iCFiR2!eGQUAzOcM^nD3FmJf4?hT=`*WXC;PG)BXfll}dK{>g zqE_hAtBCkeACxZV4L`6f42KMdu>F3ea>L#8gC?}Z8f;}3@$VAL&of?b2Pq} zU(d*ujr`O-eCXoom^}caT$CJx;ik0+)pOlZT%Lao>{}VIyT!8IyH4X`W1yUPj9XVJ zM^#A(0%g_?nli`#F73}wosX3~{>RaPf)!885h>Iou~E1-dUoqZ8vR#!0w*UjvzU zFJ=6}X8*gH2NeshS_VdBlPZc>PfjAYm=jUh8#Zn;P^f3auc}mAxWvO5uhhQR2}PYE zdt(AQ{>4E9!v39Q2ep@l))lGl4SVA%VIfzo?G58tZ0n_DuHa$0mV;{fp5)C(=^~W&|X=>joBEBQK2w&sC9Lj^|1W3fQ3O*R|;} z!~g?{W(S)Xh!=RHCKXU=ZZPX^w(#k55RGC)cw$Yu2i!xk>@|1ghmiq_o@)}?+@o)A;_8uak?mES7%iFGGgk<|g z2`!q$mhSHPLbv?W2c>#ySYAc7#mkRF!#y`bA51fi_n8u$kiEqqa!3@(sw;X_npo19 zHi*f~@@C8LJLXHgkh?QGDcsWI0W+Xh8Li;+;IT=rkTozehA0+J&Gwfv+g6B!viDQ}3#xmRBZ6d{`1@<_@UX zq9%_wlB5!sp}*zXJNjIAR~dYHJ5?Rt{kKBvr)B3-tj|jxxcC`2FmUp{<9m!;VsG8wFEBM%<_b8Cb*( z>DGOhiIU_{V?-8sRQw*86W2U;X|p9(QrJ0;FNyMW2ArmeV1-SK+AcG&e)lpNP0a2LcQk?bF|Hbhu z(R4+VJu8ZO`!1G+%H}^CEB(5!f82m!yaxkj8)?|3pJ5C>&G#o*Y;_qib@KMIORmr6 zSGlH#lp6oyuw-zO3P;{+GT|PDkGmPb3SU#G%2*K!A&EcsEONomM&qYi8sB}g?7Ke- z!7~sz3&@EKDJ)8)lfm5Nfus~94q zj~AmlK`8jc<~juHQ%?4!YiYinXC@m?^g$cfr(@onk@f{Pz!| zP$c43^Ia_+Sxtf#m?mrzCKE2{8e@lkz!?KMfGR$OjN45cdn%lZh+^8pB5rTuyV;J4n}=r zW8-JjBKvfSP;`|OTN-JhAkiOqj^D3#=er;8=CxjYPyMb{&@_=hT0bSKC8}1q1gE%W z3_5fil%}Spg4vky3!y$X-;Fmxq4ob;^bvu7CK+k zSBiyBvYASk7q#>-5usgQ1UIHA(MKAVE)bjC7#^jK4)^Rp&i@Ziw~f`;yMsI_0b)v z)~f297ZSKQZ!T#%MSyYsCA2_s#ek`O`%Qx1BCYy;l|FQXNxG&FcE^?V!05_%PScj0 zIn)ysZP<@_@>S|J@5Sz0W3P7ZX!Ep4yl z@g!5AV|TYmRea3H(`*=K^xB*XDlkxkBVbFd zU!jS0Kf{FXekTQ8(JjLVp%E=?089ZG$7e&$K~XY)!(^hRm4+d{=uYZ1((wXrH-JLW zo&`sS;wy`;i5?Mp$Qclr@QF<)dMz@h|^}(@8$P4z~l8$?m$zu7`z%1?W2aECjvFq)-cX z2;~pCdakCg$xjT3Z-Ylp5ELQe)PWG1V&9I2Pa3DNbV%L}46zQ-vn~3I^IVAvW%|5a zBVwZTNb(cx$T9F7MoRUwCEefdTg23YPoc!75||~aRBb}_7)lRKbN6uMxClAD zjB4-H*ZqGbeW_9*d!Z6=vyx~bS%Zxe$H3S+RDe##fG^=3%|RSzE-at%0_j^Yemy=H zJo(~!`L$V)C}MBP6qjPFf~tn>Kf}aaxmG#Y-~b0RJ-ylKPgfeeL*lGvwpUVC!|+mb zlkxObzT3E&`m5caZzIAKT>nriC0i>TEA!^P<3J5(Oyp)DrOyX#@1>@Yl>0Mts`T&B8~4X!;RzZ7j<{FN=^ZKXXHanPPgI_+sFwQjkT@Rq?5XA@ zC^W8@QkxC^Y=|Rg{gXLk?h(-4K7z3gN8cSHMMcG@hZ_WpfBD)>gC_N*rKM0X@Cq3m ze;hwQtZw#xS*$hOn4V_$Jkc*{3L?i4cbW)8B{)w5K(;Z?N#b|@z8o2I?oVJo*B>yJ zw$A|a$yLO3Fgi&|puD;HsyIoh16=a1%t|;NQOe>uo}JILZlPCiNYYL8{m6u~SoQH0 z%N2{y?J`{`tHgzBIIx&yiQif%&_vH5*Cha;%54h4SILkEzyxZ5-9!G3z7v|@W-#Nd?89uYl z{4U<7#ikB0!)kk)5KLt?{Aj3mG4AMZ`y=>!XIwR(C_NkvR9Qd89p%dqfO`rbKK7NJ zEM)wt$FD4JR@^oCwCp|f`Nqd5_8~JKJ0BD5?knwkBHS5E zc|t(NpW?F0FF4R!nh1AzeAml^ylSr^UgfoVzgw2~DOKKOFm7U>U(=j|lF3Cf84t?1 
zu#WOL(XYaT#$4e`e0*X0v2J($pT-aGeziFll@p~rcXaXk-u_c5lBP-eNotwIhNnZ2 z{0B>Tj^0~7l?e)v1^#2~#TJE7+nE6xSR7JY^Fr!uQcvfZgCz3SU9Or7Y1`?#=gYl= zeW?XtIU$g7c#kgL6`nhqbDd6gttOHn-^$~zwnPnV1IFv8KPg(TQ4HI0T7 z%Rjsa_dPe()_ljl7`(KX?J;kbzS# z90U=(rIa}ru--);|Mh{{jWqzh;_!dGB?5F#Fv;aYymxP_=(AEofsj*LxZX5OYs1%wB< zVjv^#N2~saF`CBu4s_5LYV^R!%fs{8P8{pYK?;E5oWA+&#Q=9Wwztg=FC&BW!(>|f zy;cEzuJ>jO0&3E=W0icrvFt7!#Oct{-*qqNCYF%h;<3$7@k>7wCoTS`T5$T zoW9T9uS!k3$GjJv=iBm&x1{&u!b7KS6Z{CvJU?-fX4k%A(!)jOeMmz@OptVlq|mZ3mN|pBs~jr;1TiDwD}M;*En68_2(4 z)IWOninQcR;+a^JY$65`Id&bC5;^)j58O3mZSc9hMTBNB=Vy#$Xw*O^z^!C5rSv&& z`eDSZtRxwF9+_48H1nv%@^n@rDI+dZuHL{jiUK{#Mnrhe?0+iU`dfqIS<+lH0P&Wk z8SQ7pVY0~W-a93cGlW;>Ua=!tG1KmpIb|_yI0N!p)*I2}ryp4tyc!=E;3rO~jEys?K#AVUn?>GUV-VbshX~~;>Pl?$ZV!z7F%>2{r9Gs^lBED@k zhZzdAS>qmVZdE#sp{{^~c0MsN0T5SB00&!CM3o|kgA9k0-^^rt`)|v0%{gq(8$flG zDHEpL4dmKEAcRA~1;0s~qa+x+!SXg~rAH}8y9V@S0*G)gm%A^e={X`dl^dD`Ne)|m9IEX7Mav+l_SFfn8b=+V42SPYE4-e1~ zhT|P4j({y<@>@pwfcN@g5P%xp*6N4+Z+tdL_?$K!PrhBeA{PNz>#b6@HWO~tfQf+t z$(y#X4Ke#35|u56;Pl~wf4Aw6TaU=K0v+))SRcOqrYu(|Gt-2zx3Pd_?v!lk_G^M#24GOU$ zfgLZriD=StxBu<>hRE+TxZ3Y^n^&#tzRiAprJR$G9}T2;0uh4>%ikA6=+gSr5@iaJ z0%RHTk&Vrm6(!F*A;{bjIOdk$e z16`FN(W?7fHYD*>CjY&~*5~`6C{|&s8LRJ9D6BGs(#pn_ynB7SGjji!K1A)!R{ zQvnS9PAq+Xc`B;^YM&mvLp@XOK7Qyy*0~xND zIGjKQWJLGB9SwVETu2Y-r)<5p9R|rP?!0D{&4Jmx^~_f9M9xFJZ{}kqQ(NJ%*)Yoa5XNLT zTdW42GU;8bSX%D+-Tc}^`lzS1s%zj0=qyJX{yauz zcznbF_@S6E2r1SJ)Dr@S(K#MO^%nDGdf%MUyH*M7*^RsB+NffM?T_d5YPw(Ay;-Yo zy{^W)pPjiZ5Pvo$%gy}!;w8Qmk1ZYmD@q>Dpjs~GQkcNI;cL6y|Kvf+-cVF+4!Ca! z%HX{$FJ~x}`E8i$&fBgdAa^v(L{+0}#Nj)+{UcWnz;Jp2uqcA&!>>`S9bYRs)4~T- z{`js7J#>yR9Lti=OSCLSPh7`(2?&D|xaZ1#6?*gTIg0gr+aXI4U{4DvvwcD8wwWw| zXut;49oj(d*P@2 zno6`>l1sqiNE@qpvDWrugW1o2TYZMU2kqgy$gltsI2)ID_SF6IbQ<9%C*_|qbDZJ! zNLCyAj@5bbv+u(EUh@A!wH=S(=jTU#7Ef^S=ekbtUjPCqpR8B(L ziBLNr7l0;sykhRY_GL}_Nu4CCI1BhaiLY!;@FAS}y#O5mxSo#I*3Jg4U0ots<3Mf# zRp93C&L7(dc7x)q-D(R{Wn-i3^W$Cj{d_Io-r-_xZDr+7Hpu7zk-)b<91~ey3i06Y zUjZ;)__}vMBwkotd_yYa1-8WN;pOOmkyRiOwhm&#E1IuyQlt}%T7O+Lfruk6y8jo^ zA5S469Y;FO|M_ou8B6!$Imp@ucg_F{(FyzofEMfns1^uZ=)-vD^;Tn7YyMC72JZ73 zBM(miqg+XQ*Y(ct{__6*(~$G-ScWUW54-|7fy1;bND=K2U30qU(9(kmZ*<_TexEVeQ;? 
ziDal-dWE`5N`dr-kK-BlqCQOc?yh@+_SnUDZ7$pxDVk!kTO7DYZ`=#}7@GoZHyFV{ zpa+c(W=i?$_5S}Zz-OWB{-m*~q;jtT=u~kA&nCK4E%kdWM5u&dxe^@N*5ghCTX%+> zT?j?ferhmwz4q^&m%Ce#g8S@0BOsuW(D^RUzFqr|k5yST)@z%(uiW4Ei~7pye1mZRw)vpGj|lK4t#+jSn$Mn+asw z3rkCFiywW!dIS;453B$MQ(A6=#VbEU9iIrv!lI%+_g{n0F+@BL`$tFW6g6%PXri-R z;^F2x84809RRG zK;~O#zj_MDx!`y5ymtv^x!!@}K)gViAw?bi47}h&Qr;tV)_4s#u!rL*0MCJl$Ii;i z3InQ)5C#JgaqFfU{}W-ajr*EXvz?Tk3=|Z`7Y@RRA?X;xKX6?IS*?>~V zqwi^qZRSK+BG~d@l3VNrrG9tp58oTR6S(~`qZWVtfP7B6p1K}(qe+D`5yVZSLyOH& z>1rFAV4H5&S0tGR-9{coXXQL1bKMty8%w;;bvx%zo4jOg`?sdjNy9`!?2dj)UPnil z1Qm{eB~x*(xaHkjdSx4e(P%N=@pwMUJX%WFdDJk9E_^fvo*BcsiprxZVhF`yUqd0b z^TKZpNnytsnUrYZ-Rr|12Sc?9*NHJlb>`x{Fep8ATj>gTmV z#bIn${~Dod88K4NRUCiap5T(+W-CJ?B!5id2k10f#E=T#$J2*Zd_?Z;1#*_X+M3)CoJDg|8!qiUi@7Zey*935Je2~Aek*2YOvQGvlFVIL16QcvPA>oNCV zEiF8XvvMU^AZ-mpBa)_nnvU~#yZe(?d07^K&{4w}DfdTsV($VhW+3A;6fBOG>PbjQ zDlU};azp?vH;u&rEdN#@V!=i{0hr8s?P&H=mmt@6uyMaO8I3jon4AYFl6iP|hycl( zAQP+#d{N4HxVW&9;r}ugWz?*R?BCv4 zdpYbX4sxRX6w3X=J=3o*g0LvXh0YjA9(UeM=C?h)vF1Irv2MuWu$KGT+tpZ4OLNwm z%LFB)w*c!$ZS^>tNof*{Je)XPqU`zf+tKca@2AAh{mJxd=(fL_rfUpl4^HES^1WmgnYXuF9 zU=H|26zlDf(xbfj9U4PK>UXr|bJ|{8)1rwet6aMMcPCT0R8dba%YGGfZSH)ChoJK3 z*lMY1=tD7Ny92t&6&1mwk5)~NTx;p2ruxK%tJOXlH=grw``-tB!9#(8{hX`JT-05-!<%xKls)d@1xq;ZvS;`MUgxn;gB;2mUNcwg-1xG(5{abtj6 zPBo{mheuH)qo+p{a4=g&2FT*2)>z`GeUN;IGF7{ImV)MH4HaLyHogF0 zuuz`m@a%5y*S+}hGqYM=VG6R8&76-66hmHxVP zyH^d$R>_H^W3iGDan>>du|-A2Y0k56MI`L@@#gw3t3!re2logDwNCds?{nSPDW+ky zONf1knZ(5Etkn)3I%E8g652tMt6fnl4f|{yMz%iHy#+wmnIeAo>psVaD773~CCNRS za!z&*p$^VM-`x+NGFQOn%%Fd-aQ{S>3oHNo;%?qH>;0*~-&ZW|Y2G~v1paHb0HV&W zbXbKZwK;!sow*dzOq}Cg21g87+;1}!KI8~!Aj*!^n$ST1>a1bbxbj34wY9OqMpQZG z;3W%ps_K*&Ud5$Pu4ayiAGQEdMJPd(vX}3sBzbKS(Ni9H2fSJ~C((*w=Eb&QQX%3+O0max^&N3tV8G zg07KVxiNlU*qg~eE}*EHrtc!Frj~Trk?m?Q3qzV-T3+>2wR~=R+E_=^)U?Ruy6fqB z^_Bes%7&Ta7R;l%z(WT_6kR<%)B5z&hrfGsQ>Z@ARQ^!@f%G zIO}NiEHzXioOVoZod7W1EpSizEBKzQ`VD@Mlo63dMrOL%#dBfnOXeYAhu*LCk4RCt z%-@bv4ZF`4Rt3=rPP}4aw()XQ%lDmm5q-Z4Br?+%qnC@xU?BexC7`A;3S@HPy{~rU zK5pKlKkW`P)c3h~dLwl6iy#c22R{TI8A*=b99AKq;3YrwS&<;v*HRo&3{gqs;J2;6 z_i>ivxAvQl53o>{DU_cmVy@Ey+}PpKvtN!}X^eo5H@aWpX2kkA266+|@4sjC9NNu6 z>icwmWg4nPPou7tu~0mQS0RAmklPinT9Ay?>)QUqG)~xKCgLrSj8_S)N|LQ_y*YU! z4m-dzZf-VBVz{B~AadLr+${0D`WhN3XDWl+wxw~!(@XE3A5PWeQW7!&-1p!T zoOqV|q351Z%48fF(T5%2%&ysv;z2dNK3WDd@1yDLra|P|Am;;CNP@NtOsg%zlWM6Z z7@*+=To=BSS_2a5+M1Bv5*uVM+Yh~<-`zlhH?pKq)d+}yiprW9F6JC1%f{u`Is#CT zuW2H$!`lACGx(1!CbG-j+DDEtIj9@Ifm0l0gg2vxkE&Pl8FQc|*1#mstf2^#sjI8! z&~<0g$7Z(*fIh7y$>r*D2aX!!^m&uEnWeU$$f0pU&7$e=_d#+Ad=y^gDOi@tGr+aB zqeB2k^3v}GXlX^-q*7}+dV&O1`1eYJCX2tMQNEs|;&2q%sCXBuBFk}!Q}9~;hpbcl zgF7Y!kye(19*>eKlQG{`y7vZEnL<`k=vYRn{JC#dsq>-6C)e9z9Rz<07_m+Qj2M}! 
zjZ%%}ugmW1Iz{C@5gYI9FZb(T9yjwwQ(0UMJnx3chHO<|Ef}oqU#1D(HOz90-1PZ< zA&;jGq^E%@!5}sk^O5{d1*r}a8sVrUu0;I#zI}1b`|?*M%LWRT_}{T`Eb(4GZ75wi zmjf?QYN4>J-j^(zw&HToNj-I?HzsO14Xd|gkRVEnluPDXQxHWKmBDeL&U!-{p5Ih8 z7+&nOHt^XXo8v0<84Y4@W3#QlBlc zT#iCx1&fMYJ%%5OA~`XJnHhYfxd4yz!?kk3*W)pI2M$x${TM?9{zno-Je5Dg$I5E8 zC8J;u*?ay$E~^+dc&gAu@Ma7G80EhNs`m_0?cy|m5BlnDaP!Oap=**KMDbMfgO0A%^k3@h;D`+o@8wb~ zIaa^w(`VV^n^5Ha^$LIoO+D+uXo6P&c{nC08(nKpUf`oy-VSqs&A zyhc5clo0dP>v=6hVfVl4gZMODhQo&)&%jS12MpYcVSHu8K%#{Ey5x6L-x;^csYN$v zs`3Gsm+0Z+AevOGYvcL&bvhQ7=rldk-DV%Z=LYXh*MmW?R<#bd+f=+HCC+U&1{Qlv z_7KG=oAys%ETr%c)*lfTYHc%w?&>`8UaqcJPloXtLVS(;Cj|Fa1own5-j``-)-48u z9uFsJoEAIHL3}1QY_gkP3ACz4ITb8+c9FFehmK*Zhg#SU1z||un{+jdAXiLL0gR76 zKG)ZRCxqNKbC(CRD(jp8N%B5+2bF{)86$D@Pt}z7lHtuk))o=i2gETY+ zE)pa%WW}wcrj+L&Oh8MM9ravAWu;!HM>SbnN6lNT2sT?K7hEi785IvPnEQn^E0wFM z9|m7cn$2|W?G?+_xln<}m2atYPr%^dvo4u+f8i?hi9GNqN-kJUIF6Ju6*7x~XLc=< zU$M;DKE|Dl8Z-&a(pJ+6ObRinq+!MKZLq-=gz|y{a)mBALgMG00Zb4DOTs^Y#xE0%<_}bStIC3b-dBK0wq!*>gn3jU4lq=)do9vJrwAX7{cMG2^|Rh=!_|FZQJWs(O$`ob)8@1K(F zNk|UdbfI%dNJ>onUmTA-S8E-Y9k>0Pxd|Y-r|P_4Fh+0~-EXx1#L7aZ6j6!IVu-mf zkEK8xvagn`%~L1<3cZ&CHcb`dr!8j&=mo8TJ3!#3PJ_K5Zj5)XC&)!X52jZFUdwJH z_i5uO%c0`;F>c@O{bIMsMFkpYC=E^4whJWvbgF6gwAcd`zuV+;pbK`OY8iU5v4m7o zLN8gv)tL4Z5CimCT8(;%IQ`j*%AEu7ZbM78a@$aKQW&c=Qe;)d43n(52xA6J&b*jb z5axA**zL91Mew8`1hNkFy%BkA%zJtSC$9jokffmA3#>4ur8idCfv#c=VXVBS zTtzWnrFh~0UxX~gj@#h_eXe*66F|*7^D``Osy7|VEcLz^=gk9ZQBIy9^y~);t5>ck zsL4hX@t%PG?ET7O7d*J`z+%whw%s2wk_Z|_av>-b9PR{*?yu#bNs)&!=j_43!Lr;> zHjlSwppok6;Lx?3X&a1DYRpjAWSwbq`~Zv*tG@N7n#T}uN^^6kjnZI6tgPrN&hqVX zG099-2h1l{Pf5&s)knes|3pw2<+Pk2hI@FGtsU-%*a4$lloRq$X=0o%YI7(alujJ_N-0Qy{eTU!}GRs%q79YAPfBdmhZ z?%Dyrn-+}qks2Y=M5U_xe9vpXH}9j{3DvmWdHj=5EWvw&nnnlGX7VG>-xBR9K3(j1 z)2@0T;oofR)jNBBOBbED&o?ZevmSpnXB=(z9s%R|T7hPw700Zoya-o>4X`{C`v4*1 zS$c+6pG`tQU=tC;H4r(bzJC1yLdJsRri9@~otY`)e?)BV&Z0E+%c_Pc;hnLu z@tyQotM@N-z(7^dxYbxFZBbH%f>`@^Wgw0g$77*aruW3fMvv|kt#TKM6 zJ+l3zcYo7;(-^WqoUGRutBu-PKddw>fAHl3=!jUP><)G^U3|gl*p*Myzor&lLE}49 z;^R8`Q_g&o(ERV+f&r5F31n!*(H&1ihT2MU0#u=d4)yudY~az6^}t>S=_`4TNcnH= z{7X)VBpgXg-h=y|s|y~}KT~ozzmdwpa7fX8 zJxmx*hA1EC;{bUBKee~Ufpw+|Yge7_XGm9SE8GUcgG{C5zY-ng*~UEVlg}Ls@aCp7 zd4$eH<%gmrVLF%wC-Y}(DaH1D-&9h%es>=0^M6vef>v7{zRj+bCZP$5;d|6wM8NEmL|@jX9=FH%EcUfaN+Wb9r>F)$4j z5r`Q<*{!3hKn4ODvk+TCm1%23kX(@JCQzFJZQ17nIUPRKm$TDD`by)J>-r+;NaeMb zN=v3oC^48x^jb(GB~Z6=9~4L6SvrFovglx)({$?^rYpZe+sJ$56qGeZhLr#@Oq=iy zAneS)f(>q&r)WxA{bUl%I4NI7_dZ>NX zAVUn<+}+^RNo8Eci{^b)V{luHQ7pKetrSK^iwqC3HHvmn&}KKV6$m$7`14zth9ttz zZqaPQdZ_nWR4+AA1Vty9D>-@*v_V}=&IFGY+InUt#h@$ob>V1%MlT|qrBzhLYjF8~ zSj0QM1;*S>&kWc*7LO0Xq^#3b0z|n>5~)s3Pn{oR`9C0b%2)@)trx0&{sQ4kCLuaQ z6~4C+w@NbcqK_mMbhJJ;yl!aFl($ON=8e;a za!Qtl;}{z@7qvgL+Ide{D+DkqtEmZ#81v%GM!XgRLq|bKjFe~J{^C^aB)>0cop+*1&H0X{Fx#7Nu{;1)-{HlG|jsWbA^e>${u3UC9#oc`kUZ7*koWD z)HxD92NtGNc-)))zhJrEt6dQTqs5|D#4bBM+bx&{5lGje6z`hQJC-|+)76f5uP5}+ z&Pvim$pciOlHZifeM8Vi3D{oG3^ZL}#Sj-zU_CtUSUau_rGTc?{?^tDo!N$4ML=J+ zy|uM9Rh1*`bhA<+MWxwsrgkM>a>Ol*R#U9oYeAiBMJzt~h5b$>M|^3;8xZY%)zo4t zx093|#|OTEdtw)GLNJf(Z0dPHO1RLRJU$m{Q;}AeFM z2HRF^k3=Y>^wH_M-8WD!cJR+B(cvAhJ`9(FuqhMt+Yt40sv@ZBSAK{~2L>a-!k=&ZZnY;a5IjkGcqU8x9${y1t=%v{N;p{h zVKKax$q*-2Qy{!DH!Xfsa5G!;NroBaTUxEtrsQD#T`xNXKM`d0u-{OMq{}N{T*Ty_ zphoY84pNpU>JooFf7PF-k?5L z`wYmeafbXoLjra>9co2ullC8tn|8lQuP(gVz4P8=GCf*h?oDVTpY+nyoY>RnN4)0Z z#BJhQAt?5u9cy8ycg;K|?ZZGKAUv(<@cJ zdQ6#adTNZ({>Kpxr-cLtPL5_~WICSc0#zxnJxdQm9C z+?~x%-dg>dsQb}n4*j-DLu*FWd5|_3O#+pb3;1Upe}BL`)RTSpuWa9uGtd))N?!D> z=+>9u>vqLo63a%tZ5>|N58s_gX-#dVTrLQE(na*j$Y!GaQWwAT+leq zGFzcll*YD&@XPL8B;giWQgXy=9rko}9F~KEV4K$PjfNTq;U)5|X#W9b3qV4KL880U 
z6ScLpY-Z6d{5xLuqakp(|K0w{2@(vk4lSf8!R&>zW$|N2LR~~&UbSk~)pov;bu-_Kq?NF9y>vBTa3-7i`-Y#xj#pS}9TcaPXHw@~EGz+_OFpng0Pyi7}3+hDdsZ%bh%Z5asU|AJW7#fl8{Nfr8<}z6y zp5L%mKb!M+{dILUUCYIWnKNYC_$Qf5i(^Nee%6s;_`ll z+$nw+(d6a6EwFtdG-vDOUx-k|qjj_eUBaznSyya^8Ae!Z-BW+YWEPK6kNv34L`YXHU96-lh-+NSu3o4b%#y*L%D*QHt;=<3eai)>Wx zIONoWbYsS+k1HdYvV;#P4Q zn1(DdV^R#)}KPGiZNK4G8v>yrNd z%iF6Jn{|KM^65m1=Uk!UaVAPB=xA8vn!T1Ch=y!L!&Z>2Urr}aj*^$sv<&xZ2Li2- z*x*g=_0rnqh-Wlb!E$zdCxYx(Yy&B?oX%@Ox+Hd*J};U~vo}gCDQZ@W4^tGs2gbu> zIuLBhD#d(km=UVRSOFvyINhQAR>u@P443F=BtOtoUd^(WE9eZ>7|(i>Fhz1yFjAoj z^t{VHpuh=x`=8Lg+)fROI^H?>YM0M`$lzdekKcGJt?!q{&6hMUKA#TO@^D2JK7`Bw z*J5fEv*4=H8gph*t~#(^RRs>|jIBti22NZ_yIPjnAIq0mT*Y^|qKiN{N{S znT+E@|4fECOFBA)h5%s(ta3Wtoun^dLs{XYri#57oeNlt6v*y@aZ3pFznC@yK-Cu{ln?qb?%RZ~y(w}xDlNdLjZTp{!SUz$2{PuQccKpEq zD&8fEHsoSVy!k8O{a_=cYF_A?QadO~mobj+%5Y`U!-+op9`jovT&6L?Mp!1VDc0`Q z%Ehe0aKqN4<+H_KVy}J82zL(FRkwG|G{`8jf6zH-emZPC8`r&eC2guO_mNX&u;2#* zBNSD?TwX3d#sP2ASXrGG^T6*w&yd|J(k|ZnuFn|7q)o+v!%~|kt);spmaTM&-bsOj z{KDOkAS0TsqYILs#e+>xX;=ba5rD% z0uPi`TxOpGtQhSf$LmA)@82)BUq>Ikeg(F6!Sef48N2O?vKswYdoy*w-sC1P|GG35 z?~&bXy*d2IHxT?+uy!l5RhSmuxdhf9kE_~0d|)8Qy8}vs6?YnhtJ%KCB(Uaw2kf#Y zubqs8P})9{vOi!i&HQ419I$Of{r}n_-B?I*G0`F7SUH(|4Hi;+?ccp##>x(VLMDnF zCo7Gs&7{9N&zl6DfZGYI6obzSNOudsKvG^_F49_T@V{LEVEJnSv`J)*l>k-zH{h;_ z`}60IK6gEU(0!_GasYeN0E!n~Y%h_Qr1|@+%VHS-3}g#f|J@ubFRwIeMAg;;%VrhD znApY~id$C~oO5$HPm+X8D8I}5oi9!}SzKf+We+tU4{cB$XHFh`ntw^j%caHFQ_7>M zT*Rv}TQq@E?;uUxjl|e#f4)EUzU`ts{_<~{8lKHV=SX02d$b!8xNL7be_kTlcD5V; zsJ-4w(_V9_U_5#bvCqv+{1>woQ5cFxWF4BLyDHG8G7B}g24EO7tC;AUir^;Vljc~#-4T3tsy zUfY|^NPp8?fV-rhru60G8qX~}!g+qS>f+*U`L^dIULFPoI$o$qms(_!I{FEm4;_h6 z3{9baZnq3vjGsR(2`m}?^c19vSNNEdj}sx?|K%*2LDFzAZ}V|C1L;iLr>3q0vMTj2 z-=4qkH5GjDHR2hh{t1)F;9$U)jx*~!&39A}CyJYWJ_~j~3JP0oiYs4#f{rBVj4@-F zb`Y1)ucx=haQ^X=kd1bB>En8Fkq}c%adpY3`pB|UKuRP zQRZS`)e(fei+$YfUZdAiQB0)4g@7@-do*7*J}yqK?jLXhU4sod_voO*P8<-d-YduC zHe+8hlqV^9I|J5+8{AeSo8{rLFQzlcH4E(4Y^!)zu}$ zj-Y+&9SDx7sP}GGNPCU%ZX8$T=6v(l$(=iTOSO5o+Bl7BbDx}CLxqcr3w)v$lLPS4`jY4+Eqe)ir)urTo!idWfltY6f6lKvicmb@^p4H+ z8vtrod+B|^hBG+k;QDu7Y`X?_@wZ}rC=U0HagpoK%Bd{oxNf~h(fsvho#9}s+wWw% z&Z&RK#BbW=QOm!Tu6lWa6dJA}PT34=1N)9hh3%H*mHA7v<=KII=fn%VHX!NRPs%%W3Qbq99ldHApi@Bni9+7Og~*8AeF zh;9C{(IX*Z%Tj~kOplwvaSzj=y(-5Sx5Px5YON<3z(pB}hc9A_6YoBj|L!t#x@yq= znthrC=L(P<^p=A9PG|IM%*Q0iv$*G`FKikkU-1I$_gc`6cKb2D{(2O63Z|R zwq9(1@&UK3d3V&8VNq3G-S$Nn0}2@!*%iQS2=po>Jh&e#*J9!Vs5zqVdf}Gg<6EA9 zWakWw_EHRdo|_tx#6`8>n!DrPRT?*^{$PQ8#ok*9l-bV!3w9y^>@tzKd5;Upo4fzN zF{lA>D}eiA6_Gf(bN|hx21hM0Xx%Ni0}#i~T;rEj1d4ldauNrlQ~j;5vHK6!@^WA~ zik8T$l8q+1W3*#Y8bJ^Mgs8KSzN~-6Pw){~r2)cg4seLlsf%1(a;oZ#EK}Q zgob>B7migkdG;o1w582CNbO;m=wnfK#$Lvn=9~jMaF|){k$iU3`f?ZIj|X-`J)5D{ z1Ml%%ExpHuPo4AX!6o1~GNQ_|8XOY#rq;?}2>Ap}iYRm-8}_&nx%|gXb|K*M9nQZ0 zcEZUk)A_v9{qzbqLJSv|SA##!*{16#@|TRsa%XYD-3rwam;C@F1J zoj-)Bb~w)Xy@L5P0z=psN&Eqbh!|bdKdbE=hYyYiW?4gYQ@;L?MuZ#I-tO75-+F5dt12RTaCEy{#bTliM91OrUB~nV>o8@6* zWF!XMZon+w=TD6HH`WoZT6UchRjOBz6Zw$EXtg7|=E6_7OML9Hr0 z=ZaBOTV1UYsheS4q*Vss1i@XAxBxUrqG*FWzYbv!~xQj{p+8)-r7_Gp52%HEOO75JVl zTgBAirDyyS0H&snRb=JA*hq?AF9(ogaV6RlQ<36^bd)B^b=E)raXIo_57hrPNkbAP z#f--5Z+6C&llDU#_O(?Pj9}?l1Vf4+7&BPpL_F;Jw%@V`T`jN&OzzL2qP?Lg)vGIG+?Hi$8}nF@L53VxZc5- zbM-MzG~Y=Wa5ae=i#G(k@274YwFJ1h6UW&xZnzQlj@8FUpn6X{x#VP^$xy z7nk57o%!k#k{n@UWfV-?QNZlIdCDateH*X`@Xv;T+w8aTPVeQ*OK>VJ0nru4m^U&x zI@%$kXC*%gW?Yt*ANvktAi(ni{yQJQ9+*X0t2S^DKpEEHP_-{E);a1e4;GAZK-`A4 zo8Z(r1FI52H=uTj(dp`n(><4!ji}s#%yMkNcRCybFN@}*UTWY66$1u=qakX91E4Bx43yOGSzqL1@)NcmNIXd8UOkq|eq7(B4IW16!NL59}-q=S)a2!p8&#im) zgM!2%hDa-5hQ0)T_*}`5n=wUtaf_Z9@HuYoJXosZF1PGJps&UzRBxld$2g8oZ7oc;=GYFz?}icn7;bG 
zi;Ea;vi3#l_-}^MZ|bia_e7zM#rp= z9*eoz+cqD-*FdbiKdW3(I8ow*gDJJABf4D5Qhi}c?cBVz0QyVpj*dYHl3^8mv4h`L zdfd;O>stKIp?No+m=P%nDKejDDwcI;I7e!%+N|0f$)C{)cmgYR;C!XSKG0a&vOwZ z5Q_m(WFUaNnHMsm94C3Z4@Q{!-J;S|C)|!NV}dnH`!rM~apqo)^Uz;N-%x&aS=*Sj zQKUJ1RGTDuqd$4FHC|$g1UM)4Z7tgJs5aRrhz3%+tBbqf&yq1 zuiFw=iDk6PCV(!+*9*``tdTtyHTSt*=;-K>gQENXeO(=$bWk+ViTyhsk)*tDI$Mur z;tzh>9XJ|T^~vMEQSca&!!Uc1RL20kzOAJ1b!F@iUD2Zqd{P(0%%blF0QAmNI^FB932gf(e{vA;IWby25QO*0Gpei?axC} z#{R&@{dfDKiS(W&(`PoUmYpkjW&PQB+qovWCd@Cznk$1TDmci!zx=kg5D5r4)1w_HTTSa`~fhp$+agEOIkn z14g08pT!`(fBB5EQDUHnjKC%YCCVM^3S6A{`1p4L zH6SzM=fRn3Dwvv?K!^UVc>n_ojxngidf|Y9UOBn5x6seUVi1E+hleHJxR48#cv>f;2_-Np79nVFx;`{95~v^V?p79fad`;<$@ zCGtjgPI!%*JONtZ4(1FpL?!@62k0YqLgn^3q2{V`e=e^RSIH?TL=%reo$%SD^)8qI zcnlBD@<6T=fXc^C?sysNb=U%U3{#3P;C#IIUBTsl`m+A9>r%(ZL{Sy;fye5g3^@P? zf=yXVUj7?+NjM=G4w)xbIt<#*1mx0Dkfz^2Q;RcC^4FTh%7vT^zT*LCIya^z`tsXuOx-hO( z#R?hy5QcHrCiQRPawjEE&V_NHL z&hZ~Tc=fIGND@gL{kycC3;DCO+!@-hmI=wg1V3QPxl<7H5gThJ_HZNY;mAD1ZUUC7 zjW|3Hb_a(zOQ2IK{dbi%(> z+hRi;d890ozN|lDws7q$txq5j177j+;O>lVK{dZbG`VLndERe8#V)u)6LCU`@IK3N~wi{BVryvo0D|D z;(w4(JZ`V6OWSu2fD(DWE3uxUtpK5TCprVhA&An*^=MxV8RYN#yF0B-EcpX9C~_|v z3=ZJXd+<;IETVF1p`DN@c)-5JQyk1SYD?vC78R%>-aUJonDzv8v`xju#Z66y8Ec&p zIN%HK4myCngxuZ$5#)1e=_XJfg&xd*u}#|S2W{tF3oXY8YEJA3>Gs7Dkvvcc0UY}~ zB^L04iabkD_;^BiaxQG5t$jQwIN~}?4<2stSOx>x2WXao0Hp?O;KTW=TF@{ay75!b1LCq>a`O}c@|i+9lx4H zF9awA<579uv&Q2P!_TQw4!n5wPRd&22M;;W;)HLi_p;hB9yTe>p1s1y6sK-8J)3JO zVK?^CJy>ws-agZCRw2bAKniy7hJNCk{eVmB{XXQ>O>X^*O*_gwXhJQl4?|+HbLY2u zP*EhY3MWH4*gr8FnsK=bz6GM$Y@>&5kn#LaR#orasiURAY#UM91O>szqZBY!qH#D* z25WGQ6VOI+&&CvO{lwdVlyL?A$nUNOex@Mqb0YUmvV%iLjl@4J&BsO{2!&(`$%ILEPy z6-Ca=sZx}RGiq<$JG7z80kX1kwblh-*s|!FZXG9kK%Mz}KJwhZ;k>;D&Xr_}p}=au z2v>A))KSk7+{i8u`ju%K3??4xy{k01ZJ*T+b2u~T1w4$rFV$M9r_6T)dTu1^z|kJj zm*^s9WU*-0{q#a4Q4kcjfRF3CB!xVRyUzsz#T=wfi^{iw3w{KU-vO}}2r)ZA7kVe^ zwGG|^lkFiP(BXMtu7`4Jc&8btc~=okuqWysV+HaMgQ1zo20(T&iG!$s@^Tal{$lLw zO$-?z5MEvERNXmqyZPjT3yPmR zSUJ!|a*Ba+({VCwL`U`j_NI9MFk~y@*m2tUH?vy%_xfpfXK8o>w{14M2(5JS%pHWAXy`dKe&D* zy2D3&W@@^u1GS-{!b@0EcR>z^%Oxj!vx=$8OmvdhNRoTQflDY{J>J{H2ym@`$^Fft zI{iprfjLrS!_0t4Gx%z`4gI$wfjFPu{AH~I-- zvZe|)kqx&zi#=dku@l0L3`F8Yc^OHElr)4?X9b|B{CZxBhlkWTD;m7!F|+mADlhc( z==Wh)dIaVx{9@FTpKrasA!xYjiC1c!@6_*$(0hyj7`s~y zqpay`ZaIN$a<5y?mdNrIAPuGe1f@Bs@M{VSF&Tw`8Uds?oyn7#8N=Qw(7h2too0L# z@h6*O%qdFTrfpZ?@}k(KKyTuPads~s0wyE4@xv-GgDLghK3Ha_4>*HO;I(;m4OB3 z=b*j42Rb4^p}`0X{csFWOOswzlK?KfF_=9Lh<0|Ca~sE7QT`1^(~Kdd{0kfdsP5g1 zi>Ppo&06q#=7|>8ez2SQ+q`f&8(`U&D@zjI3-=A;eA|zrCpG(be?IvtSn28SciJU@ zJ@P2;(tJkQp6Sn?T?-Uj=#M;+>o%CGd^6$Xd$Kn0o@;(SY zHA4)kba@R58rNI)U?H z8wCpuG2c=abCi|+#{PWOaL8WRr7a?nI-w8!n>?x*9a;$`Zh**@iF(N~6g4#d^P!zY zm2#^>Jwiif(&qS&&G=_5-X(7!i3)t@uh`HSBEI$SR_^@BQj#uV`IY%>^1a8fNJJvF zXCLj?vT~HmNF}O%4WjB~>f*E_|N3%RKc?>W=9hj?*w+qfZWaRj zsB@aUyUbNoRq^rg5F6=Q!;NO+ppX`R1PTmL{=~Df=BqZiE_VTP5<28nHtFd4tZY_| z{o!-?<1@2A7ZPX~QgbTUfzt7YKRgY8J!Vm5tIM*!_W<#I=g4{LP}X|Q0MjxGZD2}M zmrs1DtA20s4C(XbGuhex%vsm*BVs0F*UoQEeWhjPr~ZMcLNU?HBR$i1xBqq(15QgJ zrigTOQbEh+R*ues^@y`M|4x_UdDrgl4P&vtGdt1$bW;=M32?gFEFy6g3kbgrpWZ5U z@DtBXO@X~*bjvLzSw5W(Bb(ljba~Hz>qIx^hr8;Y&_&k-J=TpPsTLro96G;1!;mBp zXNA6Rf46`j!HTGs*?MPO?7*Ho4ocQ#bFVKEg8D6W^?E>MmWz7SI}4d z>pAqL5vC=gME60I3=z%sEOiPTuv_GsV|I{W5QU~3c4lk&C>S<3?yRY|H5U11VL(yE z)AqMJl41i@;~OCdhSU{#rcAD$8dRyKt+Z|((dOZM=d)gm;H9DW;>EKZA?3*vU$JMf zbFh&ECgk!-RmJv;6_=G@_5x`D#f5{q)ugI`14tDQ9o>Oup;u{SU|=xa78GDSNoA>c zf+)|=Uj`GUwzZ6xuV!z+zkve?u1;5DCa0aIyRH_L>!9NU(WanUC>2ej7JlYC=RDuU zN*W8OxL~RSI!&BE{;kFKO_j4?Z|IJa3aF?wBJPC+9F1(ojIkU@C98vl)~<;4#enEj 
zTCg=@yp6YPk~iW50$c7{;nGq&kSYl}Fp6INRF*M~9{nBRUy&&V6y+(?87%fdzASQDo@D;tE7tBhCJ=9pLQUz*ei}D<;5<8g~lpHY? zIy={!c@g=jsWem1h4~}LPx5Mc)5Otb>FMCY!5x+f7O(l3BCYOcRNnhj9wq8|QGzJK z-IK{h4PEQe4(V+BcilG<|0vB62(&~#CJ5|Tci<2`INbFZ2|WKr2`X%|Sh)^*4KM2- zE7q*6ua{lXtc3o+q>k5!igQI_T%lHYSs03;Vh{8w2qbytJ`VVm$yto0-h)24|5g6@ z%mrsDe`0)8+{8x)M8QI4IQmIGX|@dhvP|g0X_#}-WrPq zd=0$N3XJa%nQr0cRuhUw=bDFev6|=7egEs&pBG!6KiT4#wENEI51jw~16uCQe|c-P zs%5LCIYdyUQ=^FTkXnDX+i-qa%rNn-L8k!Qs6N6pZAmHhpX6jBVkxf%Lu5_wvwl8D zzf15jHCRen83mGI2%v9?1DFvg6Wkeyl}g6J?nIL1Fck*WDRuab(lp)L56ryL!?=}6 z^TG1a{`8lUhK7G2So#j$-ajx9D@PjJ1KPejO!H1toBcQ&^bZ95ynZ4pkWJpN1Qr4S zh&(bGv9xEK4nJsZ13%2lHF~=JN7~a2VY%v%|CU2+h7{ zv}Cb8#Y1m_+@VeorMcoE4ja`K-x!WLSZO58O}=~b$3xc<8Hdy8(w4Wk(+ z!t8q+;N;_mp5sSw_UXhpPcD=ZGg6QU-lu&BuxeDze0EP#%;RvW z6UnvZY1Q3(00zK8`+gs zvVKrWj_FakRynIj`%jAH1tl-cx?tvZnTU2$J{2@Y;%90vK~a!r@^6*1 z@&?|<7RaQh`v^u`$Jr;0g`i{w8}r zxR7Pd6iDeD0D+7=FW1k`O~Gsil!n?=)sLw#^hNQsl+lQoLF0BLmmj%J$@feWW z_?=4Of3|-OMljz|MlW8hpIyoifuR5mSn!tvMFl2$WHMOzlMdU`5~fJPea|hg8EN|0 zzf~qh18(^lY>_Zwy?eyA+~f>OYx&X(kTC=*s4b8md+gr|I?5kbsJ6CXpreOdYB5xb z(a*wn{EOcY12MP*?ZKmgq~7G5w>)feE}rGyIlaJV4%`r z8pWX^Qb1Sh@?`I9x5W>1q+9dNN%=ct7g0L0AWSvu*GESmY?_!zVzfSf9vnW4Tx}qR zcDOMPAsA8Yc2MY>Cp^_>yVLAk{fT?CxG5_&3RU@NqzUOFRvi3tK?b zk=*)koQW`DK+<3#-*Of}*7OyyU$gud@KlbEY5JwvB|ViCPHCc0)K%xli;ncSy#&+} zo}G>Zi+*RVtvi#iubnd&wAM+npJ|x`jU-!qP;f7ny z0e6^t|7_;nV#Xaa{Cyl#^eai1$|J+M`A&4hUNk{s{~~mip?{sP3zMd954?NOzB-&c zcF)BDt@<9E56o!L@->vw-AQCjs?Nfw?p^KdglMWLCOiPQpci=Kw%?^WgP9M=|Jeru zHAzVlakF+N9>ouk#0S*9FW?XFv*;l#V+Vw2$Q8WbMrZ>nr?j9piObzDo}#pPugT?O z6+VKBSPHh)RQX*6WYmG#yX%i-*9LIB(CxWpUJjt-#$DCb6JY7zjA67ZKT|;l=Zzhh zb^(EjF3A?`wZ;{I=@0JXLQrVD2bIR>#!(G{?5pKydO3n1&>$3QaiZ;lXRol3g(4p8 zlnC|;si#>}Sq^gGV}~cMa>Sv)h#Y)v@%Yx=_zPS}PzU^DI-tMfK7Uj`kqd~TU_llu z!TWb`1};}g)%)|F;wO+=7Ttt5tZekNKLIJ35o$T<7~gJ*H$vTn9-R0Eb-H$Z1MWa!INn^~PC}-!w*lC7K**rD^J%o*o?(exroBBqi?{L3fcS+?rhC{Z zx3fALJ2X=PuMuiWzkbAh(~`6f$)<@%x5 zo>(@yPjfksV1XxFnJgx%D*><~NDiGS)XiH+Nc}C?$^fH3LqBDPmz*Oz*c9~ZM?!UU zm@CC099lCds_;gSyk|w)0cXF_$7j;(RI?L8=XmFvRO0$+K9!B&F-pbQ{-_qHw#juO zq?k+#at+t3UHkMMM`$C#-#hF=G8G3==zmCZ$qEiPeUxVA#r2J2Wn(de4tYGQr*V>y zoR_RF`Py8~#*4&C%+M!JGirGf^L$9jtL1xMUS8nz_oPe!wj0y|3us*cE22%6m!QEM z0!vFka|2Jm@c0u4iwa8+z1a?kkHJ?*ipG<=x(R)&!R_rR)X&u;aQrmSERf(r<)lk$ zm$*FvLs1k81*SOh-}rjLs;Oa}14zdRSCG}!9m8kgwV6G20;5bY{Lj-@Ag82!ny3xR zdJPRJSmS0vJeaC9=Ve%IJbS0B%Sy_xu3b2e)%TcD&S)7VG}Zz7D0oMJrvWTX|EC!D z*({Q?sNN}BCLe^pRI6t4g69ROgFujeFMbKqx40O4yo_rs{%w`yiGt-Y5W;I@H0Phi z5XCWz$qlLdb*i_KZI73X%#WAg$TDKFd=R2aP>CugemtgsaPqI_2WhO1TbaptGLk*b zp9>WYxt{3iQQp7bG+{pR?K>7j2kR24WuymYuEmEzYf`pyJZtNJJ!y$S&I zmLd8%JhT^Fvay0o9tNrv)XhXLzfHPc{gbw+aaw>=qbCgUisk zMZqaPabf4u)MC#u@zK#w@Y8dPxWfXC8?6T(5u=M(T+V_`L62S%h(QV+P2Jde(vpd&ZJnGXaB}AdT4e%+6S<7+LNBR`rn2Q)*l%29N6*ukye7F7A9gW z+={dQiM*96Z863%c=23v4+&(BkKjcH{GAgA_*(0**g1XfVtYgIEXKvhKWf>O`5yj> zzZZR8&|#wVZl1R@`3T4B_v3j~#eB0*W@hGPV4LPg1vDw96fn<3^4A!3!0u5A^wjTx zI^g)ieX_-4a`}fs&o?v z{YKOlkebaytVm8p%-)29*8S-H5y{&NYf3_%Qw&Xrm>m@n+3_?pR2lNSK(kM?6U}IF znw3krHjF_(v6G0NuHSoz$0}!Us7I5P>ykruEFn^;B>#^c7b+RY)%_J4X@r9pA15N! 
zK@WXEY7M$#D-DV;;F4rB5&-j6#d8L{&`2qnES6Ma4hSY?9`N5Z$%Xg){-?@7vkv8; zkvzVY+U30mus9D zutUr+OlR{pnXZ%dYsqkn1h=GRd3fp2cvU_fiXZzwCWhJSxRu7#Wa@fqd$uUIw`u^@+?rqoATfWS&W*aXr z?~m)E(kA~_XEa$vHxxlEvDTyQvp2V9$Sb3y{dj2<9xIT5AvL3B>h()^q8MiGFzzcx z0>dQeW>E6K{xwM!%i_@hrpNM*AU_(V1NMI(*~#WRbi#(nL`I7m8wHdugFHaB?Fr22bZe0$~B9`GWE-| zgWKP4U3?CH6T-m-Eg0>Xvu?PLwHoYkQN6s}#T2fwJ`wXcfOmRFIy9__K|f9!9Zzh0 z5G`wU&KgG?Z5V=~LxV+s{_sM{&kRhMVX;E0QxeQ}McjEMqZ>zAoQIB52!;$sBP#;S zM3Im6gdj&$3dN1lko+S}g;$FsHPo_;ZzG0}Ay+w` z4?({EH`K>(Ei~9}339Dn>mykTINL{Dxyw15oTl}e;*c>cB|OKsHD?P)-V|Op|1Jdu z!6d%^uL3MNW}5Z0c?xN4I7700oE-61Ka;gn!xK3)LXN=um@mjtO$^mb78-HY4O&KV zkq^)r+0xVB{`cGCc}V}@)lJq$ZGekKM^*bl<$v9N#?SWO*`+!Eeq!l|gt!2LDw6g?iup6c8#pMG;}InB{o{h zp1PmNY**Pr!V#XLZXR~?$IUV{XqdR>E3zbNLpAi8n8fbWuUcCoV7UmG;gJ!WV?Uyv zaQ2xmE`%$LHbIZOJVS(znDb73GJ3i%()C`*Je7)4Q#@TH)sQAi}SjFNw0^~HAr^KF(C zahO4QG1EX|BtA=MR)9w#Di*feqMf*lKJRuZYx_R<$GxidmFkZz?i_N&(_JX|YL2v< zmZ2kVWcI#MHUNxS;oeUYPaDkoGVn!AFq$B_Fu&GD=kF(1?)&FzzC+@Z?v>Q{tzK4J zR^+2CMwMOgHPP0 znB^_qJJPSirVEJn;26F(ZAocXY4v zn@pKqGU2Te>Jt92SxKVp_E^?*H|6;l)TAp4&Z8BtuHY5OF5u8oLw_7F5N`SO{SKQL z2P>nO;J)g_?ea+)plDl{YFs)X}9JS>wn*z{iUPixb_;HZK`zs$ZeV&x^h* zE^$fzEWB_3GmaTgrEYpWEl+eOlK%GJ&lLCUjfs!qJ3Vp=nS0$U>WU>qJy4?}jy*BgbEm2Tvb77!4SZfW>7?^>Tf z)TM$0v+rwPbsi^=Ka>&81d|QcML`V(U<@H;3P5KNzpN!d^=oa$7L{ z`}5vblQzV8x6VP~&wtY`wsST=TKP$EeEr2;v1PensdJYZl9$9LtZVyQ`>`cW)n&<~ zFUsq*KjnwYFIMB9r>^^dX#My-Dqmnn(Y0Pr(^@j?fjgZ-R)hIdhShH|hS0~e+GT-n zyNWMUJ|Bjb$}+ISsH=Iz27Y<Lh>bpv(JXV5xiQsw6*mgWFHj)_|lahw1`bpQgb+9kmO1R zBkj%M5?KUAzMATSRE2`yI^W$YaCUF5l^YYK^_74sDt>c?&^X6FQ{1!s5mX;Pq`w1m@!^(#kZeK9q@XDi^%r{#XeW9Hu>(7$`Jm zD#d#pY(GE{JUn9+2vuK74ArjBvLwJ z-V||qNY<_HJHbFimzu{(Y707F*S8Zs>O?Gt@g$k<2xJ$b>+L1F&h~hND zOR*k}m6kGv+J$PcnTAjUvu)xIs*35rVoui0ukTOAQM6(?$$7t{q4QKBk{eB~^Ph)> zCw}5exQPU2T`L`8;c((jO{I~M_BT^sJV#HTYV(K09c_y2Z4rk(d_PlJdj`}vy)xQM zW3`io8fp&v52w?QBuYs)5Jhmv$UCsuHo_X zA*$Buo)6aK#Xla)TInmRa~(fJ_97J!;Krv%s|Tr?dnWMwF*)ewv`(%6~Kw z=Yo(SZSkK6?%6FL$+#j}6>FWolyLC$n9Ak z_&l1gj)#rkWTL;l9hmyp{W}&}>pmrWgo>e@iTNc4b)HP5C32l8 zlHx#mEiW}lEqsst;^@*4-}x^mp{&2q?GA%Ftj8;xO@$KQp29 zza>r3W{O|N<@XUKZ4$G~A3+8Z4e%{LLS~@>V>~Gw!mE%8<@uHd3kEH>mkckd7-(Wd z--{=ryzoy)=urA5lH2C(U_`h5aaN@i<-~KkgLO7h^z)Dl0V-x`7IW2)$AFiRc?-5P zE-?(=NHh)uc;v8w7v5oQTQwzA`e7@?=`+#)OI+95FCV?d=#jJyp4OE>SekP z-&zfOsC{Htwx6tdJ{Fp+WdX6>d)Yp-;20TG=p6h~sqDA8a>C?Uw=5#*h|iy0_Sohs zT`?Pv9l76PPpsWzK}<7ZlMG5xEHpMcy@gDQA4m|NAB4_O`0P8xB(R z4VRH2yb}6!{T~x>@k%N<7I1=To;XA@iw?Z^j^RnvYJ#|YSud8>jv(i=H$q2XrJM21 z(fGz7Ma(1B9UT2Wm?!{$c56PqIZ939kHSxX!r(dF;tkIDxoD_l|4Wmf2eqM?YOj)7 z(AqumYW0$FX4R=2xaV1F^j69fGY$S+zGp~&4+_v&U%n0GCSq%YGR8IPQ0ZUiuJ*6v z?b@hsBg+^_0{4hNCFjokC)D^RQ(kHyc%ImJQ%6aXtY6Fo9~EAeWSm}BH?_8b5RoRp zD>~?JSJ2&z^O_=kv(Hz^n)i!EFUfJA-}%*QI{D>LQniA~Hp4*YwLlFkLYX?^%6Ld&&56W*n+Vi&OUCeg`Ov75s!`S~MEY${ao6fRykc?SB zCX#-T6mBg36PFq05nu9ix&+FboTOM6vigVAIl4P!R6_-;P42~iY$WcMll$#;1$GlO z-wD3Tdd=D;zmcDMkL6Z`sv&o#-bOS`B^n|OkDT7`allQ`(Z5iv)ni6<_W!#8FwXjp zbf#(}E-kg)Z;|Mz*}p}(`uOeWNHF55`NWs3eJz;Ji$)uQUVoA^MXxE^IA?I<*7;2% zFC{2%2aArRDi4xWKFa0p3|z_0?Ds%s`1F@PXr5GCVhV%U979ekz?n$O&b)n`Ru`91 zb-hW%OZ#EHBgkm|fAq*Lp=G%cTv?x}!$Pac%%7PhPc^oAohpIoi|u%QtMLpqm^P#&lhawH z>%W(yTJmTi4vxBx3x!E9I%_MF4j8o0wih_cCiss{4B!0pC3`55oQ5$Kh-@iOY0JL!8t^gP8bXsReDGZWsAXWMzRc9v^b z?}p_0jVcUW9Ay|pZAlv5WUW#XA@l~tAS5WUF>=0TC4nfuL<;sK4@R9)ql@mN6z|>6`|kIf5yf)BHA2`K|3W2D)?PviF(9N>^VV7JhXTVkO2kSxlao!9;*gq03mBV=igy@Uu?TnMbSPer}*sbsr~r=4@bwwYWt2f&kHO+^Y)C z>srhG^3r<-y&1d;^T(z21N42QD?<<2wd@+r)%%AZeku)i_SRFfKta5utz9V?(pM=E zNXyM(hc#G1HpYa19#cqMoiQ`7&y?PO_s>wLQa(>Yzi57-JVdVPh!SGJh~=BGqh;*V 
z-Ql-EYKXV?l8AQyuPiBK?u1A$OMzN$FiBx~kSf(SMd4$^J8x)r5+%vPPg4Tn^xtB! zf)Knxq1#NPd9|gD#o~9|Gb*9*gmzB)PTm9QAzzt{5ULnPJTkOKO^Xp`c-ZJYt;#S_|(j8qVeyKrHd_`ZJw^r*kdBlZ` ze3!J_Sm=}2V#M%`zc<6ofSDYByEOUe))S9TIe^YB!cZt-6V|A$s32pne$s@~oIRWf z3HHDqPfX<5FAPgM_sST@^|?3&U+rJGdd8qpEfEr?*;ihBiZlqpMh-R%tW!3 zE34$!OI1>(qLm!8kxEw{r*&g<8?~j{gs*C*zeX7_b+fy9?AU)vCtGT6LR=oqj6M-b zv7Q;l*Pc$zmFaeM+QC6!H1RINkc1DXNcn>S`58ryyAsSCHfrf{dp=p=I>79GB!@%kS|g7$vA zalx-a6cWT1z$Ow0*GM%pvx5#TkXMc-| z-fh>SCYxW(hGrn8lrrg#;3lPzG0Hg2ABZ@aFRoxdte>s@x;Vdk)os1Xl55N#tM`MM z{)pNs=Iw{y;Tmt2 znp}IKZE@+w`Wn7K;~`_cF^x4pV+eJf!8v8(pN~<%Xqda)BBVRv2@WfoO`G`E@b*1` zZp6M13jEWKf4(+uK_By~am6EnJ1jx=g-FkhaaevySwQ_)ct*0Z-G6e6r6(w!X=eAu==H{YmJO!Gj?5lc7Cm{D?OvCspAtXZ zvrzuJKRC0ezf{cdT|mv?htb&|LVo#J%Vcu!CKL_Ast(~J52yK$w=u_@cT~(h6hR&> zg{$r}8Fjo|p=p8oCdc;f5Njx6!V8Xlm6iJM;Nx#Jzp2cp+(lJ1wLc@+>b~+JDBnZh zye3MCm^n@WdnALFd4xd=_A$LE+{ z*zus*-w0$4o@Y%vbch!(1lnnmhpAG}<2Wr@SaM>M#Yv1TJlg-dZSj+-u8N%S2@hu% zWuV&Ep=2}BFV{9T!z1}@RR}{OG+9og54`PrL1`58CerE`B4VCt=n0SK;Uy*m!H_n4 z&$PvkLV1|}yiCf)W;XzKLpV=w$bRv8M zj?Z+h-UKgn@E_{OW>Z(u0`8x-yT3zwdacyYlSA9F^DU@A;xQ(v=>_#Q82aaF2zbRMn>`!ESAUOG$)IM3^TvV#ts zi>RA^5>`z6j{NEk7qJWJpARO%z@Lc$W1wKq8IJhw3q-8s$L|T!{{Ga?v5A6|W^cc;f@PLlx2Z#tIUzadr-;ev;kBX4F)oC+znAD>@CYYZs5EY78amrI!FMRT z&(rvT=SVu)2{DJ$MYl0ZZxWYB?L9m~Qe{f*&8T%8B(lyS;=vc7lSph+&2A+;l_PRb z*!Dv&GSA@|BIj$e)z$ZH3V={(yuZ+@Ao)``oXgQyto~UYcZZ`V)+R2{Ci0Rdi8F<1 zBWJ$y#k|szQB0j~N+s(R3a33;-o~?& zx$6GFu~pFwbL1$3XvYzu$h$H`a>;(4!-cZL!um=lw~c%Le9hYyT=PFmjfy6DRqMbX zorPUX{Ba|T17+692;0_%HKSH^6munMbK4QC8F@LvG2TyfC}Sb8laukaA%C>kJZlF4 z;mWehR+nfmnH3YDizbFah-_b(#K}Pr#Z(dDYQ;Wbw7slmHB4VdoGf5$CZe={V*V`N zuSy^_Mwk}r*yoAh^wA+0bs__a$d*w_n1GtPM2<^IoSv9&v~v6T=y4 zs)D|fJ?wV$@m9PwV5{c+$w@jp8;4KWgn|M&ss@ z7_t9ba_Qx5WPkK5YOD?A;y=o_#%lj3IUljlgliG^2cu(BbCU@MsV2e-k7h)|{UEdO zE5f`FXY`F;k8;!BA>9>d;{XfJ*8)SQ>>w^KX>oQY#uxcRMqh>)*JvjS)t;PoRqU-r zHIYsFS7|Fz?>i3i%x|lZf7da0Z8llEc4tX9Cw}4ZbeeAcT;z>UG1u%!B}rqz<;oeU zz%|s$Yd0<;p^?AHkKTnq8gn{IkDGw4{6AZFI&((*A7mfCCA&x#A4r1qHR zJaKx2o{@ce+&Q1^=ep^b^TXhN{vQP`*1w=roCca&u{nxB?ua`Dnp*W<}^wdE@YAF0gX zRpZ=M5nYhdO+K>p7Bt6?LP9tC%PqePL?ve8IZ7Z;)Q&Gz>zCNtz%8(|={rlvM(|UX zIKASuMdI|@CSlKBgm?HX)gMLjdda}X?%fcyNosJeSilw|(eq=G%_i~q4X-rz3becH zE~_cMrq5AB9{(vR3PRD75G1C*|Fm+UgVigPe@GSPB=opf+S?xmJ`+b^d=5_lWs5{w zZIl`qIRO>rAfc+f?YXgG35Y&H30{(15b-Dm5_+qn)-BdCw7%L?^kklI7Cfs)51aSx zhO86Be4CtGu>s%@Wr}jLZ%8d?c7ZuyZf-Suiuwru^mh4nmSC;4)eGK#3_s;H z#vWwsCy1-U3$ia>=NE^~B92{GTS^YUo{-J{YldM>paiiUFbk15V8d&f6c`bt;W;|0 zyq-Nek~GD>dsz8ic64KybCMXXW^LAmS7ePPZ6f5w)jF zR-qmEJ5p@{n-Py@Fmr~k9sTm(zbwu;F0-H4>}J~%af;syHJJzy#jp`giR@r~?A)1$ z#OD3O^(@QH;pTVUQ>?L3#c}yOEJjsY4^RT$niOUi7Hou9{ru*?K3nQR?bohkO7Ia) z^tAD%w%poUTD*Zo49PNJjW0=P?MjKl?A)YiDo)qi+nXpNTzvgzClA?VnHMMX5uIw) zcOM2;MTwnOH7&u`-SW0<<*Z$^JmYdR*pkj3+xpI)Bk0M5lSy)R(>dbv z_wGHUDS2cCnijW2?o+J(JI%OCs+$rK4nmza=?M`n=;i0@d~IH&qHIT_%9TJ@#{Ww7 zo+S#e)zX#q4GpJfR(>Q?sJc7~RoG{BI~Xz&{LnW*4lHDtfpVGsP7|<>4twonm5MVW z+k7=NEG_v9%j&k@)Tf^#&y~?jnCF2m<_)uFX2|hfVv4~6O!{5vUuBqhSCOZb!&oFo z<7Gjcw0s2=Mu-HrnA0K7PuTJT)pV3ddTltRuBeLDmf*I@4gV+h+Z`{JW?vbSBy~H( zqvpa(>83d%;&qCAf))))TC&I?r>P~yFN<~CmNEf4*r*NCMbSJvp#`hNBnX<^lSP}K zGp#6`BRTR8v90@M98=WpemMwG1ZBLYgHAP)U(r**k?M#x#G3i?x%Uc5Ng^V_8k3lk z64gg75H12l65JW7T5V__4uN%jIWIf=^z1B8vpCFSO>_+bOV|Y<>1<&oDL~`~q;NDe zG`TxB-N*dO*oa7s%q4D|iMpLWr+*kV8fRSiA$*D4SMo!yF$jH6kH_Af_1jvr8<98> z1tX^|!0Rfor5Af=g1-4Rv;$drA0_$*Zj|m^Ge5^~MBx#dhJWBhE z2TYr$5dCeZs`S~br_)$~lToD8sQ3xq8i2+CXwO0bA94e%$TUk-w)zkeW|8_<3H{!yme< z1%?U}SBMxkZIaW>mSRCenf>spgR8P&TISF{QB{Lhg9wW^*o4ho{|J?x4idXGe&RAN zwK6fXhW)lJx;!aKf#T3i3^izktFa$jbZpv$XBRioZa6Ky$n+OD8RHQ-;WAvT`x>}e 
zKuE0h(fy5C4%cRe3{iT3(UkcSr3C0q0A^D2eC{e>v(!|yf#X(~>{#X8Bvk%~@Rs=BC8fo`s=oi1gj z&H;SeJUl%?cnG??fdv`d4ZR3zdAnE22i-3yC@_Ayod?hdHp|K4@)_{Likn*)_)Hy~mFRh-X0tOXMT1FViuG#>&K z&mS3)+aEwQ3Pec6h#@;4QsqlpTCNTnrY0>oP4V9YQT{VE3wW?+TIVvB9PIAavDiP} z-@G#P_yJyftHlWJkShhy?1PnK|NBi!l@fK%B!kbDjLU>gb|ylXtV@I=^#NwYAw)E# zC?xgY=x%0X(q$okS1zo+yScr4XJR17!@H#=?7aPV^;FL6qJ{|OP^}r`L|ve(Tdf5V zMOUxJ9zUNc8C5O#yz@)07SVj+p04@A$<;93y&y#r77>R&A%;uB`4w&G@Oar_*=t6= zu_aC$vt(qmGrj{MbC;UhjKwSWO|ZQPTA!U-Wz@3l8^QPp-WXaP2S1(HvnV<%qOsb{ zkJ@GVhZhm<--we#H04oak(+wuI#R#6ISf&t6OCV%$2%j%nTBx=YHUm4HKVH%%>qjb z6f*gbjGP*hh?B%S&><#ct*HPDY_>-^R`Sx;usM>g(a$e&88NHBgy~n{<5iF~5OFtO zI)34_k~6jEJC9$@K@zdO7|xBUty_Az*9%CQ{_SVCMrlIXOi7 zMLB$}&*?_&vrsYi?a|TE`>hm>Fu-)(7$kM}2P~K4yT|AF0n^43_=63p={non7>MoY z=rEX1HNB2^4mbxVYRrODu}GD{0LUPAa-K5TQ7{3cb4%ooaqlhit4=5^@W7nXen}1v zq9`GT{VbFReAq950s-h#u>)qDncVQ)L#__XtBx%z-RED9$5$P_guSGpyLL^tyDCAx zfN_XNuLyKn8tE*0>Q%GgbpE}2+)6io4`kASjJaEy?UW_t?Ia^3Q=rQKc)bZWs?nZ( zv)_Xc0n38--QC^J0sH#KH={aWbNJ`V$^-b~SPWWSfOPHo?}2(6JO_CnwlnE-eavPR zsxSrqe0*i#L$Tx__mZ01>{G_|42PJlO$j$GV@XOBPqU)9j(tejRJzEAu& z#mNnL9h*!1^HAncZb6BXHycsE>4^Wd1`?H0LO#x@LYZx(q~$Zmiy3P>N2)B zyFXmc3njS!kTRAOUgFmj*vMZGqGPn=gkwk+M}zKD`adB*Yp6$GJ&G2;ZM)bXB6B|| zj^@Hs7OzOloc=oS$eTNKo3~eLF1;x<*PP~BlA>v&wkpB+Vc*gFe#e=@|MA8e&L4z$ zez5xA;p>|pcdcVvDrd{xIX2z;M$DeI2*s1?1V@IOR54$ZvawfzzoQ;qT?DssDkO%Z2$#t4zFX@j{`dx`Fb3_O#R~5IbZNE7ahC+ zegvF6m?o3-VD^Mn{4^paknljE*T6ag0d^EdfxU;&3f z`X+MRdEPh&pokPiM7mwRp1j#W>k|Ola#PRSz`88F2LJ~@|8OD*soo!zoqQ%3HrgT} zAUyvcfFhJKV(||^u%4IW-@SV$&3C_gbNr=g`Q^C-_D=_)S=}gU+Rs01>6fD2J9HU+ zj0#nmlrs4bXU5*@-$YsmUQ+fTPIEKUP!FPoi#OvD4Zt;AOa4mOY6~Ho8^1HhRH`U^ zjb?!@iK8QCTbxwuo`j=!5aLJayR{WkV{oyUC5|1UrW4qDn;JXy;Wlv@8CgLU8qV~S zbGJ2k)UqSZ(DkN;t!tz6T;vUlUom@wj8gAlQ}FWS#b38@^@_pE zM4U)$fG0ir6zH)TO(}GGHr9PUT=eu|w8GI1KQqo zwX@G1gsE45q0-Pyh8@YKQGWQ77}T-V45k~kd-H{|At!?oeBq9Ai*d7dN7kxYDKW=( ziV_Mlmq<}frtGd6hcc41D4lHUCeuHucO6$g!y*b{-wW#S)?(6&92=qCpV0qtxBKYl z0+?yuQ%Fnn(@rCL@ETH6?l53_pO%R{*dlPrSBMvU9@~NdWZKb@(`4K69Y`*LPKO~? z9=fkc*8uzgp0N^H+>9^#t~z(#iQFwt#ceNmt^gW|m$x@S-1t8LCEI+BH`xw&aPIGV zp6qg6km#^g!})5*)v(L^^vN(&#C&J{HE@Bu{`n!Z<0dfV8W*2>d>1ZW;5tdd zFk#i8W8gY4-~F%YZNRR=b4)%tsa^j8gq)Y)(~ZgI20h*wUw?n4EVxTM?+J&kR-lS4 z{tGB5!J!2?q>Q&WUoa3~K4#fh-we4wo>o@X_ySI{(T6R9NdcTM;P-$Y#sx8t(PAMq zoPR@I@{?!YfF_KHm{>i2pAuli?2TCJ8LvMC+zBZ=$m>y}`+V9e$`%LFlJ*`*W5$k$|J z(f@Y{lxm}1wSX?8X|FZ!N!%u;TFatS}X%|ZJ^iW>6r52ffc}J zCV1vS&PCk68^@(`cQ(& zD=6rvHQCKp#=dgz zn#Z{t!L;EnE-vt6rHa^j(^S<*HJP+v+GUElNmDzZavc*;p~VGJ4e+J3t1vJzy}Z1v zI5PnR=<@1n-mb|*@Szt0%O{r)+zAG3I&A45U#rTej%~e0gIlQHlre3CqK1m{dx2Q~ zHt1%o)Eqo6nTp%-e5>q*4(AH6rr#Sm?vrf*;`YqJvK&hz2OKe={nYLDzX2k2E1*CF z*USTmt{}SW7<$fvj96*v-4s{*y0iZ#DL{wb0?nNJpF|lDly?AP2|%xG3v{2862QFR zx8MG1R&foIkNf{b9{yu2%P|Z7?{BISvhWmo-)9bOrc|JQcAt9JvSZ}83VyBLcUr8M zV&nlx&>`1ny)h{uoC8H8`_?`e*yyS@uqTwca5%3AqKCM&I0EJy@G(~am?-4B8?DAe zrXgL|+-k<5-6#;AAmKSzu|hq6iW>-f9qtvTArYQ9Y>7L4{e9=@!_>PSR)Ot&$A{D- z2RD1lBcnR%MCFN~sZKtlZfBQ#i%x55#=Eq5n)gMXjcSMN)6h7JhmTB}^iB)mV<*zsR-C4AEaMDX6IVRgIb9?Liw2z76l^ibuBpe;45H;Cwc=$E=MI zg6q-y?YQSaXv^_I-x|Q;eFsSefBdsGep*_p2{_ybYol`BYv&5wVUw=elKo1nwM0() zm(w=JeW!P*rP+eNp(O?z-?gxwj@F5}tQ9kbX;QMHaCcK2+dkfOE7BooU(H(|BaV+B zG+nMc>b5_;seqBfHkXt;g0zEH4by`SA@{t)x&ho7Ft;`be&rHP!7tK=--)Gu02SqP zu$!&YS#AyhaRw8dOixc+7$sn}o=*zZl$Dj$)EtBA`3Z>Kk3Osd?KLtI60gHTt-~Vc zK*++ zODK}ui)b(IPM>rY3TzodkSSUn7VFp7*SE&fgFuxf0+RXeyOm=$gVqypu8AoqjGF9H z#YWj*0-FR5^J-1zHiPQ2vZ1SotFGe!AVyrgTfOQA1wZ%zmzu7BXrcKnEiD}#`tpl_ z9&Q3;K0xob7Dvnl@jtXTzMF6kYyiSxEG(?&ul;!CuFK#A)UiA5`_-ouIUWHM8<-uB$d_S! 
z=5R^so3^PyWt9DW;~sty;T*SpYS-6^Bcgzy87@x6YEf*7KO#&06Jo-c>|$uUgoF8M zk&J4GWhOE7t7brwMVa7n$5R2Y4mkKRMEO?BUR?&dIq-^4>9S zkQgr(j)E#baqp@BRNi&XcLJP1>3@tOl~$a@a4ouw3x(^Pt9vu;hD%2{!vf!<^nMkW zdErJ&AqTnq_!bKX1YL@sYp?aQc?E3~M=V$-Q@`qW7GAz~YS@e~(mo(!B`bqgErOMO zX?%=VXh3Xv2WaPeceAfwMks_I9aRJ1DM@eVRVRI-Y~kGd8_juU_YelBrTVXcR_6Zc z-M`ky+x;i=ffXp}o4}9e6j-Q=ZbyeXXTCxX`PAW)2Au2v{&fj(a=I;;Mm}tRGk$mX zt4fCa!^H)$#^{#6$_}k4a{U*Cz-;vx{B!IE7#I^@gmml;8SrOzF8~t07Idq+yWdkoL3b)u$EYG@)Pj@p;javah94lt zn~yrdDPFzsdFN8;z}dvnp=e*5* zht0UVQv}q5V=V!cgg{CinTmcxB`?6!ptX&hpom(|5zD5zAqTA>;b|qKA+cZhU zTuzT_+6YO(0i3o#K%aK*T&E{rS!|ekyf86-qUe|s0AIx?fq(oWm58%36NsXQgH%2T zgs_!E@ARK*B@&{1YngHFf?R-1lDG7y@?W-h-WCSW-mLeD4C9&Wzkx)!8}#=K z<^w?c&IqKw{_jO>XNvvb2W>>tZ=c^hlVF1pFd5Rv-gaH6H$fk2o^ERZ!F)crU9C>! zVi~Z(uh+wX56kL@uTSI_ z#(Dn}g?@5hol$PEf#T7yAwtbZY)gYdi}R=!i;@Hh+^GR)8hN)VmW!NmOAQ;`7(ta= zZgkl%PKE(57~F5?#*bSv?*D#mg20Vt9z&!<2mdnPyyKshyU?u0Y^yDfDAOcnuB->j zJ_wMfRjs#`V+l|<2R5PI=%>KF@}l?L{0l)-D?K^O>S18Vpz81+6@nc2wk`et+&j;Y*2O^Cu+Bm&7f)Qz!n;c_wVr=eT?y9v%jrm(wB; zg9~HXd_ih=>SG{@;+kAH0Lvigff! zS${ql6{xm?ruK7w@B*FCWzumOpcT(6wW^vO7L(*lz(>$2=b-d5fC9E~=Yu5R_#sgX zg6~@b_(}SUe8snK-*R&)&Zl0Z6sP341G+wA#dmg1Cg1DzwN;lsj0%svU*}V#&Wuox zUPS2><M z`Fz7DniY&qLINGw-H;yWBYMK0x0sINz<6O;btCdP65rsRce;*60q;W)Wk;+c{Qhs7 z#(V$kLJ!RH@Si92P8C6;#d;-svOz&XMDnl^Ec=A5o2JlObIRW$d|#`rObXw9|L_u5 z9qBC$8UV^QGVfofcZV(0=Sj3JYubHq2U9di<2gL`fUgfgq01%dM%#CfAoTRpz6nE) z<1zvRfK)Ub!;68p6#ge-;0%QRrp}&-$>29PsFF6ZLn10NdrYusV{Oo6Y1g2ma3AN8 zVHGf2Y7{_ovtJudq`djvb+r<7ktoyM{14PdTkRC)29;l8{|S0A*)&p~IRe95yd#?AST z4pS;9vMMxFG?6|U&Oh`~KANzUKleev*|W5<0bPi2Q=jjs^!~pQC~KAEyH9OQOnT3S zmz%*2eQLm#K4B#zEnUCJV$OmR3cv`L;C;{HFqdXaubQ<>j1v?R1oX*EE@ct(tp;Wg zNZA8!FF9swi*v3((dEz-=ds&FcMX{T5JcSpw7#7qtf->e$r- zF!0b~N<9uZ_B;MCJB*5kQ43!#) zeMygKsyQ{T`Ke!MuB(all|`(wCfC~4gWF{b*27Lca}Pnmj8N?lH*b|UImZE4Su0kP zf<8Z-czU+kErk`~f@wLVX`>lX6vVL6zHw#7{r2Y<`T5_9Wz%7ZFIknh!yay#-1gyp zCT8f#SFR~Hvf~I+r)lWC9YMBGCoB3NA9*EprOm2SbK&??veehH`Ecr~P;UF%mm)9U z0|y~*Z_l9hSYv)L;K365Q>#lQLh)gwzuq}$X9CbD_jbz)WpQcdJ;^1~FkY+6XOD{y zT6Mp9JQqT4bRZ1VWU>^<3@Mlx@c_MfnYL1^$*j!%(Q07+yEgtrI^-I~Z~fAj8!6ky z+PSYhzpcN+qc>>cITTW;k*#0@s$81ma(r-Cv}7R^Mwcu%*|zhcdY=T0aq0R5($z_{ z>Nfe~h;YKy_&=Qe0WhakaFZcZ_?R?7S3o6P1Ax%rzMER0GWJ@ud)^7oCunLc zH9zcw8J>5~-3qX4bQo)H;7!q%59j1R1YS6IKlR!sm4lf~wPfTMUe>SU{VGJDD{0#P z#fp!X=4i=DJSfSQj!>@7D98A*K5q(3<>&xDSdtTkS*#XjzumZAu++2mZ=>w(^)i5i zSrlhV#q(b;$%=j`$H8`hUOk2e3yLzu&>$|oT_mxk16Y0%g zp4t>xkaI1Ipb`#R`Mp&p>|~vmdZze-&1JNWr3nVsv;mWua<`N0qd#QC_Fcb}4C*ir z@{69no2ichiYknD!49;hdl?a51LL*kSH`|_%$1s>yNVGXlN}mmF8^xgsNJ=RN!Hak zAw*9|2p`dMjd#cy4ixa1&-M1c&XA=Qez9D4z#f;tzcGA1+3~y0+W7h;5>rMK%gbh! z-*Tha#?%iy#~o*WYi|n86W;OWO)&%OpOl|_OZ(M#rxSbteu?3Jm9<>Y2Vn=PX{xr# zl4QF{j7gGyEw910Me%e|DO()28Gl|LXNioceqVV*k%j^fH+RZ}RUbof#2vO6 z9=?VN)1-KLyvo}qHr20$R&iNw`(6kwX*oqf+Jb3^s>|Ea599hR8rj|#LivXCk>vle zK>|sO8#%p(rSH*2nJOANYw;07Os@^*4M9f^+=~v&Y=p6(B?I$tcEGxMUL1HHlz}J1aftW3>13NZpMW8o z4@hO{l8I9xLO*&on~K$&f0l`WP*pdG)yN& z>rILIGP~E0I1V~9a(3%DM(VU-kJ6wV#?5J<8Q)b@=67W9~`}tcmOBF0TKDEF}7PS?>NZ?J-{ z`#g;wmkM7je}#7V{s?u%O;%J=#<=`X3d(t`e&d2u- zn^(Nt?9d{f*^ytH#OW83zVmZ)UxZ-!=WAE#-TOB|XQ6zg@pT`}g$`Sb#yS0(#dN#! 
zPhsc7|04c`DyDJpDXaDisrU}n&v%}kryI39Z7wAyz8Dz3ImJ3=QPBZ)&oQlBd%vUN zR6vckLhEvZJjK^#-bd9t<|JMb=ZaK6}sCNka%f{=7vzeTs|HiPY_|r zL9dMW=9^CUSt>&eNB|kKy@x#w8Ml7^1g;71nt-L~aTK+~MWj z_X8i@=e8%Z*vSdlfd?*nm%b~jT|aF8xV63YUq^>&?}G{P5@2Ef{cF>(*a^W-Z?WTr}2i!V4j?q zm@!rU8H~fFz}Nx|d_jnwpRnQrF%}b3p3(85*0D{OoYQWphEOwY4`7t?!p4R&%M$3Zd*;_pDhny_4Dr_?@Hg+~w)xGJW-s_q3q{KyJqc z&I04ERu)mv9w!8h-xHDKC-KKm*XVi2Pk&|BwC!J?WCg<`(#&fKC_xw^vZPKHg~T8- z?4`U)@9uk_myVw1?07IxsZKX3-}|l^Q@lGX=R0mhzjZ5*r~gpTZr1Da_G2Iw-&g+V z?9LB@+2hA%8f(PbjXX$~$>Hf>DCVLuRaw(M=6l6Pk4$A~&{g z+@4r2O>GwEBO@VMFjEg>IT;H!ZZ(specm7feSEOs6b;Ja`81Z*)7_@2=UFt0`;WQSArHy(oH;ok*! z90%O;xSrc5mdBf|GZ76V!rr&}x?dC---LlaMfYj;+PxJRnW>NZcIA$3VS-sZ=p1dz zC81DdO>6`D`t#tmk8eN} zm+dkN0#n-~Fs%Ze;YXNrfIU$}a&j`^nr$2H-R&*QmjN*9sRtRtb7=#rFb9Y6(azo7 zUC^w313bPfwX47?&a54)ZY_9*7xVS2KK=uEfw(w_U#)cpY%*#VA2S?Hw?VR=K4w{> zS%dqv$*9pLkwe5`4f&jww~1YzR!zpFJ+uZs%H-96Zw4_m{!c8%#7rSoP|Qy~i^zpVJoRr_0fq z;S(X157r#%e8JO9V=nV=4$Z|JQ0TO59k%1+znmU<1Al`pU?8{bKGju5HzxA5_4ewQ zU*7Ymvfvf9(q}t%#FSSIKkf2bum6bDbU*3y-On#9gkI%!r_)dmpqgY5EWlYh#AX%e zOQL;u-PU-r`)LHP)aNnxi-z;n1cQ=13#o4aMt>6E* zCX!K^`|&`#^hb`TeY2HUUt0T&OWMr!*4YN@TlC+rREtwm%Z z0>j=KLjW~@lWrg``E1zh0sfEz6n`>HS#t{BU=+2GPhM3C42hzl`|K#uHj5Nv#Wd*Q z00qYaZ1VyF1~aT0F3AYY=M3DWfAJ2iZO zMcq38;1_chAfogitsF2IdmcapT~--CZ&W1H*8NQ7tsIs1(#ygjC35uC>u@e*A|@jv zA|f8SB#@EW#<4Hapsm>PBYujYszp-h8xQ_Gw;l5%z- zUyCdef9Et}#mbIktB`3`kde>6F1=04cR!}TdX~ui_CcGIEkW>%N9bTX^Ce2Ml|V4u zw?TFVssaz4f~=1zs2@*~6xbF$cW3NM^jM^m@milA%Z&V{L(W@XpxeY7tctNlU>NNY z_KizM?+fPLj9aH2o{~_1lh!-zj(;)cM!&$Gx+rdDAA?LQmqeGlPZdX2&Jgo>^YKYo znDhF1L9}iD@i{o*RXy&tyVZ1EVcrLi6JCJ%#j2f;cE0yv^S?t7yQ?%y7i=-8=LBE& z;XOX~gK_6EK$Q3Ftae}t{>?91{nz}U{nds?KZ&XYfv6U~>R(?y!A_evs%Rzg>k{tZ zkb-qE%eDi{Rnjy{O`ENR#3sZI(V$G+XN91IW^S%HXc#07mk;VS<_%(t&bW6OxL&09 zbe^A8Rd=3lKg}%N{JxoQ2e+yV*94%``@PH8_a`V6;WEcA%eO)y=NN2{Tm zAac~Mfot=I@cmqw^VY51)$D195&+5>s`Uz_62r8~m z@$s-uw-S#(uewiOR8^u;IpEi)vmBUc&;! zYhhvGuPJBLDuVS;*ixAfXi@-b_9^(Jzs@_k|Bt4#jEaK$-uBR4ia63B($d`^Al=dp z(jC$%EsZE0Qqno3G{Qr7r_zmd^Pc&w_y5VYkYVbaefHkxzOSu#_;z6P6i6kuy^1ni z2F&7wiNbO(s26KMW^vQzXz&Xh5D^b!78ZmO_~Y;aW{VCjha-Z=Lz^xDRICf)`0?Gh zZ{RQEVq@R$ycY!*HO?&GeZXj6*bH2R0Z*m$SsK82T5u;wiC%Uc00wz@fJ%164>qw4 zL>YpsuF@qRAZQz6^ocAfoIg-#*?Zqjog2GFs2V#Y8^>Q)O{T*6sIOaH134x$f#O?L9%TT zM_bgmqJK~5qqLnq>WZ5kP8OU`-`Ca!AkM*-aQiSTt3a8)|?Ly3?h2k ze$f>8P>kQztc0MTs90dVu)6B_TaC0&9Dn0ShTF!}6d^<&{bT=+P(o>vK?Ol}ettf- zjC_zU)boR@3MFv!uq!i!&1X<&ML+=0l#juk#5WcLn>VSN&GUA;U#h3cu0ZZlU0n_I zF9c$J8Me-9px|;7$p#`HucCR0T>L&~aN`9&;JO)}brU*k=O+Dn<1Nr4i;jy^R#XJ7 zW2H)P#aj!S=O7&bLp9`6Hu+-TY!g#ZP>_;h2d@F?0X&HXlW<$WCBu(QMXK0!P&{6P zoM#oR-2;pcXe|g9@OBH}D|?n}4sNzkd+>U|I-54ou9j< z(Eb-{)@=sUeQ^*-xN&|TS`cuz#ZdI?=B~Z{tbg^vrc&z|tDCK-A_;JJyg7y@9YzG%X?1{MHx5bMVj0l_+ydNnHf}Ar7rbZhUVE-5e*7PA%Qbl zFMG-fIsc}K#Z#`M_gT!MuF3UN=Ig1o@)|{tXKf?y>@&T};~c6_4Sjm{s>A_x*<*V# z>(05ThEhfxwQV)SK^m#RqI79Ou30+5w^i-<80)a>d#pOkcdSsM+StL(rD%2ngD8bP zeuVpM+_S??t*7k7@2rE%^P6wz0+WM=U)s7^F!XDNp!8@{=!~izbe)s^K_BgB`({_y zzv)6$vuMTjgI>+qwA-m%W5l0^tbh=MXv*F_CqQi9+{VRb*5Er}TPG(ExP2I)iK^`MLjrU8p^>345Vm+?kI+dgF-xDw|BNpXL z43%t*?%Ta9ne>eF`H$kUP6k#eJeXgDArs6v&r`5fEi>bWjPfVz3>|L_=SqrxC!Ujf zn3W4nhz6c=FOQ@ms~BekU6A%i{~(g`whDGMT^dzik8dIR5FEWR5bByuUzZ` zqs%>#k+wSUAmuQs0lub#GJLxQ*+mnMJo@?-v?YPgDv1W<`zyxvu zT1(EdVlZf6(k9{r(WHD;luvpQhNp>spKeQR-d99Ueco6zQOr71S;9VDbmZIylZR_? 
z=EUd3eys_PCkqsO_35yd?Kxyw1ev~E7!4+E>*C^KXGh3pi~xmhdUB{1D~3*KZb^-% znEzFQ6q1#sR~y-)V$gO;T`)<7sPf-$7@dtB|akA(USe5 zvmJ(}x+q3gKazD7pQ&q+spUyDM=pu{eqlS~MeJ*vWRD|maY_vNu zMEjpbuPH-LGVlY}rREy#6&QS)l#@lg%{5@D;JQ&pw|%0CUgiV z>xzlb9W6&w7b@i4`Ahj;E9L*}u3Yr_64MnKFUoZrMwnhuk-a6OrKRREcodm2cIp1b z1Z{J3iweUMHL`QwYpOv1%(kcgA&doMU6xChM|>qHyd$MS-*f!sh=A?--Q&OOhIaeL zGR$cUv&MK=DqXpp)J7QeZR)lRJw-%Gn$WJi7>9nCYW%Ys3C}0L{sAWc`rF;e=PMV@ znGNrUJKl_s1tr9a4Whq}h#w6Sw)d4yV|WAYc4nu5Wd@_p_Y75uQxe|FDVOWSRR8I* z`aY(_Cu$a+*(&J4Ga;GR&Ik7KH49Dt>xdjT>KN3JKwD_g{;NEp_lRkc*mQyNW5F<3 zfGwLgbdRQ#`_tsKy$qKgg%e4PT@tNz)h48xgYEU@1Uisl%d!jcWh5wlLaLyd<2)c> zpF{G=%=iC<;Q?g|-KR%l#qK2<;!Q(9P{T(~j%QV?3FOYKp-ev$Ln0!a$%}}(r2oi8 zu7IqjM*N-X6$=LX`#}C()(!h8mJ~_++uNu>9yttaNHDt$NiU5WqVsb@{}EZKI*P6e zHVf2`l+0irtbhNgm=5;V5azU3k-fIas1rWt4nM+f?T_Y^Cc2)xrhUuwAD*!@mN4Zo zm@1;eWwJmZK0bt|b77&y-KeYU`VCDWLSrJ=Sk!Y<$t*NltIW&*o!Y=Kh(bDeu1I!4 zhKA{lA>rs16h2V1q-tq)VJ_|Yr3hq{M!&w^WkCDg4(y?xf(no5gj zxjdzSl#LK!0=+{Fii9s)Bk{@CcRQ0PE-uay3ldL@59E41x5MFMiR505L57*D{!=qhlE1vas;QW7>sB?|g1_R6zJ{?0_AA;1t7_w165fB;ua#Mr ztslcj1#`HN|5SH> zr(oIzN>V%XA<&6Y|C!U1tEbU(C1fWNuoLV=Ez`GBT((5(=CJ-)EnU_e3*n)({mGlM z#OLTqc}i0<`QHmki)XW(O1TP`stGtEiz0${Tug#0Y!Y4O%1ivU9)>f~rXEReBVccR zmt38AIcbh8_G?pfk&{ySVcr$v}PuuT^Yt%3*W| zZFfS-1-Lw5VhHg;JB+GzZz6W>-r*$)r!P{5A5bb@Zui(rRbhQUv@R~LG|4@-LK7yaMrP`ybUP5 zXMJqRDQbrAcyg~;?HCPL5c;TH$A|2e%|}row_Dl1;n7^3Ok6af9hoT-jtKk){Dv;g zzcQhXC|csg=qU8@h=DA*?ZGO>VoYLz*ct-b^uUTn=tce&HPP_Giu2>l$PS+s&;F>8 z;7*H5WO`IZ_Gs|)Qmu_LM_auJf_b1 zLQqpF1$*3lhq%rP|68h~fgmM|y~$OIex`z19ISSUj@Qk`$muqzfPjtkagEQRHd)C| z0t&_m%N&e|XLx^lHav#ijgMJeQm)54w;D$u%&vmkC3emi8Cf&iX%Qx6M8Cf=09|1>8zJDl(ygO!@(kL-(=VdD-i{K44 zjf(xrxi@ioc(JuiFh!1L`0+ax-k*7N%v4PC;{M;B7aDlSg$^wf)j0ZiOSt0R*|THt z81w5vMr#nq6%d5vCx&z_S|yZ(R)3%vdw>7Ch0Hc;H#yWby%pVN`lvuXeIQ z=b~J0swH{uo@Z%f#0*lgJ`aI?P^X7^{j8Re_S*vnj{{CWqlx-1eti13;D=_& zUbE;gNbbC*J-_;zfn`!rhX?(&!Ylz!&XO4AzcaxBqvk!IW8Zz%?Y1-Y9U2X81rAAd zo337DMrqi;DE$!v)M?blAQz^wBGxuQ9`QeZ6bf)Azw81?eCr;^Y8O}q&is& z|G#v+%ZXehxY{-Scj9@vXMWi-J3LrqAFoHrU4K?zZVaP^oGTFzbL~GDY-R2p#D{Pq zt+65-W4lZK@*_hGlR|~2R_9jl2n=&9R<&l?veDJZyn+OWd}Q4-cYQYRb}Uld3d>z_ zaLZO2(jjlXu$-zR5s#BzS=8OC-nX`Nc)(pohD^5dGz&jgum^E8xwLwXzBZ{I^&8H5 zJ~3@w;)bXem`jepT;Qbb9TuH0ZLF4B;yqo?TYu#m_wc8!vnt?j*+cKOF4q+AN*NNh z42oK%R4!uDPl)k{)8F-zB)3NaZhy0ivZCe3CuKjz8Pw+$9T{B?EbqVTE+oWkn9;y^ z-(5Z@TlhC7z(C#4ZWzN+|1qj_dU*FeF#YP!W@cn?={9oECCf;Y%nJ4R=0E6JZrm)v&}Z(ybt63n)g#v3-y(I|B%Pm7zjz> zB{C6P1-rc5?+>Eam49x|(&RnLiCmM`)!X*HV(|~4xG9mc>}o1M6bFVRWA~9OoI3h{ z&YUK^r|>-)|1(R*ekeus134Tyq5tw@XPBi`hk4i35a+Kw)+^4^!|#Xnj1_lg-Po2c zlNwYJ$X96*c15AG1e-3KdmAZO?z0q0b=;mOou#_fubbK1mJHPA$E$>W?9~UjX}>eJ zrwZKirwn-sbvvTHxa8L_O}8cQzx^`pMjiFV?JM?uET`m%_}P zLfif{iMkm_r(ZitZLQmHS~|H92R!Eg(gGqP{v$|z*;1=kv(LZJbazNpfAgyEGHYFA z*coE^!Rk^V6U318ngrCWgfL0@sAjsfwryNI3jh1+$By{%A+kgQj!cnua5rvVaEMd$ z5)-M^aBe=9*G~cXr*w zuP}^O{m)&&VcvTlz4_|xS9*hjT(;-lx6>Cj4FS`7?JX}Gvj4jbN9c|?y!~$HR^H~P z(UoM2d6S@cwyi7!u3QStR1i5@59O&yzXcIUtG%7l{n;o$_GVYxJuiOTNnO~e?S5$Ci>>sUu;Me-s2>VkWEO4F6~l38@Ae}Z z(1gsAB*of}>I);L=>B*Q!E>z{?sbQmbD6&oGZ8rjJ-1xwzkiV(XGjd?D&x!uAHj@>1)3oR8Us4F&{ExDdwt9F;{IJ1 zSDQ}UB!Arelw@`}9UZGX_U9>R6A5Zq{f>5?L|-@h!pMsiL-O)aM>cviL{3o5W|}F9 zXbbdN(n=IZJgnB{xtlWX4%Wsr;yHQz#)rH(d&D|34zm^=w3Pa6c-#tUmNIXQFXF z0K`5c84?$%PaI8#dncO{SV8 zepY1e>M?%t)@{aRlskPsOjO)_Y(BygeNBls0v$iK!n*M@;eLU@9EdM*^b>wV(h`l>_b=7chA6}@3lR_yYFRAAB z7F{uz)CJw)S?##Se^*+AF$`qAdGdXI(~8+Gw;u*C{g+!-FBQn9z4;lx@hI?oxpCqO z(eqo#_BS6(wbP)WAR&{t^?Hw-MjIWLPxb0KZFcLnRs+`iL+2B5XoeOqWnXBlkY*>vZuo 
zZ~MdW@uSm86Ggcf!hd2;@2}mren&^j(5ej-*B><<@<=z|pYZ{>P#Y+NT~q$8IM;IIjq}g-n>40&R&o1a*O*TCLZ0>KT>>{V8+&;@b*#B1Zz!ddD_0w$tJXw2vyr%hmnnaHw} zutWGb&IUh`8sjgoEF8@{Sr1$z8*`;}Vx;79r{Hgn*%`ZGC(Ds!VpgFN=)WAM)qg2b z|0>Q0l8y}xjl>CP-MvLJs?~1$r&yp*kcAYUCLPDN#uSgpIq*S&cfE!D znkj=UPGE{6QYGqkkGmu2ALP^zL#w0z8p}ZNOEf){r2}CjHfcCp|Li}L@HT5JhOJOz z##ac^GT8m`ztAnPtzINRoMO<3aNj^W$kl>&w@Q4DRzCGp8a@$k$Tj^Hy6%>FWusdY zjGKcJv4*9B6&Z{v%NLK+O~gYc2}g!W#zUDr(H{hA{I`F?57d>c!-xVf=tzV7Xrf$=AL?6wvgu&1+M=- z!c7&rmY5t{=Un3gKY9)>z4iLkE!vSqH{ZeA5##C|zWFUPs4EvE6k+`hk)OdWbs2m;R-yYBQw z`?X<7PvYxUd}K}(HVLsnu8`d>zJqf0&}kC#>&KWu!*<@4`vo>72`Xdch_yy1l>Et0 zGYjQmIg;kkuaxC>x04=B6X{P^;ry!o|1yRHOS1CLEBcgDbH^_Y(}%GiE&PLzhy=b% zn17Kn9~0_${Rbnk2R+gbsg9X4Ge^;_zVB_rl_@hT3PSYd0Rbh_OJQ}VFvQujm#Ky_ z1_;tRR+2p8y5--5J#}f#2j}F2gL3jXYW`GwoXa|mmPzIy?mU68@4>?M8z-)4Fwhao z6-mTP4!wksKI*-Y&H=*{Cnnr{n>pKIFT}i$(x?WomkZ0cMmS6mq{pxkO{r)&dDC#J z(BI2J>UPJd=EuHN>}DbBw5_fDJ0N#tAnXZb8?sL1qUz2sG5BOqy8T-dkb)|NDDe{(jLAx%wIMOcvJcbHQ@S7{5i=`vyNfYea@|8jlyAft zQ6%vEaScb1rNVN^o9{pAtMKq5LkbX(CWATuL|s(q<2CeGILbKdWsmbLg&=qahF2)9 zbzm|us1R!_Q`=8}i^HX4rWBt*Y@)$@h{sN94b(u`h{#CdiD#_RkV+~EsqPL}2{Vzlz~xPdxtYGuQcl=#T}(x_nfI*T~v3r~i3A zQ-ccb3q$dGQ*kqkJH&T(#uDrihIEM$3Dk)&W(5CWGlY(%qYGICQ*`3ZIb-vq2A6GQ zaZBmQOiPPnalQ(vSf`BOi=Z^<)xOZSgXfT^Z zO@cJiiuqcB9Oe1f<6M$T`!jV6bPV%m{XXQk1!8OVn3<7dI#7X~L0j~oPgNX9oEx!$ zR1?&KUt}yfoP^#}6QYwft`L!XUZ~K%Z~I+X}#OyvdYHi=;qNg($(XIZ#^Sq(Anv@%M2nim$LlxdKp3(I7bkOf*Xe+zo&T0B9)@X$e-f?aU+Str1B8Y8rwe$GmIKfzrY0rid3Ys zSw~@}nODIPZ>;e{s&01AvE|KBFKYbo)3_|&bnlY7^Djmq_W}z`q?zg zrFEHcvD{MPZs++2g2;?l&M&3bBhpMl|jqjmy{8&071GG!!^ftvxjmDKR0+i z@Xb%zp60|NYSN;cXt|xab4ps=ql}?-u`VVP*1l{Lu2p%s?Z}XsEy^3CUR0Gt<}n}0 z&F3JRmn7`pLRFmI zVJ?UqI`ggW&q~GP+e1r_{>Qc+x8>pQ@Mel${noWmOAUT-M1Oxs277Jo4`LoQxpLUo z8l1Nl=L*79=4pjl`}_)ldX?M}34`057JE-p@9!;sGhW>vO3?ZEOUW^$bKwVS_EuqZ zlLqp|BJfSjlM+<_w@{7jEv)=;UvBq&Z)e)3T$@=*@dGfo^f58ZplkIEYgSKH{_P=n zR@)+cRrupA&rd~r4iJxK z8-!-Bc(t$6)v<*kRdMq3S5GEt1?mD@@a&(qLr9hSt|LsEHbl$!cOgS6VTKqYIy!?f zI+eJXc1qlL*)J>p_4E0uoXUr!-}UqYOgj}g1eb>Gf)+!1m)kS(MaJa|m;B~p=N>!K zMb!RJ3$QBBTwOr@pOt;|ihy{Qb%QZNzVSFwbI2#(DikliSj#%AFVd-@9G3g&oS{cU zsgvok#jYD4_XsR9`;A$cMFu49crO9PLv> zL+d}SaVeI6bvMwGK&-lnwaes-1$O1|@Nm8IZ^w6JJ4p{sGsNT(Ax`=G)~gp2Gz7Zn z)X#A1fO@iAv((WI8u=}YOAhOOuk*nz3nxH}#Tr1K2jm?B?Ch#=%b;Vyh` z_h(!I^uVZOzn^T_*`9wf`kaBnThBsik}y!w&FVS#!WSCZ`yY5YYQ?}{2N!z+&GMIZ zPba7N2y)Lg1cr|y*%Txsuah<_RCEPf=dUePf>kc%7G7$$(CjfjTqcC#nW4;- zFXU@FcYeHPLzh4or$Gl$9SJdfsN|0`a=U3A$C3v!ar}^SmJSg8oncXj$e&2R=c}it zFmBx<%;8(sThLi_Guyu&)wAQKMo-;TCPZu-l`bew?BW@O)Xgkd%{qq?%8{HdsN2YY zGqTus4VJca7~ymLb<$mz*-GXs8#XXvm9s76aEvZ`Rc!jD$)9&}>8#2`+n{^w@P|@hV`h1i~6f>^js;Qcyl|xSRx8JgqQxlFZOdfo^;y%zZz=bJ4yi z!=JdXOl@;J?>J!ln@iKdoqr-t+i^qU9M)&x_bs2jy}iW+QaqsQ<*a?tz$0%=DCJDb zn?4Mu?*MX%TO}hWV5D$xOxVD{A_AhvqE8}@FkMI&8+P}}jGm(~07E$Z`qvAWuSNZE zX~OOpz*r2|5j&!hjo1Mi3jfP_OSqbdMpkIl)xZPT6V1%bm^90-fuz~z{$F291yiVo zu604VED#+(+;58pEbkj?F%zZU{OhAw1rj45GOU2>2EQ2H1LtuTA3r}sfzVnY65Q7b zoLaHng$0SHQg7!Py_4^cK!uC7QC_|-Sku0t6 z7LTXje(SBJleTqTmQS3zg3-{(4mguaaKd9ivO5Jgq`U_+fH&+5he^;C89~;I?n}pnY~i3Jdi+ z>+vzGRFXPAu8q68tpYCI$i777Pm(lizg|0aM*Lc-m(OJTP&l!qoX{EBBMB2G2B_$s zd!>6#v`>fA$>hrd$l=#G8hl~k=667%Mu_nD&}(*I_^ZH;JYv= zQdU%G19%VFI{$+#A7#GXkl$ZkZeyXWUaj+8`n9W-ev|W?_CAEG*zx>NJ_8LS8@4^8 z&*)j&aN)L>D&cUp=+o_fC0r7G2{f}xqWc%OfX@Sr$E}&47K*hXMb$Mf>7?s>7Y z-yWQp@i*eN@9Gm!fO887crQ8kg_>AJBfOSP%&F9<)X38hQ*$h8gH(OkGNMkbFsl3p zb0E5tOH!_+fRcqA^ZfN*Yms@OmRzD?jZ=`L79ZI1V$j5uKxj6JY$EWw>!e5V$}XCDrj3 zIZ1x@f;-#rp>~hw;@H=6ledlfW$dBT&^X=OfQyL&pRZx4W3#+J6u=};SGTHhhC~#a zcfmT(+4I?Ye>P$eIfW2*q2oY*%ND4SkT6cA$C$39nj{{xi>H&n8Lt~R9HnASN#)s& 
zxX{PLx78GFbsDD@cj<1v$a`&;y6_gA2D|ef$K~fcZ33;*-{S;Sq-^=ypBR7gS;KTI zwxw=W!MK; zR{P`A*R%bAmWP+tUO;3U$ic>j-}eu=<8Og494NaT=BmWynud86GeFb<5#zs27s>Nw zpj`#Lm<7O^${gJm6eX0KGq7UtptwmHpP;DLv;5&*noW5c{!`xo$;!%-g$n+QO? zI;qI(?m$O2Zv$HaK)%t_6aLueQmTnGyvuHrfT9D8nRVvz*zJu(IG}<#CN^+dG%kchrZ{ZP}xL%w{a;rZ{Yg+dTnhDK+f8L6~+&|jaKkea{uc; z?)x(jk5@prI3oCT3Utt)+Fw{ct&0|xYhO)RtHwDy?sI((F!0*Em<>405?yUDUb(+& z-xn^*a@O@vCwM30wpUEZXuYdp9j2y|7TG`>b`Zif)ywmblx0ed$r)R8fSPNDWPF{q`04q z6Ol_E$n=qNH+8~e#3_NjS4Mmcm8Ono;zco!IN=_b-+6Ah=;?TzL+vmR;1s3q^Cj=P zRIBWTVlMiy2Q?sDrF|$2JE?OK_36Iyx#~RFondU|zNKsm zujn6c*lhzyxJ+C#`v;!F#Fo=8jZ8N`BHRlVXoUf!3L7@a36BAE6>yhWfkf8V_a2Zq z{3p{_&MQ1x_W@bYe9Hy~z}yXnF8yybBlX+;{ovs4PMaigLVIA;(O}GcJn~reczUod zIoJ+uQ-c{IX?q{_i8v>)Z=nIJ()oFveU)xGv3=b*Fi-X;z5yO{JD9Gc_hdStG$zvg zHvM;*z2ZOjCtOick((H2)_sx-@O6Fxjw#8!N@%9X4BGL{u(IB?$Bvs#BVigL63gxVFBxK~ zhCj6HYFcMX0V<}h9x577H%B^2w*laJFz5Ne~38Gsx5vO4DxA0Q`_{SHKo0iqbKYgM%8{i&- z#pwK_p75I_)!bo7uzd_(&ip z;J7X6V?NT~FlE(L`TFwk#%0TAGskfWU)bL_fq`Bw80(0xP0fX7~U z7m&wSTn69DF;Ge3BLlMaD`Mi?1+Dg5IQj9W=ean(@0M$}0YLTvW9Uodps#8IiP-$H zNrRhkJe3WMxuZmzd1n6t?f?R2Q4(_U`b2+UUww3TcJ^xT3!7Q@@&uvm(Qp$_;FNgi z7QOEV`UCvOJ&5yg3T2tk-A0n~9f3_(xg@#9UTq5 z=yIx^5q|_Jx9MiP|G&S=hHz(Z$5r?4KUpI|Uf=Crm~y2~^LmBi7cVNPY%qqJ7QX3* zn$(lBqW$r{_UVtUQQH;?lcs*vq=QvK(2uEVhTQ&Urx&#*99^ebn30q}r-^U$&riDJ z>|3nHl>7tmgcUaA|;9iMinma@Uhc^wsPy*+3A&YUwq zj+JoM&~F(B07KV#9rb^8E43XfC?-qM>(8bZBY3dXA91t8K4!%*J~Nic+q?is(wm<3 zxI3R=;3wne;P_hT4eXtjUca#+ugRhm zWxqI;id#VD^b=IA5_O3w%u;ryok=FCP=`7?N|1HhAf_|k^-#7fefQ(W!AIq%bKN|UWPDTKe!__VC@kc!gk$ZIC8}< zuo`QZ9|2C3MvN<09S;Ci*G$krx_b8qFTfLkfw_rez{J9WuVAcoa-nsrAzpWtHa!n< zOk^Ymb)woxI`8p2irXvoZs3@Kgo>+yi{qUZrlDj4$>VM>bvlhh)1Mc4X13 zKL8#x;r+5&k@H+U1HX%D6MWER2!i{h!A>lK8Mb=ENkM*nWFAvC++QS_2r+eSAw+^i za6a;4qw~5W*eGz%5}-FQ5q;U`EmKa+Z*LDUHT?_9%63pwg1YwyJ@pUZW}=b(I=-|5 zOW6vOyi{7lo)T!XeqHNzwF`FYUhxof0Vms70HEOjUrU9-ogGs7Jn{6Cq=^MWHE_L} zfwvCMWSPW#75W`6FOZ967VZJ|O>j!2mT4mm*)t-?w@}n43Li&ehxZ=}aaKolOACcF zTdz`GTsWS`fyj#%apkVGMFCQz09XAhGaQ z(-iMy8~vQ>n#g(7R5Ek&pbZP>q(NS+tkn85zOSD=TmslMn&m#ilMiCg8#_S!0%-4S z0Z^Uuy^N9W7SGwp|7vSLuN95ZUsXZ+va6HDvFjh*QH#m?VD{~XqL%-W2d$~DSN+#a zo>wu^4%OGsIHfszJA|bXQRB*P*1-&i`?_`Sp)-DCp=>SUb=~*Qq;fA{hxl(Fq6JJt zA^U~XQ{Eqv5@@S?bOqkw7LWlBX^LyT$>Er#i0iKNd$NFu7NrVJ8?(Gk3FYre4sTT0 z0|}}b*k=UkG?8ejmDa{kW6`wZgw3KyiFb;*X`KC6=vXrvjQ$Sz<}HbE5q)Il;p%uS0)?XivM)` zTko@|yXF&jUG}MzhZIxbH4Zty7KG1gT+R5JtH;~TTy&)+j4f_Ke(ZGMV3O6{b70=~Uf+?G$KA^7xYGNZ2 z=5iQ08JQoj8m-^up@F5r6}C^+mm4ssL3jNDZ@rrMA1f8{|%nI83#g`@pLO z@JW@kXE(oaN>>1PkhSHY1{`iI;KhQi2ewDnw*AVlCIH}M0O%1(Nh~7fYe2*o0Eoml z(@oo1cScVaMqh~D5|fd^U2UMeR)N5G8*lr~F!0Rvo7-stF%bN%!p|uXctD;j3VMqX z|8-S;yq*|FmY)DF5@o(f0zRq}d)BYIgQm)>?0uO|L(X)NcvUm75B-*fK~YsUAs~YS zG3;)u9Tq)dln;o=Sl^p9^msHZo$WK( zz6?+X&c_TzZT2G)Y=3(Wq0Kqa2)kBuMYbNw4<+$g&@X@nb<#Ge)OIMi>f&oTvNVrI z=BYAp@t#(iQ+&oGT8AZ~*s3IkU7EzyLRV+t)%yv2)kJ|x?v!}v;fo~@#KB?h_wS1l zFe2Yb#PXxtE^NMSYRNXgr45z=L)O#%mtpcDp!CD5aHo!jbaA1Y> zINP;?I^_Zw1D^ov-zK`6_z7$SKrbx#3R0tYBH01pVg8r3*%(wHq&u`;%mD5Hywu3hKA%ig)UIc;_$%8{%_Esr|WQ9cP4s#2$Ahou}vbrxk9{LDEECgzt6>nNBgyUw%@gf zEt=7lYle-*v4Qs9lVGbbMIa ze!PiqAV;vuQ>=w@LQtuxC(9@&aK&u#R)*wDqt*swteg8VoPBM<8GU!5 zHiw$I`m*!F2TVbQ{r-@e1GmEI!x=w!iKJO7b?md0Z{(s~MDI_tjIx}%FyJO^e{eLT zh~l}f<>(b6J_9$ZmT!G_&nzjz4mpi_v{Q$4X3Ys$(wi$qj z*VotE3x2P+w^y@5Ux(#8KxiDp`$I9(z-)W;KZ-dZewORCv!Dm-v!JcPVSUFzcdkuK zI+mH!i_7b1{*%Q0{iux`;hYw({`;ya!y9s&k#3B?n?yxK7N$Qm-qtVyI&Z*X`!YWA zEAULvb>p>Y_IJjJ8-Z^kStDQkgP|i~{-#DUeKoTOD`U#?dAxZs_&pqU0^rc2Z@&8$ zYF5~k3ytpceP$^CkQ62qt6KVj!pm(~Jx)IW0D;M!Ivm<>4$XR@R2fiq0%z;!H4s+5#jgZ@EeA^sUlJL5q=7vjX&IaJPxl&Q;>3o)3OMk#)mol7f>mv 
z{s{lZF103!s^dY$_V%=`DAF6;kvD2yaL)j};WI$F!tFi*-3@-z`9FYMw=a9_TLcle z?IyJT2)m@MZC`0J#g?J2lTF_c3$CNJZ^}_`i%9l5>0Eu-3Nl*#my{i5qCx*jCrj_02CmVlc_vm!50#0{f zh=UEI6Zc{FIQ=A$d&U~!fhB+eG6vC}gxA3rgmFjEc!c-6`$Qj|!7v0f=BOX)V6qn+ z+WqPkKLBEa%IOvi;yD3Lef?_FP#o1yWPq$pg=|))R_x~C5f>fpndJeRo;sH0+g@@E z44{%^O_uwH{27boI!LRK$imLhLVB;8wjxP@9yr@iwPklH%rUQ%ZrPGpphH)2hK7B z!~&+`#o@V7j!mL-g#k<>86JE<hkJyH-^z37N9sDRwyNgokH(3lr#bfr&tZzS^x!v+uJEb zFXI;Lxq++w`$%Xyw3F+L146Xk?Ym$E>ug!v96`K-Y9}q<^Iv8n7yYGe^|!rr*#a8@ z0d7*OcBMT6YyvFGe94iK16IXC8Q*bcU(h`*%YvF}ZbFzyFeB!BuaeL4-JRvAz~gmt zd-HZ%_v36XM65Zr*=AhplksPV(@@pg2F=duM=hiY45vg|5Z8>u3jF@t=?o{oxu`sD zS_{SkEQo0kS2fPFt}<%w|J!`JXZ`+bKeMkBeV-oLS}7_4LA{4i=4h%U**GO5I=JR{ zr5cv|z*%L>k*(uOK>PDpK~0-sjNGWusuIT67RHWpQP5nDZ-=`mo=rReg4NsG&=@Rw z*TtCw9UF3^8G>;g)2PeScG_l&cT7%8J9W&Ja*rnR7?hLMUd$SnIKE%bR`UKlyT*81 zn#>Ei=8l+luxB`a4S0QEEoiD)=PC332?4_u3_yb%AZ9Xu=Yz7eAF9qw#FRXc_(t9a zwgAXcfaVhMUs~)(=GH=Z-=u->YS8HM6of)38QHY%T^k6Q)Y|%&|EC4$gQNC$-E?`| z9SWvE_-$MZl?ZY*EY^U4&IX|IfGQYl2%Xycf?7{N!2poqD${B(_-uL#JSltyC0F`x zg59xc(36HU+#QlIbrL;-nb{&Yn6?EqrecC@hd;7 zkdKJKm)~}*Hw|yCM*fYkb+A?j<2Rqm9*?rJ+Dw(pG+_GuMV!*s6Om#J1+d?Av+e`Q zYAlN%Drbd8OQ&f;L7zDot9_X|>QB@rs6i&{#Xzqp*B!5!#%Xec`-D9aka<52Ii1%p zFa&m0anR?n1l067(S2}Fr&urnC^98?$1RRG3GeHzt8(7H_%V*U7NbiOBIt7|_E0cz z6>w5%ATp3F!?#dWnTF84yWd1hQAhOJ ztt)(bPowBzyzU<>JNc{hl>9n4(iI`YYYPnoqq^9EO*N5KKY$nVJ(?JlEBJS0=Zt#c ztfu_wSI#~Ue*LZ8g)tlS+v(D<9`953Vq3#rZK;jPFtqz#toT3f=m_3ODsJ8GBQFze zG+WQCNh=NY-8y!g%Z`KC@af5;mEx|pG?qoiX9Andd*@ZhYeAOCRqaHna;LYq3A_-` zmWkp!rPFyE%=}zfxkdQ7Z0g0ts zLb@C25|DK19HEy+94K*?y+2(@=|~Ovlm0`-k+yh4drL z>M^wxLgoPeS$x@`fxrUJN4kSN!4p+5iD?SnRknTMg_c@uqL6X+sp6hP*dWhmJFh)s z(fkRX2EPKYI}pHx+&6dAQ!imG93^10lm{eFT90xl-dO$}9jI^OK4po6Es^TAq zEfg(cb+a(FJy$ApCv%9&dr=}%p4S`RFD;n|+{KM$(0Fz@bk^lg)a%9Cuy!7}tqCkH zrJINR-hLhAP2it*5ya&^UZn?e)W4w*?NsDkD?@*!>^0}v_UoJzFkxvb-FX(tPrv*N z4ILag*!G`P3%whDcH8MV#9Aj@RfjzP$4T;)8F`|yBf~YabdrzM7|mGRd)C}3-QztM zp^BxGMOjUWM3cx>FBzL^mbO6a)T_mn4RaLhPg!=_HApalgOKAt2QNR_@HIp2-=&XY zb(%_T>(nLMb@4H2a%Fb&qrvzRJ|zpS!I~7^p-$S~8WUBvwf|(jH7z=eY6}^ZpW7)< zInRfE+%d05^Eexu_7q_&OUIEM+yCD7Qd<7QUuEjyI6CS!9|x1(4#gF2O_12$oWQkkt-Z-ANilJ|$CbxXYp-zKz`V3^t=1hfote?&lF5S0| zU#~x;JO8QI+Jbyprc+QZGhsMBI&CcaDx%)=vb|Hd-~hA;AdlU904WU!40LYIX>9@) z8M#?D`MtoIr1$nL5bRaJ@E%zPMDQR`FO>e4EH4FonPApueGm7KsQSyx6;_9Jr0pAU zECK)}&SL=EwuCKmKzsKE9fS`ci#FM#00DQ#+`lSGV_Blc=mfLo%l-_M0_{La0F(?s!Sav1@An%( zvXUP)tku_1NlZw(ytgxg5PTB=>DNwU< zo_qhSK}s9tmJX1B-=@d6x(HDAp0>SF{)(zTKA2+z1d3Hpghj|9(|2F8@i#&r^8N1n zx_cdk7fCb%pUA5jbmvhQ75S8D`@Bq_$`a^bhe?bVe#kwk7g}>!>Doz&x&Kia`uBr4 z*b#K31m3biHa0e!T|TQW2wda}Jbi42&2Oyxc^qh?;7P1IFQcF$?D#@p(Knk9C%Z|~ zcJIpG#>6})-Z4IU>5UMqE-K%kwd2rMc=!#5XLMSv!F zLrVYhOK(@^G|{8dv{JaTI^hA3I6X$Lpa^&q3D)G@#SFk6y)tVzu9X6n3G=`6nGtmU z0Mlu<91KbXfSOmeRE1b06H+SE27yT?(8WAr4}te;kW@YVQ4V3bDh-gKl$5_DxO1nZ zfbR!7&8y_|TToR4_Bhi3qeS&Ag)xsc9>hBrGt&5U7|5;Os^p->m8ckrJ&iWA=k*x2 z12vVMk#F3}8^~tCdUpkj3K&)aQIc^juqAR`Z5lb_skywqz6XKT!xnzu_QG-?FrqDwm+Ylw zI$>*GYl7J4{Y0#Nu$j7v)G)iObPwKLZ~r&uhbQ&>OI+hmE^ZEv#4h`&F=5<#Zm|OH zvV+%_Wl19H9YvBity76f9;UdTDYE#*+)oM-b^lhzq_0oUH;zj|Dlz9y85OfiA zjR(j$_;?52;Lu00Ui+GRj-W5RobriyDu2=&vMa1#V|ah2qv7c6a2R_sn1kD8EsqXO^hdVB+(A&Q(A^ek0IhkD+c={T2 z-GUDzKb*GS)X_=&9L@o2)NMLX^k4JeNIGZZ+~`ucWtzzxU=Fr5bm|CODZft_qEj;# z)|&0-!I=4(H-u+~SQb9H$TVwvK)NN&R+FcCG)`bC^mp2uNH1_U=T$P1`AU|&g7YT+ z!{MIJmZm-PS!vH8s9~M|yK=AZwDT#RC7*1-Wts|D(hqcl{uTV$fimfu2%@ z;k$_xq&{`z^e5`|b9y-hZx#)C=@I!xc#B;$;2o5=AHkQ`LdN_;#1`GkI=k6E$! 
z?jmGTy#&bC885gxhf`f!-^@MYl9voZ=O%5{C9fn$VRja}syq$0#|g4c^BfUdpzSw9 z3ffEd<{ytIWk-8{RdE1c*odvVGmlaU=d(P&`+tEC_l)+wHy0!Yf$Hy;1LGhXzAo|8=0;8D!HFuwJUXwk{-1N5F-2B&Syy_>(M;#5p zElHD8sXgw)>_o*`#9PWA|55Vo$Nn6s(`w`e-oHKw4eATVH}qU21li9ZDEsySwMYo~ z!mrFeWCy;%Bgm<{pZ3o@3YU1g0ZcU3x9<_24)r}ZabFe3AWfBKXee1wpXis^V-z$r zQ+8c88(#A$LX<(>MXmY84U*+Y-}h;rWtzX>ye$14rrek182aJ*pl>dv}syL>aqi%hN_;A-;qf11Qfpp*s7IFLiJ;0*QSgL+seXw*C+{&tk7iSOY+JvmHpf&@RYL0aR8l7L>}DN7ZS8r;0ZDwS_-pw7ya^7g`--V95@j-iin9hA{kmUWi=m z*47rN)7LMLjg6JTL8&Bz&|CXz!*I{rta%^DuFDHJTZHqea6C;P!W@OEDH01fT3_Mi ztCS(L(AY#c-=A_sylp)}3^kbTb8@;DzHEVpyLS2@} zX4x`M9}j}vJO7NLnfLw7D7y{v+U^M=>rXbXAGL%O(Z4JYmv8-;xbJ}C&2Cm!p@gNhlfW?l?SRO0T87-aZ{b1?dE?%xcCDI z=t~zLy*fXO0_$`w$+ae1Y{E@o2`RM0QaEEW)1rM0jIADUJ zLXKIGSyG}7{P^J!8rp|gOZ9>H&jfB=0JV(AfX{Qc5nya7F|(T|OqmFF*8E^6Zk>9W zwk*M;T?Wu>E$$!An?*v=|FF*S8T7zrj2?{V|3)VZ&DnmV-t1u>xXe?LaHyjQ41ruI# zsNMdVAy0PRb|Pb7UM2u_?Z`w{70cxJG^uq)7hZj}HB2-vMkeSBf})#OH^c0BC73&Q zI9Xewwn}Uiij)Ojq8j!dBTM9kzQ^c&cOyV4xvbGaG} z#OM2*@fhBLNlg2`&Koa7Uz{5NO8zczdZtu<%b8~W>b4M}KMh++nKX#f`X<(kAmX3! zb+b>9n@c`ELfJ{b2Xg5~I^Vr&~+2DgM6E3#8wpo9~tSb5quNaNte5C!7b(co03#`y$z{Ox%? z>N>==`t$}5LD31gQ0i;@IQ5?_yKe;FNHQ}|fYd`#l1R><%RV92@?_S705xehcn913 zF%SVpEg%PsA}G^l%bWx{7r+_yeKes2zjN|m16pYYr%@BIvjr8z%KuHdJUUhM?L<>? zb$(05Yr`VgO*;Q1a(r@xT|B`TwOB*Qs9)KFZ$bI;81}8XCHnaEczL#jc!;N_eij>7 zgs!OOSo936{h14iuh0VYnaCmEq^pKgY?o$Qlq3(?H7&>iy$y3(^_wd?C2xXKRC@^H zepV^n0M2rL$a%q;QJ7W};WJ)86J7-xNnRY~#LYHQO?m8{GSP!5&0^ibUnLvX8bLr1 z_U8w`##c4$x%KV6zihS`tDh0YYHoG6BR*8*(@z9csFE2a68)L2-NjikztpC0!BCFB z%(tA)E5x-dKJT)gWk#bOZp0}gD2f`yDo<1nL(v6FtH46tR7#CT=&oB$;$dJD9rL`q`|bFNz{l6UP6g|Yh5X-_ zgb38)1&B^(V*{Vw=&S`4fa%2bS~ zn!z1j-FNw{_Em*d%oqQcPhB7$67KILGo}9iwyp&BzO|IrLEC=0%A8lmpF}Ps(EHc7 zd}pz_CCHh%DAFG3;o9s8Bw<$W8kO3c)-#fm zOYsEi)Xy9BjdNacnba=RJTGlt1qlUPD=F9m4`Bk+W@X@yt8}NFGUDSTtuM1XyATj8v)lGk!x2 zDrFgZ%Bleav;u4481?gBRhBiZzjH+jOY=;m3xj*H997K#BVK^}Vg}0xTWS6KC z^9qqMOdZ&KILmb9Ji60%aWbN$*|taNtQ~3xo=$j#CGcK#O!sI1h-jH1>K2q)Yp}cY zL8eNZC7*5NW8WV{vR1OC3vZUXs}p=D;k|G4aC@$97_x<)R4p|8+n;fnXLD0hZv?vk z8Cezs+MvW<$(qTQpvOEf3rVJT`#Ge4zch@4w3h1SVU00Sj?Qb?#SSA-V?bpNYgXd0 zf+o;qF_lar-6%{a`h4qnB1}zK1VPS9BJS?fk7ni#^JTYI!(%_;ZN0)r zk`X)+NC}Hh$-y-65UDIOuD4wf#Vtup8PyaY#-Z~NZy6?u#T+|pmw}YHNO6Bz9_px0 zHW)n%gE>EQWS%O5k)XifS{9}gvDgE$c3uqmlKXMaLj)}mTG0>(=kM8{YgL6P>U2y5 zX--zEKRFBjz4uETPGk2KsCNhx=2@B87J>MwzglRifyv||(8bUK{D5D&&ah>NmgaRt z(zy~N60E!|&XT|;2tv(?AjGt@nD1;6P+8YWeM!Mh3@f2xw&Jq37=>XfE5tiO64=TF zFP<@3?{sE_v=_q@{YVSwzBY|0TJ+PfEKZQ+GR8=Zr6o_WdWf(s!(1(5RUH`4_#D3m zHDW_hKI;0F%Iy<`89r~jfON&1QL6X_Erd*5=oHY@d7BXFEl4J!UVq z*lNsAVn$2b3!OP4!YXN&Iu#Xy!qzd?PqhC5hND@EbOg2fC{`TxC4r?3XS^u;69YYI z6twi|@<}w&R2%&Xjgo&+Am%Tc_Rr(@o82B#?Gz+3#Ke~OT%>&MCc9b@mHAo&q+FvI-)r!+4<-+ z2q61nlwDZQ=y;wpRQ{)gIuu7moFb4Q!z8O#gv`9dtt%&xM@P(0X;ULBou;sQCTiFf8crL%eeG;Z7>K4-) ziQ;L9b;9|hUsD(>IaL`wnwYm~cF5QnIfVyD6=@bnQPw$&Nl7E}vu-zMIAMa4o<aC32o^6r5@i;P(q3lSF@kjU$U;vA;W9iWW2B;F=_m! z?f_Vp%|9NRn|Zr(ZvNL52Der$!gNf}u!-hUOFBC$hGSHsMu(#cxhq5j#GvG!Jt-5N zR~B=~Z^3$%`$4vj~ch%-VsP*$-#NG&v5FiN?tyq%%}l zvM`lDX)qoy(~-zWTqVDL(vD>Gxu4aRq|_6p;pQ+wJ&&$a)ZS}LZ}7n_pwVkAA759< zzhWIv#K+6Rh%Rw6#*vSg0_s}SZauEHU*Lr2cJaJq zM{HflwP0cUofB@NBD;=@F8js}bqt3R)6ez%GiQYHlcsEeW4#jF9^>#iRSKS`L>m6$ zWbgjVeZ6`17Ac?`ZRl?+ci*a{++oKCtNp)(48m}c7ZU$$n%N-^BP1-i$>G|nY>HUM zvK;eyVZQT}uio;%O+amoO0QXsw)>%6;;Je%^|oc$qMY=+wXM)MB&Jxd)wC}o`y1|2 za-Syzx^`<*XL75M&pF=J>FkE?^pV}z<-uQU-msd9kg!8GhJS3U$J4lKi zSmWu1P0|wwym4`Tq+9XXaOkMa35AuSv9v?ZUG@n@kj3?QI(bnk2X1-3Yog)2r3`cgR7GD0!$Z`SDPnwk4f3Lb?Rie{WHdirsVx20DD#=}llclJ`vFGZyB&k3WkZIDYFVzV? 
z`Y7UY9uBmmfU)x+X5;(C6F-Ny%&b3|*>;=ReVW(RWrNZYX4k?HrX)A6TN7MHXFX2{ zN2wIy4f|9iN-MTPtSs-WgiG8ZKIy(rAsq`}a)u-^q zg2gWjeNA{9`3nCkiUn~l;IGo75x&cN>{Iak?FIW9A1ZIyXhKFAFJZ>*Kf1-Rx!$IA zY!sM78k2mxANj@8aR^Z`baX4s z3KDt?aVe*EU`-3kma&pnp_{6+68Ee;-Lm>SKdU_~CWVtz`7Z&I$N7&;iiI}qaOFsc zxOVAV>x}yKzEM|pM^lfzBM05n9~P(8j$3z}S4>M8V(oknIxrVVEx||`E)`b6TgAvd zZ`k%RDpzTXn8`1NVHpV{uI$TFs4^?&vt+UVT89Km6>HwiH8|$S7v&BqmDgIXEE&C! zk>?mzNlc7$LW%CI!>*L=qB+FuHzu$XfD9Aumc87@mY^jjR$A9F+WdblK-!7F5o`hLp8zZBU0aesdJbiH8Urg-BrFU^mWVSVslP=dDo(4y zR70r8|EC-Gzn#jI6ikyN2v(TO6Vb3R`s8RW1tP@(Ru}_gk)SDE(n6!Ea<*#ldGiJ-bwhp$i#m;$6;sc@o*tgc-P1?q?5Z$+KiRTLGHMHGfPkoY5WF zCjy9)z;0CzIQM%pD0lx86zifh51X$LoUbGq9nC%okBcIh3bDn6G?&UrDtB|giYsC& zjdF;_Y$;r3+P;PN(5Rt&Ipqs~HIXE9ski11oK1PAk#I_L&wT6{$rALVV>|dSj_O(d zfQmALM47oOTYB|9GQT_cZXf(r z(iRCz7E+boqSI>vtGHr%1)pyeii*XN%_%a&;dO)|y^7@- zJv1c5NhfNFVvW3|+X~Tvd~$NE$~LQ|7aTnfPRKTnjl*o<{LD&M6h?;M=~}4jj%1@* z{~iL5ZcY@*|JC41Mbhq;1%w`dX;$h5anQ7Vv7=K6ZBT(7;UED|n(vk}Ev({MVC8`I zVo9EEo;a%>fuaR&aujb^nD6?IyTI$CSb_J7Qb|2t6s-iUlwUERuxjL*#O|6uBNRvl z1HWO=0A8uDW0o31F%3w9NJl;{tLq4Mg4V)z@BgwD*yn$miYI6L(S!40qBG6iNqqlx z#Zrp$fH#2<0jjC<5Vh6XcN^RBKC|M}^z~bllyUE8mKW|%dQZ^XiL+gVh@(?zJC;<& zFC@4=eV40f@_g-OV+_+~K`cdvW*xsGvsM`=sjj_W z>?g8zEkx#O%X{I4>r>KLV^qAS&6nTq+a&6{NVTli+8EypK}@8(+*q<({yt=)TCAfZ zs1ubRFnP4#9GOGX5?M2T%{}-*Y*kV3RU)EPn#UokUr>2G*i?u3=_*IZ{$^**M3B>l zNVpAm)B5)t3skXjf5nMLi}&8d;j>kH4uK36;j$^)2(Y*Z4b~X(yPv(k8VTXAt}kX( z>uN4+tBTCV4AarJJGt`FkfwSQklAxn+fiBE@cUn2G~FFa#w?e&nQfJpGQ9OH4VOz_@i0qsW&Vb6$Fk5Iu=gN1sR z)sH@JLqGMFwVaDlOh-Zaqg9dnDTSCtEZKCz)W^hDjH}!?$`dE>rmMc)b)8dU!1l3X zc%d&6HuDN?6a={V9o;{6S@tKsm%c;zXiwkAWOR2?8LO*BWow~8WsDtM6xPqTMw5kj4Fmp29|AdxyGKz)A6%1cmpd(<{=hb{C`W z7m5{v_>0woq1!_o*=}DKlNio^Pr#gMPm-3bweN+#t?8z8ecw6^2Qn+ zH_emBAv8ldP*%17Th&r$$+{xK4NJc28$Y*;zZ1eD)JHd^KoVbzB{F|6Nv!?u1)*!r5Y2HWJGa8VaL*(4AW1C|Rdl1xe5E z#f6j{q_A+NS@-zg+GK+jV=sss=dP$|{eNW#IAsR=itp@5OWl9X!R_p5*In(hU7zgv z=jiH`!g&1UEJiSDMRPjX@P8R-V&WE`zdRupsE&dRF11FgIkdAwsXJYE#u ztyVmoUhGk21)0}|nv{ix4>8gs??+#Kw*96gKM&huC7>g%p)j>?uAEEpgw5%EuS;;J z`Qmc#&xl2(v2i2wUoP1oONTa9t&e>X0U-rzNum(6lt1N3f}ybA`?y4m=*iox%NVOgRl3hb%zrY}FnE&m0M!zB8AHh<^e*~DI509Yma0Pc3wpUR!^{g#(Wm}AG+Mu( zX7lN$-H)gK+Raa0=@1L*U+0(gLpGfB#_6|>r1b6KPBV#>YeJi6X` zdNArZT%aWiZvY~jjM${JK+Qt;I(1FwKc8%u_<35t-KJr%`|({t$9BB1EL?`2f_6v- z4=3Da;kwQOBF}?tj7*;}uz9obllGzU*LJtrKZ^UYq?4EjZ424)mgoNF(+=C z#nye4l-`f8P6F0!2>TrW? 
[GIT binary patch data omitted]
literal 0
HcmV?d00001

diff --git a/docs/source/tts/svs_music_score.md b/docs/source/tts/svs_music_score.md
new file mode 100644
index 000000000..05645d2b2
--- /dev/null
+++ b/docs/source/tts/svs_music_score.md
@@ -0,0 +1,178 @@
I am not a music professional; if anything in this document is wrong, corrections are welcome.

# 1. Basics
## 1.1 Numbered notation (jianpu) and note names (note)
[figure: piano keys and their note names]
The black keys in the figure above, from left to right, are: C#/Db, D#/Eb, F#/Gb, G#/Ab, A#/Bb.
The 88 piano keys (figure below) are grouped into the contra octave (大字一组), the great octave (大字组), the small octave (小字组), and the one-line (小字一组), two-line (小字二组), three-line (小字三组), and four-line (小字四组) octaves, whose note names take the suffixes 1, 2, 3, 4, 5, 6, and 7 respectively. For example, the one-line octave (shown for C major) contains the keys C4, C#4/Db4, D4, D#4/Eb4, E4, F4, F#4/Gb4, G4, G#4/Ab4, A4, A#4/Bb4, B4.
A piano octave is the eight scale degrees 1 2 3 4 5 6 7 1, the last being the higher Do. **Following the interval pattern whole-whole-half-whole-whole-whole-half** yields the degrees 1 2 3 4 5 6 7 (high) 1.
[figure: the 88-key piano layout and octave groups]
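The octave suffixes above follow scientific pitch notation. As a quick illustration, here is a small Python sketch (added for this document, not part of the PaddleSpeech patch) that names a piano key from its MIDI note number under that convention:

```python
# Name a piano key from its MIDI note number, using the octave-suffix
# convention described above (MIDI 60 is C4, the start of the one-line octave).
NOTES = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]

def midi_to_name(midi: int) -> str:
    return f"{NOTES[midi % 12]}{midi // 12 - 1}"

print(midi_to_name(60))  # C4
print(midi_to_name(69))  # A4 (440 Hz concert pitch)
print(midi_to_name(58))  # A#3, i.e. the Bb3 used in Section 2.1
```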
## 1.2 The twelve major keys
"#" denotes a sharp (a raised semitone).
[figure: the sharp major keys]
"b" denotes a flat (a lowered semitone).
[figure: the flat major keys]
The name of a major key tells you which piano key the degree Do (1 in numbered notation) starts from; in D major, for example, the key D sounds Do.
The figure below maps numbered notation to note names in each of the twelve major keys.
[figure: numbered notation vs. note names for the twelve major keys]
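The correspondence in that figure can also be computed instead of looked up. Below is a minimal sketch under the same whole/half-step rule (again an editor illustration, not a PaddleSpeech API):

```python
# Compute the note name of each numbered degree (1..7) in a major key
# by walking the whole-whole-half-whole-whole-whole step pattern.
NOTES = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]
STEPS = [2, 2, 1, 2, 2, 2]  # semitone steps between degrees 1..7

def degree_to_note(key: str) -> dict:
    idx = NOTES.index(key)
    mapping = {1: NOTES[idx]}
    for degree, step in enumerate(STEPS, start=2):
        idx = (idx + step) % 12
        mapping[degree] = NOTES[idx]
    return mapping

print(degree_to_note("D"))
# {1: 'D', 2: 'E', 3: 'F#', 4: 'G', 5: 'A', 6: 'B', 7: 'C#'}
print(degree_to_note("D#"))
# {1: 'D#', 2: 'F', 3: 'G', 4: 'G#', 5: 'A#', 6: 'C', 7: 'D'}
```

Running it for D# reproduces the E-flat major degrees used in Section 2.1 (D# = Eb, A# = Bb).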
## 1.3 Tempo
Tempo describes the speed of the beat/pulse: how many beats there are per minute (beats per minute, BPM). Together with the note values listed below, the BPM fixes each note's duration in seconds; a short computation sketch follows the list.
[figure: note values]
whole note --> 4 beats
half note --> 2 beats
quarter note --> 1 beat
eighth note --> 1/2 beat
sixteenth note --> 1/4 beat
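Combining the tempo with these note values gives each note's duration in seconds, which is exactly how the note_dur values in Section 2.1 below are obtained. A minimal sketch (the helper names are mine):

```python
# Seconds per note value at a given tempo; at 95 BPM one beat lasts
# 60 / 95 ≈ 0.631578 s, the value used in Section 2.1.
BEATS = {"whole": 4, "half": 2, "quarter": 1, "eighth": 0.5, "sixteenth": 0.25}

def note_dur(note_value: str, bpm: float) -> float:
    return BEATS[note_value] * 60.0 / bpm

print(round(note_dur("quarter", 95), 6))  # 0.631579
print(round(note_dur("eighth", 95), 6))   # 0.315789
```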
# 2. Application experiments
## 2.1 Obtaining music scores from sheet music
A music score consists of: note, note_dur, is_slur.
[figure: the numbered-notation score of the example song]
From the *bE* marking at the top left, the piece is in **E-flat major**; the twelve-major-key correspondence table of Section 1.2 then maps each numbered degree to its note.
From the *quarter note = 95* marking at the top left, the tempo is **95 beats per minute**, so one beat lasts **60/95 = 0.631578 s**.
From the *4/4* time signature at the top left, a quarter note counts as one beat (the lower 4) and each measure contains 4 beats (the upper 4).

The music score obtained from this sheet is as follows:

|text |phone |jianpu (aux.; a trailing dot marks the octave above) |note (counted from the small octave) |beats (aux.) |note_dur |is_slur|
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
|小 |x |5 |A#3/Bb3 |1/2 |0.315789 |0 |
| |iao |5 |A#3/Bb3 |1/2 |0.315789 |0 |
|酒 |j |1. |D#4/Eb4 |1/2 |0.315789 |0 |
| |iu |1. |D#4/Eb4 |1/2 |0.315789 |0 |
|窝 |w |2. |F4 |1/2 |0.315789 |0 |
| |o |2. |F4 |1/2 |0.315789 |0 |
|长 |ch |3. |G4 |1/2 |0.315789 |0 |
| |ang |3. |G4 |1/2 |0.315789 |0 |
| |ang |1. |D#4/Eb4 |1/2 |0.315789 |1 |
|睫 |j |1. |D#4/Eb4 |1/2 |0.315789 |0 |
| |ie |1. |D#4/Eb4 |1/2 |0.315789 |0 |
| |ie |5 |A#3/Bb3 |1/2 |0.315789 |1 |
|毛 |m |5 |A#3/Bb3 |1 |0.631578 |0 |
| |ao |5 |A#3/Bb3 |1 |0.631578 |0 |
|是 |sh |5 |A#3/Bb3 |1/2 |0.315789 |0 |
| |i |5 |A#3/Bb3 |1/2 |0.315789 |0 |
|你 |n |3. |G4 |1/2 |0.315789 |0 |
| |i |3. |G4 |1/2 |0.315789 |0 |
|最 |z |2. |F4 |1/2 |0.315789 |0 |
| |ui |2. |F4 |1/2 |0.315789 |0 |
|美 |m |3. |G4 |1/2 |0.315789 |0 |
| |ei |3. |G4 |1/2 |0.315789 |0 |
|的 |d |2. |F4 |1/2 |0.315789 |0 |
| |e |2. |F4 |1/2 |0.315789 |0 |
|记 |j |7 |D4 |1/2 |0.315789 |0 |
| |i |7 |D4 |1/2 |0.315789 |0 |
|号 |h |5 |A#3/Bb3 |1/2 |0.315789 |0 |
| |ao |5 |A#3/Bb3 |1/2 |0.315789 |0 |
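The note and note_dur columns above follow mechanically from the key, the tempo, and the beat count. A short sketch that reproduces a few rows (the degree-to-note mapping is transcribed from the table; the helper is hypothetical):

```python
# Turn (numbered degree, beats) pairs into (note, note_dur) rows
# like the table above, for E-flat major at 95 BPM.
EB_MAJOR = {  # degree (trailing dot = octave above) -> note name
    "5": "A#3/Bb3", "7": "D4", "1.": "D#4/Eb4", "2.": "F4", "3.": "G4",
}

def score_row(degree: str, beats: float, bpm: float = 95.0):
    return EB_MAJOR[degree], round(beats * 60.0 / bpm, 6)

print(score_row("5", 0.5))  # ('A#3/Bb3', 0.315789)
print(score_row("5", 1.0))  # ('A#3/Bb3', 0.631579)
```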
## 2.2 Some experiments

| No. | Description | Synthesized audio (diffsinger_opencpop + pwgan_opencpop) |
| :-: | :---- | :-: |
| 1 | notes, note_durs, and is_slurs from the original opencpop annotation; F-sharp major, starting in the small octave (group 3) | [audio omitted] |
| 2 | notes and is_slurs from the original opencpop annotation; note_durs changed (taken from the sheet music) | [audio omitted] |
| 3 | notes from the original opencpop annotation with the rest removed (毛 takes one full beat); is_slurs and note_durs changed (taken from the sheet music) | [audio omitted] |
| 4 | notes, note_durs, and is_slurs taken from the sheet music, without the rest (毛 takes one full beat), starting in the small octave (group 3) | [audio omitted] |
| 5 | notes, note_durs, and is_slurs taken from the sheet music, with the rest added (毛 half a beat, rest half a beat), starting in the small octave (group 3) | [audio omitted] |
| 6 | notes and is_slurs taken from the sheet music, including the rest; note_durs from the original annotation, starting in the small octave (group 3) | [audio omitted] |
| 7 | notes, note_durs, and is_slurs taken from the sheet music, without the rest (毛 takes one full beat), starting in the one-line octave (group 4) | [audio omitted] |
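For clarity, what these experiments vary are three parallel per-phone sequences. Below is a hypothetical transcription of the first twelve entries of the Section 2.1 table into such sequences; the actual DiffSinger inference entry point is not part of this patch:

```python
# Parallel per-phone SVS inputs, transcribed from the Section 2.1 table
# (characters 小酒窝长睫, i.e. the first 12 phone entries).
phones    = ["x", "iao", "j", "iu", "w", "o", "ch", "ang", "ang", "j", "ie", "ie"]
notes     = ["A#3/Bb3", "A#3/Bb3", "D#4/Eb4", "D#4/Eb4", "F4", "F4",
             "G4", "G4", "D#4/Eb4", "D#4/Eb4", "D#4/Eb4", "A#3/Bb3"]
note_durs = [0.315789] * 12
is_slurs  = [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1]
assert len(phones) == len(notes) == len(note_durs) == len(is_slurs)
```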
# 3. Miscellaneous
## 3.1 Reading MIDI files

```python
import mido
# parse the MIDI file; its tracks contain the note and tempo messages
mid = mido.MidiFile('2093.midi')
```
From 6894a2a77d55e006ebc2caf200dd0ac59f138e29 Mon Sep 17 00:00:00 2001
From: TianYuan
Date: Wed, 22 Mar 2023 18:24:47 +0800
Subject: [PATCH 35/37] [TTS]fix elementwise_floordiv's fill_constant (#3075)

* fix elementwise_floordiv's fill_constant

* add float converter for min_value in attention
---
 paddlespeech/t2s/modules/conformer/encoder_layer.py      | 4 ----
 paddlespeech/t2s/modules/transformer/attention.py        | 8 +++-----
 paddlespeech/t2s/modules/transformer/embedding.py        | 3 ++-
 paddlespeech/t2s/modules/transformer/multi_layer_conv.py | 4 ++--
 4 files changed, 7 insertions(+), 12 deletions(-)

diff --git a/paddlespeech/t2s/modules/conformer/encoder_layer.py b/paddlespeech/t2s/modules/conformer/encoder_layer.py
index 26a354565..6c416088b 100644
--- a/paddlespeech/t2s/modules/conformer/encoder_layer.py
+++ b/paddlespeech/t2s/modules/conformer/encoder_layer.py
@@ -113,7 +113,6 @@ class EncoderLayer(nn.Layer):
             x, pos_emb = x_input[0], x_input[1]
         else:
             x, pos_emb = x_input, None
-
         skip_layer = False
         # with stochastic depth, residual connection `x + f(x)` becomes
         # `x <- x + 1 / (1 - p) * f(x)` at training time.
@@ -121,14 +120,12 @@
         if self.training and self.stochastic_depth_rate > 0:
             skip_layer = paddle.rand(1).item() < self.stochastic_depth_rate
             stoch_layer_coeff = 1.0 / (1 - self.stochastic_depth_rate)
-
         if skip_layer:
             if cache is not None:
                 x = paddle.concat([cache, x], axis=1)
             if pos_emb is not None:
                 return (x, pos_emb), mask
             return x, mask
-
         # whether to use macaron style
         if self.feed_forward_macaron is not None:
             residual = x
@@ -138,7 +135,6 @@
                 self.feed_forward_macaron(x))
             if not self.normalize_before:
                 x = self.norm_ff_macaron(x)
-
         # multi-headed self-attention module
         residual = x
         if self.normalize_before:
diff --git a/paddlespeech/t2s/modules/transformer/attention.py b/paddlespeech/t2s/modules/transformer/attention.py
index e3c9a992a..3237be1b6 100644
--- a/paddlespeech/t2s/modules/transformer/attention.py
+++ b/paddlespeech/t2s/modules/transformer/attention.py
@@ -103,7 +103,7 @@ class MultiHeadedAttention(nn.Layer):
             mask = paddle.logical_not(mask)
             # assume scores.dtype==paddle.float32, we only use "float32" here
             dtype = str(scores.dtype).split(".")[-1]
-            min_value = numpy.finfo(dtype).min
+            min_value = float(numpy.finfo(dtype).min)
             scores = masked_fill(scores, mask, min_value)
             # (batch, head, time1, time2)
             self.attn = softmax(scores)
@@ -192,12 +192,11 @@ class RelPositionMultiHeadedAttention(MultiHeadedAttention):
         x_padded = paddle.concat([zero_pad, x], axis=-1)
         x_padded = x_padded.reshape([b, h, t2 + 1, t1])
         # only keep the positions from 0 to time2
-        x = x_padded[:, :, 1:].reshape([b, h, t1, t2])[:, :, :, :t2 // 2 + 1]
-
+        new_t = paddle.cast(paddle.floor(t2 / 2) + 1, dtype='int32')
+        x = x_padded[:, :, 1:].reshape([b, h, t1, t2])[:, :, :, :new_t]
         if self.zero_triu:
             ones = paddle.ones((t1, t2))
             x = x * paddle.tril(ones, t2 - t1)[None, None, :, :]
-
         return x

     def forward(self, query, key, value, pos_emb, mask):
@@ -221,7 +220,6 @@
         q, k, v = self.forward_qkv(query, key, value)
         # (batch, time1, head, d_k)
         q = q.transpose([0, 2, 1, 3])
-
         n_batch_pos = paddle.shape(pos_emb)[0]
         p = self.linear_pos(pos_emb).reshape(
             [n_batch_pos, -1, self.h, self.d_k])
diff --git a/paddlespeech/t2s/modules/transformer/embedding.py b/paddlespeech/t2s/modules/transformer/embedding.py
index 7ba301cbd..f90eb44a4 100644
--- a/paddlespeech/t2s/modules/transformer/embedding.py
+++ b/paddlespeech/t2s/modules/transformer/embedding.py
@@ -198,7 +198,8 @@ class RelPositionalEncoding(nn.Layer):
         x = x * self.xscale
         T = paddle.shape(x)[1]
         pe_size = paddle.shape(self.pe)
-        pos_emb = self.pe[:, pe_size[1] // 2 - T + 1:pe_size[1] // 2 + T, ]
+        tmp = paddle.cast(paddle.floor(pe_size[1] / 2), dtype='int32')
+        pos_emb = self.pe[:, tmp - T + 1:tmp + T, ]
         return self.dropout(x), self.dropout(pos_emb)
diff --git a/paddlespeech/t2s/modules/transformer/multi_layer_conv.py b/paddlespeech/t2s/modules/transformer/multi_layer_conv.py
index 91d67ca58..a322becd0 100644
--- a/paddlespeech/t2s/modules/transformer/multi_layer_conv.py
+++ b/paddlespeech/t2s/modules/transformer/multi_layer_conv.py
@@ -69,8 +69,8 @@ class MultiLayeredConv1d(nn.Layer):
             Tensor: Batch of output tensors (B, T, in_chans).
         """
         x = self.relu(self.w_1(x.transpose([0, 2, 1]))).transpose([0, 2, 1])
-        return self.w_2(self.dropout(x).transpose([0, 2, 1])).transpose(
-            [0, 2, 1])
+        out = self.w_2(self.dropout(x).transpose([0, 2, 1])).transpose([0, 2, 1])
+        return out


 class Conv1dLinear(nn.Layer):
From cc02b007fe527e872cf5628f63174875aa87eb9f Mon Sep 17 00:00:00 2001
From: TianYuan
Date: Thu, 23 Mar 2023 14:40:11 +0800
Subject: [PATCH 36/37] fix paddle2onnx's install version, install the newest paddle2onnx in run.sh (#3084)

---
 examples/aishell3/tts3/run.sh         |  5 +----
 examples/canton/tts3/run.sh           |  5 +----
 examples/csmsc/tts2/run.sh            |  5 +----
 examples/csmsc/tts3/run.sh            |  5 +----
 examples/csmsc/tts3/run_cnndecoder.sh | 10 ++--------
 examples/csmsc/vits/run.sh            |  5 +----
 examples/ljspeech/tts3/run.sh         |  5 +----
 examples/vctk/tts3/run.sh             |  5 +----
 examples/zh_en_tts/tts3/run.sh        |  5 +----
 9 files changed, 10 insertions(+), 40 deletions(-)

diff --git a/examples/aishell3/tts3/run.sh b/examples/aishell3/tts3/run.sh
index b5da076b2..8dcecaa03 100755
--- a/examples/aishell3/tts3/run.sh
+++ b/examples/aishell3/tts3/run.sh
@@ -43,10 +43,7 @@ fi

 if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
     # install paddle2onnx
-    version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
-    if [[ -z "$version" || ${version} != '1.0.0' ]]; then
-        pip install paddle2onnx==1.0.0
-    fi
+    pip install paddle2onnx --upgrade
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_aishell3
     # considering the balance between speed and quality, we recommend that you use hifigan as vocoder
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx pwgan_aishell3
diff --git a/examples/canton/tts3/run.sh b/examples/canton/tts3/run.sh
index 3a3dfe0a5..acfc50223 100755
--- a/examples/canton/tts3/run.sh
+++ b/examples/canton/tts3/run.sh
@@ -46,10 +46,7 @@ fi
 # we have only tested the following models so far
 if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
     # install paddle2onnx
-    version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
-    if [[ -z "$version" || ${version} != '1.0.0' ]]; then
-        pip install paddle2onnx==1.0.0
-    fi
+    pip install paddle2onnx --upgrade
     ../../csmsc/tts3/local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_canton
     # considering the balance between speed and quality, we recommend that you use hifigan as vocoder
     # ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx pwgan_csmsc
diff --git a/examples/csmsc/tts2/run.sh b/examples/csmsc/tts2/run.sh
index 6279ec579..5732ea3c7 100755
--- a/examples/csmsc/tts2/run.sh
+++ b/examples/csmsc/tts2/run.sh
@@ -45,10 +45,7 @@ fi
 # we have only tested the following models so far
 if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
     # install paddle2onnx
-    version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
-    if [[ -z "$version" || ${version} != '1.0.0' ]]; then
-        pip install paddle2onnx==1.0.0
-    fi
+    pip install paddle2onnx --upgrade
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx speedyspeech_csmsc
     # considering the balance between speed and quality, we recommend that you use hifigan as vocoder
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx pwgan_csmsc
diff --git a/examples/csmsc/tts3/run.sh b/examples/csmsc/tts3/run.sh
index dd8c9f3e6..a7b4e4239 100755
--- a/examples/csmsc/tts3/run.sh
+++ b/examples/csmsc/tts3/run.sh
@@ -45,10 +45,7 @@ fi
 # we have only tested the following models so far
 if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
     # install paddle2onnx
-    version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
-    if [[ -z "$version" || ${version} != '1.0.0' ]]; then
-        pip install paddle2onnx==1.0.0
-    fi
+    pip install paddle2onnx --upgrade
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_csmsc
     # considering the balance between speed and quality, we recommend that you use hifigan as vocoder
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx pwgan_csmsc
diff --git a/examples/csmsc/tts3/run_cnndecoder.sh b/examples/csmsc/tts3/run_cnndecoder.sh
index 96b446c52..f356f3133 100755
--- a/examples/csmsc/tts3/run_cnndecoder.sh
+++ b/examples/csmsc/tts3/run_cnndecoder.sh
@@ -58,10 +58,7 @@ fi
 # paddle2onnx non streaming
 if [ ${stage} -le 7 ] && [ ${stop_stage} -ge 7 ]; then
     # install paddle2onnx
-    version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
-    if [[ -z "$version" || ${version} != '1.0.0' ]]; then
-        pip install paddle2onnx==1.0.0
-    fi
+    pip install paddle2onnx --upgrade
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_csmsc
     # considering the balance between speed and quality, we recommend that you use hifigan as vocoder
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx pwgan_csmsc
@@ -77,10 +74,7 @@ fi
 # paddle2onnx streaming
 if [ ${stage} -le 9 ] && [ ${stop_stage} -ge 9 ]; then
     # install paddle2onnx
-    version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
-    if [[ -z "$version" || ${version} != '1.0.0' ]]; then
-        pip install paddle2onnx==1.0.0
-    fi
+    pip install paddle2onnx --upgrade
     # streaming acoustic model
     ./local/paddle2onnx.sh ${train_output_path} inference_streaming inference_onnx_streaming fastspeech2_csmsc_am_encoder_infer
     ./local/paddle2onnx.sh ${train_output_path} inference_streaming inference_onnx_streaming fastspeech2_csmsc_am_decoder
diff --git a/examples/csmsc/vits/run.sh b/examples/csmsc/vits/run.sh
index f2c5d452f..03c59702b 100755
--- a/examples/csmsc/vits/run.sh
+++ b/examples/csmsc/vits/run.sh
@@ -45,10 +45,7 @@ fi
 # # we have only tested the following models so far
 # if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
 #     # install paddle2onnx
-#     version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
-#     if [[ -z "$version" || ${version} != '1.0.0' ]]; then
-#         pip install paddle2onnx==1.0.0
-#     fi
+#     pip install paddle2onnx --upgrade
 #     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx vits_csmsc
 # fi
diff --git a/examples/ljspeech/tts3/run.sh b/examples/ljspeech/tts3/run.sh
index aacd4cc03..0d8da920c 100755
--- a/examples/ljspeech/tts3/run.sh
+++ b/examples/ljspeech/tts3/run.sh
@@ -45,10 +45,7 @@ fi
 # we have only tested the following models so far
 if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
     # install paddle2onnx
-    version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
-    if [[ -z "$version" || ${version} != '1.0.0' ]]; then
-        pip install paddle2onnx==1.0.0
-    fi
+    pip install paddle2onnx --upgrade
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_ljspeech
     # considering the balance between speed and quality, we recommend that you use hifigan as vocoder
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx pwgan_ljspeech
diff --git a/examples/vctk/tts3/run.sh b/examples/vctk/tts3/run.sh
index a112b94b7..76307bd5f 100755
--- a/examples/vctk/tts3/run.sh
+++ b/examples/vctk/tts3/run.sh
@@ -43,10 +43,7 @@ fi

 if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
     # install paddle2onnx
-    version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
-    if [[ -z "$version" || ${version} != '1.0.0' ]]; then
-        pip install paddle2onnx==1.0.0
-    fi
+    pip install paddle2onnx --upgrade
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_vctk
     # considering the balance between speed and quality, we recommend that you use hifigan as vocoder
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx pwgan_vctk
diff --git a/examples/zh_en_tts/tts3/run.sh b/examples/zh_en_tts/tts3/run.sh
index 12f99081a..a4d86480b 100755
--- a/examples/zh_en_tts/tts3/run.sh
+++ b/examples/zh_en_tts/tts3/run.sh
@@ -46,10 +46,7 @@ fi

 if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
     # install paddle2onnx
-    version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
-    if [[ -z "$version" || ${version} != '1.0.0' ]]; then
-        pip install paddle2onnx==1.0.0
-    fi
+    pip install paddle2onnx --upgrade
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_mix
     # considering the balance between speed and quality, we recommend that you use hifigan as vocoder
     ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx pwgan_aishell3
From 793effa12232debc5a47fea8d812244df20a9c89 Mon Sep 17 00:00:00 2001
From: liangym <34430015+lym0302@users.noreply.github.com>
Date: Fri, 24 Mar 2023 10:29:12 +0800
Subject: [PATCH 37/37] [TTS] update svs_music_score.md (#3085)

---
 docs/source/tts/svs_music_score.md | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/docs/source/tts/svs_music_score.md b/docs/source/tts/svs_music_score.md
index 05645d2b2..9f351c001 100644
--- a/docs/source/tts/svs_music_score.md
+++ b/docs/source/tts/svs_music_score.md
@@ -169,6 +169,11 @@ A music score consists of: note, note_dur, is_slur.
 
+
+The experiments above show that extracting the music score this way is feasible. In practice you can additionally **insert "AP" (a breath) and "SP" (a pause) into the lyrics flexibly**, adding a corresponding **rest to the notes**, which makes the synthesized singing sound more natural overall.
+Beyond that, pick the major key and the starting octave group such that **the resulting notes appear in the training data**; if notes unseen during training are passed in at inference time, the synthesized audio may not have the expected pitch.
+
+
 # 3. Miscellaneous
 ## 3.1 Reading MIDI files