From 6b2dd1684503f45165e3dd1d34a605245b37aea3 Mon Sep 17 00:00:00 2001
From: lym0302
Date: Thu, 24 Feb 2022 17:44:56 +0800
Subject: [PATCH 01/39] update server cli, test=doc

---
 demos/speech_server/conf/application.yaml     | 16 +++++--
 demos/speech_server/conf/asr/asr.yaml         |  7 +--
 demos/speech_server/conf/asr/asr_pd.yaml      | 25 ++++++++++
 demos/speech_server/conf/tts/tts.yaml         |  2 +-
 demos/speech_server/conf/tts/tts_pd.yaml      | 21 ++++-----
 .../server/bin/paddlespeech_server.py         | 13 ++---
 paddlespeech/server/conf/application.yaml     | 13 +++--
 paddlespeech/server/conf/asr/asr.yaml         |  1 +
 paddlespeech/server/conf/asr/asr_pd.yaml      |  2 +-
 paddlespeech/server/conf/tts/tts.yaml         |  2 +-
 paddlespeech/server/conf/tts/tts_pd.yaml      | 25 +++++-----
 .../server/engine/asr/python/asr_engine.py    | 12 +----
 .../engine/tts/paddleinference/tts_engine.py  |  1 -
 paddlespeech/server/restful/tts_api.py        | 47 ++++++++++++-------
 paddlespeech/server/utils/paddle_predictor.py |  5 +-
 15 files changed, 115 insertions(+), 77 deletions(-)
 create mode 100644 demos/speech_server/conf/asr/asr_pd.yaml

diff --git a/demos/speech_server/conf/application.yaml b/demos/speech_server/conf/application.yaml
index c8d71f2f..fd4f5f37 100644
--- a/demos/speech_server/conf/application.yaml
+++ b/demos/speech_server/conf/application.yaml
@@ -9,9 +9,17 @@ port: 8090
 ##################################################################
 #                          CONFIG FILE                           #
 ##################################################################
-# add engine type (Options: asr, tts) and config file here.
+# The engine_type of a speech task must match the config file of that task.
+# E.g. if the engine_type of asr is 'python', the engine_backend of asr is 'XX/asr.yaml'.
+# E.g. if the engine_type of asr is 'inference', the engine_backend of asr is 'XX/asr_pd.yaml'.
+#
+# add engine type (Options: python, inference)
+engine_type:
+    asr: 'inference'
+    tts: 'inference'
+# add engine backend type (Options: asr, tts) and config file here.
+# A speech task is started by the server only if it is added under engine_backend.
 engine_backend:
-    asr: 'conf/asr/asr.yaml'
-    tts: 'conf/tts/tts.yaml'
-
+    asr: 'conf/asr/asr_pd.yaml'
+    tts: 'conf/tts/tts_pd.yaml'
diff --git a/demos/speech_server/conf/asr/asr.yaml b/demos/speech_server/conf/asr/asr.yaml
index 4c3b0a67..b1ef558d 100644
--- a/demos/speech_server/conf/asr/asr.yaml
+++ b/demos/speech_server/conf/asr/asr.yaml
@@ -1,7 +1,8 @@
 model: 'conformer_wenetspeech'
 lang: 'zh'
 sample_rate: 16000
-cfg_path:
-ckpt_path:
+cfg_path:  # [optional]
+ckpt_path:  # [optional]
 decode_method: 'attention_rescoring'
-force_yes: False
+force_yes: True
+device: 'gpu:3'  # set 'gpu:id' or 'cpu'
diff --git a/demos/speech_server/conf/asr/asr_pd.yaml b/demos/speech_server/conf/asr/asr_pd.yaml
new file mode 100644
index 00000000..21bf7177
--- /dev/null
+++ b/demos/speech_server/conf/asr/asr_pd.yaml
@@ -0,0 +1,25 @@
+# This is the parameter configuration file for the ASR server.
+# It uses static models that run with Paddle Inference.
+
+##################################################################
+#                     ACOUSTIC MODEL SETTING                     #
+#       am choices=['deepspeech2offline_aishell'] TODO
+##################################################################
+model_type: 'deepspeech2offline_aishell'
+am_model:   # the pdmodel file of am static model [optional]
+am_params:  # the pdiparams file of am static model [optional]
+lang: 'zh'
+sample_rate: 16000
+cfg_path:
+decode_method:
+force_yes: True
+
+am_predictor_conf:
+    device: 'gpu:3'  # set 'gpu:id' or 'cpu'
+    enable_mkldnn: True
+    switch_ir_optim: True
+
+
+##################################################################
+#                          OTHERS                                #
+##################################################################
diff --git a/demos/speech_server/conf/tts/tts.yaml b/demos/speech_server/conf/tts/tts.yaml
index cb4829c8..8d45aec5 100644
--- a/demos/speech_server/conf/tts/tts.yaml
+++ b/demos/speech_server/conf/tts/tts.yaml
@@ -29,4 +29,4 @@ voc_stat:
 #                          OTHERS                                #
 ##################################################################
 lang: 'zh'
-device: 'gpu:2'
+device: 'gpu:3'  # set 'gpu:id' or 'cpu'
diff --git a/demos/speech_server/conf/tts/tts_pd.yaml b/demos/speech_server/conf/tts/tts_pd.yaml
index c268c6a3..ecfa3a3b 100644
--- a/demos/speech_server/conf/tts/tts_pd.yaml
+++ b/demos/speech_server/conf/tts/tts_pd.yaml
@@ -6,8 +6,8 @@
 #       am choices=['speedyspeech_csmsc', 'fastspeech2_csmsc']
 ##################################################################
 am: 'fastspeech2_csmsc'
-am_model:   # the pdmodel file of am static model
-am_params:  # the pdiparams file of am static model
+am_model:   # the pdmodel file of your am static model (XX.pdmodel)
+am_params:  # the pdiparams file of your am static model (XX.pdiparams)
 am_sample_rate: 24000
 phones_dict:
 tones_dict:
@@ -15,9 +15,9 @@ speaker_dict:
 spk_id: 0
 
 am_predictor_conf:
-    use_gpu: True
-    enable_mkldnn: True
-    switch_ir_optim: True
+    device: 'gpu:3'  # set 'gpu:id' or 'cpu'
+    enable_mkldnn: False
+    switch_ir_optim: False
 
 
 ##################################################################
@@ -25,17 +25,16 @@ am_predictor_conf:
 #       voc choices=['pwgan_csmsc', 'mb_melgan_csmsc','hifigan_csmsc']
 ##################################################################
 voc: 'pwgan_csmsc'
-voc_model:  # the pdmodel file of vocoder static model
-voc_params: # the pdiparams file of vocoder static model
+voc_model:  # the pdmodel file of your vocoder static model (XX.pdmodel)
+voc_params: # the pdiparams file of your vocoder static model (XX.pdiparams)
 voc_sample_rate: 24000
 
 voc_predictor_conf:
-    use_gpu: True
-    enable_mkldnn: True
-    switch_ir_optim: True
+    device: 'gpu:3'  # set 'gpu:id' or 'cpu'
+    enable_mkldnn: False
+    switch_ir_optim: False
 
 ##################################################################
 #                          OTHERS                                #
 ##################################################################
 lang: 'zh'
-device: paddle.get_device()
diff --git a/paddlespeech/server/bin/paddlespeech_server.py b/paddlespeech/server/bin/paddlespeech_server.py
index 7c88d8a0..ad62d3f6 100644
--- a/paddlespeech/server/bin/paddlespeech_server.py
+++ b/paddlespeech/server/bin/paddlespeech_server.py
@@ -20,7 +20,7 @@ from fastapi import FastAPI
 from ..executor import BaseExecutor
 from ..util import cli_server_register
 from ..util import stats_wrapper
-from paddlespeech.server.engine.engine_factory import EngineFactory
+from paddlespeech.server.engine.engine_pool import init_engine_pool
 from paddlespeech.server.restful.api import setup_router
 from paddlespeech.server.utils.config import get_config
 
@@ 
-51,8 +51,10 @@ class ServerExecutor(BaseExecutor):
 
     def init(self, config) -> bool:
         """system initialization
+
         Args:
             config (CfgNode): config object
+
         Returns:
             bool: 
         """
@@ -61,13 +63,8 @@ class ServerExecutor(BaseExecutor):
         api_router = setup_router(api_list)
         app.include_router(api_router)
 
-        # init engine
-        engine_pool = []
-        for engine in config.engine_backend:
-            engine_pool.append(EngineFactory.get_engine(engine_name=engine))
-            if not engine_pool[-1].init(
-                    config_file=config.engine_backend[engine]):
-                return False
+        if not init_engine_pool(config):
+            return False
 
         return True
 
diff --git a/paddlespeech/server/conf/application.yaml b/paddlespeech/server/conf/application.yaml
index 154ef9af..cc08665e 100644
--- a/paddlespeech/server/conf/application.yaml
+++ b/paddlespeech/server/conf/application.yaml
@@ -9,12 +9,17 @@ port: 8090
 ##################################################################
 #                          CONFIG FILE                           #
 ##################################################################
+# The engine_type of a speech task must match the config file of that task.
+# E.g. if the engine_type of asr is 'python', the engine_backend of asr is 'XX/asr.yaml'.
+# E.g. if the engine_type of asr is 'inference', the engine_backend of asr is 'XX/asr_pd.yaml'.
+#
 # add engine type (Options: python, inference)
 engine_type:
-    asr: 'inference'
-    # tts: 'inference'
+    asr: 'python'
+    tts: 'python'
 
 # add engine backend type (Options: asr, tts) and config file here.
+# A speech task is started by the server only if it is added under engine_backend.
 engine_backend:
-    asr: 'conf/asr/asr_pd.yaml'
-    #tts: 'conf/tts/tts_pd.yaml'
+    asr: 'conf/asr/asr.yaml'
+    tts: 'conf/tts/tts.yaml'
diff --git a/paddlespeech/server/conf/asr/asr.yaml b/paddlespeech/server/conf/asr/asr.yaml
index 50e55a3c..b1ef558d 100644
--- a/paddlespeech/server/conf/asr/asr.yaml
+++ b/paddlespeech/server/conf/asr/asr.yaml
@@ -5,3 +5,4 @@ cfg_path:  # [optional]
 ckpt_path:  # [optional]
 decode_method: 'attention_rescoring'
 force_yes: True
+device: 'gpu:3'  # set 'gpu:id' or 'cpu'
diff --git a/paddlespeech/server/conf/asr/asr_pd.yaml b/paddlespeech/server/conf/asr/asr_pd.yaml
index 43a63f1b..21bf7177 100644
--- a/paddlespeech/server/conf/asr/asr_pd.yaml
+++ b/paddlespeech/server/conf/asr/asr_pd.yaml
@@ -15,7 +15,7 @@ decode_method:
 force_yes: True
 
 am_predictor_conf:
-    use_gpu: True
+    device: 'gpu:3'  # set 'gpu:id' or 'cpu'
     enable_mkldnn: True
     switch_ir_optim: True
diff --git a/paddlespeech/server/conf/tts/tts.yaml b/paddlespeech/server/conf/tts/tts.yaml
index d0e128ea..8d45aec5 100644
--- a/paddlespeech/server/conf/tts/tts.yaml
+++ b/paddlespeech/server/conf/tts/tts.yaml
@@ -29,4 +29,4 @@ voc_stat:
 #                          OTHERS                                #
 ##################################################################
 lang: 'zh'
-device: paddle.get_device()
\ No newline at end of file
+device: 'gpu:3'  # set 'gpu:id' or 'cpu'
diff --git a/paddlespeech/server/conf/tts/tts_pd.yaml b/paddlespeech/server/conf/tts/tts_pd.yaml
index c268c6a3..cd4b8583 100644
--- a/paddlespeech/server/conf/tts/tts_pd.yaml
+++ b/paddlespeech/server/conf/tts/tts_pd.yaml
@@ -6,18 +6,18 @@
 #       am choices=['speedyspeech_csmsc', 'fastspeech2_csmsc']
 ##################################################################
 am: 'fastspeech2_csmsc'
-am_model:   # the pdmodel file of am static model
-am_params:  # the pdiparams file of am static model
-am_sample_rate: 24000
+am_model:   # the pdmodel file of your am static model (XX.pdmodel)
+am_params:  # the pdiparams file of your am static model (XX.pdiparams)
+am_sample_rate: 24000  # must match the model
 phones_dict:
 tones_dict:
 speaker_dict:
 spk_id: 0
 
 am_predictor_conf:
-    use_gpu: True
-    enable_mkldnn: True
-    switch_ir_optim: True
+    device: 'gpu:3'  # set 'gpu:id' or 'cpu'
+    enable_mkldnn: False
+    switch_ir_optim: False
 
 
 ##################################################################
@@ -25,17 +25,16 @@ am_predictor_conf:
 #       voc choices=['pwgan_csmsc', 'mb_melgan_csmsc','hifigan_csmsc']
 ##################################################################
 voc: 'pwgan_csmsc'
-voc_model:  # the pdmodel file of vocoder static model
-voc_params: # the pdiparams file of vocoder static model
-voc_sample_rate: 24000
+voc_model:  # the pdmodel file of your vocoder static model (XX.pdmodel)
+voc_params: # the pdiparams file of your vocoder static model (XX.pdiparams)
+voc_sample_rate: 24000  # must match the model
 
 voc_predictor_conf:
-    use_gpu: True
-    enable_mkldnn: True
-    switch_ir_optim: True
+    device: 'gpu:3'  # set 'gpu:id' or 'cpu'
+    enable_mkldnn: False
+    switch_ir_optim: False
 
 ##################################################################
 #                          OTHERS                                #
 ##################################################################
 lang: 'zh'
-device: paddle.get_device()
diff --git a/paddlespeech/server/engine/asr/python/asr_engine.py b/paddlespeech/server/engine/asr/python/asr_engine.py
index fd67b029..60040051 100644
--- a/paddlespeech/server/engine/asr/python/asr_engine.py
+++ b/paddlespeech/server/engine/asr/python/asr_engine.py
@@ -12,21 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import io
-import os
-from typing import List
-from typing import Optional
-from typing import Union
 
-import librosa
 import paddle
-import soundfile
 
 from paddlespeech.cli.asr.infer import ASRExecutor
 from paddlespeech.cli.log import logger
-from paddlespeech.s2t.frontend.featurizer.text_featurizer import TextFeaturizer
-from paddlespeech.s2t.transform.transformation import Transformation
-from paddlespeech.s2t.utils.dynamic_import import dynamic_import
-from paddlespeech.s2t.utils.utility import UpdateConfig
 from paddlespeech.server.engine.base_engine import BaseEngine
 from paddlespeech.server.utils.config import get_config
 
@@ -63,7 +53,7 @@ class ASREngine(BaseEngine):
 
         self.executor = ASRServerExecutor()
         self.config = get_config(config_file)
-        paddle.set_device(paddle.get_device())
+        paddle.set_device(self.config.device)
 
         self.executor._init_from_path(
             self.config.model, self.config.lang, self.config.sample_rate,
             self.config.cfg_path, self.config.decode_method,
diff --git a/paddlespeech/server/engine/tts/paddleinference/tts_engine.py b/paddlespeech/server/engine/tts/paddleinference/tts_engine.py
index 7679b02f..ecd2b0b6 100644
--- a/paddlespeech/server/engine/tts/paddleinference/tts_engine.py
+++ b/paddlespeech/server/engine/tts/paddleinference/tts_engine.py
@@ -344,7 +344,6 @@ class TTSEngine(BaseEngine):
 
         try:
             self.config = get_config(config_file)
-
             self.executor._init_from_path(
                 am=self.config.am,
                 am_model=self.config.am_model,
diff --git a/paddlespeech/server/restful/tts_api.py b/paddlespeech/server/restful/tts_api.py
index d5fa1d42..11105147 100644
--- a/paddlespeech/server/restful/tts_api.py
+++ b/paddlespeech/server/restful/tts_api.py
@@ -16,7 +16,7 @@ from typing import Union
 
 from fastapi import APIRouter
 
-from paddlespeech.server.engine.tts.paddleinference.tts_engine import TTSEngine
+from paddlespeech.server.engine.engine_pool import get_engine_pool
 from paddlespeech.server.restful.request import TTSRequest
 from 
paddlespeech.server.restful.response import ErrorResponse
 from paddlespeech.server.restful.response import TTSResponse
@@ -60,28 +60,41 @@ def tts(request_body: TTSRequest):
     Returns:
         json: [description]
     """
-    # json to dict
-    item_dict = request_body.dict()
-    sentence = item_dict['text']
-    spk_id = item_dict['spk_id']
-    speed = item_dict['speed']
-    volume = item_dict['volume']
-    sample_rate = item_dict['sample_rate']
-    save_path = item_dict['save_path']
+    # get params
+    text = request_body.text
+    spk_id = request_body.spk_id
+    speed = request_body.speed
+    volume = request_body.volume
+    sample_rate = request_body.sample_rate
+    save_path = request_body.save_path
 
     # Check parameters
-    if speed <=0 or speed > 3 or volume <=0 or volume > 3 or \
-        sample_rate not in [0, 16000, 8000] or \
-        (save_path is not None and not save_path.endswith("pcm") and not save_path.endswith("wav")):
-        return failed_response(ErrorCode.SERVER_PARAM_ERR)
-
-    # single
-    tts_engine = TTSEngine()
+    if speed <= 0 or speed > 3:
+        return failed_response(
+            ErrorCode.SERVER_PARAM_ERR,
+            "invalid speed value, the value should be in (0, 3].")
+    if volume <= 0 or volume > 3:
+        return failed_response(
+            ErrorCode.SERVER_PARAM_ERR,
+            "invalid volume value, the value should be in (0, 3].")
+    if sample_rate not in [0, 16000, 8000]:
+        return failed_response(
+            ErrorCode.SERVER_PARAM_ERR,
+            "invalid sample_rate value, valid choices are 0, 8000 and 16000.")
+    if save_path is not None and not save_path.endswith(
+            "pcm") and not save_path.endswith("wav"):
+        return failed_response(
+            ErrorCode.SERVER_PARAM_ERR,
+            "invalid save_path, supported audio formats are pcm and wav.")
 
     # run
     try:
+        # get single engine from engine pool
+        engine_pool = get_engine_pool()
+        tts_engine = engine_pool['tts']
+
         lang, target_sample_rate, wav_base64 = tts_engine.run(
-            sentence, spk_id, speed, volume, sample_rate, save_path)
+            text, spk_id, speed, volume, sample_rate, save_path)
 
         response = {
             "success": True,
diff --git a/paddlespeech/server/utils/paddle_predictor.py b/paddlespeech/server/utils/paddle_predictor.py
index f910161b..f4216d74 100644
--- a/paddlespeech/server/utils/paddle_predictor.py
+++ b/paddlespeech/server/utils/paddle_predictor.py
@@ -41,8 +41,9 @@ def init_predictor(model_dir: Optional[os.PathLike]=None,
 
     config = Config(model_file, params_file)
     config.enable_memory_optim()
-    if predictor_conf["use_gpu"]:
-        config.enable_use_gpu(1000, 0)
+    if "gpu" in predictor_conf["device"]:
+        gpu_id = predictor_conf["device"].split(":")[-1]
+        config.enable_use_gpu(1000, int(gpu_id))
     if predictor_conf["enable_mkldnn"]:
         config.enable_mkldnn()
     if predictor_conf["switch_ir_optim"]:
From 0a5624fe614c8316e85572c6f180c1214ef7fd10 Mon Sep 17 00:00:00 2001
From: Hui Zhang
Date: Thu, 24 Feb 2022 10:58:31 +0000
Subject: [PATCH 02/39] update ctc loss compare

---
 docs/topic/ctc/ctc_loss_compare.ipynb | 150 +++++++++++++------------
 1 file changed, 73 insertions(+), 77 deletions(-)

diff --git a/docs/topic/ctc/ctc_loss_compare.ipynb b/docs/topic/ctc/ctc_loss_compare.ipynb
index 95b2af50..c313710c 100644
--- a/docs/topic/ctc/ctc_loss_compare.ipynb
+++ b/docs/topic/ctc/ctc_loss_compare.ipynb
@@ -30,12 +30,7 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "Cloning into 'warp-ctc'...\n",
-     "remote: Enumerating objects: 829, done.\u001b[K\n",
-     "remote: Total 829 (delta 0), reused 0 (delta 0), pack-reused 829\u001b[K\n",
-     "Receiving objects: 100% (829/829), 388.85 KiB | 140.00 KiB/s, done.\n",
-     "Resolving deltas: 100% (419/419), done.\n",
-     
"Checking connectivity... done.\n" + "fatal: destination path 'warp-ctc' already exists and is not an empty directory.\r\n" ] } ], @@ -99,30 +94,6 @@ "name": "stdout", "output_type": "stream", "text": [ - "-- The C compiler identification is GNU 5.4.0\n", - "-- The CXX compiler identification is GNU 5.4.0\n", - "-- Check for working C compiler: /usr/bin/cc\n", - "-- Check for working C compiler: /usr/bin/cc -- works\n", - "-- Detecting C compiler ABI info\n", - "-- Detecting C compiler ABI info - done\n", - "-- Detecting C compile features\n", - "-- Detecting C compile features - done\n", - "-- Check for working CXX compiler: /usr/bin/c++\n", - "-- Check for working CXX compiler: /usr/bin/c++ -- works\n", - "-- Detecting CXX compiler ABI info\n", - "-- Detecting CXX compiler ABI info - done\n", - "-- Detecting CXX compile features\n", - "-- Detecting CXX compile features - done\n", - "-- Looking for pthread.h\n", - "-- Looking for pthread.h - found\n", - "-- Performing Test CMAKE_HAVE_LIBC_PTHREAD\n", - "-- Performing Test CMAKE_HAVE_LIBC_PTHREAD - Failed\n", - "-- Looking for pthread_create in pthreads\n", - "-- Looking for pthread_create in pthreads - not found\n", - "-- Looking for pthread_create in pthread\n", - "-- Looking for pthread_create in pthread - found\n", - "-- Found Threads: TRUE \n", - "-- Found CUDA: /usr/local/cuda (found suitable version \"10.2\", minimum required is \"6.5\") \n", "-- cuda found TRUE\n", "-- Building shared library with GPU support\n", "-- Configuring done\n", @@ -145,20 +116,11 @@ "name": "stdout", "output_type": "stream", "text": [ - "[ 11%] \u001b[34m\u001b[1mBuilding NVCC (Device) object CMakeFiles/warpctc.dir/src/warpctc_generated_reduce.cu.o\u001b[0m\n", - "[ 22%] \u001b[34m\u001b[1mBuilding NVCC (Device) object CMakeFiles/warpctc.dir/src/warpctc_generated_ctc_entrypoint.cu.o\u001b[0m\n", - "\u001b[35m\u001b[1mScanning dependencies of target warpctc\u001b[0m\n", - "[ 33%] \u001b[32m\u001b[1mLinking CXX shared library libwarpctc.so\u001b[0m\n", + "[ 11%] \u001b[32m\u001b[1mLinking CXX shared library libwarpctc.so\u001b[0m\n", "[ 33%] Built target warpctc\n", - "[ 44%] \u001b[34m\u001b[1mBuilding NVCC (Device) object CMakeFiles/test_gpu.dir/tests/test_gpu_generated_test_gpu.cu.o\u001b[0m\n", - "\u001b[35m\u001b[1mScanning dependencies of target test_cpu\u001b[0m\n", - "[ 55%] \u001b[32mBuilding CXX object CMakeFiles/test_cpu.dir/tests/test_cpu.cpp.o\u001b[0m\n", - "[ 66%] \u001b[32mBuilding CXX object CMakeFiles/test_cpu.dir/tests/random.cpp.o\u001b[0m\n", - "[ 77%] \u001b[32m\u001b[1mLinking CXX executable test_cpu\u001b[0m\n", + "[ 44%] \u001b[32m\u001b[1mLinking CXX executable test_cpu\u001b[0m\n", + "[ 55%] \u001b[32m\u001b[1mLinking CXX executable test_gpu\u001b[0m\n", "[ 77%] Built target test_cpu\n", - "\u001b[35m\u001b[1mScanning dependencies of target test_gpu\u001b[0m\n", - "[ 88%] \u001b[32mBuilding CXX object CMakeFiles/test_gpu.dir/tests/random.cpp.o\u001b[0m\n", - "[100%] \u001b[32m\u001b[1mLinking CXX executable test_gpu\u001b[0m\n", "[100%] Built target test_gpu\n" ] } @@ -169,7 +131,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 9, "id": "31761a31", "metadata": {}, "outputs": [ @@ -187,7 +149,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 10, "id": "f53316f6", "metadata": {}, "outputs": [ @@ -205,7 +167,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 11, "id": "084f1e49", "metadata": {}, "outputs": [ @@ -216,29 +178,20 @@ "running install\n", 
"running bdist_egg\n", "running egg_info\n", - "creating warpctc_pytorch.egg-info\n", "writing warpctc_pytorch.egg-info/PKG-INFO\n", "writing dependency_links to warpctc_pytorch.egg-info/dependency_links.txt\n", "writing top-level names to warpctc_pytorch.egg-info/top_level.txt\n", "writing manifest file 'warpctc_pytorch.egg-info/SOURCES.txt'\n", - "writing manifest file 'warpctc_pytorch.egg-info/SOURCES.txt'\n", "installing library code to build/bdist.linux-x86_64/egg\n", "running install_lib\n", "running build_py\n", - "creating build\n", - "creating build/lib.linux-x86_64-3.9\n", - "creating build/lib.linux-x86_64-3.9/warpctc_pytorch\n", - "copying warpctc_pytorch/__init__.py -> build/lib.linux-x86_64-3.9/warpctc_pytorch\n", "running build_ext\n", "building 'warpctc_pytorch._warp_ctc' extension\n", - "creating /workspace/zhanghui/DeepSpeech-2.x/docs/topic/ctc/warp-ctc/pytorch_binding/build/temp.linux-x86_64-3.9\n", - "creating /workspace/zhanghui/DeepSpeech-2.x/docs/topic/ctc/warp-ctc/pytorch_binding/build/temp.linux-x86_64-3.9/src\n", "Emitting ninja build file /workspace/zhanghui/DeepSpeech-2.x/docs/topic/ctc/warp-ctc/pytorch_binding/build/temp.linux-x86_64-3.9/build.ninja...\n", "Compiling objects...\n", "Allowing ninja to set a default number of workers... (overridable by setting the environment variable MAX_JOBS=N)\n", - "[1/1] c++ -MMD -MF /workspace/zhanghui/DeepSpeech-2.x/docs/topic/ctc/warp-ctc/pytorch_binding/build/temp.linux-x86_64-3.9/src/binding.o.d -pthread -B /workspace/zhanghui/DeepSpeech-2.x/tools/venv/compiler_compat -Wl,--sysroot=/ -Wno-unused-result -Wsign-compare -DNDEBUG -O2 -Wall -fPIC -O2 -isystem /workspace/zhanghui/DeepSpeech-2.x/tools/venv/include -fPIC -O2 -isystem /workspace/zhanghui/DeepSpeech-2.x/tools/venv/include -fPIC -I/workspace/zhanghui/DeepSpeech-2.x/docs/topic/ctc/warp-ctc/include -I/workspace/zhanghui/DeepSpeech-2.x/tools/venv/lib/python3.9/site-packages/torch/include -I/workspace/zhanghui/DeepSpeech-2.x/tools/venv/lib/python3.9/site-packages/torch/include/torch/csrc/api/include -I/workspace/zhanghui/DeepSpeech-2.x/tools/venv/lib/python3.9/site-packages/torch/include/TH -I/workspace/zhanghui/DeepSpeech-2.x/tools/venv/lib/python3.9/site-packages/torch/include/THC -I/usr/local/cuda/include -I/workspace/zhanghui/DeepSpeech-2.x/tools/venv/include/python3.9 -c -c /workspace/zhanghui/DeepSpeech-2.x/docs/topic/ctc/warp-ctc/pytorch_binding/src/binding.cpp -o /workspace/zhanghui/DeepSpeech-2.x/docs/topic/ctc/warp-ctc/pytorch_binding/build/temp.linux-x86_64-3.9/src/binding.o -std=c++14 -fPIC -DWARPCTC_ENABLE_GPU -DTORCH_API_INCLUDE_EXTENSION_H '-DPYBIND11_COMPILER_TYPE=\"_gcc\"' '-DPYBIND11_STDLIB=\"_libstdcpp\"' '-DPYBIND11_BUILD_ABI=\"_cxxabi1011\"' -DTORCH_EXTENSION_NAME=_warp_ctc -D_GLIBCXX_USE_CXX11_ABI=0\n", + "ninja: no work to do.\n", "g++ -pthread -B /workspace/zhanghui/DeepSpeech-2.x/tools/venv/compiler_compat -Wl,--sysroot=/ -shared -Wl,-rpath,/workspace/zhanghui/DeepSpeech-2.x/tools/venv/lib -Wl,-rpath-link,/workspace/zhanghui/DeepSpeech-2.x/tools/venv/lib -L/workspace/zhanghui/DeepSpeech-2.x/tools/venv/lib -Wl,-rpath,/workspace/zhanghui/DeepSpeech-2.x/tools/venv/lib -Wl,-rpath-link,/workspace/zhanghui/DeepSpeech-2.x/tools/venv/lib -L/workspace/zhanghui/DeepSpeech-2.x/tools/venv/lib /workspace/zhanghui/DeepSpeech-2.x/docs/topic/ctc/warp-ctc/pytorch_binding/build/temp.linux-x86_64-3.9/src/binding.o -L/workspace/zhanghui/DeepSpeech-2.x/docs/topic/ctc/warp-ctc/build 
-L/workspace/zhanghui/DeepSpeech-2.x/tools/venv/lib/python3.9/site-packages/torch/lib -L/usr/local/cuda/lib64 -lwarpctc -lc10 -ltorch -ltorch_cpu -ltorch_python -lcudart -lc10_cuda -ltorch_cuda -o build/lib.linux-x86_64-3.9/warpctc_pytorch/_warp_ctc.cpython-39-x86_64-linux-gnu.so -Wl,-rpath,/workspace/zhanghui/DeepSpeech-2.x/docs/topic/ctc/warp-ctc/build\n", - "creating build/bdist.linux-x86_64\n", "creating build/bdist.linux-x86_64/egg\n", "creating build/bdist.linux-x86_64/egg/warpctc_pytorch\n", "copying build/lib.linux-x86_64-3.9/warpctc_pytorch/__init__.py -> build/bdist.linux-x86_64/egg/warpctc_pytorch\n", @@ -254,7 +207,6 @@ "writing build/bdist.linux-x86_64/egg/EGG-INFO/native_libs.txt\n", "zip_safe flag not set; analyzing archive contents...\n", "warpctc_pytorch.__pycache__._warp_ctc.cpython-39: module references __file__\n", - "creating dist\n", "creating 'dist/warpctc_pytorch-0.1-py3.9-linux-x86_64.egg' and adding 'build/bdist.linux-x86_64/egg' to it\n", "removing 'build/bdist.linux-x86_64/egg' (and everything under it)\n", "Processing warpctc_pytorch-0.1-py3.9-linux-x86_64.egg\n", @@ -275,7 +227,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 12, "id": "ee4ca9e3", "metadata": {}, "outputs": [ @@ -293,7 +245,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 13, "id": "59255ed8", "metadata": {}, "outputs": [ @@ -311,21 +263,14 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 22, "id": "1dae09b9", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "grep: warning: GREP_OPTIONS is deprecated; please use an alias or script\n" - ] - } - ], + "outputs": [], "source": [ "import torch\n", "import torch.nn as nn\n", + "import torch.nn.functional as F\n", "import warpctc_pytorch as wp\n", "import paddle.nn as pn\n", "import paddle" @@ -333,7 +278,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 15, "id": "83d0762e", "metadata": {}, "outputs": [ @@ -343,7 +288,7 @@ "'1.10.0+cu102'" ] }, - "execution_count": 16, + "execution_count": 15, "metadata": {}, "output_type": "execute_result" } @@ -354,17 +299,17 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 16, "id": "62501e2c", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "'2.2.0'" + "'2.2.1'" ] }, - "execution_count": 17, + "execution_count": 16, "metadata": {}, "output_type": "execute_result" } @@ -375,7 +320,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 17, "id": "9e8e0f40", "metadata": {}, "outputs": [ @@ -392,6 +337,7 @@ } ], "source": [ + "# warpctc_pytorch CTCLoss\n", "probs = torch.FloatTensor([[\n", " [0.1, 0.6, 0.1, 0.1, 0.1], [0.1, 0.1, 0.6, 0.1, 0.1]\n", " ]]).transpose(0, 1).contiguous()\n", @@ -412,7 +358,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 18, "id": "2cd46569", "metadata": {}, "outputs": [ @@ -428,6 +374,7 @@ } ], "source": [ + "# pytorch CTCLoss\n", "probs = torch.FloatTensor([[\n", " [0.1, 0.6, 0.1, 0.1, 0.1], [0.1, 0.1, 0.6, 0.1, 0.1]\n", " ]]).transpose(0, 1).contiguous()\n", @@ -449,7 +396,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 27, "id": "85c3461a", "metadata": {}, "outputs": [ @@ -467,6 +414,7 @@ } ], "source": [ + "# Paddle CTCLoss\n", "paddle.set_device('cpu')\n", "probs = paddle.to_tensor([[\n", " [0.1, 0.6, 0.1, 0.1, 0.1], [0.1, 0.1, 0.6, 0.1, 0.1],\n", @@ -490,7 +438,55 @@ { "cell_type": "code", "execution_count": null, 
- "id": "d390cd91", + "id": "8cdf76c2", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 26, + "id": "2c305eaf", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch.Size([2, 1, 5])\n", + "2.4628584384918213\n", + "[[[ 0.17703117 -0.7081247 0.17703117 0.17703117 0.17703117]]\n", + "\n", + " [[ 0.17703117 0.17703117 -0.7081247 0.17703117 0.17703117]]]\n" + ] + } + ], + "source": [ + "# warpctc_pytorch CTCLoss, log_softmax idempotent\n", + "probs = torch.FloatTensor([[\n", + " [0.1, 0.6, 0.1, 0.1, 0.1], [0.1, 0.1, 0.6, 0.1, 0.1]\n", + " ]]).transpose(0, 1).contiguous()\n", + "print(probs.size())\n", + "labels = torch.IntTensor([1, 2])\n", + "label_sizes = torch.IntTensor([2])\n", + "probs_sizes = torch.IntTensor([2])\n", + "probs.requires_grad_(True)\n", + "bs = probs.size(1)\n", + "\n", + "ctc_loss = wp.CTCLoss(size_average=False, length_average=False)\n", + "\n", + "log_probs = torch.log_softmax(probs, axis=-1)\n", + "cost = ctc_loss(log_probs, labels, probs_sizes, label_sizes)\n", + "cost = cost.sum() / bs\n", + "print(cost.item())\n", + "cost.backward()\n", + "print(probs.grad.numpy())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "443336f0", "metadata": {}, "outputs": [], "source": [] From 6b1fe701008de6d344576eb4e56b66250102380b Mon Sep 17 00:00:00 2001 From: Hui Zhang Date: Thu, 24 Feb 2022 11:14:30 +0000 Subject: [PATCH 03/39] format code,test=doc --- .pre-commit-config.yaml | 3 +- dataset/voxceleb/voxceleb1.py | 4 +++ examples/ami/sd0/local/ami_prepare.py | 14 ++++----- .../sv0/local/make_voxceleb_kaldi_trial.py | 31 +++++++++++-------- paddlespeech/__init__.py | 11 ------- paddlespeech/cli/asr/infer.py | 3 +- paddlespeech/s2t/io/utility.py | 2 +- paddlespeech/t2s/datasets/dataset.py | 2 +- utils/DER.py | 3 +- 9 files changed, 36 insertions(+), 37 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 60f0b92f..7fb01708 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -50,12 +50,13 @@ repos: entry: bash .pre-commit-hooks/clang-format.hook -i language: system files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx|cuh|proto)$ + exclude: (?=speechx/speechx/kaldi).*(\.cpp|\.cc|\.h|\.py)$ - id: copyright_checker name: copyright_checker entry: python .pre-commit-hooks/copyright-check.hook language: system files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx|proto|py)$ - exclude: (?=third_party|pypinyin).*(\.cpp|\.h|\.py)$ + exclude: (?=third_party|pypinyin|speechx/speechx/kaldi).*(\.cpp|\.cc|\.h|\.py)$ - repo: https://github.com/asottile/reorder_python_imports rev: v2.4.0 hooks: diff --git a/dataset/voxceleb/voxceleb1.py b/dataset/voxceleb/voxceleb1.py index ce744751..e50c91bc 100644 --- a/dataset/voxceleb/voxceleb1.py +++ b/dataset/voxceleb/voxceleb1.py @@ -80,6 +80,7 @@ parser.add_argument( args = parser.parse_args() + def create_manifest(data_dir, manifest_path_prefix): print("Creating manifest %s ..." 
% manifest_path_prefix) json_lines = [] @@ -128,6 +129,7 @@ def create_manifest(data_dir, manifest_path_prefix): print(f"{total_text / total_sec} text/sec", file=f) print(f"{total_sec / total_num} sec/utt", file=f) + def prepare_dataset(base_url, data_list, target_dir, manifest_path, target_data): if not os.path.exists(target_dir): @@ -164,6 +166,7 @@ def prepare_dataset(base_url, data_list, target_dir, manifest_path, # create the manifest file create_manifest(data_dir=target_dir, manifest_path_prefix=manifest_path) + def main(): if args.target_dir.startswith('~'): args.target_dir = os.path.expanduser(args.target_dir) @@ -184,5 +187,6 @@ def main(): print("Manifest prepare done!") + if __name__ == '__main__': main() diff --git a/examples/ami/sd0/local/ami_prepare.py b/examples/ami/sd0/local/ami_prepare.py index b7bb8e67..d03810a7 100644 --- a/examples/ami/sd0/local/ami_prepare.py +++ b/examples/ami/sd0/local/ami_prepare.py @@ -22,19 +22,17 @@ Authors * qingenz123@126.com (Qingen ZHAO) 2022 """ - -import os -import logging import argparse -import xml.etree.ElementTree as et import glob import json -from ami_splits import get_AMI_split +import logging +import os +import xml.etree.ElementTree as et from distutils.util import strtobool -from dataio import ( - load_pkl, - save_pkl, ) +from ami_splits import get_AMI_split +from dataio import load_pkl +from dataio import save_pkl logger = logging.getLogger(__name__) SAMPLERATE = 16000 diff --git a/examples/voxceleb/sv0/local/make_voxceleb_kaldi_trial.py b/examples/voxceleb/sv0/local/make_voxceleb_kaldi_trial.py index c92ede1a..4e9639dc 100644 --- a/examples/voxceleb/sv0/local/make_voxceleb_kaldi_trial.py +++ b/examples/voxceleb/sv0/local/make_voxceleb_kaldi_trial.py @@ -12,28 +12,30 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """ Make VoxCeleb1 trial of kaldi format this script creat the test trial from kaldi trial voxceleb1_test_v2.txt or official trial veri_test2.txt to kaldi trial format """ - import argparse import codecs import os parser = argparse.ArgumentParser(description=__doc__) -parser.add_argument("--voxceleb_trial", - default="voxceleb1_test_v2", - type=str, - help="VoxCeleb trial file. Default we use the kaldi trial voxceleb1_test_v2.txt") -parser.add_argument("--trial", - default="data/test/trial", - type=str, - help="Kaldi format trial file") +parser.add_argument( + "--voxceleb_trial", + default="voxceleb1_test_v2", + type=str, + help="VoxCeleb trial file. Default we use the kaldi trial voxceleb1_test_v2.txt" +) +parser.add_argument( + "--trial", + default="data/test/trial", + type=str, + help="Kaldi format trial file") args = parser.parse_args() + def main(voxceleb_trial, trial): """ VoxCeleb provide several trial file, which format is different with kaldi format. @@ -58,7 +60,9 @@ def main(voxceleb_trial, trial): """ print("Start convert the voxceleb trial to kaldi format") if not os.path.exists(voxceleb_trial): - raise RuntimeError("{} does not exist. Pleas input the correct file path".format(voxceleb_trial)) + raise RuntimeError( + "{} does not exist. 
Please input the correct file path".format(
+                voxceleb_trial))
 
     trial_dirname = os.path.dirname(trial)
     if not os.path.exists(trial_dirname):
@@ -66,9 +70,9 @@ def main(voxceleb_trial, trial):
 
     with codecs.open(voxceleb_trial, 'r', encoding='utf-8') as f, \
             codecs.open(trial, 'w', encoding='utf-8') as w:
-        for line in f: 
+        for line in f:
             target_or_nontarget, path1, path2 = line.strip().split()
-        
+
             utt_id1 = "-".join(path1.split("/"))
             utt_id2 = "-".join(path2.split("/"))
             target = "nontarget"
@@ -77,5 +81,6 @@ def main(voxceleb_trial, trial):
             w.write("{} {} {}\n".format(utt_id1, utt_id2, target))
     print("Convert the voxceleb trial to kaldi format successfully")
 
+
 if __name__ == "__main__":
     main(args.voxceleb_trial, args.trial)
diff --git a/paddlespeech/__init__.py b/paddlespeech/__init__.py
index 42537b15..185a92b8 100644
--- a/paddlespeech/__init__.py
+++ b/paddlespeech/__init__.py
@@ -11,14 +11,3 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
-
-
-
-
-
-
-
-
-
-
diff --git a/paddlespeech/cli/asr/infer.py b/paddlespeech/cli/asr/infer.py
index 7f648b4c..1fb4be43 100644
--- a/paddlespeech/cli/asr/infer.py
+++ b/paddlespeech/cli/asr/infer.py
@@ -413,7 +413,8 @@ class ASRExecutor(BaseExecutor):
     def _check(self, audio_file: str, sample_rate: int, force_yes: bool):
         self.sample_rate = sample_rate
         if self.sample_rate != 16000 and self.sample_rate != 8000:
-            logger.error("invalid sample rate, please input --sr 8000 or --sr 16000")
+            logger.error(
+                "invalid sample rate, please input --sr 8000 or --sr 16000")
             return False
 
         if isinstance(audio_file, (str, os.PathLike)):
diff --git a/paddlespeech/s2t/io/utility.py b/paddlespeech/s2t/io/utility.py
index ce5e7723..c08b5535 100644
--- a/paddlespeech/s2t/io/utility.py
+++ b/paddlespeech/s2t/io/utility.py
@@ -11,8 +11,8 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License. 
-from typing import List
 from io import BytesIO
+from typing import List
 
 import numpy as np
 
diff --git a/paddlespeech/t2s/datasets/dataset.py b/paddlespeech/t2s/datasets/dataset.py
index f81c2877..2d6c03cb 100644
--- a/paddlespeech/t2s/datasets/dataset.py
+++ b/paddlespeech/t2s/datasets/dataset.py
@@ -258,4 +258,4 @@ class ChainDataset(Dataset):
                 return dataset[i]
             i -= len(dataset)
 
-        raise IndexError("dataset index out of range")
\ No newline at end of file
+        raise IndexError("dataset index out of range")
diff --git a/utils/DER.py b/utils/DER.py
index 5b62094d..d6ab695d 100755
--- a/utils/DER.py
+++ b/utils/DER.py
@@ -23,10 +23,11 @@ Credits
 This code is adapted from https://github.com/nryant/dscore
 """
 import argparse
-from distutils.util import strtobool
 import os
 import re
 import subprocess
+from distutils.util import strtobool
+
 import numpy as np
 
 FILE_IDS = re.compile(r"(?<=Speaker Diarization for).+(?=\*\*\*)")
From cbcbddf93e3ad49148f33ff74a1dadc856e843ae Mon Sep 17 00:00:00 2001
From: Hui Zhang
Date: Thu, 24 Feb 2022 11:38:54 +0000
Subject: [PATCH 04/39] format, test=doc

---
 setup.py | 80 ++++++++++++++++++++++++++++++--------------------------
 1 file changed, 43 insertions(+), 37 deletions(-)

diff --git a/setup.py b/setup.py
index 9bb11d0d..31dfa0bc 100644
--- a/setup.py
+++ b/setup.py
@@ -29,44 +29,50 @@ HERE = Path(os.path.abspath(os.path.dirname(__file__)))
 
 VERSION = '0.1.1'
 
+base = [
+    "editdistance",
+    "g2p_en",
+    "g2pM",
+    "h5py",
+    "inflect",
+    "jieba",
+    "jsonlines",
+    "kaldiio",
+    "librosa",
+    "loguru",
+    "matplotlib",
+    "nara_wpe",
+    "pandas",
+    "paddleaudio",
+    "paddlenlp",
+    "paddlespeech_feat",
+    "praatio==5.0.0",
+    "pypinyin",
+    "python-dateutil",
+    "pyworld",
+    "resampy==0.2.2",
+    "sacrebleu",
+    "scipy",
+    "sentencepiece~=0.1.96",
+    "soundfile~=0.10",
+    "textgrid",
+    "timer",
+    "tqdm",
+    "typeguard",
+    "visualdl",
+    "webrtcvad",
+    "yacs~=0.1.8",
+]
+
+server = [
+    "fastapi",
+    "uvicorn",
+    "pattern_singleton",
+]
+
 requirements = {
-    "install": [
-        "editdistance",
-        "g2p_en",
-        "g2pM",
-        "h5py",
-        "inflect",
-        "jieba",
-        "jsonlines",
-        "kaldiio",
-        "librosa",
-        "loguru",
-        "matplotlib",
-        "nara_wpe",
-        "pandas",
-        "paddleaudio",
-        "paddlenlp",
-        "paddlespeech_feat",
-        "praatio==5.0.0",
-        "pypinyin",
-        "python-dateutil",
-        "pyworld",
-        "resampy==0.2.2",
-        "sacrebleu",
-        "scipy",
-        "sentencepiece~=0.1.96",
-        "soundfile~=0.10",
-        "textgrid",
-        "timer",
-        "tqdm",
-        "typeguard",
-        "visualdl",
-        "webrtcvad",
-        "yacs~=0.1.8",
-        # fastapi server
-        "fastapi",
-        "uvicorn",
-    ],
+    "install":
+    base + server,
     "develop": [
         "ConfigArgParse",
         "coverage",
From 920b2c808cd489aeae590c71dbfbcb1f2373665f Mon Sep 17 00:00:00 2001
From: lym0302
Date: Thu, 24 Feb 2022 19:58:16 +0800
Subject: [PATCH 05/39] params required, test=doc

---
 paddlespeech/server/bin/paddlespeech_client.py | 12 +++++++-----
 paddlespeech/server/bin/paddlespeech_server.py |  3 ++-
 2 files changed, 9 insertions(+), 6 deletions(-)

diff --git a/paddlespeech/server/bin/paddlespeech_client.py b/paddlespeech/server/bin/paddlespeech_client.py
index 3730d607..889df8d5 100644
--- a/paddlespeech/server/bin/paddlespeech_client.py
+++ b/paddlespeech/server/bin/paddlespeech_client.py
@@ -48,8 +48,9 @@ class TTSClientExecutor(BaseExecutor):
         self.parser.add_argument(
             '--input',
             type=str,
-            default="你好，欢迎使用语音合成服务",
-            help='A sentence to be synthesized.')
+            default=None,
+            help='Text to be synthesized.',
+            required=True)
         self.parser.add_argument(
             '--spk_id', type=int, default=0, help='Speaker id')
         self.parser.add_argument(
@@ -181,8 
+182,9 @@ class ASRClientExecutor(BaseExecutor):
         self.parser.add_argument(
             '--input',
             type=str,
-            default="./paddlespeech/server/tests/16_audio.wav",
-            help='Audio file to be recognized')
+            default=None,
+            help='Audio file to be recognized',
+            required=True)
         self.parser.add_argument(
             '--sample_rate', type=int, default=16000, help='audio sample rate')
         self.parser.add_argument(
@@ -241,4 +243,4 @@ class ASRClientExecutor(BaseExecutor):
             print(r.json())
             print("time cost %f s." % (time_end - time_start))
         except:
-            print("Failed to speech recognition.")
\ No newline at end of file
+            print("Failed to run speech recognition.")
diff --git a/paddlespeech/server/bin/paddlespeech_server.py b/paddlespeech/server/bin/paddlespeech_server.py
index ad62d3f6..aff77d54 100644
--- a/paddlespeech/server/bin/paddlespeech_server.py
+++ b/paddlespeech/server/bin/paddlespeech_server.py
@@ -41,7 +41,8 @@ class ServerExecutor(BaseExecutor):
             "--config_file",
             action="store",
             help="yaml file of the app",
-            default="./conf/application.yaml")
+            default=None,
+            required=True)
 
         self.parser.add_argument(
             "--log_file",
From 434708cff4edb8ca157c6fb7e39aa1926fb25223 Mon Sep 17 00:00:00 2001
From: lym0302
Date: Thu, 24 Feb 2022 20:14:07 +0800
Subject: [PATCH 06/39] set device cpu, test=doc

---
 demos/speech_server/conf/asr/asr.yaml               | 2 +-
 demos/speech_server/conf/asr/asr_pd.yaml            | 2 +-
 demos/speech_server/conf/tts/tts.yaml               | 2 +-
 demos/speech_server/conf/tts/tts_pd.yaml            | 4 ++--
 paddlespeech/server/conf/asr/asr.yaml               | 2 +-
 paddlespeech/server/conf/asr/asr_pd.yaml            | 2 +-
 paddlespeech/server/conf/tts/tts.yaml               | 2 +-
 paddlespeech/server/conf/tts/tts_pd.yaml            | 4 ++--
 paddlespeech/server/engine/asr/python/asr_engine.py | 5 ++++-
 paddlespeech/server/engine/tts/python/tts_engine.py | 5 ++++-
 10 files changed, 18 insertions(+), 12 deletions(-)

diff --git a/demos/speech_server/conf/asr/asr.yaml b/demos/speech_server/conf/asr/asr.yaml
index b1ef558d..1a805142 100644
--- a/demos/speech_server/conf/asr/asr.yaml
+++ b/demos/speech_server/conf/asr/asr.yaml
@@ -5,4 +5,4 @@ cfg_path:  # [optional]
 ckpt_path:  # [optional]
 decode_method: 'attention_rescoring'
 force_yes: True
-device: 'gpu:3'  # set 'gpu:id' or 'cpu'
+device: 'cpu'  # set 'gpu:id' or 'cpu'
diff --git a/demos/speech_server/conf/asr/asr_pd.yaml b/demos/speech_server/conf/asr/asr_pd.yaml
index 21bf7177..6cddb450 100644
--- a/demos/speech_server/conf/asr/asr_pd.yaml
+++ b/demos/speech_server/conf/asr/asr_pd.yaml
@@ -15,7 +15,7 @@ decode_method:
 force_yes: True
 
 am_predictor_conf:
-    device: 'gpu:3'  # set 'gpu:id' or 'cpu'
+    device: 'cpu'  # set 'gpu:id' or 'cpu'
     enable_mkldnn: True
     switch_ir_optim: True
diff --git a/demos/speech_server/conf/tts/tts.yaml b/demos/speech_server/conf/tts/tts.yaml
index 8d45aec5..19e8874e 100644
--- a/demos/speech_server/conf/tts/tts.yaml
+++ b/demos/speech_server/conf/tts/tts.yaml
@@ -29,4 +29,4 @@ voc_stat:
 #                          OTHERS                                #
 ##################################################################
 lang: 'zh'
-device: 'gpu:3'  # set 'gpu:id' or 'cpu'
+device: 'cpu'  # set 'gpu:id' or 'cpu'
diff --git a/demos/speech_server/conf/tts/tts_pd.yaml b/demos/speech_server/conf/tts/tts_pd.yaml
index ecfa3a3b..97df5261 100644
--- a/demos/speech_server/conf/tts/tts_pd.yaml
+++ b/demos/speech_server/conf/tts/tts_pd.yaml
@@ -15,7 +15,7 @@ speaker_dict:
 spk_id: 0
 
 am_predictor_conf:
-    device: 'gpu:3'  # set 'gpu:id' or 'cpu'
+    device: 'cpu'  # set 'gpu:id' or 'cpu'
     enable_mkldnn: False
     switch_ir_optim: False
@@ -30,7 +30,7 @@ voc_params: # the pdiparams file of your vocoder static model (XX.pdiparams)
 voc_sample_rate: 24000
 
 voc_predictor_conf:
-    device: 'gpu:3'  # set 'gpu:id' or 'cpu'
+    device: 'cpu'  # set 'gpu:id' or 'cpu'
     enable_mkldnn: False
     switch_ir_optim: False
diff --git a/paddlespeech/server/conf/asr/asr.yaml b/paddlespeech/server/conf/asr/asr.yaml
index b1ef558d..1a805142 100644
--- a/paddlespeech/server/conf/asr/asr.yaml
+++ b/paddlespeech/server/conf/asr/asr.yaml
@@ -5,4 +5,4 @@ cfg_path:  # [optional]
 ckpt_path:  # [optional]
 decode_method: 'attention_rescoring'
 force_yes: True
-device: 'gpu:3'  # set 'gpu:id' or 'cpu'
+device: 'cpu'  # set 'gpu:id' or 'cpu'
diff --git a/paddlespeech/server/conf/asr/asr_pd.yaml b/paddlespeech/server/conf/asr/asr_pd.yaml
index 21bf7177..6cddb450 100644
--- a/paddlespeech/server/conf/asr/asr_pd.yaml
+++ b/paddlespeech/server/conf/asr/asr_pd.yaml
@@ -15,7 +15,7 @@ decode_method:
 force_yes: True
 
 am_predictor_conf:
-    device: 'gpu:3'  # set 'gpu:id' or 'cpu'
+    device: 'cpu'  # set 'gpu:id' or 'cpu'
     enable_mkldnn: True
     switch_ir_optim: True
diff --git a/paddlespeech/server/conf/tts/tts.yaml b/paddlespeech/server/conf/tts/tts.yaml
index 8d45aec5..19e8874e 100644
--- a/paddlespeech/server/conf/tts/tts.yaml
+++ b/paddlespeech/server/conf/tts/tts.yaml
@@ -29,4 +29,4 @@ voc_stat:
 #                          OTHERS                                #
 ##################################################################
 lang: 'zh'
-device: 'gpu:3'  # set 'gpu:id' or 'cpu'
+device: 'cpu'  # set 'gpu:id' or 'cpu'
diff --git a/paddlespeech/server/conf/tts/tts_pd.yaml b/paddlespeech/server/conf/tts/tts_pd.yaml
index cd4b8583..019c7ed6 100644
--- a/paddlespeech/server/conf/tts/tts_pd.yaml
+++ b/paddlespeech/server/conf/tts/tts_pd.yaml
@@ -15,7 +15,7 @@ speaker_dict:
 spk_id: 0
 
 am_predictor_conf:
-    device: 'gpu:3'  # set 'gpu:id' or 'cpu'
+    device: 'cpu'  # set 'gpu:id' or 'cpu'
     enable_mkldnn: False
     switch_ir_optim: False
@@ -30,7 +30,7 @@ voc_params: # the pdiparams file of your vocoder static model (XX.pdiparams)
 voc_sample_rate: 24000  # must match the model
 
 voc_predictor_conf:
-    device: 'gpu:3'  # set 'gpu:id' or 'cpu'
+    device: 'cpu'  # set 'gpu:id' or 'cpu'
     enable_mkldnn: False
     switch_ir_optim: False
diff --git a/paddlespeech/server/engine/asr/python/asr_engine.py b/paddlespeech/server/engine/asr/python/asr_engine.py
index 60040051..9fac487d 100644
--- a/paddlespeech/server/engine/asr/python/asr_engine.py
+++ b/paddlespeech/server/engine/asr/python/asr_engine.py
@@ -53,7 +53,10 @@ class ASREngine(BaseEngine):
 
         self.executor = ASRServerExecutor()
         self.config = get_config(config_file)
-        paddle.set_device(self.config.device)
+        if self.config.device is None:
+            paddle.set_device(paddle.get_device())
+        else:
+            paddle.set_device(self.config.device)
 
         self.executor._init_from_path(
             self.config.model, self.config.lang, self.config.sample_rate,
             self.config.cfg_path, self.config.decode_method,
diff --git a/paddlespeech/server/engine/tts/python/tts_engine.py b/paddlespeech/server/engine/tts/python/tts_engine.py
index e11cfb1d..508a1f35 100644
--- a/paddlespeech/server/engine/tts/python/tts_engine.py
+++ b/paddlespeech/server/engine/tts/python/tts_engine.py
@@ -54,7 +54,10 @@ class TTSEngine(BaseEngine):
 
         try:
             self.config = get_config(config_file)
-            paddle.set_device(self.config.device)
+            if self.config.device is None:
+                paddle.set_device(paddle.get_device())
+            else:
+                paddle.set_device(self.config.device)
 
             self.executor._init_from_path(
                 am=self.config.am,
From f29ae92a88451d8f2b6d486870db66cc2635dffe Mon Sep 17 00:00:00 2001
From: huangyuxin
Date: Fri, 25 Feb 2022 06:14:31 +0000
Subject: [PATCH 07/39] add unit test for 
deepspeech2online inference

---
 .../unit/asr/deepspeech2_online_model_test.py | 85 +++++++++++++++++-
 .../test_data/static_ds2online_inputs.pickle  | Bin 0 -> 45895 bytes
 2 files changed, 84 insertions(+), 1 deletion(-)
 create mode 100644 tests/unit/asr/test_data/static_ds2online_inputs.pickle

diff --git a/tests/unit/asr/deepspeech2_online_model_test.py b/tests/unit/asr/deepspeech2_online_model_test.py
index f623c5ac..3d634945 100644
--- a/tests/unit/asr/deepspeech2_online_model_test.py
+++ b/tests/unit/asr/deepspeech2_online_model_test.py
@@ -15,9 +15,12 @@ import unittest
+import os
+import pickle
 
 import numpy as np
 import paddle
+from paddle import inference
 
 from paddlespeech.s2t.models.ds2_online import DeepSpeech2ModelOnline
-
+from paddlespeech.s2t.models.ds2_online import DeepSpeech2InferModelOnline
 
 class TestDeepSpeech2ModelOnline(unittest.TestCase):
     def setUp(self):
@@ -182,5 +185,85 @@ class TestDeepSpeech2ModelOnline(unittest.TestCase):
             paddle.allclose(final_state_c_box, final_state_c_box_chk), True)
 
 
+class TestDeepSpeech2StaticModelOnline(unittest.TestCase):
+    def setUp(self):
+        export_prefix = "exp/deepspeech2_online/checkpoints/test_export"
+        os.makedirs(os.path.dirname(export_prefix), mode=0o755, exist_ok=True)
+        infer_model = DeepSpeech2InferModelOnline(
+            feat_size=161,
+            dict_size=4233,
+            num_conv_layers=2,
+            num_rnn_layers=5,
+            rnn_size=1024,
+            num_fc_layers=0,
+            fc_layers_size_list=[-1],
+            use_gru=False)
+        static_model = infer_model.export()
+        paddle.jit.save(static_model, export_prefix)
+
+        with open("test_data/static_ds2online_inputs.pickle", "rb") as f:
+            self.data_dict = pickle.load(f)
+
+        self.setup_model(export_prefix)
+
+    def setup_model(self, export_prefix):
+        # build a Paddle Inference predictor from the exported static model
+        deepspeech_config = inference.Config(export_prefix + ".pdmodel",
+                                             export_prefix + ".pdiparams")
+        if ('CUDA_VISIBLE_DEVICES' in os.environ.keys() and
+                os.environ['CUDA_VISIBLE_DEVICES'].strip() != ''):
+            deepspeech_config.enable_use_gpu(100, 0)
+        deepspeech_config.enable_memory_optim()
+        deepspeech_predictor = inference.create_predictor(deepspeech_config)
+        self.predictor = deepspeech_predictor
+
+    def test_unit(self):
+        input_names = self.predictor.get_input_names()
+        audio_handle = self.predictor.get_input_handle(input_names[0])
+        audio_len_handle = self.predictor.get_input_handle(input_names[1])
+        h_box_handle = self.predictor.get_input_handle(input_names[2])
+        c_box_handle = self.predictor.get_input_handle(input_names[3])
+
+        x_chunk = self.data_dict["audio_chunk"]
+        x_chunk_lens = self.data_dict["audio_chunk_lens"]
+        chunk_state_h_box = self.data_dict["chunk_state_h_box"]
+        chunk_state_c_box = self.data_dict["chunk_state_c_bos"]
+
+        audio_handle.reshape(x_chunk.shape)
+        audio_handle.copy_from_cpu(x_chunk)
+
+        audio_len_handle.reshape(x_chunk_lens.shape)
+        audio_len_handle.copy_from_cpu(x_chunk_lens)
+
+        h_box_handle.reshape(chunk_state_h_box.shape)
+        h_box_handle.copy_from_cpu(chunk_state_h_box)
+
+        c_box_handle.reshape(chunk_state_c_box.shape)
+        c_box_handle.copy_from_cpu(chunk_state_c_box)
+
+        output_names = self.predictor.get_output_names()
+        output_handle = self.predictor.get_output_handle(output_names[0])
+        output_lens_handle = self.predictor.get_output_handle(output_names[1])
+        output_state_h_handle = self.predictor.get_output_handle(output_names[2])
+        output_state_c_handle = self.predictor.get_output_handle(output_names[3])
+        self.predictor.run()
+
+        output_chunk_probs = output_handle.copy_to_cpu()
+        output_chunk_lens = output_lens_handle.copy_to_cpu()
+        chunk_state_h_box = 
output_state_h_handle.copy_to_cpu() + chunk_state_c_box = output_state_c_handle.copy_to_cpu() + return True + + if __name__ == '__main__': unittest.main() diff --git a/tests/unit/asr/test_data/static_ds2online_inputs.pickle b/tests/unit/asr/test_data/static_ds2online_inputs.pickle new file mode 100644 index 0000000000000000000000000000000000000000..8aca0543ad69f8bb64b77664375bbd281a3747c6 GIT binary patch literal 45895 zcmZU)XH?cq)HX_$4uTW`=?E%<6@fdMiGqTH6+xs3Vnt986(744>AeU_5ky3a6_Ii$ zGaFFsA{NAo4HPWcuq%GN?|06Rvre*Bek8MIuVf~BU;EnEj+EXVE-_}Ql7xgrV8p6* zVF4?HB0@vLC0B+)ixU<)^AxCxM@@1cEx~At5=4FZr-vfV&xXaaH;=0 zN-=cR|9u)R?IsZ$F5@J*B3w2;e3*N<+!!~>p;ON7aD2G@|1)^imhBr?D~2m9l^U|S zW@@-%VnTell6$!F7&qyql0)~{aFzM<=ezzJI{x?KCb1=4bw#3Mp}K^`HgLw>_x(`) zgXeU&@p^c7q#Q>5OoHnpPlN94Jm?RtfzS6UK!~e^lJrRE&EF2tH5+dJodqx~8lIW$ zgtJbr@Z#eZNHEKU&n=R$cO(mkKO_S7(g%+>cd4e6F)VIw0qd+`VBXzCoJlVo0IvOrT%mGO} zNGG4cVBT5&TgYiLt3Hr>s#89-$cU~ z{U)z=9wJ>&{?W&?OW?}$Bp6%P2BW9$gF&-E_<5zAZa=L=b?vs2B|+cN4&z|_s;-pd z)OT@TPKMBm{e5_ymMx|$>ZwF|5iEA!2?4f=;1P2kavINrka84mkO(MF_W-#l574sN z0nb{OLzeD%m|0o??!N87uPua#tHp3w?+`3^iUK@s8yI=dhDBdRFv0aL?9I@Cchb6a zYa{kNVclUN^O?H9DGCN?@`aJlPb_{l(dQCSbTEO|B zqu@8$9;#m)rD|Vt>C4`!u+=&f5}#Q>zKb5b(F=ofCo@6f&mvG>)&hT`ep367s_^tt zD&&sCknvC#8aEoiQ|o#-)#wY80xIC>-lGsaZ7=wq+X50Xxvi8rs!4RpHAUoxGD^{o(Z2{Yrx^eL0YG;5A%a9;Ewh_NIn(|@*8d8X@3Fi z_1_N`J&_>3Hyh6Paj*kB!;Y0Cy!~K76P*l1;WNDbD$(| zXztTRkhmch9-W*I7YpQ?c4X&5omDKn<0gWmxe>&fg~Gb62jQ5LUX%Iw^{_)?Cn)?2 zgqgomU{3o1$go=jih(&$@-P4#AG$%w$RfCM*ASvtU!)UF9YYDkx(j3gvDlm=!o(KaQ%-17|Kq76Hgdut|v6} z?I3+}Y9koC7=Y2Z?=W{j!7CcwKZlz?QmacwYWs%h&nj9ECh4w z@n9l6reJIeZP(mEZ*mOnFgi$+W!8Xi#4u3VYXn!`uY-BqZ~AO%0KE8>3g?U$!}Af( zsi$y_PWT`X;~me_`xaJE%1eX2&m9Uw-*9HQEetOsJA=(h&nnu!6}! z&d|3`2OccCLr)Z$!mOwyP|=Zv7V}|{cR--yD*NbWm%R|SNd=PUzM=;kEugU=7u3|_ zpr+;_HJtpCt}7D3zmQT;8}i|rp+||2b{baLjJc?qC+Ox?d+Z@$P4ZJt(k|WuY{o`F zuxc)ZN>+g8nM|n8FNH-1=04iq5!u_SgVTa;S+*x}7 zREiS;PE+tX9|E$@KWUNvEEqN~1iJ3rr=v^_V5F@zoV@XZX8PoSZ0BTP_V>~LR8{C) zG7nA;*21X+^WjEGAjr0Egv61Gur0&|TzFZS{I-Z%-_?hi$J41)%4r(@NDrcFvH|wq z2hoRAsLQd2mI`fnkednr#ms><8f!pymL)7wHiCf2bJVJPG^8|GLC@O^SYeq5y~GB> zZ^zJ}r#_IA{)^T&UZ7nwl|Z{CnQD(QfcKx{;igO`j2N5;-HM}Nhm!}G$(V!P=VI8X zDuP7K1E6NB4IXWi;n}4mNHEz6(HYLLRJD;lzp4od?i$cp=tiS*b7!ex1SEaa zAnxEdx+?ND`SqPr@mVXlzBQHZ9cv7hbJA#woEMFh3#Z#=H?gxyi|DC09rV)*7UU#8 z!%1fhK1M)YRz>}V8nx?u2 zRF_#pg1r*tE{=h3=iXD@S8M6*_HXoi!X&tLp%x6U#gbbtQ@OILK+OkY#ES& z0EqKH4BsjX!SvS^2uO~Bt*2eUY~eY0`=kb*-`fxN`az#RGJ&l#zf)ez96ByJfO!0J=+}HlTTWb}HpOco zc2*mB-#!hyl@@~1LPeOiLlHE@E8)$cIs8ybhE2_l@N!oa)Vuh=+lLQfcGUoV+H)G( z79bGZOTpQH0tCK@1&@tk&^*ruG%Bj;<(>}|a09Ge#=_mzj?lCFCY&_72e5WKjC0ry z;_mU#|93OoV$wh;+XV`<^}zj68FW7y4GCIn;iLOyy8ci*?BXW^<+nn)Rv}y&R|mq8 z5D4171}+)9fla?Ld>t1FN#=#{)@dUA^pF8$;|T|Z<-qpHfo6dW_~a~v_h$}4r9A`R zKOF_@hIkn1nFWW#G9Xl10+i_%YU6JKqf4z|)Q5-ko_z|e_!JAG2rKyXB?;16&Qd8e zFKT3|1r}qMfP7dq%()o}odXVFE+v8^-jU$3XE{`@{Q=o2LvgBRkeZHbqAqf+U>$UY z>R1fWhg!F(zxjA@v2}w~#K5QS4Ul>!1lk;cb{`6-r{>vF`#-Lf^Ok|#UJfu=V@J2$ zBM_THAt_CkdLER4*E!BG=I3fCGE|4Xn-_rYv1%we76%`z;y@Ih20x6B!F#_3_+We< z&U$Fc4o0Nqg7O`SrjGteP@Xmml+GrDmE2Y^Ogsyr z&Viu+a0f`dTL-pdO5x*^gYfj}G}x|V2(IPEprf7)@?*z=lGq(w+Kxhaa4oFZ5eQ4a zg+lR|A)nAA3vWs@VR73~>^sZAT3;{7w{ig`#tEd33}KPL9dL0}Jw)h+LjQ#v*x;NB z8-E=D>G-QO%;+aovvq`#WyK)na}4CB-hl@J4bZKd4D&}-!nf2Qh zyxzvaDgXcA^V}Gi^4<%?Z3ghCeFQ{=h~R#o9(1jY0jp1YK<`!>`2D#6!Mm0~eXkSD zj8lTHDkCsS+CGFq{9%#*Z758)h2~>-shhSxXdB1DPkt`QoiPLN`uT9E{5UB7X9^=l zN5MWs3wHj|2HlZ2>81n?IQZBYZficG=gN%Xf+!w*FRTT-owK0zT`0IHuYq-6wBb@$ z6l~6)3=7Vr&`|AJkU6S>4&>ON%=892R`L>@_%T9YARmh6+Jk)E78pD8Aw)T3L)GdO z0Js0Z{cbX3s{yQ-z8wC5ENuKT3%*D#hSo#Y&^saw?heI}U#U|;%5)NJE@wdg#Rb~a 
zag9Ot>bq#HlQLNt)o>C0{ucx%$K=3;HzRo0lO}4ru@tCi4T8*!0S1^5>chGWGXLCG z)+hI;z#*xGGM2awmR|Cv`+K*uzIA|-tmPfO4l_9u@kyfIMVFDvvTT@VBF6>}0b(1n zk-aia3+_Fn!ZgeefxV8g*r)Oc+xuh=iLRE1#SU`3Ie02_eytefd16kdu|A!}c&QHUbw`= zL1adsKU0Fr+@7F4%~Qeal!?Gd=n9TD>A`q6XBecgRdBh&kmP;)Mp07yyTR-fyYkOv zQM-i(SL862Y<(}ujP;D6#ZAY!sYEr-(SttdF0IK7%pdFeKCwWzgpX6i8uyChT(VV#>F-(QDpng3TWy z;LbUk_|NoAPEXtk&o=+T72kyL=z5;{qIeEixf~Gq)GXt;gYod1bTM9SlgCNM57Who z>zK{154cih0a>|5gMD>Im)qWCNvsr`g#U$DAnoHAG>-oU3}Uyko}-a4KQNo`;aN&e zFP0&>Q(WZ4^0Omq*#LCun{51MJ*c%CuX^VPyC^4ZcrHfNBb=a3kNF znbAB*&)ZZ$kFGq>qJqLRI{+1mPemE2LFq;8EZ4!&iL;*`<%np146W(8=?h=a-R z&SB5!eDFiJm}ylP19=sHIYTuka?_Z14f$#ab3IN9Pkea+mgm<3a6KLN4EM8c_tpS~ ztV1BqVkvPM%Ec;4YH$t{O#XYlh+GeN4|av5(~Uinpfd3c@RCf1o%`dM!_ipCE_}eM zdL-km@ztVz{%$Zr!IBOh<9mV5wDCFXHDt{rPm-YhRZu)36%e^9aKg`$T-7{HOQ+S) zcCNaVWX%tul$;(js!;&`iya2=)(hZ`Ug7j}W=v`5BQ!0o2x-SA!jFYl=tf%__Bs@T zQ+H!votHg~oa;yiV%0d+7JaZQ`UdT>C<*55Jk3>na3-%y0_l@$_EIwA1;n&t5|`7V z&WOn=p+jTkqD2!|(Nd#tTD~$9K?N@v3Vn;K72a z^qh}Va9h2OXIG2mJhz0*}yQOi4gA$P84VjduBPof-vP{I@OKXO#u?;)Qm|cGV-= z$bqA3?pRY+3*zB~tl7Bt`c%S?=uqMJgfIlI1n4XP?E@lIajOMSf>`OO1b6|;%) zy9o0BSt1&~^OT719}RootM_)@+C7l-UnJ?dmMz?-PRbr$v!AsVYY6 z>uUI#9|79FrcXFAC!8)pJGqVj`C;$Dg~Hj4ALsul8!O!9eQkfcz+#9f-@TGFRpLeH zTwk*PTRav0eZ8oKpJ&ntYY_SG@Uw`^@nN-nPJ=q#(=8s$?oyK9&VZxh*61zoOTXEc z2bc0SjD1H#Ii+P;P-<-;+h5?#EW9#IrS)i$z4I?|Z;w_}jgEWic3lT9>|QCY`_%zf z9}5Om$Hx=B#aeXZhJ&1QS|;4ZR)TxU)%29AH1c)9Haf=WD(^*?MC(s5WN|_TThLm< z<=@}Ke0}^=)Z4!k#k+muG!`VnS0}b`iNmtsc#If2n;QjHRTUV|wbNQkT`XZ=Z5y4F zln3L&pKuT6ClQ;biF9@bVs%4|824ZSnDdkc*DeV0#F%`Ldg6{~yhIDoeshc+z;d0dh9-->aPzlz{AS#C~MFTG}gKBcIl zg2uo8MXOX@R&3Wcdr^5kNK0K{~(;>x|gDSmSPmed5g}+^q zLQ8WaU{3W>&i=`4D8mH73rh?{2@725&`dw*K4L)M+dYn`8y@C1{tP0U{#~LETrgzk z$_2xF9WmtD{ctXR^IC5A>0i0c z^xMv82C&XVJ~)Ydy!-=9ZcU<}P9EUG>(_Cmx!rW8S{Rh@d_pZ|vYClyJDI_0|4}`w z7LsE1ZRkmHHk6E1W#W5fxdDrv;8|QAyjoPv4n^kSHSreM>E1^4)af**$u33J^BP5g zz9o!CiYoOpu9T^qVhAo|^1n~T5XXH80!vFd5SS$mi}SuS&vyvW==*rslC6nk+N3$o zVJpr$oR3=al41KXZzv-(8@%pb%B&ybiz9qq$TkC3 ztvW|)KlzSp$fk1Swu?<@Yv!G$WI8ywu3{KM9Mb|vi!B)~%NbY1gX#CJa z4}2`4Zn&G^-^+Q&cDIl)b93Q1x$A7mf<4T|hD12%J|K*cP2iqcxCnIJ2WY7qRT!kq z28D;6Y2Ah?$ntssJa~EqWD`q;RqoT^)Niwi+O<5CI+6|NP$z{xVW)7KY8-s@c@gh{ z-NTt~KOh=IC$XBB3ti?YVnz$n>5YYE#3()#=J9i1COtrxr9He3n$j^F8+B2D3C)a2^pP9ivd}HEN`9F7)Yb2F5R%=?>XAkXIH0^+f@!XM8rvb^F5n2))8Q zS#g%$P_Yy~XHDo&nL5~1=nG8(73uhyA&@*74?p_3lO2i?95cm??pQyLdhp~w)Vn1f zrj_)uSq62~)Nvcwx4{q5*zgcto25l3;CrrQR^W0@1)lzF0jBP&K(kJ`(A=C@_@PD?`I@c;s;$GQX0bO^4~1Zo&H(juz8UG9 zM$k*(M2vTD#QV2yr>Uv|>cBU5d~$9av3j$NRt{7~1!H_(Lw_gt$3+Y!yFI3!Rp`?klqNN{@ow*|n%iCIZ&}<+&VgQXIPcnAO{xN#{k1=-t7OQ0K{|B<7`vdSz)z z>dSAmTncs}I|HV2C)5n7CGv=Q^tuPdcT5ua>DAzrMZdYj-W=N;zmTJ z#)K<~$^NqN~~XxADGFD1!PM->*5S6ZQP_srDx4bo9gqjN`a4+~XHSoZG^hNfw$jL8>dnc_y5)D%lX9uOu`ycWO2d@AC literal 0 HcmV?d00001 From 162361d878030308d0cfea1f7e8b88067fcb794c Mon Sep 17 00:00:00 2001 From: lym0302 Date: Fri, 25 Feb 2022 14:15:27 +0800 Subject: [PATCH 08/39] format code, test=doc --- paddlespeech/server/bin/main.py | 2 +- .../server/bin/paddlespeech_client.py | 8 ++-- .../engine/asr/paddleinference/asr_engine.py | 38 ++++++++----------- paddlespeech/server/engine/base_engine.py | 2 - paddlespeech/server/engine/engine_factory.py | 1 - paddlespeech/server/engine/engine_pool.py | 6 ++- .../engine/tts/paddleinference/tts_engine.py | 16 +++++--- .../server/engine/tts/python/tts_engine.py | 19 +++++++--- paddlespeech/server/restful/asr_api.py | 3 +- paddlespeech/server/restful/request.py | 1 - paddlespeech/server/restful/response.py | 3 -- paddlespeech/server/restful/tts_api.py | 2 +- 
paddlespeech/server/tests/asr/http_client.py | 22 +++++------ paddlespeech/server/tests/tts/test_client.py | 3 +- paddlespeech/server/util.py | 2 +- 15 files changed, 65 insertions(+), 63 deletions(-) diff --git a/paddlespeech/server/bin/main.py b/paddlespeech/server/bin/main.py index dda0bbd7..360d295e 100644 --- a/paddlespeech/server/bin/main.py +++ b/paddlespeech/server/bin/main.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. import argparse + import uvicorn -import yaml from fastapi import FastAPI from paddlespeech.server.engine.engine_pool import init_engine_pool diff --git a/paddlespeech/server/bin/paddlespeech_client.py b/paddlespeech/server/bin/paddlespeech_client.py index 889df8d5..853d272f 100644 --- a/paddlespeech/server/bin/paddlespeech_client.py +++ b/paddlespeech/server/bin/paddlespeech_client.py @@ -124,7 +124,7 @@ class TTSClientExecutor(BaseExecutor): logger.info("RTF: %f " % (time_consume / duration)) return True - except: + except BaseException: logger.error("Failed to synthesized audio.") return False @@ -164,7 +164,7 @@ class TTSClientExecutor(BaseExecutor): print("Audio duration: %f s." % (duration)) print("Response time: %f s." % (time_consume)) print("RTF: %f " % (time_consume / duration)) - except: + except BaseException: print("Failed to synthesized audio.") @@ -211,7 +211,7 @@ class ASRClientExecutor(BaseExecutor): logger.info(r.json()) logger.info("time cost %f s." % (time_end - time_start)) return True - except: + except BaseException: logger.error("Failed to speech recognition.") return False @@ -242,5 +242,5 @@ class ASRClientExecutor(BaseExecutor): time_end = time.time() print(r.json()) print("time cost %f s." % (time_end - time_start)) - except: + except BaseException: print("Failed to speech recognition.") diff --git a/paddlespeech/server/engine/asr/paddleinference/asr_engine.py b/paddlespeech/server/engine/asr/paddleinference/asr_engine.py index 6d072322..5d4c4fa6 100644 --- a/paddlespeech/server/engine/asr/paddleinference/asr_engine.py +++ b/paddlespeech/server/engine/asr/paddleinference/asr_engine.py @@ -13,31 +13,24 @@ # limitations under the License. 
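# [editor's note] The paddlespeech_client.py hunks above replace every bare
# ``except:`` with ``except BaseException:``. The two spellings catch exactly
# the same set of exceptions; the explicit form merely satisfies linters
# (flake8 E722) and documents intent. A minimal sketch, with hypothetical
# names (``risky_call`` is not part of this patch):
#
#     try:
#         risky_call()
#     except BaseException:   # identical reach to a bare ``except:``
#         logger.error("Failed.")
#
# To let KeyboardInterrupt/SystemExit propagate, the narrower
# ``except Exception:`` would be the usual choice instead.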
import io import os -from typing import List from typing import Optional -from typing import Union -import librosa import paddle -import soundfile from yacs.config import CfgNode -from paddlespeech.cli.utils import MODEL_HOME -from paddlespeech.s2t.modules.ctc import CTCDecoder from paddlespeech.cli.asr.infer import ASRExecutor from paddlespeech.cli.log import logger +from paddlespeech.cli.utils import MODEL_HOME from paddlespeech.s2t.frontend.featurizer.text_featurizer import TextFeaturizer -from paddlespeech.s2t.transform.transformation import Transformation -from paddlespeech.s2t.utils.dynamic_import import dynamic_import +from paddlespeech.s2t.modules.ctc import CTCDecoder from paddlespeech.s2t.utils.utility import UpdateConfig +from paddlespeech.server.engine.base_engine import BaseEngine from paddlespeech.server.utils.config import get_config from paddlespeech.server.utils.paddle_predictor import init_predictor from paddlespeech.server.utils.paddle_predictor import run_model -from paddlespeech.server.engine.base_engine import BaseEngine __all__ = ['ASREngine'] - pretrained_models = { "deepspeech2offline_aishell-zh-16k": { 'url': @@ -143,7 +136,6 @@ class ASRServerExecutor(ASRExecutor): batch_average=True, # sum / batch_size grad_norm_type=self.config.get('ctc_grad_norm_type', None)) - @paddle.no_grad() def infer(self, model_type: str): """ @@ -161,9 +153,8 @@ class ASRServerExecutor(ASRExecutor): cfg.beam_size, cfg.cutoff_prob, cfg.cutoff_top_n, cfg.num_proc_bsearch) - output_data = run_model( - self.am_predictor, - [audio.numpy(), audio_len.numpy()]) + output_data = run_model(self.am_predictor, + [audio.numpy(), audio_len.numpy()]) probs = output_data[0] eouts_len = output_data[1] @@ -208,14 +199,14 @@ class ASREngine(BaseEngine): paddle.set_device(paddle.get_device()) self.executor._init_from_path( - model_type=self.config.model_type, - am_model=self.config.am_model, - am_params=self.config.am_params, - lang=self.config.lang, - sample_rate=self.config.sample_rate, - cfg_path=self.config.cfg_path, - decode_method=self.config.decode_method, - am_predictor_conf=self.config.am_predictor_conf) + model_type=self.config.model_type, + am_model=self.config.am_model, + am_params=self.config.am_params, + lang=self.config.lang, + sample_rate=self.config.sample_rate, + cfg_path=self.config.cfg_path, + decode_method=self.config.decode_method, + am_predictor_conf=self.config.am_predictor_conf) logger.info("Initialize ASR server engine successfully.") return True @@ -230,7 +221,8 @@ class ASREngine(BaseEngine): io.BytesIO(audio_data), self.config.sample_rate, self.config.force_yes): logger.info("start running asr engine") - self.executor.preprocess(self.config.model_type, io.BytesIO(audio_data)) + self.executor.preprocess(self.config.model_type, + io.BytesIO(audio_data)) self.executor.infer(self.config.model_type) self.output = self.executor.postprocess() # Retrieve result of asr. logger.info("end inferring asr engine") diff --git a/paddlespeech/server/engine/base_engine.py b/paddlespeech/server/engine/base_engine.py index 0cc20209..0f020d1c 100644 --- a/paddlespeech/server/engine/base_engine.py +++ b/paddlespeech/server/engine/base_engine.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
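# [editor's note] The asr_engine.py hunk above funnels inference through
# ``init_predictor``/``run_model`` from
# paddlespeech/server/utils/paddle_predictor.py. Those helpers wrap the
# standard Paddle Inference flow; a self-contained sketch of that flow
# (generic paddle.inference API, not this repo's exact helper signatures,
# and the model paths are hypothetical):
#
#     import numpy as np
#     from paddle.inference import Config, create_predictor
#
#     config = Config("model.pdmodel", "model.pdiparams")
#     config.disable_gpu()  # or config.enable_use_gpu(init_mem_mb, gpu_id)
#     predictor = create_predictor(config)
#
#     def run_once(arrays):  # arrays: list of np.ndarray, one per model input
#         for name, arr in zip(predictor.get_input_names(), arrays):
#             predictor.get_input_handle(name).copy_from_cpu(arr)
#         predictor.run()
#         return [predictor.get_output_handle(n).copy_to_cpu()
#                 for n in predictor.get_output_names()]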
import os -from typing import Any -from typing import List from typing import Union from pattern_singleton import Singleton diff --git a/paddlespeech/server/engine/engine_factory.py b/paddlespeech/server/engine/engine_factory.py index 05f13568..546541ed 100644 --- a/paddlespeech/server/engine/engine_factory.py +++ b/paddlespeech/server/engine/engine_factory.py @@ -13,7 +13,6 @@ # limitations under the License. from typing import Text - __all__ = ['EngineFactory'] diff --git a/paddlespeech/server/engine/engine_pool.py b/paddlespeech/server/engine/engine_pool.py index 0198bd80..f6a4d2aa 100644 --- a/paddlespeech/server/engine/engine_pool.py +++ b/paddlespeech/server/engine/engine_pool.py @@ -29,8 +29,10 @@ def init_engine_pool(config) -> bool: """ global ENGINE_POOL for engine in config.engine_backend: - ENGINE_POOL[engine] = EngineFactory.get_engine(engine_name=engine, engine_type=config.engine_type[engine]) - if not ENGINE_POOL[engine].init(config_file=config.engine_backend[engine]): + ENGINE_POOL[engine] = EngineFactory.get_engine( + engine_name=engine, engine_type=config.engine_type[engine]) + if not ENGINE_POOL[engine].init( + config_file=config.engine_backend[engine]): return False return True diff --git a/paddlespeech/server/engine/tts/paddleinference/tts_engine.py b/paddlespeech/server/engine/tts/paddleinference/tts_engine.py index ecd2b0b6..a9dc5f4e 100644 --- a/paddlespeech/server/engine/tts/paddleinference/tts_engine.py +++ b/paddlespeech/server/engine/tts/paddleinference/tts_engine.py @@ -360,8 +360,8 @@ class TTSEngine(BaseEngine): am_predictor_conf=self.config.am_predictor_conf, voc_predictor_conf=self.config.voc_predictor_conf, ) - except: - logger.info("Initialize TTS server engine Failed.") + except BaseException: + logger.error("Initialize TTS server engine Failed.") return False logger.info("Initialize TTS server engine successfully.") @@ -405,11 +405,13 @@ class TTSEngine(BaseEngine): # transform speed try: # windows not support soxbindings wav_speed = change_speed(wav_vol, speed, target_fs) - except: + except ServerBaseException: raise ServerBaseException( ErrorCode.SERVER_INTERNAL_ERR, "Transform speed failed. Can not install soxbindings on your system. 
\ You need to set speed value 1.0.") + except BaseException: + logger.error("Transform speed failed.") # wav to base64 buf = io.BytesIO() @@ -462,9 +464,11 @@ class TTSEngine(BaseEngine): try: self.executor.infer( text=sentence, lang=lang, am=self.config.am, spk_id=spk_id) - except: + except ServerBaseException: raise ServerBaseException(ErrorCode.SERVER_INTERNAL_ERR, "tts infer failed.") + except BaseException: + logger.error("tts infer failed.") try: target_sample_rate, wav_base64 = self.postprocess( @@ -474,8 +478,10 @@ class TTSEngine(BaseEngine): volume=volume, speed=speed, audio_path=save_path) - except: + except ServerBaseException: raise ServerBaseException(ErrorCode.SERVER_INTERNAL_ERR, "tts postprocess failed.") + except BaseException: + logger.error("tts postprocess failed.") return lang, target_sample_rate, wav_base64 diff --git a/paddlespeech/server/engine/tts/python/tts_engine.py b/paddlespeech/server/engine/tts/python/tts_engine.py index 508a1f35..20b4e0fe 100644 --- a/paddlespeech/server/engine/tts/python/tts_engine.py +++ b/paddlespeech/server/engine/tts/python/tts_engine.py @@ -72,8 +72,8 @@ class TTSEngine(BaseEngine): voc_ckpt=self.config.voc_ckpt, voc_stat=self.config.voc_stat, lang=self.config.lang) - except: - logger.info("Initialize TTS server engine Failed.") + except BaseException: + logger.error("Initialize TTS server engine Failed.") return False logger.info("Initialize TTS server engine successfully.") @@ -117,10 +117,13 @@ class TTSEngine(BaseEngine): # transform speed try: # windows not support soxbindings wav_speed = change_speed(wav_vol, speed, target_fs) - except: + except ServerBaseException: raise ServerBaseException( ErrorCode.SERVER_INTERNAL_ERR, - "Can not install soxbindings on your system.") + "Transform speed failed. Can not install soxbindings on your system. 
\ + You need to set speed value 1.0.") + except BaseException: + logger.error("Transform speed failed.") # wav to base64 buf = io.BytesIO() @@ -173,9 +176,11 @@ class TTSEngine(BaseEngine): try: self.executor.infer( text=sentence, lang=lang, am=self.config.am, spk_id=spk_id) - except: + except ServerBaseException: raise ServerBaseException(ErrorCode.SERVER_INTERNAL_ERR, "tts infer failed.") + except BaseException: + logger.error("tts infer failed.") try: target_sample_rate, wav_base64 = self.postprocess( @@ -185,8 +190,10 @@ class TTSEngine(BaseEngine): volume=volume, speed=speed, audio_path=save_path) - except: + except ServerBaseException: raise ServerBaseException(ErrorCode.SERVER_INTERNAL_ERR, "tts postprocess failed.") + except BaseException: + logger.error("tts postprocess failed.") return lang, target_sample_rate, wav_base64 diff --git a/paddlespeech/server/restful/asr_api.py b/paddlespeech/server/restful/asr_api.py index 4806c042..cf46735d 100644 --- a/paddlespeech/server/restful/asr_api.py +++ b/paddlespeech/server/restful/asr_api.py @@ -14,6 +14,7 @@ import base64 import traceback from typing import Union + from fastapi import APIRouter from paddlespeech.server.engine.engine_pool import get_engine_pool @@ -83,7 +84,7 @@ def asr(request_body: ASRRequest): except ServerBaseException as e: response = failed_response(e.error_code, e.msg) - except: + except BaseException: response = failed_response(ErrorCode.SERVER_UNKOWN_ERR) traceback.print_exc() diff --git a/paddlespeech/server/restful/request.py b/paddlespeech/server/restful/request.py index 2be5f0e5..28908801 100644 --- a/paddlespeech/server/restful/request.py +++ b/paddlespeech/server/restful/request.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import List from typing import Optional from pydantic import BaseModel diff --git a/paddlespeech/server/restful/response.py b/paddlespeech/server/restful/response.py index ab5e395b..4e18ee0d 100644 --- a/paddlespeech/server/restful/response.py +++ b/paddlespeech/server/restful/response.py @@ -11,9 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import List -from typing import Optional - from pydantic import BaseModel __all__ = ['ASRResponse', 'TTSResponse'] diff --git a/paddlespeech/server/restful/tts_api.py b/paddlespeech/server/restful/tts_api.py index 11105147..c7e91300 100644 --- a/paddlespeech/server/restful/tts_api.py +++ b/paddlespeech/server/restful/tts_api.py @@ -114,7 +114,7 @@ def tts(request_body: TTSRequest): } except ServerBaseException as e: response = failed_response(e.error_code, e.msg) - except: + except BaseException: response = failed_response(ErrorCode.SERVER_UNKOWN_ERR) traceback.print_exc() diff --git a/paddlespeech/server/tests/asr/http_client.py b/paddlespeech/server/tests/asr/http_client.py index 14adb574..49f2adf7 100644 --- a/paddlespeech/server/tests/asr/http_client.py +++ b/paddlespeech/server/tests/asr/http_client.py @@ -10,11 +10,11 @@ # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
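# [editor's note] asr_api.py and tts_api.py above converge on one
# error-handling shape: a ServerBaseException becomes a structured failure
# response, anything else becomes SERVER_UNKOWN_ERR plus a printed traceback.
# A condensed sketch of that route pattern (``run_asr`` is a hypothetical
# stand-in for the engine call; the other names are the repo's own):
#
#     @router.post("/paddlespeech/asr")
#     def asr(request_body: ASRRequest):
#         try:
#             response = run_asr(request_body)
#         except ServerBaseException as e:
#             response = failed_response(e.error_code, e.msg)
#         except BaseException:
#             response = failed_response(ErrorCode.SERVER_UNKOWN_ERR)
#             traceback.print_exc()
#         return response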
# See the License for the -import requests +import base64 import json import time -import base64 -import io + +import requests def readwav2base64(wav_file): @@ -34,23 +34,23 @@ def main(): url = "http://127.0.0.1:8090/paddlespeech/asr" # start Timestamp - time_start=time.time() + time_start = time.time() test_audio_dir = "./16_audio.wav" audio = readwav2base64(test_audio_dir) data = { - "audio": audio, - "audio_format": "wav", - "sample_rate": 16000, - "lang": "zh_cn", - } + "audio": audio, + "audio_format": "wav", + "sample_rate": 16000, + "lang": "zh_cn", + } r = requests.post(url=url, data=json.dumps(data)) # ending Timestamp - time_end=time.time() - print('time cost',time_end - time_start, 's') + time_end = time.time() + print('time cost', time_end - time_start, 's') print(r.json()) diff --git a/paddlespeech/server/tests/tts/test_client.py b/paddlespeech/server/tests/tts/test_client.py index 65f4ccfe..e42c9bcf 100644 --- a/paddlespeech/server/tests/tts/test_client.py +++ b/paddlespeech/server/tests/tts/test_client.py @@ -25,6 +25,7 @@ import soundfile from paddlespeech.server.utils.audio_process import wav2pcm + # Request and response def tts_client(args): """ Request and response @@ -99,5 +100,5 @@ if __name__ == "__main__": print("Inference time: %f" % (time_consume)) print("The duration of synthesized audio: %f" % (duration)) print("The RTF is: %f" % (rtf)) - except: + except BaseException: print("Failed to synthesized audio.") diff --git a/paddlespeech/server/util.py b/paddlespeech/server/util.py index 48c4b8cb..1f1b0be1 100644 --- a/paddlespeech/server/util.py +++ b/paddlespeech/server/util.py @@ -219,7 +219,7 @@ class ConfigCache: try: cfg = yaml.load(file, Loader=yaml.FullLoader) self._data.update(cfg) - except: + except BaseException: self.flush() @property From 2ecab4e08f8fe36c3518be59cd6c9b06d99f0d4b Mon Sep 17 00:00:00 2001 From: TianYuan Date: Fri, 25 Feb 2022 14:45:50 +0800 Subject: [PATCH 09/39] Update setup.py --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 31dfa0bc..86ac964d 100644 --- a/setup.py +++ b/setup.py @@ -38,7 +38,7 @@ base = [ "jieba", "jsonlines", "kaldiio", - "librosa", + "librosa==0.8.1", "loguru", "matplotlib", "nara_wpe", From e8fea28384595e016ed7e081a91292d5fe57d3d5 Mon Sep 17 00:00:00 2001 From: huangyuxin Date: Fri, 25 Feb 2022 06:58:02 +0000 Subject: [PATCH 10/39] fix setup --- requirements.txt | 48 ------------------------------------------------ setup.py | 2 +- 2 files changed, 1 insertion(+), 49 deletions(-) delete mode 100644 requirements.txt diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 76082166..00000000 --- a/requirements.txt +++ /dev/null @@ -1,48 +0,0 @@ -ConfigArgParse -coverage -editdistance -g2p_en -g2pM -gpustat -h5py -inflect -jieba -jsonlines -kaldiio -librosa -loguru -matplotlib -nara_wpe -nltk -paddleaudio -paddlenlp -paddlespeech_ctcdecoders -paddlespeech_feat -pandas -phkit -Pillow -praatio==5.0.0 -pre-commit -pybind11 -pypi-kenlm -pypinyin -python-dateutil -pyworld -resampy==0.2.2 -sacrebleu -scipy -sentencepiece~=0.1.96 -snakeviz -soundfile~=0.10 -sox -soxbindings -textgrid -timer -tqdm -typeguard -unidecode -visualdl -webrtcvad -yacs~=0.1.8 -yq -zhon diff --git a/setup.py b/setup.py index 86ac964d..3f3632b3 100644 --- a/setup.py +++ b/setup.py @@ -27,7 +27,7 @@ from setuptools.command.install import install HERE = Path(os.path.abspath(os.path.dirname(__file__))) -VERSION = '0.1.1' +VERSION = '0.1.2' base = [ "editdistance", From 
7249d0ba59b8ea8601f80c37d0e2c548c78079f0 Mon Sep 17 00:00:00 2001 From: huangyuxin Date: Fri, 25 Feb 2022 07:41:05 +0000 Subject: [PATCH 11/39] fix benchmark --- tests/test_tipc/configs/conformer/train_benchmark.txt | 2 +- tests/test_tipc/configs/pwgan/train_benchmark.txt | 2 +- tests/test_tipc/prepare.sh | 10 +++++++--- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/tests/test_tipc/configs/conformer/train_benchmark.txt b/tests/test_tipc/configs/conformer/train_benchmark.txt index 3833f144..33b1debd 100644 --- a/tests/test_tipc/configs/conformer/train_benchmark.txt +++ b/tests/test_tipc/configs/conformer/train_benchmark.txt @@ -54,4 +54,4 @@ batch_size:16|30 fp_items:fp32 iteration:50 --profiler-options:"batch_range=[10,35];state=GPU;tracer_option=Default;profile_path=model.profile" -flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096 +flags:null diff --git a/tests/test_tipc/configs/pwgan/train_benchmark.txt b/tests/test_tipc/configs/pwgan/train_benchmark.txt index e936da3c..c64984dc 100644 --- a/tests/test_tipc/configs/pwgan/train_benchmark.txt +++ b/tests/test_tipc/configs/pwgan/train_benchmark.txt @@ -54,4 +54,4 @@ batch_size:6|16 fp_items:fp32 iteration:50 --profiler_options:"batch_range=[10,35];state=GPU;tracer_option=Default;profile_path=model.profile" -flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096 +flags:null diff --git a/tests/test_tipc/prepare.sh b/tests/test_tipc/prepare.sh index 0280e5d4..f10cb06c 100644 --- a/tests/test_tipc/prepare.sh +++ b/tests/test_tipc/prepare.sh @@ -26,7 +26,9 @@ if [ ${MODE} = "benchmark_train" ];then curPath=$(readlink -f "$(dirname "$0")") echo "curPath:"${curPath} cd ${curPath}/../.. - pip install . + apt-get install libsndfile1 + pip install pytest-runner kaldiio setuptools_scm -i https://pypi.tuna.tsinghua.edu.cn/simple + pip install . -i https://pypi.tuna.tsinghua.edu.cn/simple cd - if [ ${model_name} == "conformer" ]; then # set the URL for aishell_tiny dataset @@ -35,6 +37,8 @@ if [ ${MODE} = "benchmark_train" ];then if [ ${URL} == 'None' ];then echo "please contact author to get the URL.\n" exit + else + wget -P ${curPath}/../../dataset/aishell/ ${URL} fi sed -i "s#^URL_ROOT_TAG#URL_ROOT = '${URL}'#g" ${curPath}/conformer/scripts/aishell_tiny.py cp ${curPath}/conformer/scripts/aishell_tiny.py ${curPath}/../../dataset/aishell/ @@ -42,6 +46,7 @@ if [ ${MODE} = "benchmark_train" ];then source path.sh # download audio data sed -i "s#aishell.py#aishell_tiny.py#g" ./local/data.sh + sed -i "s#python3#python#g" ./local/data.sh bash ./local/data.sh || exit -1 if [ $? 
-ne 0 ]; then exit 1 @@ -56,7 +61,6 @@ if [ ${MODE} = "benchmark_train" ];then sed -i "s#conf/#test_tipc/conformer/benchmark_train/conf/#g" ${curPath}/conformer/benchmark_train/conf/conformer.yaml sed -i "s#data/#test_tipc/conformer/benchmark_train/data/#g" ${curPath}/conformer/benchmark_train/conf/tuning/decode.yaml sed -i "s#data/#test_tipc/conformer/benchmark_train/data/#g" ${curPath}/conformer/benchmark_train/conf/preprocess.yaml - fi if [ ${model_name} == "pwgan" ]; then @@ -73,4 +77,4 @@ if [ ${MODE} = "benchmark_train" ];then python ../paddlespeech/t2s/exps/gan_vocoder/normalize.py --metadata=dump/test/raw/metadata.jsonl --dumpdir=dump/test/norm --stats=dump/train/feats_stats.npy fi -fi \ No newline at end of file +fi From cea5728dd7c0be96578a13eb8ea3ddbe7665fc25 Mon Sep 17 00:00:00 2001 From: huangyuxin Date: Fri, 25 Feb 2022 08:01:15 +0000 Subject: [PATCH 12/39] fix unit test --- tests/unit/asr/deepspeech2_online_model_test.py | 3 ++- tests/unit/asr/deepspeech2_online_model_test.sh | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 tests/unit/asr/deepspeech2_online_model_test.sh diff --git a/tests/unit/asr/deepspeech2_online_model_test.py b/tests/unit/asr/deepspeech2_online_model_test.py index 3d634945..d26e5b15 100644 --- a/tests/unit/asr/deepspeech2_online_model_test.py +++ b/tests/unit/asr/deepspeech2_online_model_test.py @@ -191,7 +191,8 @@ class TestDeepSpeech2StaticModelOnline(unittest.TestCase): def setUp(self): export_prefix = "exp/deepspeech2_online/checkpoints/test_export" - os.makedirs( os.path.dirname(export_prefix), mode=0o755) + if not os.path.exists(os.path.dirname(export_prefix)): + os.makedirs(os.path.dirname(export_prefix), mode=0o755) infer_model = DeepSpeech2InferModelOnline( feat_size=161, dict_size=4233, diff --git a/tests/unit/asr/deepspeech2_online_model_test.sh b/tests/unit/asr/deepspeech2_online_model_test.sh new file mode 100644 index 00000000..cd5a2d3a --- /dev/null +++ b/tests/unit/asr/deepspeech2_online_model_test.sh @@ -0,0 +1,3 @@ +mkdir -p test_data +wget -P test_data https://paddlespeech.bj.bcebos.com/datasets/unit_test/asr/static_ds2online_inputs.pickle +python deepspeech2_online_model_test.py From f2eb5db0bdc7fdc350993478797c5bfc3862b5e1 Mon Sep 17 00:00:00 2001 From: huangyuxin Date: Fri, 25 Feb 2022 08:49:34 +0000 Subject: [PATCH 13/39] rename config files --- .../conformer/{train_benchmark.txt => train_infer_python.txt} | 0 .../pwgan/{train_benchmark.txt => train_infer_python.txt} | 0 tests/test_tipc/prepare.sh | 2 +- 3 files changed, 1 insertion(+), 1 deletion(-) rename tests/test_tipc/configs/conformer/{train_benchmark.txt => train_infer_python.txt} (100%) rename tests/test_tipc/configs/pwgan/{train_benchmark.txt => train_infer_python.txt} (100%) diff --git a/tests/test_tipc/configs/conformer/train_benchmark.txt b/tests/test_tipc/configs/conformer/train_infer_python.txt similarity index 100% rename from tests/test_tipc/configs/conformer/train_benchmark.txt rename to tests/test_tipc/configs/conformer/train_infer_python.txt diff --git a/tests/test_tipc/configs/pwgan/train_benchmark.txt b/tests/test_tipc/configs/pwgan/train_infer_python.txt similarity index 100% rename from tests/test_tipc/configs/pwgan/train_benchmark.txt rename to tests/test_tipc/configs/pwgan/train_infer_python.txt diff --git a/tests/test_tipc/prepare.sh b/tests/test_tipc/prepare.sh index f10cb06c..b46b2032 100644 --- a/tests/test_tipc/prepare.sh +++ b/tests/test_tipc/prepare.sh @@ -32,7 +32,7 @@ if [ ${MODE} = "benchmark_train" ];then cd - if [ 
${model_name} == "conformer" ]; then # set the URL for aishell_tiny dataset - URL='None' + URL=${conformer_data_URL:-"None"} echo "URL:"${URL} if [ ${URL} == 'None' ];then echo "please contact author to get the URL.\n" From f8375764b998af28a31983c76db11e2434ca6aae Mon Sep 17 00:00:00 2001 From: lym0302 Date: Fri, 25 Feb 2022 19:21:18 +0800 Subject: [PATCH 14/39] add paddlespeech stats, test=doc --- paddlespeech/cli/__init__.py | 1 + paddlespeech/cli/stats/__init__.py | 14 +++ paddlespeech/cli/stats/infer.py | 145 +++++++++++++++++++++++++++++ setup.py | 1 + 4 files changed, 161 insertions(+) create mode 100644 paddlespeech/cli/stats/__init__.py create mode 100644 paddlespeech/cli/stats/infer.py diff --git a/paddlespeech/cli/__init__.py b/paddlespeech/cli/__init__.py index cecf76fe..12ff9919 100644 --- a/paddlespeech/cli/__init__.py +++ b/paddlespeech/cli/__init__.py @@ -20,5 +20,6 @@ from .cls import CLSExecutor from .st import STExecutor from .text import TextExecutor from .tts import TTSExecutor +from .stats import StatsExecutor _locale._getdefaultlocale = (lambda *args: ['en_US', 'utf8']) diff --git a/paddlespeech/cli/stats/__init__.py b/paddlespeech/cli/stats/__init__.py new file mode 100644 index 00000000..9fe6c4ab --- /dev/null +++ b/paddlespeech/cli/stats/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from .infer import StatsExecutor diff --git a/paddlespeech/cli/stats/infer.py b/paddlespeech/cli/stats/infer.py new file mode 100644 index 00000000..c50fc4f9 --- /dev/null +++ b/paddlespeech/cli/stats/infer.py @@ -0,0 +1,145 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import argparse +from typing import List + +from prettytable import PrettyTable + +from ..log import logger +from ..utils import cli_register +from ..utils import stats_wrapper + +__all__ = ['StatsExecutor'] + +model_name_format = { + 'asr': 'Model-Language-Sample Rate', + 'cls': 'Model-Sample Rate', + 'st': 'Model-Source language-Target language', + 'text': 'Model-Task-Sample Rate', + 'tts': 'Model-Language' +} + + +@cli_register(name='paddlespeech.stats', description='Text infer command.') +class StatsExecutor(): + def __init__(self): + super(StatsExecutor, self).__init__() + + self.parser = argparse.ArgumentParser( + prog='paddlespeech.stats', add_help=True) + self.parser.add_argument( + '--task', + type=str, + default='asr', + choices=['asr', 'cls', 'st', 'text', 'tts'], + help='Choose speech task.', + required=True) + self.task_choices = ['asr', 'cls', 'st', 'text', 'tts'] + + def show_support_models(self, pretrained_models: dict): + fields = model_name_format[self.task].split("-") + table = PrettyTable(fields) + for key in pretrained_models: + table.add_row(key.split("-")) + print(table) + + def execute(self, argv: List[str]) -> bool: + """ + Command line entry. + """ + parser_args = self.parser.parse_args(argv) + self.task = parser_args.task + if self.task not in self.task_choices: + logger.error( + "Please input correct speech task, choices = ['asr', 'cls', 'st', 'text', 'tts']" + ) + return False + + if self.task == 'asr': + try: + from ..asr.infer import pretrained_models + logger.info( + "Here is the list of ASR pretrained models released by PaddleSpeech that can be used by command line and python API" + ) + self.show_support_models(pretrained_models) + # TODO show pretrained static model + return True + except BaseException: + logger.error("Failed to get the list of ASR pretrained models.") + return False + + elif self.task == 'cls': + try: + from ..cls.infer import pretrained_models + logger.info( + "Here is the list of CLS pretrained models released by PaddleSpeech that can be used by command line and python API" + ) + self.show_support_models(pretrained_models) + return True + except BaseException: + logger.error("Failed to get the list of CLS pretrained models.") + return False + + elif self.task == 'st': + try: + from ..st.infer import pretrained_models + logger.info( + "Here is the list of ST pretrained models released by PaddleSpeech that can be used by command line and python API" + ) + self.show_support_models(pretrained_models) + return True + except BaseException: + logger.error("Failed to get the list of ST pretrained models.") + return False + + elif self.task == 'text': + try: + from ..text.infer import pretrained_models + logger.info( + "Here is the list of TEXT pretrained models released by PaddleSpeech that can be used by command line and python API" + ) + self.show_support_models(pretrained_models) + return True + except BaseException: + logger.error( + "Failed to get the list of TEXT pretrained models.") + return False + + elif self.task == 'tts': + try: + from ..tts.infer import pretrained_models + logger.info( + "Here is the list of TTS pretrained models released by PaddleSpeech that can be used by command line and python API" + ) + self.show_support_models(pretrained_models) + # TODO show pretrained static model + return True + except BaseException: + logger.error("Failed to get the list of TTS pretrained models.") + return False + + @stats_wrapper + def __call__( + self, + task: str=None, ): + """ + Python API to call an executor. 
+        """
+        if task not in ['asr', 'cls', 'st', 'text', 'tts']:
+            print(
+                "Please input correct speech task, choices = ['asr', 'cls', 'st', 'text', 'tts']"
+            )
+        res = ""
+
+        return res
diff --git a/setup.py b/setup.py
index 9bb11d0d..ca19a575 100644
--- a/setup.py
+++ b/setup.py
@@ -66,6 +66,7 @@ requirements = {
         # fastapi server
         "fastapi",
         "uvicorn",
+        "prettytable"
     ],
     "develop": [
         "ConfigArgParse",

From 50bcb581410760f6f107ece977eca76a1f62f350 Mon Sep 17 00:00:00 2001
From: huangyuxin
Date: Fri, 25 Feb 2022 11:40:10 +0000
Subject: [PATCH 15/39] add ctc_loss speed compare topic, test=doc

---
 docs/topic/ctc/ctc_loss_speed_compare.ipynb | 356 ++++++++++++++++++++
 1 file changed, 356 insertions(+)
 create mode 100644 docs/topic/ctc/ctc_loss_speed_compare.ipynb

diff --git a/docs/topic/ctc/ctc_loss_speed_compare.ipynb b/docs/topic/ctc/ctc_loss_speed_compare.ipynb
new file mode 100644
index 00000000..0682247f
--- /dev/null
+++ b/docs/topic/ctc/ctc_loss_speed_compare.ipynb
@@ -0,0 +1,356 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "a1e738e0",
+   "metadata": {},
+   "source": [
+    "## Get the test logit data"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "id": "29d3368b",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "hlens.npy\n",
+      "logits.npy\n",
+      "ys_lens.npy\n",
+      "ys_pad.npy\n"
+     ]
+    }
+   ],
+   "source": [
+    "!mkdir -p ./test_data\n",
+    "!test -f ./test_data/ctc_loss_compare_data.tgz || wget -P ./test_data https://paddlespeech.bj.bcebos.com/datasets/unit_test/asr/ctc_loss_compare_data.tgz\n",
+    "!tar xzvf test_data/ctc_loss_compare_data.tgz -C ./test_data\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "id": "240caf1d",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "import numpy as np\n",
+    "import time\n",
+    "\n",
+    "data_dir = \"./test_data\"\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "id": "91bad949",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "logits_np = np.load(os.path.join(data_dir, \"logits.npy\"))\n",
+    "ys_pad_np = np.load(os.path.join(data_dir, \"ys_pad.npy\"))\n",
+    "hlens_np = np.load(os.path.join(data_dir, \"hlens.npy\"))\n",
+    "ys_lens_np = np.load(os.path.join(data_dir, \"ys_lens.npy\"))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "4cef2f15",
+   "metadata": {},
+   "source": [
+    "## CTC loss in torch"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "id": "90612004",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "'1.10.1+cu102'"
+      ]
+     },
+     "execution_count": 4,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "import torch\n",
+    "torch.__version__"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "id": "00799f97",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def torch_ctc_loss(use_cpu):\n",
+    "    if use_cpu:\n",
+    "        device = torch.device(\"cpu\")\n",
+    "    else:\n",
+    "        device = torch.device(\"cuda\")\n",
+    "\n",
+    "    reduction_type = \"sum\"\n",
+    "\n",
+    "    ctc_loss = torch.nn.CTCLoss(reduction=reduction_type)\n",
+    "\n",
+    "    ys_hat = torch.tensor(logits_np, device=device)\n",
+    "    ys_pad = torch.tensor(ys_pad_np, device=device)\n",
+    "    hlens = torch.tensor(hlens_np, device=device)\n",
+    "    ys_lens = torch.tensor(ys_lens_np, device=device)\n",
+    "\n",
+    "    ys_hat = ys_hat.transpose(0, 1)\n",
+    "\n",
+    "    # start timing\n",
+    "    start_time = time.time()\n",
+    "    ys_hat = ys_hat.log_softmax(2)\n",
+    "    loss = ctc_loss(ys_hat, ys_pad, hlens, ys_lens)\n",
+    "    end_time = time.time()\n",
+    "\n",
+    "    loss = loss / ys_hat.size(1)\n",
+    "    return end_time - start_time, loss.item()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "ba47b5a4",
+   "metadata": {},
+   "source": [
+    "## CTC loss in paddle"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "id": "6882a06e",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "'2.2.2'"
+      ]
+     },
+     "execution_count": 6,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "import paddle\n",
+    "paddle.__version__"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "id": "3cfa3b7c",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def paddle_ctc_loss(use_cpu):\n",
+    "    import paddle.nn as pn\n",
+    "    if use_cpu:\n",
+    "        device = \"cpu\"\n",
+    "    else:\n",
+    "        device = \"gpu\"\n",
+    "\n",
+    "    paddle.set_device(device)\n",
+    "\n",
+    "    logits = paddle.to_tensor(logits_np)\n",
+    "    ys_pad = paddle.to_tensor(ys_pad_np, dtype='int32')\n",
+    "    hlens = paddle.to_tensor(hlens_np, dtype='int64')\n",
+    "    ys_lens = paddle.to_tensor(ys_lens_np, dtype='int64')\n",
+    "\n",
+    "    logits = logits.transpose([1, 0, 2])\n",
+    "\n",
+    "    ctc_loss = pn.CTCLoss(reduction='sum')\n",
+    "    # start timing\n",
+    "    start_time = time.time()\n",
+    "    pn_loss = ctc_loss(logits, ys_pad, hlens, ys_lens)\n",
+    "    end_time = time.time()\n",
+    "\n",
+    "    pn_loss = pn_loss / logits.shape[1]\n",
+    "    return end_time - start_time, pn_loss.item()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "id": "40413ef9",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "CPU, iteration 10\n",
+      "torch_ctc_loss 159.17137145996094\n",
+      "paddle_ctc_loss 159.16574096679688\n",
+      "paddle average time 1.718252992630005\n",
+      "torch average time 0.17536230087280275\n",
+      "paddle time / torch time (cpu) 9.798303193320452\n",
+      "\n",
+      "GPU, iteration 10\n",
+      "torch_ctc_loss 159.172119140625\n",
+      "paddle_ctc_loss 159.17205810546875\n",
+      "paddle average time 0.018606925010681154\n",
+      "torch average time 0.0026710033416748047\n",
+      "paddle time / torch time (gpu) 6.966267963938231\n"
+     ]
+    }
+   ],
+   "source": [
+    "# on CPU\n",
+    "\n",
+    "iteration = 10\n",
+    "use_cpu = True\n",
+    "torch_total_time = 0\n",
+    "paddle_total_time = 0\n",
+    "for _ in range(iteration):\n",
+    "    cost_time, torch_loss = torch_ctc_loss(use_cpu)\n",
+    "    torch_total_time += cost_time\n",
+    "for _ in range(iteration):\n",
+    "    cost_time, paddle_loss = paddle_ctc_loss(use_cpu)\n",
+    "    paddle_total_time += cost_time\n",
+    "print (\"CPU, iteration\", iteration)\n",
+    "print (\"torch_ctc_loss\", torch_loss)\n",
+    "print (\"paddle_ctc_loss\", paddle_loss)\n",
+    "print (\"paddle average time\", paddle_total_time / iteration)\n",
+    "print (\"torch average time\", torch_total_time / iteration)\n",
+    "print (\"paddle time / torch time (cpu)\", paddle_total_time / torch_total_time)\n",
+    "\n",
+    "print (\"\")\n",
+    "\n",
+    "# on GPU\n",
+    "\n",
+    "use_cpu = False\n",
+    "torch_total_time = 0\n",
+    "paddle_total_time = 0\n",
+    "for _ in range(iteration):\n",
+    "    cost_time, torch_loss = torch_ctc_loss(use_cpu)\n",
+    "    torch_total_time += cost_time\n",
+    "for _ in range(iteration):\n",
+    "    cost_time, paddle_loss = paddle_ctc_loss(use_cpu)\n",
+    "    paddle_total_time += cost_time\n",
+    "print (\"GPU, iteration\", iteration)\n",
+    "print (\"torch_ctc_loss\", torch_loss)\n",
+    "print (\"paddle_ctc_loss\", paddle_loss)\n",
+    "print (\"paddle average time\", paddle_total_time / iteration)\n",
+    "print (\"torch average time\", torch_total_time / iteration)\n",
+    "print (\"paddle time / torch time (gpu)\", paddle_total_time / torch_total_time)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "7cdf8697",
+   "metadata": {},
+   "source": [
+    "## Extra: check the loss value with the CTCLoss wrapper in PaddleSpeech"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "id": "73fad81d",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "logits_np = np.load(os.path.join(data_dir, \"logits.npy\"))\n",
+    "ys_pad_np = np.load(os.path.join(data_dir, \"ys_pad.npy\"))\n",
+    "hlens_np = np.load(os.path.join(data_dir, \"hlens.npy\"))\n",
+    "ys_lens_np = np.load(os.path.join(data_dir, \"ys_lens.npy\"))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "id": "2b41e45d",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "2022-02-25 11:34:34.143 | INFO     | paddlespeech.s2t.modules.loss:__init__:41 - CTCLoss Loss reduction: sum, div-bs: True\n",
+      "2022-02-25 11:34:34.143 | INFO     | paddlespeech.s2t.modules.loss:__init__:42 - CTCLoss Grad Norm Type: instance\n",
+      "2022-02-25 11:34:34.144 | INFO     | paddlespeech.s2t.modules.loss:__init__:73 - CTCLoss() kwargs:{'norm_by_times': True}, not support: {'norm_by_batchsize': False, 'norm_by_total_logits_len': False}\n",
+      "loss 159.17205810546875\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/root/miniconda3/lib/python3.7/site-packages/paddle/fluid/dygraph/math_op_patch.py:253: UserWarning: The dtype of left and right variables are not the same, left dtype is paddle.float32, but right dtype is paddle.int32, the right dtype will convert to paddle.float32\n",
+      "  format(lhs_dtype, rhs_dtype, lhs_dtype))\n"
+     ]
+    }
+   ],
+   "source": [
+    "use_cpu = False\n",
+    "\n",
+    "from paddlespeech.s2t.modules.loss import CTCLoss\n",
+    "\n",
+    "if use_cpu:\n",
+    "    device = \"cpu\"\n",
+    "else:\n",
+    "    device = \"gpu\"\n",
+    "\n",
+    "paddle.set_device(device)\n",
+    "\n",
+    "blank_id = 0\n",
+    "reduction_type = 'sum'\n",
+    "batch_average = True\n",
+    "grad_norm_type = 'instance'\n",
+    "\n",
+    "criterion = CTCLoss(\n",
+    "    blank=blank_id,\n",
+    "    reduction=reduction_type,\n",
+    "    batch_average=batch_average,\n",
+    "    grad_norm_type=grad_norm_type)\n",
+    "\n",
+    "logits = paddle.to_tensor(logits_np)\n",
+    "ys_pad = paddle.to_tensor(ys_pad_np, dtype='int32')\n",
+    "hlens = paddle.to_tensor(hlens_np, dtype='int64')\n",
+    "ys_lens = paddle.to_tensor(ys_lens_np, dtype='int64')\n",
+    "\n",
+    "pn_ctc_loss = criterion(logits, ys_pad, hlens, ys_lens)\n",
+    "print(\"loss\", pn_ctc_loss.item())\n",
+    "    "
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.7.10"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
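
A note on the measurements in this notebook: both timing loops read time.time() around calls that, on GPU, launch asynchronous CUDA kernels, so part of what is measured can be launch overhead rather than kernel time. A minimal sketch of a stricter measurement on the torch side (assuming a CUDA device; timed_gpu is an illustrative helper, not part of the notebook):

    import time
    import torch

    def timed_gpu(fn, *args, **kwargs):
        # drain kernels launched earlier so the clock starts clean
        torch.cuda.synchronize()
        start = time.time()
        out = fn(*args, **kwargs)
        # wait until fn's own kernels have finished before stopping the clock
        torch.cuda.synchronize()
        return time.time() - start, out

The CPU ratios are unaffected by this; only the GPU ratio could shift.
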
time\", torch_total_time / iteration)\n", + "print (\"paddle time / torch time (gpu)\" , paddle_total_time/ torch_total_time)" + ] + }, + { + "cell_type": "markdown", + "id": "7cdf8697", + "metadata": {}, + "source": [ + "## 其他: 使用 PaddleSpeech 中的 ctcloss 查一下loss值" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "73fad81d", + "metadata": {}, + "outputs": [], + "source": [ + "logits_np = np.load(os.path.join(data_dir, \"logits.npy\"))\n", + "ys_pad_np = np.load(os.path.join(data_dir, \"ys_pad.npy\"))\n", + "hlens_np = np.load(os.path.join(data_dir, \"hlens.npy\"))\n", + "ys_lens_np = np.load(os.path.join(data_dir, \"ys_lens.npy\"))" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "2b41e45d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2022-02-25 11:34:34.143 | INFO | paddlespeech.s2t.modules.loss:__init__:41 - CTCLoss Loss reduction: sum, div-bs: True\n", + "2022-02-25 11:34:34.143 | INFO | paddlespeech.s2t.modules.loss:__init__:42 - CTCLoss Grad Norm Type: instance\n", + "2022-02-25 11:34:34.144 | INFO | paddlespeech.s2t.modules.loss:__init__:73 - CTCLoss() kwargs:{'norm_by_times': True}, not support: {'norm_by_batchsize': False, 'norm_by_total_logits_len': False}\n", + "loss 159.17205810546875\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/root/miniconda3/lib/python3.7/site-packages/paddle/fluid/dygraph/math_op_patch.py:253: UserWarning: The dtype of left and right variables are not the same, left dtype is paddle.float32, but right dtype is paddle.int32, the right dtype will convert to paddle.float32\n", + " format(lhs_dtype, rhs_dtype, lhs_dtype))\n" + ] + } + ], + "source": [ + "use_cpu = False\n", + "\n", + "from paddlespeech.s2t.modules.loss import CTCLoss\n", + "\n", + "if use_cpu:\n", + " device = \"cpu\"\n", + "else:\n", + " device = \"gpu\"\n", + "\n", + "paddle.set_device(device)\n", + "\n", + "blank_id=0\n", + "reduction_type='sum'\n", + "batch_average= True\n", + "grad_norm_type='instance'\n", + "\n", + "criterion = CTCLoss(\n", + " blank=blank_id,\n", + " reduction=reduction_type,\n", + " batch_average=batch_average,\n", + " grad_norm_type=grad_norm_type)\n", + "\n", + "logits = paddle.to_tensor(logits_np)\n", + "ys_pad = paddle.to_tensor(ys_pad_np,dtype='int32')\n", + "hlens = paddle.to_tensor(hlens_np, dtype='int64')\n", + "ys_lens = paddle.to_tensor(ys_lens_np, dtype='int64')\n", + "\n", + "pn_ctc_loss = criterion(logits, ys_pad, hlens, ys_lens)\n", + "print(\"loss\", pn_ctc_loss.item())\n", + " " + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 35357e775e74ca94bbcb0aefc6ffa15a33875c05 Mon Sep 17 00:00:00 2001 From: lym0302 Date: Fri, 25 Feb 2022 19:46:44 +0800 Subject: [PATCH 16/39] update, test=doc --- paddlespeech/cli/stats/infer.py | 22 +++++++++++++++++++--- setup.py | 2 +- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/paddlespeech/cli/stats/infer.py b/paddlespeech/cli/stats/infer.py index c50fc4f9..7e6df3d2 100644 --- a/paddlespeech/cli/stats/infer.py +++ b/paddlespeech/cli/stats/infer.py @@ -31,7 +31,9 @@ model_name_format = { } 
-@cli_register(name='paddlespeech.stats', description='Text infer command.')
+@cli_register(
+    name='paddlespeech.stats',
+    description='Get speech tasks support models list.')
 class StatsExecutor():
     def __init__(self):
         super(StatsExecutor, self).__init__()
@@ -73,7 +75,14 @@ class StatsExecutor():
                 "Here is the list of ASR pretrained models released by PaddleSpeech that can be used by command line and python API"
             )
             self.show_support_models(pretrained_models)
-            # TODO show pretrained static model
+
+            # show ASR static pretrained model
+            from paddlespeech.server.engine.asr.paddleinference.asr_engine import pretrained_models
+            logger.info(
+                "Here is the list of ASR static pretrained models released by PaddleSpeech that can be used by command line and python API"
+            )
+            self.show_support_models(pretrained_models)
+
             return True
         except BaseException:
             logger.error("Failed to get the list of ASR pretrained models.")
@@ -123,7 +132,14 @@ class StatsExecutor():
                 "Here is the list of TTS pretrained models released by PaddleSpeech that can be used by command line and python API"
             )
             self.show_support_models(pretrained_models)
-            # TODO show pretrained static model
+
+            # show TTS static pretrained model
+            from paddlespeech.server.engine.tts.paddleinference.tts_engine import pretrained_models
+            logger.info(
+                "Here is the list of TTS static pretrained models released by PaddleSpeech that can be used by command line and python API"
+            )
+            self.show_support_models(pretrained_models)
+
             return True
         except BaseException:
             logger.error("Failed to get the list of TTS pretrained models.")
diff --git a/setup.py b/setup.py
index 0823cc38..d7bd9682 100644
--- a/setup.py
+++ b/setup.py
@@ -62,13 +62,13 @@ base = [
     "visualdl",
     "webrtcvad",
     "yacs~=0.1.8",
+    "prettytable",
 ]
 
 server = [
     "fastapi",
     "uvicorn",
     "pattern_singleton",
-    "prettytable",
 ]
 
 requirements = {

From d60813a9e79af3f9e1bc474a56f3a8a08159921a Mon Sep 17 00:00:00 2001
From: huangyuxin
Date: Fri, 25 Feb 2022 11:47:15 +0000
Subject: [PATCH 17/39] remove pickle data

---
 tests/unit/asr/deepspeech2_online_model_test.sh |   4 ++--
 .../test_data/static_ds2online_inputs.pickle    | Bin 45895 -> 0 bytes
 2 files changed, 2 insertions(+), 2 deletions(-)
 delete mode 100644 tests/unit/asr/test_data/static_ds2online_inputs.pickle

diff --git a/tests/unit/asr/deepspeech2_online_model_test.sh b/tests/unit/asr/deepspeech2_online_model_test.sh
index cd5a2d3a..629238fd 100644
--- a/tests/unit/asr/deepspeech2_online_model_test.sh
+++ b/tests/unit/asr/deepspeech2_online_model_test.sh
@@ -1,3 +1,3 @@
-mkdir -p test_data
-wget -P test_data https://paddlespeech.bj.bcebos.com/datasets/unit_test/asr/static_ds2online_inputs.pickle
+mkdir -p ./test_data
+wget -P ./test_data https://paddlespeech.bj.bcebos.com/datasets/unit_test/asr/static_ds2online_inputs.pickle
 python deepspeech2_online_model_test.py
diff --git a/tests/unit/asr/test_data/static_ds2online_inputs.pickle b/tests/unit/asr/test_data/static_ds2online_inputs.pickle
deleted file mode 100644
index 8aca0543ad69f8bb64b77664375bbd281a3747c6..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 45895
[45895 bytes of base85-encoded binary data for the deleted pickle elided]
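
The patched shell script above still re-downloads the pickle on every run, while the CTC-loss notebook guards its download with `test -f ... || wget`. A sketch of the same idempotent guard in Python, should the shell wrapper ever be replaced (the URL is the one from the script; the rest is illustrative):

    from pathlib import Path
    import urllib.request

    URL = ("https://paddlespeech.bj.bcebos.com/datasets/unit_test/asr/"
           "static_ds2online_inputs.pickle")
    target = Path("./test_data") / Path(URL).name
    target.parent.mkdir(parents=True, exist_ok=True)  # mkdir -p ./test_data
    if not target.exists():                           # test -f ... || wget
        urllib.request.urlretrieve(URL, target)
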
From 5f1728f8552909ec3507c65ef7157cf1c6210ee1 Mon Sep 17 00:00:00 2001
From: lym0302
Date: Mon, 28 Feb 2022 14:11:17 +0800
Subject: [PATCH 18/39] rm server related, test=doc

---
 paddlespeech/cli/stats/infer.py | 16 ----------------
 1 file changed, 16 deletions(-)

diff --git a/paddlespeech/cli/stats/infer.py b/paddlespeech/cli/stats/infer.py
index 7e6df3d2..676f5f73 100644
--- a/paddlespeech/cli/stats/infer.py
+++ b/paddlespeech/cli/stats/infer.py
@@ -75,14 +75,6 @@ class StatsExecutor():
                 "Here is the list of ASR pretrained models released by PaddleSpeech that can be used by command line and python API"
             )
             self.show_support_models(pretrained_models)
-
-            # show ASR static pretrained model
-            from paddlespeech.server.engine.asr.paddleinference.asr_engine import pretrained_models
-            logger.info(
-                "Here is the list of ASR static pretrained models released by PaddleSpeech that can be used by command line and python API"
-            )
-            self.show_support_models(pretrained_models)
-
             return True
         except BaseException:
             logger.error("Failed to get the list of ASR pretrained models.")
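
Patch 18 backs out the `paddlespeech.server` imports that patch 16 added, since they made the plain CLI depend on the server extra. If the static-model tables should still appear when that extra happens to be installed, a guarded import keeps the dependency optional; a sketch (`_show_asr_static_models` is a hypothetical helper, not part of this patch):

    def _show_asr_static_models(self):
        try:
            from paddlespeech.server.engine.asr.paddleinference.asr_engine import (
                pretrained_models as asr_static_models)
        except ImportError:  # server extra not installed: skip quietly
            return
        self.show_support_models(asr_static_models)
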
@@ -132,14 +124,6 @@ class StatsExecutor():
                 "Here is the list of TTS pretrained models released by PaddleSpeech that can be used by command line and python API"
             )
             self.show_support_models(pretrained_models)
-
-            # show TTS static pretrained model
-            from paddlespeech.server.engine.tts.paddleinference.tts_engine import pretrained_models
-            logger.info(
-                "Here is the list of TTS static pretrained models released by PaddleSpeech that can be used by command line and python API"
-            )
-            self.show_support_models(pretrained_models)
-
             return True
         except BaseException:
             logger.error("Failed to get the list of TTS pretrained models.")

From 02056b3b02bfc7adfd8c5a30cca61dd59ede6ccb Mon Sep 17 00:00:00 2001
From: huangyuxin
Date: Mon, 28 Feb 2022 07:44:44 +0000
Subject: [PATCH 19/39] add conclusion

---
 docs/topic/ctc/ctc_loss_speed_compare.ipynb | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/docs/topic/ctc/ctc_loss_speed_compare.ipynb b/docs/topic/ctc/ctc_loss_speed_compare.ipynb
index 0682247f..eb7a030c 100644
--- a/docs/topic/ctc/ctc_loss_speed_compare.ipynb
+++ b/docs/topic/ctc/ctc_loss_speed_compare.ipynb
@@ -330,6 +330,19 @@
     "print(\"loss\", pn_ctc_loss.item())\n",
     "    "
    ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "de525d38",
+   "metadata": {},
+   "source": [
+    "## Conclusion\n",
+    "On CPU, the torch CTC loss computes about 9.8 times faster than the paddle one.\n",
+    "On GPU, the torch CTC loss computes about 6.97 times faster than the paddle one (both figures are the ratios printed above).\n",
+    "\n",
+    "## Additional observation\n",
+    "The torch CTC loss is not fully aligned with paddle on either device: the forward values agree to about 1e-2 on CPU and about 1e-4 on GPU."
+   ]
   }
  ],
  "metadata": {
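
The "not fully aligned" remark in the new conclusion cell can be made concrete with a tolerance check on the two loss values computed in the notebook; a sketch, with the tolerance taken from the conclusion itself:

    import numpy as np

    # CPU run: forward values agree to roughly 1e-2 (159.17137 vs 159.16574);
    # on GPU the gap shrinks to roughly 1e-4 (159.172119 vs 159.172058)
    np.testing.assert_allclose(torch_loss, paddle_loss, rtol=0, atol=1e-2)
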
""" - if task not in ['asr', 'cls', 'st', 'text', 'tts']: + self.task = task + if self.task not in self.task_choices: print( "Please input correct speech task, choices = ['asr', 'cls', 'st', 'text', 'tts']" ) - res = "" - return res + elif self.task == 'asr': + try: + from ..asr.infer import pretrained_models + print( + "Here is the list of ASR pretrained models released by PaddleSpeech that can be used by command line and python API" + ) + self.show_support_models(pretrained_models) + except BaseException: + print("Failed to get the list of ASR pretrained models.") + + elif self.task == 'cls': + try: + from ..cls.infer import pretrained_models + print( + "Here is the list of CLS pretrained models released by PaddleSpeech that can be used by command line and python API" + ) + self.show_support_models(pretrained_models) + except BaseException: + print("Failed to get the list of CLS pretrained models.") + + elif self.task == 'st': + try: + from ..st.infer import pretrained_models + print( + "Here is the list of ST pretrained models released by PaddleSpeech that can be used by command line and python API" + ) + self.show_support_models(pretrained_models) + except BaseException: + print("Failed to get the list of ST pretrained models.") + + elif self.task == 'text': + try: + from ..text.infer import pretrained_models + print( + "Here is the list of TEXT pretrained models released by PaddleSpeech that can be used by command line and python API" + ) + self.show_support_models(pretrained_models) + except BaseException: + print( + "Failed to get the list of TEXT pretrained models.") + + elif self.task == 'tts': + try: + from ..tts.infer import pretrained_models + print( + "Here is the list of TTS pretrained models released by PaddleSpeech that can be used by command line and python API" + ) + self.show_support_models(pretrained_models) + except BaseException: + print("Failed to get the list of TTS pretrained models.") \ No newline at end of file From 66a8beb27f7ee8b537b635513c8ac63606ae6e48 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Mon, 28 Feb 2022 09:47:06 +0000 Subject: [PATCH 21/39] update text frontend, test=tts --- README.md | 1 + README_cn.md | 1 + examples/aishell3/tts3/README.md | 4 +- examples/aishell3/tts3/conf/conformer.yaml | 110 ++++++++++++++++++ examples/other/g2p/README.md | 2 +- paddlespeech/t2s/frontend/tone_sandhi.py | 6 +- paddlespeech/t2s/frontend/zh_frontend.py | 22 ++++ .../frontend/zh_normalization/chronology.py | 10 +- .../t2s/frontend/zh_normalization/num.py | 7 +- .../zh_normalization/text_normlization.py | 9 ++ setup.py | 1 + 11 files changed, 165 insertions(+), 8 deletions(-) create mode 100644 examples/aishell3/tts3/conf/conformer.yaml diff --git a/README.md b/README.md index 46730797..e96d0710 100644 --- a/README.md +++ b/README.md @@ -561,6 +561,7 @@ You are warmly welcome to submit questions in [discussions](https://github.com/P - Many thanks to [JiehangXie](https://github.com/JiehangXie)/[PaddleBoBo](https://github.com/JiehangXie/PaddleBoBo) for developing Virtual Uploader(VUP)/Virtual YouTuber(VTuber) with PaddleSpeech TTS function. - Many thanks to [745165806](https://github.com/745165806)/[PaddleSpeechTask](https://github.com/745165806/PaddleSpeechTask) for contributing Punctuation Restoration model. - Many thanks to [kslz](https://github.com/745165806) for supplementary Chinese documents. +- Many thanks to [awmmmm](https://github.com/awmmmm) for contributing fastspeech2 aishell3 conformer pretrained model. 
From 66a8beb27f7ee8b537b635513c8ac63606ae6e48 Mon Sep 17 00:00:00 2001
From: TianYuan
Date: Mon, 28 Feb 2022 09:47:06 +0000
Subject: [PATCH 21/39] update text frontend, test=tts

---
 README.md                                     |   1 +
 README_cn.md                                  |   1 +
 examples/aishell3/tts3/README.md              |   4 +-
 examples/aishell3/tts3/conf/conformer.yaml    | 110 ++++++++++++++++++
 examples/other/g2p/README.md                  |   2 +-
 paddlespeech/t2s/frontend/tone_sandhi.py      |   6 +-
 paddlespeech/t2s/frontend/zh_frontend.py      |  22 ++++
 .../frontend/zh_normalization/chronology.py   |  10 +-
 .../t2s/frontend/zh_normalization/num.py      |   7 +-
 .../zh_normalization/text_normlization.py     |   9 ++
 setup.py                                      |   1 +
 11 files changed, 165 insertions(+), 8 deletions(-)
 create mode 100644 examples/aishell3/tts3/conf/conformer.yaml

diff --git a/README.md b/README.md
index 46730797..e96d0710 100644
--- a/README.md
+++ b/README.md
@@ -561,6 +561,7 @@ You are warmly welcome to submit questions in [discussions](https://github.com/P
 - Many thanks to [JiehangXie](https://github.com/JiehangXie)/[PaddleBoBo](https://github.com/JiehangXie/PaddleBoBo) for developing Virtual Uploader(VUP)/Virtual YouTuber(VTuber) with PaddleSpeech TTS function.
 - Many thanks to [745165806](https://github.com/745165806)/[PaddleSpeechTask](https://github.com/745165806/PaddleSpeechTask) for contributing Punctuation Restoration model.
 - Many thanks to [kslz](https://github.com/745165806) for supplementary Chinese documents.
+- Many thanks to [awmmmm](https://github.com/awmmmm) for contributing fastspeech2 aishell3 conformer pretrained model.
 
 Besides, PaddleSpeech depends on a lot of open source repositories. See [references](./docs/source/reference.md) for more information.
 
diff --git a/README_cn.md b/README_cn.md
index 9782240a..32d5c518 100644
--- a/README_cn.md
+++ b/README_cn.md
@@ -556,6 +556,7 @@ year={2021}
 - 非常感谢 [JiehangXie](https://github.com/JiehangXie)/[PaddleBoBo](https://github.com/JiehangXie/PaddleBoBo) 采用 PaddleSpeech 语音合成功能实现 Virtual Uploader(VUP)/Virtual YouTuber(VTuber) 虚拟主播。
 - 非常感谢 [745165806](https://github.com/745165806)/[PaddleSpeechTask](https://github.com/745165806/PaddleSpeechTask) 贡献标点重建相关模型。
 - 非常感谢 [kslz](https://github.com/kslz) 补充中文文档。
+- 非常感谢 [awmmmm](https://github.com/awmmmm) 提供 fastspeech2 aishell3 conformer 预训练模型。
 
 此外,PaddleSpeech 依赖于许多开源存储库。有关更多信息,请参阅 [references](./docs/source/reference.md)。
 
diff --git a/examples/aishell3/tts3/README.md b/examples/aishell3/tts3/README.md
index 281ad836..d02ad1b6 100644
--- a/examples/aishell3/tts3/README.md
+++ b/examples/aishell3/tts3/README.md
@@ -225,7 +225,9 @@ optional arguments:
 9. `--ngpu` is the number of gpus to use, if ngpu == 0, use cpu.
 
 ## Pretrained Model
-Pretrained FastSpeech2 model with no silence in the edge of audios. [fastspeech2_nosil_aishell3_ckpt_0.4.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_nosil_aishell3_ckpt_0.4.zip)
+Pretrained FastSpeech2 model with no silence in the edge of audios:
+- [fastspeech2_nosil_aishell3_ckpt_0.4.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_nosil_aishell3_ckpt_0.4.zip)
+- [fastspeech2_conformer_aishell3_ckpt_0.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_conformer_aishell3_ckpt_0.2.0.zip) (Thanks for [@awmmmm](https://github.com/awmmmm)'s contribution)
 
 FastSpeech2 checkpoint contains files listed below.
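
The new conformer.yaml below follows the layout of the other fastspeech2 configs; a sketch of how such a file is typically consumed in this repo's TTS examples (yacs is already a base requirement; the exact loading code is not part of this patch):

    import yaml
    from yacs.config import CfgNode

    with open("conf/conformer.yaml") as f:
        config = CfgNode(yaml.safe_load(f))

    print(config.fs)          # 24000
    print(config.model.adim)  # 384, the attention dimension
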
diff --git a/examples/aishell3/tts3/conf/conformer.yaml b/examples/aishell3/tts3/conf/conformer.yaml
new file mode 100644
index 00000000..ea73593d
--- /dev/null
+++ b/examples/aishell3/tts3/conf/conformer.yaml
@@ -0,0 +1,110 @@
+###########################################################
+#                FEATURE EXTRACTION SETTING               #
+###########################################################
+
+fs: 24000          # sr
+n_fft: 2048        # FFT size (samples).
+n_shift: 300       # Hop size (samples). 12.5ms
+win_length: 1200   # Window length (samples). 50ms
+                   # If set to null, it will be the same as fft_size.
+window: "hann"     # Window function.
+
+# Only used for feats_type != raw
+
+fmin: 80           # Minimum frequency of Mel basis.
+fmax: 7600         # Maximum frequency of Mel basis.
+n_mels: 80         # The number of mel basis.
+
+# Only used for the model using pitch features (e.g. FastSpeech2)
+f0min: 80          # Minimum f0 for pitch extraction.
+f0max: 400         # Maximum f0 for pitch extraction.
+
+
+###########################################################
+#                       DATA SETTING                      #
+###########################################################
+batch_size: 32
+num_workers: 4
+
+
+###########################################################
+#                       MODEL SETTING                     #
+###########################################################
+model:
+    adim: 384                              # attention dimension
+    aheads: 2                              # number of attention heads
+    elayers: 4                             # number of encoder layers
+    eunits: 1536                           # number of encoder ff units
+    dlayers: 4                             # number of decoder layers
+    dunits: 1536                           # number of decoder ff units
+    positionwise_layer_type: conv1d        # type of position-wise layer
+    positionwise_conv_kernel_size: 3       # kernel size of position wise conv layer
+    duration_predictor_layers: 2           # number of layers of duration predictor
+    duration_predictor_chans: 256          # number of channels of duration predictor
+    duration_predictor_kernel_size: 3      # filter size of duration predictor
+    postnet_layers: 5                      # number of layers of postnet
+    postnet_filts: 5                       # filter size of conv layers in postnet
+    postnet_chans: 256                     # number of channels of conv layers in postnet
+    encoder_normalize_before: True         # whether to perform layer normalization before the input
+    decoder_normalize_before: True         # whether to perform layer normalization before the input
+    reduction_factor: 1                    # reduction factor
+    encoder_type: conformer                # encoder type
+    decoder_type: conformer                # decoder type
+    conformer_pos_enc_layer_type: rel_pos  # conformer positional encoding type
+    conformer_self_attn_layer_type: rel_selfattn  # conformer self-attention type
+    conformer_activation_type: swish       # conformer activation type
+    use_macaron_style_in_conformer: true   # whether to use macaron style in conformer
+    use_cnn_in_conformer: true             # whether to use CNN in conformer
+    conformer_enc_kernel_size: 7           # kernel size in CNN module of conformer-based encoder
+    conformer_dec_kernel_size: 31          # kernel size in CNN module of conformer-based decoder
+    init_type: xavier_uniform              # initialization type
+    transformer_enc_dropout_rate: 0.2      # dropout rate for transformer encoder layer
+    transformer_enc_positional_dropout_rate: 0.2  # dropout rate for transformer encoder positional encoding
+    transformer_enc_attn_dropout_rate: 0.2 # dropout rate for transformer encoder attention layer
+    transformer_dec_dropout_rate: 0.2      # dropout rate for transformer decoder layer
+    transformer_dec_positional_dropout_rate: 0.2  # dropout rate for transformer decoder positional encoding
+    transformer_dec_attn_dropout_rate: 0.2 # dropout rate for transformer decoder attention layer
+    pitch_predictor_layers: 5              # number of conv layers in pitch predictor
+    pitch_predictor_chans: 256             # number of channels of conv layers in pitch predictor
+    pitch_predictor_kernel_size: 5         # kernel size of conv layers in pitch predictor
+    pitch_predictor_dropout: 0.5           # dropout rate in pitch predictor
+    pitch_embed_kernel_size: 1             # kernel size of conv embedding layer for pitch
+    pitch_embed_dropout: 0.0               # dropout rate after conv embedding layer for pitch
+    stop_gradient_from_pitch_predictor: true  # whether to stop the gradient from pitch predictor to encoder
+    energy_predictor_layers: 2             # number of conv layers in energy predictor
+    energy_predictor_chans: 256            # number of channels of conv layers in energy predictor
+    energy_predictor_kernel_size: 3        # kernel size of conv layers in energy predictor
+    energy_predictor_dropout: 0.5          # dropout rate in energy predictor
+    energy_embed_kernel_size: 1            # kernel size of conv embedding layer for energy
+    energy_embed_dropout: 0.0              # dropout rate after conv embedding layer for energy
+    stop_gradient_from_energy_predictor: false  # whether to stop the gradient from energy predictor to encoder
+    spk_embed_dim: 256                     # speaker embedding dimension
+    spk_embed_integration_type: concat     # speaker embedding integration type
+
+
+###########################################################
+#                     UPDATER SETTING                     #
+###########################################################
+updater:
+    use_masking: True     # whether to apply masking for padded part in loss calculation
+
+
+###########################################################
+#                    OPTIMIZER SETTING                    #
+###########################################################
+optimizer:
+    optim: adam           # optimizer type
+    learning_rate: 0.001  # learning rate
+
+###########################################################
+#                     TRAINING SETTING                    #
+###########################################################
+max_epoch: 1000
+num_snapshots: 5
+
+
+###########################################################
+#                       OTHER SETTING                     #
+###########################################################
+seed: 10086
diff --git a/examples/other/g2p/README.md b/examples/other/g2p/README.md
index c0f55bd4..141f7f74 100644
--- a/examples/other/g2p/README.md
+++ b/examples/other/g2p/README.md
@@ -10,7 +10,7 @@ Run the command below to get the results of the test.
 ```bash
 ./run.sh
 ```
-The `avg WER` of g2p is: 0.027124048652822204
+The `avg WER` of g2p is: 0.026014352515701198
 ```text
 ,--------------------------------------------------------------------.
 |      | # Snt # Wrd | Corr    Sub    Del    Ins    Err  S.Err |
diff --git a/paddlespeech/t2s/frontend/tone_sandhi.py b/paddlespeech/t2s/frontend/tone_sandhi.py
index 5264e068..07f7fa2b 100644
--- a/paddlespeech/t2s/frontend/tone_sandhi.py
+++ b/paddlespeech/t2s/frontend/tone_sandhi.py
@@ -63,7 +63,7 @@ class ToneSandhi():
             '扫把', '惦记'
         }
         self.must_not_neural_tone_words = {
-            "男子", "女子", "分子", "原子", "量子", "莲子", "石子", "瓜子", "电子"
+            "男子", "女子", "分子", "原子", "量子", "莲子", "石子", "瓜子", "电子", "人人", "虎虎"
         }
         self.punc = ":,;。?!“”‘’':,;.?!"
 
@@ -77,7 +77,9 @@ class ToneSandhi():
 
         # reduplication words for n. and v. e.g. 奶奶, 试试, 旺旺
奶奶, 试试, 旺旺
         for j, item in enumerate(word):
-            if j - 1 >= 0 and item == word[j - 1] and pos[0] in {"n", "v", "a"}:
+            if j - 1 >= 0 and item == word[j - 1] and pos[0] in {
+                    "n", "v", "a"
+            } and word not in self.must_not_neural_tone_words:
                 finals[j] = finals[j][:-1] + "5"
         ge_idx = word.find("个")
         if len(word) >= 1 and word[-1] in "吧呢哈啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶":
diff --git a/paddlespeech/t2s/frontend/zh_frontend.py b/paddlespeech/t2s/frontend/zh_frontend.py
index a905c412..bb8ed5b4 100644
--- a/paddlespeech/t2s/frontend/zh_frontend.py
+++ b/paddlespeech/t2s/frontend/zh_frontend.py
@@ -20,7 +20,10 @@ import numpy as np
 import paddle
 from g2pM import G2pM
 from pypinyin import lazy_pinyin
+from pypinyin import load_phrases_dict
+from pypinyin import load_single_dict
 from pypinyin import Style
+from pypinyin_dict.phrase_pinyin_data import large_pinyin
 
 from paddlespeech.t2s.frontend.generate_lexicon import generate_lexicon
 from paddlespeech.t2s.frontend.tone_sandhi import ToneSandhi
@@ -41,6 +44,8 @@ class Frontend():
             self.g2pM_model = G2pM()
             self.pinyin2phone = generate_lexicon(
                 with_tone=True, with_erhua=False)
+        else:
+            self.__init__pypinyin()
         self.must_erhua = {"小院儿", "胡同儿", "范儿", "老汉儿", "撒欢儿", "寻老礼儿", "妥妥儿"}
         self.not_erhua = {
             "虐儿", "为儿", "护儿", "瞒儿", "救儿", "替儿", "有儿", "一儿", "我儿", "俺儿", "妻儿",
@@ -62,6 +67,23 @@ class Frontend():
             for tone, id in tone_id:
                 self.vocab_tones[tone] = int(id)
 
+    def __init__pypinyin(self):
+        large_pinyin.load()
+
+        load_phrases_dict({u'开户行': [[u'ka1i'], [u'hu4'], [u'hang2']]})
+        load_phrases_dict({u'发卡行': [[u'fa4'], [u'ka3'], [u'hang2']]})
+        load_phrases_dict({u'放款行': [[u'fa4ng'], [u'kua3n'], [u'hang2']]})
+        load_phrases_dict({u'茧行': [[u'jia3n'], [u'hang2']]})
+        load_phrases_dict({u'行号': [[u'hang2'], [u'ha4o']]})
+        load_phrases_dict({u'各地': [[u'ge4'], [u'di4']]})
+        load_phrases_dict({u'借还款': [[u'jie4'], [u'hua2n'], [u'kua3n']]})
+        load_phrases_dict({u'时间为': [[u'shi2'], [u'jia1n'], [u'we2i']]})
+        load_phrases_dict({u'为准': [[u'we2i'], [u'zhu3n']]})
+        load_phrases_dict({u'色差': [[u'se4'], [u'cha1']]})
+
+        # adjust the preferred pinyin of the polyphonic character 地
+        load_single_dict({ord(u'地'): u'de,di4'})
+
     def _get_initials_finals(self, word: str) -> List[List[str]]:
         initials = []
         finals = []
diff --git a/paddlespeech/t2s/frontend/zh_normalization/chronology.py b/paddlespeech/t2s/frontend/zh_normalization/chronology.py
index bfa7d2b1..ea518913 100644
--- a/paddlespeech/t2s/frontend/zh_normalization/chronology.py
+++ b/paddlespeech/t2s/frontend/zh_normalization/chronology.py
@@ -63,7 +63,10 @@ def replace_time(match) -> str:
 
     result = f"{num2str(hour)}点"
     if minute.lstrip('0'):
-        result += f"{_time_num2str(minute)}分"
+        if int(minute) == 30:
+            result += "半"
+        else:
+            result += f"{_time_num2str(minute)}分"
     if second and second.lstrip('0'):
         result += f"{_time_num2str(second)}秒"
@@ -71,7 +74,10 @@ def replace_time(match) -> str:
         result += "至"
         result += f"{num2str(hour_2)}点"
         if minute_2.lstrip('0'):
-            result += f"{_time_num2str(minute_2)}分"
+            if int(minute_2) == 30:
+                result += "半"
+            else:
+                result += f"{_time_num2str(minute_2)}分"
         if second_2 and second_2.lstrip('0'):
             result += f"{_time_num2str(second_2)}秒"
diff --git a/paddlespeech/t2s/frontend/zh_normalization/num.py b/paddlespeech/t2s/frontend/zh_normalization/num.py
index 27a2f846..a83b42a4 100644
--- a/paddlespeech/t2s/frontend/zh_normalization/num.py
+++ b/paddlespeech/t2s/frontend/zh_normalization/num.py
@@ -28,7 +28,7 @@ UNITS = OrderedDict({
    8: '亿',
 })
 
-COM_QUANTIFIERS = 
'(朵|匹|张|座|回|场|尾|条|个|首|阙|阵|网|炮|顶|丘|棵|只|支|袭|辆|挑|担|颗|壳|窠|曲|墙|群|腔|砣|座|客|贯|扎|捆|刀|令|打|手|罗|坡|山|岭|江|溪|钟|队|单|双|对|出|口|头|脚|板|跳|枝|件|贴|针|线|管|名|位|身|堂|课|本|页|家|户|层|丝|毫|厘|分|钱|两|斤|担|铢|石|钧|锱|忽|(千|毫|微)克|毫|厘|(公)分|分|寸|尺|丈|里|寻|常|铺|程|(千|分|厘|毫|微)米|米|撮|勺|合|升|斗|石|盘|碗|碟|叠|桶|笼|盆|盒|杯|钟|斛|锅|簋|篮|盘|桶|罐|瓶|壶|卮|盏|箩|箱|煲|啖|袋|钵|年|月|日|季|刻|时|周|天|秒|分|旬|纪|岁|世|更|夜|春|夏|秋|冬|代|伏|辈|丸|泡|粒|颗|幢|堆|条|根|支|道|面|片|张|颗|块|元|(亿|千万|百万|万|千|百)|(亿|千万|百万|万|千|百|美|)元|(亿|千万|百万|万|千|百|)块|角|毛|分)' +COM_QUANTIFIERS = '(所|朵|匹|张|座|回|场|尾|条|个|首|阙|阵|网|炮|顶|丘|棵|只|支|袭|辆|挑|担|颗|壳|窠|曲|墙|群|腔|砣|座|客|贯|扎|捆|刀|令|打|手|罗|坡|山|岭|江|溪|钟|队|单|双|对|出|口|头|脚|板|跳|枝|件|贴|针|线|管|名|位|身|堂|课|本|页|家|户|层|丝|毫|厘|分|钱|两|斤|担|铢|石|钧|锱|忽|(千|毫|微)克|毫|厘|(公)分|分|寸|尺|丈|里|寻|常|铺|程|(千|分|厘|毫|微)米|米|撮|勺|合|升|斗|石|盘|碗|碟|叠|桶|笼|盆|盒|杯|钟|斛|锅|簋|篮|盘|桶|罐|瓶|壶|卮|盏|箩|箱|煲|啖|袋|钵|年|月|日|季|刻|时|周|天|秒|分|小时|旬|纪|岁|世|更|夜|春|夏|秋|冬|代|伏|辈|丸|泡|粒|颗|幢|堆|条|根|支|道|面|片|张|颗|块|元|(亿|千万|百万|万|千|百)|(亿|千万|百万|万|千|百|美|)元|(亿|千万|百万|万|千|百|)块|角|毛|分)' # 分数表达式 RE_FRAC = re.compile(r'(-?)(\d+)/(\d+)') @@ -110,7 +110,7 @@ def replace_default_num(match): # 纯小数 RE_DECIMAL_NUM = re.compile(r'(-?)((\d+)(\.\d+))' r'|(\.(\d+))') # 正整数 + 量词 -RE_POSITIVE_QUANTIFIERS = re.compile(r"(\d+)([多余几])?" + COM_QUANTIFIERS) +RE_POSITIVE_QUANTIFIERS = re.compile(r"(\d+)([多余几\+])?" + COM_QUANTIFIERS) RE_NUMBER = re.compile(r'(-?)((\d+)(\.\d+)?)' r'|(\.(\d+))') @@ -123,6 +123,8 @@ def replace_positive_quantifier(match) -> str: """ number = match.group(1) match_2 = match.group(2) + if match_2 == "+": + match_2 = "多" match_2: str = match_2 if match_2 else "" quantifiers: str = match.group(3) number: str = num2str(number) @@ -151,6 +153,7 @@ def replace_number(match) -> str: # 范围表达式 # match.group(1) and match.group(8) are copy from RE_NUMBER + RE_RANGE = re.compile( r'((-?)((\d+)(\.\d+)?)|(\.(\d+)))[-~]((-?)((\d+)(\.\d+)?)|(\.(\d+)))') diff --git a/paddlespeech/t2s/frontend/zh_normalization/text_normlization.py b/paddlespeech/t2s/frontend/zh_normalization/text_normlization.py index f9d1b8cb..bc663c70 100644 --- a/paddlespeech/t2s/frontend/zh_normalization/text_normlization.py +++ b/paddlespeech/t2s/frontend/zh_normalization/text_normlization.py @@ -63,11 +63,19 @@ class TextNormalizer(): # Only for pure Chinese here if lang == "zh": text = text.replace(" ", "") + # 过滤掉特殊字符 + text = re.sub(r'[《》【】<=>{}()()#&@“”^_|…\\]', '', text) text = self.SENTENCE_SPLITOR.sub(r'\1\n', text) text = text.strip() sentences = [sentence.strip() for sentence in re.split(r'\n+', text)] return sentences + def _post_replace(self, sentence: str) -> str: + sentence = sentence.replace('/', '每') + sentence = sentence.replace('~', '至') + + return sentence + def normalize_sentence(self, sentence: str) -> str: # basic character conversions sentence = tranditional_to_simplified(sentence) @@ -97,6 +105,7 @@ class TextNormalizer(): sentence) sentence = RE_DEFAULT_NUM.sub(replace_default_num, sentence) sentence = RE_NUMBER.sub(replace_number, sentence) + sentence = self._post_replace(sentence) return sentence diff --git a/setup.py b/setup.py index 3f3632b3..c1c29437 100644 --- a/setup.py +++ b/setup.py @@ -48,6 +48,7 @@ base = [ "paddlespeech_feat", "praatio==5.0.0", "pypinyin", + "pypinyin-dict", "python-dateutil", "pyworld", "resampy==0.2.2", From 54341c88a6e5d7595d20bfbb3a21cd84ecdaebfc Mon Sep 17 00:00:00 2001 From: Hui Zhang Date: Mon, 28 Feb 2022 10:39:19 +0000 Subject: [PATCH 22/39] cli batch and shell pipe, test=doc --- README.md | 15 +++++++++++++-- README_cn.md | 11 +++++++++++ demos/speech_recognition/.gitignore | 1 + demos/speech_recognition/README.md | 2 ++ demos/speech_recognition/README_cn.md | 2 ++ 
 demos/speech_recognition/run.sh       |  6 ++++++
 demos/text_to_speech/README.md        |  5 ++++-
 demos/text_to_speech/README_cn.md     |  4 ++++
 demos/text_to_speech/run.sh           |  4 ++++
 9 files changed, 47 insertions(+), 3 deletions(-)
 create mode 100644 demos/speech_recognition/.gitignore

diff --git a/README.md b/README.md
index 46730797..a142cb5e 100644
--- a/README.md
+++ b/README.md
@@ -196,16 +196,18 @@ Developers can have a try of our models with [PaddleSpeech Command Line](./paddl
 ```shell
 paddlespeech cls --input input.wav
 ```
+
 **Automatic Speech Recognition**
 ```shell
 paddlespeech asr --lang zh --input input_16k.wav
 ```
-**Speech Translation** (English to Chinese)
+**Speech Translation** (English to Chinese) (not supported on Mac or Windows yet)
 ```shell
 paddlespeech st --input input_16k.wav
 ```
+
 **Text-to-Speech**
 ```shell
 paddlespeech tts --input "你好,欢迎使用飞桨深度学习框架!" --output output.wav
@@ -218,7 +220,16 @@ paddlespeech tts --input "你好,欢迎使用飞桨深度学习框架!" --ou
 paddlespeech text --task punc --input 今天的天气真不错啊你下午有空吗我想约你一起去吃饭
 ```
 
-
+**Batch Process**
+```
+echo -e "1 欢迎光临。\n2 谢谢惠顾。" | paddlespeech tts
+```
+
+**Shell Pipeline**
+ASR + Punc:
+```
+paddlespeech asr --input ./zh.wav | paddlespeech text --task punc
+```
 For more command lines, please see: [demos](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/demos)
 
diff --git a/README_cn.md b/README_cn.md
index 9782240a..366d9a02 100644
--- a/README_cn.md
+++ b/README_cn.md
@@ -216,6 +216,17 @@ paddlespeech tts --input "你好,欢迎使用百度飞桨深度学习框架!
 paddlespeech text --task punc --input 今天的天气真不错啊你下午有空吗我想约你一起去吃饭
 ```
 
+**批处理**
+```
+echo -e "1 欢迎光临。\n2 谢谢惠顾。" | paddlespeech tts
+```
+
+**Shell管道**
+ASR + Punc:
+```
+paddlespeech asr --input ./zh.wav | paddlespeech text --task punc
+```
+
 更多命令行命令请参考 [demos](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/demos)
 
 > Note: 如果需要训练或者微调,请查看[语音识别](./docs/source/asr/quick_start.md), [语音合成](./docs/source/tts/quick_start.md)。
diff --git a/demos/speech_recognition/.gitignore b/demos/speech_recognition/.gitignore
new file mode 100644
index 00000000..d8dd7532
--- /dev/null
+++ b/demos/speech_recognition/.gitignore
@@ -0,0 +1 @@
+*.wav
diff --git a/demos/speech_recognition/README.md b/demos/speech_recognition/README.md
index c49afa35..5d964fce 100644
--- a/demos/speech_recognition/README.md
+++ b/demos/speech_recognition/README.md
@@ -27,6 +27,8 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespee
   paddlespeech asr --input ./zh.wav
   # English
   paddlespeech asr --model transformer_librispeech --lang en --input ./en.wav
+  # Chinese ASR + Punctuation Restoration
+  paddlespeech asr --input ./zh.wav | paddlespeech text --task punc
   ```
 (It doesn't matter if package `paddlespeech-ctcdecoders` is not found, this package is optional.)
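The batch mode added above follows a simple stdin convention: one `<utterance-id> <text>` pair per line. A minimal sketch of driving it from a file instead of `echo` (the `sentences.txt` file name is illustrative, not part of the patch, and this assumes the CLI treats redirected stdin the same as piped input):

```bash
# Batch TTS driven from a file; each line is "<utterance-id> <text>".
# sentences.txt is a hypothetical file name used only for illustration.
cat > sentences.txt <<'EOF'
1 欢迎光临。
2 谢谢惠顾。
EOF

# Equivalent to the `echo -e ... | paddlespeech tts` pipe shown above.
paddlespeech tts < sentences.txt
```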
diff --git a/demos/speech_recognition/README_cn.md b/demos/speech_recognition/README_cn.md index c2e38c91..ba1f1d65 100644 --- a/demos/speech_recognition/README_cn.md +++ b/demos/speech_recognition/README_cn.md @@ -25,6 +25,8 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespee paddlespeech asr --input ./zh.wav # 英文 paddlespeech asr --model transformer_librispeech --lang en --input ./en.wav + # 中文 + 标点恢复 + paddlespeech asr --input ./zh.wav | paddlespeech text --task punc ``` (如果显示 `paddlespeech-ctcdecoders` 这个 python 包没有找到的 Error,没有关系,这个包是非必须的。) diff --git a/demos/speech_recognition/run.sh b/demos/speech_recognition/run.sh index 5efc8b81..06466928 100755 --- a/demos/speech_recognition/run.sh +++ b/demos/speech_recognition/run.sh @@ -1,4 +1,10 @@ #!/bin/bash wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespeech.bj.bcebos.com/PaddleAudio/en.wav + +# asr paddlespeech asr --input ./zh.wav + + +# asr + punc +paddlespeech asr --input ./zh.wav | paddlespeech text --task punc \ No newline at end of file diff --git a/demos/text_to_speech/README.md b/demos/text_to_speech/README.md index 9d3c4ac5..2df72a82 100644 --- a/demos/text_to_speech/README.md +++ b/demos/text_to_speech/README.md @@ -17,11 +17,14 @@ The input of this demo should be a text of the specific language that can be pas ### 3. Usage - Command Line (Recommended) - Chinese - The default acoustic model is `Fastspeech2`, and the default vocoder is `Parallel WaveGAN`. ```bash paddlespeech tts --input "你好,欢迎使用百度飞桨深度学习框架!" ``` + - Batch Process + ```bash + echo -e "1 欢迎光临。\n2 谢谢惠顾。" | paddlespeech tts + ``` - Chinese, use `SpeedySpeech` as the acoustic model ```bash paddlespeech tts --am speedyspeech_csmsc --input "你好,欢迎使用百度飞桨深度学习框架!" diff --git a/demos/text_to_speech/README_cn.md b/demos/text_to_speech/README_cn.md index f075efda..7e02b962 100644 --- a/demos/text_to_speech/README_cn.md +++ b/demos/text_to_speech/README_cn.md @@ -24,6 +24,10 @@ ```bash paddlespeech tts --input "你好,欢迎使用百度飞桨深度学习框架!" ``` + - 批处理 + ```bash + echo -e "1 欢迎光临。\n2 谢谢惠顾。" | paddlespeech tts + ``` - 中文,使用 `SpeedySpeech` 作为声学模型 ```bash paddlespeech tts --am speedyspeech_csmsc --input "你好,欢迎使用百度飞桨深度学习框架!" 
diff --git a/demos/text_to_speech/run.sh b/demos/text_to_speech/run.sh index c2487aee..b1340241 100755 --- a/demos/text_to_speech/run.sh +++ b/demos/text_to_speech/run.sh @@ -1,3 +1,7 @@ #!/bin/bash +# single process paddlespeech tts --input 今天的天气不错啊 + +# Batch process +echo -e "1 欢迎光临。\n2 谢谢惠顾。" | paddlespeech tts \ No newline at end of file From 75098698d8eae48d1d0343cd683c7b315ea4a02d Mon Sep 17 00:00:00 2001 From: Hui Zhang Date: Mon, 28 Feb 2022 10:45:39 +0000 Subject: [PATCH 23/39] format,test=doc --- paddlespeech/s2t/io/sampler.py | 2 +- paddlespeech/s2t/models/u2_st/u2_st.py | 4 +-- .../t2s/modules/transformer/repeat.py | 2 +- .../unit/asr/deepspeech2_online_model_test.py | 36 ++++++++----------- 4 files changed, 17 insertions(+), 27 deletions(-) diff --git a/paddlespeech/s2t/io/sampler.py b/paddlespeech/s2t/io/sampler.py index 89752bb9..ac55af12 100644 --- a/paddlespeech/s2t/io/sampler.py +++ b/paddlespeech/s2t/io/sampler.py @@ -51,7 +51,7 @@ def _batch_shuffle(indices, batch_size, epoch, clipped=False): """ rng = np.random.RandomState(epoch) shift_len = rng.randint(0, batch_size - 1) - batch_indices = list(zip(*[iter(indices[shift_len:])] * batch_size)) + batch_indices = list(zip(* [iter(indices[shift_len:])] * batch_size)) rng.shuffle(batch_indices) batch_indices = [item for batch in batch_indices for item in batch] assert clipped is False diff --git a/paddlespeech/s2t/models/u2_st/u2_st.py b/paddlespeech/s2t/models/u2_st/u2_st.py index f7b05714..999723e5 100644 --- a/paddlespeech/s2t/models/u2_st/u2_st.py +++ b/paddlespeech/s2t/models/u2_st/u2_st.py @@ -33,8 +33,6 @@ from paddlespeech.s2t.modules.decoder import TransformerDecoder from paddlespeech.s2t.modules.encoder import ConformerEncoder from paddlespeech.s2t.modules.encoder import TransformerEncoder from paddlespeech.s2t.modules.loss import LabelSmoothingLoss -from paddlespeech.s2t.modules.mask import mask_finished_preds -from paddlespeech.s2t.modules.mask import mask_finished_scores from paddlespeech.s2t.modules.mask import subsequent_mask from paddlespeech.s2t.utils import checkpoint from paddlespeech.s2t.utils import layer_tools @@ -291,7 +289,7 @@ class U2STBaseModel(nn.Layer): device = speech.place # Let's assume B = batch_size and N = beam_size - # 1. Encoder and init hypothesis + # 1. Encoder and init hypothesis encoder_out, encoder_mask = self._forward_encoder( speech, speech_lengths, decoding_chunk_size, num_decoding_left_chunks, diff --git a/paddlespeech/t2s/modules/transformer/repeat.py b/paddlespeech/t2s/modules/transformer/repeat.py index 2073a78b..1e946adf 100644 --- a/paddlespeech/t2s/modules/transformer/repeat.py +++ b/paddlespeech/t2s/modules/transformer/repeat.py @@ -36,4 +36,4 @@ def repeat(N, fn): Returns: MultiSequential: Repeated model instance. """ - return MultiSequential(*[fn(n) for n in range(N)]) + return MultiSequential(* [fn(n) for n in range(N)]) diff --git a/tests/unit/asr/deepspeech2_online_model_test.py b/tests/unit/asr/deepspeech2_online_model_test.py index d26e5b15..f23c4926 100644 --- a/tests/unit/asr/deepspeech2_online_model_test.py +++ b/tests/unit/asr/deepspeech2_online_model_test.py @@ -11,16 +11,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import os +import pickle import unittest import numpy as np import paddle -import pickle -import os from paddle import inference -from paddlespeech.s2t.models.ds2_online import DeepSpeech2ModelOnline from paddlespeech.s2t.models.ds2_online import DeepSpeech2InferModelOnline +from paddlespeech.s2t.models.ds2_online import DeepSpeech2ModelOnline + class TestDeepSpeech2ModelOnline(unittest.TestCase): def setUp(self): @@ -185,15 +186,12 @@ class TestDeepSpeech2ModelOnline(unittest.TestCase): paddle.allclose(final_state_c_box, final_state_c_box_chk), True) - - class TestDeepSpeech2StaticModelOnline(unittest.TestCase): - def setUp(self): export_prefix = "exp/deepspeech2_online/checkpoints/test_export" if not os.path.exists(os.path.dirname(export_prefix)): os.makedirs(os.path.dirname(export_prefix), mode=0o755) - infer_model = DeepSpeech2InferModelOnline( + infer_model = DeepSpeech2InferModelOnline( feat_size=161, dict_size=4233, num_conv_layers=2, @@ -207,27 +205,25 @@ class TestDeepSpeech2StaticModelOnline(unittest.TestCase): with open("test_data/static_ds2online_inputs.pickle", "rb") as f: self.data_dict = pickle.load(f) - + self.setup_model(export_prefix) - def setup_model(self, export_prefix): - deepspeech_config = inference.Config( - export_prefix + ".pdmodel", - export_prefix + ".pdiparams") - if ('CUDA_VISIBLE_DEVICES' in os.environ.keys() and os.environ['CUDA_VISIBLE_DEVICES'].strip() != ''): + deepspeech_config = inference.Config(export_prefix + ".pdmodel", + export_prefix + ".pdiparams") + if ('CUDA_VISIBLE_DEVICES' in os.environ.keys() and + os.environ['CUDA_VISIBLE_DEVICES'].strip() != ''): deepspeech_config.enable_use_gpu(100, 0) deepspeech_config.enable_memory_optim() deepspeech_predictor = inference.create_predictor(deepspeech_config) self.predictor = deepspeech_predictor - + def test_unit(self): input_names = self.predictor.get_input_names() audio_handle = self.predictor.get_input_handle(input_names[0]) audio_len_handle = self.predictor.get_input_handle(input_names[1]) h_box_handle = self.predictor.get_input_handle(input_names[2]) c_box_handle = self.predictor.get_input_handle(input_names[3]) - x_chunk = self.data_dict["audio_chunk"] x_chunk_lens = self.data_dict["audio_chunk_lens"] @@ -246,13 +242,9 @@ class TestDeepSpeech2StaticModelOnline(unittest.TestCase): c_box_handle.reshape(chunk_state_c_box.shape) c_box_handle.copy_from_cpu(chunk_state_c_box) - - output_names = self.predictor.get_output_names() - output_handle = self.predictor.get_output_handle( - output_names[0]) - output_lens_handle = self.predictor.get_output_handle( - output_names[1]) + output_handle = self.predictor.get_output_handle(output_names[0]) + output_lens_handle = self.predictor.get_output_handle(output_names[1]) output_state_h_handle = self.predictor.get_output_handle( output_names[2]) output_state_c_handle = self.predictor.get_output_handle( @@ -264,7 +256,7 @@ class TestDeepSpeech2StaticModelOnline(unittest.TestCase): chunk_state_h_box = output_state_h_handle.copy_to_cpu() chunk_state_c_box = output_state_c_handle.copy_to_cpu() return True - + if __name__ == '__main__': unittest.main() From 335638ba1877a72d94b39f964e999acd6e18f26a Mon Sep 17 00:00:00 2001 From: Hui Zhang Date: Mon, 28 Feb 2022 11:01:50 +0000 Subject: [PATCH 24/39] update gitignore, test=doct --- .gitignore | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.gitignore b/.gitignore index cc8fff87..778824f5 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,7 @@ *.pyc .vscode *log +*.wav *.pdmodel *.pdiparams* *.zip @@ -30,5 +31,8 @@ 
tools/OpenBLAS/ tools/Miniconda3-latest-Linux-x86_64.sh tools/activate_python.sh tools/miniconda.sh +tools/CRF++-0.58/ + +speechx/fc_patch/ *output/ From 395c923dee8e2df8a9440242d9f06a4e5adae9f9 Mon Sep 17 00:00:00 2001 From: lym0302 Date: Mon, 28 Feb 2022 20:10:08 +0800 Subject: [PATCH 25/39] modified text sr to lang, test=doc --- paddlespeech/cli/stats/infer.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/paddlespeech/cli/stats/infer.py b/paddlespeech/cli/stats/infer.py index 76b2f47b..d60a6691 100644 --- a/paddlespeech/cli/stats/infer.py +++ b/paddlespeech/cli/stats/infer.py @@ -26,7 +26,7 @@ model_name_format = { 'asr': 'Model-Language-Sample Rate', 'cls': 'Model-Sample Rate', 'st': 'Model-Source language-Target language', - 'text': 'Model-Task-Sample Rate', + 'text': 'Model-Task-Language', 'tts': 'Model-Language' } @@ -180,8 +180,7 @@ class StatsExecutor(): ) self.show_support_models(pretrained_models) except BaseException: - print( - "Failed to get the list of TEXT pretrained models.") + print("Failed to get the list of TEXT pretrained models.") elif self.task == 'tts': try: @@ -191,4 +190,4 @@ class StatsExecutor(): ) self.show_support_models(pretrained_models) except BaseException: - print("Failed to get the list of TTS pretrained models.") \ No newline at end of file + print("Failed to get the list of TTS pretrained models.") From c64282e7a7741457a5ff35e4edccda8914d7243f Mon Sep 17 00:00:00 2001 From: Hui Zhang Date: Tue, 1 Mar 2022 10:13:13 +0800 Subject: [PATCH 26/39] fix shell pipe example --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index d6d97d8b..aa17c71e 100644 --- a/README.md +++ b/README.md @@ -225,8 +225,8 @@ paddlespeech tts --input "你好,欢迎使用飞桨深度学习框架!" 
--ou echo -e "1 欢迎光临。\n2 谢谢惠顾。" | paddlespeech tts ``` -**Shell Pipeline** -ASR + Punc: +**Shell Pipeline** +- ASR + Punctuation Restoration ``` paddlespeech asr --input ./zh.wav | paddlespeech text --task punc ``` From 72c0cda30cdb184c091a65a518969be98ed8e10f Mon Sep 17 00:00:00 2001 From: lym0302 Date: Tue, 1 Mar 2022 10:18:04 +0800 Subject: [PATCH 27/39] add paddlespeech_server stats, test=doc --- paddlespeech/cli/stats/infer.py | 2 +- paddlespeech/server/bin/__init__.py | 1 + .../server/bin/paddlespeech_server.py | 140 +++++++++++++++++- 3 files changed, 141 insertions(+), 2 deletions(-) diff --git a/paddlespeech/cli/stats/infer.py b/paddlespeech/cli/stats/infer.py index d60a6691..4ef50449 100644 --- a/paddlespeech/cli/stats/infer.py +++ b/paddlespeech/cli/stats/infer.py @@ -68,7 +68,7 @@ class StatsExecutor(): ) return False - if self.task == 'asr': + elif self.task == 'asr': try: from ..asr.infer import pretrained_models logger.info( diff --git a/paddlespeech/server/bin/__init__.py b/paddlespeech/server/bin/__init__.py index bd75747f..025aab09 100644 --- a/paddlespeech/server/bin/__init__.py +++ b/paddlespeech/server/bin/__init__.py @@ -14,3 +14,4 @@ from .paddlespeech_client import ASRClientExecutor from .paddlespeech_client import TTSClientExecutor from .paddlespeech_server import ServerExecutor +from .paddlespeech_server import ServerStatsExecutor diff --git a/paddlespeech/server/bin/paddlespeech_server.py b/paddlespeech/server/bin/paddlespeech_server.py index aff77d54..21fc5c65 100644 --- a/paddlespeech/server/bin/paddlespeech_server.py +++ b/paddlespeech/server/bin/paddlespeech_server.py @@ -16,15 +16,17 @@ from typing import List import uvicorn from fastapi import FastAPI +from prettytable import PrettyTable from ..executor import BaseExecutor from ..util import cli_server_register from ..util import stats_wrapper +from paddlespeech.cli.log import logger from paddlespeech.server.engine.engine_pool import init_engine_pool from paddlespeech.server.restful.api import setup_router from paddlespeech.server.utils.config import get_config -__all__ = ['ServerExecutor'] +__all__ = ['ServerExecutor', 'ServerStatsExecutor'] app = FastAPI( title="PaddleSpeech Serving API", description="Api", version="0.0.1") @@ -86,3 +88,139 @@ class ServerExecutor(BaseExecutor): config = get_config(config_file) if self.init(config): uvicorn.run(app, host=config.host, port=config.port, debug=True) + + +@cli_server_register( + name='paddlespeech_server.stats', + description='Get the models supported by each speech task in the service.') +class ServerStatsExecutor(): + def __init__(self): + super(ServerStatsExecutor, self).__init__() + + self.parser = argparse.ArgumentParser( + prog='paddlespeech_server.stats', add_help=True) + self.parser.add_argument( + '--task', + type=str, + default=None, + choices=['asr', 'tts'], + help='Choose speech task.', + required=True) + self.task_choices = ['asr', 'tts'] + self.model_name_format = { + 'asr': 'Model-Language-Sample Rate', + 'tts': 'Model-Language' + } + + def show_support_models(self, pretrained_models: dict): + fields = self.model_name_format[self.task].split("-") + table = PrettyTable(fields) + for key in pretrained_models: + table.add_row(key.split("-")) + print(table) + + def execute(self, argv: List[str]) -> bool: + """ + Command line entry. 
+ """ + parser_args = self.parser.parse_args(argv) + self.task = parser_args.task + if self.task not in self.task_choices: + logger.error( + "Please input correct speech task, choices = ['asr', 'tts']") + return False + + elif self.task == 'asr': + try: + from paddlespeech.cli.asr.infer import pretrained_models + logger.info( + "Here is the table of ASR pretrained models supported in the service." + ) + self.show_support_models(pretrained_models) + + # show ASR static pretrained model + from paddlespeech.server.engine.asr.paddleinference.asr_engine import pretrained_models + logger.info( + "Here is the table of ASR static pretrained models supported in the service." + ) + self.show_support_models(pretrained_models) + + return True + except BaseException: + logger.error( + "Failed to get the table of ASR pretrained models supported in the service." + ) + return False + + elif self.task == 'tts': + try: + from paddlespeech.cli.tts.infer import pretrained_models + logger.info( + "Here is the table of TTS pretrained models supported in the service." + ) + self.show_support_models(pretrained_models) + + # show TTS static pretrained model + from paddlespeech.server.engine.tts.paddleinference.tts_engine import pretrained_models + logger.info( + "Here is the table of TTS static pretrained models supported in the service." + ) + self.show_support_models(pretrained_models) + + return True + except BaseException: + logger.error( + "Failed to get the table of TTS pretrained models supported in the service." + ) + return False + + @stats_wrapper + def __call__( + self, + task: str=None, ): + """ + Python API to call an executor. + """ + self.task = task + if self.task not in self.task_choices: + print("Please input correct speech task, choices = ['asr', 'tts']") + + elif self.task == 'asr': + try: + from paddlespeech.cli.asr.infer import pretrained_models + print( + "Here is the table of ASR pretrained models supported in the service." + ) + self.show_support_models(pretrained_models) + + # show ASR static pretrained model + from paddlespeech.server.engine.asr.paddleinference.asr_engine import pretrained_models + print( + "Here is the table of ASR static pretrained models supported in the service." + ) + self.show_support_models(pretrained_models) + + except BaseException: + print( + "Failed to get the table of ASR pretrained models supported in the service." + ) + + elif self.task == 'tts': + try: + from paddlespeech.cli.tts.infer import pretrained_models + print( + "Here is the table of TTS pretrained models supported in the service." + ) + self.show_support_models(pretrained_models) + + # show TTS static pretrained model + from paddlespeech.server.engine.tts.paddleinference.tts_engine import pretrained_models + print( + "Here is the table of TTS static pretrained models supported in the service." + ) + self.show_support_models(pretrained_models) + + except BaseException: + print( + "Failed to get the table of TTS pretrained models supported in the service." 
+ ) From cb07bd2a94c8a39331eec5ae649bfe01331244aa Mon Sep 17 00:00:00 2001 From: TianYuan Date: Tue, 1 Mar 2022 03:41:24 +0000 Subject: [PATCH 28/39] add rtf for synthesize, add more vocoder for synthesize_e2e.sh, test=tts --- examples/csmsc/tts0/local/synthesize.sh | 106 +++++++++++++++--- examples/csmsc/tts0/local/synthesize_e2e.sh | 16 +-- examples/csmsc/tts2/local/synthesize.sh | 113 +++++++++++++++++--- examples/csmsc/tts2/local/synthesize_e2e.sh | 12 +-- examples/csmsc/tts3/local/synthesize.sh | 106 +++++++++++++++--- examples/csmsc/tts3/local/synthesize_e2e.sh | 12 +-- paddlespeech/t2s/exps/synthesize.py | 94 ++++++++++------ paddlespeech/t2s/exps/synthesize_e2e.py | 106 +++++++++--------- paddlespeech/t2s/exps/wavernn/synthesize.py | 2 +- paddlespeech/t2s/models/melgan/melgan.py | 2 +- paddlespeech/t2s/models/wavernn/wavernn.py | 14 ++- 11 files changed, 434 insertions(+), 149 deletions(-) diff --git a/examples/csmsc/tts0/local/synthesize.sh b/examples/csmsc/tts0/local/synthesize.sh index 4be06dd8..bfb4844b 100755 --- a/examples/csmsc/tts0/local/synthesize.sh +++ b/examples/csmsc/tts0/local/synthesize.sh @@ -3,18 +3,96 @@ config_path=$1 train_output_path=$2 ckpt_name=$3 +stage=0 +stop_stage=0 -FLAGS_allocator_strategy=naive_best_fit \ -FLAGS_fraction_of_gpu_memory_to_use=0.01 \ -python3 ${BIN_DIR}/../synthesize.py \ - --am=tacotron2_csmsc \ - --am_config=${config_path} \ - --am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ - --am_stat=dump/train/speech_stats.npy \ - --voc=pwgan_csmsc \ - --voc_config=pwg_baker_ckpt_0.4/pwg_default.yaml \ - --voc_ckpt=pwg_baker_ckpt_0.4/pwg_snapshot_iter_400000.pdz \ - --voc_stat=pwg_baker_ckpt_0.4/pwg_stats.npy \ - --test_metadata=dump/test/norm/metadata.jsonl \ - --output_dir=${train_output_path}/test \ - --phones_dict=dump/phone_id_map.txt +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/../synthesize.py \ + --am=tacotron2_csmsc \ + --am_config=${config_path} \ + --am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --am_stat=dump/train/speech_stats.npy \ + --voc=pwgan_csmsc \ + --voc_config=pwg_baker_ckpt_0.4/pwg_default.yaml \ + --voc_ckpt=pwg_baker_ckpt_0.4/pwg_snapshot_iter_400000.pdz \ + --voc_stat=pwg_baker_ckpt_0.4/pwg_stats.npy \ + --test_metadata=dump/test/norm/metadata.jsonl \ + --output_dir=${train_output_path}/test \ + --phones_dict=dump/phone_id_map.txt +fi + +# for more GAN Vocoders +# multi band melgan +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/../synthesize.py \ + --am=tacotron2_csmsc \ + --am_config=${config_path} \ + --am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --am_stat=dump/train/speech_stats.npy \ + --voc=mb_melgan_csmsc \ + --voc_config=mb_melgan_csmsc_ckpt_0.1.1/default.yaml \ + --voc_ckpt=mb_melgan_csmsc_ckpt_0.1.1/snapshot_iter_1000000.pdz\ + --voc_stat=mb_melgan_csmsc_ckpt_0.1.1/feats_stats.npy \ + --test_metadata=dump/test/norm/metadata.jsonl \ + --output_dir=${train_output_path}/test \ + --phones_dict=dump/phone_id_map.txt +fi + +if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then + FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/../synthesize.py \ + --am=tacotron2_csmsc \ + --am_config=${config_path} \ + --am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + 
--am_stat=dump/train/speech_stats.npy \ + --voc=style_melgan_csmsc \ + --voc_config=style_melgan_csmsc_ckpt_0.1.1/default.yaml \ + --voc_ckpt=style_melgan_csmsc_ckpt_0.1.1/snapshot_iter_1500000.pdz \ + --voc_stat=style_melgan_csmsc_ckpt_0.1.1/feats_stats.npy \ + --test_metadata=dump/test/norm/metadata.jsonl \ + --output_dir=${train_output_path}/test \ + --phones_dict=dump/phone_id_map.txt +fi + +# hifigan +if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then + echo "in hifigan syn" + FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/../synthesize.py \ + --am=tacotron2_csmsc \ + --am_config=${config_path} \ + --am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --am_stat=dump/train/speech_stats.npy \ + --voc=hifigan_csmsc \ + --voc_config=hifigan_csmsc_ckpt_0.1.1/default.yaml \ + --voc_ckpt=hifigan_csmsc_ckpt_0.1.1/snapshot_iter_2500000.pdz \ + --voc_stat=hifigan_csmsc_ckpt_0.1.1/feats_stats.npy \ + --test_metadata=dump/test/norm/metadata.jsonl \ + --output_dir=${train_output_path}/test \ + --phones_dict=dump/phone_id_map.txt +fi + +# wavernn +if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then + echo "in wavernn syn" + FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/../synthesize.py \ + --am=tacotron2_csmsc \ + --am_config=${config_path} \ + --am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --am_stat=dump/train/speech_stats.npy \ + --voc=wavernn_csmsc \ + --voc_config=wavernn_csmsc_ckpt_0.2.0/default.yaml \ + --voc_ckpt=wavernn_csmsc_ckpt_0.2.0/snapshot_iter_400000.pdz \ + --voc_stat=wavernn_csmsc_ckpt_0.2.0/feats_stats.npy \ + --test_metadata=dump/test/norm/metadata.jsonl \ + --output_dir=${train_output_path}/test \ + --phones_dict=dump/phone_id_map.txt +fi diff --git a/examples/csmsc/tts0/local/synthesize_e2e.sh b/examples/csmsc/tts0/local/synthesize_e2e.sh index 79bb9f83..4c73a18d 100755 --- a/examples/csmsc/tts0/local/synthesize_e2e.sh +++ b/examples/csmsc/tts0/local/synthesize_e2e.sh @@ -39,14 +39,14 @@ if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then --am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ --am_stat=dump/train/speech_stats.npy \ --voc=mb_melgan_csmsc \ - --voc_config=mb_melgan_baker_finetune_ckpt_0.5/finetune.yaml \ - --voc_ckpt=mb_melgan_baker_finetune_ckpt_0.5/snapshot_iter_2000000.pdz\ - --voc_stat=mb_melgan_baker_finetune_ckpt_0.5/feats_stats.npy \ + --voc_config=mb_melgan_csmsc_ckpt_0.1.1/default.yaml \ + --voc_ckpt=mb_melgan_csmsc_ckpt_0.1.1/snapshot_iter_1000000.pdz\ + --voc_stat=mb_melgan_csmsc_ckpt_0.1.1/feats_stats.npy \ --lang=zh \ --text=${BIN_DIR}/../sentences.txt \ --output_dir=${train_output_path}/test_e2e \ - --inference_dir=${train_output_path}/inference \ - --phones_dict=dump/phone_id_map.txt + --phones_dict=dump/phone_id_map.txt \ + --inference_dir=${train_output_path}/inference fi # the pretrained models haven't release now @@ -88,8 +88,8 @@ if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then --lang=zh \ --text=${BIN_DIR}/../sentences.txt \ --output_dir=${train_output_path}/test_e2e \ - --inference_dir=${train_output_path}/inference \ - --phones_dict=dump/phone_id_map.txt + --phones_dict=dump/phone_id_map.txt \ + --inference_dir=${train_output_path}/inference fi # wavernn @@ -111,4 +111,4 @@ if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then --output_dir=${train_output_path}/test_e2e \ --phones_dict=dump/phone_id_map.txt \ --inference_dir=${train_output_path}/inference -fi \ No newline at end of 
file +fi diff --git a/examples/csmsc/tts2/local/synthesize.sh b/examples/csmsc/tts2/local/synthesize.sh index cedc9717..07cf156e 100755 --- a/examples/csmsc/tts2/local/synthesize.sh +++ b/examples/csmsc/tts2/local/synthesize.sh @@ -1,20 +1,103 @@ #!/bin/bash + config_path=$1 train_output_path=$2 ckpt_name=$3 +stage=0 +stop_stage=0 + +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/../synthesize.py \ + --am=speedyspeech_csmsc \ + --am_config=${config_path} \ + --am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --am_stat=dump/train/speech_stats.npy \ + --voc=pwgan_csmsc \ + --voc_config=pwg_baker_ckpt_0.4/pwg_default.yaml \ + --voc_ckpt=pwg_baker_ckpt_0.4/pwg_snapshot_iter_400000.pdz \ + --voc_stat=pwg_baker_ckpt_0.4/pwg_stats.npy \ + --test_metadata=dump/test/norm/metadata.jsonl \ + --output_dir=${train_output_path}/test \ + --phones_dict=dump/phone_id_map.txt \ + --tones_dict=dump/tone_id_map.txt +fi + +# for more GAN Vocoders +# multi band melgan +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/../synthesize.py \ + --am=speedyspeech_csmsc \ + --am_config=${config_path} \ + --am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --am_stat=dump/train/speech_stats.npy \ + --voc=mb_melgan_csmsc \ + --voc_config=mb_melgan_csmsc_ckpt_0.1.1/default.yaml \ + --voc_ckpt=mb_melgan_csmsc_ckpt_0.1.1/snapshot_iter_1000000.pdz\ + --voc_stat=mb_melgan_csmsc_ckpt_0.1.1/feats_stats.npy \ + --test_metadata=dump/test/norm/metadata.jsonl \ + --output_dir=${train_output_path}/test \ + --phones_dict=dump/phone_id_map.txt \ + --tones_dict=dump/tone_id_map.txt +fi + +if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then + FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/../synthesize.py \ + --am=speedyspeech_csmsc \ + --am_config=${config_path} \ + --am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --am_stat=dump/train/speech_stats.npy \ + --voc=style_melgan_csmsc \ + --voc_config=style_melgan_csmsc_ckpt_0.1.1/default.yaml \ + --voc_ckpt=style_melgan_csmsc_ckpt_0.1.1/snapshot_iter_1500000.pdz \ + --voc_stat=style_melgan_csmsc_ckpt_0.1.1/feats_stats.npy \ + --test_metadata=dump/test/norm/metadata.jsonl \ + --output_dir=${train_output_path}/test \ + --phones_dict=dump/phone_id_map.txt \ + --tones_dict=dump/tone_id_map.txt +fi + +# hifigan +if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then + echo "in hifigan syn" + FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/../synthesize.py \ + --am=speedyspeech_csmsc \ + --am_config=${config_path} \ + --am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --am_stat=dump/train/speech_stats.npy \ + --voc=hifigan_csmsc \ + --voc_config=hifigan_csmsc_ckpt_0.1.1/default.yaml \ + --voc_ckpt=hifigan_csmsc_ckpt_0.1.1/snapshot_iter_2500000.pdz \ + --voc_stat=hifigan_csmsc_ckpt_0.1.1/feats_stats.npy \ + --test_metadata=dump/test/norm/metadata.jsonl \ + --output_dir=${train_output_path}/test \ + --phones_dict=dump/phone_id_map.txt \ + --tones_dict=dump/tone_id_map.txt +fi -FLAGS_allocator_strategy=naive_best_fit \ -FLAGS_fraction_of_gpu_memory_to_use=0.01 \ -python3 ${BIN_DIR}/../synthesize.py \ - --am=speedyspeech_csmsc \ - --am_config=${config_path} \ - 
--am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ - --am_stat=dump/train/feats_stats.npy \ - --voc=pwgan_csmsc \ - --voc_config=pwg_baker_ckpt_0.4/pwg_default.yaml \ - --voc_ckpt=pwg_baker_ckpt_0.4/pwg_snapshot_iter_400000.pdz \ - --voc_stat=pwg_baker_ckpt_0.4/pwg_stats.npy \ - --test_metadata=dump/test/norm/metadata.jsonl \ - --output_dir=${train_output_path}/test \ - --phones_dict=dump/phone_id_map.txt \ - --tones_dict=dump/tone_id_map.txt \ No newline at end of file +# wavernn +if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then + echo "in wavernn syn" + FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/../synthesize.py \ + --am=speedyspeech_csmsc \ + --am_config=${config_path} \ + --am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --am_stat=dump/train/speech_stats.npy \ + --voc=wavernn_csmsc \ + --voc_config=wavernn_csmsc_ckpt_0.2.0/default.yaml \ + --voc_ckpt=wavernn_csmsc_ckpt_0.2.0/snapshot_iter_400000.pdz \ + --voc_stat=wavernn_csmsc_ckpt_0.2.0/feats_stats.npy \ + --test_metadata=dump/test/norm/metadata.jsonl \ + --output_dir=${train_output_path}/test \ + --tones_dict=dump/tone_id_map.txt \ + --phones_dict=dump/phone_id_map.txt +fi diff --git a/examples/csmsc/tts2/local/synthesize_e2e.sh b/examples/csmsc/tts2/local/synthesize_e2e.sh index 35fcf251..d5862a61 100755 --- a/examples/csmsc/tts2/local/synthesize_e2e.sh +++ b/examples/csmsc/tts2/local/synthesize_e2e.sh @@ -22,9 +22,9 @@ if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then --lang=zh \ --text=${BIN_DIR}/../sentences.txt \ --output_dir=${train_output_path}/test_e2e \ - --inference_dir=${train_output_path}/inference \ --phones_dict=dump/phone_id_map.txt \ - --tones_dict=dump/tone_id_map.txt + --tones_dict=dump/tone_id_map.txt \ + --inference_dir=${train_output_path}/inference fi # for more GAN Vocoders @@ -44,9 +44,9 @@ if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then --lang=zh \ --text=${BIN_DIR}/../sentences.txt \ --output_dir=${train_output_path}/test_e2e \ - --inference_dir=${train_output_path}/inference \ --phones_dict=dump/phone_id_map.txt \ - --tones_dict=dump/tone_id_map.txt + --tones_dict=dump/tone_id_map.txt \ + --inference_dir=${train_output_path}/inference fi # the pretrained models haven't release now @@ -88,9 +88,9 @@ if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then --lang=zh \ --text=${BIN_DIR}/../sentences.txt \ --output_dir=${train_output_path}/test_e2e \ - --inference_dir=${train_output_path}/inference \ --phones_dict=dump/phone_id_map.txt \ - --tones_dict=dump/tone_id_map.txt + --tones_dict=dump/tone_id_map.txt \ + --inference_dir=${train_output_path}/inference fi diff --git a/examples/csmsc/tts3/local/synthesize.sh b/examples/csmsc/tts3/local/synthesize.sh index 19767426..273dacd5 100755 --- a/examples/csmsc/tts3/local/synthesize.sh +++ b/examples/csmsc/tts3/local/synthesize.sh @@ -3,18 +3,96 @@ config_path=$1 train_output_path=$2 ckpt_name=$3 +stage=0 +stop_stage=0 -FLAGS_allocator_strategy=naive_best_fit \ -FLAGS_fraction_of_gpu_memory_to_use=0.01 \ -python3 ${BIN_DIR}/../synthesize.py \ - --am=fastspeech2_csmsc \ - --am_config=${config_path} \ - --am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ - --am_stat=dump/train/speech_stats.npy \ - --voc=pwgan_csmsc \ - --voc_config=pwg_baker_ckpt_0.4/pwg_default.yaml \ - --voc_ckpt=pwg_baker_ckpt_0.4/pwg_snapshot_iter_400000.pdz \ - --voc_stat=pwg_baker_ckpt_0.4/pwg_stats.npy \ - --test_metadata=dump/test/norm/metadata.jsonl \ - 
--output_dir=${train_output_path}/test \ - --phones_dict=dump/phone_id_map.txt +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/../synthesize.py \ + --am=fastspeech2_csmsc \ + --am_config=${config_path} \ + --am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --am_stat=dump/train/speech_stats.npy \ + --voc=pwgan_csmsc \ + --voc_config=pwg_baker_ckpt_0.4/pwg_default.yaml \ + --voc_ckpt=pwg_baker_ckpt_0.4/pwg_snapshot_iter_400000.pdz \ + --voc_stat=pwg_baker_ckpt_0.4/pwg_stats.npy \ + --test_metadata=dump/test/norm/metadata.jsonl \ + --output_dir=${train_output_path}/test \ + --phones_dict=dump/phone_id_map.txt +fi + +# for more GAN Vocoders +# multi band melgan +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/../synthesize.py \ + --am=fastspeech2_csmsc \ + --am_config=${config_path} \ + --am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --am_stat=dump/train/speech_stats.npy \ + --voc=mb_melgan_csmsc \ + --voc_config=mb_melgan_csmsc_ckpt_0.1.1/default.yaml \ + --voc_ckpt=mb_melgan_csmsc_ckpt_0.1.1/snapshot_iter_1000000.pdz\ + --voc_stat=mb_melgan_csmsc_ckpt_0.1.1/feats_stats.npy \ + --test_metadata=dump/test/norm/metadata.jsonl \ + --output_dir=${train_output_path}/test \ + --phones_dict=dump/phone_id_map.txt +fi + +if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then + FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/../synthesize.py \ + --am=fastspeech2_csmsc \ + --am_config=${config_path} \ + --am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --am_stat=dump/train/speech_stats.npy \ + --voc=style_melgan_csmsc \ + --voc_config=style_melgan_csmsc_ckpt_0.1.1/default.yaml \ + --voc_ckpt=style_melgan_csmsc_ckpt_0.1.1/snapshot_iter_1500000.pdz \ + --voc_stat=style_melgan_csmsc_ckpt_0.1.1/feats_stats.npy \ + --test_metadata=dump/test/norm/metadata.jsonl \ + --output_dir=${train_output_path}/test \ + --phones_dict=dump/phone_id_map.txt +fi + +# hifigan +if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then + echo "in hifigan syn" + FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/../synthesize.py \ + --am=fastspeech2_csmsc \ + --am_config=${config_path} \ + --am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --am_stat=dump/train/speech_stats.npy \ + --voc=hifigan_csmsc \ + --voc_config=hifigan_csmsc_ckpt_0.1.1/default.yaml \ + --voc_ckpt=hifigan_csmsc_ckpt_0.1.1/snapshot_iter_2500000.pdz \ + --voc_stat=hifigan_csmsc_ckpt_0.1.1/feats_stats.npy \ + --test_metadata=dump/test/norm/metadata.jsonl \ + --output_dir=${train_output_path}/test \ + --phones_dict=dump/phone_id_map.txt +fi + +# wavernn +if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then + echo "in wavernn syn" + FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/../synthesize.py \ + --am=fastspeech2_csmsc \ + --am_config=${config_path} \ + --am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --am_stat=dump/train/speech_stats.npy \ + --voc=wavernn_csmsc \ + --voc_config=wavernn_csmsc_ckpt_0.2.0/default.yaml \ + --voc_ckpt=wavernn_csmsc_ckpt_0.2.0/snapshot_iter_400000.pdz \ + --voc_stat=wavernn_csmsc_ckpt_0.2.0/feats_stats.npy \ + --test_metadata=dump/test/norm/metadata.jsonl \ + 
--output_dir=${train_output_path}/test \ + --phones_dict=dump/phone_id_map.txt +fi diff --git a/examples/csmsc/tts3/local/synthesize_e2e.sh b/examples/csmsc/tts3/local/synthesize_e2e.sh index 44356e4b..9e25c072 100755 --- a/examples/csmsc/tts3/local/synthesize_e2e.sh +++ b/examples/csmsc/tts3/local/synthesize_e2e.sh @@ -22,8 +22,8 @@ if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then --lang=zh \ --text=${BIN_DIR}/../sentences.txt \ --output_dir=${train_output_path}/test_e2e \ - --inference_dir=${train_output_path}/inference \ - --phones_dict=dump/phone_id_map.txt + --phones_dict=dump/phone_id_map.txt \ + --inference_dir=${train_output_path}/inference fi # for more GAN Vocoders @@ -43,8 +43,8 @@ if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then --lang=zh \ --text=${BIN_DIR}/../sentences.txt \ --output_dir=${train_output_path}/test_e2e \ - --inference_dir=${train_output_path}/inference \ - --phones_dict=dump/phone_id_map.txt + --phones_dict=dump/phone_id_map.txt \ + --inference_dir=${train_output_path}/inference fi # the pretrained models haven't release now @@ -86,8 +86,8 @@ if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then --lang=zh \ --text=${BIN_DIR}/../sentences.txt \ --output_dir=${train_output_path}/test_e2e \ - --inference_dir=${train_output_path}/inference \ - --phones_dict=dump/phone_id_map.txt + --phones_dict=dump/phone_id_map.txt \ + --inference_dir=${train_output_path}/inference fi diff --git a/paddlespeech/t2s/exps/synthesize.py b/paddlespeech/t2s/exps/synthesize.py index 1c42a87c..81da14f2 100644 --- a/paddlespeech/t2s/exps/synthesize.py +++ b/paddlespeech/t2s/exps/synthesize.py @@ -20,6 +20,7 @@ import numpy as np import paddle import soundfile as sf import yaml +from timer import timer from yacs.config import CfgNode from paddlespeech.s2t.utils.dynamic_import import dynamic_import @@ -50,6 +51,18 @@ model_alias = { "paddlespeech.t2s.models.melgan:MelGANGenerator", "mb_melgan_inference": "paddlespeech.t2s.models.melgan:MelGANInference", + "style_melgan": + "paddlespeech.t2s.models.melgan:StyleMelGANGenerator", + "style_melgan_inference": + "paddlespeech.t2s.models.melgan:StyleMelGANInference", + "hifigan": + "paddlespeech.t2s.models.hifigan:HiFiGANGenerator", + "hifigan_inference": + "paddlespeech.t2s.models.hifigan:HiFiGANInference", + "wavernn": + "paddlespeech.t2s.models.wavernn:WaveRNN", + "wavernn_inference": + "paddlespeech.t2s.models.wavernn:WaveRNNInference", } @@ -146,10 +159,15 @@ def evaluate(args): voc_name = args.voc[:args.voc.rindex('_')] voc_class = dynamic_import(voc_name, model_alias) voc_inference_class = dynamic_import(voc_name + '_inference', model_alias) - voc = voc_class(**voc_config["generator_params"]) - voc.set_state_dict(paddle.load(args.voc_ckpt)["generator_params"]) - voc.remove_weight_norm() - voc.eval() + if voc_name != 'wavernn': + voc = voc_class(**voc_config["generator_params"]) + voc.set_state_dict(paddle.load(args.voc_ckpt)["generator_params"]) + voc.remove_weight_norm() + voc.eval() + else: + voc = voc_class(**voc_config["model"]) + voc.set_state_dict(paddle.load(args.voc_ckpt)["main_params"]) + voc.eval() voc_mu, voc_std = np.load(args.voc_stat) voc_mu = paddle.to_tensor(voc_mu) voc_std = paddle.to_tensor(voc_std) @@ -162,38 +180,51 @@ def evaluate(args): output_dir = Path(args.output_dir) output_dir.mkdir(parents=True, exist_ok=True) + N = 0 + T = 0 + for datum in test_dataset: utt_id = datum["utt_id"] - with paddle.no_grad(): - # acoustic model - if am_name == 'fastspeech2': - phone_ids = paddle.to_tensor(datum["text"]) - 
spk_emb = None - spk_id = None - # multi speaker - if args.voice_cloning and "spk_emb" in datum: - spk_emb = paddle.to_tensor(np.load(datum["spk_emb"])) - elif "spk_id" in datum: - spk_id = paddle.to_tensor(datum["spk_id"]) - mel = am_inference(phone_ids, spk_id=spk_id, spk_emb=spk_emb) - elif am_name == 'speedyspeech': - phone_ids = paddle.to_tensor(datum["phones"]) - tone_ids = paddle.to_tensor(datum["tones"]) - mel = am_inference(phone_ids, tone_ids) - elif am_name == 'tacotron2': - phone_ids = paddle.to_tensor(datum["text"]) - spk_emb = None - # multi speaker - if args.voice_cloning and "spk_emb" in datum: - spk_emb = paddle.to_tensor(np.load(datum["spk_emb"])) - mel = am_inference(phone_ids, spk_emb=spk_emb) + with timer() as t: + with paddle.no_grad(): + # acoustic model + if am_name == 'fastspeech2': + phone_ids = paddle.to_tensor(datum["text"]) + spk_emb = None + spk_id = None + # multi speaker + if args.voice_cloning and "spk_emb" in datum: + spk_emb = paddle.to_tensor(np.load(datum["spk_emb"])) + elif "spk_id" in datum: + spk_id = paddle.to_tensor(datum["spk_id"]) + mel = am_inference( + phone_ids, spk_id=spk_id, spk_emb=spk_emb) + elif am_name == 'speedyspeech': + phone_ids = paddle.to_tensor(datum["phones"]) + tone_ids = paddle.to_tensor(datum["tones"]) + mel = am_inference(phone_ids, tone_ids) + elif am_name == 'tacotron2': + phone_ids = paddle.to_tensor(datum["text"]) + spk_emb = None + # multi speaker + if args.voice_cloning and "spk_emb" in datum: + spk_emb = paddle.to_tensor(np.load(datum["spk_emb"])) + mel = am_inference(phone_ids, spk_emb=spk_emb) # vocoder wav = voc_inference(mel) + + wav = wav.numpy() + N += wav.size + T += t.elapse + speed = wav.size / t.elapse + rtf = am_config.fs / speed + print( + f"{utt_id}, mel: {mel.shape}, wave: {wav.size}, time: {t.elapse}s, Hz: {speed}, RTF: {rtf}." 
+ ) sf.write( - str(output_dir / (utt_id + ".wav")), - wav.numpy(), - samplerate=am_config.fs) + str(output_dir / (utt_id + ".wav")), wav, samplerate=am_config.fs) print(f"{utt_id} done!") + print(f"generation speed: {N / T}Hz, RTF: {am_config.fs / (N / T) }") def main(): @@ -246,7 +277,8 @@ def main(): default='pwgan_csmsc', choices=[ 'pwgan_csmsc', 'pwgan_ljspeech', 'pwgan_aishell3', 'pwgan_vctk', - 'mb_melgan_csmsc' + 'mb_melgan_csmsc', 'wavernn_csmsc', 'hifigan_csmsc', + 'style_melgan_csmsc' ], help='Choose vocoder type of tts task.') diff --git a/paddlespeech/t2s/exps/synthesize_e2e.py b/paddlespeech/t2s/exps/synthesize_e2e.py index 75c631b8..be78b953 100644 --- a/paddlespeech/t2s/exps/synthesize_e2e.py +++ b/paddlespeech/t2s/exps/synthesize_e2e.py @@ -21,6 +21,7 @@ import soundfile as sf import yaml from paddle import jit from paddle.static import InputSpec +from timer import timer from yacs.config import CfgNode from paddlespeech.s2t.utils.dynamic_import import dynamic_import @@ -233,59 +234,68 @@ def evaluate(args): # but still not stopping in the end (NOTE by yuantian01 Feb 9 2022) if am_name == 'tacotron2': merge_sentences = True - + N = 0 + T = 0 for utt_id, sentence in sentences: - get_tone_ids = False - if am_name == 'speedyspeech': - get_tone_ids = True - if args.lang == 'zh': - input_ids = frontend.get_input_ids( - sentence, - merge_sentences=merge_sentences, - get_tone_ids=get_tone_ids) - phone_ids = input_ids["phone_ids"] - if get_tone_ids: - tone_ids = input_ids["tone_ids"] - elif args.lang == 'en': - input_ids = frontend.get_input_ids( - sentence, merge_sentences=merge_sentences) - phone_ids = input_ids["phone_ids"] - else: - print("lang should in {'zh', 'en'}!") - with paddle.no_grad(): - flags = 0 - for i in range(len(phone_ids)): - part_phone_ids = phone_ids[i] - # acoustic model - if am_name == 'fastspeech2': - # multi speaker - if am_dataset in {"aishell3", "vctk"}: - spk_id = paddle.to_tensor(args.spk_id) - mel = am_inference(part_phone_ids, spk_id) - else: + with timer() as t: + get_tone_ids = False + if am_name == 'speedyspeech': + get_tone_ids = True + if args.lang == 'zh': + input_ids = frontend.get_input_ids( + sentence, + merge_sentences=merge_sentences, + get_tone_ids=get_tone_ids) + phone_ids = input_ids["phone_ids"] + if get_tone_ids: + tone_ids = input_ids["tone_ids"] + elif args.lang == 'en': + input_ids = frontend.get_input_ids( + sentence, merge_sentences=merge_sentences) + phone_ids = input_ids["phone_ids"] + else: + print("lang should in {'zh', 'en'}!") + with paddle.no_grad(): + flags = 0 + for i in range(len(phone_ids)): + part_phone_ids = phone_ids[i] + # acoustic model + if am_name == 'fastspeech2': + # multi speaker + if am_dataset in {"aishell3", "vctk"}: + spk_id = paddle.to_tensor(args.spk_id) + mel = am_inference(part_phone_ids, spk_id) + else: + mel = am_inference(part_phone_ids) + elif am_name == 'speedyspeech': + part_tone_ids = tone_ids[i] + if am_dataset in {"aishell3", "vctk"}: + spk_id = paddle.to_tensor(args.spk_id) + mel = am_inference(part_phone_ids, part_tone_ids, + spk_id) + else: + mel = am_inference(part_phone_ids, part_tone_ids) + elif am_name == 'tacotron2': mel = am_inference(part_phone_ids) - elif am_name == 'speedyspeech': - part_tone_ids = tone_ids[i] - if am_dataset in {"aishell3", "vctk"}: - spk_id = paddle.to_tensor(args.spk_id) - mel = am_inference(part_phone_ids, part_tone_ids, - spk_id) + # vocoder + wav = voc_inference(mel) + if flags == 0: + wav_all = wav + flags = 1 else: - mel = am_inference(part_phone_ids, 
part_tone_ids) - elif am_name == 'tacotron2': - mel = am_inference(part_phone_ids) - # vocoder - wav = voc_inference(mel) - if flags == 0: - wav_all = wav - flags = 1 - else: - wav_all = paddle.concat([wav_all, wav]) + wav_all = paddle.concat([wav_all, wav]) + wav = wav_all.numpy() + N += wav.size + T += t.elapse + speed = wav.size / t.elapse + rtf = am_config.fs / speed + print( + f"{utt_id}, mel: {mel.shape}, wave: {wav.shape}, time: {t.elapse}s, Hz: {speed}, RTF: {rtf}." + ) sf.write( - str(output_dir / (utt_id + ".wav")), - wav_all.numpy(), - samplerate=am_config.fs) + str(output_dir / (utt_id + ".wav")), wav, samplerate=am_config.fs) print(f"{utt_id} done!") + print(f"generation speed: {N / T}Hz, RTF: {am_config.fs / (N / T) }") def main(): diff --git a/paddlespeech/t2s/exps/wavernn/synthesize.py b/paddlespeech/t2s/exps/wavernn/synthesize.py index 4357b282..d23e9cb7 100644 --- a/paddlespeech/t2s/exps/wavernn/synthesize.py +++ b/paddlespeech/t2s/exps/wavernn/synthesize.py @@ -91,7 +91,7 @@ def main(): target=config.inference.target, overlap=config.inference.overlap, mu_law=config.mu_law, - gen_display=True) + gen_display=False) wav = wav.numpy() N += wav.size T += t.elapse diff --git a/paddlespeech/t2s/models/melgan/melgan.py b/paddlespeech/t2s/models/melgan/melgan.py index 6a139659..22d8fd9e 100644 --- a/paddlespeech/t2s/models/melgan/melgan.py +++ b/paddlespeech/t2s/models/melgan/melgan.py @@ -66,7 +66,7 @@ class MelGANGenerator(nn.Layer): nonlinear_activation_params (Dict[str, Any], optional): Parameters passed to the linear activation in the upsample network, by default {} pad (str): Padding function module name before dilated convolution layer. - pad_params (dict): Hyperparameters for padding function. + pad_params (dict): Hyperparameters for padding function. use_final_nonlinear_activation (nn.Layer): Activation function for the final layer. use_weight_norm (bool): Whether to use weight norm. If set to true, it will be applied to all of the conv layers. 
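The RTF instrumentation added to `synthesize.py` and `synthesize_e2e.py` above reduces to a small piece of arithmetic; here is a minimal, self-contained sketch (the 24 kHz sample rate and the sample counts are example values only):

```python
# Minimal sketch of the real-time factor (RTF) arithmetic used above.
# `elapse` is the wall-clock synthesis time in seconds, `wav_size` the
# number of generated samples, and `fs` the output sample rate in Hz.
def rtf(wav_size: int, elapse: float, fs: int = 24000) -> float:
    speed = wav_size / elapse  # samples generated per wall-clock second
    return fs / speed          # == elapse / (wav_size / fs): synthesis time
                               # divided by audio duration; RTF < 1 means
                               # faster than real time

# 3.6125 s of 24 kHz audio synthesized in 0.35 s -> RTF ~= 0.097
print(rtf(wav_size=86700, elapse=0.35))
```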
diff --git a/paddlespeech/t2s/models/wavernn/wavernn.py b/paddlespeech/t2s/models/wavernn/wavernn.py
index 1320ffa3..95907043 100644
--- a/paddlespeech/t2s/models/wavernn/wavernn.py
+++ b/paddlespeech/t2s/models/wavernn/wavernn.py
@@ -509,16 +509,20 @@ class WaveRNN(nn.Layer):
         total_len = num_folds * (target + overlap) + overlap
 
         # Need some silence for the run warmup
-        slience_len = overlap // 2
+        slience_len = 0
+        linear_len = slience_len
         fade_len = overlap - slience_len
         slience = paddle.zeros([slience_len], dtype=paddle.float32)
-        linear = paddle.ones([fade_len], dtype=paddle.float32)
+        linear = paddle.ones([linear_len], dtype=paddle.float32)
 
         # Equal power crossfade
         # fade_in increase from 0 to 1, fade_out reduces from 1 to 0
-        t = paddle.linspace(-1, 1, fade_len, dtype=paddle.float32)
-        fade_in = paddle.sqrt(0.5 * (1 + t))
-        fade_out = paddle.sqrt(0.5 * (1 - t))
+        sigmoid_scale = 2.3
+        t = paddle.linspace(
+            -sigmoid_scale, sigmoid_scale, fade_len, dtype=paddle.float32)
+        # a sigmoid curve should give a smoother crossfade
+        fade_in = paddle.nn.functional.sigmoid(t)
+        fade_out = 1 - paddle.nn.functional.sigmoid(t)
         # Concat the silence to the fades
         fade_out = paddle.concat([linear, fade_out])
         fade_in = paddle.concat([slience, fade_in])
From 641984ae30f52928258a33af29d8a6345134da72 Mon Sep 17 00:00:00 2001
From: TianYuan
Date: Tue, 1 Mar 2022 09:51:05 +0000
Subject: [PATCH 29/39] add code annotation, test=tts

---
 examples/csmsc/tts0/local/synthesize.sh     | 2 ++
 examples/csmsc/tts0/local/synthesize_e2e.sh | 1 +
 examples/csmsc/tts2/local/synthesize.sh     | 2 ++
 examples/csmsc/tts2/local/synthesize_e2e.sh | 2 +-
 examples/csmsc/tts3/local/synthesize.sh     | 2 ++
 examples/csmsc/tts3/local/synthesize_e2e.sh | 1 +
 6 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/examples/csmsc/tts0/local/synthesize.sh b/examples/csmsc/tts0/local/synthesize.sh
index bfb4844b..5b8ed15e 100755
--- a/examples/csmsc/tts0/local/synthesize.sh
+++ b/examples/csmsc/tts0/local/synthesize.sh
@@ -6,6 +6,7 @@ ckpt_name=$3
 stage=0
 stop_stage=0
 
+# pwgan
 if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
     FLAGS_allocator_strategy=naive_best_fit \
     FLAGS_fraction_of_gpu_memory_to_use=0.01 \
@@ -42,6 +43,7 @@ if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
     --phones_dict=dump/phone_id_map.txt
 fi
 
+# style melgan
 if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
     FLAGS_allocator_strategy=naive_best_fit \
     FLAGS_fraction_of_gpu_memory_to_use=0.01 \
diff --git a/examples/csmsc/tts0/local/synthesize_e2e.sh b/examples/csmsc/tts0/local/synthesize_e2e.sh
index 4c73a18d..f7675873 100755
--- a/examples/csmsc/tts0/local/synthesize_e2e.sh
+++ b/examples/csmsc/tts0/local/synthesize_e2e.sh
@@ -8,6 +8,7 @@ stage=0
 stop_stage=0
 
 # TODO: tacotron2 动转静的结果没有静态图的响亮, 可能还是 decode 的时候某个函数动静不对齐
+# pwgan
 if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
     FLAGS_allocator_strategy=naive_best_fit \
     FLAGS_fraction_of_gpu_memory_to_use=0.01 \
diff --git a/examples/csmsc/tts2/local/synthesize.sh b/examples/csmsc/tts2/local/synthesize.sh
index 07cf156e..37b29818 100755
--- a/examples/csmsc/tts2/local/synthesize.sh
+++ b/examples/csmsc/tts2/local/synthesize.sh
@@ -6,6 +6,7 @@ ckpt_name=$3
 stage=0
 stop_stage=0
 
+# pwgan
 if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
     FLAGS_allocator_strategy=naive_best_fit \
     FLAGS_fraction_of_gpu_memory_to_use=0.01 \
@@ -44,6 +45,7 @@ if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
     --tones_dict=dump/tone_id_map.txt
 fi
 
+# style melgan
 if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
     FLAGS_allocator_strategy=naive_best_fit \
FLAGS_fraction_of_gpu_memory_to_use=0.01 \ diff --git a/examples/csmsc/tts2/local/synthesize_e2e.sh b/examples/csmsc/tts2/local/synthesize_e2e.sh index d5862a61..553b4554 100755 --- a/examples/csmsc/tts2/local/synthesize_e2e.sh +++ b/examples/csmsc/tts2/local/synthesize_e2e.sh @@ -7,6 +7,7 @@ ckpt_name=$3 stage=0 stop_stage=0 +# pwgan if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then FLAGS_allocator_strategy=naive_best_fit \ FLAGS_fraction_of_gpu_memory_to_use=0.01 \ @@ -93,7 +94,6 @@ if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then --inference_dir=${train_output_path}/inference fi - # wavernn if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then echo "in wavernn syn_e2e" diff --git a/examples/csmsc/tts3/local/synthesize.sh b/examples/csmsc/tts3/local/synthesize.sh index 273dacd5..043bb52f 100755 --- a/examples/csmsc/tts3/local/synthesize.sh +++ b/examples/csmsc/tts3/local/synthesize.sh @@ -6,6 +6,7 @@ ckpt_name=$3 stage=0 stop_stage=0 +# pwgan if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then FLAGS_allocator_strategy=naive_best_fit \ FLAGS_fraction_of_gpu_memory_to_use=0.01 \ @@ -42,6 +43,7 @@ if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then --phones_dict=dump/phone_id_map.txt fi +# style melgan if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then FLAGS_allocator_strategy=naive_best_fit \ FLAGS_fraction_of_gpu_memory_to_use=0.01 \ diff --git a/examples/csmsc/tts3/local/synthesize_e2e.sh b/examples/csmsc/tts3/local/synthesize_e2e.sh index 9e25c072..512e062b 100755 --- a/examples/csmsc/tts3/local/synthesize_e2e.sh +++ b/examples/csmsc/tts3/local/synthesize_e2e.sh @@ -7,6 +7,7 @@ ckpt_name=$3 stage=0 stop_stage=0 +# pwgan if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then FLAGS_allocator_strategy=naive_best_fit \ FLAGS_fraction_of_gpu_memory_to_use=0.01 \ From ab0448873815158b9bf28f9f3e200007afe70c4c Mon Sep 17 00:00:00 2001 From: lym0302 Date: Tue, 1 Mar 2022 21:12:37 +0800 Subject: [PATCH 30/39] update server cli, test=doc --- demos/speech_server/README.md | 59 +++------ demos/speech_server/README_cn.md | 62 +++------ demos/speech_server/conf/application.yaml | 18 +-- demos/speech_server/conf/asr/asr.yaml | 2 +- demos/speech_server/conf/asr/asr_pd.yaml | 5 +- demos/speech_server/conf/tts/tts.yaml | 2 +- demos/speech_server/conf/tts/tts_pd.yaml | 14 +- paddlespeech/cli/__init__.py | 2 +- paddlespeech/cli/tts/infer.py | 9 ++ .../server/bin/paddlespeech_client.py | 1 - paddlespeech/server/conf/application.yaml | 2 +- paddlespeech/server/conf/asr/asr.yaml | 2 +- paddlespeech/server/conf/asr/asr_pd.yaml | 5 +- paddlespeech/server/conf/tts/tts.yaml | 2 +- paddlespeech/server/conf/tts/tts_pd.yaml | 18 +-- .../engine/asr/paddleinference/asr_engine.py | 7 +- .../server/engine/asr/python/asr_engine.py | 24 +++- .../engine/tts/paddleinference/tts_engine.py | 121 +++++++++++++----- .../server/engine/tts/python/tts_engine.py | 87 ++++++++++--- paddlespeech/server/restful/tts_api.py | 5 + paddlespeech/server/utils/paddle_predictor.py | 26 +++- 21 files changed, 294 insertions(+), 179 deletions(-) diff --git a/demos/speech_server/README.md b/demos/speech_server/README.md index 39007f6c..ac5cc4b0 100644 --- a/demos/speech_server/README.md +++ b/demos/speech_server/README.md @@ -15,6 +15,17 @@ You can choose one way from easy, meduim and hard to install paddlespeech. ### 2. Prepare config File The configuration file contains the service-related configuration files and the model configuration related to the voice tasks contained in the service. 
They are all under the `conf` folder.
+**Note: The configuration of `engine_backend` in `application.yaml` represents all speech tasks included in the started service.**
+If the service you want to start should contain only one speech task, you need to comment out the speech tasks that are not needed. For example, if you only want to use the speech recognition (ASR) service, you can comment out the speech synthesis (TTS) service, as in the following example:
+```bash
+engine_backend:
+    asr: 'conf/asr/asr.yaml'
+    #tts: 'conf/tts/tts.yaml'
+```
+
+**Note: The configuration file of `engine_backend` in `application.yaml` needs to match the configuration type of `engine_type`.**
+When the configuration file of `engine_backend` is `XXX.yaml`, the configuration type of `engine_type` needs to be set to `python`; when the configuration file of `engine_backend` is `XXX_pd.yaml`, the configuration type of `engine_type` needs to be set to `inference`.
+
 The input of the ASR client demo should be a WAV file (`.wav`), and the sample rate must be the same as the model's.
 
 Here are sample files for this ASR client demo that can be downloaded:
@@ -76,6 +87,7 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespee
 
 ### 4. ASR Client Usage
+**Note:** The response time will be slightly longer when using the client for the first time.
 - Command Line (Recommended)
   ```
   paddlespeech_client asr --server_ip 127.0.0.1 --port 8090 --input ./zh.wav
@@ -122,6 +134,7 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespee
   ```
 ### 5. TTS Client Usage
+**Note:** The response time will be slightly longer when using the client for the first time.
 - Command Line (Recommended)
   ```bash
   paddlespeech_client tts --server_ip 127.0.0.1 --port 8090 --input "您好,欢迎使用百度飞桨语音合成服务。" --output output.wav
@@ -147,8 +160,6 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespee
     [2022-02-23 15:20:37,875] [    INFO] - Save synthesized audio successfully on output.wav.
     [2022-02-23 15:20:37,875] [    INFO] - Audio duration: 3.612500 s.
     [2022-02-23 15:20:37,875] [    INFO] - Response time: 0.348050 s.
-    [2022-02-23 15:20:37,875] [    INFO] - RTF: 0.096346
-
   ```
@@ -174,51 +185,13 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespee
     Save synthesized audio successfully on ./output.wav.
     Audio duration: 3.612500 s.
     Response time: 0.388317 s.
-    RTF: 0.107493
   ```

-## Pretrained Models
+## Models supported by the service
 ### ASR model
-Here is a list of [ASR pretrained models](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/demos/speech_recognition/README.md#4pretrained-models) released by PaddleSpeech, both command line and python interfaces are available:
-
-| Model | Language | Sample Rate
-| :--- | :---: | :---: |
-| conformer_wenetspeech| zh| 16000
-| transformer_librispeech| en| 16000
+Get all models supported by the ASR service via `paddlespeech_server stats --task asr`; the static models among them can be used for inference with Paddle Inference.
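Beyond `paddlespeech_client`, the service can also be called over plain HTTP. The sketch below shows the general shape of such a TTS request; the route `/paddlespeech/tts` and the JSON field names are assumptions to verify against the running server, while the base64 decoding mirrors the `wav_base64` string the TTS engine returns in later hunks of this series.

```python
# Hedged sketch of a raw TTS request to the demo server at 127.0.0.1:8090.
# ASSUMPTIONS: the exact route and the response envelope ("result", "audio")
# may differ in your version -- check them against your server.
import base64
import requests

payload = {
    "text": "您好,欢迎使用百度飞桨语音合成服务。",
    "spk_id": 0,
    "speed": 1.0,       # playback speed; 1.0 leaves the audio unchanged
    "volume": 1.0,
    "sample_rate": 0,   # 0 keeps the model's native sample rate
    "save_path": None,
}
resp = requests.post("http://127.0.0.1:8090/paddlespeech/tts", json=payload)
audio_b64 = resp.json()["result"]["audio"]    # assumed field names
with open("output.wav", "wb") as f:
    f.write(base64.b64decode(audio_b64))      # the server base64-encodes the WAV
```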
### TTS model
-Here is a list of [TTS pretrained models](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/demos/text_to_speech/README.md#4-pretrained-models) released by PaddleSpeech, both command line and python interfaces are available:
-
-- Acoustic model
-  | Model | Language
-  | :--- | :---: |
-  | speedyspeech_csmsc| zh
-  | fastspeech2_csmsc| zh
-  | fastspeech2_aishell3| zh
-  | fastspeech2_ljspeech| en
-  | fastspeech2_vctk| en
-
-- Vocoder
-  | Model | Language
-  | :--- | :---: |
-  | pwgan_csmsc| zh
-  | pwgan_aishell3| zh
-  | pwgan_ljspeech| en
-  | pwgan_vctk| en
-  | mb_melgan_csmsc| zh
-
-Here is a list of **TTS pretrained static models** released by PaddleSpeech, both command line and python interfaces are available:
-- Acoustic model
-  | Model | Language
-  | :--- | :---: |
-  | speedyspeech_csmsc| zh
-  | fastspeech2_csmsc| zh
-
-- Vocoder
-  | Model | Language
-  | :--- | :---: |
-  | pwgan_csmsc| zh
-  | mb_melgan_csmsc| zh
-  | hifigan_csmsc| zh
+Get all models supported by the TTS service via `paddlespeech_server stats --task tts`; the static models among them can be used for inference with Paddle Inference.
diff --git a/demos/speech_server/README_cn.md b/demos/speech_server/README_cn.md
index f5666070..f202a30c 100644
--- a/demos/speech_server/README_cn.md
+++ b/demos/speech_server/README_cn.md
@@ -14,6 +14,15 @@
 ### 2. 准备配置文件
 配置文件包含服务相关的配置文件和服务中包含的语音任务相关的模型配置。 它们都在 `conf` 文件夹下。
+**注意:`application.yaml` 中 `engine_backend` 的配置表示启动的服务中包含的所有语音任务。**
+如果你想启动的服务中只包含某项语音任务,那么你需要注释掉不需要包含的语音任务。例如你只想使用语音识别(ASR)服务,那么你可以将语音合成(TTS)服务注释掉,如下示例:
+```bash
+engine_backend:
+    asr: 'conf/asr/asr.yaml'
+    #tts: 'conf/tts/tts.yaml'
+```
+**注意:`application.yaml` 中 `engine_backend` 的配置文件需要和 `engine_type` 的配置类型匹配。**
+当`engine_backend` 的配置文件为`XXX.yaml`时,需要设置`engine_type`的配置类型为`python`;当`engine_backend` 的配置文件为`XXX_pd.yaml`时,需要设置`engine_type`的配置类型为`inference`;
 
 这个 ASR client 的输入应该是一个 WAV 文件(`.wav`),并且采样率必须与模型的采样率相同。
@@ -75,6 +84,7 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespee
 ```
 ### 4. ASR客户端使用方法
+**注意:**初次使用客户端时响应时间会略长
 - 命令行 (推荐使用)
   ```
   paddlespeech_client asr --server_ip 127.0.0.1 --port 8090 --input ./zh.wav
@@ -123,6 +133,7 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespee
   ```
 ### 5. TTS客户端使用方法
+**注意:**初次使用客户端时响应时间会略长
 ```bash
 paddlespeech_client tts --server_ip 127.0.0.1 --port 8090 --input "您好,欢迎使用百度飞桨语音合成服务。" --output output.wav
 ```
@@ -148,7 +159,6 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespee
     [2022-02-23 15:20:37,875] [    INFO] - Save synthesized audio successfully on output.wav.
     [2022-02-23 15:20:37,875] [    INFO] - Audio duration: 3.612500 s.
     [2022-02-23 15:20:37,875] [    INFO] - Response time: 0.348050 s.
-    [2022-02-23 15:20:37,875] [    INFO] - RTF: 0.096346
 ```

 - Python API
@@ -173,50 +183,12 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespee
     Save synthesized audio successfully on ./output.wav.
     Audio duration: 3.612500 s.
     Response time: 0.388317 s.
- RTF: 0.107493 ``` -## Pretrained Models -### ASR model -下面是PaddleSpeech发布的[ASR预训练模型](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/demos/speech_recognition/README.md#4pretrained-models)列表,命令行和python接口均可用: - -| Model | Language | Sample Rate -| :--- | :---: | :---: | -| conformer_wenetspeech| zh| 16000 -| transformer_librispeech| en| 16000 - -### TTS model -下面是PaddleSpeech发布的 [TTS预训练模型](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/demos/text_to_speech/README.md#4-pretrained-models) 列表,命令行和python接口均可用: - -- Acoustic model - | Model | Language - | :--- | :---: | - | speedyspeech_csmsc| zh - | fastspeech2_csmsc| zh - | fastspeech2_aishell3| zh - | fastspeech2_ljspeech| en - | fastspeech2_vctk| en - -- Vocoder - | Model | Language - | :--- | :---: | - | pwgan_csmsc| zh - | pwgan_aishell3| zh - | pwgan_ljspeech| en - | pwgan_vctk| en - | mb_melgan_csmsc| zh - -下面是PaddleSpeech发布的 **TTS预训练静态模型** 列表,命令行和python接口均可用: -- Acoustic model - | Model | Language - | :--- | :---: | - | speedyspeech_csmsc| zh - | fastspeech2_csmsc| zh - -- Vocoder - | Model | Language - | :--- | :---: | - | pwgan_csmsc| zh - | mb_melgan_csmsc| zh - | hifigan_csmsc| zh +## 服务支持的模型 +### ASR支持的模型 +通过 `paddlespeech_server stats --task asr` 获取ASR服务支持的所有模型,其中静态模型可用于 paddle inference 推理。 + +### TTS支持的模型 +通过 `paddlespeech_server stats --task tts` 获取TTS服务支持的所有模型,其中静态模型可用于 paddle inference 推理。 diff --git a/demos/speech_server/conf/application.yaml b/demos/speech_server/conf/application.yaml index fd4f5f37..6dcae74a 100644 --- a/demos/speech_server/conf/application.yaml +++ b/demos/speech_server/conf/application.yaml @@ -3,23 +3,25 @@ ################################################################## # SERVER SETTING # ################################################################## -host: '0.0.0.0' +host: '127.0.0.1' port: 8090 ################################################################## # CONFIG FILE # ################################################################## +# add engine backend type (Options: asr, tts) and config file here. +# Adding a speech task to engine_backend means starting the service. +engine_backend: + asr: 'conf/asr/asr.yaml' + tts: 'conf/tts/tts.yaml' + # The engine_type of speech task needs to keep the same type as the config file of speech task. # E.g: The engine_type of asr is 'python', the engine_backend of asr is 'XX/asr.yaml' # E.g: The engine_type of asr is 'inference', the engine_backend of asr is 'XX/asr_pd.yaml' # # add engine type (Options: python, inference) engine_type: - asr: 'inference' - tts: 'inference' + asr: 'python' + tts: 'python' + -# add engine backend type (Options: asr, tts) and config file here. -# Adding a speech task to engine_backend means starting the service. 
-engine_backend: - asr: 'conf/asr/asr_pd.yaml' - tts: 'conf/tts/tts_pd.yaml' diff --git a/demos/speech_server/conf/asr/asr.yaml b/demos/speech_server/conf/asr/asr.yaml index 1a805142..a6743b77 100644 --- a/demos/speech_server/conf/asr/asr.yaml +++ b/demos/speech_server/conf/asr/asr.yaml @@ -5,4 +5,4 @@ cfg_path: # [optional] ckpt_path: # [optional] decode_method: 'attention_rescoring' force_yes: True -device: 'cpu' # set 'gpu:id' or 'cpu' +device: # set 'gpu:id' or 'cpu' diff --git a/demos/speech_server/conf/asr/asr_pd.yaml b/demos/speech_server/conf/asr/asr_pd.yaml index 6cddb450..4c415ac7 100644 --- a/demos/speech_server/conf/asr/asr_pd.yaml +++ b/demos/speech_server/conf/asr/asr_pd.yaml @@ -15,9 +15,10 @@ decode_method: force_yes: True am_predictor_conf: - device: 'cpu' # set 'gpu:id' or 'cpu' - enable_mkldnn: True + device: # set 'gpu:id' or 'cpu' switch_ir_optim: True + glog_info: False # True -> print glog + summary: True # False -> do not show predictor config ################################################################## diff --git a/demos/speech_server/conf/tts/tts.yaml b/demos/speech_server/conf/tts/tts.yaml index 19e8874e..19207f0b 100644 --- a/demos/speech_server/conf/tts/tts.yaml +++ b/demos/speech_server/conf/tts/tts.yaml @@ -29,4 +29,4 @@ voc_stat: # OTHERS # ################################################################## lang: 'zh' -device: 'cpu' # set 'gpu:id' or 'cpu' +device: # set 'gpu:id' or 'cpu' diff --git a/demos/speech_server/conf/tts/tts_pd.yaml b/demos/speech_server/conf/tts/tts_pd.yaml index 97df5261..e27b9665 100644 --- a/demos/speech_server/conf/tts/tts_pd.yaml +++ b/demos/speech_server/conf/tts/tts_pd.yaml @@ -15,9 +15,10 @@ speaker_dict: spk_id: 0 am_predictor_conf: - device: 'cpu' # set 'gpu:id' or 'cpu' - enable_mkldnn: False - switch_ir_optim: False + device: # set 'gpu:id' or 'cpu' + switch_ir_optim: True + glog_info: False # True -> print glog + summary: True # False -> do not show predictor config ################################################################## @@ -30,9 +31,10 @@ voc_params: # the pdiparams file of your vocoder static model (XX.pdipparams) voc_sample_rate: 24000 voc_predictor_conf: - device: 'cpu' # set 'gpu:id' or 'cpu' - enable_mkldnn: False - switch_ir_optim: False + device: # set 'gpu:id' or 'cpu' + switch_ir_optim: True + glog_info: False # True -> print glog + summary: True # False -> do not show predictor config ################################################################## # OTHERS # diff --git a/paddlespeech/cli/__init__.py b/paddlespeech/cli/__init__.py index 12ff9919..b526a384 100644 --- a/paddlespeech/cli/__init__.py +++ b/paddlespeech/cli/__init__.py @@ -18,8 +18,8 @@ from .base_commands import BaseCommand from .base_commands import HelpCommand from .cls import CLSExecutor from .st import STExecutor +from .stats import StatsExecutor from .text import TextExecutor from .tts import TTSExecutor -from .stats import StatsExecutor _locale._getdefaultlocale = (lambda *args: ['en_US', 'utf8']) diff --git a/paddlespeech/cli/tts/infer.py b/paddlespeech/cli/tts/infer.py index ba15d652..8423dfa8 100644 --- a/paddlespeech/cli/tts/infer.py +++ b/paddlespeech/cli/tts/infer.py @@ -13,6 +13,7 @@ # limitations under the License. 
import argparse import os +import time from collections import OrderedDict from typing import Any from typing import List @@ -621,6 +622,7 @@ class TTSExecutor(BaseExecutor): am_dataset = am[am.rindex('_') + 1:] get_tone_ids = False merge_sentences = False + frontend_st = time.time() if am_name == 'speedyspeech': get_tone_ids = True if lang == 'zh': @@ -637,9 +639,13 @@ class TTSExecutor(BaseExecutor): phone_ids = input_ids["phone_ids"] else: print("lang should in {'zh', 'en'}!") + self.frontend_time = time.time() - frontend_st + self.am_time = 0 + self.voc_time = 0 flags = 0 for i in range(len(phone_ids)): + am_st = time.time() part_phone_ids = phone_ids[i] # am if am_name == 'speedyspeech': @@ -653,13 +659,16 @@ class TTSExecutor(BaseExecutor): part_phone_ids, spk_id=paddle.to_tensor(spk_id)) else: mel = self.am_inference(part_phone_ids) + self.am_time += (time.time() - am_st) # voc + voc_st = time.time() wav = self.voc_inference(mel) if flags == 0: wav_all = wav flags = 1 else: wav_all = paddle.concat([wav_all, wav]) + self.voc_time += (time.time() - voc_st) self._outputs['wav'] = wav_all def postprocess(self, output: str='output.wav') -> Union[str, os.PathLike]: diff --git a/paddlespeech/server/bin/paddlespeech_client.py b/paddlespeech/server/bin/paddlespeech_client.py index 853d272f..ee6ab7ad 100644 --- a/paddlespeech/server/bin/paddlespeech_client.py +++ b/paddlespeech/server/bin/paddlespeech_client.py @@ -121,7 +121,6 @@ class TTSClientExecutor(BaseExecutor): (args.output)) logger.info("Audio duration: %f s." % (duration)) logger.info("Response time: %f s." % (time_consume)) - logger.info("RTF: %f " % (time_consume / duration)) return True except BaseException: diff --git a/paddlespeech/server/conf/application.yaml b/paddlespeech/server/conf/application.yaml index cc08665e..9900492c 100644 --- a/paddlespeech/server/conf/application.yaml +++ b/paddlespeech/server/conf/application.yaml @@ -3,7 +3,7 @@ ################################################################## # SERVER SETTING # ################################################################## -host: '0.0.0.0' +host: '127.0.0.1' port: 8090 ################################################################## diff --git a/paddlespeech/server/conf/asr/asr.yaml b/paddlespeech/server/conf/asr/asr.yaml index 1a805142..a6743b77 100644 --- a/paddlespeech/server/conf/asr/asr.yaml +++ b/paddlespeech/server/conf/asr/asr.yaml @@ -5,4 +5,4 @@ cfg_path: # [optional] ckpt_path: # [optional] decode_method: 'attention_rescoring' force_yes: True -device: 'cpu' # set 'gpu:id' or 'cpu' +device: # set 'gpu:id' or 'cpu' diff --git a/paddlespeech/server/conf/asr/asr_pd.yaml b/paddlespeech/server/conf/asr/asr_pd.yaml index 6cddb450..4c415ac7 100644 --- a/paddlespeech/server/conf/asr/asr_pd.yaml +++ b/paddlespeech/server/conf/asr/asr_pd.yaml @@ -15,9 +15,10 @@ decode_method: force_yes: True am_predictor_conf: - device: 'cpu' # set 'gpu:id' or 'cpu' - enable_mkldnn: True + device: # set 'gpu:id' or 'cpu' switch_ir_optim: True + glog_info: False # True -> print glog + summary: True # False -> do not show predictor config ################################################################## diff --git a/paddlespeech/server/conf/tts/tts.yaml b/paddlespeech/server/conf/tts/tts.yaml index 19e8874e..19207f0b 100644 --- a/paddlespeech/server/conf/tts/tts.yaml +++ b/paddlespeech/server/conf/tts/tts.yaml @@ -29,4 +29,4 @@ voc_stat: # OTHERS # ################################################################## lang: 'zh' -device: 'cpu' # set 'gpu:id' or 'cpu' +device: 
# set 'gpu:id' or 'cpu' diff --git a/paddlespeech/server/conf/tts/tts_pd.yaml b/paddlespeech/server/conf/tts/tts_pd.yaml index 019c7ed6..e27b9665 100644 --- a/paddlespeech/server/conf/tts/tts_pd.yaml +++ b/paddlespeech/server/conf/tts/tts_pd.yaml @@ -8,16 +8,17 @@ am: 'fastspeech2_csmsc' am_model: # the pdmodel file of your am static model (XX.pdmodel) am_params: # the pdiparams file of your am static model (XX.pdipparams) -am_sample_rate: 24000 # must match the model +am_sample_rate: 24000 phones_dict: tones_dict: speaker_dict: spk_id: 0 am_predictor_conf: - device: 'cpu' # set 'gpu:id' or 'cpu' - enable_mkldnn: False - switch_ir_optim: False + device: # set 'gpu:id' or 'cpu' + switch_ir_optim: True + glog_info: False # True -> print glog + summary: True # False -> do not show predictor config ################################################################## @@ -27,12 +28,13 @@ am_predictor_conf: voc: 'pwgan_csmsc' voc_model: # the pdmodel file of your vocoder static model (XX.pdmodel) voc_params: # the pdiparams file of your vocoder static model (XX.pdipparams) -voc_sample_rate: 24000 #must match the model +voc_sample_rate: 24000 voc_predictor_conf: - device: 'cpu' # set 'gpu:id' or 'cpu' - enable_mkldnn: False - switch_ir_optim: False + device: # set 'gpu:id' or 'cpu' + switch_ir_optim: True + glog_info: False # True -> print glog + summary: True # False -> do not show predictor config ################################################################## # OTHERS # diff --git a/paddlespeech/server/engine/asr/paddleinference/asr_engine.py b/paddlespeech/server/engine/asr/paddleinference/asr_engine.py index 5d4c4fa6..cb973e92 100644 --- a/paddlespeech/server/engine/asr/paddleinference/asr_engine.py +++ b/paddlespeech/server/engine/asr/paddleinference/asr_engine.py @@ -13,6 +13,7 @@ # limitations under the License. import io import os +import time from typing import Optional import paddle @@ -197,7 +198,6 @@ class ASREngine(BaseEngine): self.executor = ASRServerExecutor() self.config = get_config(config_file) - paddle.set_device(paddle.get_device()) self.executor._init_from_path( model_type=self.config.model_type, am_model=self.config.am_model, @@ -223,13 +223,18 @@ class ASREngine(BaseEngine): logger.info("start running asr engine") self.executor.preprocess(self.config.model_type, io.BytesIO(audio_data)) + st = time.time() self.executor.infer(self.config.model_type) + infer_time = time.time() - st self.output = self.executor.postprocess() # Retrieve result of asr. logger.info("end inferring asr engine") else: logger.info("file check failed!") self.output = None + logger.info("inference time: {}".format(infer_time)) + logger.info("asr engine type: paddle inference") + def postprocess(self): """postprocess """ diff --git a/paddlespeech/server/engine/asr/python/asr_engine.py b/paddlespeech/server/engine/asr/python/asr_engine.py index 9fac487d..1e2c5cc2 100644 --- a/paddlespeech/server/engine/asr/python/asr_engine.py +++ b/paddlespeech/server/engine/asr/python/asr_engine.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import io +import time import paddle @@ -53,16 +54,24 @@ class ASREngine(BaseEngine): self.executor = ASRServerExecutor() self.config = get_config(config_file) - if self.config.device is None: - paddle.set_device(paddle.get_device()) - else: - paddle.set_device(self.config.device) + try: + if self.config.device: + self.device = self.config.device + else: + self.device = paddle.get_device() + paddle.set_device(self.device) + except BaseException: + logger.error( + "Set device failed, please check if device is already used and the parameter 'device' in the yaml file" + ) + self.executor._init_from_path( self.config.model, self.config.lang, self.config.sample_rate, self.config.cfg_path, self.config.decode_method, self.config.ckpt_path) - logger.info("Initialize ASR server engine successfully.") + logger.info("Initialize ASR server engine successfully on device: %s." % + (self.device)) return True def run(self, audio_data): @@ -76,12 +85,17 @@ class ASREngine(BaseEngine): self.config.force_yes): logger.info("start run asr engine") self.executor.preprocess(self.config.model, io.BytesIO(audio_data)) + st = time.time() self.executor.infer(self.config.model) + infer_time = time.time() - st self.output = self.executor.postprocess() # Retrieve result of asr. else: logger.info("file check failed!") self.output = None + logger.info("inference time: {}".format(infer_time)) + logger.info("asr engine type: python") + def postprocess(self): """postprocess """ diff --git a/paddlespeech/server/engine/tts/paddleinference/tts_engine.py b/paddlespeech/server/engine/tts/paddleinference/tts_engine.py index a9dc5f4e..5955c1a2 100644 --- a/paddlespeech/server/engine/tts/paddleinference/tts_engine.py +++ b/paddlespeech/server/engine/tts/paddleinference/tts_engine.py @@ -14,6 +14,7 @@ import base64 import io import os +import time from typing import Optional import librosa @@ -179,7 +180,7 @@ class TTSServerExecutor(TTSExecutor): self.phones_dict = os.path.abspath(phones_dict) self.am_sample_rate = am_sample_rate self.am_res_path = os.path.dirname(os.path.abspath(self.am_model)) - print("self.phones_dict:", self.phones_dict) + logger.info("self.phones_dict: {}".format(self.phones_dict)) # for speedyspeech self.tones_dict = None @@ -224,21 +225,21 @@ class TTSServerExecutor(TTSExecutor): with open(self.phones_dict, "r") as f: phn_id = [line.strip().split() for line in f.readlines()] vocab_size = len(phn_id) - print("vocab_size:", vocab_size) + logger.info("vocab_size: {}".format(vocab_size)) tone_size = None if self.tones_dict: with open(self.tones_dict, "r") as f: tone_id = [line.strip().split() for line in f.readlines()] tone_size = len(tone_id) - print("tone_size:", tone_size) + logger.info("tone_size: {}".format(tone_size)) spk_num = None if self.speaker_dict: with open(self.speaker_dict, 'rt') as f: spk_id = [line.strip().split() for line in f.readlines()] spk_num = len(spk_id) - print("spk_num:", spk_num) + logger.info("spk_num: {}".format(spk_num)) # frontend if lang == 'zh': @@ -248,21 +249,29 @@ class TTSServerExecutor(TTSExecutor): elif lang == 'en': self.frontend = English(phone_vocab_path=self.phones_dict) - print("frontend done!") - - # am predictor - self.am_predictor_conf = am_predictor_conf - self.am_predictor = init_predictor( - model_file=self.am_model, - params_file=self.am_params, - predictor_conf=self.am_predictor_conf) - - # voc predictor - self.voc_predictor_conf = voc_predictor_conf - self.voc_predictor = init_predictor( - model_file=self.voc_model, - params_file=self.voc_params, - 
predictor_conf=self.voc_predictor_conf) + logger.info("frontend done!") + + try: + # am predictor + self.am_predictor_conf = am_predictor_conf + self.am_predictor = init_predictor( + model_file=self.am_model, + params_file=self.am_params, + predictor_conf=self.am_predictor_conf) + logger.info("Create AM predictor successfully.") + except BaseException: + logger.error("Failed to create AM predictor.") + + try: + # voc predictor + self.voc_predictor_conf = voc_predictor_conf + self.voc_predictor = init_predictor( + model_file=self.voc_model, + params_file=self.voc_params, + predictor_conf=self.voc_predictor_conf) + logger.info("Create Vocoder predictor successfully.") + except BaseException: + logger.error("Failed to create Vocoder predictor.") @paddle.no_grad() def infer(self, @@ -277,6 +286,7 @@ class TTSServerExecutor(TTSExecutor): am_dataset = am[am.rindex('_') + 1:] get_tone_ids = False merge_sentences = False + frontend_st = time.time() if am_name == 'speedyspeech': get_tone_ids = True if lang == 'zh': @@ -292,10 +302,14 @@ class TTSServerExecutor(TTSExecutor): text, merge_sentences=merge_sentences) phone_ids = input_ids["phone_ids"] else: - print("lang should in {'zh', 'en'}!") + logger.error("lang should in {'zh', 'en'}!") + self.frontend_time = time.time() - frontend_st + self.am_time = 0 + self.voc_time = 0 flags = 0 for i in range(len(phone_ids)): + am_st = time.time() part_phone_ids = phone_ids[i] # am if am_name == 'speedyspeech': @@ -314,7 +328,10 @@ class TTSServerExecutor(TTSExecutor): am_result = run_model(self.am_predictor, [part_phone_ids.numpy()]) mel = am_result[0] + self.am_time += (time.time() - am_st) + # voc + voc_st = time.time() voc_result = run_model(self.voc_predictor, [mel]) wav = voc_result[0] wav = paddle.to_tensor(wav) @@ -324,6 +341,7 @@ class TTSServerExecutor(TTSExecutor): flags = 1 else: wav_all = paddle.concat([wav_all, wav]) + self.voc_time += (time.time() - voc_st) self._outputs['wav'] = wav_all @@ -370,7 +388,7 @@ class TTSEngine(BaseEngine): def postprocess(self, wav, original_fs: int, - target_fs: int=16000, + target_fs: int=0, volume: float=1.0, speed: float=1.0, audio_path: str=None): @@ -395,38 +413,50 @@ class TTSEngine(BaseEngine): if target_fs == 0 or target_fs > original_fs: target_fs = original_fs wav_tar_fs = wav + logger.info( + "The sample rate of synthesized audio is the same as model, which is {}Hz". + format(original_fs)) else: wav_tar_fs = librosa.resample( np.squeeze(wav), original_fs, target_fs) - + logger.info( + "The sample rate of model is {}Hz and the target sample rate is {}Hz. Converting the sample rate of the synthesized audio successfully.". + format(original_fs, target_fs)) # transform volume wav_vol = wav_tar_fs * volume + logger.info("Transform the volume of the audio successfully.") # transform speed try: # windows not support soxbindings wav_speed = change_speed(wav_vol, speed, target_fs) + logger.info("Transform the speed of the audio successfully.") except ServerBaseException: raise ServerBaseException( ErrorCode.SERVER_INTERNAL_ERR, - "Transform speed failed. Can not install soxbindings on your system. \ + "Failed to transform speed. Can not install soxbindings on your system. 
\ You need to set speed value 1.0.") except BaseException: - logger.error("Transform speed failed.") + logger.error("Failed to transform speed.") # wav to base64 buf = io.BytesIO() wavfile.write(buf, target_fs, wav_speed) base64_bytes = base64.b64encode(buf.read()) wav_base64 = base64_bytes.decode('utf-8') + logger.info("Audio to string successfully.") # save audio - if audio_path is not None and audio_path.endswith(".wav"): - sf.write(audio_path, wav_speed, target_fs) - elif audio_path is not None and audio_path.endswith(".pcm"): - wav_norm = wav_speed * (32767 / max(0.001, - np.max(np.abs(wav_speed)))) - with open(audio_path, "wb") as f: - f.write(wav_norm.astype(np.int16)) + if audio_path is not None: + if audio_path.endswith(".wav"): + sf.write(audio_path, wav_speed, target_fs) + elif audio_path.endswith(".pcm"): + wav_norm = wav_speed * (32767 / max(0.001, + np.max(np.abs(wav_speed)))) + with open(audio_path, "wb") as f: + f.write(wav_norm.astype(np.int16)) + logger.info("Save audio to {} successfully.".format(audio_path)) + else: + logger.info("There is no need to save audio.") return target_fs, wav_base64 @@ -462,8 +492,12 @@ class TTSEngine(BaseEngine): lang = self.config.lang try: + infer_st = time.time() self.executor.infer( text=sentence, lang=lang, am=self.config.am, spk_id=spk_id) + infer_et = time.time() + infer_time = infer_et - infer_st + except ServerBaseException: raise ServerBaseException(ErrorCode.SERVER_INTERNAL_ERR, "tts infer failed.") @@ -471,6 +505,7 @@ class TTSEngine(BaseEngine): logger.error("tts infer failed.") try: + postprocess_st = time.time() target_sample_rate, wav_base64 = self.postprocess( wav=self.executor._outputs['wav'].numpy(), original_fs=self.executor.am_sample_rate, @@ -478,10 +513,34 @@ class TTSEngine(BaseEngine): volume=volume, speed=speed, audio_path=save_path) + postprocess_et = time.time() + postprocess_time = postprocess_et - postprocess_st + duration = len(self.executor._outputs['wav'] + .numpy()) / self.executor.am_sample_rate + rtf = infer_time / duration + except ServerBaseException: raise ServerBaseException(ErrorCode.SERVER_INTERNAL_ERR, "tts postprocess failed.") except BaseException: logger.error("tts postprocess failed.") + logger.info("AM model: {}".format(self.config.am)) + logger.info("Vocoder model: {}".format(self.config.voc)) + logger.info("Language: {}".format(lang)) + logger.info("tts engine type: paddle inference") + + logger.info("audio duration: {}".format(duration)) + logger.info( + "frontend inference time: {}".format(self.executor.frontend_time)) + logger.info("AM inference time: {}".format(self.executor.am_time)) + logger.info("Vocoder inference time: {}".format(self.executor.voc_time)) + logger.info("total inference time: {}".format(infer_time)) + logger.info( + "postprocess (change speed, volume, target sample rate) time: {}". + format(postprocess_time)) + logger.info("total generate audio time: {}".format(infer_time + + postprocess_time)) + logger.info("RTF: {}".format(rtf)) + return lang, target_sample_rate, wav_base64 diff --git a/paddlespeech/server/engine/tts/python/tts_engine.py b/paddlespeech/server/engine/tts/python/tts_engine.py index 20b4e0fe..7dd57669 100644 --- a/paddlespeech/server/engine/tts/python/tts_engine.py +++ b/paddlespeech/server/engine/tts/python/tts_engine.py @@ -13,6 +13,7 @@ # limitations under the License. 
import base64 import io +import time import librosa import numpy as np @@ -54,11 +55,20 @@ class TTSEngine(BaseEngine): try: self.config = get_config(config_file) - if self.config.device is None: - paddle.set_device(paddle.get_device()) + if self.config.device: + self.device = self.config.device else: - paddle.set_device(self.config.device) + self.device = paddle.get_device() + paddle.set_device(self.device) + except BaseException: + logger.error( + "Set device failed, please check if device is already used and the parameter 'device' in the yaml file" + ) + logger.error("Initialize TTS server engine Failed on device: %s." % + (self.device)) + return False + try: self.executor._init_from_path( am=self.config.am, am_config=self.config.am_config, @@ -73,16 +83,19 @@ class TTSEngine(BaseEngine): voc_stat=self.config.voc_stat, lang=self.config.lang) except BaseException: - logger.error("Initialize TTS server engine Failed.") + logger.error("Failed to get model related files.") + logger.error("Initialize TTS server engine Failed on device: %s." % + (self.device)) return False - logger.info("Initialize TTS server engine successfully.") + logger.info("Initialize TTS server engine successfully on device: %s." % + (self.device)) return True def postprocess(self, wav, original_fs: int, - target_fs: int=16000, + target_fs: int=0, volume: float=1.0, speed: float=1.0, audio_path: str=None): @@ -107,38 +120,50 @@ class TTSEngine(BaseEngine): if target_fs == 0 or target_fs > original_fs: target_fs = original_fs wav_tar_fs = wav + logger.info( + "The sample rate of synthesized audio is the same as model, which is {}Hz". + format(original_fs)) else: wav_tar_fs = librosa.resample( np.squeeze(wav), original_fs, target_fs) - + logger.info( + "The sample rate of model is {}Hz and the target sample rate is {}Hz. Converting the sample rate of the synthesized audio successfully.". + format(original_fs, target_fs)) # transform volume wav_vol = wav_tar_fs * volume + logger.info("Transform the volume of the audio successfully.") # transform speed try: # windows not support soxbindings wav_speed = change_speed(wav_vol, speed, target_fs) + logger.info("Transform the speed of the audio successfully.") except ServerBaseException: raise ServerBaseException( ErrorCode.SERVER_INTERNAL_ERR, - "Transform speed failed. Can not install soxbindings on your system. \ + "Failed to transform speed. Can not install soxbindings on your system. 
\ You need to set speed value 1.0.") except BaseException: - logger.error("Transform speed failed.") + logger.error("Failed to transform speed.") # wav to base64 buf = io.BytesIO() wavfile.write(buf, target_fs, wav_speed) base64_bytes = base64.b64encode(buf.read()) wav_base64 = base64_bytes.decode('utf-8') + logger.info("Audio to string successfully.") # save audio - if audio_path is not None and audio_path.endswith(".wav"): - sf.write(audio_path, wav_speed, target_fs) - elif audio_path is not None and audio_path.endswith(".pcm"): - wav_norm = wav_speed * (32767 / max(0.001, - np.max(np.abs(wav_speed)))) - with open(audio_path, "wb") as f: - f.write(wav_norm.astype(np.int16)) + if audio_path is not None: + if audio_path.endswith(".wav"): + sf.write(audio_path, wav_speed, target_fs) + elif audio_path.endswith(".pcm"): + wav_norm = wav_speed * (32767 / max(0.001, + np.max(np.abs(wav_speed)))) + with open(audio_path, "wb") as f: + f.write(wav_norm.astype(np.int16)) + logger.info("Save audio to {} successfully.".format(audio_path)) + else: + logger.info("There is no need to save audio.") return target_fs, wav_base64 @@ -174,8 +199,15 @@ class TTSEngine(BaseEngine): lang = self.config.lang try: + infer_st = time.time() self.executor.infer( text=sentence, lang=lang, am=self.config.am, spk_id=spk_id) + infer_et = time.time() + infer_time = infer_et - infer_st + duration = len(self.executor._outputs['wav'] + .numpy()) / self.executor.am_config.fs + rtf = infer_time / duration + except ServerBaseException: raise ServerBaseException(ErrorCode.SERVER_INTERNAL_ERR, "tts infer failed.") @@ -183,6 +215,7 @@ class TTSEngine(BaseEngine): logger.error("tts infer failed.") try: + postprocess_st = time.time() target_sample_rate, wav_base64 = self.postprocess( wav=self.executor._outputs['wav'].numpy(), original_fs=self.executor.am_config.fs, @@ -190,10 +223,32 @@ class TTSEngine(BaseEngine): volume=volume, speed=speed, audio_path=save_path) + postprocess_et = time.time() + postprocess_time = postprocess_et - postprocess_st + except ServerBaseException: raise ServerBaseException(ErrorCode.SERVER_INTERNAL_ERR, "tts postprocess failed.") except BaseException: logger.error("tts postprocess failed.") + logger.info("AM model: {}".format(self.config.am)) + logger.info("Vocoder model: {}".format(self.config.voc)) + logger.info("Language: {}".format(lang)) + logger.info("tts engine type: python") + + logger.info("audio duration: {}".format(duration)) + logger.info( + "frontend inference time: {}".format(self.executor.frontend_time)) + logger.info("AM inference time: {}".format(self.executor.am_time)) + logger.info("Vocoder inference time: {}".format(self.executor.voc_time)) + logger.info("total inference time: {}".format(infer_time)) + logger.info( + "postprocess (change speed, volume, target sample rate) time: {}". 
+ format(postprocess_time)) + logger.info("total generate audio time: {}".format(infer_time + + postprocess_time)) + logger.info("RTF: {}".format(rtf)) + logger.info("device: {}".format(self.device)) + return lang, target_sample_rate, wav_base64 diff --git a/paddlespeech/server/restful/tts_api.py b/paddlespeech/server/restful/tts_api.py index c7e91300..0af0f6d0 100644 --- a/paddlespeech/server/restful/tts_api.py +++ b/paddlespeech/server/restful/tts_api.py @@ -16,6 +16,7 @@ from typing import Union from fastapi import APIRouter +from paddlespeech.cli.log import logger from paddlespeech.server.engine.engine_pool import get_engine_pool from paddlespeech.server.restful.request import TTSRequest from paddlespeech.server.restful.response import ErrorResponse @@ -60,6 +61,9 @@ def tts(request_body: TTSRequest): Returns: json: [description] """ + + logger.info("request: {}".format(request_body)) + # get params text = request_body.text spk_id = request_body.spk_id @@ -92,6 +96,7 @@ def tts(request_body: TTSRequest): # get single engine from engine pool engine_pool = get_engine_pool() tts_engine = engine_pool['tts'] + logger.info("Get tts engine successfully.") lang, target_sample_rate, wav_base64 = tts_engine.run( text, spk_id, speed, volume, sample_rate, save_path) diff --git a/paddlespeech/server/utils/paddle_predictor.py b/paddlespeech/server/utils/paddle_predictor.py index f4216d74..4035d48d 100644 --- a/paddlespeech/server/utils/paddle_predictor.py +++ b/paddlespeech/server/utils/paddle_predictor.py @@ -15,6 +15,7 @@ import os from typing import List from typing import Optional +import paddle from paddle.inference import Config from paddle.inference import create_predictor @@ -40,15 +41,30 @@ def init_predictor(model_dir: Optional[os.PathLike]=None, else: config = Config(model_file, params_file) - config.enable_memory_optim() - if "gpu" in predictor_conf["device"]: - gpu_id = predictor_conf["device"].split(":")[-1] + # set device + if predictor_conf["device"]: + device = predictor_conf["device"] + else: + device = paddle.get_device() + if "gpu" in device: + gpu_id = device.split(":")[-1] config.enable_use_gpu(1000, int(gpu_id)) - if predictor_conf["enable_mkldnn"]: - config.enable_mkldnn() + + # IR optim if predictor_conf["switch_ir_optim"]: config.switch_ir_optim() + # glog + if not predictor_conf["glog_info"]: + config.disable_glog_info() + + # config summary + if predictor_conf["summary"]: + print(config.summary()) + + # memory optim + config.enable_memory_optim() + predictor = create_predictor(config) return predictor From 7d1ed0d052aa4f2a2481dd82f9471004e82a20f6 Mon Sep 17 00:00:00 2001 From: Phecda xu <46859427+phecda-xu@users.noreply.github.com> Date: Tue, 1 Mar 2022 22:45:30 +0800 Subject: [PATCH 31/39] Update README.md add PaddleDubbing info --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 46730797..48732f59 100644 --- a/README.md +++ b/README.md @@ -561,6 +561,7 @@ You are warmly welcome to submit questions in [discussions](https://github.com/P - Many thanks to [JiehangXie](https://github.com/JiehangXie)/[PaddleBoBo](https://github.com/JiehangXie/PaddleBoBo) for developing Virtual Uploader(VUP)/Virtual YouTuber(VTuber) with PaddleSpeech TTS function. - Many thanks to [745165806](https://github.com/745165806)/[PaddleSpeechTask](https://github.com/745165806/PaddleSpeechTask) for contributing Punctuation Restoration model. - Many thanks to [kslz](https://github.com/745165806) for supplementary Chinese documents. 
+- Many thanks to [phecda-xu](https://github.com/phecda-xu)/[PaddleDubbing](https://github.com/phecda-xu/PaddleDubbing) for developing a dubbing tool with GUI based on PaddleSpeech TTS model. Besides, PaddleSpeech depends on a lot of open source repositories. See [references](./docs/source/reference.md) for more information. From 7da1d388b9ee6e6606bddcefab629237692dbec0 Mon Sep 17 00:00:00 2001 From: Phecda xu <46859427+phecda-xu@users.noreply.github.com> Date: Tue, 1 Mar 2022 22:54:59 +0800 Subject: [PATCH 32/39] Update README_cn.md --- README_cn.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README_cn.md b/README_cn.md index 9782240a..72352887 100644 --- a/README_cn.md +++ b/README_cn.md @@ -556,6 +556,7 @@ year={2021} - 非常感谢 [JiehangXie](https://github.com/JiehangXie)/[PaddleBoBo](https://github.com/JiehangXie/PaddleBoBo) 采用 PaddleSpeech 语音合成功能实现 Virtual Uploader(VUP)/Virtual YouTuber(VTuber) 虚拟主播。 - 非常感谢 [745165806](https://github.com/745165806)/[PaddleSpeechTask](https://github.com/745165806/PaddleSpeechTask) 贡献标点重建相关模型。 - 非常感谢 [kslz](https://github.com/kslz) 补充中文文档。 +- 非常感谢 [phecda-xu](https://github.com/phecda-xu)/[PaddleDubbing](https://github.com/phecda-xu/PaddleDubbing) 基于PaddleSpeech的TTS模型搭建带GUI操作界面的配音工具。 此外,PaddleSpeech 依赖于许多开源存储库。有关更多信息,请参阅 [references](./docs/source/reference.md)。 From d69b507a09d1b7512318699e24a67a89191b1121 Mon Sep 17 00:00:00 2001 From: Phecda xu <46859427+phecda-xu@users.noreply.github.com> Date: Tue, 1 Mar 2022 23:12:10 +0800 Subject: [PATCH 33/39] Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 48732f59..66178662 100644 --- a/README.md +++ b/README.md @@ -561,6 +561,7 @@ You are warmly welcome to submit questions in [discussions](https://github.com/P - Many thanks to [JiehangXie](https://github.com/JiehangXie)/[PaddleBoBo](https://github.com/JiehangXie/PaddleBoBo) for developing Virtual Uploader(VUP)/Virtual YouTuber(VTuber) with PaddleSpeech TTS function. - Many thanks to [745165806](https://github.com/745165806)/[PaddleSpeechTask](https://github.com/745165806/PaddleSpeechTask) for contributing Punctuation Restoration model. - Many thanks to [kslz](https://github.com/745165806) for supplementary Chinese documents. +- Many thanks to [awmmmm](https://github.com/awmmmm) for contributing fastspeech2 aishell3 conformer pretrained model. - Many thanks to [phecda-xu](https://github.com/phecda-xu)/[PaddleDubbing](https://github.com/phecda-xu/PaddleDubbing) for developing a dubbing tool with GUI based on PaddleSpeech TTS model. Besides, PaddleSpeech depends on a lot of open source repositories. See [references](./docs/source/reference.md) for more information. 
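To make the new `*_pd.yaml` options above concrete, here is a condensed sketch of how a `predictor_conf` section maps onto a Paddle Inference `Config`, following the `init_predictor()` hunk in this patch; the `build_predictor` wrapper name is illustrative, not part of the codebase:

```python
import paddle
from paddle.inference import Config, create_predictor

def build_predictor(model_file, params_file, predictor_conf):
    config = Config(model_file, params_file)

    # device: empty -> fall back to whatever paddle reports ('cpu' or 'gpu:N')
    device = predictor_conf.get("device") or paddle.get_device()
    if "gpu" in device:
        config.enable_use_gpu(1000, int(device.split(":")[-1]))  # 1000 MB pool

    if predictor_conf.get("switch_ir_optim"):  # graph-level IR optimizations
        config.switch_ir_optim()
    if not predictor_conf.get("glog_info"):    # glog_info: False -> silence glog
        config.disable_glog_info()
    if predictor_conf.get("summary"):          # summary: True -> print the config
        print(config.summary())

    config.enable_memory_optim()
    return create_predictor(config)
```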
From 8858c7066b9959e8502df4a974bbe59f80e08ec9 Mon Sep 17 00:00:00 2001 From: Phecda xu <46859427+phecda-xu@users.noreply.github.com> Date: Tue, 1 Mar 2022 23:13:44 +0800 Subject: [PATCH 34/39] Update README_cn.md --- README_cn.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README_cn.md b/README_cn.md index 72352887..27580639 100644 --- a/README_cn.md +++ b/README_cn.md @@ -556,6 +556,7 @@ year={2021} - 非常感谢 [JiehangXie](https://github.com/JiehangXie)/[PaddleBoBo](https://github.com/JiehangXie/PaddleBoBo) 采用 PaddleSpeech 语音合成功能实现 Virtual Uploader(VUP)/Virtual YouTuber(VTuber) 虚拟主播。 - 非常感谢 [745165806](https://github.com/745165806)/[PaddleSpeechTask](https://github.com/745165806/PaddleSpeechTask) 贡献标点重建相关模型。 - 非常感谢 [kslz](https://github.com/kslz) 补充中文文档。 +- 非常感谢 [awmmmm](https://github.com/awmmmm) 提供 fastspeech2 aishell3 conformer 预训练模型。 - 非常感谢 [phecda-xu](https://github.com/phecda-xu)/[PaddleDubbing](https://github.com/phecda-xu/PaddleDubbing) 基于PaddleSpeech的TTS模型搭建带GUI操作界面的配音工具。 此外,PaddleSpeech 依赖于许多开源存储库。有关更多信息,请参阅 [references](./docs/source/reference.md)。 From 34b600c4a2035b44c29ca70f7d34e685ff5f98a3 Mon Sep 17 00:00:00 2001 From: Phecda xu <46859427+phecda-xu@users.noreply.github.com> Date: Tue, 1 Mar 2022 23:15:30 +0800 Subject: [PATCH 35/39] Update README_cn.md --- README_cn.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README_cn.md b/README_cn.md index 27580639..1196eec1 100644 --- a/README_cn.md +++ b/README_cn.md @@ -557,7 +557,7 @@ year={2021} - 非常感谢 [745165806](https://github.com/745165806)/[PaddleSpeechTask](https://github.com/745165806/PaddleSpeechTask) 贡献标点重建相关模型。 - 非常感谢 [kslz](https://github.com/kslz) 补充中文文档。 - 非常感谢 [awmmmm](https://github.com/awmmmm) 提供 fastspeech2 aishell3 conformer 预训练模型。 -- 非常感谢 [phecda-xu](https://github.com/phecda-xu)/[PaddleDubbing](https://github.com/phecda-xu/PaddleDubbing) 基于PaddleSpeech的TTS模型搭建带GUI操作界面的配音工具。 +- 非常感谢 [phecda-xu](https://github.com/phecda-xu)/[PaddleDubbing](https://github.com/phecda-xu/PaddleDubbing) 基于 PaddleSpeech 的 TTS 模型搭建带 GUI 操作界面的配音工具。 此外,PaddleSpeech 依赖于许多开源存储库。有关更多信息,请参阅 [references](./docs/source/reference.md)。 From c116a3a92644a6fcbf0e2346d0077bb7c3b3c50c Mon Sep 17 00:00:00 2001 From: Jerryuhoo Date: Wed, 2 Mar 2022 09:41:18 +0800 Subject: [PATCH 36/39] fix Speedyspeech multi-speaker inference, test=tts --- paddlespeech/t2s/exps/synthesize_e2e.py | 8 ++++---- paddlespeech/t2s/models/speedyspeech/speedyspeech.py | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/paddlespeech/t2s/exps/synthesize_e2e.py b/paddlespeech/t2s/exps/synthesize_e2e.py index 75c631b8..514d4822 100644 --- a/paddlespeech/t2s/exps/synthesize_e2e.py +++ b/paddlespeech/t2s/exps/synthesize_e2e.py @@ -194,10 +194,10 @@ def evaluate(args): am_inference = jit.to_static( am_inference, input_spec=[ - InputSpec([-1], dtype=paddle.int64), # text - InputSpec([-1], dtype=paddle.int64), # tone - None, # duration - InputSpec([-1], dtype=paddle.int64) # spk_id + InputSpec([-1], dtype=paddle.int64), # text + InputSpec([-1], dtype=paddle.int64), # tone + InputSpec([1], dtype=paddle.int64), # spk_id + None # duration ]) else: am_inference = jit.to_static( diff --git a/paddlespeech/t2s/models/speedyspeech/speedyspeech.py b/paddlespeech/t2s/models/speedyspeech/speedyspeech.py index 42e8f743..44ccfc60 100644 --- a/paddlespeech/t2s/models/speedyspeech/speedyspeech.py +++ b/paddlespeech/t2s/models/speedyspeech/speedyspeech.py @@ -247,7 +247,7 @@ class SpeedySpeechInference(nn.Layer): self.normalizer = normalizer 
self.acoustic_model = speedyspeech_model - def forward(self, phones, tones, durations=None, spk_id=None): + def forward(self, phones, tones, spk_id=None, durations=None): normalized_mel = self.acoustic_model.inference( phones, tones, durations=durations, spk_id=spk_id) logmel = self.normalizer.inverse(normalized_mel) From 85d4a31e04e238b3459e8c3f34a502fe8dd5f69e Mon Sep 17 00:00:00 2001 From: lym0302 Date: Wed, 2 Mar 2022 09:55:54 +0800 Subject: [PATCH 37/39] update application.yaml, test=doc --- paddlespeech/server/conf/application.yaml | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/paddlespeech/server/conf/application.yaml b/paddlespeech/server/conf/application.yaml index 9900492c..6dcae74a 100644 --- a/paddlespeech/server/conf/application.yaml +++ b/paddlespeech/server/conf/application.yaml @@ -9,6 +9,12 @@ port: 8090 ################################################################## # CONFIG FILE # ################################################################## +# add engine backend type (Options: asr, tts) and config file here. +# Adding a speech task to engine_backend means starting the service. +engine_backend: + asr: 'conf/asr/asr.yaml' + tts: 'conf/tts/tts.yaml' + # The engine_type of speech task needs to keep the same type as the config file of speech task. # E.g: The engine_type of asr is 'python', the engine_backend of asr is 'XX/asr.yaml' # E.g: The engine_type of asr is 'inference', the engine_backend of asr is 'XX/asr_pd.yaml' @@ -18,8 +24,4 @@ engine_type: asr: 'python' tts: 'python' -# add engine backend type (Options: asr, tts) and config file here. -# Adding a speech task to engine_backend means starting the service. -engine_backend: - asr: 'conf/asr/asr.yaml' - tts: 'conf/tts/tts.yaml' + From 556ac958d4367b5d8751710cbbbac14500b4d9f8 Mon Sep 17 00:00:00 2001 From: Jerryuhoo Date: Wed, 2 Mar 2022 10:39:38 +0800 Subject: [PATCH 38/39] update readme, test=doc add examples --- README.md | 7 +++++++ README_cn.md | 7 +++++++ 2 files changed, 14 insertions(+) diff --git a/README.md b/README.md index 837d2478..7f95abac 100644 --- a/README.md +++ b/README.md @@ -148,6 +148,12 @@ For more synthesized audios, please refer to [PaddleSpeech Text-to-Speech sample - [PaddleSpeech Demo Video](https://paddlespeech.readthedocs.io/en/latest/demo_video.html) +- **[VTuberTalk](https://github.com/JiehangXie/PaddleBoBo): Use PaddleSpeech TTS and ASR to clone voice from videos.** + +
+ +
+ ### 🔥 Hot Activities - 2021.12.21~12.24 @@ -574,6 +580,7 @@ You are warmly welcome to submit questions in [discussions](https://github.com/P - Many thanks to [kslz](https://github.com/745165806) for supplementary Chinese documents. - Many thanks to [awmmmm](https://github.com/awmmmm) for contributing fastspeech2 aishell3 conformer pretrained model. - Many thanks to [phecda-xu](https://github.com/phecda-xu)/[PaddleDubbing](https://github.com/phecda-xu/PaddleDubbing) for developing a dubbing tool with GUI based on PaddleSpeech TTS model. +- Many thanks to [jerryuhoo](https://github.com/jerryuhoo)/[VTuberTalk](https://github.com/jerryuhoo/VTuberTalk) for developing a GUI tool based on PaddleSpeech TTS and code for making datasets from videos based on PaddleSpeech ASR. Besides, PaddleSpeech depends on a lot of open source repositories. See [references](./docs/source/reference.md) for more information. diff --git a/README_cn.md b/README_cn.md index 5c00637d..742ef062 100644 --- a/README_cn.md +++ b/README_cn.md @@ -150,6 +150,12 @@ from https://github.com/18F/open-source-guide/blob/18f-pages/pages/making-readme - [PaddleSpeech 示例视频](https://paddlespeech.readthedocs.io/en/latest/demo_video.html) +- **[VTuberTalk](https://github.com/JiehangXie/PaddleBoBo): 使用 PaddleSpeech 的语音合成和语音识别从视频中克隆人声。** + +
+ +
+ ### 🔥 热门活动 - 2021.12.21~12.24 @@ -569,6 +575,7 @@ year={2021} - 非常感谢 [kslz](https://github.com/kslz) 补充中文文档。 - 非常感谢 [awmmmm](https://github.com/awmmmm) 提供 fastspeech2 aishell3 conformer 预训练模型。 - 非常感谢 [phecda-xu](https://github.com/phecda-xu)/[PaddleDubbing](https://github.com/phecda-xu/PaddleDubbing) 基于 PaddleSpeech 的 TTS 模型搭建带 GUI 操作界面的配音工具。 +- 非常感谢 [jerryuhoo](https://github.com/jerryuhoo)/[VTuberTalk](https://github.com/jerryuhoo/VTuberTalk) 基于 PaddleSpeech 的 TTS GUI 界面和基于 ASR 制作数据集的相关代码。 此外,PaddleSpeech 依赖于许多开源存储库。有关更多信息,请参阅 [references](./docs/source/reference.md)。 From a848f408f0a345e09d4b553ecccae16b447fa328 Mon Sep 17 00:00:00 2001 From: Jerryuhoo Date: Wed, 2 Mar 2022 11:14:39 +0800 Subject: [PATCH 39/39] Update readme, test=doc --- README.md | 2 +- README_cn.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 7f95abac..46f492e9 100644 --- a/README.md +++ b/README.md @@ -148,7 +148,7 @@ For more synthesized audios, please refer to [PaddleSpeech Text-to-Speech sample - [PaddleSpeech Demo Video](https://paddlespeech.readthedocs.io/en/latest/demo_video.html) -- **[VTuberTalk](https://github.com/JiehangXie/PaddleBoBo): Use PaddleSpeech TTS and ASR to clone voice from videos.** +- **[VTuberTalk](https://github.com/jerryuhoo/VTuberTalk): Use PaddleSpeech TTS and ASR to clone voice from videos.**
diff --git a/README_cn.md b/README_cn.md index 742ef062..e8494737 100644 --- a/README_cn.md +++ b/README_cn.md @@ -150,7 +150,7 @@ from https://github.com/18F/open-source-guide/blob/18f-pages/pages/making-readme - [PaddleSpeech 示例视频](https://paddlespeech.readthedocs.io/en/latest/demo_video.html) -- **[VTuberTalk](https://github.com/JiehangXie/PaddleBoBo): 使用 PaddleSpeech 的语音合成和语音识别从视频中克隆人声。** +- **[VTuberTalk](https://github.com/jerryuhoo/VTuberTalk): 使用 PaddleSpeech 的语音合成和语音识别从视频中克隆人声。**
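Closing out the series, the SpeedySpeech fix in PATCH 36 above hinges on one rule of `paddle.jit.to_static`: entries in `input_spec` bind to the `forward()` parameters positionally, so an input left dynamic with `None` must come after every parameter that receives a spec. A toy sketch of the pattern, with an illustrative module that is not the real model:

```python
import paddle
from paddle.static import InputSpec

class Toy(paddle.nn.Layer):
    # spk_id now precedes durations, matching the spec order below
    def forward(self, phones, tones, spk_id=None, durations=None):
        out = phones.astype("float32") + tones.astype("float32")
        if spk_id is not None:
            out = out + spk_id.astype("float32")  # broadcast the speaker id
        return out

static_toy = paddle.jit.to_static(
    Toy(),
    input_spec=[
        InputSpec([-1], dtype=paddle.int64),  # phones
        InputSpec([-1], dtype=paddle.int64),  # tones
        InputSpec([1], dtype=paddle.int64),   # spk_id
        None,                                 # durations stays a Python default
    ])
```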