From 783b98603887578ae3223a9164d9999c4ba8b0e3 Mon Sep 17 00:00:00 2001
From: YangZhou <56786796+SmileGoat@users.noreply.github.com>
Date: Fri, 16 Sep 2022 22:38:56 +0800
Subject: [PATCH] Revert "[audio]merge develop"
---
.github/ISSUE_TEMPLATE/bug-report-tts.md | 42 --
.../{bug-report-s2t.md => bug_report.md} | 10 +-
.github/ISSUE_TEMPLATE/feature-request.md | 19 -
.github/ISSUE_TEMPLATE/others.md | 15 -
.github/ISSUE_TEMPLATE/question.md | 19 -
CHANGELOG.md | 66 ++
README.md | 367 +++--------
README_cn.md | 202 ++----
demos/audio_searching/README.md | 6 -
demos/audio_searching/src/operations/load.py | 5 +-
demos/metaverse/README.md | 2 -
demos/metaverse/README_cn.md | 27 -
demos/speaker_verification/README.md | 1 -
demos/speaker_verification/README_cn.md | 1 -
demos/speech_server/conf/application.yaml | 4 +-
demos/speech_web/API.md | 2 +-
demos/speech_web/speech_server/main.py | 160 ++---
.../speech_web/speech_server/requirements.txt | 11 +-
.../speech_server/src/AudioManeger.py | 87 ++-
.../speech_server/src/SpeechBase/asr.py | 18 +-
.../speech_server/src/SpeechBase/nlp.py | 18 +-
.../src/SpeechBase/sql_helper.py | 56 +-
.../speech_server/src/SpeechBase/tts.py | 92 +--
.../speech_server/src/SpeechBase/vpr.py | 54 +-
.../src/SpeechBase/vpr_encode.py | 9 +-
.../speech_server/src/WebsocketManeger.py | 3 +-
demos/speech_web/speech_server/src/robot.py | 44 +-
demos/speech_web/speech_server/src/util.py | 17 +-
demos/story_talker/README.md | 2 -
demos/story_talker/README_cn.md | 20 -
.../conf/application.yaml | 1 -
.../local/rtf_from_log.py | 2 +-
.../conf/tts_online_application.yaml | 3 +-
.../conf/tts_online_ws_application.yaml | 3 +-
demos/style_fs2/README.md | 2 -
demos/style_fs2/README_cn.md | 33 -
demos/text_to_speech/README.md | 130 ++--
demos/text_to_speech/README_cn.md | 136 ++--
docs/requirements.txt | 38 +-
docs/source/conf.py | 5 +-
docs/source/released_model.md | 8 +-
docs/source/tts/quick_start.md | 4 +-
docs/source/tts/quick_start_cn.md | 4 +-
docs/source/tts/tts_papers.md | 1 -
docs/tutorial/tts/tts_tutorial.ipynb | 2 +-
examples/aishell/asr0/README.md | 4 +-
examples/aishell3/README.md | 6 +-
examples/aishell3/ernie_sat/README.md | 152 +----
examples/aishell3/ernie_sat/conf/default.yaml | 9 +-
.../aishell3/ernie_sat/local/synthesize.sh | 23 +-
.../ernie_sat/local/synthesize_e2e.sh | 52 --
examples/aishell3/ernie_sat/local/train.sh | 4 +-
examples/aishell3/ernie_sat/run.sh | 8 +-
examples/aishell3/tts3/README.md | 17 +-
.../aishell3/tts3/local/synthesize_e2e.sh | 6 +-
examples/aishell3/tts3/run.sh | 4 +-
examples/aishell3/vc1/README.md | 4 +-
examples/aishell3/vc2/README.md | 126 ----
examples/aishell3/vc2/conf/default.yaml | 104 ---
examples/aishell3/vc2/local/preprocess.sh | 85 ---
examples/aishell3/vc2/local/synthesize.sh | 22 -
examples/aishell3/vc2/local/train.sh | 13 -
examples/aishell3/vc2/local/voice_cloning.sh | 23 -
examples/aishell3/vc2/path.sh | 13 -
examples/aishell3/vc2/run.sh | 39 --
examples/aishell3/vits-vc/README.md | 154 -----
examples/aishell3/vits-vc/conf/default.yaml | 185 ------
examples/aishell3/vits-vc/local/preprocess.sh | 79 ---
examples/aishell3/vits-vc/local/synthesize.sh | 19 -
examples/aishell3/vits-vc/local/train.sh | 18 -
.../aishell3/vits-vc/local/voice_cloning.sh | 22 -
examples/aishell3/vits-vc/path.sh | 13 -
examples/aishell3/vits-vc/run.sh | 45 --
examples/aishell3/vits/README.md | 202 ------
examples/aishell3/vits/conf/default.yaml | 184 ------
examples/aishell3/vits/local/preprocess.sh | 69 --
examples/aishell3/vits/local/synthesize.sh | 19 -
.../aishell3/vits/local/synthesize_e2e.sh | 24 -
examples/aishell3/vits/local/train.sh | 18 -
examples/aishell3/vits/run.sh | 36 -
examples/aishell3_vctk/README.md | 1 -
examples/aishell3_vctk/ernie_sat/README.md | 164 +----
.../aishell3_vctk/ernie_sat/conf/default.yaml | 9 +-
.../ernie_sat/local/synthesize.sh | 23 +-
.../ernie_sat/local/synthesize_e2e.sh | 53 --
.../aishell3_vctk/ernie_sat/local/train.sh | 2 +-
examples/aishell3_vctk/ernie_sat/run.sh | 8 +-
examples/csmsc/README.md | 2 +-
examples/csmsc/tts2/run.sh | 4 +-
examples/csmsc/tts3/run.sh | 4 +-
examples/csmsc/tts3/run_cnndecoder.sh | 8 +-
examples/csmsc/vits/run.sh | 2 +-
examples/ernie_sat/.meta/framework.png | Bin 0 -> 143263 bytes
examples/ernie_sat/README.md | 137 ++++
examples/ernie_sat/local/align.py | 454 +++++++++++++
examples/ernie_sat/local/inference.py | 609 +++++++++++++++++
examples/ernie_sat/local/inference_new.py | 622 ++++++++++++++++++
examples/ernie_sat/local/sedit_arg_parser.py | 97 +++
examples/ernie_sat/local/utils.py | 175 +++++
examples/{aishell3/vits => ernie_sat}/path.sh | 4 +-
examples/ernie_sat/prompt/dev/text | 3 +
examples/ernie_sat/prompt/dev/wav.scp | 3 +
examples/ernie_sat/run_clone_en_to_zh.sh | 27 +
examples/ernie_sat/run_clone_en_to_zh_new.sh | 27 +
examples/ernie_sat/run_gen_en.sh | 26 +
examples/ernie_sat/run_gen_en_new.sh | 26 +
examples/ernie_sat/run_sedit_en.sh | 27 +
examples/ernie_sat/run_sedit_en_new.sh | 27 +
examples/ernie_sat/test_run.sh | 6 +
examples/ernie_sat/test_run_new.sh | 6 +
examples/ernie_sat/tools/.gitkeep | 0
examples/hey_snips/kws0/conf/mdtc.yaml | 4 +-
examples/iwslt2012/punc0/README.md | 54 +-
examples/iwslt2012/punc0/local/preprocess.py | 29 -
examples/ljspeech/README.md | 2 +-
examples/ljspeech/tts3/run.sh | 4 +-
examples/other/g2p/README.md | 9 +-
examples/other/g2p/compare_badcase.py | 66 --
examples/other/tn/README.md | 3 -
examples/other/tts_finetune/tts3/README.md | 223 -------
examples/other/tts_finetune/tts3/finetune.py | 214 ------
.../other/tts_finetune/tts3/finetune.yaml | 12 -
.../tts_finetune/tts3/local/check_oov.py | 125 ----
.../other/tts_finetune/tts3/local/extract.py | 286 --------
.../tts_finetune/tts3/local/label_process.py | 63 --
.../tts_finetune/tts3/local/prepare_env.py | 35 -
.../other/tts_finetune/tts3/local/train.py | 178 -----
examples/other/tts_finetune/tts3/path.sh | 13 -
examples/other/tts_finetune/tts3/run.sh | 63 --
examples/vctk/README.md | 3 +-
examples/vctk/ernie_sat/README.md | 153 +----
examples/vctk/ernie_sat/conf/default.yaml | 7 +-
examples/vctk/ernie_sat/local/synthesize.sh | 26 +-
.../vctk/ernie_sat/local/synthesize_e2e.sh | 52 --
examples/vctk/ernie_sat/local/train.sh | 2 +-
examples/vctk/ernie_sat/run.sh | 8 +-
examples/vctk/tts3/README.md | 16 +-
examples/vctk/tts3/run.sh | 4 +-
examples/voxceleb/sv0/README.md | 2 +-
examples/voxceleb/sv0/RESULT.md | 4 -
examples/wenetspeech/asr1/RESULTS.md | 12 -
examples/zh_en_tts/tts3/README.md | 16 +-
examples/zh_en_tts/tts3/run.sh | 4 +-
paddlespeech/audio/__init__.py | 13 +-
paddlespeech/audio/streamdata/__init__.py | 125 ++--
paddlespeech/audio/streamdata/autodecode.py | 19 +-
paddlespeech/audio/streamdata/cache.py | 63 +-
paddlespeech/audio/streamdata/compat.py | 68 +-
.../audio/streamdata/extradatasets.py | 13 +-
paddlespeech/audio/streamdata/filters.py | 256 +++----
paddlespeech/audio/streamdata/gopen.py | 62 +-
paddlespeech/audio/streamdata/handlers.py | 5 +-
paddlespeech/audio/streamdata/mix.py | 9 +-
paddlespeech/audio/streamdata/paddle_utils.py | 14 +-
paddlespeech/audio/streamdata/pipeline.py | 14 +-
paddlespeech/audio/streamdata/shardlists.py | 77 +--
paddlespeech/audio/streamdata/tariterators.py | 81 ++-
paddlespeech/audio/streamdata/utils.py | 32 +-
paddlespeech/audio/streamdata/writer.py | 77 ++-
paddlespeech/audio/text/text_featurizer.py | 2 +-
paddlespeech/audio/transform/perturb.py | 11 +-
paddlespeech/audio/transform/spec_augment.py | 1 -
paddlespeech/cli/asr/infer.py | 20 +-
paddlespeech/cli/base_commands.py | 2 -
paddlespeech/cli/executor.py | 2 +-
paddlespeech/cli/vector/infer.py | 69 +-
paddlespeech/resource/pretrained_models.py | 10 +-
paddlespeech/s2t/__init__.py | 1 -
.../s2t/exps/deepspeech2/bin/test_wav.py | 65 +-
paddlespeech/s2t/exps/u2/model.py | 23 +-
paddlespeech/s2t/exps/u2_kaldi/model.py | 26 +-
paddlespeech/s2t/exps/u2_st/model.py | 19 +-
paddlespeech/s2t/io/dataloader.py | 145 ++--
paddlespeech/s2t/models/u2/u2.py | 2 +-
paddlespeech/s2t/models/u2_st/u2_st.py | 13 +-
paddlespeech/s2t/modules/align.py | 39 +-
paddlespeech/s2t/modules/attention.py | 24 +-
.../s2t/modules/conformer_convolution.py | 18 +-
paddlespeech/s2t/modules/encoder.py | 31 +-
paddlespeech/s2t/modules/encoder_layer.py | 4 +-
paddlespeech/s2t/modules/initializer.py | 2 +-
paddlespeech/s2t/training/trainer.py | 12 +-
.../server/bin/paddlespeech_server.py | 4 +-
paddlespeech/server/conf/application.yaml | 2 -
.../server/engine/asr/online/ctc_endpoint.py | 6 +-
.../engine/asr/online/onnx/asr_engine.py | 2 +-
.../asr/online/paddleinference/asr_engine.py | 2 +-
.../engine/asr/online/python/asr_engine.py | 16 +-
.../server/engine/asr/python/asr_engine.py | 9 +-
paddlespeech/server/engine/engine_warmup.py | 4 +-
.../engine/vector/python/vector_engine.py | 3 +-
paddlespeech/t2s/datasets/am_batch_fn.py | 229 +++++--
paddlespeech/t2s/datasets/sampler.py | 182 -----
paddlespeech/t2s/exps/ernie_sat/align.py | 19 +-
paddlespeech/t2s/exps/ernie_sat/normalize.py | 2 +-
paddlespeech/t2s/exps/ernie_sat/preprocess.py | 2 +-
.../t2s/exps/ernie_sat/synthesize_e2e.py | 387 ++++-------
paddlespeech/t2s/exps/ernie_sat/train.py | 4 +-
paddlespeech/t2s/exps/ernie_sat/utils.py | 11 +-
.../t2s/exps/fastspeech2/vc2_infer.py | 70 --
paddlespeech/t2s/exps/syn_utils.py | 12 +-
paddlespeech/t2s/exps/vits/__init__.py | 13 -
paddlespeech/t2s/exps/vits/synthesize.py | 40 +-
paddlespeech/t2s/exps/vits/synthesize_e2e.py | 23 +-
paddlespeech/t2s/exps/vits/train.py | 37 +-
paddlespeech/t2s/exps/vits/voice_cloning.py | 213 ------
paddlespeech/t2s/exps/voice_cloning.py | 126 ++--
paddlespeech/t2s/frontend/g2pw/__init__.py | 1 +
paddlespeech/t2s/frontend/g2pw/dataset.py | 10 +-
paddlespeech/t2s/frontend/g2pw/onnx_api.py | 63 +-
paddlespeech/t2s/frontend/mix_frontend.py | 151 +----
paddlespeech/t2s/frontend/polyphonic.yaml | 25 +-
paddlespeech/t2s/frontend/tone_sandhi.py | 75 +--
paddlespeech/t2s/frontend/zh_frontend.py | 47 +-
.../t2s/frontend/zh_normalization/num.py | 2 +-
paddlespeech/t2s/models/ernie_sat/__init__.py | 1 +
.../t2s/models/ernie_sat/ernie_sat.py | 4 +-
paddlespeech/t2s/models/ernie_sat/mlm.py | 579 ++++++++++++++++
paddlespeech/t2s/models/vits/generator.py | 76 ---
paddlespeech/t2s/models/vits/vits.py | 42 +-
paddlespeech/t2s/models/vits/vits_updater.py | 4 -
.../t2s/training/updaters/standard_updater.py | 4 +-
setup.py | 13 +-
.../ds2_ol/onnx/local/onnx_infer_shape.py | 39 +-
tests/benchmark/pwgan/run_benchmark.sh | 1 -
tests/test_tipc/barrier.sh | 10 -
tests/test_tipc/benchmark_train.sh | 1 -
tests/test_tipc/prepare.sh | 20 +-
tools/get_contributors.ipynb | 146 ----
229 files changed, 4813 insertions(+), 7188 deletions(-)
delete mode 100644 .github/ISSUE_TEMPLATE/bug-report-tts.md
rename .github/ISSUE_TEMPLATE/{bug-report-s2t.md => bug_report.md} (86%)
delete mode 100644 .github/ISSUE_TEMPLATE/feature-request.md
delete mode 100644 .github/ISSUE_TEMPLATE/others.md
delete mode 100644 .github/ISSUE_TEMPLATE/question.md
create mode 100644 CHANGELOG.md
delete mode 100644 demos/metaverse/README_cn.md
delete mode 100644 demos/story_talker/README_cn.md
delete mode 100644 demos/style_fs2/README_cn.md
delete mode 100755 examples/aishell3/ernie_sat/local/synthesize_e2e.sh
delete mode 100644 examples/aishell3/vc2/README.md
delete mode 100644 examples/aishell3/vc2/conf/default.yaml
delete mode 100755 examples/aishell3/vc2/local/preprocess.sh
delete mode 100755 examples/aishell3/vc2/local/synthesize.sh
delete mode 100755 examples/aishell3/vc2/local/train.sh
delete mode 100755 examples/aishell3/vc2/local/voice_cloning.sh
delete mode 100755 examples/aishell3/vc2/path.sh
delete mode 100755 examples/aishell3/vc2/run.sh
delete mode 100644 examples/aishell3/vits-vc/README.md
delete mode 100644 examples/aishell3/vits-vc/conf/default.yaml
delete mode 100755 examples/aishell3/vits-vc/local/preprocess.sh
delete mode 100755 examples/aishell3/vits-vc/local/synthesize.sh
delete mode 100755 examples/aishell3/vits-vc/local/train.sh
delete mode 100755 examples/aishell3/vits-vc/local/voice_cloning.sh
delete mode 100755 examples/aishell3/vits-vc/path.sh
delete mode 100755 examples/aishell3/vits-vc/run.sh
delete mode 100644 examples/aishell3/vits/README.md
delete mode 100644 examples/aishell3/vits/conf/default.yaml
delete mode 100755 examples/aishell3/vits/local/preprocess.sh
delete mode 100755 examples/aishell3/vits/local/synthesize.sh
delete mode 100755 examples/aishell3/vits/local/synthesize_e2e.sh
delete mode 100755 examples/aishell3/vits/local/train.sh
delete mode 100755 examples/aishell3/vits/run.sh
delete mode 100755 examples/aishell3_vctk/ernie_sat/local/synthesize_e2e.sh
create mode 100644 examples/ernie_sat/.meta/framework.png
create mode 100644 examples/ernie_sat/README.md
create mode 100755 examples/ernie_sat/local/align.py
create mode 100644 examples/ernie_sat/local/inference.py
create mode 100644 examples/ernie_sat/local/inference_new.py
create mode 100644 examples/ernie_sat/local/sedit_arg_parser.py
create mode 100644 examples/ernie_sat/local/utils.py
rename examples/{aishell3/vits => ernie_sat}/path.sh (68%)
create mode 100644 examples/ernie_sat/prompt/dev/text
create mode 100644 examples/ernie_sat/prompt/dev/wav.scp
create mode 100755 examples/ernie_sat/run_clone_en_to_zh.sh
create mode 100755 examples/ernie_sat/run_clone_en_to_zh_new.sh
create mode 100755 examples/ernie_sat/run_gen_en.sh
create mode 100755 examples/ernie_sat/run_gen_en_new.sh
create mode 100755 examples/ernie_sat/run_sedit_en.sh
create mode 100755 examples/ernie_sat/run_sedit_en_new.sh
create mode 100755 examples/ernie_sat/test_run.sh
create mode 100755 examples/ernie_sat/test_run_new.sh
create mode 100644 examples/ernie_sat/tools/.gitkeep
delete mode 100644 examples/iwslt2012/punc0/local/preprocess.py
delete mode 100644 examples/other/g2p/compare_badcase.py
delete mode 100644 examples/other/tts_finetune/tts3/README.md
delete mode 100644 examples/other/tts_finetune/tts3/finetune.py
delete mode 100644 examples/other/tts_finetune/tts3/finetune.yaml
delete mode 100644 examples/other/tts_finetune/tts3/local/check_oov.py
delete mode 100644 examples/other/tts_finetune/tts3/local/extract.py
delete mode 100644 examples/other/tts_finetune/tts3/local/label_process.py
delete mode 100644 examples/other/tts_finetune/tts3/local/prepare_env.py
delete mode 100644 examples/other/tts_finetune/tts3/local/train.py
delete mode 100755 examples/other/tts_finetune/tts3/path.sh
delete mode 100755 examples/other/tts_finetune/tts3/run.sh
delete mode 100755 examples/vctk/ernie_sat/local/synthesize_e2e.sh
delete mode 100644 paddlespeech/t2s/datasets/sampler.py
delete mode 100644 paddlespeech/t2s/exps/fastspeech2/vc2_infer.py
delete mode 100644 paddlespeech/t2s/exps/vits/__init__.py
delete mode 100644 paddlespeech/t2s/exps/vits/voice_cloning.py
create mode 100644 paddlespeech/t2s/models/ernie_sat/mlm.py
delete mode 100644 tests/test_tipc/barrier.sh
mode change 100755 => 100644 tests/test_tipc/prepare.sh
delete mode 100644 tools/get_contributors.ipynb
diff --git a/.github/ISSUE_TEMPLATE/bug-report-tts.md b/.github/ISSUE_TEMPLATE/bug-report-tts.md
deleted file mode 100644
index 64b33c32e..000000000
--- a/.github/ISSUE_TEMPLATE/bug-report-tts.md
+++ /dev/null
@@ -1,42 +0,0 @@
----
-name: "\U0001F41B TTS Bug Report"
-about: Create a report to help us improve
-title: "[TTS]XXXX"
-labels: Bug, T2S
-assignees: yt605155624
-
----
-
-For support and discussions, please use our [Discourse forums](https://github.com/PaddlePaddle/DeepSpeech/discussions).
-
-If you've found a bug then please create an issue with the following information:
-
-**Describe the bug**
-A clear and concise description of what the bug is.
-
-**To Reproduce**
-Steps to reproduce the behavior:
-1. Go to '...'
-2. Click on '....'
-3. Scroll down to '....'
-4. See error
-
-**Expected behavior**
-A clear and concise description of what you expected to happen.
-
-**Screenshots**
-If applicable, add screenshots to help explain your problem.
-
-**Environment (please complete the following information):**
- - OS: [e.g. Ubuntu]
- - GCC/G++ Version [e.g. 8.3]
- - Python Version [e.g. 3.7]
- - PaddlePaddle Version [e.g. 2.0.0]
- - Model Version [e.g. 2.0.0]
- - GPU/DRIVER Informationo [e.g. Tesla V100-SXM2-32GB/440.64.00]
- - CUDA/CUDNN Version [e.g. cuda-10.2]
- - MKL Version
-- TensorRT Version
-
-**Additional context**
-Add any other context about the problem here.
diff --git a/.github/ISSUE_TEMPLATE/bug-report-s2t.md b/.github/ISSUE_TEMPLATE/bug_report.md
similarity index 86%
rename from .github/ISSUE_TEMPLATE/bug-report-s2t.md
rename to .github/ISSUE_TEMPLATE/bug_report.md
index 512cdbb01..b31d98631 100644
--- a/.github/ISSUE_TEMPLATE/bug-report-s2t.md
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -1,9 +1,9 @@
---
-name: "\U0001F41B S2T Bug Report"
+name: Bug report
about: Create a report to help us improve
-title: "[S2T]XXXX"
-labels: Bug, S2T
-assignees: zh794390558
+title: ''
+labels: ''
+assignees: ''
---
@@ -27,7 +27,7 @@ A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
-**Environment (please complete the following information):**
+** Environment (please complete the following information):**
- OS: [e.g. Ubuntu]
- GCC/G++ Version [e.g. 8.3]
- Python Version [e.g. 3.7]
diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md
deleted file mode 100644
index 10b0f3f02..000000000
--- a/.github/ISSUE_TEMPLATE/feature-request.md
+++ /dev/null
@@ -1,19 +0,0 @@
----
-name: "\U0001F680 Feature Request"
-about: As a user, I want to request a New Feature on the product.
-title: ''
-labels: feature request
-assignees: D-DanielYang, iftaken
-
----
-
-## Feature Request
-
-**Is your feature request related to a problem? Please describe:**
-
-
-**Describe the feature you'd like:**
-
-
-**Describe alternatives you've considered:**
-
diff --git a/.github/ISSUE_TEMPLATE/others.md b/.github/ISSUE_TEMPLATE/others.md
deleted file mode 100644
index e135a2689..000000000
--- a/.github/ISSUE_TEMPLATE/others.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-name: "\U0001F9E9 Others"
-about: Report any other non-support related issues.
-title: ''
-labels: ''
-assignees: ''
-
----
-
-## Others
-
-
diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md
deleted file mode 100644
index 445905c61..000000000
--- a/.github/ISSUE_TEMPLATE/question.md
+++ /dev/null
@@ -1,19 +0,0 @@
----
-name: "\U0001F914 Ask a Question"
-about: I want to ask a question.
-title: ''
-labels: Question
-assignees: ''
-
----
-
-## General Question
-
-
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 000000000..2782b8176
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,66 @@
+# Changelog
+
+Date: 2022-3-22, Author: yt605155624.
+Add features to: CLI:
+ - Support aishell3_hifigan、vctk_hifigan
+ - PRLink: https://github.com/PaddlePaddle/PaddleSpeech/pull/1587
+
+Date: 2022-3-09, Author: yt605155624.
+Add features to: T2S:
+ - Add ljspeech hifigan egs.
+ - PRLink: https://github.com/PaddlePaddle/PaddleSpeech/pull/1549
+
+Date: 2022-3-08, Author: yt605155624.
+Add features to: T2S:
+ - Add aishell3 hifigan egs.
+ - PRLink: https://github.com/PaddlePaddle/PaddleSpeech/pull/1545
+
+Date: 2022-3-08, Author: yt605155624.
+Add features to: T2S:
+ - Add vctk hifigan egs.
+ - PRLink: https://github.com/PaddlePaddle/PaddleSpeech/pull/1544
+
+Date: 2022-1-29, Author: yt605155624.
+Add features to: T2S:
+ - Update aishell3 vc0 with new Tacotron2.
+ - PRLink: https://github.com/PaddlePaddle/PaddleSpeech/pull/1419
+
+Date: 2022-1-29, Author: yt605155624.
+Add features to: T2S:
+ - Add ljspeech Tacotron2.
+ - PRLink: https://github.com/PaddlePaddle/PaddleSpeech/pull/1416
+
+Date: 2022-1-24, Author: yt605155624.
+Add features to: T2S:
+ - Add csmsc WaveRNN.
+ - PRLink: https://github.com/PaddlePaddle/PaddleSpeech/pull/1379
+
+Date: 2022-1-19, Author: yt605155624.
+Add features to: T2S:
+ - Add csmsc Tacotron2.
+ - PRLink: https://github.com/PaddlePaddle/PaddleSpeech/pull/1314
+
+
+Date: 2022-1-10, Author: Jackwaterveg.
+Add features to: CLI:
+ - Support English (librispeech/asr1/transformer).
+ - Support choosing `decode_method` for conformer and transformer models.
+ - Refactor the config, using the unified config.
+ - PRLink: https://github.com/PaddlePaddle/PaddleSpeech/pull/1297
+
+***
+
+Date: 2022-1-17, Author: Jackwaterveg.
+Add features to: CLI:
+ - Support deepspeech2 online/offline model(aishell).
+ - PRLink: https://github.com/PaddlePaddle/PaddleSpeech/pull/1356
+
+***
+
+Date: 2022-1-24, Author: Jackwaterveg.
+Add features to: ctc_decoders:
+ - Support online ctc prefix-beam search decoder.
+ - Unified ctc online decoder and ctc offline decoder.
+ - PRLink: https://github.com/PaddlePaddle/PaddleSpeech/pull/821
+
+***
diff --git a/README.md b/README.md
index 59c61f776..e35289e2b 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,4 @@
+
([简体中文](./README_cn.md)|English)
@@ -159,20 +160,15 @@ Via the easy-to-use, efficient, flexible and scalable implementation, our vision
- 🧩 *Cascaded models application*: as an extension of the typical traditional audio tasks, we combine the workflows of the aforementioned tasks with other fields like Natural language processing (NLP) and Computer Vision (CV).
### Recent Update
-- ⚡ 2022.08.25: Release TTS [finetune](./examples/other/tts_finetune/tts3) example.
-- 🔥 2022.08.22: Add ERNIE-SAT models: [ERNIE-SAT-vctk](./examples/vctk/ernie_sat)、[ERNIE-SAT-aishell3](./examples/aishell3/ernie_sat)、[ERNIE-SAT-zh_en](./examples/aishell3_vctk/ernie_sat).
-- 🔥 2022.08.15: Add [g2pW](https://github.com/GitYCC/g2pW) into TTS Chinese Text Frontend.
-- 🔥 2022.08.09: Release [Chinese English mixed TTS](./examples/zh_en_tts/tts3).
-- ⚡ 2022.08.03: Add ONNXRuntime infer for TTS CLI.
-- 🎉 2022.07.18: Release VITS: [VITS-csmsc](./examples/csmsc/vits)、[VITS-aishell3](./examples/aishell3/vits)、[VITS-VC](./examples/aishell3/vits-vc).
-- 🎉 2022.06.22: All TTS models support ONNX format.
-- 🍀 2022.06.17: Add [PaddleSpeech Web Demo](./demos/speech_web).
-- 👑 2022.05.13: Release [PP-ASR](./docs/source/asr/PPASR.md)、[PP-TTS](./docs/source/tts/PPTTS.md)、[PP-VPR](docs/source/vpr/PPVPR.md).
-- 👏🏻 2022.05.06: `PaddleSpeech Streaming Server` is available for `Streaming ASR` with `Punctuation Restoration` and `Token Timestamp` and `Text-to-Speech`.
-- 👏🏻 2022.05.06: `PaddleSpeech Server` is available for `Audio Classification`, `Automatic Speech Recognition` and `Text-to-Speech`, `Speaker Verification` and `Punctuation Restoration`.
-- 👏🏻 2022.03.28: `PaddleSpeech CLI` is available for `Speaker Verification`.
+- 👑 2022.05.13: Release [PP-ASR](./docs/source/asr/PPASR.md)、[PP-TTS](./docs/source/tts/PPTTS.md)、[PP-VPR](docs/source/vpr/PPVPR.md)
+- 👏🏻 2022.05.06: `Streaming ASR` with `Punctuation Restoration` and `Token Timestamp`.
+- 👏🏻 2022.05.06: `Server` is available for `Speaker Verification`, and `Punctuation Restoration`.
+- 👏🏻 2022.04.28: `Streaming Server` is available for `Automatic Speech Recognition` and `Text-to-Speech`.
+- 👏🏻 2022.03.28: `Server` is available for `Audio Classification`, `Automatic Speech Recognition` and `Text-to-Speech`.
+- 👏🏻 2022.03.28: `CLI` is available for `Speaker Verification`.
- 🤗 2021.12.14: [ASR](https://huggingface.co/spaces/KPatrick/PaddleSpeechASR) and [TTS](https://huggingface.co/spaces/KPatrick/PaddleSpeechTTS) Demos on Hugging Face Spaces are available!
-- 👏🏻 2021.12.10: `PaddleSpeech CLI` is available for `Audio Classification`, `Automatic Speech Recognition`, `Speech Translation (English to Chinese)` and `Text-to-Speech`.
+- 👏🏻 2021.12.10: `CLI` is available for `Audio Classification`, `Automatic Speech Recognition`, `Speech Translation (English to Chinese)` and `Text-to-Speech`.
+
### Community
- Scan the QR code below with your Wechat, you can access to official technical exchange group and get the bonus ( more than 20GB learning materials, such as papers, codes and videos ) and the live link of the lessons. Look forward to your participation.
@@ -184,191 +180,62 @@ Via the easy-to-use, efficient, flexible and scalable implementation, our vision
## Installation
We strongly recommend our users to install PaddleSpeech in **Linux** with *python>=3.7* and *paddlepaddle>=2.3.1*.
-
-### **Dependency Introduction**
-
-+ gcc >= 4.8.5
-+ paddlepaddle >= 2.3.1
-+ python >= 3.7
-+ OS support: Linux(recommend), Windows, Mac OSX
-
-PaddleSpeech depends on paddlepaddle. For installation, please refer to the official website of [paddlepaddle](https://www.paddlepaddle.org.cn/en) and choose according to your own machine. Here is an example of the cpu version.
-
-```bash
-pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple
-```
-
-There are two quick installation methods for PaddleSpeech, one is pip installation, and the other is source code compilation (recommended).
-### pip install
-
-```shell
-pip install pytest-runner
-pip install paddlespeech
-```
-
-### source code compilation
-
-```shell
-git clone https://github.com/PaddlePaddle/PaddleSpeech.git
-cd PaddleSpeech
-pip install pytest-runner
-pip install .
-```
-
-For more installation problems, such as conda environment, librosa-dependent, gcc problems, kaldi installation, etc., you can refer to this [installation document](./docs/source/install.md). If you encounter problems during installation, you can leave a message on [#2150](https://github.com/PaddlePaddle/PaddleSpeech/issues/2150) and find related problems
+Up to now, **Linux** supports CLI for the all our tasks, **Mac OSX** and **Windows** only supports PaddleSpeech CLI for Audio Classification, Speech-to-Text and Text-to-Speech. To install `PaddleSpeech`, please see [installation](./docs/source/install.md).
## Quick Start
-Developers can have a try of our models with [PaddleSpeech Command Line](./paddlespeech/cli/README.md) or Python. Change `--input` to test your own audio/text and support 16k wav format audio.
-
-**You can also quickly experience it in AI Studio 👉🏻 [PaddleSpeech API Demo](https://aistudio.baidu.com/aistudio/projectdetail/4353348?sUid=2470186&shared=1&ts=1660876445786)**
-
-
-Test audio sample download
+Developers can have a try of our models with [PaddleSpeech Command Line](./paddlespeech/cli/README.md). Change `--input` to test your own audio/text.
+**Audio Classification**
```shell
-wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav
-wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/en.wav
+paddlespeech cls --input input.wav
```
-### Automatic Speech Recognition
-
- (Click to expand)Open Source Speech Recognition
-
-**command line experience**
-
-```shell
-paddlespeech asr --lang zh --input zh.wav
+**Speaker Verification**
```
-
-**Python API experience**
-
-```python
->>> from paddlespeech.cli.asr.infer import ASRExecutor
->>> asr = ASRExecutor()
->>> result = asr(audio_file="zh.wav")
->>> print(result)
-我认为跑步最重要的就是给我带来了身体健康
+paddlespeech vector --task spk --input input_16k.wav
```
-
-
-### Text-to-Speech
-
- Open Source Speech Synthesis
-
-Output 24k sample rate wav format audio
-
-
-**command line experience**
+**Automatic Speech Recognition**
```shell
-paddlespeech tts --input "你好,欢迎使用百度飞桨深度学习框架!" --output output.wav
+paddlespeech asr --lang zh --input input_16k.wav
```
+- web demo for Automatic Speech Recognition is integrated to [Huggingface Spaces](https://huggingface.co/spaces) with [Gradio](https://github.com/gradio-app/gradio). See Demo: [ASR Demo](https://huggingface.co/spaces/KPatrick/PaddleSpeechASR)
-**Python API experience**
-
-```python
->>> from paddlespeech.cli.tts.infer import TTSExecutor
->>> tts = TTSExecutor()
->>> tts(text="今天天气十分不错。", output="output.wav")
-```
-- You can experience in [Huggingface Spaces](https://huggingface.co/spaces) [TTS Demo](https://huggingface.co/spaces/KPatrick/PaddleSpeechTTS)
-
-
-
-### Audio Classification
-
- An open-domain sound classification tool
-
-Sound classification model based on 527 categories of AudioSet dataset
-
-**command line experience**
-
+**Speech Translation** (English to Chinese)
+(not support for Mac and Windows now)
```shell
-paddlespeech cls --input zh.wav
+paddlespeech st --input input_16k.wav
```
-**Python API experience**
-
-```python
->>> from paddlespeech.cli.cls.infer import CLSExecutor
->>> cls = CLSExecutor()
->>> result = cls(audio_file="zh.wav")
->>> print(result)
-Speech 0.9027186632156372
-```
-
-
-
-### Voiceprint Extraction
-
- Industrial-grade voiceprint extraction tool
-
-**command line experience**
-
+**Text-to-Speech**
```shell
-paddlespeech vector --task spk --input zh.wav
+paddlespeech tts --input "你好,欢迎使用飞桨深度学习框架!" --output output.wav
```
+- web demo for Text to Speech is integrated to [Huggingface Spaces](https://huggingface.co/spaces) with [Gradio](https://github.com/gradio-app/gradio). See Demo: [TTS Demo](https://huggingface.co/spaces/KPatrick/PaddleSpeechTTS)
-**Python API experience**
+**Text Postprocessing**
+- Punctuation Restoration
+ ```bash
+ paddlespeech text --task punc --input 今天的天气真不错啊你下午有空吗我想约你一起去吃饭
+ ```
-```python
->>> from paddlespeech.cli.vector import VectorExecutor
->>> vec = VectorExecutor()
->>> result = vec(audio_file="zh.wav")
->>> print(result) # 187维向量
-[ -0.19083306 9.474295 -14.122263 -2.0916545 0.04848729
- 4.9295826 1.4780062 0.3733844 10.695862 3.2697146
- -4.48199 -0.6617882 -9.170393 -11.1568775 -1.2358263 ...]
+**Batch Process**
```
-
-
-
-### Punctuation Restoration
-
- Quick recovery of text punctuation, works with ASR models
-
-**command line experience**
-
-```shell
-paddlespeech text --task punc --input 今天的天气真不错啊你下午有空吗我想约你一起去吃饭
+echo -e "1 欢迎光临。\n2 谢谢惠顾。" | paddlespeech tts
```
-**Python API experience**
-
-```python
->>> from paddlespeech.cli.text.infer import TextExecutor
->>> text_punc = TextExecutor()
->>> result = text_punc(text="今天的天气真不错啊你下午有空吗我想约你一起去吃饭")
-今天的天气真不错啊!你下午有空吗?我想约你一起去吃饭。
+**Shell Pipeline**
+- ASR + Punctuation Restoration
```
-
-
-
-### Speech Translation
-
- End-to-end English to Chinese Speech Translation Tool
-
-Use pre-compiled kaldi related tools, only support experience in Ubuntu system
-
-**command line experience**
-
-```shell
-paddlespeech st --input en.wav
+paddlespeech asr --input ./zh.wav | paddlespeech text --task punc
```
-**Python API experience**
-
-```python
->>> from paddlespeech.cli.st.infer import STExecutor
->>> st = STExecutor()
->>> result = st(audio_file="en.wav")
-['我 在 这栋 建筑 的 古老 门上 敲门 。']
-```
+For more command lines, please see: [demos](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/demos)
-
+If you want to try more functions like training and tuning, please have a look at [Speech-to-Text Quick Start](./docs/source/asr/quick_start.md) and [Text-to-Speech Quick Start](./docs/source/tts/quick_start.md).
@@ -376,12 +243,10 @@ paddlespeech st --input en.wav
Developers can have a try of our speech server with [PaddleSpeech Server Command Line](./paddlespeech/server/README.md).
-**You can try it quickly in AI Studio (recommend): [SpeechServer](https://aistudio.baidu.com/aistudio/projectdetail/4354592?sUid=2470186&shared=1&ts=1660877827034)**
-
**Start server**
```shell
-paddlespeech_server start --config_file ./demos/speech_server/conf/application.yaml
+paddlespeech_server start --config_file ./paddlespeech/server/conf/application.yaml
```
**Access Speech Recognition Services**
@@ -539,7 +404,7 @@ PaddleSpeech supports a series of most popular models. They are summarized in [r
- Acoustic Model |
+ Acoustic Model |
Tacotron2 |
LJSpeech / CSMSC |
@@ -562,16 +427,9 @@ PaddleSpeech supports a series of most popular models. They are summarized in [r
|
FastSpeech2 |
- LJSpeech / VCTK / CSMSC / AISHELL-3 / ZH_EN / finetune |
-
- fastspeech2-ljspeech / fastspeech2-vctk / fastspeech2-csmsc / fastspeech2-aishell3 / fastspeech2-zh_en / fastspeech2-finetune
- |
-
-
- ERNIE-SAT |
- VCTK / AISHELL-3 / ZH_EN |
+ LJSpeech / VCTK / CSMSC / AISHELL-3 |
- ERNIE-SAT-vctk / ERNIE-SAT-aishell3 / ERNIE-SAT-zh_en
+ fastspeech2-ljspeech / fastspeech2-vctk / fastspeech2-csmsc / fastspeech2-aishell3
|
@@ -604,61 +462,47 @@ PaddleSpeech supports a series of most popular models. They are summarized in [r
- HiFiGAN |
- LJSpeech / VCTK / CSMSC / AISHELL-3 |
+ HiFiGAN |
+ LJSpeech / VCTK / CSMSC / AISHELL-3 |
HiFiGAN-ljspeech / HiFiGAN-vctk / HiFiGAN-csmsc / HiFiGAN-aishell3
|
- WaveRNN |
- CSMSC |
+ WaveRNN |
+ CSMSC |
WaveRNN-csmsc
|
- Voice Cloning |
+ Voice Cloning |
GE2E |
Librispeech, etc. |
- GE2E
+ ge2e
|
- SV2TTS (GE2E + Tacotron2) |
+ GE2E + Tacotron2 |
AISHELL-3 |
- VC0
+ ge2e-tacotron2-aishell3
|
- SV2TTS (GE2E + FastSpeech2) |
+ GE2E + FastSpeech2 |
AISHELL-3 |
- VC1
+ ge2e-fastspeech2-aishell3
|
-
- SV2TTS (ECAPA-TDNN + FastSpeech2) |
- AISHELL-3 |
-
- VC2
- |
-
-
- GE2E + VITS |
- AISHELL-3 |
-
- VITS-VC
- |
-
-
+
End-to-End |
VITS |
- CSMSC / AISHELL-3 |
+ CSMSC |
- VITS-csmsc / VITS-aishell3
+ VITS-csmsc
|
@@ -818,79 +662,44 @@ You are warmly welcome to submit questions in [discussions](https://github.com/P
### Contributors
-[... grid of contributor avatar links, removed by this revert ...]
+[... previous, shorter grid of contributor avatar links, restored by this revert ...]
## Acknowledgement
-- Many thanks to [HighCWu](https://github.com/HighCWu) for adding [VITS-aishell3](./examples/aishell3/vits) and [VITS-VC](./examples/aishell3/vits-vc) examples.
-- Many thanks to [david-95](https://github.com/david-95) improved TTS, fixed multi-punctuation bug, and contributed to multiple program and data.
-- Many thanks to [BarryKCL](https://github.com/BarryKCL) improved TTS Chinses frontend based on [G2PW](https://github.com/GitYCC/g2pW).
+
+- Many thanks to [BarryKCL](https://github.com/BarryKCL) improved TTS Chinses frontend based on [G2PW](https://github.com/GitYCC/g2pW)
- Many thanks to [yeyupiaoling](https://github.com/yeyupiaoling)/[PPASR](https://github.com/yeyupiaoling/PPASR)/[PaddlePaddle-DeepSpeech](https://github.com/yeyupiaoling/PaddlePaddle-DeepSpeech)/[VoiceprintRecognition-PaddlePaddle](https://github.com/yeyupiaoling/VoiceprintRecognition-PaddlePaddle)/[AudioClassification-PaddlePaddle](https://github.com/yeyupiaoling/AudioClassification-PaddlePaddle) for years of attention, constructive advice and great help.
- Many thanks to [mymagicpower](https://github.com/mymagicpower) for the Java implementation of ASR upon [short](https://github.com/mymagicpower/AIAS/tree/main/3_audio_sdks/asr_sdk) and [long](https://github.com/mymagicpower/AIAS/tree/main/3_audio_sdks/asr_long_audio_sdk) audio files.
- Many thanks to [JiehangXie](https://github.com/JiehangXie)/[PaddleBoBo](https://github.com/JiehangXie/PaddleBoBo) for developing Virtual Uploader(VUP)/Virtual YouTuber(VTuber) with PaddleSpeech TTS function.
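
The reverted README keeps only the shell commands; the Python API calls behind them appear in the examples this patch removes above. A condensed sketch of that usage (signatures taken from those removed lines, file names are placeholders):

```python
# Sketch of the Python API shown in the README examples removed above.
# "zh.wav" and the output path are placeholders for your own files.
from paddlespeech.cli.asr.infer import ASRExecutor
from paddlespeech.cli.tts.infer import TTSExecutor

asr = ASRExecutor()
print(asr(audio_file="zh.wav"))  # Mandarin speech -> text

tts = TTSExecutor()
tts(text="你好,欢迎使用飞桨深度学习框架!", output="output.wav")  # text -> wav
```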
diff --git a/README_cn.md b/README_cn.md
index 070a656a2..1c6a949fd 100644
--- a/README_cn.md
+++ b/README_cn.md
@@ -1,3 +1,4 @@
+
(简体中文|[English](./README.md))
@@ -164,37 +165,13 @@
- 🧩 级联模型应用: 作为传统语音任务的扩展,我们结合了自然语言处理、计算机视觉等任务,实现更接近实际需求的产业级应用。
-### 近期活动
-
- ❗️重磅❗️飞桨智慧金融行业系列直播课
-✅ 覆盖智能风控、智能运维、智能营销、智能客服四大金融主流场景
-
-📆 9月6日-9月29日每周二、四19:00
-+ 智慧金融行业深入洞察
-+ 8节理论+实践精品直播课
-+ 10+真实产业场景范例教学及实践
-+ 更有免费算力+结业证书等礼品等你来拿
-扫码报名码住直播链接,与行业精英深度交流
-
-
-

-
-
### 近期更新
-- ⚡ 2022.08.25: 发布 TTS [finetune](./examples/other/tts_finetune/tts3) 示例。
-- 🔥 2022.08.22: 新增 ERNIE-SAT 模型: [ERNIE-SAT-vctk](./examples/vctk/ernie_sat)、[ERNIE-SAT-aishell3](./examples/aishell3/ernie_sat)、[ERNIE-SAT-zh_en](./examples/aishell3_vctk/ernie_sat)。
-- 🔥 2022.08.15: 将 [g2pW](https://github.com/GitYCC/g2pW) 引入 TTS 中文文本前端。
-- 🔥 2022.08.09: 发布[中英文混合 TTS](./examples/zh_en_tts/tts3)。
-- ⚡ 2022.08.03: TTS CLI 新增 ONNXRuntime 推理方式。
-- 🎉 2022.07.18: 发布 VITS 模型: [VITS-csmsc](./examples/csmsc/vits)、[VITS-aishell3](./examples/aishell3/vits)、[VITS-VC](./examples/aishell3/vits-vc)。
-- 🎉 2022.06.22: 所有 TTS 模型支持了 ONNX 格式。
-- 🍀 2022.06.17: 新增 [PaddleSpeech 网页应用](./demos/speech_web)。
+
- 👑 2022.05.13: PaddleSpeech 发布 [PP-ASR](./docs/source/asr/PPASR_cn.md) 流式语音识别系统、[PP-TTS](./docs/source/tts/PPTTS_cn.md) 流式语音合成系统、[PP-VPR](docs/source/vpr/PPVPR_cn.md) 全链路声纹识别系统
-- 👏🏻 2022.05.06: PaddleSpeech Streaming Server 上线!覆盖了语音识别(标点恢复、时间戳)和语音合成。
-- 👏🏻 2022.05.06: PaddleSpeech Server 上线!覆盖了声音分类、语音识别、语音合成、声纹识别,标点恢复。
-- 👏🏻 2022.03.28: PaddleSpeech CLI 覆盖声音分类、语音识别、语音翻译(英译中)、语音合成和声纹验证。
-- 🤗 2021.12.14: PaddleSpeech [ASR](https://huggingface.co/spaces/KPatrick/PaddleSpeechASR) 和 [TTS](https://huggingface.co/spaces/KPatrick/PaddleSpeechTTS) 可在 Hugging Face Spaces 上体验!
-- 👏🏻 2021.12.10: PaddleSpeech CLI 支持语音分类, 语音识别, 语音翻译(英译中)和语音合成。
+- 👏🏻 2022.05.06: PaddleSpeech Streaming Server 上线! 覆盖了语音识别(标点恢复、时间戳),和语音合成。
+- 👏🏻 2022.05.06: PaddleSpeech Server 上线! 覆盖了声音分类、语音识别、语音合成、声纹识别,标点恢复。
+- 👏🏻 2022.03.28: PaddleSpeech CLI 覆盖声音分类、语音识别、语音翻译(英译中)、语音合成,声纹验证。
+- 🤗 2021.12.14: PaddleSpeech [ASR](https://huggingface.co/spaces/KPatrick/PaddleSpeechASR) and [TTS](https://huggingface.co/spaces/KPatrick/PaddleSpeechTTS) Demos on Hugging Face Spaces are available!
### 🔥 加入技术交流群获取入群福利
@@ -219,13 +196,13 @@
+ python >= 3.7
+ linux(推荐), mac, windows
-PaddleSpeech 依赖于 paddlepaddle,安装可以参考[ paddlepaddle 官网](https://www.paddlepaddle.org.cn/),根据自己机器的情况进行选择。这里给出 cpu 版本示例,其它版本大家可以根据自己机器的情况进行安装。
+PaddleSpeech依赖于paddlepaddle,安装可以参考[paddlepaddle官网](https://www.paddlepaddle.org.cn/),根据自己机器的情况进行选择。这里给出cpu版本示例,其它版本大家可以根据自己机器的情况进行安装。
```shell
pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple
```
-PaddleSpeech 快速安装方式有两种,一种是 pip 安装,一种是源码编译(推荐)。
+PaddleSpeech快速安装方式有两种,一种是pip安装,一种是源码编译(推荐)。
### pip 安装
```shell
@@ -245,9 +222,10 @@ pip install .
## 快速开始
-安装完成后,开发者可以通过命令行或者 Python 快速开始,命令行模式下改变 `--input` 可以尝试用自己的音频或文本测试,支持 16k wav 格式音频。
-你也可以在 `aistudio` 中快速体验 👉🏻[一键预测,快速上手 Speech 开发任务](https://aistudio.baidu.com/aistudio/projectdetail/4353348?sUid=2470186&shared=1&ts=1660878142250)。
+安装完成后,开发者可以通过命令行或者Python快速开始,命令行模式下改变 `--input` 可以尝试用自己的音频或文本测试,支持16k wav格式音频。
+
+你也可以在`aistudio`中快速体验 👉🏻[PaddleSpeech API Demo ](https://aistudio.baidu.com/aistudio/projectdetail/4281335?shared=1)。
测试音频示例下载
```shell
@@ -303,7 +281,7 @@ Python API 一键预测
适配多场景的开放领域声音分类工具
-基于 AudioSet 数据集 527 个类别的声音分类模型
+基于AudioSet数据集527个类别的声音分类模型
命令行一键体验
@@ -372,7 +350,7 @@ Python API 一键预测
端到端英译中语音翻译工具
-使用预编译的 kaldi 相关工具,只支持在 Ubuntu 系统中体验
+使用预编译的kaldi相关工具,只支持在Ubuntu系统中体验
命令行一键体验
@@ -392,15 +370,14 @@ python API 一键预测
+
## 快速使用服务
-安装完成后,开发者可以通过命令行一键启动语音识别,语音合成,音频分类等多种服务。
-
-你可以在 AI Studio 中快速体验:[SpeechServer 一键部署](https://aistudio.baidu.com/aistudio/projectdetail/4354592?sUid=2470186&shared=1&ts=1660878208266)
+安装完成后,开发者可以通过命令行一键启动语音识别,语音合成,音频分类三种服务。
**启动服务**
```shell
-paddlespeech_server start --config_file ./demos/speech_server/conf/application.yaml
+paddlespeech_server start --config_file ./paddlespeech/server/conf/application.yaml
```
**访问语音识别服务**
@@ -552,7 +529,7 @@ PaddleSpeech 的 **语音合成** 主要包含三个模块:文本前端、声
- 声学模型 |
+ 声学模型 |
Tacotron2 |
LJSpeech / CSMSC |
@@ -575,16 +552,9 @@ PaddleSpeech 的 **语音合成** 主要包含三个模块:文本前端、声
|
FastSpeech2 |
- LJSpeech / VCTK / CSMSC / AISHELL-3 / ZH_EN / finetune |
-
- fastspeech2-ljspeech / fastspeech2-vctk / fastspeech2-csmsc / fastspeech2-aishell3 / fastspeech2-zh_en / fastspeech2-finetune
- |
-
-
- ERNIE-SAT |
- VCTK / AISHELL-3 / ZH_EN |
+ LJSpeech / VCTK / CSMSC / AISHELL-3 |
- ERNIE-SAT-vctk / ERNIE-SAT-aishell3 / ERNIE-SAT-zh_en
+ fastspeech2-ljspeech / fastspeech2-vctk / fastspeech2-csmsc / fastspeech2-aishell3
|
@@ -631,47 +601,34 @@ PaddleSpeech 的 **语音合成** 主要包含三个模块:文本前端、声
- 声音克隆 |
+ 声音克隆 |
GE2E |
Librispeech, etc. |
- GE2E
+ ge2e
|
- SV2TTS (GE2E + Tacotron2) |
+ GE2E + Tacotron2 |
AISHELL-3 |
- VC0
+ ge2e-tacotron2-aishell3
|
- SV2TTS (GE2E + FastSpeech2) |
+ GE2E + FastSpeech2 |
AISHELL-3 |
- VC1
+ ge2e-fastspeech2-aishell3
|
-
- SV2TTS (ECAPA-TDNN + FastSpeech2) |
- AISHELL-3 |
-
- VC2
- |
-
-
- GE2E + VITS |
- AISHELL-3 |
-
- VITS-VC
- |
端到端 |
VITS |
- CSMSC / AISHELL-3 |
+ CSMSC |
- VITS-csmsc / VITS-aishell3
+ VITS-csmsc
|
@@ -839,79 +796,44 @@ PaddleSpeech 的 **语音合成** 主要包含三个模块:文本前端、声
### 贡献者
-[... grid of contributor avatar links, removed by this revert ...]
+[... previous, shorter grid of contributor avatar links, restored by this revert ...]
## 致谢
-- 非常感谢 [HighCWu](https://github.com/HighCWu) 新增 [VITS-aishell3](./examples/aishell3/vits) 和 [VITS-VC](./examples/aishell3/vits-vc) 代码示例。
-- 非常感谢 [david-95](https://github.com/david-95) 修复句尾多标点符号出错的问题,贡献补充多条程序和数据。
-- 非常感谢 [BarryKCL](https://github.com/BarryKCL) 基于 [G2PW](https://github.com/GitYCC/g2pW) 对 TTS 中文文本前端的优化。
+
+- 非常感谢 [BarryKCL](https://github.com/BarryKCL)基于[G2PW](https://github.com/GitYCC/g2pW)对TTS中文文本前端的优化。
- 非常感谢 [yeyupiaoling](https://github.com/yeyupiaoling)/[PPASR](https://github.com/yeyupiaoling/PPASR)/[PaddlePaddle-DeepSpeech](https://github.com/yeyupiaoling/PaddlePaddle-DeepSpeech)/[VoiceprintRecognition-PaddlePaddle](https://github.com/yeyupiaoling/VoiceprintRecognition-PaddlePaddle)/[AudioClassification-PaddlePaddle](https://github.com/yeyupiaoling/AudioClassification-PaddlePaddle) 多年来的关注和建议,以及在诸多问题上的帮助。
- 非常感谢 [mymagicpower](https://github.com/mymagicpower) 采用PaddleSpeech 对 ASR 的[短语音](https://github.com/mymagicpower/AIAS/tree/main/3_audio_sdks/asr_sdk)及[长语音](https://github.com/mymagicpower/AIAS/tree/main/3_audio_sdks/asr_long_audio_sdk)进行 Java 实现。
- 非常感谢 [JiehangXie](https://github.com/JiehangXie)/[PaddleBoBo](https://github.com/JiehangXie/PaddleBoBo) 采用 PaddleSpeech 语音合成功能实现 Virtual Uploader(VUP)/Virtual YouTuber(VTuber) 虚拟主播。
diff --git a/demos/audio_searching/README.md b/demos/audio_searching/README.md
index 0fc901432..db38d14ed 100644
--- a/demos/audio_searching/README.md
+++ b/demos/audio_searching/README.md
@@ -226,12 +226,6 @@ recall and elapsed time statistics are shown in the following figure:
The retrieval framework based on Milvus takes about 2.9 milliseconds to retrieve on the premise of 90% recall rate, and it takes about 500 milliseconds for feature extraction (testing audio takes about 5 seconds), that is, a single audio test takes about 503 milliseconds in total, which can meet most application scenarios.
-* compute embeding takes 500 ms
-* retrieval with cosine takes 2.9 ms
-* total takes 503 ms
-
-> test audio is 5 sec
-
### 6.Pretrained Models
Here is a list of pretrained models released by PaddleSpeech :
diff --git a/demos/audio_searching/src/operations/load.py b/demos/audio_searching/src/operations/load.py
index d1ea00576..0d9edb784 100644
--- a/demos/audio_searching/src/operations/load.py
+++ b/demos/audio_searching/src/operations/load.py
@@ -26,9 +26,8 @@ def get_audios(path):
"""
supported_formats = [".wav", ".mp3", ".ogg", ".flac", ".m4a"]
return [
- item
- for sublist in [[os.path.join(dir, file) for file in files]
- for dir, _, files in list(os.walk(path))]
+ item for sublist in [[os.path.join(dir, file) for file in files]
+ for dir, _, files in list(os.walk(path))]
for item in sublist if os.path.splitext(item)[1] in supported_formats
]
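
The hunk above only re-wraps the nested comprehension; the behaviour it keeps is a plain directory walk filtered by extension, roughly equivalent to this loop form:

```python
# Loop-form equivalent of the comprehension kept by the hunk above.
import os


def get_audios(path):
    supported_formats = [".wav", ".mp3", ".ogg", ".flac", ".m4a"]
    result = []
    for root, _, files in os.walk(path):
        for name in files:
            full = os.path.join(root, name)
            if os.path.splitext(full)[1] in supported_formats:
                result.append(full)
    return result
```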
diff --git a/demos/metaverse/README.md b/demos/metaverse/README.md
index 2c6b0d3ee..e458256a8 100644
--- a/demos/metaverse/README.md
+++ b/demos/metaverse/README.md
@@ -1,5 +1,3 @@
-([简体中文](./README_cn.md)|English)
-
# Metaverse
## Introduction
Metaverse is a new Internet application and social form integrating virtual reality produced by integrating a variety of new technologies.
diff --git a/demos/metaverse/README_cn.md b/demos/metaverse/README_cn.md
deleted file mode 100644
index a716109f1..000000000
--- a/demos/metaverse/README_cn.md
+++ /dev/null
@@ -1,27 +0,0 @@
-(简体中文|[English](./README.md))
-
-# Metaverse
-
-## 简介
-
-Metaverse 是一种新的互联网应用和社交形式,融合了多种新技术,产生了虚拟现实。
-
-这个演示是一个让图片中的名人“说话”的实现。通过 `PaddleSpeech` 的 `TTS` 模块和 `PaddleGAN` 的组合,我们集成了安装和特定模块到一个 shell 脚本中。
-
-## 使用
-
-您可以使用 `PaddleSpeech` 的 `TTS` 模块和 `PaddleGAN` 让您最喜欢的人说出指定的内容,并构建您的虚拟人。
-
-运行 `run.sh` 完成所有基本程序,包括安装。
-
-```bash
-./run.sh
-```
-
-在 `run.sh`, 先会执行 `source path.sh` 来设置好环境变量。
-
-如果您想尝试您的句子,请替换 `sentences.txt` 中的句子。
-
-如果您想尝试图像,请将图像替换 shell 脚本中的 `download/Lamarr.png` 。
-
-结果已显示在我们的 [notebook](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/docs/tutorial/tts/tts_tutorial.ipynb)。
diff --git a/demos/speaker_verification/README.md b/demos/speaker_verification/README.md
index 55f9a7360..900b5ae40 100644
--- a/demos/speaker_verification/README.md
+++ b/demos/speaker_verification/README.md
@@ -19,7 +19,6 @@ The input of this cli demo should be a WAV file(`.wav`), and the sample rate mus
Here are sample files for this demo that can be downloaded:
```bash
wget -c https://paddlespeech.bj.bcebos.com/vector/audio/85236145389.wav
-wget -c https://paddlespeech.bj.bcebos.com/vector/audio/123456789.wav
```
### 3. Usage
diff --git a/demos/speaker_verification/README_cn.md b/demos/speaker_verification/README_cn.md
index 85224699c..f6afa86ac 100644
--- a/demos/speaker_verification/README_cn.md
+++ b/demos/speaker_verification/README_cn.md
@@ -19,7 +19,6 @@
```bash
# 该音频的内容是数字串 85236145389
wget -c https://paddlespeech.bj.bcebos.com/vector/audio/85236145389.wav
-wget -c https://paddlespeech.bj.bcebos.com/vector/audio/123456789.wav
```
### 3. 使用方法
- 命令行 (推荐使用)
diff --git a/demos/speech_server/conf/application.yaml b/demos/speech_server/conf/application.yaml
index b5ee80095..9c171c470 100644
--- a/demos/speech_server/conf/application.yaml
+++ b/demos/speech_server/conf/application.yaml
@@ -61,7 +61,7 @@ tts_python:
phones_dict:
tones_dict:
speaker_dict:
-
+ spk_id: 0
# voc (vocoder) choices=['pwgan_csmsc', 'pwgan_ljspeech', 'pwgan_aishell3',
# 'pwgan_vctk', 'mb_melgan_csmsc', 'style_melgan_csmsc',
@@ -87,7 +87,7 @@ tts_inference:
phones_dict:
tones_dict:
speaker_dict:
-
+ spk_id: 0
am_predictor_conf:
device: # set 'gpu:id' or 'cpu'
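
Both hunks above restore a `spk_id: 0` entry, one under `tts_python` and one under `tts_inference`. A minimal sketch of reading those fields with PyYAML (the path and defaulting logic are assumptions; only the key layout comes from the hunks):

```python
# Minimal sketch: read the spk_id fields restored by the hunks above.
import yaml

with open("demos/speech_server/conf/application.yaml") as f:
    conf = yaml.safe_load(f)

# Default of 0 mirrors the value written back by this revert.
python_spk_id = conf.get("tts_python", {}).get("spk_id", 0)
inference_spk_id = conf.get("tts_inference", {}).get("spk_id", 0)
print(python_spk_id, inference_spk_id)
```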
diff --git a/demos/speech_web/API.md b/demos/speech_web/API.md
index f66ec138e..c51446749 100644
--- a/demos/speech_web/API.md
+++ b/demos/speech_web/API.md
@@ -401,4 +401,4 @@ curl -X 'GET' \
"code": 0,
"result":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
"message": "ok"
-```
+```
\ No newline at end of file
diff --git a/demos/speech_web/speech_server/main.py b/demos/speech_web/speech_server/main.py
index d4750d598..b10176670 100644
--- a/demos/speech_web/speech_server/main.py
+++ b/demos/speech_web/speech_server/main.py
@@ -3,48 +3,48 @@
# 2. 接收录音音频,返回识别结果
# 3. 接收ASR识别结果,返回NLP对话结果
# 4. 接收NLP对话结果,返回TTS音频
-import argparse
+
import base64
-import datetime
-import json
+import yaml
import os
-from typing import List
-
-import aiofiles
+import json
+import datetime
import librosa
import soundfile as sf
+import numpy as np
+import argparse
import uvicorn
-from fastapi import FastAPI
-from fastapi import File
-from fastapi import Form
-from fastapi import UploadFile
-from fastapi import WebSocket
-from fastapi import WebSocketDisconnect
-from fastapi.responses import StreamingResponse
+import aiofiles
+from typing import Optional, List
from pydantic import BaseModel
-from src.AudioManeger import AudioMannger
-from src.robot import Robot
-from src.SpeechBase.vpr import VPR
-from src.util import *
-from src.WebsocketManeger import ConnectionManager
+from fastapi import FastAPI, Header, File, UploadFile, Form, Cookie, WebSocket, WebSocketDisconnect
+from fastapi.responses import StreamingResponse
+from starlette.responses import FileResponse
from starlette.middleware.cors import CORSMiddleware
from starlette.requests import Request
-from starlette.responses import FileResponse
from starlette.websockets import WebSocketState as WebSocketState
+from src.AudioManeger import AudioMannger
+from src.util import *
+from src.robot import Robot
+from src.WebsocketManeger import ConnectionManager
+from src.SpeechBase.vpr import VPR
+
from paddlespeech.server.engine.asr.online.python.asr_engine import PaddleASRConnectionHanddler
from paddlespeech.server.utils.audio_process import float2pcm
+
# 解析配置
-parser = argparse.ArgumentParser(prog='PaddleSpeechDemo', add_help=True)
+parser = argparse.ArgumentParser(
+ prog='PaddleSpeechDemo', add_help=True)
parser.add_argument(
- "--port",
- action="store",
- type=int,
- help="port of the app",
- default=8010,
- required=False)
+ "--port",
+ action="store",
+ type=int,
+ help="port of the app",
+ default=8010,
+ required=False)
args = parser.parse_args()
port = args.port
@@ -60,41 +60,39 @@ ie_model_path = "source/model"
UPLOAD_PATH = "source/vpr"
WAV_PATH = "source/wav"
-base_sources = [UPLOAD_PATH, WAV_PATH]
+
+base_sources = [
+ UPLOAD_PATH, WAV_PATH
+]
for path in base_sources:
os.makedirs(path, exist_ok=True)
+
# 初始化
app = FastAPI()
-chatbot = Robot(
- asr_config, tts_config, asr_init_path, ie_model_path=ie_model_path)
+chatbot = Robot(asr_config, tts_config, asr_init_path, ie_model_path=ie_model_path)
manager = ConnectionManager()
aumanager = AudioMannger(chatbot)
aumanager.init()
-vpr = VPR(db_path, dim=192, top_k=5)
-
+vpr = VPR(db_path, dim = 192, top_k = 5)
# 服务配置
class NlpBase(BaseModel):
chat: str
-
class TtsBase(BaseModel):
- text: str
-
+ text: str
class Audios:
def __init__(self) -> None:
self.audios = b""
-
audios = Audios()
######################################################################
########################### ASR 服务 #################################
#####################################################################
-
# 接收文件,返回ASR结果
# 上传文件
@app.post("/asr/offline")
@@ -103,8 +101,7 @@ async def speech2textOffline(files: List[UploadFile]):
asr_res = ""
for file in files[:1]:
# 生成时间戳
- now_name = "asr_offline_" + datetime.datetime.strftime(
- datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav"
+ now_name = "asr_offline_" + datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav"
out_file_path = os.path.join(WAV_PATH, now_name)
async with aiofiles.open(out_file_path, 'wb') as out_file:
content = await file.read() # async read
@@ -113,9 +110,10 @@ async def speech2textOffline(files: List[UploadFile]):
# 返回ASR识别结果
asr_res = chatbot.speech2text(out_file_path)
return SuccessRequest(result=asr_res)
+ # else:
+ # return ErrorRequest(message="文件不是.wav格式")
return ErrorRequest(message="上传文件为空")
-
# 接收文件,同时将wav强制转成16k, int16类型
@app.post("/asr/offlinefile")
async def speech2textOfflineFile(files: List[UploadFile]):
@@ -123,8 +121,7 @@ async def speech2textOfflineFile(files: List[UploadFile]):
asr_res = ""
for file in files[:1]:
# 生成时间戳
- now_name = "asr_offline_" + datetime.datetime.strftime(
- datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav"
+ now_name = "asr_offline_" + datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav"
out_file_path = os.path.join(WAV_PATH, now_name)
async with aiofiles.open(out_file_path, 'wb') as out_file:
content = await file.read() # async read
@@ -135,20 +132,24 @@ async def speech2textOfflineFile(files: List[UploadFile]):
wav = float2pcm(wav) # float32 to int16
wav_bytes = wav.tobytes() # to bytes
wav_base64 = base64.b64encode(wav_bytes).decode('utf8')
-
+
# 将文件重新写入
now_name = now_name[:-4] + "_16k" + ".wav"
out_file_path = os.path.join(WAV_PATH, now_name)
- sf.write(out_file_path, wav, 16000)
+ sf.write(out_file_path,wav,16000)
# 返回ASR识别结果
asr_res = chatbot.speech2text(out_file_path)
- response_res = {"asr_result": asr_res, "wav_base64": wav_base64}
+ response_res = {
+ "asr_result": asr_res,
+ "wav_base64": wav_base64
+ }
return SuccessRequest(result=response_res)
-
+
return ErrorRequest(message="上传文件为空")
+
# 流式接收测试
@app.post("/asr/online1")
async def speech2textOnlineRecive(files: List[UploadFile]):
@@ -160,17 +161,15 @@ async def speech2textOnlineRecive(files: List[UploadFile]):
print(f"audios长度变化: {len(audios.audios)}")
return SuccessRequest(message="接收成功")
-
# 采集环境噪音大小
@app.post("/asr/collectEnv")
async def collectEnv(files: List[UploadFile]):
- for file in files[:1]:
+ for file in files[:1]:
content = await file.read() # async read
# 初始化, wav 前44字节是头部信息
aumanager.compute_env_volume(content[44:])
vad_ = aumanager.vad_threshold
- return SuccessRequest(result=vad_, message="采集环境噪音成功")
-
+ return SuccessRequest(result=vad_,message="采集环境噪音成功")
# 停止录音
@app.get("/asr/stopRecord")
@@ -180,7 +179,6 @@ async def stopRecord():
print("Online录音暂停")
return SuccessRequest(message="停止成功")
-
# 恢复录音
@app.get("/asr/resumeRecord")
async def resumeRecord():
@@ -189,7 +187,7 @@ async def resumeRecord():
return SuccessRequest(message="Online录音恢复")
-# 聊天用的 ASR
+# 聊天用的ASR
@app.websocket("/ws/asr/offlineStream")
async def websocket_endpoint(websocket: WebSocket):
await manager.connect(websocket)
@@ -212,9 +210,9 @@ async def websocket_endpoint(websocket: WebSocket):
# print(f"用户-{user}-离开")
- # 流式识别的 ASR
+# Online识别的ASR
@app.websocket('/ws/asr/onlineStream')
-async def websocket_endpoint_online(websocket: WebSocket):
+async def websocket_endpoint(websocket: WebSocket):
"""PaddleSpeech Online ASR Server api
Args:
@@ -300,14 +298,12 @@ async def websocket_endpoint_online(websocket: WebSocket):
except WebSocketDisconnect:
pass
-
######################################################################
########################### NLP 服务 #################################
#####################################################################
-
@app.post("/nlp/chat")
-async def chatOffline(nlp_base: NlpBase):
+async def chatOffline(nlp_base:NlpBase):
chat = nlp_base.chat
if not chat:
return ErrorRequest(message="传入文本为空")
@@ -315,9 +311,8 @@ async def chatOffline(nlp_base: NlpBase):
res = chatbot.chat(chat)
return SuccessRequest(result=res)
-
@app.post("/nlp/ie")
-async def ieOffline(nlp_base: NlpBase):
+async def ieOffline(nlp_base:NlpBase):
nlp_text = nlp_base.chat
if not nlp_text:
return ErrorRequest(message="传入文本为空")
@@ -325,20 +320,17 @@ async def ieOffline(nlp_base: NlpBase):
res = chatbot.ie(nlp_text)
return SuccessRequest(result=res)
-
######################################################################
########################### TTS 服务 #################################
#####################################################################
-
@app.post("/tts/offline")
-async def text2speechOffline(tts_base: TtsBase):
+async def text2speechOffline(tts_base:TtsBase):
text = tts_base.text
if not text:
return ErrorRequest(message="文本为空")
else:
- now_name = "tts_" + datetime.datetime.strftime(
- datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav"
+ now_name = "tts_"+ datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav"
out_file_path = os.path.join(WAV_PATH, now_name)
# 保存为文件,再转成base64传输
chatbot.text2speech(text, outpath=out_file_path)
@@ -347,14 +339,12 @@ async def text2speechOffline(tts_base: TtsBase):
base_str = base64.b64encode(data_bin)
return SuccessRequest(result=base_str)
-
# http流式TTS
@app.post("/tts/online")
async def stream_tts(request_body: TtsBase):
text = request_body.text
return StreamingResponse(chatbot.text2speechStreamBytes(text=text))
-
# ws流式TTS
@app.websocket("/ws/tts/online")
async def stream_ttsWS(websocket: WebSocket):
@@ -366,11 +356,17 @@ async def stream_ttsWS(websocket: WebSocket):
if text:
for sub_wav in chatbot.text2speechStream(text=text):
# print("发送sub wav: ", len(sub_wav))
- res = {"wav": sub_wav, "done": False}
+ res = {
+ "wav": sub_wav,
+ "done": False
+ }
await websocket.send_json(res)
-
+
# 输送结束
- res = {"wav": sub_wav, "done": True}
+ res = {
+ "wav": sub_wav,
+ "done": True
+ }
await websocket.send_json(res)
# manager.disconnect(websocket)
@@ -400,9 +396,8 @@ async def vpr_enroll(table_name: str=None,
return {'status': False, 'msg': "spk_id can not be None"}
# Save the upload data to server.
content = await audio.read()
- now_name = "vpr_enroll_" + datetime.datetime.strftime(
- datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav"
- audio_path = os.path.join(UPLOAD_PATH, now_name)
+ now_name = "vpr_enroll_" + datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav"
+ audio_path = os.path.join(UPLOAD_PATH, now_name)
with open(audio_path, "wb+") as f:
f.write(content)
@@ -418,19 +413,20 @@ async def vpr_recog(request: Request,
audio: UploadFile=File(...)):
# Voice print recognition online
# try:
- # Save the upload data to server.
+ # Save the upload data to server.
content = await audio.read()
- now_name = "vpr_query_" + datetime.datetime.strftime(
- datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav"
- query_audio_path = os.path.join(UPLOAD_PATH, now_name)
+ now_name = "vpr_query_" + datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav"
+ query_audio_path = os.path.join(UPLOAD_PATH, now_name)
with open(query_audio_path, "wb+") as f:
- f.write(content)
+ f.write(content)
spk_ids, paths, scores = vpr.do_search_vpr(query_audio_path)
res = dict(zip(spk_ids, zip(paths, scores)))
# Sort results by distance metric, closest distances first
res = sorted(res.items(), key=lambda item: item[1][1], reverse=True)
return res
+ # except Exception as e:
+ # return {'status': False, 'msg': e}, 400
@app.post('/vpr/del')
@@ -464,18 +460,17 @@ async def vpr_database64(vprId: int):
return {'status': False, 'msg': "vpr_id can not be None"}
audio_path = vpr.do_get_wav(vprId)
# 返回base64
-
+
# 将文件转成16k, 16bit类型的wav文件
wav, sr = librosa.load(audio_path, sr=16000)
wav = float2pcm(wav) # float32 to int16
wav_bytes = wav.tobytes() # to bytes
wav_base64 = base64.b64encode(wav_bytes).decode('utf8')
-
+
return SuccessRequest(result=wav_base64)
except Exception as e:
return {'status': False, 'msg': e}, 400
-
@app.get('/vpr/data')
async def vpr_data(vprId: int):
# Get the audio file from path by spk_id in MySQL
@@ -487,6 +482,11 @@ async def vpr_data(vprId: int):
except Exception as e:
return {'status': False, 'msg': e}, 400
-
if __name__ == '__main__':
uvicorn.run(app=app, host='0.0.0.0', port=port)
+
+
+
+
+
+
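
For a quick check of the `/tts/online` route reinstated above, here is a minimal client sketch. Only the `{"text": ...}` request body and the raw PCM byte stream come from the handler itself (`stream_tts` returns `StreamingResponse(chatbot.text2speechStreamBytes(...))`); the host, port and the use of `requests` are assumptions.

```python
# Hypothetical client for the /tts/online endpoint shown above.
# Host/port are placeholders; the endpoint streams raw int16 PCM bytes
# produced by streamTTSBytes() at the demo's 24 kHz TTS sample rate.
import requests


def fetch_streaming_tts(text: str, url: str = "http://127.0.0.1:8010/tts/online") -> bytes:
    pcm = b""
    # TtsBase is a pydantic model with a single `text` field in the reverted code.
    with requests.post(url, json={"text": text}, stream=True) as resp:
        resp.raise_for_status()
        for chunk in resp.iter_content(chunk_size=4096):
            pcm += chunk
    return pcm


if __name__ == "__main__":
    audio = fetch_streaming_tts("你好,欢迎使用百度飞桨深度学习框架!")
    print(f"received {len(audio)} bytes of 24 kHz int16 PCM")
```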
diff --git a/demos/speech_web/speech_server/requirements.txt b/demos/speech_web/speech_server/requirements.txt
index 607f0d4d0..7e7bd1680 100644
--- a/demos/speech_web/speech_server/requirements.txt
+++ b/demos/speech_web/speech_server/requirements.txt
@@ -1,13 +1,14 @@
aiofiles
-faiss-cpu
fastapi
librosa
numpy
-paddlenlp
-paddlepaddle
-paddlespeech
pydantic
-python-multipartscikit_learn
+scikit_learn
SoundFile
starlette
uvicorn
+paddlepaddle
+paddlespeech
+paddlenlp
+faiss-cpu
+python-multipart
\ No newline at end of file
diff --git a/demos/speech_web/speech_server/src/AudioManeger.py b/demos/speech_web/speech_server/src/AudioManeger.py
index 8fe512cfd..0deb03699 100644
--- a/demos/speech_web/speech_server/src/AudioManeger.py
+++ b/demos/speech_web/speech_server/src/AudioManeger.py
@@ -1,19 +1,15 @@
-import datetime
+import imp
+from queue import Queue
+import numpy as np
import os
import wave
-
-import numpy as np
-
+import random
+import datetime
from .util import randName
class AudioMannger:
- def __init__(self,
- robot,
- frame_length=160,
- frame=10,
- data_width=2,
- vad_default=300):
+ def __init__(self, robot, frame_length=160, frame=10, data_width=2, vad_default = 300):
# 二进制 pcm 流
self.audios = b''
self.asr_result = ""
@@ -24,9 +20,8 @@ class AudioMannger:
os.makedirs(self.file_dir, exist_ok=True)
self.vad_deafult = vad_default
self.vad_threshold = vad_default
- self.vad_threshold_path = os.path.join(self.file_dir,
- "vad_threshold.npy")
-
+ self.vad_threshold_path = os.path.join(self.file_dir, "vad_threshold.npy")
+
# 10ms 一帧
self.frame_length = frame_length
# 10帧,检测一次 vad
@@ -35,64 +30,67 @@ class AudioMannger:
self.data_width = data_width
# window
self.window_length = frame_length * frame * data_width
-
+
# 是否开始录音
self.on_asr = False
- self.silence_cnt = 0
+ self.silence_cnt = 0
self.max_silence_cnt = 4
self.is_pause = False # 录音暂停与恢复
-
+
+
+
def init(self):
if os.path.exists(self.vad_threshold_path):
# 平均响度文件存在
self.vad_threshold = np.load(self.vad_threshold_path)
-
+
+
def clear_audio(self):
# 清空 pcm 累积片段与 asr 识别结果
self.audios = b''
-
+
def clear_asr(self):
self.asr_result = ""
-
+
+
def compute_chunk_volume(self, start_index, pcm_bins):
# 根据帧长计算能量平均值
- pcm_bin = pcm_bins[start_index:start_index + self.window_length]
+ pcm_bin = pcm_bins[start_index: start_index + self.window_length]
# 转成 numpy
pcm_np = np.frombuffer(pcm_bin, np.int16)
# 归一化 + 计算响度
x = pcm_np.astype(np.float32)
x = np.abs(x)
- return np.mean(x)
-
+ return np.mean(x)
+
+
def is_speech(self, start_index, pcm_bins):
# 检查是否没
if start_index > len(pcm_bins):
return False
# 检查从这个 start 开始是否为静音帧
- energy = self.compute_chunk_volume(
- start_index=start_index, pcm_bins=pcm_bins)
+ energy = self.compute_chunk_volume(start_index=start_index, pcm_bins=pcm_bins)
# print(energy)
if energy > self.vad_threshold:
return True
else:
return False
-
+
def compute_env_volume(self, pcm_bins):
max_energy = 0
start = 0
while start < len(pcm_bins):
- energy = self.compute_chunk_volume(
- start_index=start, pcm_bins=pcm_bins)
+ energy = self.compute_chunk_volume(start_index=start, pcm_bins=pcm_bins)
if energy > max_energy:
max_energy = energy
start += self.window_length
self.vad_threshold = max_energy + 100 if max_energy > self.vad_deafult else self.vad_deafult
-
+
# 保存成文件
np.save(self.vad_threshold_path, self.vad_threshold)
print(f"vad 阈值大小: {self.vad_threshold}")
print(f"环境采样保存: {os.path.realpath(self.vad_threshold_path)}")
-
+
def stream_asr(self, pcm_bin):
# 先把 pcm_bin 送进去做端点检测
start = 0
@@ -101,7 +99,7 @@ class AudioMannger:
self.on_asr = True
self.silence_cnt = 0
print("录音中")
- self.audios += pcm_bin[start:start + self.window_length]
+ self.audios += pcm_bin[ start : start + self.window_length]
else:
if self.on_asr:
self.silence_cnt += 1
@@ -112,42 +110,41 @@ class AudioMannger:
print("录音停止")
# audios 保存为 wav, 送入 ASR
if len(self.audios) > 2 * 16000:
- file_path = os.path.join(
- self.file_dir,
- "asr_" + datetime.datetime.strftime(
- datetime.datetime.now(),
- '%Y%m%d%H%M%S') + randName() + ".wav")
+ file_path = os.path.join(self.file_dir, "asr_" + datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav")
self.save_audio(file_path=file_path)
self.asr_result = self.robot.speech2text(file_path)
self.clear_audio()
- return self.asr_result
+ return self.asr_result
else:
# 正常接收
print("录音中 静音")
- self.audios += pcm_bin[start:start + self.window_length]
+ self.audios += pcm_bin[ start : start + self.window_length]
start += self.window_length
return ""
-
+
def save_audio(self, file_path):
print("保存音频")
- wf = wave.open(file_path, 'wb') # 创建一个音频文件,名字为“01.wav"
- wf.setnchannels(1) # 设置声道数为2
- wf.setsampwidth(2) # 设置采样深度为
- wf.setframerate(16000) # 设置采样率为16000
+ wf = wave.open(file_path, 'wb') # 创建一个音频文件,名字为“01.wav"
+ wf.setnchannels(1) # 设置声道数为2
+ wf.setsampwidth(2) # 设置采样深度为
+ wf.setframerate(16000) # 设置采样率为16000
# 将数据写入创建的音频文件
wf.writeframes(self.audios)
# 写完后将文件关闭
wf.close()
-
+
def end(self):
# audios 保存为 wav, 送入 ASR
file_path = os.path.join(self.file_dir, "asr.wav")
self.save_audio(file_path=file_path)
return self.robot.speech2text(file_path)
-
+
def stop(self):
self.is_pause = True
self.audios = b''
-
+
def resume(self):
self.is_pause = False
+
+
+
\ No newline at end of file
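
The endpointing in `AudioMannger` is a simple energy gate: a window is `frame_length * frame * data_width` bytes of 16 kHz int16 PCM, and it counts as speech when its mean absolute amplitude exceeds the calibrated threshold. A standalone sketch mirroring the class defaults (160-sample frames, 10 frames per window, threshold 300):

```python
# Minimal sketch of the energy-based VAD window used by AudioMannger above.
import numpy as np

FRAME_LENGTH = 160          # samples per 10 ms frame at 16 kHz
FRAME = 10                  # frames per VAD decision
DATA_WIDTH = 2              # bytes per int16 sample
WINDOW_BYTES = FRAME_LENGTH * FRAME * DATA_WIDTH  # 3200 bytes = 100 ms of audio


def window_energy(pcm_bytes: bytes, start: int) -> float:
    window = pcm_bytes[start:start + WINDOW_BYTES]
    samples = np.frombuffer(window, dtype=np.int16).astype(np.float32)
    return float(np.mean(np.abs(samples)))


def is_speech(pcm_bytes: bytes, start: int, threshold: float = 300.0) -> bool:
    return window_energy(pcm_bytes, start) > threshold


# Example: 100 ms of silence vs. a loud 440 Hz burst.
silence = np.zeros(1600, dtype=np.int16).tobytes()
tone = (8000 * np.sin(2 * np.pi * 440 * np.arange(1600) / 16000)).astype(np.int16).tobytes()
print(is_speech(silence, 0), is_speech(tone, 0))   # False True
```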
diff --git a/demos/speech_web/speech_server/src/SpeechBase/asr.py b/demos/speech_web/speech_server/src/SpeechBase/asr.py
index 5213ea787..8d4c0cffc 100644
--- a/demos/speech_web/speech_server/src/SpeechBase/asr.py
+++ b/demos/speech_web/speech_server/src/SpeechBase/asr.py
@@ -1,10 +1,13 @@
+from re import sub
import numpy as np
+import paddle
+import librosa
+import soundfile
from paddlespeech.server.engine.asr.online.python.asr_engine import ASREngine
from paddlespeech.server.engine.asr.online.python.asr_engine import PaddleASRConnectionHanddler
from paddlespeech.server.utils.config import get_config
-
def readWave(samples):
x_len = len(samples)
@@ -28,23 +31,20 @@ def readWave(samples):
class ASR:
- def __init__(
- self,
- config_path, ) -> None:
+ def __init__(self, config_path, ) -> None:
self.config = get_config(config_path)['asr_online']
self.engine = ASREngine()
self.engine.init(self.config)
self.connection_handler = PaddleASRConnectionHanddler(self.engine)
-
+
def offlineASR(self, samples, sample_rate=16000):
- x_chunk, x_chunk_lens = self.engine.preprocess(
- samples=samples, sample_rate=sample_rate)
+ x_chunk, x_chunk_lens = self.engine.preprocess(samples=samples, sample_rate=sample_rate)
self.engine.run(x_chunk, x_chunk_lens)
result = self.engine.postprocess()
self.engine.reset()
return result
- def onlineASR(self, samples: bytes=None, is_finished=False):
+ def onlineASR(self, samples:bytes=None, is_finished=False):
if not is_finished:
# 流式开始
self.connection_handler.extract_feat(samples)
@@ -58,3 +58,5 @@ class ASR:
asr_results = self.connection_handler.get_result()
self.connection_handler.reset()
return asr_results
+
+
\ No newline at end of file
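
A hedged usage sketch of the `ASR` wrapper above: the config path and wav name are placeholders, and the int16 samples fed to `offlineASR()` are an assumption about what the engine's preprocessing expects. `onlineASR()` is fed raw PCM bytes, as `main.py` and `AudioMannger` do.

```python
# Usage sketch only; paths and dtypes are assumptions.
import soundfile as sf

from src.SpeechBase.asr import ASR

asr = ASR(config_path="conf/ws_conformer_application.yaml")   # hypothetical path

samples, sr = sf.read("demo_16k.wav", dtype="int16")          # hypothetical 16 kHz wav

# Offline decoding: whole utterance in, transcript out.
print(asr.offlineASR(samples, sample_rate=sr))

# Streaming decoding: push raw PCM chunks, then flush with is_finished=True.
pcm = samples.tobytes()
for start in range(0, len(pcm), 3200):                        # ~100 ms chunks
    partial = asr.onlineASR(pcm[start:start + 3200], is_finished=False)
print(asr.onlineASR(is_finished=True))
```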
diff --git a/demos/speech_web/speech_server/src/SpeechBase/nlp.py b/demos/speech_web/speech_server/src/SpeechBase/nlp.py
index b642a51d6..4ece63256 100644
--- a/demos/speech_web/speech_server/src/SpeechBase/nlp.py
+++ b/demos/speech_web/speech_server/src/SpeechBase/nlp.py
@@ -1,23 +1,23 @@
from paddlenlp import Taskflow
-
class NLP:
def __init__(self, ie_model_path=None):
schema = ["时间", "出发地", "目的地", "费用"]
if ie_model_path:
- self.ie_model = Taskflow(
- "information_extraction",
- schema=schema,
- task_path=ie_model_path)
+ self.ie_model = Taskflow("information_extraction",
+ schema=schema, task_path=ie_model_path)
else:
- self.ie_model = Taskflow("information_extraction", schema=schema)
-
+ self.ie_model = Taskflow("information_extraction",
+ schema=schema)
+
self.dialogue_model = Taskflow("dialogue")
-
+
def chat(self, text):
result = self.dialogue_model([text])
return result[0]
-
+
def ie(self, text):
result = self.ie_model(text)
return result
+
+
\ No newline at end of file
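
A short usage sketch of the `NLP` wrapper, assuming the default PaddleNLP Taskflow models can be downloaded or are already cached locally; the example sentences are placeholders.

```python
# Usage sketch for the NLP wrapper above.
from src.SpeechBase.nlp import NLP

nlp = NLP(ie_model_path=None)

# Chit-chat via the PaddleNLP "dialogue" Taskflow.
print(nlp.chat("吃饭了吗"))

# Information extraction against the schema ["时间", "出发地", "目的地", "费用"].
print(nlp.ie("明天从北京到上海的飞机票多少钱"))
```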
diff --git a/demos/speech_web/speech_server/src/SpeechBase/sql_helper.py b/demos/speech_web/speech_server/src/SpeechBase/sql_helper.py
index bd8d58970..6937def58 100644
--- a/demos/speech_web/speech_server/src/SpeechBase/sql_helper.py
+++ b/demos/speech_web/speech_server/src/SpeechBase/sql_helper.py
@@ -1,19 +1,18 @@
import base64
-import os
import sqlite3
-
+import os
import numpy as np
+from pkg_resources import resource_stream
-def dict_factory(cursor, row):
- d = {}
- for idx, col in enumerate(cursor.description):
- d[col[0]] = row[idx]
- return d
-
+def dict_factory(cursor, row):
+ d = {}
+ for idx, col in enumerate(cursor.description):
+ d[col[0]] = row[idx]
+ return d
class DataBase(object):
- def __init__(self, db_path: str):
+ def __init__(self, db_path:str):
db_path = os.path.realpath(db_path)
if os.path.exists(db_path):
@@ -22,12 +21,12 @@ class DataBase(object):
db_path_dir = os.path.dirname(db_path)
os.makedirs(db_path_dir, exist_ok=True)
self.db_path = db_path
-
+
self.conn = sqlite3.connect(self.db_path)
self.conn.row_factory = dict_factory
self.cursor = self.conn.cursor()
self.init_database()
-
+
def init_database(self):
"""
初始化数据库, 若表不存在则创建
@@ -42,21 +41,20 @@ class DataBase(object):
"""
self.cursor.execute(sql)
self.conn.commit()
-
+
def execute_base(self, sql, data_dict):
self.cursor.execute(sql, data_dict)
self.conn.commit()
-
- def insert_one(self, username, vector_base64: str, wav_path):
+
+ def insert_one(self, username, vector_base64:str, wav_path):
if not os.path.exists(wav_path):
return None, "wav not exists"
else:
- sql = """
+ sql = f"""
insert into
vprtable (username, vector, wavpath)
values (?, ?, ?)
"""
-
try:
self.cursor.execute(sql, (username, vector_base64, wav_path))
self.conn.commit()
@@ -65,27 +63,25 @@ class DataBase(object):
except Exception as e:
print(e)
return None, e
-
+
def select_all(self):
sql = """
SELECT * from vprtable
"""
result = self.cursor.execute(sql).fetchall()
return result
-
+
def select_by_id(self, vpr_id):
sql = f"""
SELECT * from vprtable WHERE `id` = {vpr_id}
"""
-
result = self.cursor.execute(sql).fetchall()
return result
-
+
def select_by_username(self, username):
sql = f"""
SELECT * from vprtable WHERE `username` = '{username}'
"""
-
result = self.cursor.execute(sql).fetchall()
return result
@@ -93,30 +89,28 @@ class DataBase(object):
sql = f"""
DELETE from vprtable WHERE `username`='{username}'
"""
-
self.cursor.execute(sql)
self.conn.commit()
-
+
def drop_all(self):
- sql = """
+ sql = f"""
DELETE from vprtable
"""
-
self.cursor.execute(sql)
self.conn.commit()
-
+
def drop_table(self):
- sql = """
+ sql = f"""
DROP TABLE vprtable
"""
-
self.cursor.execute(sql)
self.conn.commit()
-
- def encode_vector(self, vector: np.ndarray):
+
+ def encode_vector(self, vector:np.ndarray):
return base64.b64encode(vector).decode('utf8')
-
+
def decode_vector(self, vector_base64, dtype=np.float32):
b = base64.b64decode(vector_base64)
vc = np.frombuffer(b, dtype=dtype)
return vc
+
\ No newline at end of file
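
The `encode_vector`/`decode_vector` pair stores embeddings as base64 text in SQLite, so the dtype passed to `decode_vector` must match the dtype of the original array. A minimal round-trip sketch, assuming float32 embeddings (the 192-dim size is only an example):

```python
# Round-trip sketch for the base64 vector storage used above.
import base64

import numpy as np

vec = np.random.rand(192).astype(np.float32)      # e.g. a speaker embedding

encoded = base64.b64encode(vec).decode("utf8")    # what gets written to the `vector` column
decoded = np.frombuffer(base64.b64decode(encoded), dtype=np.float32)

assert np.allclose(vec, decoded)
```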
diff --git a/demos/speech_web/speech_server/src/SpeechBase/tts.py b/demos/speech_web/speech_server/src/SpeechBase/tts.py
index eb32bca0e..d5ba0c802 100644
--- a/demos/speech_web/speech_server/src/SpeechBase/tts.py
+++ b/demos/speech_web/speech_server/src/SpeechBase/tts.py
@@ -5,19 +5,18 @@
# 2. 加载模型
# 3. 端到端推理
# 4. 流式推理
+
import base64
-import logging
import math
-
+import logging
import numpy as np
-
-from paddlespeech.server.engine.tts.online.onnx.tts_engine import TTSEngine
+from paddlespeech.server.utils.onnx_infer import get_sess
+from paddlespeech.t2s.frontend.zh_frontend import Frontend
+from paddlespeech.server.utils.util import denorm, get_chunks
from paddlespeech.server.utils.audio_process import float2pcm
from paddlespeech.server.utils.config import get_config
-from paddlespeech.server.utils.util import denorm
-from paddlespeech.server.utils.util import get_chunks
-from paddlespeech.t2s.frontend.zh_frontend import Frontend
+from paddlespeech.server.engine.tts.online.onnx.tts_engine import TTSEngine
class TTS:
def __init__(self, config_path):
@@ -27,12 +26,12 @@ class TTS:
self.engine.init(self.config)
self.executor = self.engine.executor
#self.engine.warm_up()
-
+
# 前端初始化
self.frontend = Frontend(
- phone_vocab_path=self.engine.executor.phones_dict,
- tone_vocab_path=None)
-
+ phone_vocab_path=self.engine.executor.phones_dict,
+ tone_vocab_path=None)
+
def depadding(self, data, chunk_num, chunk_id, block, pad, upsample):
"""
Streaming inference removes the result of pad inference
@@ -49,37 +48,39 @@ class TTS:
data = data[front_pad * upsample:(front_pad + block) * upsample]
return data
-
+
def offlineTTS(self, text):
get_tone_ids = False
merge_sentences = False
-
+
input_ids = self.frontend.get_input_ids(
- text, merge_sentences=merge_sentences, get_tone_ids=get_tone_ids)
+ text,
+ merge_sentences=merge_sentences,
+ get_tone_ids=get_tone_ids)
phone_ids = input_ids["phone_ids"]
wav_list = []
for i in range(len(phone_ids)):
orig_hs = self.engine.executor.am_encoder_infer_sess.run(
- None, input_feed={'text': phone_ids[i].numpy()})
+ None, input_feed={'text': phone_ids[i].numpy()}
+ )
hs = orig_hs[0]
am_decoder_output = self.engine.executor.am_decoder_sess.run(
- None, input_feed={'xs': hs})
+ None, input_feed={'xs': hs})
am_postnet_output = self.engine.executor.am_postnet_sess.run(
- None,
- input_feed={
- 'xs': np.transpose(am_decoder_output[0], (0, 2, 1))
- })
+ None,
+ input_feed={
+ 'xs': np.transpose(am_decoder_output[0], (0, 2, 1))
+ })
am_output_data = am_decoder_output + np.transpose(
am_postnet_output[0], (0, 2, 1))
normalized_mel = am_output_data[0][0]
- mel = denorm(normalized_mel, self.engine.executor.am_mu,
- self.engine.executor.am_std)
+ mel = denorm(normalized_mel, self.engine.executor.am_mu, self.engine.executor.am_std)
wav = self.engine.executor.voc_sess.run(
- output_names=None, input_feed={'logmel': mel})[0]
+ output_names=None, input_feed={'logmel': mel})[0]
wav_list.append(wav)
wavs = np.concatenate(wav_list)
return wavs
-
+
def streamTTS(self, text):
get_tone_ids = False
@@ -87,7 +88,9 @@ class TTS:
# front
input_ids = self.frontend.get_input_ids(
- text, merge_sentences=merge_sentences, get_tone_ids=get_tone_ids)
+ text,
+ merge_sentences=merge_sentences,
+ get_tone_ids=get_tone_ids)
phone_ids = input_ids["phone_ids"]
for i in range(len(phone_ids)):
@@ -102,15 +105,14 @@ class TTS:
mel = mel[0]
# voc streaming
- mel_chunks = get_chunks(mel, self.config.voc_block,
- self.config.voc_pad, "voc")
+ mel_chunks = get_chunks(mel, self.config.voc_block, self.config.voc_pad, "voc")
voc_chunk_num = len(mel_chunks)
for i, mel_chunk in enumerate(mel_chunks):
sub_wav = self.executor.voc_sess.run(
output_names=None, input_feed={'logmel': mel_chunk})
- sub_wav = self.depadding(
- sub_wav[0], voc_chunk_num, i, self.config.voc_block,
- self.config.voc_pad, self.config.voc_upsample)
+ sub_wav = self.depadding(sub_wav[0], voc_chunk_num, i,
+ self.config.voc_block, self.config.voc_pad,
+ self.config.voc_upsample)
yield self.after_process(sub_wav)
@@ -128,8 +130,7 @@ class TTS:
end = min(self.config.voc_block + self.config.voc_pad, mel_len)
# streaming am
- hss = get_chunks(orig_hs, self.config.am_block,
- self.config.am_pad, "am")
+ hss = get_chunks(orig_hs, self.config.am_block, self.config.am_pad, "am")
am_chunk_num = len(hss)
for i, hs in enumerate(hss):
am_decoder_output = self.executor.am_decoder_sess.run(
@@ -146,8 +147,7 @@ class TTS:
sub_mel = denorm(normalized_mel, self.executor.am_mu,
self.executor.am_std)
sub_mel = self.depadding(sub_mel, am_chunk_num, i,
- self.config.am_block,
- self.config.am_pad, 1)
+ self.config.am_block, self.config.am_pad, 1)
if i == 0:
mel_streaming = sub_mel
@@ -165,22 +165,23 @@ class TTS:
output_names=None, input_feed={'logmel': voc_chunk})
sub_wav = self.depadding(
sub_wav[0], voc_chunk_num, voc_chunk_id,
- self.config.voc_block, self.config.voc_pad,
- self.config.voc_upsample)
+ self.config.voc_block, self.config.voc_pad, self.config.voc_upsample)
yield self.after_process(sub_wav)
voc_chunk_id += 1
- start = max(0, voc_chunk_id * self.config.voc_block -
- self.config.voc_pad)
- end = min((voc_chunk_id + 1) * self.config.voc_block +
- self.config.voc_pad, mel_len)
+ start = max(
+ 0, voc_chunk_id * self.config.voc_block - self.config.voc_pad)
+ end = min(
+ (voc_chunk_id + 1) * self.config.voc_block + self.config.voc_pad,
+ mel_len)
else:
logging.error(
"Only support fastspeech2_csmsc or fastspeech2_cnndecoder_csmsc on streaming tts."
- )
+ )
+
def streamTTSBytes(self, text):
for wav in self.engine.executor.infer(
text=text,
@@ -190,14 +191,19 @@ class TTS:
wav = float2pcm(wav) # float32 to int16
wav_bytes = wav.tobytes() # to bytes
yield wav_bytes
-
+
+
def after_process(self, wav):
# for tvm
wav = float2pcm(wav) # float32 to int16
wav_bytes = wav.tobytes() # to bytes
wav_base64 = base64.b64encode(wav_bytes).decode('utf8') # to base64
return wav_base64
-
+
def streamTTS_TVM(self, text):
# 用 TVM 优化
pass
+
+
+
+
\ No newline at end of file
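
A consumption sketch for the streaming interface above: `streamTTS()` yields base64-encoded int16 chunks (via `after_process`), which a caller decodes and concatenates. The config path is a placeholder, and the 24 kHz rate matches `tts_sample_rate` in `robot.py`.

```python
# Sketch only; config path is hypothetical.
import base64

import numpy as np
import soundfile as sf

from src.SpeechBase.tts import TTS

tts = TTS(config_path="conf/tts_online_application.yaml")     # hypothetical path

chunks = [base64.b64decode(b64) for b64 in tts.streamTTS("你好,欢迎使用百度飞桨深度学习框架!")]
pcm = np.frombuffer(b"".join(chunks), dtype=np.int16)

sf.write("stream_out.wav", pcm, samplerate=24000)
print(f"wrote {len(pcm) / 24000:.2f} s of audio in {len(chunks)} chunks")
```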
diff --git a/demos/speech_web/speech_server/src/SpeechBase/vpr.py b/demos/speech_web/speech_server/src/SpeechBase/vpr.py
index cf3367991..29ee986e3 100644
--- a/demos/speech_web/speech_server/src/SpeechBase/vpr.py
+++ b/demos/speech_web/speech_server/src/SpeechBase/vpr.py
@@ -1,13 +1,11 @@
# vpr Demo 没有使用 mysql 与 muilvs, 仅用于docker演示
import logging
-
import faiss
+from matplotlib import use
import numpy as np
-
from .sql_helper import DataBase
from .vpr_encode import get_audio_embedding
-
class VPR:
def __init__(self, db_path, dim, top_k) -> None:
# 初始化
@@ -16,15 +14,15 @@ class VPR:
self.top_k = top_k
self.dtype = np.float32
self.vpr_idx = 0
-
+
# db 初始化
self.db = DataBase(db_path)
-
+
# faiss 初始化
index_ip = faiss.IndexFlatIP(dim)
self.index_ip = faiss.IndexIDMap(index_ip)
self.init()
-
+
def init(self):
# demo 初始化,把 mysql中的向量注册到 faiss 中
sql_dbs = self.db.select_all()
@@ -36,13 +34,12 @@ class VPR:
if len(vc.shape) == 1:
vc = np.expand_dims(vc, axis=0)
# 构建数据库
- self.index_ip.add_with_ids(vc, np.array(
- (idx, )).astype('int64'))
+ self.index_ip.add_with_ids(vc, np.array((idx,)).astype('int64'))
logging.info("faiss 构建完毕")
-
+
def faiss_enroll(self, idx, vc):
- self.index_ip.add_with_ids(vc, np.array((idx, )).astype('int64'))
-
+ self.index_ip.add_with_ids(vc, np.array((idx,)).astype('int64'))
+
def vpr_enroll(self, username, wav_path):
# 注册声纹
emb = get_audio_embedding(wav_path)
@@ -56,22 +53,21 @@ class VPR:
else:
last_idx, mess = None
return last_idx
-
+
def vpr_recog(self, wav_path):
# 识别声纹
emb_search = get_audio_embedding(wav_path)
-
+
if emb_search is not None:
emb_search = np.expand_dims(emb_search, axis=0)
D, I = self.index_ip.search(emb_search, self.top_k)
D = D.tolist()[0]
- I = I.tolist()[0]
- return [(round(D[i] * 100, 2), I[i]) for i in range(len(D))
- if I[i] != -1]
+ I = I.tolist()[0]
+ return [(round(D[i] * 100, 2 ), I[i]) for i in range(len(D)) if I[i] != -1]
else:
logging.error("识别失败")
return None
-
+
def do_search_vpr(self, wav_path):
spk_ids, paths, scores = [], [], []
recog_result = self.vpr_recog(wav_path)
@@ -82,39 +78,41 @@ class VPR:
scores.append(score)
paths.append("")
return spk_ids, paths, scores
-
+
def vpr_del(self, username):
# 根据用户username, 删除声纹
# 查用户ID,删除对应向量
res = self.db.select_by_username(username)
for r in res:
idx = r['id']
- self.index_ip.remove_ids(np.array((idx, )).astype('int64'))
-
+ self.index_ip.remove_ids(np.array((idx,)).astype('int64'))
+
self.db.drop_by_username(username)
-
+
def vpr_list(self):
# 获取数据列表
return self.db.select_all()
-
+
def do_list(self):
spk_ids, vpr_ids = [], []
for res in self.db.select_all():
spk_ids.append(res['username'])
vpr_ids.append(res['id'])
- return spk_ids, vpr_ids
-
+ return spk_ids, vpr_ids
+
def do_get_wav(self, vpr_idx):
- res = self.db.select_by_id(vpr_idx)
- return res[0]['wavpath']
-
+ res = self.db.select_by_id(vpr_idx)
+ return res[0]['wavpath']
+
+
def vpr_data(self, idx):
# 获取对应ID的数据
res = self.db.select_by_id(idx)
return res
-
+
def vpr_droptable(self):
# 删除表
self.db.drop_table()
# 清空 faiss
self.index_ip.reset()
+
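
The faiss pattern used by `VPR` above, shown self-contained: an inner-product flat index wrapped in `IndexIDMap`, so search hits can be joined back to rows in the SQLite table by id. The 192-dim embeddings and the two enrolled speakers are assumptions for illustration.

```python
# Minimal faiss enrol/search sketch mirroring the VPR class above.
import faiss
import numpy as np

dim, top_k = 192, 5
index = faiss.IndexIDMap(faiss.IndexFlatIP(dim))

# Enroll two speakers with ids matching their database rows.
emb = np.random.rand(2, dim).astype(np.float32)
index.add_with_ids(emb, np.array([1, 2]).astype("int64"))

# Recognise: query with one embedding, get (score, id) pairs back.
D, I = index.search(emb[:1], top_k)
matches = [(round(float(d) * 100, 2), int(i)) for d, i in zip(D[0], I[0]) if i != -1]
print(matches)
```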
diff --git a/demos/speech_web/speech_server/src/SpeechBase/vpr_encode.py b/demos/speech_web/speech_server/src/SpeechBase/vpr_encode.py
index 9d052fd98..a6a00e4d0 100644
--- a/demos/speech_web/speech_server/src/SpeechBase/vpr_encode.py
+++ b/demos/speech_web/speech_server/src/SpeechBase/vpr_encode.py
@@ -1,12 +1,9 @@
-import logging
-
-import numpy as np
-
from paddlespeech.cli.vector import VectorExecutor
+import numpy as np
+import logging
vector_executor = VectorExecutor()
-
def get_audio_embedding(path):
"""
Use vpr_inference to generate embedding of audio
@@ -19,3 +16,5 @@ def get_audio_embedding(path):
except Exception as e:
logging.error(f"Error with embedding:{e}")
return None
+
+
\ No newline at end of file
diff --git a/demos/speech_web/speech_server/src/WebsocketManeger.py b/demos/speech_web/speech_server/src/WebsocketManeger.py
index 954d849a5..5edde8430 100644
--- a/demos/speech_web/speech_server/src/WebsocketManeger.py
+++ b/demos/speech_web/speech_server/src/WebsocketManeger.py
@@ -2,7 +2,6 @@ from typing import List
from fastapi import WebSocket
-
class ConnectionManager:
def __init__(self):
# 存放激活的ws连接对象
@@ -29,4 +28,4 @@ class ConnectionManager:
await connection.send_text(message)
-manager = ConnectionManager()
+manager = ConnectionManager()
\ No newline at end of file
diff --git a/demos/speech_web/speech_server/src/robot.py b/demos/speech_web/speech_server/src/robot.py
index dd8c56e0c..b971c57b5 100644
--- a/demos/speech_web/speech_server/src/robot.py
+++ b/demos/speech_web/speech_server/src/robot.py
@@ -1,64 +1,60 @@
+from paddlespeech.cli.asr.infer import ASRExecutor
+import soundfile as sf
import os
+import librosa
-import soundfile as sf
from src.SpeechBase.asr import ASR
-from src.SpeechBase.nlp import NLP
from src.SpeechBase.tts import TTS
-
-from paddlespeech.cli.asr.infer import ASRExecutor
+from src.SpeechBase.nlp import NLP
class Robot:
- def __init__(self,
- asr_config,
- tts_config,
- asr_init_path,
+ def __init__(self, asr_config, tts_config,asr_init_path,
ie_model_path=None) -> None:
self.nlp = NLP(ie_model_path=ie_model_path)
self.asr = ASR(config_path=asr_config)
self.tts = TTS(config_path=tts_config)
self.tts_sample_rate = 24000
self.asr_sample_rate = 16000
-
+
# 流式识别效果不如端到端的模型,这里流式模型与端到端模型分开
self.asr_model = ASRExecutor()
self.asr_name = "conformer_wenetspeech"
self.warm_up_asrmodel(asr_init_path)
+
- def warm_up_asrmodel(self, asr_init_path):
+ def warm_up_asrmodel(self, asr_init_path):
if not os.path.exists(asr_init_path):
path_dir = os.path.dirname(asr_init_path)
if not os.path.exists(path_dir):
os.makedirs(path_dir, exist_ok=True)
-
+
# TTS生成,采样率24000
text = "生成初始音频"
self.text2speech(text, asr_init_path)
-
+
# asr model初始化
- self.asr_model(
- asr_init_path,
- model=self.asr_name,
- lang='zh',
- sample_rate=16000,
- force_yes=True)
-
+ self.asr_model(asr_init_path, model=self.asr_name,lang='zh',
+ sample_rate=16000, force_yes=True)
+
+
def speech2text(self, audio_file):
self.asr_model.preprocess(self.asr_name, audio_file)
self.asr_model.infer(self.asr_name)
res = self.asr_model.postprocess()
return res
-
+
def text2speech(self, text, outpath):
wav = self.tts.offlineTTS(text)
- sf.write(outpath, wav, samplerate=self.tts_sample_rate)
+ sf.write(
+ outpath, wav, samplerate=self.tts_sample_rate)
res = wav
return res
-
+
def text2speechStream(self, text):
for sub_wav_base64 in self.tts.streamTTS(text=text):
yield sub_wav_base64
-
+
def text2speechStreamBytes(self, text):
for wav_bytes in self.tts.streamTTSBytes(text=text):
yield wav_bytes
@@ -70,3 +66,5 @@ class Robot:
def ie(self, text):
result = self.nlp.ie(text)
return result
+
+
\ No newline at end of file
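
An end-to-end sketch of driving the `Robot` class: every path below is a placeholder, and the import assumes the demo's `src` layout. `chat()`, `speech2text()` and `text2speech()` are the methods `main.py` calls on its `chatbot` instance.

```python
# Usage sketch only; all config/model/wav paths are hypothetical.
from src.robot import Robot

robot = Robot(
    asr_config="conf/ws_conformer_application.yaml",    # hypothetical
    tts_config="conf/tts_online_application.yaml",      # hypothetical
    asr_init_path="source/demo/demo.wav",               # generated on first run if missing
    ie_model_path=None)

text = robot.speech2text("source/demo/demo.wav")        # offline ASR
print(robot.chat(text))                                 # dialogue reply
robot.text2speech("欢迎使用语音交互系统", outpath="reply.wav")   # 24 kHz wav output
```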
diff --git a/demos/speech_web/speech_server/src/util.py b/demos/speech_web/speech_server/src/util.py
index 4a566b6ee..34005d919 100644
--- a/demos/speech_web/speech_server/src/util.py
+++ b/demos/speech_web/speech_server/src/util.py
@@ -1,13 +1,18 @@
import random
-
def randName(n=5):
- return "".join(random.sample('zyxwvutsrqponmlkjihgfedcba', n))
-
+ return "".join(random.sample('zyxwvutsrqponmlkjihgfedcba',n))
def SuccessRequest(result=None, message="ok"):
- return {"code": 0, "result": result, "message": message}
-
+ return {
+ "code": 0,
+ "result":result,
+ "message": message
+ }
def ErrorRequest(result=None, message="error"):
- return {"code": -1, "result": result, "message": message}
+ return {
+ "code": -1,
+ "result":result,
+ "message": message
+ }
\ No newline at end of file
diff --git a/demos/story_talker/README.md b/demos/story_talker/README.md
index 58d2db959..62414383b 100644
--- a/demos/story_talker/README.md
+++ b/demos/story_talker/README.md
@@ -1,5 +1,3 @@
-([简体中文](./README_cn.md)|English)
-
# Story Talker
## Introduction
Storybooks are very important children's enlightenment books, but parents usually don't have enough time to read storybooks for their children. For very young children, they may not understand the Chinese characters in storybooks. Or sometimes, children just want to "listen" but don't want to "read".
diff --git a/demos/story_talker/README_cn.md b/demos/story_talker/README_cn.md
deleted file mode 100644
index 7c4789ddf..000000000
--- a/demos/story_talker/README_cn.md
+++ /dev/null
@@ -1,20 +0,0 @@
-
-(简体中文|[English](./README.md))
-
-# Story Talker
-
-## 简介
-
-故事书是非常重要的儿童启蒙书,但家长通常没有足够的时间为孩子读故事书。对于非常小的孩子,他们可能不理解故事书中的汉字。或有时,孩子们只是想“听”,而不想“读”。
-
-您可以使用 `PaddleOCR` 获取故事书的文本,并通过 `PaddleSpeech` 的 `TTS` 模块进行阅读。
-
-## 使用
-
-运行以下命令行开始:
-
-```
-./run.sh
-```
-
-结果已显示在 [notebook](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/docs/tutorial/tts/tts_tutorial.ipynb)。
diff --git a/demos/streaming_asr_server/conf/application.yaml b/demos/streaming_asr_server/conf/application.yaml
index a89d312ab..683d86f03 100644
--- a/demos/streaming_asr_server/conf/application.yaml
+++ b/demos/streaming_asr_server/conf/application.yaml
@@ -28,7 +28,6 @@ asr_online:
sample_rate: 16000
cfg_path:
decode_method:
- num_decoding_left_chunks: -1
force_yes: True
device: 'cpu' # cpu or gpu:id
decode_method: "attention_rescoring"
diff --git a/demos/streaming_asr_server/local/rtf_from_log.py b/demos/streaming_asr_server/local/rtf_from_log.py
index 09a9c9750..4b89b48fd 100755
--- a/demos/streaming_asr_server/local/rtf_from_log.py
+++ b/demos/streaming_asr_server/local/rtf_from_log.py
@@ -34,7 +34,7 @@ if __name__ == '__main__':
n = 0
for m in rtfs:
# not accurate, may have duplicate log
- n += 1
+ n += 1
T += m['T']
P += m['P']
diff --git a/demos/streaming_tts_server/conf/tts_online_application.yaml b/demos/streaming_tts_server/conf/tts_online_application.yaml
index f5ec9dc8e..e617912fe 100644
--- a/demos/streaming_tts_server/conf/tts_online_application.yaml
+++ b/demos/streaming_tts_server/conf/tts_online_application.yaml
@@ -29,7 +29,7 @@ tts_online:
phones_dict:
tones_dict:
speaker_dict:
-
+ spk_id: 0
# voc (vocoder) choices=['mb_melgan_csmsc, hifigan_csmsc']
# Both mb_melgan_csmsc and hifigan_csmsc support streaming voc inference
@@ -70,6 +70,7 @@ tts_online-onnx:
phones_dict:
tones_dict:
speaker_dict:
+ spk_id: 0
am_sample_rate: 24000
am_sess_conf:
device: "cpu" # set 'gpu:id' or 'cpu'
diff --git a/demos/streaming_tts_server/conf/tts_online_ws_application.yaml b/demos/streaming_tts_server/conf/tts_online_ws_application.yaml
index c65633917..329f882cc 100644
--- a/demos/streaming_tts_server/conf/tts_online_ws_application.yaml
+++ b/demos/streaming_tts_server/conf/tts_online_ws_application.yaml
@@ -29,7 +29,7 @@ tts_online:
phones_dict:
tones_dict:
speaker_dict:
-
+ spk_id: 0
# voc (vocoder) choices=['mb_melgan_csmsc, hifigan_csmsc']
# Both mb_melgan_csmsc and hifigan_csmsc support streaming voc inference
@@ -70,6 +70,7 @@ tts_online-onnx:
phones_dict:
tones_dict:
speaker_dict:
+ spk_id: 0
am_sample_rate: 24000
am_sess_conf:
device: "cpu" # set 'gpu:id' or 'cpu'
diff --git a/demos/style_fs2/README.md b/demos/style_fs2/README.md
index 618c74789..123230b8f 100644
--- a/demos/style_fs2/README.md
+++ b/demos/style_fs2/README.md
@@ -1,5 +1,3 @@
-([简体中文](./README_cn.md)|English)
-
# Style FastSpeech2
## Introduction
[FastSpeech2](https://arxiv.org/abs/2006.04558) is a classical acoustic model for Text-to-Speech synthesis, which introduces controllable speech input, including `phoneme duration`、 `energy` and `pitch`.
diff --git a/demos/style_fs2/README_cn.md b/demos/style_fs2/README_cn.md
deleted file mode 100644
index 5c74f691a..000000000
--- a/demos/style_fs2/README_cn.md
+++ /dev/null
@@ -1,33 +0,0 @@
-(简体中文|[English](./README.md))
-
-# Style FastSpeech2
-
-## 简介
-
-[FastSpeech2](https://arxiv.org/abs/2006.04558) 是用于语音合成的经典声学模型,它引入了可控语音输入,包括 `phoneme duration` 、 `energy` 和 `pitch` 。
-
-在预测阶段,您可以更改这些变量以获得一些有趣的结果。
-
-例如:
-
-1. `FastSpeech2` 中的 `duration` 可以控制音频的速度 ,并保持 `pitch` 。(在某些语音工具中,增加速度将增加音调,反之亦然。)
-2. 当我们将一个句子的 `pitch` 设置为平均值并将音素的 `tones` 设置为 `1` 时,我们将获得 `robot-style` 的音色。
-3. 当我们提高成年女性的 `pitch` (比例固定)时,我们会得到 `child-style` 的音色。
-
-句子中不同音素的 `duration` 和 `pitch` 可以具有不同的比例。您可以设置不同的音阶比例来强调或削弱某些音素的发音。
-
-## 运行
-
-运行以下命令行开始:
-
-```
-./run.sh
-```
-
-在 `run.sh`, 会首先执行 `source path.sh` 去设置好环境变量。
-
-如果您想尝试您的句子,请替换 `sentences.txt`中的句子。
-
-更多的细节,请查看 `style_syn.py`。
-
-语音样例可以在 [style-control-in-fastspeech2](https://paddlespeech.readthedocs.io/en/latest/tts/demo.html#style-control-in-fastspeech2) 查看。
diff --git a/demos/text_to_speech/README.md b/demos/text_to_speech/README.md
index 41dcf820b..389847a12 100644
--- a/demos/text_to_speech/README.md
+++ b/demos/text_to_speech/README.md
@@ -16,8 +16,8 @@ You can choose one way from easy, meduim and hard to install paddlespeech.
The input of this demo should be a text of the specific language that can be passed via argument.
### 3. Usage
- Command Line (Recommended)
- The default acoustic model is `Fastspeech2`, and the default vocoder is `HiFiGAN`, the default inference method is dygraph inference.
- Chinese
+ The default acoustic model is `Fastspeech2`, and the default vocoder is `Parallel WaveGAN`.
```bash
paddlespeech tts --input "你好,欢迎使用百度飞桨深度学习框架!"
```
@@ -45,33 +45,7 @@ The input of this demo should be a text of the specific language that can be pas
You can change `spk_id` here.
```bash
paddlespeech tts --am fastspeech2_vctk --voc pwgan_vctk --input "hello, boys" --lang en --spk_id 0
- ```
- - Chinese English Mixed, multi-speaker
- You can change `spk_id` here.
- ```bash
- # The `am` must be `fastspeech2_mix`!
- # The `lang` must be `mix`!
- # The voc must be chinese datasets' voc now!
- # spk 174 is csmcc, spk 175 is ljspeech
- paddlespeech tts --am fastspeech2_mix --voc hifigan_csmsc --lang mix --input "热烈欢迎您在 Discussions 中提交问题,并在 Issues 中指出发现的 bug。此外,我们非常希望您参与到 Paddle Speech 的开发中!" --spk_id 174 --output mix_spk174.wav
- paddlespeech tts --am fastspeech2_mix --voc hifigan_aishell3 --lang mix --input "热烈欢迎您在 Discussions 中提交问题,并在 Issues 中指出发现的 bug。此外,我们非常希望您参与到 Paddle Speech 的开发中!" --spk_id 174 --output mix_spk174_aishell3.wav
- paddlespeech tts --am fastspeech2_mix --voc pwgan_csmsc --lang mix --input "我们的声学模型使用了 Fast Speech Two, 声码器使用了 Parallel Wave GAN and Hifi GAN." --spk_id 175 --output mix_spk175_pwgan.wav
- paddlespeech tts --am fastspeech2_mix --voc hifigan_csmsc --lang mix --input "我们的声学模型使用了 Fast Speech Two, 声码器使用了 Parallel Wave GAN and Hifi GAN." --spk_id 175 --output mix_spk175.wav
- ```
- - Use ONNXRuntime infer:
- ```bash
- paddlespeech tts --input "你好,欢迎使用百度飞桨深度学习框架!" --output default.wav --use_onnx True
- paddlespeech tts --am speedyspeech_csmsc --input "你好,欢迎使用百度飞桨深度学习框架!" --output ss.wav --use_onnx True
- paddlespeech tts --voc mb_melgan_csmsc --input "你好,欢迎使用百度飞桨深度学习框架!" --output mb.wav --use_onnx True
- paddlespeech tts --voc pwgan_csmsc --input "你好,欢迎使用百度飞桨深度学习框架!" --output pwgan.wav --use_onnx True
- paddlespeech tts --am fastspeech2_aishell3 --voc pwgan_aishell3 --input "你好,欢迎使用百度飞桨深度学习框架!" --spk_id 0 --output aishell3_fs2_pwgan.wav --use_onnx True
- paddlespeech tts --am fastspeech2_aishell3 --voc hifigan_aishell3 --input "你好,欢迎使用百度飞桨深度学习框架!" --spk_id 0 --output aishell3_fs2_hifigan.wav --use_onnx True
- paddlespeech tts --am fastspeech2_ljspeech --voc pwgan_ljspeech --lang en --input "Life was like a box of chocolates, you never know what you're gonna get." --output lj_fs2_pwgan.wav --use_onnx True
- paddlespeech tts --am fastspeech2_ljspeech --voc hifigan_ljspeech --lang en --input "Life was like a box of chocolates, you never know what you're gonna get." --output lj_fs2_hifigan.wav --use_onnx True
- paddlespeech tts --am fastspeech2_vctk --voc pwgan_vctk --input "Life was like a box of chocolates, you never know what you're gonna get." --lang en --spk_id 0 --output vctk_fs2_pwgan.wav --use_onnx True
- paddlespeech tts --am fastspeech2_vctk --voc hifigan_vctk --input "Life was like a box of chocolates, you never know what you're gonna get." --lang en --spk_id 0 --output vctk_fs2_hifigan.wav --use_onnx True
- ```
-
+ ```
Usage:
```bash
@@ -94,8 +68,6 @@ The input of this demo should be a text of the specific language that can be pas
- `lang`: Language of tts task. Default: `zh`.
- `device`: Choose device to execute model inference. Default: default device of paddlepaddle in current environment.
- `output`: Output wave filepath. Default: `output.wav`.
- - `use_onnx`: whether to usen ONNXRuntime inference.
- - `fs`: sample rate for ONNX models when use specified model files.
Output:
```bash
@@ -103,76 +75,54 @@ The input of this demo should be a text of the specific language that can be pas
```
- Python API
- - Dygraph infer:
- ```python
- import paddle
- from paddlespeech.cli.tts import TTSExecutor
- tts_executor = TTSExecutor()
- wav_file = tts_executor(
- text='今天的天气不错啊',
- output='output.wav',
- am='fastspeech2_csmsc',
- am_config=None,
- am_ckpt=None,
- am_stat=None,
- spk_id=0,
- phones_dict=None,
- tones_dict=None,
- speaker_dict=None,
- voc='pwgan_csmsc',
- voc_config=None,
- voc_ckpt=None,
- voc_stat=None,
- lang='zh',
- device=paddle.get_device())
- print('Wave file has been generated: {}'.format(wav_file))
- ```
- - ONNXRuntime infer:
- ```python
- from paddlespeech.cli.tts import TTSExecutor
- tts_executor = TTSExecutor()
- wav_file = tts_executor(
- text='对数据集进行预处理',
- output='output.wav',
- am='fastspeech2_csmsc',
- voc='hifigan_csmsc',
- lang='zh',
- use_onnx=True,
- cpu_threads=2)
- ```
-
+ ```python
+ import paddle
+ from paddlespeech.cli.tts import TTSExecutor
+
+ tts_executor = TTSExecutor()
+ wav_file = tts_executor(
+ text='今天的天气不错啊',
+ output='output.wav',
+ am='fastspeech2_csmsc',
+ am_config=None,
+ am_ckpt=None,
+ am_stat=None,
+ spk_id=0,
+ phones_dict=None,
+ tones_dict=None,
+ speaker_dict=None,
+ voc='pwgan_csmsc',
+ voc_config=None,
+ voc_ckpt=None,
+ voc_stat=None,
+ lang='zh',
+ device=paddle.get_device())
+ print('Wave file has been generated: {}'.format(wav_file))
+ ```
+
Output:
```bash
Wave file has been generated: output.wav
```
### 4. Pretrained Models
+
Here is a list of pretrained models released by PaddleSpeech that can be used by command and python API:
- Acoustic model
- | Model | Language |
+ | Model | Language
| :--- | :---: |
- | speedyspeech_csmsc | zh |
- | fastspeech2_csmsc | zh |
- | fastspeech2_ljspeech | en |
- | fastspeech2_aishell3 | zh |
- | fastspeech2_vctk | en |
- | fastspeech2_cnndecoder_csmsc | zh |
- | fastspeech2_mix | mix |
- | tacotron2_csmsc | zh |
- | tacotron2_ljspeech | en |
+ | speedyspeech_csmsc| zh
+ | fastspeech2_csmsc| zh
+ | fastspeech2_aishell3| zh
+ | fastspeech2_ljspeech| en
+ | fastspeech2_vctk| en
- Vocoder
- | Model | Language |
+ | Model | Language
| :--- | :---: |
- | pwgan_csmsc | zh |
- | pwgan_ljspeech | en |
- | pwgan_aishell3 | zh |
- | pwgan_vctk | en |
- | mb_melgan_csmsc | zh |
- | style_melgan_csmsc | zh |
- | hifigan_csmsc | zh |
- | hifigan_ljspeech | en |
- | hifigan_aishell3 | zh |
- | hifigan_vctk | en |
- | wavernn_csmsc | zh |
+ | pwgan_csmsc| zh
+ | pwgan_aishell3| zh
+ | pwgan_ljspeech| en
+ | pwgan_vctk| en
+ | mb_melgan_csmsc| zh
diff --git a/demos/text_to_speech/README_cn.md b/demos/text_to_speech/README_cn.md
index 4a4132238..f967d3d4d 100644
--- a/demos/text_to_speech/README_cn.md
+++ b/demos/text_to_speech/README_cn.md
@@ -1,23 +1,26 @@
(简体中文|[English](./README.md))
# 语音合成
+
## 介绍
语音合成是一种自然语言建模过程,其将文本转换为语音以进行音频演示。
这个 demo 是一个从给定文本生成音频的实现,它可以通过使用 `PaddleSpeech` 的单个命令或 python 中的几行代码来实现。
+
## 使用方法
### 1. 安装
请看[安装文档](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/docs/source/install_cn.md)。
-你可以从 easy,medium,hard 三种方式中选择一种方式安装。
+你可以从 easy,medium,hard 三中方式中选择一种方式安装。
### 2. 准备输入
这个 demo 的输入是通过参数传递的特定语言的文本。
### 3. 使用方法
- 命令行 (推荐使用)
- 默认的声学模型是 `Fastspeech2`,默认的声码器是 `HiFiGAN`,默认推理方式是动态图推理。
- 中文
+
+ 默认的声学模型是 `Fastspeech2`,默认的声码器是 `Parallel WaveGAN`.
```bash
paddlespeech tts --input "你好,欢迎使用百度飞桨深度学习框架!"
```
@@ -31,7 +34,7 @@
```
- 中文, 多说话人
- 你可以改变 `spk_id`。
+ 你可以改变 `spk_id` 。
```bash
paddlespeech tts --am fastspeech2_aishell3 --voc pwgan_aishell3 --input "你好,欢迎使用百度飞桨深度学习框架!" --spk_id 0
```
@@ -42,36 +45,10 @@
```
- 英文,多说话人
- 你可以改变 `spk_id`。
+ 你可以改变 `spk_id` 。
```bash
paddlespeech tts --am fastspeech2_vctk --voc pwgan_vctk --input "hello, boys" --lang en --spk_id 0
```
- - 中英文混合,多说话人
- 你可以改变 `spk_id`。
- ```bash
- # The `am` must be `fastspeech2_mix`!
- # The `lang` must be `mix`!
- # The voc must be chinese datasets' voc now!
- # spk 174 is csmcc, spk 175 is ljspeech
- paddlespeech tts --am fastspeech2_mix --voc hifigan_csmsc --lang mix --input "热烈欢迎您在 Discussions 中提交问题,并在 Issues 中指出发现的 bug。此外,我们非常希望您参与到 Paddle Speech 的开发中!" --spk_id 174 --output mix_spk174.wav
- paddlespeech tts --am fastspeech2_mix --voc hifigan_aishell3 --lang mix --input "热烈欢迎您在 Discussions 中提交问题,并在 Issues 中指出发现的 bug。此外,我们非常希望您参与到 Paddle Speech 的开发中!" --spk_id 174 --output mix_spk174_aishell3.wav
- paddlespeech tts --am fastspeech2_mix --voc pwgan_csmsc --lang mix --input "我们的声学模型使用了 Fast Speech Two, 声码器使用了 Parallel Wave GAN and Hifi GAN." --spk_id 175 --output mix_spk175_pwgan.wav
- paddlespeech tts --am fastspeech2_mix --voc hifigan_csmsc --lang mix --input "我们的声学模型使用了 Fast Speech Two, 声码器使用了 Parallel Wave GAN and Hifi GAN." --spk_id 175 --output mix_spk175.wav
- ```
- - 使用 ONNXRuntime 推理:
- ```bash
- paddlespeech tts --input "你好,欢迎使用百度飞桨深度学习框架!" --output default.wav --use_onnx True
- paddlespeech tts --am speedyspeech_csmsc --input "你好,欢迎使用百度飞桨深度学习框架!" --output ss.wav --use_onnx True
- paddlespeech tts --voc mb_melgan_csmsc --input "你好,欢迎使用百度飞桨深度学习框架!" --output mb.wav --use_onnx True
- paddlespeech tts --voc pwgan_csmsc --input "你好,欢迎使用百度飞桨深度学习框架!" --output pwgan.wav --use_onnx True
- paddlespeech tts --am fastspeech2_aishell3 --voc pwgan_aishell3 --input "你好,欢迎使用百度飞桨深度学习框架!" --spk_id 0 --output aishell3_fs2_pwgan.wav --use_onnx True
- paddlespeech tts --am fastspeech2_aishell3 --voc hifigan_aishell3 --input "你好,欢迎使用百度飞桨深度学习框架!" --spk_id 0 --output aishell3_fs2_hifigan.wav --use_onnx True
- paddlespeech tts --am fastspeech2_ljspeech --voc pwgan_ljspeech --lang en --input "Life was like a box of chocolates, you never know what you're gonna get." --output lj_fs2_pwgan.wav --use_onnx True
- paddlespeech tts --am fastspeech2_ljspeech --voc hifigan_ljspeech --lang en --input "Life was like a box of chocolates, you never know what you're gonna get." --output lj_fs2_hifigan.wav --use_onnx True
- paddlespeech tts --am fastspeech2_vctk --voc pwgan_vctk --input "Life was like a box of chocolates, you never know what you're gonna get." --lang en --spk_id 0 --output vctk_fs2_pwgan.wav --use_onnx True
- paddlespeech tts --am fastspeech2_vctk --voc hifigan_vctk --input "Life was like a box of chocolates, you never know what you're gonna get." --lang en --spk_id 0 --output vctk_fs2_hifigan.wav --use_onnx True
- ```
-
使用方法:
```bash
@@ -94,8 +71,6 @@
- `lang`:TTS 任务的语言, 默认值:`zh`。
- `device`:执行预测的设备, 默认值:当前系统下 paddlepaddle 的默认 device。
- `output`:输出音频的路径, 默认值:`output.wav`。
- - `use_onnx`: 是否使用 ONNXRuntime 进行推理。
- - `fs`: 使用特定 ONNX 模型时的采样率。
输出:
```bash
@@ -103,44 +78,31 @@
```
- Python API
- - 动态图推理:
- ```python
- import paddle
- from paddlespeech.cli.tts import TTSExecutor
- tts_executor = TTSExecutor()
- wav_file = tts_executor(
- text='今天的天气不错啊',
- output='output.wav',
- am='fastspeech2_csmsc',
- am_config=None,
- am_ckpt=None,
- am_stat=None,
- spk_id=0,
- phones_dict=None,
- tones_dict=None,
- speaker_dict=None,
- voc='pwgan_csmsc',
- voc_config=None,
- voc_ckpt=None,
- voc_stat=None,
- lang='zh',
- device=paddle.get_device())
- print('Wave file has been generated: {}'.format(wav_file))
- ```
- - ONNXRuntime 推理:
- ```python
- from paddlespeech.cli.tts import TTSExecutor
- tts_executor = TTSExecutor()
- wav_file = tts_executor(
- text='对数据集进行预处理',
- output='output.wav',
- am='fastspeech2_csmsc',
- voc='hifigan_csmsc',
- lang='zh',
- use_onnx=True,
- cpu_threads=2)
- ```
-
+ ```python
+ import paddle
+ from paddlespeech.cli.tts import TTSExecutor
+
+ tts_executor = TTSExecutor()
+ wav_file = tts_executor(
+ text='今天的天气不错啊',
+ output='output.wav',
+ am='fastspeech2_csmsc',
+ am_config=None,
+ am_ckpt=None,
+ am_stat=None,
+ spk_id=0,
+ phones_dict=None,
+ tones_dict=None,
+ speaker_dict=None,
+ voc='pwgan_csmsc',
+ voc_config=None,
+ voc_ckpt=None,
+ voc_stat=None,
+ lang='zh',
+ device=paddle.get_device())
+ print('Wave file has been generated: {}'.format(wav_file))
+ ```
+
输出:
```bash
Wave file has been generated: output.wav
@@ -150,29 +112,19 @@
以下是 PaddleSpeech 提供的可以被命令行和 python API 使用的预训练模型列表:
- 声学模型
- | 模型 | 语言 |
+ | 模型 | 语言
| :--- | :---: |
- | speedyspeech_csmsc | zh |
- | fastspeech2_csmsc | zh |
- | fastspeech2_ljspeech | en |
- | fastspeech2_aishell3 | zh |
- | fastspeech2_vctk | en |
- | fastspeech2_cnndecoder_csmsc | zh |
- | fastspeech2_mix | mix |
- | tacotron2_csmsc | zh |
- | tacotron2_ljspeech | en |
+ | speedyspeech_csmsc| zh
+ | fastspeech2_csmsc| zh
+ | fastspeech2_aishell3| zh
+ | fastspeech2_ljspeech| en
+ | fastspeech2_vctk| en
- 声码器
- | 模型 | 语言 |
+ | 模型 | 语言
| :--- | :---: |
- | pwgan_csmsc | zh |
- | pwgan_ljspeech | en |
- | pwgan_aishell3 | zh |
- | pwgan_vctk | en |
- | mb_melgan_csmsc | zh |
- | style_melgan_csmsc | zh |
- | hifigan_csmsc | zh |
- | hifigan_ljspeech | en |
- | hifigan_aishell3 | zh |
- | hifigan_vctk | en |
- | wavernn_csmsc | zh |
+ | pwgan_csmsc| zh
+ | pwgan_aishell3| zh
+ | pwgan_ljspeech| en
+ | pwgan_vctk| en
+ | mb_melgan_csmsc| zh
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 3fb82367f..ee116a9b6 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,7 +1,12 @@
-braceexpand
-colorlog
+myst-parser
+numpydoc
+recommonmark>=0.5.0
+sphinx
+sphinx-autobuild
+sphinx-markdown-tables
+sphinx_rtd_theme
+paddlepaddle>=2.2.2
editdistance
-fastapi
g2p_en
g2pM
h5py
@@ -9,45 +14,40 @@ inflect
jieba
jsonlines
kaldiio
-keyboard
librosa==0.8.1
loguru
matplotlib
-myst-parser
nara_wpe
-numpydoc
onnxruntime==1.10.0
opencc
+pandas
paddlenlp
-paddlepaddle>=2.2.2
paddlespeech_feat
-pandas
-pathos == 0.2.8
-pattern_singleton
Pillow>=9.0.0
praatio==5.0.0
-prettytable
-pypinyin<=0.44.0
+pypinyin
pypinyin-dict
python-dateutil
pyworld==0.2.12
-recommonmark>=0.5.0
resampy==0.2.2
sacrebleu
scipy
sentencepiece~=0.1.96
soundfile~=0.10
-sphinx
-sphinx-autobuild
-sphinx-markdown-tables
-sphinx_rtd_theme
textgrid
timer
tqdm
typeguard
-uvicorn
visualdl
webrtcvad
-websockets
yacs~=0.1.8
+prettytable
zhon
+colorlog
+pathos == 0.2.8
+fastapi
+websockets
+keyboard
+uvicorn
+pattern_singleton
+braceexpand
\ No newline at end of file
diff --git a/docs/source/conf.py b/docs/source/conf.py
index cd9b1807b..c94cf0b86 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -20,11 +20,10 @@
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
-import os
-import sys
-
import recommonmark.parser
import sphinx_rtd_theme
+import sys
+import os
sys.path.insert(0, os.path.abspath('../..'))
autodoc_mock_imports = ["soundfile", "librosa"]
diff --git a/docs/source/released_model.md b/docs/source/released_model.md
index d6691812e..a1e3eb879 100644
--- a/docs/source/released_model.md
+++ b/docs/source/released_model.md
@@ -42,11 +42,9 @@ SpeedySpeech| CSMSC | [speedyspeech-csmsc](https://github.com/PaddlePaddle/Paddl
FastSpeech2| CSMSC |[fastspeech2-csmsc](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/csmsc/tts3)|[fastspeech2_nosil_baker_ckpt_0.4.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_nosil_baker_ckpt_0.4.zip)|[fastspeech2_csmsc_static_0.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_csmsc_static_0.2.0.zip) [fastspeech2_csmsc_onnx_0.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_csmsc_onnx_0.2.0.zip)|157MB|
FastSpeech2-Conformer| CSMSC |[fastspeech2-csmsc](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/csmsc/tts3)|[fastspeech2_conformer_baker_ckpt_0.5.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_conformer_baker_ckpt_0.5.zip)|||
FastSpeech2-CNNDecoder| CSMSC| [fastspeech2-csmsc](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/csmsc/tts3)| [fastspeech2_cnndecoder_csmsc_ckpt_1.0.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_cnndecoder_csmsc_ckpt_1.0.0.zip) | [fastspeech2_cnndecoder_csmsc_static_1.0.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_cnndecoder_csmsc_static_1.0.0.zip) [fastspeech2_cnndecoder_csmsc_streaming_static_1.0.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_cnndecoder_csmsc_streaming_static_1.0.0.zip) [fastspeech2_cnndecoder_csmsc_onnx_1.0.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_cnndecoder_csmsc_onnx_1.0.0.zip) [fastspeech2_cnndecoder_csmsc_streaming_onnx_1.0.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_cnndecoder_csmsc_streaming_onnx_1.0.0.zip) | 84MB|
-FastSpeech2| AISHELL-3 |[fastspeech2-aishell3](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/tts3)|[fastspeech2_aishell3_ckpt_1.1.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_aishell3_ckpt_1.1.0.zip)|[fastspeech2_aishell3_static_1.1.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_aishell3_static_1.1.0.zip) [fastspeech2_aishell3_onnx_1.1.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_aishell3_onnx_1.1.0.zip)|147MB|
+FastSpeech2| AISHELL-3 |[fastspeech2-aishell3](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/tts3)|[fastspeech2_nosil_aishell3_ckpt_0.4.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_nosil_aishell3_ckpt_0.4.zip)|[fastspeech2_aishell3_static_1.1.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_aishell3_static_1.1.0.zip) [fastspeech2_aishell3_onnx_1.1.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_aishell3_onnx_1.1.0.zip)|147MB|
FastSpeech2| LJSpeech |[fastspeech2-ljspeech](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/ljspeech/tts3)|[fastspeech2_nosil_ljspeech_ckpt_0.5.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_nosil_ljspeech_ckpt_0.5.zip)|[fastspeech2_ljspeech_static_1.1.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_ljspeech_static_1.1.0.zip) [fastspeech2_ljspeech_onnx_1.1.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_ljspeech_onnx_1.1.0.zip)|145MB|
-FastSpeech2| VCTK |[fastspeech2-vctk](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/vctk/tts3)|[fastspeech2_vctk_ckpt_1.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_vctk_ckpt_1.2.0.zip)|[fastspeech2_vctk_static_1.1.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_vctk_static_1.1.0.zip) [fastspeech2_vctk_onnx_1.1.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_vctk_onnx_1.1.0.zip) | 145MB|
-FastSpeech2| ZH_EN |[fastspeech2-zh_en](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/zh_en_tts/tts3)|[fastspeech2_mix_ckpt_1.2.0.zip](https://paddlespeech.bj.bcebos.com/t2s/chinse_english_mixed/models/fastspeech2_mix_ckpt_1.2.0.zip)|[fastspeech2_mix_static_0.2.0.zip](https://paddlespeech.bj.bcebos.com/t2s/chinse_english_mixed/models/fastspeech2_mix_static_0.2.0.zip) [fastspeech2_mix_onnx_0.2.0.zip](https://paddlespeech.bj.bcebos.com/t2s/chinse_english_mixed/models/fastspeech2_mix_onnx_0.2.0.zip) | 145MB|
-
+FastSpeech2| VCTK |[fastspeech2-vctk](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/vctk/tts3)|[fastspeech2_nosil_vctk_ckpt_0.5.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_nosil_vctk_ckpt_0.5.zip)|[fastspeech2_vctk_static_1.1.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_vctk_static_1.1.0.zip) [fastspeech2_vctk_onnx_1.1.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_vctk_onnx_1.1.0.zip) | 145MB|
### Vocoders
Model Type | Dataset| Example Link | Pretrained Models| Static/ONNX Models|Size (static)
@@ -69,7 +67,7 @@ WaveRNN | CSMSC |[WaveRNN-csmsc](https://github.com/PaddlePaddle/PaddleSpeech/tr
Model Type | Dataset| Example Link | Pretrained Models
:-------------:| :------------:| :-----: | :-----: |
GE2E| AISHELL-3, etc. |[ge2e](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/other/ge2e)|[ge2e_ckpt_0.3.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ge2e/ge2e_ckpt_0.3.zip)
-GE2E + Tacotron2| AISHELL-3 |[ge2e-Tacotron2-aishell3](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/vc0)|[tacotron2_aishell3_ckpt_vc0_0.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/tacotron2/tacotron2_aishell3_ckpt_vc0_0.2.0.zip)
+GE2E + Tactron2| AISHELL-3 |[ge2e-tactron2-aishell3](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/vc0)|[tacotron2_aishell3_ckpt_vc0_0.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/tacotron2/tacotron2_aishell3_ckpt_vc0_0.2.0.zip)
GE2E + FastSpeech2 | AISHELL-3 |[ge2e-fastspeech2-aishell3](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/vc1)|[fastspeech2_nosil_aishell3_vc1_ckpt_0.5.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_nosil_aishell3_vc1_ckpt_0.5.zip)
diff --git a/docs/source/tts/quick_start.md b/docs/source/tts/quick_start.md
index d8dbc646c..bddee7786 100644
--- a/docs/source/tts/quick_start.md
+++ b/docs/source/tts/quick_start.md
@@ -7,7 +7,7 @@ The examples in PaddleSpeech are mainly classified by datasets, the TTS datasets
* VCTK (English multiple speakers)
The models in PaddleSpeech TTS have the following mapping relationship:
-* tts0 - Tacotron2
+* tts0 - Tactron2
* tts1 - TransformerTTS
* tts2 - SpeedySpeech
* tts3 - FastSpeech2
@@ -17,7 +17,7 @@ The models in PaddleSpeech TTS have the following mapping relationship:
* voc3 - MultiBand MelGAN
* voc4 - Style MelGAN
* voc5 - HiFiGAN
-* vc0 - Tacotron2 Voice Clone with GE2E
+* vc0 - Tactron2 Voice Clone with GE2E
* vc1 - FastSpeech2 Voice Clone with GE2E
## Quick Start
diff --git a/docs/source/tts/quick_start_cn.md b/docs/source/tts/quick_start_cn.md
index c56d9bb45..37246e84e 100644
--- a/docs/source/tts/quick_start_cn.md
+++ b/docs/source/tts/quick_start_cn.md
@@ -9,7 +9,7 @@
PaddleSpeech 的 TTS 模型具有以下映射关系:
-* tts0 - Tacotron2
+* tts0 - Tactron2
* tts1 - TransformerTTS
* tts2 - SpeedySpeech
* tts3 - FastSpeech2
@@ -19,7 +19,7 @@ PaddleSpeech 的 TTS 模型具有以下映射关系:
* voc3 - MultiBand MelGAN
* voc4 - Style MelGAN
* voc5 - HiFiGAN
-* vc0 - Tacotron2 Voice Clone with GE2E
+* vc0 - Tactron2 Voice Clone with GE2E
* vc1 - FastSpeech2 Voice Clone with GE2E
## 快速开始
diff --git a/docs/source/tts/tts_papers.md b/docs/source/tts/tts_papers.md
index f3ca1b624..681b21066 100644
--- a/docs/source/tts/tts_papers.md
+++ b/docs/source/tts/tts_papers.md
@@ -5,7 +5,6 @@
- [Disambiguation of Chinese Polyphones in an End-to-End Framework with Semantic Features Extracted by Pre-trained BERT](https://www1.se.cuhk.edu.hk/~hccl/publications/pub/201909_INTERSPEECH_DongyangDAI.pdf)
- [Polyphone Disambiguation in Mandarin Chinese with Semi-Supervised Learning](https://www.isca-speech.org/archive/pdfs/interspeech_2021/shi21d_interspeech.pdf)
* github: https://github.com/PaperMechanica/SemiPPL
-- [WikipediaHomographData](https://github.com/google-research-datasets/WikipediaHomographData)
### Text Normalization
#### English
- [applenob/text_normalization](https://github.com/applenob/text_normalization)
diff --git a/docs/tutorial/tts/tts_tutorial.ipynb b/docs/tutorial/tts/tts_tutorial.ipynb
index 583adb014..81f713efa 100644
--- a/docs/tutorial/tts/tts_tutorial.ipynb
+++ b/docs/tutorial/tts/tts_tutorial.ipynb
@@ -769,7 +769,7 @@
"```\n",
"我们在每个数据集的 README.md 介绍了子目录和模型的对应关系, 在 TTS 中有如下对应关系:\n",
"```text\n",
- "tts0 - Tacotron2\n",
+ "tts0 - Tactron2\n",
"tts1 - TransformerTTS\n",
"tts2 - SpeedySpeech\n",
"tts3 - FastSpeech2\n",
diff --git a/examples/aishell/asr0/README.md b/examples/aishell/asr0/README.md
index 131de36e3..4459b1382 100644
--- a/examples/aishell/asr0/README.md
+++ b/examples/aishell/asr0/README.md
@@ -197,7 +197,7 @@ In some situations, you want to use the trained model to do the inference for th
```bash
if [ ${stage} -le 6 ] && [ ${stop_stage} -ge 6 ]; then
# test a single .wav file
- CUDA_VISIBLE_DEVICES=0 ./local/test_wav.sh ${conf_path} ${decode_conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} ${model_type} ${audio_file}
+ CUDA_VISIBLE_DEVICES=0 ./local/test_wav.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} ${model_type} ${audio_file}
fi
```
you can train the model by yourself, or you can download the pretrained model by the script below:
@@ -211,5 +211,5 @@ wget -nc https://paddlespeech.bj.bcebos.com/datasets/single_wav/zh/demo_01_03.wa
```
You need to prepare an audio file or use the audio demo above, please confirm the sample rate of the audio is 16K. You can get the result of the audio demo by running the script below.
```bash
-CUDA_VISIBLE_DEVICES= ./local/test_wav.sh conf/deepspeech2.yaml conf/tuning/decode.yaml exp/deepspeech2/checkpoints/avg_1 data/demo_01_03.wav
+CUDA_VISIBLE_DEVICES= ./local/test_wav.sh conf/deepspeech2.yaml exp/deepspeech2/checkpoints/avg_1 data/demo_01_03.wav
```
diff --git a/examples/aishell3/README.md b/examples/aishell3/README.md
index dd09bdfb2..273f488e4 100644
--- a/examples/aishell3/README.md
+++ b/examples/aishell3/README.md
@@ -1,6 +1,6 @@
# Aishell3
-* tts0 - Tacotron2
+* tts0 - Tactron2
* tts1 - TransformerTTS
* tts2 - SpeedySpeech
* tts3 - FastSpeech2
@@ -8,7 +8,5 @@
* voc1 - Parallel WaveGAN
* voc2 - MelGAN
* voc3 - MultiBand MelGAN
-* vc0 - Tacotron2 Voice Cloning with GE2E
+* vc0 - Tactron2 Voice Cloning with GE2E
* vc1 - FastSpeech2 Voice Cloning with GE2E
-* vc2 - FastSpeech2 Voice Cloning with ECAPA-TDNN
-* ernie_sat - ERNIE-SAT
diff --git a/examples/aishell3/ernie_sat/README.md b/examples/aishell3/ernie_sat/README.md
index 707ee1381..8086d007c 100644
--- a/examples/aishell3/ernie_sat/README.md
+++ b/examples/aishell3/ernie_sat/README.md
@@ -1,151 +1 @@
-# ERNIE-SAT with AISHELL3 dataset
-
-ERNIE-SAT 是可以同时处理中英文的跨语言的语音-语言跨模态大模型,其在语音编辑、个性化语音合成以及跨语言的语音合成等多个任务取得了领先效果。可以应用于语音编辑、个性化合成、语音克隆、同传翻译等一系列场景,该项目供研究使用。
-
-## 模型框架
-ERNIE-SAT 中我们提出了两项创新:
-- 在预训练过程中将中英双语对应的音素作为输入,实现了跨语言、个性化的软音素映射
-- 采用语言和语音的联合掩码学习实现了语言和语音的对齐
-
-
-
-
-
-## Dataset
-### Download and Extract
-Download AISHELL-3 from it's [Official Website](http://www.aishelltech.com/aishell_3) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/data_aishell3`.
-
-### Get MFA Result and Extract
-We use [MFA2.x](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) to get durations for aishell3_fastspeech2.
-You can download it from [aishell3_alignment_tone.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/AISHELL-3/with_tone/aishell3_alignment_tone.tar.gz), or train your own MFA model by referring to the [mfa example](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/other/mfa) (which currently uses MFA1.x) in our repo.
-
-## Get Started
-Assume the path to the dataset is `~/datasets/data_aishell3`.
-Assume the path to the MFA result of AISHELL-3 is `./aishell3_alignment_tone`.
-Run the command below to
-1. **source path**.
-2. preprocess the dataset.
-3. train the model.
-4. synthesize wavs.
- - synthesize waveform from `metadata.jsonl`.
- - synthesize waveform from text file.
-
-```bash
-./run.sh
-```
-You can choose a range of stages you want to run, or set `stage` equal to `stop-stage` to use only one stage. For example, running the following command will only preprocess the dataset.
-```bash
-./run.sh --stage 0 --stop-stage 0
-```
-### Data Preprocessing
-```bash
-./local/preprocess.sh ${conf_path}
-```
-When it is done, a `dump` folder is created in the current directory. The structure of the dump folder is listed below.
-
-```text
-dump
-├── dev
-│ ├── norm
-│ └── raw
-├── phone_id_map.txt
-├── speaker_id_map.txt
-├── test
-│ ├── norm
-│ └── raw
-└── train
- ├── norm
- ├── raw
- └── speech_stats.npy
-```
-The dataset is split into 3 parts, namely `train`, `dev`, and `test`, each of which contains a `norm` and `raw` subfolder. The raw folder contains the speech features of each utterance, while the norm folder contains the normalized ones. The statistics used to normalize features are computed from the training set, which is located in `dump/train/*_stats.npy`.
-
-Also, there is a `metadata.jsonl` in each subfolder. It is a table-like file that contains phones, text_lengths, speech_lengths, durations, the path of speech features, speaker, and id of each utterance.
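To see exactly which fields were written for your run, you can print the first record of one of these files; a small sketch, assuming preprocessing has already produced the `dump` folder:
```bash
# Pretty-print the first record of the normalized training metadata.
head -n 1 dump/train/norm/metadata.jsonl | python3 -m json.tool
```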
-
-### Model Training
-```bash
-CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path}
-```
-`./local/train.sh` calls `${BIN_DIR}/train.py`.
-
-### Synthesizing
-We use [HiFiGAN](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/voc5) as the neural vocoder.
-
-Download pretrained HiFiGAN model from [hifigan_aishell3_ckpt_0.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/hifigan/hifigan_aishell3_ckpt_0.2.0.zip) and unzip it.
-```bash
-unzip hifigan_aishell3_ckpt_0.2.0.zip
-```
-HiFiGAN checkpoint contains files listed below.
-```text
-hifigan_aishell3_ckpt_0.2.0
-├── default.yaml # default config used to train HiFiGAN
-├── feats_stats.npy # statistics used to normalize spectrogram when training HiFiGAN
-└── snapshot_iter_2500000.pdz # generator parameters of HiFiGAN
-```
-`./local/synthesize.sh` calls `${BIN_DIR}/../synthesize.py`, which can synthesize waveform from `metadata.jsonl`.
-```bash
-CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name}
-```
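With the HiFiGAN checkpoint unzipped as above and the default values from `run.sh`, the variables in `./local/synthesize.sh` expand to roughly the following call (a sketch; the ERNIE-SAT checkpoint name depends on your own training run):
```bash
FLAGS_allocator_strategy=naive_best_fit \
FLAGS_fraction_of_gpu_memory_to_use=0.01 \
python3 ${BIN_DIR}/synthesize.py \
    --erniesat_config=conf/default.yaml \
    --erniesat_ckpt=exp/default/checkpoints/snapshot_iter_289500.pdz \
    --erniesat_stat=dump/train/speech_stats.npy \
    --voc=hifigan_aishell3 \
    --voc_config=hifigan_aishell3_ckpt_0.2.0/default.yaml \
    --voc_ckpt=hifigan_aishell3_ckpt_0.2.0/snapshot_iter_2500000.pdz \
    --voc_stat=hifigan_aishell3_ckpt_0.2.0/feats_stats.npy \
    --test_metadata=dump/test/norm/metadata.jsonl \
    --output_dir=exp/default/test \
    --phones_dict=dump/phone_id_map.txt
```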
-## Speech Synthesis and Speech Editing
-### Prepare
-**prepare aligner**
-```bash
-mkdir -p tools/aligner
-cd tools
-# download MFA
-wget https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner/releases/download/v1.0.1/montreal-forced-aligner_linux.tar.gz
-# extract MFA
-tar xvf montreal-forced-aligner_linux.tar.gz
-# fix .so of MFA
-cd montreal-forced-aligner/lib
-ln -snf libpython3.6m.so.1.0 libpython3.6m.so
-cd -
-# download align models and dicts
-cd aligner
-wget https://paddlespeech.bj.bcebos.com/MFA/ernie_sat/aishell3_model.zip
-wget https://paddlespeech.bj.bcebos.com/MFA/AISHELL-3/with_tone/simple.lexicon
-wget https://paddlespeech.bj.bcebos.com/MFA/ernie_sat/vctk_model.zip
-wget https://paddlespeech.bj.bcebos.com/MFA/LJSpeech-1.1/cmudict-0.7b
-cd ../../
-```
-**prepare pretrained FastSpeech2 models**
-
-ERNIE-SAT uses FastSpeech2 as the phoneme duration predictor:
-```bash
-mkdir download
-cd download
-wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_conformer_baker_ckpt_0.5.zip
-wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_nosil_ljspeech_ckpt_0.5.zip
-unzip fastspeech2_conformer_baker_ckpt_0.5.zip
-unzip fastspeech2_nosil_ljspeech_ckpt_0.5.zip
-cd ../
-```
-**prepare source data**
-```bash
-mkdir source
-cd source
-wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/SSB03540307.wav
-wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/SSB03540428.wav
-wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/LJ050-0278.wav
-wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/p243_313.wav
-wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/p299_096.wav
-wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/this_was_not_the_show_for_me.wav
-wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/README.md
-cd ../
-```
-
-You can check the text of downloaded wavs in `source/README.md`.
-### Speech Synthesis and Speech Editing
-```bash
-./run.sh --stage 3 --stop-stage 3 --gpus 0
-```
-`stage 3` of `run.sh` calls `local/synthesize_e2e.sh`; its `stage 0` is **Speech Synthesis** and its `stage 1` is **Speech Editing**.
-
-You can modify `--wav_path`, `--old_str`, and `--new_str` yourself. `--old_str` should be the text corresponding to the audio of `--wav_path`, `--new_str` should be designed according to `--task_name`, and both `--source_lang` and `--target_lang` should be `zh` for a model trained with the AISHELL3 dataset.
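As a concrete sketch of such a modification (mirroring `local/synthesize_e2e.sh`, and assuming the pretrained HiFiGAN vocoder and your own trained ERNIE-SAT checkpoint from the sections above), a speech-editing call looks like:
```bash
FLAGS_allocator_strategy=naive_best_fit \
FLAGS_fraction_of_gpu_memory_to_use=0.01 \
python3 ${BIN_DIR}/synthesize_e2e.py \
    --task_name=edit \
    --wav_path=source/SSB03540428.wav \
    --old_str='今天天气很好' \
    --new_str='今天心情很好' \
    --source_lang=zh \
    --target_lang=zh \
    --erniesat_config=conf/default.yaml \
    --phones_dict=dump/phone_id_map.txt \
    --erniesat_ckpt=exp/default/checkpoints/snapshot_iter_289500.pdz \
    --erniesat_stat=dump/train/speech_stats.npy \
    --voc=hifigan_aishell3 \
    --voc_config=hifigan_aishell3_ckpt_0.2.0/default.yaml \
    --voc_ckpt=hifigan_aishell3_ckpt_0.2.0/snapshot_iter_2500000.pdz \
    --voc_stat=hifigan_aishell3_ckpt_0.2.0/feats_stats.npy \
    --output_name=exp/pred_edit.wav
```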
-## Pretrained Model
-Pretrained ErnieSAT model:
-- [erniesat_aishell3_ckpt_1.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/erniesat_aishell3_ckpt_1.2.0.zip)
-
-Model | Step | eval/mlm_loss | eval/loss
-:-------------:| :------------:| :-----: | :-----:
-default| 8(gpu) x 289500|51.723782|51.723782
+# ERNIE SAT with AISHELL3 dataset
diff --git a/examples/aishell3/ernie_sat/conf/default.yaml b/examples/aishell3/ernie_sat/conf/default.yaml
index dbd5c467e..fdc767fb0 100644
--- a/examples/aishell3/ernie_sat/conf/default.yaml
+++ b/examples/aishell3/ernie_sat/conf/default.yaml
@@ -1,6 +1,3 @@
-# This configuration was tested on 8 GPUs (A100) with 80GB GPU memory.
-# It takes around 3 days to finish the training. You can adjust
-# batch_size, num_workers here and ngpu in local/train.sh for your machine.
###########################################################
# FEATURE EXTRACTION SETTING #
###########################################################
@@ -24,8 +21,8 @@ mlm_prob: 0.8
###########################################################
# DATA SETTING #
###########################################################
-batch_size: 40
-num_workers: 8
+batch_size: 20
+num_workers: 2
###########################################################
# MODEL SETTING #
@@ -283,4 +280,4 @@ token_list:
- o3
- iang5
- ei5
--
+-
\ No newline at end of file
diff --git a/examples/aishell3/ernie_sat/local/synthesize.sh b/examples/aishell3/ernie_sat/local/synthesize.sh
index 8b4178f13..3e907427c 100755
--- a/examples/aishell3/ernie_sat/local/synthesize.sh
+++ b/examples/aishell3/ernie_sat/local/synthesize.sh
@@ -4,11 +4,28 @@ config_path=$1
train_output_path=$2
ckpt_name=$3
-stage=0
-stop_stage=0
+stage=1
+stop_stage=1
-# hifigan
+# pwgan
if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
+ FLAGS_allocator_strategy=naive_best_fit \
+ FLAGS_fraction_of_gpu_memory_to_use=0.01 \
+ python3 ${BIN_DIR}/synthesize.py \
+ --erniesat_config=${config_path} \
+ --erniesat_ckpt=${train_output_path}/checkpoints/${ckpt_name} \
+ --erniesat_stat=dump/train/speech_stats.npy \
+ --voc=pwgan_aishell3 \
+ --voc_config=pwg_aishell3_ckpt_0.5/default.yaml \
+ --voc_ckpt=pwg_aishell3_ckpt_0.5/snapshot_iter_1000000.pdz \
+ --voc_stat=pwg_aishell3_ckpt_0.5/feats_stats.npy \
+ --test_metadata=dump/test/norm/metadata.jsonl \
+ --output_dir=${train_output_path}/test \
+ --phones_dict=dump/phone_id_map.txt
+fi
+
+# hifigan
+if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
FLAGS_allocator_strategy=naive_best_fit \
FLAGS_fraction_of_gpu_memory_to_use=0.01 \
python3 ${BIN_DIR}/synthesize.py \
diff --git a/examples/aishell3/ernie_sat/local/synthesize_e2e.sh b/examples/aishell3/ernie_sat/local/synthesize_e2e.sh
deleted file mode 100755
index 77b353b52..000000000
--- a/examples/aishell3/ernie_sat/local/synthesize_e2e.sh
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/bin/bash
-
-config_path=$1
-train_output_path=$2
-ckpt_name=$3
-
-stage=0
-stop_stage=1
-
-if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
- echo 'speech synthesize !'
- FLAGS_allocator_strategy=naive_best_fit \
- FLAGS_fraction_of_gpu_memory_to_use=0.01 \
- python3 ${BIN_DIR}/synthesize_e2e.py \
- --task_name=synthesize \
- --wav_path=source/SSB03540307.wav \
- --old_str='请播放歌曲小苹果' \
- --new_str='歌曲真好听' \
- --source_lang=zh \
- --target_lang=zh \
- --erniesat_config=${config_path} \
- --phones_dict=dump/phone_id_map.txt \
- --erniesat_ckpt=${train_output_path}/checkpoints/${ckpt_name} \
- --erniesat_stat=dump/train/speech_stats.npy \
- --voc=hifigan_aishell3 \
- --voc_config=hifigan_aishell3_ckpt_0.2.0/default.yaml \
- --voc_ckpt=hifigan_aishell3_ckpt_0.2.0/snapshot_iter_2500000.pdz \
- --voc_stat=hifigan_aishell3_ckpt_0.2.0/feats_stats.npy \
- --output_name=exp/pred_gen.wav
-fi
-
-if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
- echo 'speech edit !'
- FLAGS_allocator_strategy=naive_best_fit \
- FLAGS_fraction_of_gpu_memory_to_use=0.01 \
- python3 ${BIN_DIR}/synthesize_e2e.py \
- --task_name=edit \
- --wav_path=source/SSB03540428.wav \
- --old_str='今天天气很好' \
- --new_str='今天心情很好' \
- --source_lang=zh \
- --target_lang=zh \
- --erniesat_config=${config_path} \
- --phones_dict=dump/phone_id_map.txt \
- --erniesat_ckpt=${train_output_path}/checkpoints/${ckpt_name} \
- --erniesat_stat=dump/train/speech_stats.npy \
- --voc=hifigan_aishell3 \
- --voc_config=hifigan_aishell3_ckpt_0.2.0/default.yaml \
- --voc_ckpt=hifigan_aishell3_ckpt_0.2.0/snapshot_iter_2500000.pdz \
- --voc_stat=hifigan_aishell3_ckpt_0.2.0/feats_stats.npy \
- --output_name=exp/pred_edit.wav
-fi
diff --git a/examples/aishell3/ernie_sat/local/train.sh b/examples/aishell3/ernie_sat/local/train.sh
index 829310832..30720e8f5 100755
--- a/examples/aishell3/ernie_sat/local/train.sh
+++ b/examples/aishell3/ernie_sat/local/train.sh
@@ -8,5 +8,5 @@ python3 ${BIN_DIR}/train.py \
--dev-metadata=dump/dev/norm/metadata.jsonl \
--config=${config_path} \
--output-dir=${train_output_path} \
- --ngpu=8 \
- --phones-dict=dump/phone_id_map.txt
+ --ngpu=2 \
+ --phones-dict=dump/phone_id_map.txt
\ No newline at end of file
diff --git a/examples/aishell3/ernie_sat/run.sh b/examples/aishell3/ernie_sat/run.sh
index d3efefe0c..d75a19f23 100755
--- a/examples/aishell3/ernie_sat/run.sh
+++ b/examples/aishell3/ernie_sat/run.sh
@@ -3,13 +3,13 @@
set -e
source path.sh
-gpus=0,1,2,3,4,5,6,7
+gpus=0,1
stage=0
stop_stage=100
conf_path=conf/default.yaml
train_output_path=exp/default
-ckpt_name=snapshot_iter_289500.pdz
+ckpt_name=snapshot_iter_153.pdz
# with the following command, you can choose the stage range you want to run
# such as `./run.sh --stage 0 --stop-stage 0`
@@ -30,7 +30,3 @@ if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
# synthesize, vocoder is pwgan
CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1
fi
-
-if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
- CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1
-fi
diff --git a/examples/aishell3/tts3/README.md b/examples/aishell3/tts3/README.md
index 3e1dee2fb..21bad51ec 100644
--- a/examples/aishell3/tts3/README.md
+++ b/examples/aishell3/tts3/README.md
@@ -217,7 +217,7 @@ optional arguments:
## Pretrained Model
Pretrained FastSpeech2 model with no silence in the edge of audios:
-- [fastspeech2_aishell3_ckpt_1.1.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_aishell3_ckpt_1.1.0.zip)
+- [fastspeech2_nosil_aishell3_ckpt_0.4.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_nosil_aishell3_ckpt_0.4.zip)
- [fastspeech2_conformer_aishell3_ckpt_0.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_conformer_aishell3_ckpt_0.2.0.zip) (Thanks for [@awmmmm](https://github.com/awmmmm)'s contribution)
The static model can be downloaded here:
@@ -229,11 +229,9 @@ The ONNX model can be downloaded here:
FastSpeech2 checkpoint contains files listed below.
```text
-fastspeech2_aishell3_ckpt_1.1.0
+fastspeech2_nosil_aishell3_ckpt_0.4
├── default.yaml # default config used to train fastspeech2
-├── energy_stats.npy # statistics used to normalize energy when training fastspeech2
├── phone_id_map.txt # phone vocabulary file when training fastspeech2
-├── pitch_stats.npy # statistics used to normalize pitch when training fastspeech2
├── snapshot_iter_96400.pdz # model parameters and optimizer states
├── speaker_id_map.txt # speaker id map file when training a multi-speaker fastspeech2
└── speech_stats.npy # statistics used to normalize spectrogram when training fastspeech2
@@ -246,9 +244,9 @@ FLAGS_allocator_strategy=naive_best_fit \
FLAGS_fraction_of_gpu_memory_to_use=0.01 \
python3 ${BIN_DIR}/../synthesize_e2e.py \
--am=fastspeech2_aishell3 \
- --am_config=fastspeech2_aishell3_ckpt_1.1.0/default.yaml \
- --am_ckpt=fastspeech2_aishell3_ckpt_1.1.0/snapshot_iter_96400.pdz \
- --am_stat=fastspeech2_aishell3_ckpt_1.1.0/speech_stats.npy \
+ --am_config=fastspeech2_nosil_aishell3_ckpt_0.4/default.yaml \
+ --am_ckpt=fastspeech2_nosil_aishell3_ckpt_0.4/snapshot_iter_96400.pdz \
+ --am_stat=fastspeech2_nosil_aishell3_ckpt_0.4/speech_stats.npy \
--voc=pwgan_aishell3 \
--voc_config=pwg_aishell3_ckpt_0.5/default.yaml \
--voc_ckpt=pwg_aishell3_ckpt_0.5/snapshot_iter_1000000.pdz \
@@ -256,8 +254,9 @@ python3 ${BIN_DIR}/../synthesize_e2e.py \
--lang=zh \
--text=${BIN_DIR}/../sentences.txt \
--output_dir=exp/default/test_e2e \
- --phones_dict=fastspeech2_aishell3_ckpt_1.1.0/phone_id_map.txt \
- --speaker_dict=fastspeech2_aishell3_ckpt_1.1.0/speaker_id_map.txt \
+ --phones_dict=fastspeech2_nosil_aishell3_ckpt_0.4/phone_id_map.txt \
+ --speaker_dict=fastspeech2_nosil_aishell3_ckpt_0.4/speaker_id_map.txt \
--spk_id=0 \
--inference_dir=exp/default/inference
+
```
diff --git a/examples/aishell3/tts3/local/synthesize_e2e.sh b/examples/aishell3/tts3/local/synthesize_e2e.sh
index 158350ae4..ff3608be7 100755
--- a/examples/aishell3/tts3/local/synthesize_e2e.sh
+++ b/examples/aishell3/tts3/local/synthesize_e2e.sh
@@ -38,7 +38,7 @@ if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
--am=fastspeech2_aishell3 \
--am_config=${config_path} \
--am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \
- --am_stat=dump/train/speech_stats.npy \
+ --am_stat=fastspeech2_nosil_aishell3_ckpt_0.4/speech_stats.npy \
--voc=hifigan_aishell3 \
--voc_config=hifigan_aishell3_ckpt_0.2.0/default.yaml \
--voc_ckpt=hifigan_aishell3_ckpt_0.2.0/snapshot_iter_2500000.pdz \
@@ -46,8 +46,8 @@ if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
--lang=zh \
--text=${BIN_DIR}/../sentences.txt \
--output_dir=${train_output_path}/test_e2e \
- --phones_dict=dump/phone_id_map.txt \
- --speaker_dict=dump/speaker_id_map.txt \
+ --phones_dict=fastspeech2_nosil_aishell3_ckpt_0.4/phone_id_map.txt \
+ --speaker_dict=fastspeech2_nosil_aishell3_ckpt_0.4/speaker_id_map.txt \
--spk_id=0 \
--inference_dir=${train_output_path}/inference
fi
diff --git a/examples/aishell3/tts3/run.sh b/examples/aishell3/tts3/run.sh
index f730f3761..24715fee1 100755
--- a/examples/aishell3/tts3/run.sh
+++ b/examples/aishell3/tts3/run.sh
@@ -44,8 +44,8 @@ fi
if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
# install paddle2onnx
version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
- if [[ -z "$version" || ${version} != '1.0.0' ]]; then
- pip install paddle2onnx==1.0.0
+ if [[ -z "$version" || ${version} != '0.9.8' ]]; then
+ pip install paddle2onnx==0.9.8
fi
./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_aishell3
# considering the balance between speed and quality, we recommend that you use hifigan as vocoder
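To confirm which paddle2onnx version is actually installed before exporting, a quick check (assuming a pip-managed environment) is:
```bash
# Show the installed paddle2onnx version; it should match the pin used in run.sh.
python3 -m pip show paddle2onnx | grep -i '^version'
```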
diff --git a/examples/aishell3/vc1/README.md b/examples/aishell3/vc1/README.md
index 93e0fd7ec..aab525103 100644
--- a/examples/aishell3/vc1/README.md
+++ b/examples/aishell3/vc1/README.md
@@ -99,7 +99,7 @@ CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_p
The synthesizing step is very similar to that one of [tts3](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/tts3), but we should set `--voice-cloning=True` when calling `${BIN_DIR}/../synthesize.py`.
### Voice Cloning
-Assume there are some reference audios in `./ref_audio`
+Assume there are some reference audios in `./ref_audio`
```text
ref_audio
├── 001238.wav
@@ -116,7 +116,7 @@ CUDA_VISIBLE_DEVICES=${gpus} ./local/voice_cloning.sh ${conf_path} ${train_outpu
Model | Step | eval/loss | eval/l1_loss | eval/duration_loss | eval/pitch_loss| eval/energy_loss
:-------------:| :------------:| :-----: | :-----: | :--------: |:--------:|:---------:
-default|2(gpu) x 96400|0.99699|0.62013|0.053057|0.11954| 0.20426|
+default|2(gpu) x 96400|0.99699|0.62013|0.53057|0.11954| 0.20426|
FastSpeech2 checkpoint contains files listed below.
(There is no need for `speaker_id_map.txt` here)
diff --git a/examples/aishell3/vc2/README.md b/examples/aishell3/vc2/README.md
deleted file mode 100644
index 774823674..000000000
--- a/examples/aishell3/vc2/README.md
+++ /dev/null
@@ -1,126 +0,0 @@
-# FastSpeech2 + AISHELL-3 Voice Cloning (ECAPA-TDNN)
-This example contains code used to train a [FastSpeech2](https://arxiv.org/abs/2006.04558) model with [AISHELL-3](http://www.aishelltech.com/aishell_3). The trained model can be used in the Voice Cloning Task. We refer to the model structure of [Transfer Learning from Speaker Verification to Multispeaker Text-To-Speech Synthesis](https://arxiv.org/pdf/1806.04558.pdf). The general steps are as follows:
-1. Speaker Encoder: We use Speaker Verification to train a speaker encoder. Datasets used in this task are different from those used in `FastSpeech2` because the transcriptions are not needed, so we use more datasets; refer to [ECAPA-TDNN](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/voxceleb/sv0).
-2. Synthesizer: We use the trained speaker encoder to generate a speaker embedding for each sentence in AISHELL-3. This embedding is an extra input of `FastSpeech2` which will be concatenated with the encoder outputs.
-3. Vocoder: We use [Parallel Wave GAN](http://arxiv.org/abs/1910.11480) as the neural vocoder; refer to [voc1](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/voc1).
-
-## Dataset
-### Download and Extract
-Download AISHELL-3 from its [Official Website](http://www.aishelltech.com/aishell_3) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/data_aishell3`.
-
-### Get MFA Result and Extract
-We use [MFA2.x](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) to get durations for aishell3_fastspeech2.
-You can download it from [aishell3_alignment_tone.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/AISHELL-3/with_tone/aishell3_alignment_tone.tar.gz), or train your own MFA model by referring to the [mfa example](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/other/mfa) (which currently uses MFA1.x) in our repo.
-
-## Get Started
-Assume the path to the dataset is `~/datasets/data_aishell3`.
-Assume the path to the MFA result of AISHELL-3 is `./aishell3_alignment_tone`.
-
-Run the command below to
-1. **source path**.
-2. preprocess the dataset.
-3. train the model.
-4. synthesize waveform from `metadata.jsonl`.
-5. start a voice cloning inference.
-```bash
-./run.sh
-```
-You can choose a range of stages you want to run, or set `stage` equal to `stop-stage` to use only one stage. For example, running the following command will only preprocess the dataset.
-```bash
-./run.sh --stage 0 --stop-stage 0
-```
-### Data Preprocessing
-```bash
-CUDA_VISIBLE_DEVICES=${gpus} ./local/preprocess.sh ${conf_path}
-```
-When it is done, a `dump` folder is created in the current directory. The structure of the dump folder is listed below.
-```text
-dump
-├── dev
-│ ├── norm
-│ └── raw
-├── embed
-│ ├── SSB0005
-│ ├── SSB0009
-│ ├── ...
-│ └── ...
-├── phone_id_map.txt
-├── speaker_id_map.txt
-├── test
-│ ├── norm
-│ └── raw
-└── train
- ├── energy_stats.npy
- ├── norm
- ├── pitch_stats.npy
- ├── raw
- └── speech_stats.npy
-```
-The `embed` folder contains the generated speaker embedding for each sentence in AISHELL-3; it has the same file structure as the wav files, and the files are in `.npy` format.
-
-The computing time of utterance embedding can be x hours.
-
-The dataset is split into 3 parts, namely `train`, `dev`, and `test`, each of which contains a `norm` and `raw` subfolder. The raw folder contains the speech, pitch, and energy features of each utterance, while the norm folder contains the normalized ones. The statistics used to normalize features are computed from the training set, which is located in `dump/train/*_stats.npy`.
-
-Also, there is a `metadata.jsonl` in each subfolder. It is a table-like file that contains phones, text_lengths, speech_lengths, durations, the path of speech features, the path of pitch features, the path of energy features, speaker, and id of each utterance.
-
-The preprocessing step is very similar to that of [tts3](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/tts3), but there is one more `ECAPA-TDNN/inference` step here.
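That extra step is `stage 0` of `./local/preprocess.sh`, which generates the ECAPA-TDNN speaker embeddings; as a sketch, it can also be run on its own (paths follow the dataset layout assumed above):
```bash
# Generate speaker embeddings for AISHELL-3 with the ECAPA-TDNN extractor
# (this is what stage 0 of ./local/preprocess.sh runs).
python3 ${BIN_DIR}/vc2_infer.py \
    --input=~/datasets/data_aishell3/train/wav/ \
    --output=dump/embed \
    --num-cpu=20
```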
-
-### Model Training
-`./local/train.sh` calls `${BIN_DIR}/train.py`.
-```bash
-CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path}
-```
-The training step is very similar to that of [tts3](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/tts3), but we should set `--voice-cloning=True` when calling `${BIN_DIR}/train.py`.
-
-### Synthesizing
-We use [parallel wavegan](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/voc1) as the neural vocoder.
-Download pretrained parallel wavegan model from [pwg_aishell3_ckpt_0.5.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/pwgan/pwg_aishell3_ckpt_0.5.zip) and unzip it.
-```bash
-unzip pwg_aishell3_ckpt_0.5.zip
-```
-Parallel WaveGAN checkpoint contains files listed below.
-```text
-pwg_aishell3_ckpt_0.5
-├── default.yaml # default config used to train parallel wavegan
-├── feats_stats.npy # statistics used to normalize spectrogram when training parallel wavegan
-└── snapshot_iter_1000000.pdz # generator parameters of parallel wavegan
-```
-`./local/synthesize.sh` calls `${BIN_DIR}/../synthesize.py`, which can synthesize waveform from `metadata.jsonl`.
-```bash
-CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name}
-```
-The synthesizing step is very similar to that of [tts3](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/tts3), but we should set `--voice-cloning=True` when calling `${BIN_DIR}/../synthesize.py`.
-
-### Voice Cloning
-Assume there are some reference audios in `./ref_audio` (the format must be wav here)
-```text
-ref_audio
-├── 001238.wav
-├── LJ015-0254.wav
-└── audio_self_test.wav
-```
-`./local/voice_cloning.sh` calls `${BIN_DIR}/../voice_cloning.py`
-
-```bash
-CUDA_VISIBLE_DEVICES=${gpus} ./local/voice_cloning.sh ${conf_path} ${train_output_path} ${ckpt_name} ${ref_audio_dir}
-```
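With the default values used by `run.sh` (`conf/default.yaml`, `exp/default`, `snapshot_iter_96400.pdz`) and reference audio in `./ref_audio`, the call above expands to roughly:
```bash
# Concrete invocation using the defaults from run.sh; adjust the checkpoint name
# to whatever your own training run produced.
CUDA_VISIBLE_DEVICES=0,1 ./local/voice_cloning.sh conf/default.yaml exp/default snapshot_iter_96400.pdz ref_audio
```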
-## Pretrained Model
-- [fastspeech2_aishell3_ckpt_vc2_1.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_aishell3_ckpt_vc2_1.2.0.zip)
-
-Model | Step | eval/loss | eval/l1_loss | eval/duration_loss | eval/pitch_loss| eval/energy_loss
-:-------------:| :------------:| :-----: | :-----: | :--------: |:--------:|:---------:
-default|2(gpu) x 96400|0.991855|0.599517|0.052142|0.094877| 0.245318|
-
-FastSpeech2 checkpoint contains files listed below.
-(There is no need for `speaker_id_map.txt` here)
-
-```text
-fastspeech2_aishell3_ckpt_vc2_1.2.0
-├── default.yaml # default config used to train fastspeech2
-├── energy_stats.npy # statistics used to normalize energy when training fastspeech2
-├── phone_id_map.txt # phone vocabulary file when training fastspeech2
-├── pitch_stats.npy # statistics used to normalize pitch when training fastspeech2
-├── snapshot_iter_96400.pdz # model parameters and optimizer states
-└── speech_stats.npy # statistics used to normalize spectrogram when training fastspeech2
-```
diff --git a/examples/aishell3/vc2/conf/default.yaml b/examples/aishell3/vc2/conf/default.yaml
deleted file mode 100644
index 5ef37f812..000000000
--- a/examples/aishell3/vc2/conf/default.yaml
+++ /dev/null
@@ -1,104 +0,0 @@
-###########################################################
-# FEATURE EXTRACTION SETTING #
-###########################################################
-
-fs: 24000 # sr
-n_fft: 2048 # FFT size (samples).
-n_shift: 300 # Hop size (samples). 12.5ms
-win_length: 1200 # Window length (samples). 50ms
- # If set to null, it will be the same as fft_size.
-window: "hann" # Window function.
-
-# Only used for feats_type != raw
-
-fmin: 80 # Minimum frequency of Mel basis.
-fmax: 7600 # Maximum frequency of Mel basis.
-n_mels: 80 # The number of mel basis.
-
-# Only used for the model using pitch features (e.g. FastSpeech2)
-f0min: 80 # Minimum f0 for pitch extraction.
-f0max: 400 # Maximum f0 for pitch extraction.
-
-
-###########################################################
-# DATA SETTING #
-###########################################################
-batch_size: 64
-num_workers: 2
-
-
-###########################################################
-# MODEL SETTING #
-###########################################################
-model:
- adim: 384 # attention dimension
- aheads: 2 # number of attention heads
- elayers: 4 # number of encoder layers
- eunits: 1536 # number of encoder ff units
- dlayers: 4 # number of decoder layers
- dunits: 1536 # number of decoder ff units
- positionwise_layer_type: conv1d # type of position-wise layer
- positionwise_conv_kernel_size: 3 # kernel size of position wise conv layer
- duration_predictor_layers: 2 # number of layers of duration predictor
- duration_predictor_chans: 256 # number of channels of duration predictor
- duration_predictor_kernel_size: 3 # filter size of duration predictor
- postnet_layers: 5 # number of layers of postnet
- postnet_filts: 5 # filter size of conv layers in postnet
- postnet_chans: 256 # number of channels of conv layers in postnet
- use_scaled_pos_enc: True # whether to use scaled positional encoding
- encoder_normalize_before: True # whether to perform layer normalization before the input
- decoder_normalize_before: True # whether to perform layer normalization before the input
- reduction_factor: 1 # reduction factor
- init_type: xavier_uniform # initialization type
- init_enc_alpha: 1.0 # initial value of alpha of encoder scaled position encoding
- init_dec_alpha: 1.0 # initial value of alpha of decoder scaled position encoding
- transformer_enc_dropout_rate: 0.2 # dropout rate for transformer encoder layer
- transformer_enc_positional_dropout_rate: 0.2 # dropout rate for transformer encoder positional encoding
- transformer_enc_attn_dropout_rate: 0.2 # dropout rate for transformer encoder attention layer
- transformer_dec_dropout_rate: 0.2 # dropout rate for transformer decoder layer
- transformer_dec_positional_dropout_rate: 0.2 # dropout rate for transformer decoder positional encoding
- transformer_dec_attn_dropout_rate: 0.2 # dropout rate for transformer decoder attention layer
- pitch_predictor_layers: 5 # number of conv layers in pitch predictor
- pitch_predictor_chans: 256 # number of channels of conv layers in pitch predictor
- pitch_predictor_kernel_size: 5 # kernel size of conv layers in pitch predictor
- pitch_predictor_dropout: 0.5 # dropout rate in pitch predictor
- pitch_embed_kernel_size: 1 # kernel size of conv embedding layer for pitch
- pitch_embed_dropout: 0.0 # dropout rate after conv embedding layer for pitch
- stop_gradient_from_pitch_predictor: True # whether to stop the gradient from pitch predictor to encoder
- energy_predictor_layers: 2 # number of conv layers in energy predictor
- energy_predictor_chans: 256 # number of channels of conv layers in energy predictor
- energy_predictor_kernel_size: 3 # kernel size of conv layers in energy predictor
- energy_predictor_dropout: 0.5 # dropout rate in energy predictor
- energy_embed_kernel_size: 1 # kernel size of conv embedding layer for energy
- energy_embed_dropout: 0.0 # dropout rate after conv embedding layer for energy
- stop_gradient_from_energy_predictor: False # whether to stop the gradient from energy predictor to encoder
- spk_embed_dim: 192 # speaker embedding dimension
- spk_embed_integration_type: concat # speaker embedding integration type
-
-
-
-###########################################################
-# UPDATER SETTING #
-###########################################################
-updater:
- use_masking: True # whether to apply masking for padded part in loss calculation
-
-
-###########################################################
-# OPTIMIZER SETTING #
-###########################################################
-optimizer:
- optim: adam # optimizer type
- learning_rate: 0.001 # learning rate
-
-###########################################################
-# TRAINING SETTING #
-###########################################################
-max_epoch: 200
-num_snapshots: 5
-
-
-###########################################################
-# OTHER SETTING #
-###########################################################
-seed: 10086
diff --git a/examples/aishell3/vc2/local/preprocess.sh b/examples/aishell3/vc2/local/preprocess.sh
deleted file mode 100755
index f5262a26d..000000000
--- a/examples/aishell3/vc2/local/preprocess.sh
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/bin/bash
-
-stage=0
-stop_stage=100
-
-config_path=$1
-
-# gen speaker embedding
-if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
- python3 ${BIN_DIR}/vc2_infer.py \
- --input=~/datasets/data_aishell3/train/wav/ \
- --output=dump/embed \
- --num-cpu=20
-fi
-
-# copy from tts3/preprocess
-if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
- # get durations from MFA's result
- echo "Generate durations.txt from MFA results ..."
- python3 ${MAIN_ROOT}/utils/gen_duration_from_textgrid.py \
- --inputdir=./aishell3_alignment_tone \
- --output durations.txt \
- --config=${config_path}
-fi
-
-if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
- # extract features
- echo "Extract features ..."
- python3 ${BIN_DIR}/preprocess.py \
- --dataset=aishell3 \
- --rootdir=~/datasets/data_aishell3/ \
- --dumpdir=dump \
- --dur-file=durations.txt \
- --config=${config_path} \
- --num-cpu=20 \
- --cut-sil=True \
- --spk_emb_dir=dump/embed
-fi
-
-if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
- # get features' stats(mean and std)
- echo "Get features' stats ..."
- python3 ${MAIN_ROOT}/utils/compute_statistics.py \
- --metadata=dump/train/raw/metadata.jsonl \
- --field-name="speech"
-
- python3 ${MAIN_ROOT}/utils/compute_statistics.py \
- --metadata=dump/train/raw/metadata.jsonl \
- --field-name="pitch"
-
- python3 ${MAIN_ROOT}/utils/compute_statistics.py \
- --metadata=dump/train/raw/metadata.jsonl \
- --field-name="energy"
-fi
-
-if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
- # normalize and convert phone/speaker to id; dev and test should use train's stats
- echo "Normalize ..."
- python3 ${BIN_DIR}/normalize.py \
- --metadata=dump/train/raw/metadata.jsonl \
- --dumpdir=dump/train/norm \
- --speech-stats=dump/train/speech_stats.npy \
- --pitch-stats=dump/train/pitch_stats.npy \
- --energy-stats=dump/train/energy_stats.npy \
- --phones-dict=dump/phone_id_map.txt \
- --speaker-dict=dump/speaker_id_map.txt
-
- python3 ${BIN_DIR}/normalize.py \
- --metadata=dump/dev/raw/metadata.jsonl \
- --dumpdir=dump/dev/norm \
- --speech-stats=dump/train/speech_stats.npy \
- --pitch-stats=dump/train/pitch_stats.npy \
- --energy-stats=dump/train/energy_stats.npy \
- --phones-dict=dump/phone_id_map.txt \
- --speaker-dict=dump/speaker_id_map.txt
-
- python3 ${BIN_DIR}/normalize.py \
- --metadata=dump/test/raw/metadata.jsonl \
- --dumpdir=dump/test/norm \
- --speech-stats=dump/train/speech_stats.npy \
- --pitch-stats=dump/train/pitch_stats.npy \
- --energy-stats=dump/train/energy_stats.npy \
- --phones-dict=dump/phone_id_map.txt \
- --speaker-dict=dump/speaker_id_map.txt
-fi
diff --git a/examples/aishell3/vc2/local/synthesize.sh b/examples/aishell3/vc2/local/synthesize.sh
deleted file mode 100755
index 8c61e3f3e..000000000
--- a/examples/aishell3/vc2/local/synthesize.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-
-config_path=$1
-train_output_path=$2
-ckpt_name=$3
-
-FLAGS_allocator_strategy=naive_best_fit \
-FLAGS_fraction_of_gpu_memory_to_use=0.01 \
-python3 ${BIN_DIR}/../synthesize.py \
- --am=fastspeech2_aishell3 \
- --am_config=${config_path} \
- --am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \
- --am_stat=dump/train/speech_stats.npy \
- --voc=pwgan_aishell3 \
- --voc_config=pwg_aishell3_ckpt_0.5/default.yaml \
- --voc_ckpt=pwg_aishell3_ckpt_0.5/snapshot_iter_1000000.pdz \
- --voc_stat=pwg_aishell3_ckpt_0.5/feats_stats.npy \
- --test_metadata=dump/test/norm/metadata.jsonl \
- --output_dir=${train_output_path}/test \
- --phones_dict=dump/phone_id_map.txt \
- --speaker_dict=dump/speaker_id_map.txt \
- --voice-cloning=True
diff --git a/examples/aishell3/vc2/local/train.sh b/examples/aishell3/vc2/local/train.sh
deleted file mode 100755
index c775fcadc..000000000
--- a/examples/aishell3/vc2/local/train.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/bash
-
-config_path=$1
-train_output_path=$2
-
-python3 ${BIN_DIR}/train.py \
- --train-metadata=dump/train/norm/metadata.jsonl \
- --dev-metadata=dump/dev/norm/metadata.jsonl \
- --config=${config_path} \
- --output-dir=${train_output_path} \
- --ngpu=2 \
- --phones-dict=dump/phone_id_map.txt \
- --voice-cloning=True
\ No newline at end of file
diff --git a/examples/aishell3/vc2/local/voice_cloning.sh b/examples/aishell3/vc2/local/voice_cloning.sh
deleted file mode 100755
index 09c5e4369..000000000
--- a/examples/aishell3/vc2/local/voice_cloning.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/bash
-
-config_path=$1
-train_output_path=$2
-ckpt_name=$3
-ref_audio_dir=$4
-
-FLAGS_allocator_strategy=naive_best_fit \
-FLAGS_fraction_of_gpu_memory_to_use=0.01 \
-python3 ${BIN_DIR}/../voice_cloning.py \
- --am=fastspeech2_aishell3 \
- --am_config=${config_path} \
- --am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \
- --am_stat=dump/train/speech_stats.npy \
- --voc=pwgan_aishell3 \
- --voc_config=pwg_aishell3_ckpt_0.5/default.yaml \
- --voc_ckpt=pwg_aishell3_ckpt_0.5/snapshot_iter_1000000.pdz \
- --voc_stat=pwg_aishell3_ckpt_0.5/feats_stats.npy \
- --text="凯莫瑞安联合体的经济崩溃迫在眉睫。" \
- --input-dir=${ref_audio_dir} \
- --output-dir=${train_output_path}/vc_syn \
- --phones-dict=dump/phone_id_map.txt \
- --use_ecapa=True
diff --git a/examples/aishell3/vc2/path.sh b/examples/aishell3/vc2/path.sh
deleted file mode 100755
index fb7e8411c..000000000
--- a/examples/aishell3/vc2/path.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/bash
-export MAIN_ROOT=`realpath ${PWD}/../../../`
-
-export PATH=${MAIN_ROOT}:${MAIN_ROOT}/utils:${PATH}
-export LC_ALL=C
-
-export PYTHONDONTWRITEBYTECODE=1
-# Use UTF-8 in Python to avoid UnicodeDecodeError when LC_ALL=C
-export PYTHONIOENCODING=UTF-8
-export PYTHONPATH=${MAIN_ROOT}:${PYTHONPATH}
-
-MODEL=fastspeech2
-export BIN_DIR=${MAIN_ROOT}/paddlespeech/t2s/exps/${MODEL}
diff --git a/examples/aishell3/vc2/run.sh b/examples/aishell3/vc2/run.sh
deleted file mode 100755
index 06d562988..000000000
--- a/examples/aishell3/vc2/run.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-
-set -e
-source path.sh
-
-gpus=0,1
-stage=0
-stop_stage=100
-
-conf_path=conf/default.yaml
-train_output_path=exp/default
-ckpt_name=snapshot_iter_96400.pdz
-ref_audio_dir=ref_audio
-
-
-# with the following command, you can choose the stage range you want to run
-# such as `./run.sh --stage 0 --stop-stage 0`
-# this can not be mixed use with `$1`, `$2` ...
-source ${MAIN_ROOT}/utils/parse_options.sh || exit 1
-
-if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
- # prepare data
- CUDA_VISIBLE_DEVICES=${gpus} ./local/preprocess.sh ${conf_path} || exit -1
-fi
-
-if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
- # train model, all `ckpt` under `train_output_path/checkpoints/` dir
- CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path} || exit -1
-fi
-
-if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
- # synthesize, vocoder is pwgan
- CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1
-fi
-
-if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
- # synthesize, vocoder is pwgan
- CUDA_VISIBLE_DEVICES=${gpus} ./local/voice_cloning.sh ${conf_path} ${train_output_path} ${ckpt_name} ${ref_audio_dir} || exit -1
-fi
diff --git a/examples/aishell3/vits-vc/README.md b/examples/aishell3/vits-vc/README.md
deleted file mode 100644
index 84f874006..000000000
--- a/examples/aishell3/vits-vc/README.md
+++ /dev/null
@@ -1,154 +0,0 @@
-# VITS with AISHELL-3
-This example contains code used to train a [VITS](https://arxiv.org/abs/2106.06103) model with [AISHELL-3](http://www.aishelltech.com/aishell_3). The trained model can be used in the Voice Cloning Task. We refer to the model structure of [Transfer Learning from Speaker Verification to Multispeaker Text-To-Speech Synthesis](https://arxiv.org/pdf/1806.04558.pdf). The general steps are as follows:
-1. Speaker Encoder: We use Speaker Verification to train a speaker encoder. Datasets used in this task are different from those used in `VITS` because the transcriptions are not needed, so we use more datasets; refer to [ge2e](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/other/ge2e).
-2. Synthesizer and Vocoder: We use the trained speaker encoder to generate a speaker embedding for each sentence in AISHELL-3. This embedding is an extra input of `VITS` which will be concatenated with the encoder outputs. The vocoder is part of `VITS` due to its special structure.
-
-## Dataset
-### Download and Extract
-Download AISHELL-3 from its [Official Website](http://www.aishelltech.com/aishell_3) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/data_aishell3`.
-
-### Get MFA Result and Extract
-We use [MFA2.x](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) to get phonemes for VITS; the durations from MFA are not needed here.
-You can download it from [aishell3_alignment_tone.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/AISHELL-3/with_tone/aishell3_alignment_tone.tar.gz), or train your own MFA model by referring to the [mfa example](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/other/mfa) (which currently uses MFA1.x) in our repo.
-
-## Pretrained GE2E Model
-We use a pretrained GE2E model to generate a speaker embedding for each sentence.
-
-Download the pretrained GE2E model from [ge2e_ckpt_0.3.zip](https://bj.bcebos.com/paddlespeech/Parakeet/released_models/ge2e/ge2e_ckpt_0.3.zip) and `unzip` it.
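A minimal sketch of fetching and unpacking it into the working directory (the resulting `./ge2e_ckpt_0.3` path is the one assumed by `run.sh` below):
```bash
wget https://bj.bcebos.com/paddlespeech/Parakeet/released_models/ge2e/ge2e_ckpt_0.3.zip
unzip ge2e_ckpt_0.3.zip   # expected to produce ./ge2e_ckpt_0.3/step-3000000.pdparams
```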
-
-## Get Started
-Assume the path to the dataset is `~/datasets/data_aishell3`.
-Assume the path to the MFA result of AISHELL-3 is `./aishell3_alignment_tone`.
-Assume the path to the pretrained ge2e model is `./ge2e_ckpt_0.3`.
-
-Run the command below to
-1. **source path**.
-2. preprocess the dataset.
-3. train the model.
-4. synthesize waveform from `metadata.jsonl`.
-5. start a voice cloning inference.
-
-```bash
-./run.sh
-```
-You can choose a range of stages you want to run, or set `stage` equal to `stop-stage` to use only one stage. For example, running the following command will only preprocess the dataset.
-```bash
-./run.sh --stage 0 --stop-stage 0
-```
-
-### Data Preprocessing
-```bash
-CUDA_VISIBLE_DEVICES=${gpus} ./local/preprocess.sh ${conf_path} ${ge2e_ckpt_path}
-```
-When it is done, a `dump` folder is created in the current directory. The structure of the dump folder is listed below.
-
-```text
-dump
-├── dev
-│ ├── norm
-│ └── raw
-├── embed
-│ ├── SSB0005
-│ ├── SSB0009
-│ ├── ...
-│ └── ...
-├── phone_id_map.txt
-├── speaker_id_map.txt
-├── test
-│ ├── norm
-│ └── raw
-└── train
- ├── feats_stats.npy
- ├── norm
- └── raw
-```
-The `embed` folder contains the generated speaker embedding for each sentence in AISHELL-3; it has the same file structure as the wav files, and the files are in `.npy` format.
-
-The computing time of utterance embedding can be x hours.
-
-The dataset is split into 3 parts, namely `train`, `dev`, and `test`, each of which contains a `norm` and `raw` subfolder. The raw folder contains the waveform and linear spectrogram of each utterance, while the norm folder contains the normalized ones. The statistics used to normalize features are computed from the training set, which is located in `dump/train/feats_stats.npy`.
-
-Also, there is a `metadata.jsonl` in each subfolder. It is a table-like file that contains phones, text_lengths, feats, feats_lengths, the path of linear spectrogram features, the path of raw waves, speaker, and the id of each utterance.
-
-The preprocessing step is very similar to that of [vits](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/vits), but there is one more `ge2e/inference` step here.
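Each `.npy` under `dump/embed` holds one utterance-level speaker embedding; a quick sanity check after the `ge2e/inference` step (it simply picks whichever embedding file it finds first) is:
```bash
# Print the path and shape of one generated speaker embedding.
python3 -c "import glob, numpy as np; f = sorted(glob.glob('dump/embed/**/*.npy', recursive=True))[0]; print(f, np.load(f).shape)"
```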
-
-### Model Training
-```bash
-CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path}
-```
-The training step is very similar to that of [vits](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/vits), but we should set `--voice-cloning=True` when calling `${BIN_DIR}/train.py`.
-
-### Synthesizing
-
-`./local/synthesize.sh` calls `${BIN_DIR}/synthesize.py`, which can synthesize waveform from `metadata.jsonl`.
-
-```bash
-CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name}
-```
-```text
-usage: synthesize.py [-h] [--config CONFIG] [--ckpt CKPT]
- [--phones_dict PHONES_DICT] [--speaker_dict SPEAKER_DICT]
- [--voice-cloning VOICE_CLONING] [--ngpu NGPU]
- [--test_metadata TEST_METADATA] [--output_dir OUTPUT_DIR]
-
-Synthesize with VITS
-
-optional arguments:
- -h, --help show this help message and exit
- --config CONFIG Config of VITS.
- --ckpt CKPT Checkpoint file of VITS.
- --phones_dict PHONES_DICT
- phone vocabulary file.
- --speaker_dict SPEAKER_DICT
- speaker id map file.
- --voice-cloning VOICE_CLONING
- whether training voice cloning model.
- --ngpu NGPU if ngpu == 0, use cpu.
- --test_metadata TEST_METADATA
- test metadata.
- --output_dir OUTPUT_DIR
- output dir.
-```
-The synthesizing step is very similar to that of [vits](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/vits), but we should set `--voice-cloning=True` when calling `${BIN_DIR}/../synthesize.py`.
-
-### Voice Cloning
-Assume there are some reference audios in `./ref_audio`
-```text
-ref_audio
-├── 001238.wav
-├── LJ015-0254.wav
-└── audio_self_test.mp3
-```
-`./local/voice_cloning.sh` calls `${BIN_DIR}/voice_cloning.py`
-
-```bash
-CUDA_VISIBLE_DEVICES=${gpus} ./local/voice_cloning.sh ${conf_path} ${train_output_path} ${ckpt_name} ${ge2e_params_path} ${add_blank} ${ref_audio_dir}
-```
-
-If you want to convert a source speaker's audio file to the voice of the reference speaker, run:
-
-```bash
-CUDA_VISIBLE_DEVICES=${gpus} ./local/voice_cloning.sh ${conf_path} ${train_output_path} ${ckpt_name} ${ge2e_params_path} ${add_blank} ${ref_audio_dir} ${src_audio_path}
-```
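With the default values used by `run.sh` (`conf/default.yaml`, `exp/default`, `snapshot_iter_153.pdz`, the GE2E parameters at `./ge2e_ckpt_0.3/step-3000000.pdparams`, `add_blank=true`, reference audio in `./ref_audio`), the cloning call expands to roughly:
```bash
# Concrete invocation using the defaults from run.sh; append a source audio path
# as the last argument if you also want to convert an existing recording.
CUDA_VISIBLE_DEVICES=0,1,2,3 ./local/voice_cloning.sh conf/default.yaml exp/default snapshot_iter_153.pdz \
    ./ge2e_ckpt_0.3/step-3000000.pdparams true ref_audio
```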
-
-
-
diff --git a/examples/aishell3/vits-vc/conf/default.yaml b/examples/aishell3/vits-vc/conf/default.yaml
deleted file mode 100644
index c71e071d2..000000000
--- a/examples/aishell3/vits-vc/conf/default.yaml
+++ /dev/null
@@ -1,185 +0,0 @@
-# This configuration was tested on 4 GPUs (V100) with 32GB GPU
-# memory. It takes around 2 weeks to finish the training,
-# but a 100k-iteration model should already generate reasonable results.
-###########################################################
-# FEATURE EXTRACTION SETTING #
-###########################################################
-
-fs: 22050 # sr
-n_fft: 1024 # FFT size (samples).
-n_shift: 256 # Hop size (samples). 12.5ms
-win_length: null # Window length (samples). 50ms
- # If set to null, it will be the same as fft_size.
-window: "hann" # Window function.
-
-
-##########################################################
-# TTS MODEL SETTING #
-##########################################################
-model:
- # generator related
- generator_type: vits_generator
- generator_params:
- hidden_channels: 192
- spk_embed_dim: 256
- global_channels: 256
- segment_size: 32
- text_encoder_attention_heads: 2
- text_encoder_ffn_expand: 4
- text_encoder_blocks: 6
- text_encoder_positionwise_layer_type: "conv1d"
- text_encoder_positionwise_conv_kernel_size: 3
- text_encoder_positional_encoding_layer_type: "rel_pos"
- text_encoder_self_attention_layer_type: "rel_selfattn"
- text_encoder_activation_type: "swish"
- text_encoder_normalize_before: True
- text_encoder_dropout_rate: 0.1
- text_encoder_positional_dropout_rate: 0.0
- text_encoder_attention_dropout_rate: 0.1
- use_macaron_style_in_text_encoder: True
- use_conformer_conv_in_text_encoder: False
- text_encoder_conformer_kernel_size: -1
- decoder_kernel_size: 7
- decoder_channels: 512
- decoder_upsample_scales: [8, 8, 2, 2]
- decoder_upsample_kernel_sizes: [16, 16, 4, 4]
- decoder_resblock_kernel_sizes: [3, 7, 11]
- decoder_resblock_dilations: [[1, 3, 5], [1, 3, 5], [1, 3, 5]]
- use_weight_norm_in_decoder: True
- posterior_encoder_kernel_size: 5
- posterior_encoder_layers: 16
- posterior_encoder_stacks: 1
- posterior_encoder_base_dilation: 1
- posterior_encoder_dropout_rate: 0.0
- use_weight_norm_in_posterior_encoder: True
- flow_flows: 4
- flow_kernel_size: 5
- flow_base_dilation: 1
- flow_layers: 4
- flow_dropout_rate: 0.0
- use_weight_norm_in_flow: True
- use_only_mean_in_flow: True
- stochastic_duration_predictor_kernel_size: 3
- stochastic_duration_predictor_dropout_rate: 0.5
- stochastic_duration_predictor_flows: 4
- stochastic_duration_predictor_dds_conv_layers: 3
- # discriminator related
- discriminator_type: hifigan_multi_scale_multi_period_discriminator
- discriminator_params:
- scales: 1
- scale_downsample_pooling: "AvgPool1D"
- scale_downsample_pooling_params:
- kernel_size: 4
- stride: 2
- padding: 2
- scale_discriminator_params:
- in_channels: 1
- out_channels: 1
- kernel_sizes: [15, 41, 5, 3]
- channels: 128
- max_downsample_channels: 1024
- max_groups: 16
- bias: True
- downsample_scales: [2, 2, 4, 4, 1]
- nonlinear_activation: "leakyrelu"
- nonlinear_activation_params:
- negative_slope: 0.1
- use_weight_norm: True
- use_spectral_norm: False
- follow_official_norm: False
- periods: [2, 3, 5, 7, 11]
- period_discriminator_params:
- in_channels: 1
- out_channels: 1
- kernel_sizes: [5, 3]
- channels: 32
- downsample_scales: [3, 3, 3, 3, 1]
- max_downsample_channels: 1024
- bias: True
- nonlinear_activation: "leakyrelu"
- nonlinear_activation_params:
- negative_slope: 0.1
- use_weight_norm: True
- use_spectral_norm: False
- # others
- sampling_rate: 22050 # needed in the inference for saving wav
- cache_generator_outputs: True # whether to cache generator outputs in the training
-
-###########################################################
-# LOSS SETTING #
-###########################################################
-# loss function related
-generator_adv_loss_params:
- average_by_discriminators: False # whether to average loss value by #discriminators
- loss_type: mse # loss type, "mse" or "hinge"
-discriminator_adv_loss_params:
- average_by_discriminators: False # whether to average loss value by #discriminators
- loss_type: mse # loss type, "mse" or "hinge"
-feat_match_loss_params:
- average_by_discriminators: False # whether to average loss value by #discriminators
- average_by_layers: False # whether to average loss value by #layers of each discriminator
- include_final_outputs: True # whether to include final outputs for loss calculation
-mel_loss_params:
- fs: 22050 # must be the same as the training data
- fft_size: 1024 # fft points
- hop_size: 256 # hop size
- win_length: null # window length
- window: hann # window type
- num_mels: 80 # number of Mel basis
- fmin: 0 # minimum frequency for Mel basis
- fmax: null # maximum frequency for Mel basis
- log_base: null # null represent natural log
-
-###########################################################
-# ADVERSARIAL LOSS SETTING #
-###########################################################
-lambda_adv: 1.0 # loss scaling coefficient for adversarial loss
-lambda_mel: 45.0 # loss scaling coefficient for Mel loss
-lambda_feat_match: 2.0 # loss scaling coefficient for feat match loss
-lambda_dur: 1.0 # loss scaling coefficient for duration loss
-lambda_kl: 1.0 # loss scaling coefficient for KL divergence loss
-# others
-sampling_rate: 22050 # needed in the inference for saving wav
-cache_generator_outputs: True # whether to cache generator outputs in the training
-
-
-###########################################################
-# DATA LOADER SETTING #
-###########################################################
-batch_size: 50 # Batch size.
-num_workers: 4 # Number of workers in DataLoader.
-
-##########################################################
-# OPTIMIZER & SCHEDULER SETTING #
-##########################################################
-# optimizer setting for generator
-generator_optimizer_params:
- beta1: 0.8
- beta2: 0.99
- epsilon: 1.0e-9
- weight_decay: 0.0
-generator_scheduler: exponential_decay
-generator_scheduler_params:
- learning_rate: 2.0e-4
- gamma: 0.999875
-
-# optimizer setting for discriminator
-discriminator_optimizer_params:
- beta1: 0.8
- beta2: 0.99
- epsilon: 1.0e-9
- weight_decay: 0.0
-discriminator_scheduler: exponential_decay
-discriminator_scheduler_params:
- learning_rate: 2.0e-4
- gamma: 0.999875
-generator_first: False # whether to start updating generator first
-
-##########################################################
-# OTHER TRAINING SETTING #
-##########################################################
-num_snapshots: 10 # max number of snapshots to keep while training
-train_max_steps: 350000 # Number of training steps. == total_iters / ngpus, total_iters = 1000000
-save_interval_steps: 1000 # Interval steps to save checkpoint.
-eval_interval_steps: 250 # Interval steps to evaluate the network.
-seed: 777 # random seed number
diff --git a/examples/aishell3/vits-vc/local/preprocess.sh b/examples/aishell3/vits-vc/local/preprocess.sh
deleted file mode 100755
index 2f3772863..000000000
--- a/examples/aishell3/vits-vc/local/preprocess.sh
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/bin/bash
-
-stage=0
-stop_stage=100
-
-config_path=$1
-add_blank=$2
-ge2e_ckpt_path=$3
-
-# gen speaker embedding
-if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
- python3 ${MAIN_ROOT}/paddlespeech/vector/exps/ge2e/inference.py \
- --input=~/datasets/data_aishell3/train/wav/ \
- --output=dump/embed \
- --checkpoint_path=${ge2e_ckpt_path}
-fi
-
-# copy from tts3/preprocess
-if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
- # get durations from MFA's result
- echo "Generate durations.txt from MFA results ..."
- python3 ${MAIN_ROOT}/utils/gen_duration_from_textgrid.py \
- --inputdir=./aishell3_alignment_tone \
- --output durations.txt \
- --config=${config_path}
-fi
-
-if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
- # extract features
- echo "Extract features ..."
- python3 ${BIN_DIR}/preprocess.py \
- --dataset=aishell3 \
- --rootdir=~/datasets/data_aishell3/ \
- --dumpdir=dump \
- --dur-file=durations.txt \
- --config=${config_path} \
- --num-cpu=20 \
- --cut-sil=True \
- --spk_emb_dir=dump/embed
-fi
-
-if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
- # get features' stats(mean and std)
- echo "Get features' stats ..."
- python3 ${MAIN_ROOT}/utils/compute_statistics.py \
- --metadata=dump/train/raw/metadata.jsonl \
- --field-name="feats"
-fi
-
-if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
- # normalize and convert phone/speaker to id; dev and test should use train's stats
- echo "Normalize ..."
- python3 ${BIN_DIR}/normalize.py \
- --metadata=dump/train/raw/metadata.jsonl \
- --dumpdir=dump/train/norm \
- --feats-stats=dump/train/feats_stats.npy \
- --phones-dict=dump/phone_id_map.txt \
- --speaker-dict=dump/speaker_id_map.txt \
- --add-blank=${add_blank} \
- --skip-wav-copy
-
- python3 ${BIN_DIR}/normalize.py \
- --metadata=dump/dev/raw/metadata.jsonl \
- --dumpdir=dump/dev/norm \
- --feats-stats=dump/train/feats_stats.npy \
- --phones-dict=dump/phone_id_map.txt \
- --speaker-dict=dump/speaker_id_map.txt \
- --add-blank=${add_blank} \
- --skip-wav-copy
-
- python3 ${BIN_DIR}/normalize.py \
- --metadata=dump/test/raw/metadata.jsonl \
- --dumpdir=dump/test/norm \
- --feats-stats=dump/train/feats_stats.npy \
- --phones-dict=dump/phone_id_map.txt \
- --speaker-dict=dump/speaker_id_map.txt \
- --add-blank=${add_blank} \
- --skip-wav-copy
-fi
diff --git a/examples/aishell3/vits-vc/local/synthesize.sh b/examples/aishell3/vits-vc/local/synthesize.sh
deleted file mode 100755
index 01a74fa3b..000000000
--- a/examples/aishell3/vits-vc/local/synthesize.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-
-config_path=$1
-train_output_path=$2
-ckpt_name=$3
-stage=0
-stop_stage=0
-
-if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
- FLAGS_allocator_strategy=naive_best_fit \
- FLAGS_fraction_of_gpu_memory_to_use=0.01 \
- python3 ${BIN_DIR}/synthesize.py \
- --config=${config_path} \
- --ckpt=${train_output_path}/checkpoints/${ckpt_name} \
- --phones_dict=dump/phone_id_map.txt \
- --test_metadata=dump/test/norm/metadata.jsonl \
- --output_dir=${train_output_path}/test \
- --voice-cloning=True
-fi
diff --git a/examples/aishell3/vits-vc/local/train.sh b/examples/aishell3/vits-vc/local/train.sh
deleted file mode 100755
index eeb6f0871..000000000
--- a/examples/aishell3/vits-vc/local/train.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-
-config_path=$1
-train_output_path=$2
-
-# install monotonic_align
-cd ${MAIN_ROOT}/paddlespeech/t2s/models/vits/monotonic_align
-python3 setup.py build_ext --inplace
-cd -
-
-python3 ${BIN_DIR}/train.py \
- --train-metadata=dump/train/norm/metadata.jsonl \
- --dev-metadata=dump/dev/norm/metadata.jsonl \
- --config=${config_path} \
- --output-dir=${train_output_path} \
- --ngpu=4 \
- --phones-dict=dump/phone_id_map.txt \
- --voice-cloning=True
diff --git a/examples/aishell3/vits-vc/local/voice_cloning.sh b/examples/aishell3/vits-vc/local/voice_cloning.sh
deleted file mode 100755
index 68ea54914..000000000
--- a/examples/aishell3/vits-vc/local/voice_cloning.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-
-config_path=$1
-train_output_path=$2
-ckpt_name=$3
-ge2e_params_path=$4
-add_blank=$5
-ref_audio_dir=$6
-src_audio_path=$7
-
-FLAGS_allocator_strategy=naive_best_fit \
-FLAGS_fraction_of_gpu_memory_to_use=0.01 \
-python3 ${BIN_DIR}/voice_cloning.py \
- --config=${config_path} \
- --ckpt=${train_output_path}/checkpoints/${ckpt_name} \
- --ge2e_params_path=${ge2e_params_path} \
- --phones_dict=dump/phone_id_map.txt \
- --text="凯莫瑞安联合体的经济崩溃迫在眉睫。" \
- --audio-path=${src_audio_path} \
- --input-dir=${ref_audio_dir} \
- --output-dir=${train_output_path}/vc_syn \
- --add-blank=${add_blank}
diff --git a/examples/aishell3/vits-vc/path.sh b/examples/aishell3/vits-vc/path.sh
deleted file mode 100755
index 52d0c3783..000000000
--- a/examples/aishell3/vits-vc/path.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/bash
-export MAIN_ROOT=`realpath ${PWD}/../../../`
-
-export PATH=${MAIN_ROOT}:${MAIN_ROOT}/utils:${PATH}
-export LC_ALL=C
-
-export PYTHONDONTWRITEBYTECODE=1
-# Use UTF-8 in Python to avoid UnicodeDecodeError when LC_ALL=C
-export PYTHONIOENCODING=UTF-8
-export PYTHONPATH=${MAIN_ROOT}:${PYTHONPATH}
-
-MODEL=vits
-export BIN_DIR=${MAIN_ROOT}/paddlespeech/t2s/exps/${MODEL}
\ No newline at end of file
diff --git a/examples/aishell3/vits-vc/run.sh b/examples/aishell3/vits-vc/run.sh
deleted file mode 100755
index fff0c27d3..000000000
--- a/examples/aishell3/vits-vc/run.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/bin/bash
-
-set -e
-source path.sh
-
-gpus=0,1,2,3
-stage=0
-stop_stage=100
-
-conf_path=conf/default.yaml
-train_output_path=exp/default
-ckpt_name=snapshot_iter_153.pdz
-add_blank=true
-ref_audio_dir=ref_audio
-src_audio_path=''
-
-# do not include ".pdparams" here
-ge2e_ckpt_path=./ge2e_ckpt_0.3/step-3000000
-
-# include ".pdparams" here
-ge2e_params_path=${ge2e_ckpt_path}.pdparams
-
-# with the following command, you can choose the stage range you want to run
-# such as `./run.sh --stage 0 --stop-stage 0`
-# this cannot be mixed with positional arguments `$1`, `$2`, ...
-source ${MAIN_ROOT}/utils/parse_options.sh || exit 1
-
-if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
- # prepare data
- CUDA_VISIBLE_DEVICES=${gpus} ./local/preprocess.sh ${conf_path} ${add_blank} ${ge2e_ckpt_path} || exit -1
-fi
-
-if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
-    # train model; all `ckpt` files are saved under the `train_output_path/checkpoints/` dir
- CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path} || exit -1
-fi
-
-if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
- CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1
-fi
-
-if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
- CUDA_VISIBLE_DEVICES=${gpus} ./local/voice_cloning.sh ${conf_path} ${train_output_path} ${ckpt_name} \
- ${ge2e_params_path} ${add_blank} ${ref_audio_dir} ${src_audio_path} || exit -1
-fi
diff --git a/examples/aishell3/vits/README.md b/examples/aishell3/vits/README.md
deleted file mode 100644
index dc80e18bc..000000000
--- a/examples/aishell3/vits/README.md
+++ /dev/null
@@ -1,202 +0,0 @@
-# VITS with AISHELL-3
-This example contains code used to train a [VITS](https://arxiv.org/abs/2106.06103) model with [AISHELL-3](http://www.aishelltech.com/aishell_3).
-
-AISHELL-3 is a large-scale and high-fidelity multi-speaker Mandarin speech corpus that could be used to train multi-speaker Text-to-Speech (TTS) systems.
-
-We use AISHELL-3 to train a multi-speaker VITS model here.
-## Dataset
-### Download and Extract
-Download AISHELL-3 from its [Official Website](http://www.aishelltech.com/aishell_3) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/data_aishell3`.
-
-### Get MFA Result and Extract
-We use [MFA2.x](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) to get phonemes for VITS; the durations from MFA are not needed here.
-You can download it from [aishell3_alignment_tone.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/AISHELL-3/with_tone/aishell3_alignment_tone.tar.gz), or train your own MFA model by referring to the [mfa example](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/other/mfa) (which uses MFA1.x for now) in our repo.
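-
-As a quick sketch (assuming `wget` and `tar` are available), the pre-computed alignment can be downloaded and unpacked into the example directory like this:
-```bash
-wget https://paddlespeech.bj.bcebos.com/MFA/AISHELL-3/with_tone/aishell3_alignment_tone.tar.gz
-tar zxvf aishell3_alignment_tone.tar.gz
-```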
-
-## Get Started
-Assume the path to the dataset is `~/datasets/data_aishell3`.
-Assume the path to the MFA result of AISHELL-3 is `./aishell3_alignment_tone`.
-Run the command below to
-1. **source path**.
-2. preprocess the dataset.
-3. train the model.
-4. synthesize wavs.
- - synthesize waveform from `metadata.jsonl`.
- - synthesize waveform from a text file.
-
-```bash
-./run.sh
-```
-You can choose a range of stages you want to run, or set `stage` equal to `stop-stage` to use only one stage. For example, running the following command will only preprocess the dataset.
-```bash
-./run.sh --stage 0 --stop-stage 0
-```
-
-### Data Preprocessing
-```bash
-./local/preprocess.sh ${conf_path}
-```
-When it is done, a `dump` folder is created in the current directory. The structure of the `dump` folder is listed below.
-
-```text
-dump
-├── dev
-│ ├── norm
-│ └── raw
-├── phone_id_map.txt
-├── speaker_id_map.txt
-├── test
-│ ├── norm
-│ └── raw
-└── train
- ├── feats_stats.npy
- ├── norm
- └── raw
-```
-The dataset is split into 3 parts, namely `train`, `dev`, and `test`, each of which contains a `norm` and a `raw` subfolder. The `raw` folder contains the wave and linear spectrogram of each utterance, while the `norm` folder contains the normalized ones. The statistics used to normalize features are computed from the training set, which is located in `dump/train/feats_stats.npy`.
-
-Also, there is a `metadata.jsonl` in each subfolder. It is a table-like file that contains phones, text_lengths, feats, feats_lengths, the path of linear spectrogram features, the path of raw waves, speaker, and the id of each utterance.
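-
-Each line of `metadata.jsonl` is a single JSON object, so as a minimal sketch (assuming the preprocessing stage has already been run) you can pretty-print the first record of the normalized train set to see these fields:
-```bash
-head -n 1 dump/train/norm/metadata.jsonl | python3 -m json.tool
-```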
-
-### Model Training
-```bash
-CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path}
-```
-`./local/train.sh` calls `${BIN_DIR}/train.py`.
-Here's the complete help message.
-```text
-usage: train.py [-h] [--config CONFIG] [--train-metadata TRAIN_METADATA]
- [--dev-metadata DEV_METADATA] [--output-dir OUTPUT_DIR]
- [--ngpu NGPU] [--phones-dict PHONES_DICT]
- [--speaker-dict SPEAKER_DICT] [--voice-cloning VOICE_CLONING]
-
-Train a VITS model.
-
-optional arguments:
- -h, --help show this help message and exit
- --config CONFIG config file to overwrite default config.
- --train-metadata TRAIN_METADATA
- training data.
- --dev-metadata DEV_METADATA
- dev data.
- --output-dir OUTPUT_DIR
- output dir.
- --ngpu NGPU if ngpu == 0, use cpu.
- --phones-dict PHONES_DICT
- phone vocabulary file.
- --speaker-dict SPEAKER_DICT
- speaker id map file for multiple speaker model.
- --voice-cloning VOICE_CLONING
- whether training voice cloning model.
-```
-1. `--config` is a config file in yaml format to overwrite the default config, which can be found at `conf/default.yaml`.
-2. `--train-metadata` and `--dev-metadata` should be the metadata file in the normalized subfolder of `train` and `dev` in the `dump` folder.
-3. `--output-dir` is the directory to save the results of the experiment. Checkpoints are saved in `checkpoints/` inside this directory.
-4. `--ngpu` is the number of GPUs to use; if ngpu == 0, the CPU is used.
-5. `--phones-dict` is the path of the phone vocabulary file.
-6. `--speaker-dict` is the path of the speaker id map file when training a multi-speaker VITS.
-
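-For reference, with the default `conf_path` and `train_output_path` from `run.sh`, `./local/train.sh` first builds the `monotonic_align` extension and then effectively runs something like the sketch below (assuming `path.sh` has been sourced so that `MAIN_ROOT` and `BIN_DIR` are set):
-```bash
-# build the monotonic_align extension (done automatically by ./local/train.sh)
-cd ${MAIN_ROOT}/paddlespeech/t2s/models/vits/monotonic_align
-python3 setup.py build_ext --inplace
-cd -
-
-python3 ${BIN_DIR}/train.py \
-    --train-metadata=dump/train/norm/metadata.jsonl \
-    --dev-metadata=dump/dev/norm/metadata.jsonl \
-    --config=conf/default.yaml \
-    --output-dir=exp/default \
-    --ngpu=4 \
-    --phones-dict=dump/phone_id_map.txt \
-    --speaker-dict=dump/speaker_id_map.txt
-```
-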
-### Synthesizing
-
-`./local/synthesize.sh` calls `${BIN_DIR}/synthesize.py`, which can synthesize waveform from `metadata.jsonl`.
-
-```bash
-CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name}
-```
-```text
-usage: synthesize.py [-h] [--config CONFIG] [--ckpt CKPT]
- [--phones_dict PHONES_DICT] [--speaker_dict SPEAKER_DICT]
- [--voice-cloning VOICE_CLONING] [--ngpu NGPU]
- [--test_metadata TEST_METADATA] [--output_dir OUTPUT_DIR]
-
-Synthesize with VITS
-
-optional arguments:
- -h, --help show this help message and exit
- --config CONFIG Config of VITS.
- --ckpt CKPT Checkpoint file of VITS.
- --phones_dict PHONES_DICT
- phone vocabulary file.
- --speaker_dict SPEAKER_DICT
- speaker id map file.
- --voice-cloning VOICE_CLONING
- whether training voice cloning model.
- --ngpu NGPU if ngpu == 0, use cpu.
- --test_metadata TEST_METADATA
- test metadata.
- --output_dir OUTPUT_DIR
- output dir.
-```
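-Concretely, with the default settings from `run.sh`, this amounts to roughly the sketch below (the checkpoint name depends on how long you trained; `snapshot_iter_153.pdz` is only the placeholder used in `run.sh`):
-```bash
-FLAGS_allocator_strategy=naive_best_fit \
-FLAGS_fraction_of_gpu_memory_to_use=0.01 \
-python3 ${BIN_DIR}/synthesize.py \
-    --config=conf/default.yaml \
-    --ckpt=exp/default/checkpoints/snapshot_iter_153.pdz \
-    --phones_dict=dump/phone_id_map.txt \
-    --speaker_dict=dump/speaker_id_map.txt \
-    --test_metadata=dump/test/norm/metadata.jsonl \
-    --output_dir=exp/default/test
-```
-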
-`./local/synthesize_e2e.sh` calls `${BIN_DIR}/synthesize_e2e.py`, which can synthesize waveform from a text file.
-```bash
-CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh ${conf_path} ${train_output_path} ${ckpt_name}
-```
-```text
-usage: synthesize_e2e.py [-h] [--config CONFIG] [--ckpt CKPT]
- [--phones_dict PHONES_DICT]
- [--speaker_dict SPEAKER_DICT] [--spk_id SPK_ID]
- [--lang LANG]
- [--inference_dir INFERENCE_DIR] [--ngpu NGPU]
- [--text TEXT] [--output_dir OUTPUT_DIR]
-
-Synthesize with VITS
-
-optional arguments:
- -h, --help show this help message and exit
- --config CONFIG Config of VITS.
- --ckpt CKPT Checkpoint file of VITS.
- --phones_dict PHONES_DICT
- phone vocabulary file.
- --speaker_dict SPEAKER_DICT
- speaker id map file.
- --spk_id SPK_ID spk id for multi speaker acoustic model
- --lang LANG Choose model language. zh or en
- --inference_dir INFERENCE_DIR
- dir to save inference models
- --ngpu NGPU if ngpu == 0, use cpu.
- --text TEXT text to synthesize, a 'utt_id sentence' pair per line.
- --output_dir OUTPUT_DIR
- output dir.
-```
-1. `--config`, `--ckpt`, `--phones_dict` and `--speaker_dict` are arguments for the acoustic model, which correspond to the files in the VITS pretrained model.
-2. `--lang` is the model language, which can be `zh` or `en`.
-3. `--test_metadata` should be the metadata file in the normalized subfolder of `test` in the `dump` folder.
-4. `--text` is the text file, which contains sentences to synthesize.
-5. `--output_dir` is the directory to save synthesized audio files.
-6. `--ngpu` is the number of GPUs to use; if ngpu == 0, the CPU is used.
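-
-Putting the default values together, `./local/synthesize_e2e.sh` runs roughly the following sketch (again, adjust the checkpoint name to the snapshot you actually trained):
-```bash
-FLAGS_allocator_strategy=naive_best_fit \
-FLAGS_fraction_of_gpu_memory_to_use=0.01 \
-python3 ${BIN_DIR}/synthesize_e2e.py \
-    --config=conf/default.yaml \
-    --ckpt=exp/default/checkpoints/snapshot_iter_153.pdz \
-    --phones_dict=dump/phone_id_map.txt \
-    --speaker_dict=dump/speaker_id_map.txt \
-    --spk_id=0 \
-    --output_dir=exp/default/test_e2e \
-    --text=${BIN_DIR}/../sentences.txt \
-    --add-blank=true
-```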
-
-
-
diff --git a/examples/aishell3/vits/conf/default.yaml b/examples/aishell3/vits/conf/default.yaml
deleted file mode 100644
index bc0f224d0..000000000
--- a/examples/aishell3/vits/conf/default.yaml
+++ /dev/null
@@ -1,184 +0,0 @@
-# This configuration was tested on 4 GPUs (V100) with 32GB GPU
-# memory. It takes around 2 weeks to finish the training,
-# but a 100k-iteration model should already generate reasonable results.
-###########################################################
-# FEATURE EXTRACTION SETTING #
-###########################################################
-
-fs: 22050 # sr
-n_fft: 1024 # FFT size (samples).
-n_shift: 256       # Hop size (samples), ~11.6 ms at fs=22050.
-win_length: null   # Window length (samples).
- # If set to null, it will be the same as fft_size.
-window: "hann" # Window function.
-
-
-##########################################################
-# TTS MODEL SETTING #
-##########################################################
-model:
- # generator related
- generator_type: vits_generator
- generator_params:
- hidden_channels: 192
- global_channels: 256
- segment_size: 32
- text_encoder_attention_heads: 2
- text_encoder_ffn_expand: 4
- text_encoder_blocks: 6
- text_encoder_positionwise_layer_type: "conv1d"
- text_encoder_positionwise_conv_kernel_size: 3
- text_encoder_positional_encoding_layer_type: "rel_pos"
- text_encoder_self_attention_layer_type: "rel_selfattn"
- text_encoder_activation_type: "swish"
- text_encoder_normalize_before: True
- text_encoder_dropout_rate: 0.1
- text_encoder_positional_dropout_rate: 0.0
- text_encoder_attention_dropout_rate: 0.1
- use_macaron_style_in_text_encoder: True
- use_conformer_conv_in_text_encoder: False
- text_encoder_conformer_kernel_size: -1
- decoder_kernel_size: 7
- decoder_channels: 512
- decoder_upsample_scales: [8, 8, 2, 2]
- decoder_upsample_kernel_sizes: [16, 16, 4, 4]
- decoder_resblock_kernel_sizes: [3, 7, 11]
- decoder_resblock_dilations: [[1, 3, 5], [1, 3, 5], [1, 3, 5]]
- use_weight_norm_in_decoder: True
- posterior_encoder_kernel_size: 5
- posterior_encoder_layers: 16
- posterior_encoder_stacks: 1
- posterior_encoder_base_dilation: 1
- posterior_encoder_dropout_rate: 0.0
- use_weight_norm_in_posterior_encoder: True
- flow_flows: 4
- flow_kernel_size: 5
- flow_base_dilation: 1
- flow_layers: 4
- flow_dropout_rate: 0.0
- use_weight_norm_in_flow: True
- use_only_mean_in_flow: True
- stochastic_duration_predictor_kernel_size: 3
- stochastic_duration_predictor_dropout_rate: 0.5
- stochastic_duration_predictor_flows: 4
- stochastic_duration_predictor_dds_conv_layers: 3
- # discriminator related
- discriminator_type: hifigan_multi_scale_multi_period_discriminator
- discriminator_params:
- scales: 1
- scale_downsample_pooling: "AvgPool1D"
- scale_downsample_pooling_params:
- kernel_size: 4
- stride: 2
- padding: 2
- scale_discriminator_params:
- in_channels: 1
- out_channels: 1
- kernel_sizes: [15, 41, 5, 3]
- channels: 128
- max_downsample_channels: 1024
- max_groups: 16
- bias: True
- downsample_scales: [2, 2, 4, 4, 1]
- nonlinear_activation: "leakyrelu"
- nonlinear_activation_params:
- negative_slope: 0.1
- use_weight_norm: True
- use_spectral_norm: False
- follow_official_norm: False
- periods: [2, 3, 5, 7, 11]
- period_discriminator_params:
- in_channels: 1
- out_channels: 1
- kernel_sizes: [5, 3]
- channels: 32
- downsample_scales: [3, 3, 3, 3, 1]
- max_downsample_channels: 1024
- bias: True
- nonlinear_activation: "leakyrelu"
- nonlinear_activation_params:
- negative_slope: 0.1
- use_weight_norm: True
- use_spectral_norm: False
- # others
- sampling_rate: 22050 # needed in the inference for saving wav
- cache_generator_outputs: True # whether to cache generator outputs in the training
-
-###########################################################
-# LOSS SETTING #
-###########################################################
-# loss function related
-generator_adv_loss_params:
- average_by_discriminators: False # whether to average loss value by #discriminators
- loss_type: mse # loss type, "mse" or "hinge"
-discriminator_adv_loss_params:
- average_by_discriminators: False # whether to average loss value by #discriminators
- loss_type: mse # loss type, "mse" or "hinge"
-feat_match_loss_params:
- average_by_discriminators: False # whether to average loss value by #discriminators
- average_by_layers: False # whether to average loss value by #layers of each discriminator
- include_final_outputs: True # whether to include final outputs for loss calculation
-mel_loss_params:
- fs: 22050 # must be the same as the training data
- fft_size: 1024 # fft points
- hop_size: 256 # hop size
- win_length: null # window length
- window: hann # window type
- num_mels: 80 # number of Mel basis
- fmin: 0 # minimum frequency for Mel basis
- fmax: null # maximum frequency for Mel basis
-    log_base: null                     # null represents natural log
-
-###########################################################
-# ADVERSARIAL LOSS SETTING #
-###########################################################
-lambda_adv: 1.0 # loss scaling coefficient for adversarial loss
-lambda_mel: 45.0 # loss scaling coefficient for Mel loss
-lambda_feat_match: 2.0 # loss scaling coefficient for feat match loss
-lambda_dur: 1.0 # loss scaling coefficient for duration loss
-lambda_kl: 1.0 # loss scaling coefficient for KL divergence loss
-# others
-sampling_rate: 22050 # needed in the inference for saving wav
-cache_generator_outputs: True # whether to cache generator outputs in the training
-
-
-###########################################################
-# DATA LOADER SETTING #
-###########################################################
-batch_size: 50 # Batch size.
-num_workers: 4 # Number of workers in DataLoader.
-
-##########################################################
-# OPTIMIZER & SCHEDULER SETTING #
-##########################################################
-# optimizer setting for generator
-generator_optimizer_params:
- beta1: 0.8
- beta2: 0.99
- epsilon: 1.0e-9
- weight_decay: 0.0
-generator_scheduler: exponential_decay
-generator_scheduler_params:
- learning_rate: 2.0e-4
- gamma: 0.999875
-
-# optimizer setting for discriminator
-discriminator_optimizer_params:
- beta1: 0.8
- beta2: 0.99
- epsilon: 1.0e-9
- weight_decay: 0.0
-discriminator_scheduler: exponential_decay
-discriminator_scheduler_params:
- learning_rate: 2.0e-4
- gamma: 0.999875
-generator_first: False # whether to start updating generator first
-
-##########################################################
-# OTHER TRAINING SETTING #
-##########################################################
-num_snapshots: 10 # max number of snapshots to keep while training
-train_max_steps: 350000 # Number of training steps. == total_iters / ngpus, total_iters = 1000000
-save_interval_steps: 1000 # Interval steps to save checkpoint.
-eval_interval_steps: 250 # Interval steps to evaluate the network.
-seed: 777 # random seed number
diff --git a/examples/aishell3/vits/local/preprocess.sh b/examples/aishell3/vits/local/preprocess.sh
deleted file mode 100755
index 70ee064f8..000000000
--- a/examples/aishell3/vits/local/preprocess.sh
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/bin/bash
-
-stage=0
-stop_stage=100
-
-config_path=$1
-add_blank=$2
-
-# copy from tts3/preprocess
-if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
- # get durations from MFA's result
- echo "Generate durations.txt from MFA results ..."
- python3 ${MAIN_ROOT}/utils/gen_duration_from_textgrid.py \
- --inputdir=./aishell3_alignment_tone \
- --output durations.txt \
- --config=${config_path}
-fi
-
-if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
- # extract features
- echo "Extract features ..."
- python3 ${BIN_DIR}/preprocess.py \
- --dataset=aishell3 \
- --rootdir=~/datasets/data_aishell3/ \
- --dumpdir=dump \
- --dur-file=durations.txt \
- --config=${config_path} \
- --num-cpu=20 \
- --cut-sil=True
-fi
-
-if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
-    # get features' stats (mean and std)
- echo "Get features' stats ..."
- python3 ${MAIN_ROOT}/utils/compute_statistics.py \
- --metadata=dump/train/raw/metadata.jsonl \
- --field-name="feats"
-fi
-
-if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
-    # normalize and convert phone/speaker to id; dev and test should use train's stats
- echo "Normalize ..."
- python3 ${BIN_DIR}/normalize.py \
- --metadata=dump/train/raw/metadata.jsonl \
- --dumpdir=dump/train/norm \
- --feats-stats=dump/train/feats_stats.npy \
- --phones-dict=dump/phone_id_map.txt \
- --speaker-dict=dump/speaker_id_map.txt \
- --add-blank=${add_blank} \
- --skip-wav-copy
-
- python3 ${BIN_DIR}/normalize.py \
- --metadata=dump/dev/raw/metadata.jsonl \
- --dumpdir=dump/dev/norm \
- --feats-stats=dump/train/feats_stats.npy \
- --phones-dict=dump/phone_id_map.txt \
- --speaker-dict=dump/speaker_id_map.txt \
- --add-blank=${add_blank} \
- --skip-wav-copy
-
- python3 ${BIN_DIR}/normalize.py \
- --metadata=dump/test/raw/metadata.jsonl \
- --dumpdir=dump/test/norm \
- --feats-stats=dump/train/feats_stats.npy \
- --phones-dict=dump/phone_id_map.txt \
- --speaker-dict=dump/speaker_id_map.txt \
- --add-blank=${add_blank} \
- --skip-wav-copy
-fi
diff --git a/examples/aishell3/vits/local/synthesize.sh b/examples/aishell3/vits/local/synthesize.sh
deleted file mode 100755
index 07f873594..000000000
--- a/examples/aishell3/vits/local/synthesize.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-
-config_path=$1
-train_output_path=$2
-ckpt_name=$3
-stage=0
-stop_stage=0
-
-if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
- FLAGS_allocator_strategy=naive_best_fit \
- FLAGS_fraction_of_gpu_memory_to_use=0.01 \
- python3 ${BIN_DIR}/synthesize.py \
- --config=${config_path} \
- --ckpt=${train_output_path}/checkpoints/${ckpt_name} \
- --phones_dict=dump/phone_id_map.txt \
- --speaker_dict=dump/speaker_id_map.txt \
- --test_metadata=dump/test/norm/metadata.jsonl \
- --output_dir=${train_output_path}/test
-fi
diff --git a/examples/aishell3/vits/local/synthesize_e2e.sh b/examples/aishell3/vits/local/synthesize_e2e.sh
deleted file mode 100755
index f0136991f..000000000
--- a/examples/aishell3/vits/local/synthesize_e2e.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-
-config_path=$1
-train_output_path=$2
-ckpt_name=$3
-add_blank=$4
-
-stage=0
-stop_stage=0
-
-
-if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
- FLAGS_allocator_strategy=naive_best_fit \
- FLAGS_fraction_of_gpu_memory_to_use=0.01 \
- python3 ${BIN_DIR}/synthesize_e2e.py \
- --config=${config_path} \
- --ckpt=${train_output_path}/checkpoints/${ckpt_name} \
- --phones_dict=dump/phone_id_map.txt \
- --speaker_dict=dump/speaker_id_map.txt \
- --spk_id=0 \
- --output_dir=${train_output_path}/test_e2e \
- --text=${BIN_DIR}/../sentences.txt \
- --add-blank=${add_blank}
-fi
diff --git a/examples/aishell3/vits/local/train.sh b/examples/aishell3/vits/local/train.sh
deleted file mode 100755
index 8d3fcdae3..000000000
--- a/examples/aishell3/vits/local/train.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-
-config_path=$1
-train_output_path=$2
-
-# install monotonic_align
-cd ${MAIN_ROOT}/paddlespeech/t2s/models/vits/monotonic_align
-python3 setup.py build_ext --inplace
-cd -
-
-python3 ${BIN_DIR}/train.py \
- --train-metadata=dump/train/norm/metadata.jsonl \
- --dev-metadata=dump/dev/norm/metadata.jsonl \
- --config=${config_path} \
- --output-dir=${train_output_path} \
- --ngpu=4 \
- --phones-dict=dump/phone_id_map.txt \
- --speaker-dict=dump/speaker_id_map.txt
diff --git a/examples/aishell3/vits/run.sh b/examples/aishell3/vits/run.sh
deleted file mode 100755
index 157a7d4ac..000000000
--- a/examples/aishell3/vits/run.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash
-
-set -e
-source path.sh
-
-gpus=0,1,2,3
-stage=0
-stop_stage=100
-
-conf_path=conf/default.yaml
-train_output_path=exp/default
-ckpt_name=snapshot_iter_153.pdz
-add_blank=true
-
-# with the following command, you can choose the stage range you want to run
-# such as `./run.sh --stage 0 --stop-stage 0`
-# this cannot be mixed with positional arguments `$1`, `$2`, ...
-source ${MAIN_ROOT}/utils/parse_options.sh || exit 1
-
-if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
- # prepare data
-    ./local/preprocess.sh ${conf_path} ${add_blank} || exit -1
-fi
-
-if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
-    # train model; all `ckpt` files are saved under the `train_output_path/checkpoints/` dir
- CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path} || exit -1
-fi
-
-if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
- CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1
-fi
-
-if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
-    CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh ${conf_path} ${train_output_path} ${ckpt_name} ${add_blank} || exit -1
-fi
diff --git a/examples/aishell3_vctk/README.md b/examples/aishell3_vctk/README.md
index 12213da2a..330b25934 100644
--- a/examples/aishell3_vctk/README.md
+++ b/examples/aishell3_vctk/README.md
@@ -1,2 +1 @@
# Mixed Chinese and English TTS with AISHELL3 and VCTK datasets
-* ernie_sat - ERNIE-SAT
diff --git a/examples/aishell3_vctk/ernie_sat/README.md b/examples/aishell3_vctk/ernie_sat/README.md
index a849488d5..1c6bbe230 100644
--- a/examples/aishell3_vctk/ernie_sat/README.md
+++ b/examples/aishell3_vctk/ernie_sat/README.md
@@ -1,163 +1 @@
-# ERNIE-SAT with AISHELL3 and VCTK dataset
-
-ERNIE-SAT is a cross-lingual, cross-modal speech-and-language model that handles Chinese and English at the same time. It achieves leading results on tasks such as speech editing, personalized speech synthesis, and cross-lingual speech synthesis, and can be applied to scenarios such as speech editing, personalized synthesis, voice cloning, and simultaneous interpretation. This project is provided for research use.
-
-## Model Framework
-ERNIE-SAT introduces two innovations:
-- During pretraining, the phonemes corresponding to the Chinese and English bilingual text are used as input, enabling cross-lingual, personalized soft phoneme mapping
-- Joint masked learning over language and speech is used to align the two modalities
-
-
-
-
-
-## Dataset
-### Download and Extract
-Download all datasets and extract them to `~/datasets`:
-- The aishell3 dataset is in the directory `~/datasets/data_aishell3`
-- The vctk dataset is in the directory `~/datasets/VCTK-Corpus-0.92`
-
-### Get MFA Result and Extract
-We use [MFA](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) to get durations for the fastspeech2 training.
-You can download from here:
-- [aishell3_alignment_tone.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/AISHELL-3/with_tone/aishell3_alignment_tone.tar.gz)
-- [vctk_alignment.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/VCTK-Corpus-0.92/vctk_alignment.tar.gz)
-
-Or train your own MFA model by referring to the [mfa example](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/other/mfa) (which uses MFA1.x for now) in our repo.
-
-## Get Started
-Assume the paths to the datasets are:
-- `~/datasets/data_aishell3`
-- `~/datasets/VCTK-Corpus-0.92`
-
-Assume the paths to the MFA results of the datasets are:
-- `./aishell3_alignment_tone`
-- `./vctk_alignment`
-
-Run the command below to
-1. **source path**.
-2. preprocess the dataset.
-3. train the model.
-4. synthesize wavs.
- - synthesize waveform from `metadata.jsonl`.
- - synthesize waveform from text file.
-
-```bash
-./run.sh
-```
-You can choose a range of stages you want to run, or set `stage` equal to `stop-stage` to use only one stage. For example, running the following command will only preprocess the dataset.
-```bash
-./run.sh --stage 0 --stop-stage 0
-```
-### Data Preprocessing
-```bash
-./local/preprocess.sh ${conf_path}
-```
-When it is done, a `dump` folder is created in the current directory. The structure of the `dump` folder is listed below.
-
-```text
-dump
-├── dev
-│ ├── norm
-│ └── raw
-├── phone_id_map.txt
-├── speaker_id_map.txt
-├── test
-│ ├── norm
-│ └── raw
-└── train
- ├── norm
- ├── raw
- └── speech_stats.npy
-```
-The dataset is split into 3 parts, namely `train`, `dev`, and `test`, each of which contains a `norm` and a `raw` subfolder. The `raw` folder contains the speech features of each utterance, while the `norm` folder contains the normalized ones. The statistics used to normalize features are computed from the training set, which is located in `dump/train/*_stats.npy`.
-
-Also, there is a `metadata.jsonl` in each subfolder. It is a table-like file that contains phones, text_lengths, speech_lengths, durations, the path of speech features, the speaker, and the id of each utterance.
-
-### Model Training
-```bash
-CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path}
-```
-`./local/train.sh` calls `${BIN_DIR}/train.py`.
-
-### Synthesizing
-We use [HiFiGAN](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/voc5) as the neural vocoder.
-
-Download pretrained HiFiGAN model from [hifigan_aishell3_ckpt_0.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/hifigan/hifigan_aishell3_ckpt_0.2.0.zip) and unzip it.
-```bash
-unzip hifigan_aishell3_ckpt_0.2.0.zip
-```
-The HiFiGAN checkpoint contains the files listed below.
-```text
-hifigan_aishell3_ckpt_0.2.0
-├── default.yaml # default config used to train HiFiGAN
-├── feats_stats.npy # statistics used to normalize spectrogram when training HiFiGAN
-└── snapshot_iter_2500000.pdz # generator parameters of HiFiGAN
-```
-`./local/synthesize.sh` calls `${BIN_DIR}/../synthesize.py`, which can synthesize waveform from `metadata.jsonl`.
-```bash
-CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name}
-```
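-With HiFiGAN as the vocoder, the call made inside `./local/synthesize.sh` looks roughly like the sketch below (a sketch, not the exact script: `${ckpt_name}` is the ERNIE-SAT snapshot you trained, and the vocoder paths assume you unzipped `hifigan_aishell3_ckpt_0.2.0.zip` in the example directory):
-```bash
-FLAGS_allocator_strategy=naive_best_fit \
-FLAGS_fraction_of_gpu_memory_to_use=0.01 \
-python3 ${BIN_DIR}/synthesize.py \
-    --erniesat_config=conf/default.yaml \
-    --erniesat_ckpt=exp/default/checkpoints/${ckpt_name} \
-    --erniesat_stat=dump/train/speech_stats.npy \
-    --voc=hifigan_aishell3 \
-    --voc_config=hifigan_aishell3_ckpt_0.2.0/default.yaml \
-    --voc_ckpt=hifigan_aishell3_ckpt_0.2.0/snapshot_iter_2500000.pdz \
-    --voc_stat=hifigan_aishell3_ckpt_0.2.0/feats_stats.npy \
-    --test_metadata=dump/test/norm/metadata.jsonl \
-    --output_dir=exp/default/test \
-    --phones_dict=dump/phone_id_map.txt
-```
-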
-## Speech Synthesis and Speech Editing
-### Prepare
-
-**prepare aligner**
-```bash
-mkdir -p tools/aligner
-cd tools
-# download MFA
-wget https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner/releases/download/v1.0.1/montreal-forced-aligner_linux.tar.gz
-# extract MFA
-tar xvf montreal-forced-aligner_linux.tar.gz
-# fix .so of MFA
-cd montreal-forced-aligner/lib
-ln -snf libpython3.6m.so.1.0 libpython3.6m.so
-cd -
-# download align models and dicts
-cd aligner
-wget https://paddlespeech.bj.bcebos.com/MFA/ernie_sat/aishell3_model.zip
-wget https://paddlespeech.bj.bcebos.com/MFA/AISHELL-3/with_tone/simple.lexicon
-wget https://paddlespeech.bj.bcebos.com/MFA/ernie_sat/vctk_model.zip
-wget https://paddlespeech.bj.bcebos.com/MFA/LJSpeech-1.1/cmudict-0.7b
-cd ../../
-```
-**prepare pretrained FastSpeech2 models**
-
-ERNIE-SAT uses FastSpeech2 as the phoneme duration predictor:
-```bash
-mkdir download
-cd download
-wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_conformer_baker_ckpt_0.5.zip
-wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_nosil_ljspeech_ckpt_0.5.zip
-unzip fastspeech2_conformer_baker_ckpt_0.5.zip
-unzip fastspeech2_nosil_ljspeech_ckpt_0.5.zip
-cd ../
-```
-**prepare source data**
-```bash
-mkdir source
-cd source
-wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/SSB03540307.wav
-wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/SSB03540428.wav
-wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/LJ050-0278.wav
-wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/p243_313.wav
-wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/p299_096.wav
-wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/this_was_not_the_show_for_me.wav
-wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/source/README.md
-cd ../
-```
-You can check the text of the downloaded wavs in `source/README.md`.
-### Cross Language Voice Cloning
-```bash
-./run.sh --stage 3 --stop-stage 3 --gpus 0
-```
-`stage 3` of `run.sh` calls `local/synthesize_e2e.sh`.
-
-You can modify `--wav_path`, `--old_str`, and `--new_str` yourself. `--old_str` should be the text corresponding to the audio of `--wav_path`, `--new_str` should be designed according to `--task_name`, and `--source_lang` and `--target_lang` should be different in this example.
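-
-For example, the en to zh stanza in `local/synthesize_e2e.sh` looks roughly like the sketch below (where `${ckpt_name}` is the snapshot you trained and the vocoder paths follow the Synthesizing section above):
-```bash
-FLAGS_allocator_strategy=naive_best_fit \
-FLAGS_fraction_of_gpu_memory_to_use=0.01 \
-python3 ${BIN_DIR}/synthesize_e2e.py \
-    --task_name=synthesize \
-    --wav_path=source/p243_313.wav \
-    --old_str='For that reason cover should not be given' \
-    --new_str='今天天气很好' \
-    --source_lang=en \
-    --target_lang=zh \
-    --erniesat_config=conf/default.yaml \
-    --phones_dict=dump/phone_id_map.txt \
-    --erniesat_ckpt=exp/default/checkpoints/${ckpt_name} \
-    --erniesat_stat=dump/train/speech_stats.npy \
-    --voc=hifigan_aishell3 \
-    --voc_config=hifigan_aishell3_ckpt_0.2.0/default.yaml \
-    --voc_ckpt=hifigan_aishell3_ckpt_0.2.0/snapshot_iter_2500000.pdz \
-    --voc_stat=hifigan_aishell3_ckpt_0.2.0/feats_stats.npy \
-    --output_name=exp/pred_clone_en_zh.wav
-```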
-## Pretrained Model
-Pretrained ErnieSAT model:
-- [erniesat_aishell3_vctk_ckpt_1.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/ernie_sat/erniesat_aishell3_vctk_ckpt_1.2.0.zip)
-
-Model | Step | eval/text_mlm_loss | eval/mlm_loss | eval/loss
-:-------------: | :------------: | :-----: | :-----: | :-----:
-default | 8(gpu) x 489000 | 0.000001 | 52.477642 | 52.477642
+# ERNIE SAT with AISHELL3 and VCTK dataset
diff --git a/examples/aishell3_vctk/ernie_sat/conf/default.yaml b/examples/aishell3_vctk/ernie_sat/conf/default.yaml
index efbdd456d..abb69fcc0 100644
--- a/examples/aishell3_vctk/ernie_sat/conf/default.yaml
+++ b/examples/aishell3_vctk/ernie_sat/conf/default.yaml
@@ -1,6 +1,3 @@
-# This configuration was tested on 8 GPUs (A100) with 80GB of GPU memory.
-# It takes around 4 days to finish the training. You can adjust
-# batch_size and num_workers here, and ngpu in local/train.sh, for your machine.
###########################################################
# FEATURE EXTRACTION SETTING #
###########################################################
@@ -24,8 +21,8 @@ mlm_prob: 0.8
###########################################################
# DATA SETTING #
###########################################################
-batch_size: 40
-num_workers: 8
+batch_size: 20
+num_workers: 2
###########################################################
# MODEL SETTING #
@@ -82,7 +79,7 @@ grad_clip: 1.0
###########################################################
# TRAINING SETTING #
###########################################################
-max_epoch: 1500
+max_epoch: 700
num_snapshots: 50
###########################################################
diff --git a/examples/aishell3_vctk/ernie_sat/local/synthesize.sh b/examples/aishell3_vctk/ernie_sat/local/synthesize.sh
index 8b4178f13..3e907427c 100755
--- a/examples/aishell3_vctk/ernie_sat/local/synthesize.sh
+++ b/examples/aishell3_vctk/ernie_sat/local/synthesize.sh
@@ -4,11 +4,28 @@ config_path=$1
train_output_path=$2
ckpt_name=$3
-stage=0
-stop_stage=0
+stage=1
+stop_stage=1
-# hifigan
+# pwgan
if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
+ FLAGS_allocator_strategy=naive_best_fit \
+ FLAGS_fraction_of_gpu_memory_to_use=0.01 \
+ python3 ${BIN_DIR}/synthesize.py \
+ --erniesat_config=${config_path} \
+ --erniesat_ckpt=${train_output_path}/checkpoints/${ckpt_name} \
+ --erniesat_stat=dump/train/speech_stats.npy \
+ --voc=pwgan_aishell3 \
+ --voc_config=pwg_aishell3_ckpt_0.5/default.yaml \
+ --voc_ckpt=pwg_aishell3_ckpt_0.5/snapshot_iter_1000000.pdz \
+ --voc_stat=pwg_aishell3_ckpt_0.5/feats_stats.npy \
+ --test_metadata=dump/test/norm/metadata.jsonl \
+ --output_dir=${train_output_path}/test \
+ --phones_dict=dump/phone_id_map.txt
+fi
+
+# hifigan
+if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
FLAGS_allocator_strategy=naive_best_fit \
FLAGS_fraction_of_gpu_memory_to_use=0.01 \
python3 ${BIN_DIR}/synthesize.py \
diff --git a/examples/aishell3_vctk/ernie_sat/local/synthesize_e2e.sh b/examples/aishell3_vctk/ernie_sat/local/synthesize_e2e.sh
deleted file mode 100755
index 446ac8791..000000000
--- a/examples/aishell3_vctk/ernie_sat/local/synthesize_e2e.sh
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/bin/bash
-# not ready yet
-
-config_path=$1
-train_output_path=$2
-ckpt_name=$3
-
-stage=0
-stop_stage=1
-
-if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
- echo 'speech cross language from en to zh !'
- FLAGS_allocator_strategy=naive_best_fit \
- FLAGS_fraction_of_gpu_memory_to_use=0.01 \
- python3 ${BIN_DIR}/synthesize_e2e.py \
- --task_name=synthesize \
- --wav_path=source/p243_313.wav \
- --old_str='For that reason cover should not be given' \
- --new_str='今天天气很好' \
- --source_lang=en \
- --target_lang=zh \
- --erniesat_config=${config_path} \
- --phones_dict=dump/phone_id_map.txt \
- --erniesat_ckpt=${train_output_path}/checkpoints/${ckpt_name} \
- --erniesat_stat=dump/train/speech_stats.npy \
- --voc=hifigan_aishell3 \
- --voc_config=hifigan_aishell3_ckpt_0.2.0/default.yaml \
- --voc_ckpt=hifigan_aishell3_ckpt_0.2.0/snapshot_iter_2500000.pdz \
- --voc_stat=hifigan_aishell3_ckpt_0.2.0/feats_stats.npy \
- --output_name=exp/pred_clone_en_zh.wav
-fi
-if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
- echo 'speech cross language from zh to en !'
- FLAGS_allocator_strategy=naive_best_fit \
- FLAGS_fraction_of_gpu_memory_to_use=0.01 \
- python3 ${BIN_DIR}/synthesize_e2e.py \
- --task_name=synthesize \
- --wav_path=source/SSB03540307.wav \
- --old_str='请播放歌曲小苹果' \
- --new_str="Thank you" \
- --source_lang=zh \
- --target_lang=en \
- --erniesat_config=${config_path} \
- --phones_dict=dump/phone_id_map.txt \
- --erniesat_ckpt=${train_output_path}/checkpoints/${ckpt_name} \
- --erniesat_stat=dump/train/speech_stats.npy \
- --voc=hifigan_aishell3 \
- --voc_config=hifigan_aishell3_ckpt_0.2.0/default.yaml \
- --voc_ckpt=hifigan_aishell3_ckpt_0.2.0/snapshot_iter_2500000.pdz \
- --voc_stat=hifigan_aishell3_ckpt_0.2.0/feats_stats.npy \
- --output_name=exp/pred_clone_zh_en.wav
-fi
-
diff --git a/examples/aishell3_vctk/ernie_sat/local/train.sh b/examples/aishell3_vctk/ernie_sat/local/train.sh
index 526aac435..30720e8f5 100755
--- a/examples/aishell3_vctk/ernie_sat/local/train.sh
+++ b/examples/aishell3_vctk/ernie_sat/local/train.sh
@@ -8,5 +8,5 @@ python3 ${BIN_DIR}/train.py \
--dev-metadata=dump/dev/norm/metadata.jsonl \
--config=${config_path} \
--output-dir=${train_output_path} \
- --ngpu=8 \
+ --ngpu=2 \
--phones-dict=dump/phone_id_map.txt
\ No newline at end of file
diff --git a/examples/aishell3_vctk/ernie_sat/run.sh b/examples/aishell3_vctk/ernie_sat/run.sh
index 8cd9d8d1b..d75a19f23 100755
--- a/examples/aishell3_vctk/ernie_sat/run.sh
+++ b/examples/aishell3_vctk/ernie_sat/run.sh
@@ -3,13 +3,13 @@
set -e
source path.sh
-gpus=0,1,2,3,4,5,6,7
+gpus=0,1
stage=0
stop_stage=100
conf_path=conf/default.yaml
train_output_path=exp/default
-ckpt_name=snapshot_iter_489000.pdz
+ckpt_name=snapshot_iter_153.pdz
# with the following command, you can choose the stage range you want to run
# such as `./run.sh --stage 0 --stop-stage 0`
@@ -30,7 +30,3 @@ if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
# synthesize, vocoder is pwgan
CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1
fi
-
-if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
- CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1
-fi
diff --git a/examples/csmsc/README.md b/examples/csmsc/README.md
index 77375faa8..2aad609cb 100644
--- a/examples/csmsc/README.md
+++ b/examples/csmsc/README.md
@@ -1,7 +1,7 @@
# CSMSC
-* tts0 - Tacotron2
+* tts0 - Tactron2
* tts1 - TransformerTTS
* tts2 - SpeedySpeech
* tts3 - FastSpeech2
diff --git a/examples/csmsc/tts2/run.sh b/examples/csmsc/tts2/run.sh
index 557dd4ff3..e51913496 100755
--- a/examples/csmsc/tts2/run.sh
+++ b/examples/csmsc/tts2/run.sh
@@ -46,8 +46,8 @@ fi
if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
# install paddle2onnx
version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
- if [[ -z "$version" || ${version} != '1.0.0' ]]; then
- pip install paddle2onnx==1.0.0
+ if [[ -z "$version" || ${version} != '0.9.8' ]]; then
+ pip install paddle2onnx==0.9.8
fi
./local/paddle2onnx.sh ${train_output_path} inference inference_onnx speedyspeech_csmsc
# considering the balance between speed and quality, we recommend that you use hifigan as vocoder
diff --git a/examples/csmsc/tts3/run.sh b/examples/csmsc/tts3/run.sh
index 80acf8200..2662b5811 100755
--- a/examples/csmsc/tts3/run.sh
+++ b/examples/csmsc/tts3/run.sh
@@ -46,8 +46,8 @@ fi
if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
# install paddle2onnx
version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
- if [[ -z "$version" || ${version} != '1.0.0' ]]; then
- pip install paddle2onnx==1.0.0
+ if [[ -z "$version" || ${version} != '0.9.8' ]]; then
+ pip install paddle2onnx==0.9.8
fi
./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_csmsc
# considering the balance between speed and quality, we recommend that you use hifigan as vocoder
diff --git a/examples/csmsc/tts3/run_cnndecoder.sh b/examples/csmsc/tts3/run_cnndecoder.sh
index bae833157..c5ce41a9c 100755
--- a/examples/csmsc/tts3/run_cnndecoder.sh
+++ b/examples/csmsc/tts3/run_cnndecoder.sh
@@ -59,8 +59,8 @@ fi
if [ ${stage} -le 7 ] && [ ${stop_stage} -ge 7 ]; then
# install paddle2onnx
version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
- if [[ -z "$version" || ${version} != '1.0.0' ]]; then
- pip install paddle2onnx==1.0.0
+ if [[ -z "$version" || ${version} != '0.9.8' ]]; then
+ pip install paddle2onnx==0.9.8
fi
./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_csmsc
# considering the balance between speed and quality, we recommend that you use hifigan as vocoder
@@ -79,8 +79,8 @@ fi
if [ ${stage} -le 9 ] && [ ${stop_stage} -ge 9 ]; then
# install paddle2onnx
version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
- if [[ -z "$version" || ${version} != '1.0.0' ]]; then
- pip install paddle2onnx==1.0.0
+ if [[ -z "$version" || ${version} != '0.9.8' ]]; then
+ pip install paddle2onnx==0.9.8
fi
# streaming acoustic model
./local/paddle2onnx.sh ${train_output_path} inference_streaming inference_onnx_streaming fastspeech2_csmsc_am_encoder_infer
diff --git a/examples/csmsc/vits/run.sh b/examples/csmsc/vits/run.sh
index 74505d9b9..c284b7b23 100755
--- a/examples/csmsc/vits/run.sh
+++ b/examples/csmsc/vits/run.sh
@@ -3,7 +3,7 @@
set -e
source path.sh
-gpus=0,1,2,3
+gpus=0,1
stage=0
stop_stage=100
diff --git a/examples/ernie_sat/.meta/framework.png b/examples/ernie_sat/.meta/framework.png
new file mode 100644
index 0000000000000000000000000000000000000000..c68f62467952aaca91f290e5dead723078343d64
GIT binary patch
literal 143263
zbHyO38kou9m$Q&(HBYM!Nqwnsxe}INWuYiCAg8iZF>h?(NnVv1-N{|taW>#%dgky1
zRtoh->0>mJ9T)+r_sQ+cQp554Chwx-@1t{7B>aaitL1CPUjP0DeI5=;z&Pz9Z07xw
zBO;~Nf*(!Rh;7RY9qURcz`q0BXKZYrAg9HT4)~
zvK~oCxg_dD&WtyMChvNykgeQsoh=0ngyAWZcY7o}59o&Yek#jDi4FZbzmx)B>suMT
zWvnPV486`$5X5;DkKD?{UeK>ICxVzHpL~%lmfk6&Kk2j3gRu}E7nXKqYMSm>DDTDm
zSXq{dZHIvbtvPQLI`UhxWg^Rw()H+u-&73@8G+#{^_W)J2n+p_xV*lbXb&YsNFuK0
zSYnxl-aPziH9EbIqohYm(d(02OD3BdIQy;M=U1(*B^M*v3`FRyQ<>h}sJ-qA|7?rV
zMS<;P0x$fss>O~n7frDBVosO>fM;E>H$fRzHYX62%*ha8`{^Uy>*J(iNlo<>qn3wX
zl#MD@Rz3WDr4EnRR|l6uf6zRyGMSeRPAJQ*SOR8Q)-%4&I&P@#8hLpA&v?vdA+9-_X
z#8&k~Na3V`0pDr;OL9WeX$#&vyQ6_W^{!r{J#4<@&|i=byPKk2UD1d=%x?H{U-IFx
z`NRaD4mb@Si)k_lX0GGYubv(OIoE-RW_ifTfyJ}7E?;|Us)grgV-Si3dp^o^A6#>l
z^>QDzjC%<9M4BHz%g<(sGd-wOV3fnV0q%^<}QQ1ieAyU*b3?
zBSrZvK|e`SY^-M8@~9z=tXT5|9a{_Lg1HIgLnDbvR>#Jxu>lI}eSR9m^WS%Imb=fR
zzi{1nr>1K5kv%{Kx-0brZ$%I)(p%er^O>$J|CBernCd6OIqU^mK(>NHcmp>YnTCSX
z+{DZ33iowBHc_|5T-#oUSZHFTjSc8k>^M0NYljLZcSrP99*{y>`8%nThC9ac=(xM_
z56mr3q|Kil0=+Xe$eFwU6OQ2jfzSLoDEse~@BW|a0Gv)E9~LLsNgZwlD7<*w+c}7M
zGRSZYX#<8}9!7e|LG5{Gr66R^Q^a?IS?(g3@ad@dgJR5L4-4C!gxc`lOPaG~R~cwk
z!mWyL8~b~qT9O|imx|=I%hqj^qbKM*^qA2vO2eN)M6c_D}RZ#?a_4M6)UY
z-)ARwxZ?A=3k-6?BoHY8o-%`4o&tDW}T=nunmR^!a#?!}eE=rv@t+
zS4VGl)I=H5Xy0Hf%q?erZw!SzN%RATJa?FyzzhZ@7bQ}&q5dN1U4#Hhs`=;dqA}k$$EJ)hzh43yVN68+<-zkOQLZQZA}WH
zXb6#!=o~$vPA?wqdYeI<(R+AF_A*?kT294Jj^SYQiJ|@N_Kl>TE7f&?PKm@L;BNkg
z2K#3*#Q$DV3NVTGQDvRQZBZA;{EfE>Zr4Lt2b4714b+1#E{9P~eXVgOJWvKG>1wlo
z5Gua{gvxIKqieZ2<3L~pgB6#dh`&=xmH@gm|K>g9KYWQg@85^|e|xR4NDzQuk-z#m
z0+Tm9d^|~Ff~q
zrRj6_{N1}+fB&shqyPF)cLe@@pntX8|6Zei+tL4JzQ&O-s5_l?r*m}Q
zaKoV7EU&`SnU0uhW+9XKOy~0MTz7|!1=pv8besBr1QW0zoV?D6Tt;N{)W&$J<=sX>
zW;Pq=
zot!d3%m)EU*n5^{$iZoVH-=%fu;DzpF!l;)RwE+&RMC**`MG{Q%6TviVvwPgLXwgp
zU4#P=&u7jMGQn)~O6EodBHFL{u=Y1qKLzg!!TJ*9p&9sl
zrc4tj`~P}*|JH&apJYjI$@$>d;0sV*Ao_D*Fc6leU0NeZR>*&kNH|I8oqW`!(<
z&u5N)q*MJFJuAeQ^*r;U%?IjqI*8ey9OXYvLkmd^#oLI|t@O
zk-3D#0zSu{yZOE8&*`iRtV9er*OnX>e~rFzN7;Bo
z|IDBkS*95A5xC0EE%{86H&$S#k!Ql=OXhvc8M-!OXMedrLo62aBARhxPUuY8c9oO1;AtVJJx
z{BJgtOBYWYsEsDC|Hnq@()XVW_)m;vT&%s%`Qad(-K(?BjI#^fIC3DMxih?^EF1O8
zadv3pn~2G2rLaBFkGf7(NkS}9jsWg}D~wu?RTgQ;==ZHOtM{zV_VKPU?ogjW=B>s{9WO>2g#kFj8aFfXo)Zhi*e0vwC1o4#6jE
z6Jx3XZV|0@?>AM6M=m8)m#}jwm)kfjpemz4bv{`9ftR;p-R6l<_8)S3_0;Pu|0t;U
zr^y~z1bzvP3yB2A?8s1idEyO&_j;C9{WIwgRUiXVjAysFm7^tF?sRuY>)xw5qokcz
znZo_Ek=~qSighM&AX=eB=Ze~a$Vxvp6J?%nllqMIZ)!{pb+y<&GdywZCm1
ze!R9L#UpDjaN4!$T12+lQ2HPH>umrOLvgbLDOZ3fywk#ko@>$R2#1pMfT1JlBPbSi1-h
z#CZba5_8tgNUeM0JLh_uC2Dh%SevMd7rL9MKN_NGQEv%7L~+ui@TE3T#f7c0r4$Rg
zj>jggxf^)165En7A%CuG{dcub+1#5cyk@F?)o5jz0K%IQ)^LnV_&{7Dz67V4l&`ZW
zw(kyRd8ON9#j&9-S;qFE9^89pruXh@Rb*w45~qOHjw&3Q2hoE}R3#`ahGylV{Cri-
z6{gLnM;g&Ji?#ACdNN2b8q$F}OX#tjkg8YAYPItaP0E??u@{)GF!~mLNxen0{~1k(
z2@|fl4$HE*I96{=yhrKs!8a7m5N@=NOwHNbZTHJag`^4_>Ku!6i-&Sh6$M6jp->bq
z*l8Rx0O0@$L-5H#iyIK*0If!k)C{Nc)vNPhy
z<&2ZIs6PTL0_=xX^#5Y-&EuhN`~G3AT2Yb^Q`z?GkRy6@lb^*qn-_59Jx9H#Hrv3!ot
z{&|0>dv+kyHA*5Wt7S1wLfx_Suu-9qtsAwr<1>GhV+kEE!`fN7!O#5c+NR?kMiX~M
zGdd{Qvl-7xyGORA1`;Kq%C>R`q`QYCZ7QR?!embIBypaRF)-ip2-xSl(tsWkTe(Sp
zjl2fp*v6HmRc8L1I%(?bN#-h{QdY(fV)$-NK&>3YnmPFSPlu8HAmgA^hWS(4Nzfw&
zRO*DJalL6Et3Jth2klGD47ytC%|c%rzj}B4gmnnxq`6GW`9ij5Ia-ygi`B4&oU+??
zISNtnDqOA(+ih8;z8*?1Ku=4XX6X<(NgEUD@k>#xBFjH5d*ip?%z%qau;!N3_z?fC
zAWi6n{3f1H`-;_tNB#FP+FYpUt5h>`MGKm(RvQ)uDrC!y6U2)X>>qwif7Zapq;v9o
z*ivPp;--y<8a#*5yhcS)uHmpYnBeXBx|xu&t#NGqK$)7CMRgr}i$GzEuG-CmeEnvb
zH;aL2kdMPc83k?H}Cwk^UevN6hKg-l)cEC+ZcA
zO#hFV$C&yRvYM1Q6Y#Itf)7Z
zIhu}+98EPJs~#L2n?0YKJ@skw^nl|KV*Dt6iG3W)^%!-M=1lavjw3ARp$G;02l02k
zEXuqp)F}x&o#Oj1>$_af{&Kk$x=D|N?xJ>#-$xzeY}r(B2Yrje9Ln;f7q_eut!syd
zi??Fb%I|pZaoWs5S;GVB&r{D@l37Mt5*7S9s3k4(!=s7c1!J!!;P?*|uS6*qvy40J
z=^woOq|PK!$#Z&|E@dAIs(!*3XK1z90<7M6^j`mODE41&lf88E`wZLs`(}uGZn8ky
zi9Yu$-AS(d3Yv>PJXne~zvy`^F1G5_P6nT%e4x8BD`D{I1nN0Ab89fQ%x+L2V}aAh
zezN*exaDG(&+}4)r`Jv#9Xf(oXC`NMVApR6VDBUxtzWu*84bVU;uNpSwF+Zm-&fhb^Ygh59
zN^|YJ*3Dcm$5Pp$0Q}@xD60J}b!Gv0PUafN>e(EJ-cWj|=p6;BHG~L_%cDxJAC=cB
zA(hYNC!2~C^ggz?j+eANVsM^w`c>gLi)$bpRDhN_HaZ@oIa67Q+ApBT)4E3V2RhL)
zk~_5?3l!fE@9h^u`H$qXozG73LriE%kHcsu%tsoyQH@l8elK}d(neY;OH_%G>XJ9d
zE18h1#?a+;|Tnjy7`
z07wi=x^p;sQTdSck#>R>;ueWM7`LbNy^c7uv0
zi;r(ECJ`XVeo!@U;%Y@L^B0DL0=Ep~t|u(*yvCoIu!6Zc89#jzbgl+=%)eoCTWvAY
zdk2P88YqyWUCTwBOzuk0wTqyT?%!8Ack%V=M9hFbn}ppp<@R>DF{C@lnlzmFPTNO5
zOo$FWN5VF*1aG^SMP{un`zpRBgdaWeTJ?c^LKBCWPLlB*ui)V3GH*$5OJ7@GdG}53
zd;<=3k&ZjI!`s7i#@M7a);3rmp9np8u~a(>zrWTKV{U9>=i$LU7FuY|ljl1tWorBB
z(A#DHy~nCts}7v2W)0^w7o-;eN3gOy1;ux+A1O1g%-VF)T(IAMQRw`Ayz*2n=UL4o
z%V*5gPW?Fa*5HBsF*Ey>#i|`W#y9AhZv=WS&ElQf_|ooW#in;uTjM+F7u5nqS7QvF
z#)~Y2gza*!VASd9C~IwqS0K?NlDtWABXeXwr@3but7U8L8;-H`vppaZvZH%d<|Mz^
zynehPk4E6lj~l10<{EN6%~uTZnCuS|VehjzbZz@f>y|@nOqR#0M>G?<7rP1FLz@KY
zmN^CRPHEpy5w8OO0zM=F^Rj)a~8C@wlXr;xJs
zj)#6w!xSrUwbwjM`Yt(oyNh45S}Of)A~)IAfn9z@pQm3Sp#o5d?h{nKwq&(76r0yW
z3k=m|;Z{#6o%>SM0cj~t$6?{b_M^`-bC~LPedd@+fPIWG!7pM7+CgdOYfAeVXQ&s=
zv@@u2>Yk3p_v`vAj)}a=bg31n9Lu_9uoZV?msBBl<}l@$vw@GjpFS?WJtHi8pI~V6
ztEz;Wkx^TiBE*Syf$9$WLz1A0W*zC!u3`S{{o
zp}zG<(b~Yu5=6FU1^*Sa9Q^+Izv?^q-}#Q{&ksV%;RPsM4(J0iSf4|(p@k_3(*p}&
z>l%h%@SrSVQe_Yn2KR%Cd`s#z;OGdiDt;bE#H{x42t|@T8Yov5v-uCYVS;XJzt4mOYm6>7
zMRf~+xR@FhB+VN;!#W^sv?jViBkE%P21BhC)Swh0!P&s{P~(E#pz0)paZ>s@YG&S%
zwzmqXO8OMFAxZ2^EDFDeCi0J2K^CNhw=6+f^lMb^ckSuKG3;k5{?AW>zPx`6m;IY3VzmJEiMTF=Q-F0)F5pQ+Gd}
zdJtH(tsj6Y8BTy~ZG-%uT~a9OfL{zn({XDOy$Ph_A!xh(PbQ@}#ukL2{ok6~&_9;P
zO3L`h+)(+I=&9%lh6N+f0+tN-0d)@(sJ|@izalUW?i-e8-Bm&(Y!FN*mM(X(rnd+;
ze~&Eq?L_q(K}8XkbrlM%-aju6@|P3F6#spNR0!ekr^``EvHHtWC@+6d+3ew>TmY#s4#bDGKedLrnqMDgGD1
z^!F>?_7}$W|53s8KVj^w8wBd;ce&M10f
z%9OYdQ8iTz_8!YN+0D8aBB+dT{SWJq<1!NyGm~L_RGT)3oI9>tyT6G!=ORCVY
z!!|${JL{d1EKC#KJGn!eezk)FP?4mhKPtQIFY4aPk&&XU0K+|aZbPv+4>xPE`LeLY*q8)iha{5;lAPOPSyqm4b$)QMe5GH4N;;M
z60e&sJo9S1Y5y7FK_)7lPx|x>`;wruo;`c7N_a&+uK_+d=9oYIR#4#G7Nul#@%tN%K0IAaC-gL`LlCwjL*M5kdnAV`CZ^w=W6EEYzQzx!ZlkIzM?2my?=Tk}#~+;YSx%K4n0uvg`|cC_45mGF#E`vx
zOF^v%VeA@r(2*I5s~gG-(ScKuULyHgDU!=I3l81u3e@+PRl?(%Ml~3#<%!E!-4=in
zZi4&-z(#kqdSP2+W)139s*d`eZ_y@snfdb))gth91I{c&lJN-jJG`!Ei#+G*^<3wihkTX$qx&?OcTG3RZU+~PNob_FW6(IT*nuD
zp`MGFj@}(wW0*+6?PpaOcCRRW6GccbxH9hCDY`cB-B@|j^0Erm?vWnF=bP0nt;f|y
ze%gIo?CG~%QTx1yvIoa_+jb`^&X0}xXZxFOz#Xz89An(fkkRK!GZ8Cpbgjw$OxuEG
zw5&~R1S(Xm!b#Dv7No2;mt1e?`DFdT<$!Vva*e=yyxVjjtGI5A|JLY>roi|I`+Un|
zG8-~X=wSnsUQY#YM%Fm^?wMPHi#Ko-^}QXn>AbVtwVjQd?9L)?8(khWZ#T(`A1Fm+
zcXGedQSlohTNpvl;+EVFv%6jy<+oDd^Y#FS|Bu0qhjsu}y39DJHT{Y3%cE~gs`=Ov#7+~uiM@5E)avP5&uJ0p(Y6PQ
zKHQs2!uq9yPhz(_iwiart1I03i#R4vR!b!cM+^6wk7~Vh965PsKR~dTo2sAlqSbRZG}BnzE&Xa)O$cNVRqv--2PoGvQt{d
z;x2V@xby;Wjqs2Yzm@Awn_nS2evc{jlLI)8ryts$02l;%v@<`MycmIFb)x;%7xs%+
z9;@<+*yNakijf@RvTZqi@+B3!f=n-{R~}#9B^>30>vC2q%G(Fa8G8I9bwgd!ej2nH
zmi7&{eC}0Aw_CXvTOL`@xHj&{$vO0H-Z5d$QJfFED5FegC773(i;j06oJ$@{*{pb>
zA1@^KUVbEiiJ#n#dGtbB=tQ`$deQY|*M!T<+O9^V_}foD%4`p$*#CB+zKw(uo=07P
z8}{JG40`Ff97%AWbdleG-KS3Kn`Q$Br07C9AMtjGSbKq
z2;YjYLzC&S=8SR$%ig*h`lLsReW1ieDmP=;w9Xq;2CnZ#&=EVf!n?BBVN|+
z-6piY3wdv9ms1|}dN|0`r&NzI9ENH6{(*^oUj}t_^|T7Eq?Tbdy~WcO51cvn`C^&2
zcCi+;SMl
zt9^3@?N;TQQaD`x(O?rYD))jf3Gw}+Jqzx7P(l7={5#8=p{D&{91?q+DdGtpCZ?)s_=`_^R5&(mT7LBAA$<99I|jU*?z^J4ISX
z=Peixa{AlEJ7OOVHf&i9%Tf(zjCVT6tW|pLM9be|Mm(kUskYO8)3=Jt0&;WznZEY_
ziM~u{%dwVav4$bX@7)?HpZUNSI9!HUKXm-VT(-W6nZY*tnDI5OTnF;pH*h@tz*V0l
z<{eXN5$3Sh!TdnC^NRY?+rgRFUNbG&M}|FaIQqg7?Q`f#R#M=+@Ae_br^x#b92d>*
z-2RfmcB^gWDQ)j|)L5NZcDNF6maU0$Yqmq*#re6f(KUiUc59zbvqL|liN8mf_>PU1
z=1I85>+vkL?tbZCCAXECXCu?CYws%OZ@N=kF=D?yHbQTkmC1@%N~(e0Tk5p;u30Is
z@lY-dixQA&ElRK%X8Q3XE^1~h6Vp35j7BT31DM{|5d}7er9Benl%^gi9{>yTNm$#zzcl$ml5n?=1D@>Sj(vhG5#f;dFQD2wYC>E
zFOj1uTzHCO=|#Bj%>ZaOPQ=(=({<&Q?7zM%JTwHY3e!Sg|x-3@*_k132
zqbki7i#D~ZS8IKkp`suAb|hMJ8L(ov6oLwe$!fp(h`L=DIX|zo)CTcjqhU$!Qom_f
zjhN`AI1jZLFMP|AGymi5d(TyP9@^fePeU3HEV9D&%;cZd8dcZsxl{i@`r&Ezw&V=#
zP7`+KK8RI{zJGj)?Z@{d&-P2LVw_VmXwFUre^a}Z2F=o-La#VasV|dNe{|-K8zEpSD(}fjHcorJ{
z>puKm(fRG~p_Em06b@Hy)1b{aZ}MbycyyTRsGoe0;Ci~~RzmTGYK50yzFa-?>;Py=
zI`9EA!>me;u^_+$(TDuc0_>0(BAy-TIIwcjXa&hBg@u;cdkcPjYu4awEuA!Z?!AlV
z5sxdA{-s{o6goYX#!pn%Yyy9*g{Hvqt!U7_K>Aj7eUDqm+v~NS@bPC#@N_(ZC$I5epyw)9lBWu)3)Q!inRXn)
zXyxIcFDSm%acHM?Osba?Jbn~ba;`EBa=huCU;Qabi~fwV&~$I-y|@eFy9CalrCnV#
z4EtehAaW}G9M8NXuI7zXLi47G2gujo13TDcUskm+
z`RS-2<4`*e{@vv;X3mMXgHnaa^U0;$yx*U7-SD$S$u6EBH92qm&Z}GMP+Zi#OR8q$
zDctdyv(;N)mx7p%q%nad#H(-+@ue*3s4SYq
zfV5jjQi0TQO|#+#ewB06R?J5-kHs`h2PnS}qxG3~v&HYF2AfQ>ZiRp_Q%f%$(+pcb
zzW}G62LNs1n8Wmkjox%TNxKIZRMoct0J}iu<~D!9tbqx=+7!;UTXm-`VdRc;^bHe_
zLGrMv3gV>4^>FT)I53qRGW-n+L1v~OQNS^d*nnE%*&TQYYSv;9OR|80b}~k4S2`y2
z2P5FJJvEMwNu{+HyR)=(T%yH>T%lP0^};Y(KX?H4+XI%r9xxx-QJVK@X$yBB;k}_(
zSN}p>+*75dD%LN*yPc=KjbI+deknaPXK`+9CNRzEj^!&teOcJlRJDm>4K{Ys&rgM8
zYsh;78(Lvf_~8NBo?fbB$N94Dc>YZ>71`VXjhO(Z!difr!FPJYEQV;!bxB$lvw=MA
zorPFVKwqjszjIg9>l+_NSJpgDr
zx->*`>?E9^cn93j(g&KK^{pM+CADT|zkeK&Al}-MhygCi_Z;
zzb741OoUoGTsgyfZO&9~3mnOxU!+D8f}Oz5y7j|We!yv3T1Q5tGPHCvmpF<`K*DXF
zmtV8~qxpMT9-GK-Aaonu6DGdar%Ik_&O@=!vYAnHx0lCO?v+&}a2gF53piJvmhwJr
zIBVupiPTacQC_9?ta=yfg>@9M9u1F{7)UKlV--(C&r@HT_)hz&5_VYGkBAq2Un7$JZ46H(vt@4wRYIid8adFD|70*hL
z{(P77CB+*JOQyqyM)I`X{KGVlckPY3m203nKK7nMDM5A*Shs0eITc>dxHOK;4PRc5pL4cw6h<|`tqCRicohnM##
zrEBQt>XbfwQS_Xwr1
z_g}62pI%&en-3=5s7EpKe7vo>q2P=>*j$;LHdrT)CEuWR|01GznhjZ9=M95h`9^^D
zkMIiG{`v;0ZT@^UuF*{1Xuu(}sy?mNGuHEr;)T)x_R?J`yL#)8QVLN*jFz#n?5{IQ
zSE9dELF-t|5hM2~WfaG%g^PyMERlJ?I*8s0TSl=hK&{@CRNPHrITiJ0L3_#nEC7%T
zHUU^?&jFgXnUj9^$I@wn_^55sx)hN62nFRpcX=bf~5V%#s&KO
z+mk1^uH3DYnN>yr(W_lX4P?CFMF4n4S&$dt=JzM|R%pW21y_;NkTfe_9|bu{evU_F
z=Q|Z*!;6bI(>kn+whdMM?Z6|D-#DVoFOFCk{mBO#@_?E|&{UVJ(&5rhqz5an)stwa_0D@;J0~7<;Hnp0(U{lFm
zI;HW>Ud|?4Z4Ek{_~OdcmE4z3T8bFoiq8O7Z#j@NI6tjR)Z{@gm;8CARi-wG2LAtU
z7e6%37#%v1p&Dx^ZtX}FG$i#w3v>qZM6M{)SE$T$W*l2@qzZ_pqV~e!&D>3pa+7pc
zl%l9X(yqOnlWd6fFLn8R7t)0IdecTATxpY+Q?l7Uey?$r^W$CjG)sK&u3mtO&shj3
zE0{t14EiMs`n1Ylsv+DZMC+2CD(VEZZyP55S!fmBHgFDZ>UYaEX9SnTISJV$5iG--
zTvltA7uP)&{o`?>=~M$=7RL0pV(_aUGh$zu?w2p07hn6ywB!#=^uLi^#4oagAjqtL
zlU~BVT8niRyqOxXyIydsLS$^ra4T&}`e@0vQ~YI(gPMMx74&NV_#SvN`cD#XzHU05
zcvK=QiFb3O8}_<#wR}Be(PSjI+xzA)uZ)R
z7phol0xV*p#siJvq-q8S>RYG9PbL&AkU(%@nn784CAEyG4HbxU{7$aBVM*szu;M1d
zN-nN!Z3Hst(p1C1h;EDE{MkzLF`Kz7oWUGxN=IZq7QV~!lG)C%h4}
z%>_z*31Ig@fucK2Oo^2pR2Me^8H)pn6@PfSIKTPFqVQW)UQ0yPgAMoYUi&@I9
zLeO=77C12C`n8p67}8`h|B&x2^xjzayQ=MYMf3c30O10=(4zC_q2Y8ijei?P+r~`eC)9rXxFZcd
z!j&1tV6pKG$h3b35V?tfX`z86Z~xU#kLPwdiSnjk456O)X~h#Olk{+x=SPPFS0*_6
zQI3z%>ulhnV?l}yS$_*Wdw>!BlL?x)1)9wMeb500I4F-|g9VYG`bJ}kHb&PX^Km}f
zn9Me)V*CYwp~qpFy55tb5*9IAd@`)oip&z}}Ei7vg{-Pn26L;mCg
z%6L}*t_4ks23pkGBK8d`()RTuH&T??I|q7{KNsPMfhTutQG#3VZwn+NY>dRp
z&KhK0`r)yP{kpvk<(xS|5Oc!p&8pu9YF74~CxsFUKywbmcL3f2Kub~k%NU*v``eu7
zf6W>4hxxnxO7B5Up@O$wqnZUliuKGoY}Ju*lYY6ZU&2xIuwqEI_~vsL@d;>+dW(tY
zt)9%7XSvUeLq9RO3*g$YB`UR-(uBy$b=8XzrbBFRV=rjh6!Z1%FI$EADk*)lcc!mk
zI^l