From 3e1ba8640b0227e92f6149446f97df6057a4a7f9 Mon Sep 17 00:00:00 2001 From: nyx-c-language Date: Sat, 15 Mar 2025 17:52:25 +0800 Subject: [PATCH] =?UTF-8?q?=E8=A1=A5=E5=85=A8=E5=90=88=E6=88=90=E7=B3=BB?= =?UTF-8?q?=E5=88=97=E4=B8=AD=E7=9A=84=E8=84=9A=E6=9C=AC=E4=B8=AD=E5=8F=82?= =?UTF-8?q?=E6=95=B0=E7=BC=BA=E5=A4=B1=EF=BC=9Acsmsc/tts3=5Frhy?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- examples/csmsc/tts3_rhy/README.md | 2 ++ examples/csmsc/tts3_rhy/run.sh | 8 ++++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/examples/csmsc/tts3_rhy/README.md b/examples/csmsc/tts3_rhy/README.md index 855aa885c..da6191592 100644 --- a/examples/csmsc/tts3_rhy/README.md +++ b/examples/csmsc/tts3_rhy/README.md @@ -14,11 +14,13 @@ Remember in our repo, you should add `--rhy-with-duration` flag to obtain the rh Assume the path to the dataset is `~/datasets/BZNSYP`. Assume the path to the MFA result of CSMSC is `./baker_alignment_tone`. Run the command below to + 1. **source path**. 2. preprocess the dataset. 3. train the model. 4. synthesize wavs. - synthesize waveform from `metadata.jsonl`. + - select vocoder type via `--stage` (0 = pwgan, 1 = hifigan) - synthesize waveform from a text file. 5. inference using the static model. 
```bash diff --git a/examples/csmsc/tts3_rhy/run.sh b/examples/csmsc/tts3_rhy/run.sh index e49f43ee6..61020093b 100755 --- a/examples/csmsc/tts3_rhy/run.sh +++ b/examples/csmsc/tts3_rhy/run.sh @@ -28,11 +28,11 @@ if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then fi if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then - # synthesize, vocoder is pwgan by default - CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1 + # synthesize, vocoder is pwgan by default (stage 0); stage 1 will use hifigan as vocoder + CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh --stage 0 ${conf_path} ${train_output_path} ${ckpt_name} || exit -1 fi if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then - # synthesize_e2e, vocoder is pwgan by default - CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1 + # synthesize_e2e, vocoder is pwgan by default (stage 0); stage 1 will use hifigan as vocoder + CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh --stage 0 ${conf_path} ${train_output_path} ${ckpt_name} || exit -1 fi