diff --git a/examples/csmsc/tts3_rhy/README.md b/examples/csmsc/tts3_rhy/README.md
index 855aa885c..da6191592 100644
--- a/examples/csmsc/tts3_rhy/README.md
+++ b/examples/csmsc/tts3_rhy/README.md
@@ -14,11 +14,13 @@ Remember in our repo, you should add `--rhy-with-duration` flag to obtain the rh
 Assume the path to the dataset is `~/datasets/BZNSYP`.
 Assume the path to the MFA result of CSMSC is `./baker_alignment_tone`.
 Run the command below to
+
 1. **source path**.
 2. preprocess the dataset.
 3. train the model.
 4. synthesize wavs.
     - synthesize waveform from `metadata.jsonl`.
+    - select vocoder type via `--stage` (0 = pwgan, 1 = hifigan)
     - synthesize waveform from a text file.
 5. inference using the static model.
 ```bash
diff --git a/examples/csmsc/tts3_rhy/run.sh b/examples/csmsc/tts3_rhy/run.sh
index e49f43ee6..61020093b 100755
--- a/examples/csmsc/tts3_rhy/run.sh
+++ b/examples/csmsc/tts3_rhy/run.sh
@@ -28,11 +28,11 @@ if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
 fi
 
 if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
-    # synthesize, vocoder is pwgan by default
-    CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1
+    # synthesize; vocoder is pwgan by default (stage 0), stage 1 uses hifigan
+    CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh --stage 0 ${conf_path} ${train_output_path} ${ckpt_name} || exit -1
 fi
 
 if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
-    # synthesize_e2e, vocoder is pwgan by default
-    CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1
+    # synthesize_e2e; vocoder is pwgan by default (stage 0), stage 1 uses hifigan
+    CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh --stage 0 ${conf_path} ${train_output_path} ${ckpt_name} || exit -1
 fi
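
For quick verification of the new vocoder switch, here is a minimal sketch of a session that exercises both settings. It assumes `local/synthesize.sh` accepts `--stage` in the usual `parse_options.sh` style and that a trained hifigan checkpoint is available; the variable values are illustrative, not taken from this PR (the checkpoint name in particular is hypothetical).

```bash
# Run stage-2 synthesis once per vocoder. Take the real values for these
# variables from your checkout of examples/csmsc/tts3_rhy/run.sh.
gpus=0
conf_path=conf/default.yaml
train_output_path=exp/default
ckpt_name=snapshot_iter_153.pdz   # hypothetical checkpoint name

# default path, as wired in run.sh: pwgan vocoder
CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh --stage 0 ${conf_path} ${train_output_path} ${ckpt_name} || exit -1

# same acoustic-model checkpoint, hifigan vocoder
CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh --stage 1 ${conf_path} ${train_output_path} ${ckpt_name} || exit -1
```

The same `--stage` switch applies to `./local/synthesize_e2e.sh` in the stage-3 block.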