diff --git a/examples/aishell3/tts3/local/lite_predict.sh b/examples/aishell3/tts3/local/lite_predict.sh index 8654b2876..e77e8b6c2 100755 --- a/examples/aishell3/tts3/local/lite_predict.sh +++ b/examples/aishell3/tts3/local/lite_predict.sh @@ -5,6 +5,7 @@ train_output_path=$1 stage=0 stop_stage=0 +# pwgan if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then python3 ${BIN_DIR}/../lite_predict.py \ --inference_dir=${train_output_path}/pdlite \ @@ -17,6 +18,7 @@ if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then --spk_id=0 fi +# hifigan if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then python3 ${BIN_DIR}/../lite_predict.py \ --inference_dir=${train_output_path}/pdlite \ diff --git a/examples/csmsc/tts3/README.md b/examples/csmsc/tts3/README.md index a2bd62494..39926259d 100644 --- a/examples/csmsc/tts3/README.md +++ b/examples/csmsc/tts3/README.md @@ -239,6 +239,7 @@ The ONNX model can be downloaded here: - [fastspeech2_cnndecoder_csmsc_streaming_onnx_1.0.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_cnndecoder_csmsc_streaming_onnx_1.0.0.zip) The Paddle-Lite model can be downloaded here: +> Please compile the develop version of Paddle-Lite to export and run TTS models, because TTS model support was added in https://github.com/PaddlePaddle/Paddle-Lite/pull/9587 and https://github.com/PaddlePaddle/Paddle-Lite/pull/9706 - [fastspeech2_csmsc_pdlite_1.3.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_csmsc_pdlite_1.3.0.zip) - [fastspeech2_cnndecoder_csmsc_pdlite_1.3.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_cnndecoder_csmsc_pdlite_1.3.0.zip) - [fastspeech2_cnndecoder_csmsc_streaming_pdlite_1.3.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_cnndecoder_csmsc_streaming_pdlite_1.3.0.zip) diff --git a/examples/csmsc/tts3/run.sh b/examples/csmsc/tts3/run.sh index ce083e660..14308af4e 100755 ---
a/examples/csmsc/tts3/run.sh +++ b/examples/csmsc/tts3/run.sh @@ -64,6 +64,9 @@ fi # must run after stage 3 (which stage generated static models) if [ ${stage} -le 7 ] && [ ${stop_stage} -ge 7 ]; then + # NOTE by yuantian 2022.11.21: please compile the develop version of Paddle-Lite to export and run TTS models, + # because TTS model support was added in https://github.com/PaddlePaddle/Paddle-Lite/pull/9587 + # and https://github.com/PaddlePaddle/Paddle-Lite/pull/9706 ./local/export2lite.sh ${train_output_path} inference pdlite fastspeech2_csmsc x86 ./local/export2lite.sh ${train_output_path} inference pdlite pwgan_csmsc x86 # ./local/export2lite.sh ${train_output_path} inference pdlite mb_melgan_csmsc x86 diff --git a/examples/vctk/tts3/local/lite_predict.sh b/examples/vctk/tts3/local/lite_predict.sh index a4f70fe78..eb608535b 100755 --- a/examples/vctk/tts3/local/lite_predict.sh +++ b/examples/vctk/tts3/local/lite_predict.sh @@ -5,6 +5,7 @@ train_output_path=$1 stage=0 stop_stage=0 +# pwgan if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then python3 ${BIN_DIR}/../lite_predict.py \ --inference_dir=${train_output_path}/pdlite \ @@ -18,6 +19,7 @@ if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then --lang=en fi +# hifigan if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then python3 ${BIN_DIR}/../lite_predict.py \ --inference_dir=${train_output_path}/pdlite \ diff --git a/paddlespeech/t2s/exps/syn_utils.py b/paddlespeech/t2s/exps/syn_utils.py index d256c6347..cea125291 100644 --- a/paddlespeech/t2s/exps/syn_utils.py +++ b/paddlespeech/t2s/exps/syn_utils.py @@ -520,7 +520,6 @@ def get_lite_predictor(model_dir: Optional[os.PathLike]=None, cpu_threads: int=1): config = MobileConfig() config.set_model_from_file(str(Path(model_dir) / model_file)) - # config.set_threads(cpu_threads) predictor = create_paddle_predictor(config) return predictor