diff --git a/examples/csmsc/tts3/local/ort_predict.sh b/examples/csmsc/tts3/local/ort_predict.sh
index 1a397ca1..3154f6e5 100755
--- a/examples/csmsc/tts3/local/ort_predict.sh
+++ b/examples/csmsc/tts3/local/ort_predict.sh
@@ -3,7 +3,7 @@ train_output_path=$1
 stage=0
 stop_stage=0
 
-# only support default_fastspeech2 + hifigan now!
+# only support default_fastspeech2 + hifigan/mb_melgan now!
 
 # synthesize from metadata
 if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
@@ -28,4 +28,4 @@ if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
         --phones_dict=dump/phone_id_map.txt \
         --device=cpu \
         --cpu_threads=2
-fi
\ No newline at end of file
+fi
diff --git a/examples/csmsc/tts3/local/paddle2onnx.sh b/examples/csmsc/tts3/local/paddle2onnx.sh
new file mode 100755
index 00000000..505f3b66
--- /dev/null
+++ b/examples/csmsc/tts3/local/paddle2onnx.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+# Export a static Paddle model to ONNX.
+# Usage: paddle2onnx.sh <train_output_path> <model_dir> <output_dir> <model>
+train_output_path=$1
+model_dir=$2
+output_dir=$3
+model=$4
+
+enable_dev_version=True
+
+# strip the trailing "_<dataset>" suffix, e.g. fastspeech2_csmsc -> fastspeech2
+model_name=${model%_*}
+echo "model_name: ${model_name}"
+
+# mb_melgan is not supported by the new exporter yet; fall back to the legacy one
+if [ "${model_name}" = 'mb_melgan' ]; then
+    enable_dev_version=False
+fi
+
+mkdir -p "${train_output_path}/${output_dir}"
+
+paddle2onnx \
+    --model_dir "${train_output_path}/${model_dir}" \
+    --model_filename "${model}.pdmodel" \
+    --params_filename "${model}.pdiparams" \
+    --save_file "${train_output_path}/${output_dir}/${model}.onnx" \
+    --enable_dev_version "${enable_dev_version}"
diff --git a/examples/csmsc/tts3/run.sh b/examples/csmsc/tts3/run.sh
index e1a149b6..325b2707 100755
--- a/examples/csmsc/tts3/run.sh
+++ b/examples/csmsc/tts3/run.sh
@@ -41,3 +41,17 @@
 if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
     CUDA_VISIBLE_DEVICES=${gpus} ./local/inference.sh ${train_output_path} || exit -1
 fi
+# paddle2onnx, please make sure the static models are in ${train_output_path}/inference first
+# we have only tested the following models so far
+if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
+    pip install paddle2onnx==0.9.4
+    ./local/paddle2onnx.sh "${train_output_path}" inference inference_onnx fastspeech2_csmsc
+    ./local/paddle2onnx.sh "${train_output_path}" inference inference_onnx hifigan_csmsc
+    ./local/paddle2onnx.sh "${train_output_path}" inference inference_onnx mb_melgan_csmsc
+fi
+
+# inference with onnxruntime, use fastspeech2 + hifigan by default
+if [ ${stage} -le 6 ] && [ ${stop_stage} -ge 6 ]; then
+    # pip install onnxruntime
+    ./local/ort_predict.sh "${train_output_path}"
+fi