add paddle2onnx, test=tts

pull/1665/head
TianYuan 2 years ago
parent 124eb6af8f
commit 21c75684ac

@ -3,7 +3,7 @@ train_output_path=$1
stage=0
stop_stage=0
# only support default_fastspeech2 + hifigan now!
# only support default_fastspeech2 + hifigan/mb_melgan now!
# synthesize from metadata
if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
@ -28,4 +28,4 @@ if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
--phones_dict=dump/phone_id_map.txt \
--device=cpu \
--cpu_threads=2
fi
fi

@ -0,0 +1,22 @@
# Convert an exported static-graph Paddle model to ONNX with paddle2onnx.
# Usage: paddle2onnx.sh <train_output_path> <model_dir> <output_dir> <model>
#   train_output_path - experiment output root (e.g. exp/default)
#   model_dir         - subdir holding <model>.pdmodel / <model>.pdiparams
#   output_dir        - subdir to write <model>.onnx into
#   model             - full model name, e.g. fastspeech2_csmsc
train_output_path=$1
model_dir=$2
output_dir=$3
model=$4

# Default to paddle2onnx's new (dev) exporter.
enable_dev_version=True

# Strip the trailing "_<dataset>" suffix: mb_melgan_csmsc -> mb_melgan.
model_name=${model%_*}
echo "model_name: ${model_name}"

# NOTE(review): mb_melgan apparently does not convert with the dev exporter
# yet, so fall back to the legacy one for it — confirm with paddle2onnx docs.
if [ "${model_name}" = 'mb_melgan' ]; then
    enable_dev_version=False
fi

mkdir -p "${train_output_path}/${output_dir}"

paddle2onnx \
    --model_dir "${train_output_path}/${model_dir}" \
    --model_filename "${model}.pdmodel" \
    --params_filename "${model}.pdiparams" \
    --save_file "${train_output_path}/${output_dir}/${model}.onnx" \
    --enable_dev_version "${enable_dev_version}"

@ -41,3 +41,17 @@ if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
CUDA_VISIBLE_DEVICES=${gpus} ./local/inference.sh ${train_output_path} || exit -1
fi
# Stage 5: paddle2onnx — convert the exported static-graph models to ONNX.
# The static models must already be in ${train_output_path}/inference
# (produced by the earlier export stage); only the models below are tested.
if [ "${stage}" -le 5 ] && [ "${stop_stage}" -ge 5 ]; then
    # Pinned version known to work with these models — TODO confirm newer
    # paddle2onnx releases before bumping.
    pip install paddle2onnx==0.9.4
    ./local/paddle2onnx.sh "${train_output_path}" inference inference_onnx fastspeech2_csmsc
    ./local/paddle2onnx.sh "${train_output_path}" inference inference_onnx hifigan_csmsc
    ./local/paddle2onnx.sh "${train_output_path}" inference inference_onnx mb_melgan_csmsc
fi
# Stage 6: inference with onnxruntime (fastspeech2 + hifigan by default).
if [ "${stage}" -le 6 ] && [ "${stop_stage}" -ge 6 ]; then
    # onnxruntime is required at runtime; install it first if missing:
    # pip install onnxruntime
    ./local/ort_predict.sh "${train_output_path}"
fi

Loading…
Cancel
Save