diff --git a/tests/test_tipc/common_func.sh b/tests/test_tipc/common_func.sh
new file mode 100644
index 00000000..e2ff5c4d
--- /dev/null
+++ b/tests/test_tipc/common_func.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+# parse the key from a "key:value" config line
+function func_parser_key(){
+    strs=$1
+    IFS=":"
+    array=(${strs})
+    tmp=${array[0]}
+    echo ${tmp}
+}
+
+# parse the value from a "key:value" config line
+function func_parser_value(){
+    strs=$1
+    IFS=":"
+    array=(${strs})
+    tmp=${array[1]}
+    echo ${tmp}
+}
+
+# join a key and a value into a "key=value" CLI argument; echo a blank for null/empty entries
+function func_set_params(){
+    key=$1
+    value=$2
+    if [ ${key}x = "null"x ];then
+        echo " "
+    elif [[ ${value} = "null" ]] || [[ ${value} = " " ]] || [ ${#value} -le 0 ];then
+        echo " "
+    else
+        echo "${key}=${value}"
+    fi
+}
+
+# parse a "key:mode1=value1|mode2=value2" line and echo the value that matches MODE
+function func_parser_params(){
+    strs=$1
+    MODE=$2
+    IFS=":"
+    array=(${strs})
+    key=${array[0]}
+    tmp=${array[1]}
+    IFS="|"
+    res=""
+    for _params in ${tmp[*]}; do
+        IFS="="
+        array=(${_params})
+        mode=${array[0]}
+        value=${array[1]}
+        if [[ ${mode} = ${MODE} ]]; then
+            IFS="|"
+            #echo $(func_set_params "${mode}" "${value}")
+            echo $value
+            break
+        fi
+        IFS="|"
+    done
+    echo ${res}
+}
+
+# log whether the last command succeeded, judging by its exit code
+function status_check(){
+    last_status=$1   # the exit code
+    run_command=$2
+    run_log=$3
+    if [ $last_status -eq 0 ]; then
+        echo -e "\033[33m Run successfully with command - ${run_command}! \033[0m" | tee -a ${run_log}
+    else
+        echo -e "\033[33m Run failed with command - ${run_command}! \033[0m" | tee -a ${run_log}
+    fi
+}
\ No newline at end of file
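A quick sketch of how these helpers behave on a TIPC config line; the sample line and values below are hypothetical:

    line="--batch-size:6|16"
    func_parser_key "${line}"              # -> "--batch-size"
    func_parser_value "${line}"            # -> "6|16"
    func_set_params "--batch-size" "6"     # -> "--batch-size=6"
    func_set_params "null" "6"             # -> " " (the argument is dropped)
    func_parser_params "epoch:benchmark_train=1" "benchmark_train"   # -> "1"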
diff --git a/tests/test_tipc/configs/conformer/train_benchmark.txt b/tests/test_tipc/configs/conformer/train_benchmark.txt
index 68f9e1ef..3833f144 100644
--- a/tests/test_tipc/configs/conformer/train_benchmark.txt
+++ b/tests/test_tipc/configs/conformer/train_benchmark.txt
@@ -54,4 +54,4 @@ batch_size:16|30
 fp_items:fp32
 iteration:50
 --profiler-options:"batch_range=[10,35];state=GPU;tracer_option=Default;profile_path=model.profile"
-flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096"
+flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
diff --git a/tests/test_tipc/configs/pwgan/train_benchmark.txt b/tests/test_tipc/configs/pwgan/train_benchmark.txt
new file mode 100644
index 00000000..e936da3c
--- /dev/null
+++ b/tests/test_tipc/configs/pwgan/train_benchmark.txt
@@ -0,0 +1,57 @@
+===========================train_params===========================
+model_name:pwgan
+python:python3.7
+gpu_list:0|0,1
+null:null
+null:null
+--max-iter:100
+null:null
+--batch-size:6
+null:null
+null:null
+null:null
+null:null
+##
+trainer:norm_train
+norm_train: ../paddlespeech/t2s/exps/gan_vocoder/parallelwave_gan/train.py --ngpu=1 --train-metadata=dump/train/norm/metadata.jsonl --dev-metadata=dump/dev/norm/metadata.jsonl --config=../examples/csmsc/voc1/conf/default.yaml --output-dir=exp/default --run-benchmark=true --max-iter 10
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:null
+null:null
+##
+===========================infer_params===========================
+null:null
+null:null
+norm_export: null
+quant_export:null
+fpgm_export:null
+distill_export:null
+export1:null
+export2:null
+null:null
+infer_model:null
+infer_export:null
+infer_quant:null
+inference:null
+null:null
+null:null
+null:null
+null:null
+null:null
+null:null
+null:null
+null:null
+null:null
+null:null
+null:null
+===========================train_benchmark_params==========================
+batch_size:6|16
+fp_items:fp32
+iteration:50
+--profiler_options:"batch_range=[10,35];state=GPU;tracer_option=Default;profile_path=model.profile"
+flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096
diff --git a/tests/test_tipc/prepare.sh b/tests/test_tipc/prepare.sh
index ea34acb8..0280e5d4 100644
--- a/tests/test_tipc/prepare.sh
+++ b/tests/test_tipc/prepare.sh
@@ -58,4 +58,19 @@ if [ ${MODE} = "benchmark_train" ];then
         sed -i "s#data/#test_tipc/conformer/benchmark_train/data/#g" ${curPath}/conformer/benchmark_train/conf/preprocess.yaml
     fi
 
-fi
+
+    if [ ${model_name} == "pwgan" ]; then
+        # download the csmsc dataset and unpack it
+        wget -nc https://weixinxcxdb.oss-cn-beijing.aliyuncs.com/gwYinPinKu/BZNSYP.rar
+        mkdir -p BZNSYP
+        unrar x BZNSYP.rar BZNSYP
+        wget -nc https://paddlespeech.bj.bcebos.com/Parakeet/benchmark/durations.txt
+        # preprocess the data
+        python ../paddlespeech/t2s/exps/gan_vocoder/preprocess.py --rootdir=BZNSYP/ --dumpdir=dump --num-cpu=20 --cut-sil=True --dur-file=durations.txt --config=../examples/csmsc/voc1/conf/default.yaml
+        python ../utils/compute_statistics.py --metadata=dump/train/raw/metadata.jsonl --field-name="feats"
+        python ../paddlespeech/t2s/exps/gan_vocoder/normalize.py --metadata=dump/train/raw/metadata.jsonl --dumpdir=dump/train/norm --stats=dump/train/feats_stats.npy
+        python ../paddlespeech/t2s/exps/gan_vocoder/normalize.py --metadata=dump/dev/raw/metadata.jsonl --dumpdir=dump/dev/norm --stats=dump/train/feats_stats.npy
+        python ../paddlespeech/t2s/exps/gan_vocoder/normalize.py --metadata=dump/test/raw/metadata.jsonl --dumpdir=dump/test/norm --stats=dump/train/feats_stats.npy
+    fi
+
+fi
\ No newline at end of file
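The preprocessing chain above produces the metadata that the pwgan config's --train-metadata and --dev-metadata flags point at; inferred from the flags (not verified against the scripts), the resulting layout is roughly:

    dump/
        train/raw/metadata.jsonl     # written by preprocess.py
        train/norm/metadata.jsonl    # written by normalize.py
        train/feats_stats.npy        # written by compute_statistics.py
        dev/norm/metadata.jsonl
        test/norm/metadata.jsonl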
diff --git a/tests/test_tipc/test_train_inference_python.sh b/tests/test_tipc/test_train_inference_python.sh
index 2043e258..ef5747b4 100644
--- a/tests/test_tipc/test_train_inference_python.sh
+++ b/tests/test_tipc/test_train_inference_python.sh
@@ -20,10 +20,10 @@ train_use_gpu_value=$(func_parser_value "${lines[4]}")
 autocast_list=$(func_parser_value "${lines[5]}")
 autocast_key=$(func_parser_key "${lines[5]}")
 epoch_key=$(func_parser_key "${lines[6]}")
-epoch_num=$(func_parser_params "${lines[6]}")
+epoch_num=$(func_parser_params "${lines[6]}" "${MODE}")
 save_model_key=$(func_parser_key "${lines[7]}")
 train_batch_key=$(func_parser_key "${lines[8]}")
-train_batch_value=$(func_parser_params "${lines[8]}")
+train_batch_value=$(func_parser_params "${lines[8]}" "${MODE}")
 pretrain_model_key=$(func_parser_key "${lines[9]}")
 pretrain_model_value=$(func_parser_value "${lines[9]}")
 train_model_name=$(func_parser_value "${lines[10]}")
@@ -50,7 +50,6 @@ eval_key1=$(func_parser_key "${lines[24]}")
 eval_value1=$(func_parser_value "${lines[24]}")
 
 save_infer_key=$(func_parser_key "${lines[27]}")
-save_infer_value=$(func_parser_value "${lines[27]}")
 export_weight=$(func_parser_key "${lines[28]}")
 norm_export=$(func_parser_value "${lines[29]}")
 pact_export=$(func_parser_value "${lines[30]}")
@@ -62,11 +61,11 @@ export_key2=$(func_parser_key "${lines[34]}")
 export_value2=$(func_parser_value "${lines[34]}")
 inference_dir=$(func_parser_value "${lines[35]}")
 
-# parser inference model 
+# parse inference model
 infer_model_dir_list=$(func_parser_value "${lines[36]}")
 infer_export_list=$(func_parser_value "${lines[37]}")
 infer_is_quant=$(func_parser_value "${lines[38]}")
-# parser inference 
+# parse inference
 inference_py=$(func_parser_value "${lines[39]}")
 use_gpu_key=$(func_parser_key "${lines[40]}")
 use_gpu_list=$(func_parser_value "${lines[40]}")
@@ -91,44 +90,42 @@ infer_value1=$(func_parser_value "${lines[50]}")
 
 # parser klquant_infer
 if [ ${MODE} = "klquant_whole_infer" ]; then
-    dataline=$(awk 'NR==1 NR==17{print}' $FILENAME)
+    dataline=$(awk 'NR==1, NR==17{print}' $FILENAME)
     lines=(${dataline})
     model_name=$(func_parser_value "${lines[1]}")
     python=$(func_parser_value "${lines[2]}")
-    # parser inference model 
-    infer_model_dir_list=$(func_parser_value "${lines[3]}")
-    infer_export_list=$(func_parser_value "${lines[4]}")
-    infer_is_quant=$(func_parser_value "${lines[5]}")
-    # parser inference 
-    inference_py=$(func_parser_value "${lines[6]}")
-    use_gpu_key=$(func_parser_key "${lines[7]}")
-    use_gpu_list=$(func_parser_value "${lines[7]}")
-    use_mkldnn_key=$(func_parser_key "${lines[8]}")
-    use_mkldnn_list=$(func_parser_value "${lines[8]}")
-    cpu_threads_key=$(func_parser_key "${lines[9]}")
-    cpu_threads_list=$(func_parser_value "${lines[9]}")
-    batch_size_key=$(func_parser_key "${lines[10]}")
-    batch_size_list=$(func_parser_value "${lines[10]}")
-    use_trt_key=$(func_parser_key "${lines[11]}")
-    use_trt_list=$(func_parser_value "${lines[11]}")
-    precision_key=$(func_parser_key "${lines[12]}")
-    precision_list=$(func_parser_value "${lines[12]}")
-    infer_model_key=$(func_parser_key "${lines[13]}")
-    image_dir_key=$(func_parser_key "${lines[14]}")
-    infer_img_dir=$(func_parser_value "${lines[14]}")
-    save_log_key=$(func_parser_key "${lines[15]}")
-    benchmark_key=$(func_parser_key "${lines[16]}")
-    benchmark_value=$(func_parser_value "${lines[16]}")
-    infer_key1=$(func_parser_key "${lines[17]}")
-    infer_value1=$(func_parser_value "${lines[17]}")
+    export_weight=$(func_parser_key "${lines[3]}")
+    save_infer_key=$(func_parser_key "${lines[4]}")
+    # parse inference model
+    infer_model_dir_list=$(func_parser_value "${lines[5]}")
+    infer_export_list=$(func_parser_value "${lines[6]}")
+    infer_is_quant=$(func_parser_value "${lines[7]}")
+    # parse inference
+    inference_py=$(func_parser_value "${lines[8]}")
+    use_gpu_key=$(func_parser_key "${lines[9]}")
+    use_gpu_list=$(func_parser_value "${lines[9]}")
+    use_mkldnn_key=$(func_parser_key "${lines[10]}")
+    use_mkldnn_list=$(func_parser_value "${lines[10]}")
+    cpu_threads_key=$(func_parser_key "${lines[11]}")
+    cpu_threads_list=$(func_parser_value "${lines[11]}")
+    batch_size_key=$(func_parser_key "${lines[12]}")
+    batch_size_list=$(func_parser_value "${lines[12]}")
+    use_trt_key=$(func_parser_key "${lines[13]}")
+    use_trt_list=$(func_parser_value "${lines[13]}")
+    precision_key=$(func_parser_key "${lines[14]}")
+    precision_list=$(func_parser_value "${lines[14]}")
+    infer_model_key=$(func_parser_key "${lines[15]}")
+    image_dir_key=$(func_parser_key "${lines[16]}")
+    infer_img_dir=$(func_parser_value "${lines[16]}")
+    save_log_key=$(func_parser_key "${lines[17]}")
+    save_log_value=$(func_parser_value "${lines[17]}")
+    benchmark_key=$(func_parser_key "${lines[18]}")
+    benchmark_value=$(func_parser_value "${lines[18]}")
+    infer_key1=$(func_parser_key "${lines[19]}")
+    infer_value1=$(func_parser_value "${lines[19]}")
 fi
 
-save_model_value=$(func_parser_value "${lines[7]}")
-if [[ ${save_model_value} = " " ]] || [[ ${save_model_value} = "null" ]] || [[ ${save_model_value} = "" ]];then
-    LOG_PATH="./test_tipc/output"
-else
-    LOG_PATH=${save_model_value}
-fi
+LOG_PATH="./test_tipc/output"
 
 mkdir -p ${LOG_PATH}
 status_log="${LOG_PATH}/results_python.log"
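In the awk selector above, the comma is awk's range operator: `NR==1, NR==17` runs the action on every record from line 1 through line 17, which is what slices the first 17 config lines into dataline. A one-liner to confirm the behavior:

    seq 40 | awk 'NR==1, NR==17{print}' | wc -l    # prints 17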
"False" ] || [ ${use_gpu} = "cpu" ]; then for use_mkldnn in ${use_mkldnn_list[*]}; do @@ -158,15 +155,17 @@ function func_inference(){ continue fi # skip when quant model inference but precision is not int8 set_precision=$(func_set_params "${precision_key}" "${precision}") - + _save_log_path="${_log_path}/python_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log" set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}") set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}") set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}") + set_mkldnn=$(func_set_params "${use_mkldnn_key}" "${use_mkldnn}") set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}") set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}") + set_infer_params0=$(func_set_params "${save_log_key}" "${save_log_value}") set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}") - command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_precision} ${set_infer_params1} > ${_save_log_path} 2>&1 " + command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_params0} ${set_infer_data} ${set_benchmark} ${set_precision} ${set_infer_params1} > ${_save_log_path} 2>&1 " eval $command last_status=${PIPESTATUS[0]} eval "cat ${_save_log_path}" @@ -180,11 +179,11 @@ function func_inference(){ for precision in ${precision_list[*]}; do if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then continue - fi + fi if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then continue fi - if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then + if [[ ${use_trt} = "False" && ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then continue fi for batch_size in ${batch_size_list[*]}; do @@ -195,13 +194,14 @@ function func_inference(){ set_tensorrt=$(func_set_params "${use_trt_key}" "${use_trt}") set_precision=$(func_set_params "${precision_key}" "${precision}") set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}") + set_infer_params0=$(func_set_params "${save_log_key}" "${save_log_value}") set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}") - command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 " + command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} ${set_infer_params0} > ${_save_log_path} 2>&1 " eval $command last_status=${PIPESTATUS[0]} eval "cat ${_save_log_path}" status_check $last_status "${command}" "${status_log}" - + done done done @@ -227,14 +227,17 @@ if [ ${MODE} = "whole_infer" ] || [ ${MODE} = "klquant_whole_infer" ]; then for infer_model in ${infer_model_dir_list[*]}; do # run export if [ ${infer_run_exports[Count]} != "null" ];then - save_infer_dir=$(dirname $infer_model) + if [ ${MODE} = "klquant_whole_infer" ]; then + save_infer_dir="${infer_model}_klquant" + fi + if [ ${MODE} = "whole_infer" ]; then + save_infer_dir="${infer_model}" + fi set_export_weight=$(func_set_params "${export_weight}" 
"${infer_model}") set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_dir}") - # For lac which needs the `data_dir` args - set_export1_key=$(func_set_params "${export_key1}" "${export_value1}") - export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key} ${set_export1_key}" - echo ${infer_run_exports[Count]} - echo $export_cmd + export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key}" + echo ${infer_run_exports[Count]} + echo $export_cmd eval $export_cmd status_export=$? status_check $status_export "${export_cmd}" "${status_log}" @@ -243,7 +246,7 @@ if [ ${MODE} = "whole_infer" ] || [ ${MODE} = "klquant_whole_infer" ]; then fi #run inference is_quant=${infer_quant_flag[Count]} - if [ ${MODE} = "klquant_infer" ]; then + if [ ${MODE} = "klquant_whole_infer" ]; then is_quant="True" fi func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant} @@ -261,7 +264,6 @@ else env="" elif [ ${#gpu} -le 1 ];then env="export CUDA_VISIBLE_DEVICES=${gpu}" - eval ${env} elif [ ${#gpu} -le 15 ];then IFS="," array=(${gpu}) @@ -275,13 +277,13 @@ else IFS="|" env=" " fi - for autocast in ${autocast_list[*]}; do + for autocast in ${autocast_list[*]}; do if [ ${autocast} = "amp" ]; then set_amp_config="Global.use_amp=True Global.scale_loss=1024.0 Global.use_dynamic_loss_scaling=True" else set_amp_config=" " - fi - for trainer in ${trainer_list[*]}; do + fi + for trainer in ${trainer_list[*]}; do flag_quant=False if [ ${trainer} = ${pact_key} ]; then run_train=${pact_trainer} @@ -324,10 +326,6 @@ else save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}" fi - # load pretrain from norm training if current trainer is pact or fpgm trainer - if ([ ${trainer} = ${pact_key} ] || [ ${trainer} = ${fpgm_key} ]) && [ ${nodes} -le 1 ]; then - set_pretrain="${load_norm_train_model}" - fi set_save_model=$(func_set_params "${save_model_key}" "${save_log}") if [ ${#gpu} -le 2 ];then # train with cpu or single gpu @@ -338,48 +336,42 @@ else cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} ${set_use_gpu} ${set_save_model} ${set_pretrain} ${set_epoch} ${set_autocast} ${set_batchsize} ${set_train_params1} ${set_amp_config}" fi # run train - eval "unset CUDA_VISIBLE_DEVICES" eval $cmd status_check $? "${cmd}" "${status_log}" set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/${train_model_name}") - # save norm trained models to set pretrain for pact training and fpgm training - if [ ${trainer} = ${trainer_norm} ] && [ ${nodes} -le 1 ]; then - load_norm_train_model=${set_eval_pretrain} - fi - # run eval + + # run eval if [ ${eval_py} != "null" ]; then + eval ${env} set_eval_params1=$(func_set_params "${eval_key1}" "${eval_value1}") - eval_cmd="${python} ${eval_py} ${set_eval_pretrain} ${set_use_gpu} ${set_eval_params1}" + eval_cmd="${python} ${eval_py} ${set_eval_pretrain} ${set_use_gpu} ${set_eval_params1}" eval $eval_cmd status_check $? 
"${eval_cmd}" "${status_log}" fi # run export model - if [ ${run_export} != "null" ]; then + if [ ${run_export} != "null" ]; then # run export model - save_infer_path="${save_infer_value}" + save_infer_path="${save_log}" set_export_weight=$(func_set_params "${export_weight}" "${save_log}/${train_model_name}") set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_path}") - # For lac which needs the `data_dir` args - set_export1_key=$(func_set_params "${export_key1}" "${export_value1}") - export_cmd="${python} ${run_export} ${set_export_weight} ${set_save_infer_key} ${set_export1_key}" + export_cmd="${python} ${run_export} ${set_export_weight} ${set_save_infer_key}" eval $export_cmd status_check $? "${export_cmd}" "${status_log}" #run inference eval $env - save_infer_path="${save_infer_value}" - - if [ ${inference_dir} != "null" ] && [ ${inference_dir} != '##' ]; then + save_infer_path="${save_log}" + if [[ ${inference_dir} != "null" ]] && [[ ${inference_dir} != '##' ]]; then infer_model_dir="${save_infer_path}/${inference_dir}" else infer_model_dir=${save_infer_path} fi func_inference "${python}" "${inference_py}" "${infer_model_dir}" "${LOG_PATH}" "${train_infer_img_dir}" "${flag_quant}" - + eval "unset CUDA_VISIBLE_DEVICES" fi - done # done with: for trainer in ${trainer_list[*]}; do - done # done with: for autocast in ${autocast_list[*]}; do + done # done with: for trainer in ${trainer_list[*]}; do + done # done with: for autocast in ${autocast_list[*]}; do done # done with: for gpu in ${gpu_list[*]}; do -fi # end if [ ${MODE} = "infer" ]; then +fi # end if [ ${MODE} = "infer" ]; then \ No newline at end of file