From 0f3e5a3872defc3e7197e01f8ae7e760b22c00bf Mon Sep 17 00:00:00 2001
From: Hui Zhang
Date: Thu, 16 Sep 2021 05:59:16 +0000
Subject: [PATCH] run_all with aishell/s1

---
 tests/benchmark/run_all.sh       | 27 +++++++++++++++++++--------
 tests/benchmark/run_benchmark.sh |  2 ++
 2 files changed, 21 insertions(+), 8 deletions(-)

diff --git a/tests/benchmark/run_all.sh b/tests/benchmark/run_all.sh
index 7564174b..7aa11d0f 100644
--- a/tests/benchmark/run_all.sh
+++ b/tests/benchmark/run_all.sh
@@ -1,20 +1,29 @@
 #!/bin/bash
 
-# collect env info
-bash ../../utils/pd_env_collect.sh
-
+ROOT_DIR=../../
 
+# Script for stably reproducing performance; by default it runs with py37 inside the standard docker environment:
+# collect env info
+bash ${ROOT_DIR}/utils/pd_env_collect.sh
+cat pd_env.txt
 
-# Script for stably reproducing performance; by default it runs with py37 inside the standard docker environment: paddlepaddle/paddle:latest-gpu-cuda10.1-cudnn7  paddle=2.1.2  py=37
 # Working directory: needs to be specified
-cd **
+pushd ${ROOT_DIR}/examples/aishell/s1
+
 # 1. Install the dependencies required by this model (note any optimization strategies that are enabled)
-pip install ...
+pushd ${ROOT_DIR}/tools; make; popd
+source ${ROOT_DIR}/tools/venv/bin/activate
+pushd ${ROOT_DIR}; bash setup.sh; popd
+
+
 # 2. Copy the data and pretrained models required by this model
+mkdir -p exp/log
+bash local/data.sh &> exp/log/data.log
+
 # 3. Run in batch (if batch runs are inconvenient, steps 1 and 2 must be moved into each individual model)
 
-model_mode_list=(MobileNetv1 MobileNetv2)
-fp_item_list=(fp32 fp16)
+model_mode_list=(conformer)
+fp_item_list=(fp32)
 bs_item=(32 64 96)
 for model_mode in ${model_mode_list[@]}; do
     for fp_item in ${fp_item_list[@]}; do
@@ -31,3 +40,5 @@ for model_mode in ${model_mode_list[@]}; do
         done
     done
 done
+
+popd # aishell/s1

diff --git a/tests/benchmark/run_benchmark.sh b/tests/benchmark/run_benchmark.sh
index 2b9cf70f..625d3616 100644
--- a/tests/benchmark/run_benchmark.sh
+++ b/tests/benchmark/run_benchmark.sh
@@ -1,6 +1,7 @@
 #!/bin/bash
 
 set -xe
+
 # Usage example: CUDA_VISIBLE_DEVICES=0 bash run_benchmark.sh ${run_mode} ${bs_item} ${fp_item} 500 ${model_mode}
 # Parameter description
 function _set_params(){
@@ -17,6 +18,7 @@ function _set_params(){
     num_gpu_devices=${#arr[*]}
     log_file=${run_log_path}/${model_name}_${run_mode}_bs${batch_size}_${fp_item}_${num_gpu_devices}
 }
+
 function _train(){
     echo "Train on ${num_gpu_devices} GPUs"
     echo "current CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES, gpus=$num_gpu_devices, batch_size=$batch_size"
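
Note: the diff context in run_all.sh elides the body of the innermost batch-size loop. Based on the invocation documented at the top of run_benchmark.sh (CUDA_VISIBLE_DEVICES=0 bash run_benchmark.sh ${run_mode} ${bs_item} ${fp_item} 500 ${model_mode}), that body presumably looks roughly like the sketch below. The sp/mp run modes, GPU lists, the fixed 500 argument, the sleep intervals, and the loop variable name bs are assumptions borrowed from the common PaddlePaddle benchmark template, not taken from this patch.

    # Hypothetical sketch of the elided loop body (not part of this patch):
    # one single-process (sp) and one multi-process (mp) benchmark run per
    # (model, precision, batch size) combination; bs iterates over the
    # bs_item array declared above.
    for bs in ${bs_item[@]}; do
        run_mode=sp
        CUDA_VISIBLE_DEVICES=0 bash run_benchmark.sh ${run_mode} ${bs} ${fp_item} 500 ${model_mode}
        sleep 60
        run_mode=mp
        CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash run_benchmark.sh ${run_mode} ${bs} ${fp_item} 500 ${model_mode}
        sleep 60
    done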