From 68164dd39f92c837cfe42fac5b4b29e8f4dcb81d Mon Sep 17 00:00:00 2001
From: Jackwaterveg <87408988+Jackwaterveg@users.noreply.github.com>
Date: Wed, 15 Dec 2021 17:52:03 +0800
Subject: [PATCH] [asr]rename test_hub to test_wav (#1132)

* add the readme, librispeech_asr1

* fix the test_hub

* test=asr
---
 examples/aishell/asr0/local/{test_hub.sh => test_wav.sh}  | 2 +-
 examples/aishell/asr0/run.sh                              | 2 +-
 examples/aishell/asr1/README.md                           | 4 ++--
 examples/aishell/asr1/local/{test_hub.sh => test_wav.sh}  | 2 +-
 examples/aishell/asr1/run.sh                              | 2 +-
 .../librispeech/asr0/local/{test_hub.sh => test_wav.sh}   | 2 +-
 examples/librispeech/asr0/run.sh                          | 2 +-
 examples/librispeech/asr1/README.md                       | 6 +++---
 .../librispeech/asr1/local/{test_hub.sh => test_wav.sh}   | 2 +-
 examples/librispeech/asr1/run.sh                          | 4 +---
 .../s2t/exps/deepspeech2/bin/{test_hub.py => test_wav.py} | 0
 11 files changed, 13 insertions(+), 15 deletions(-)
 rename examples/aishell/asr0/local/{test_hub.sh => test_wav.sh} (95%)
 rename examples/aishell/asr1/local/{test_hub.sh => test_wav.sh} (96%)
 rename examples/librispeech/asr0/local/{test_hub.sh => test_wav.sh} (95%)
 rename examples/librispeech/asr1/local/{test_hub.sh => test_wav.sh} (97%)
 rename paddlespeech/s2t/exps/deepspeech2/bin/{test_hub.py => test_wav.py} (100%)

diff --git a/examples/aishell/asr0/local/test_hub.sh b/examples/aishell/asr0/local/test_wav.sh
similarity index 95%
rename from examples/aishell/asr0/local/test_hub.sh
rename to examples/aishell/asr0/local/test_wav.sh
index f9fc4575..4a6d92fb 100755
--- a/examples/aishell/asr0/local/test_hub.sh
+++ b/examples/aishell/asr0/local/test_wav.sh
@@ -30,7 +30,7 @@ if [ $? -ne 0 ]; then
     exit 1
 fi

-python3 -u ${BIN_DIR}/test_hub.py \
+python3 -u ${BIN_DIR}/test_wav.py \
 --ngpu ${ngpu} \
 --config ${config_path} \
 --result_file ${ckpt_prefix}.rsl \
diff --git a/examples/aishell/asr0/run.sh b/examples/aishell/asr0/run.sh
index d8ae6062..270b88fc 100755
--- a/examples/aishell/asr0/run.sh
+++ b/examples/aishell/asr0/run.sh
@@ -50,5 +50,5 @@ fi
 # Optionally, you can add LM and test it with runtime.
 if [ ${stage} -le 6 ] && [ ${stop_stage} -ge 6 ]; then
     # test a single .wav file
-    CUDA_VISIBLE_DEVICES=0 ./local/test_hub.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} ${model_type} ${audio_file} || exit -1
+    CUDA_VISIBLE_DEVICES=0 ./local/test_wav.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} ${model_type} ${audio_file} || exit -1
 fi
diff --git a/examples/aishell/asr1/README.md b/examples/aishell/asr1/README.md
index 3ee9fa5e..e9dc6b70 100644
--- a/examples/aishell/asr1/README.md
+++ b/examples/aishell/asr1/README.md
@@ -323,7 +323,7 @@ In some situations, you want to use the trained model to do the inference for th
 ```bash
 if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
     # test a single .wav file
-    CUDA_VISIBLE_DEVICES=0 ./local/test_hub.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} ${audio_file} || exit -1
+    CUDA_VISIBLE_DEVICES=0 ./local/test_wav.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} ${audio_file} || exit -1
 fi
 ```
@@ -341,5 +341,5 @@ wget -nc https://paddlespeech.bj.bcebos.com/datasets/single_wav/zh/demo_01_03.wa

 You need to prepare an audio file or use the audio demo above, please confirm the sample rate of the audio is 16K. You can get the result by running the script below.
 ```bash
-CUDA_VISIBLE_DEVICES= ./local/test_hub.sh conf/transformer.yaml exp/transformer/checkpoints/avg_20 data/demo_01_03.wav
+CUDA_VISIBLE_DEVICES= ./local/test_wav.sh conf/transformer.yaml exp/transformer/checkpoints/avg_20 data/demo_01_03.wav
 ```
diff --git a/examples/aishell/asr1/local/test_hub.sh b/examples/aishell/asr1/local/test_wav.sh
similarity index 96%
rename from examples/aishell/asr1/local/test_hub.sh
rename to examples/aishell/asr1/local/test_wav.sh
index 900ccc4b..f85c1a47 100755
--- a/examples/aishell/asr1/local/test_hub.sh
+++ b/examples/aishell/asr1/local/test_wav.sh
@@ -39,7 +39,7 @@ for type in attention_rescoring; do
     batch_size=1
     output_dir=${ckpt_prefix}
     mkdir -p ${output_dir}
-    python3 -u ${BIN_DIR}/test_hub.py \
+    python3 -u ${BIN_DIR}/test_wav.py \
     --ngpu ${ngpu} \
     --config ${config_path} \
     --result_file ${output_dir}/${type}.rsl \
diff --git a/examples/aishell/asr1/run.sh b/examples/aishell/asr1/run.sh
index d9c0ee3e..d07a4ed5 100644
--- a/examples/aishell/asr1/run.sh
+++ b/examples/aishell/asr1/run.sh
@@ -43,7 +43,7 @@ fi
 # Optionally, you can add LM and test it with runtime.
 if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
     # test a single .wav file
-    CUDA_VISIBLE_DEVICES=0 ./local/test_hub.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} ${audio_file} || exit -1
+    CUDA_VISIBLE_DEVICES=0 ./local/test_wav.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} ${audio_file} || exit -1
 fi

 # Not supported at now!!!
diff --git a/examples/librispeech/asr0/local/test_hub.sh b/examples/librispeech/asr0/local/test_wav.sh
similarity index 95%
rename from examples/librispeech/asr0/local/test_hub.sh
rename to examples/librispeech/asr0/local/test_wav.sh
index 560d6758..e8337da7 100755
--- a/examples/librispeech/asr0/local/test_hub.sh
+++ b/examples/librispeech/asr0/local/test_wav.sh
@@ -30,7 +30,7 @@ if [ $? -ne 0 ]; then
     exit 1
 fi

-python3 -u ${BIN_DIR}/test_hub.py \
+python3 -u ${BIN_DIR}/test_wav.py \
 --ngpu ${ngpu} \
 --config ${config_path} \
 --result_file ${ckpt_prefix}.rsl \
diff --git a/examples/librispeech/asr0/run.sh b/examples/librispeech/asr0/run.sh
index 1253b409..5d811b65 100755
--- a/examples/librispeech/asr0/run.sh
+++ b/examples/librispeech/asr0/run.sh
@@ -43,5 +43,5 @@ fi

 if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
     # test a single .wav file
-    CUDA_VISIBLE_DEVICES=0 ./local/test_hub.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} ${model_type} ${audio_file} || exit -1
+    CUDA_VISIBLE_DEVICES=0 ./local/test_wav.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} ${model_type} ${audio_file} || exit -1
 fi
diff --git a/examples/librispeech/asr1/README.md b/examples/librispeech/asr1/README.md
index 331e2434..a2ec4f11 100644
--- a/examples/librispeech/asr1/README.md
+++ b/examples/librispeech/asr1/README.md
@@ -321,7 +321,7 @@ In some situations, you want to use the trained model to do the inference for th
 ```bash
 if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
     # test a single .wav file
-    CUDA_VISIBLE_DEVICES=0 ./local/test_hub.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} ${audio_file} || exit -1
+    CUDA_VISIBLE_DEVICES=0 ./local/test_wav.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} ${audio_file} || exit -1
 fi
 ```
@@ -338,8 +338,8 @@ You can downloads the audio demo:
 wget -nc https://paddlespeech.bj.bcebos.com/datasets/single_wav/en/demo_002_en.wav -P data/
 ```

-You need to prepare an audio file or use the audio demo, please confirm the sample rate of the audio is 16K. You can get the result of audio demo by running the script below.
+You need to prepare an audio file or use the audio demo above, please confirm the sample rate of the audio is 16K. You can get the result of audio demo by running the script below.
 ```bash
-CUDA_VISIBLE_DEVICES= ./local/test_hub.sh conf/conformer.yaml exp/conformer/checkpoints/avg_20 data/demo_002_en.wav
+CUDA_VISIBLE_DEVICES= ./local/test_wav.sh conf/conformer.yaml exp/conformer/checkpoints/avg_20 data/demo_002_en.wav
 ```
diff --git a/examples/librispeech/asr1/local/test_hub.sh b/examples/librispeech/asr1/local/test_wav.sh
similarity index 97%
rename from examples/librispeech/asr1/local/test_hub.sh
rename to examples/librispeech/asr1/local/test_wav.sh
index ca63cf6c..ab6d685d 100755
--- a/examples/librispeech/asr1/local/test_hub.sh
+++ b/examples/librispeech/asr1/local/test_wav.sh
@@ -46,7 +46,7 @@ for type in attention_rescoring; do
    batch_size=1
    output_dir=${ckpt_prefix}
    mkdir -p ${output_dir}
-    python3 -u ${BIN_DIR}/test_hub.py \
+    python3 -u ${BIN_DIR}/test_wav.py \
    --ngpu ${ngpu} \
    --config ${config_path} \
    --result_file ${output_dir}/${type}.rsl \
diff --git a/examples/librispeech/asr1/run.sh b/examples/librispeech/asr1/run.sh
index d4e3d34f..ff1d684b 100755
--- a/examples/librispeech/asr1/run.sh
+++ b/examples/librispeech/asr1/run.sh
@@ -44,12 +44,10 @@ fi

 if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 6 ]; then
     # test a single .wav file
-    CUDA_VISIBLE_DEVICES=0 ./local/test_hub.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} ${audio_file} || exit -1
+    CUDA_VISIBLE_DEVICES=0 ./local/test_wav.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} ${audio_file} || exit -1
 fi

 if [ ${stage} -le 51 ] && [ ${stop_stage} -ge 51 ]; then
     # export ckpt avg_n
     CUDA_VISIBLE_DEVICES= ./local/export.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} exp/${ckpt}/checkpoints/${avg_ckpt}.jit
 fi
-
-
diff --git a/paddlespeech/s2t/exps/deepspeech2/bin/test_hub.py b/paddlespeech/s2t/exps/deepspeech2/bin/test_wav.py
similarity index 100%
rename from paddlespeech/s2t/exps/deepspeech2/bin/test_hub.py
rename to paddlespeech/s2t/exps/deepspeech2/bin/test_wav.py
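For anyone trying the renamed entry point locally, a minimal usage sketch for the AISHELL `asr1` recipe, assuming the 16 kHz demo file has already been downloaded to `data/demo_01_03.wav` and an averaged checkpoint exists at `exp/transformer/checkpoints/avg_20` (both paths taken from the README hunk above); the result-file location is inferred from the `--result_file ${output_dir}/${type}.rsl` flag in `local/test_wav.sh` and may differ in practice:

```bash
# Sketch only: decode a single 16 kHz .wav with the renamed script from the aishell/asr1 recipe.
cd examples/aishell/asr1
# Leaving CUDA_VISIBLE_DEVICES empty selects CPU decoding, as in the README example.
CUDA_VISIBLE_DEVICES= ./local/test_wav.sh conf/transformer.yaml exp/transformer/checkpoints/avg_20 data/demo_01_03.wav
# The attention_rescoring result is written next to the checkpoint prefix (path inferred, may differ).
cat exp/transformer/checkpoints/avg_20/attention_rescoring.rsl
```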