From d4e0a4ebe44aa9192f838f738aa1121828ab504b Mon Sep 17 00:00:00 2001
From: Hui Zhang
Date: Thu, 11 Mar 2021 12:23:42 +0000
Subject: [PATCH] fix egs

---
 examples/aishell/local/infer.sh        |  4 ++++
 examples/aishell/local/infer_golden.sh | 31 --------------------------
 examples/aishell/local/server.sh       |  2 +-
 examples/aishell/local/test.sh         |  2 +-
 examples/aishell/local/test_golden.sh  | 31 --------------------------
 examples/aishell/local/train.sh        |  2 +-
 examples/aishell/run.sh                |  5 ++++-
 examples/librispeech/local/infer.sh    |  5 +++++
 examples/librispeech/run.sh            |  7 ++++--
 examples/tiny/README.md                | 20 +++++++++++------
 examples/tiny/local/infer.sh           |  8 +++++--
 11 files changed, 40 insertions(+), 77 deletions(-)
 delete mode 100644 examples/aishell/local/infer_golden.sh
 delete mode 100644 examples/aishell/local/test_golden.sh

diff --git a/examples/aishell/local/infer.sh b/examples/aishell/local/infer.sh
index 4b4c9381b..41ccabf80 100644
--- a/examples/aishell/local/infer.sh
+++ b/examples/aishell/local/infer.sh
@@ -1,5 +1,9 @@
 #! /usr/bin/env bash
 
+if [[ $# != 1 ]];then
+    echo "usage: $0 ckpt-path"
+    exit -1
+fi
 
 # download language model
 bash local/download_lm_ch.sh
diff --git a/examples/aishell/local/infer_golden.sh b/examples/aishell/local/infer_golden.sh
deleted file mode 100644
index 3fdcd1b5e..000000000
--- a/examples/aishell/local/infer_golden.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#! /usr/bin/env bash
-
-# download language model
-bash local/download_lm_ch.sh
-if [ $? -ne 0 ]; then
-    exit 1
-fi
-
-# download well-trained model
-bash local/download_model.sh
-if [ $? -ne 0 ]; then
-    exit 1
-fi
-
-# infer
-CUDA_VISIBLE_DEVICES=0 \
-python3 -u ${BIN_DIR}/infer.py \
---device 'gpu' \
---nproc 1 \
---config conf/deepspeech2.yaml \
---checkpoint_path data/pretrain/params.pdparams \
---opts data.mean_std_filepath data/pretrain/mean_std.npz \
---opts data.vocab_filepath data/pretrain/vocab.txt
-
-if [ $? -ne 0 ]; then
-    echo "Failed in inference!"
-    exit 1
-fi
-
-
-exit 0
diff --git a/examples/aishell/local/server.sh b/examples/aishell/local/server.sh
index 379684075..1cf069ebd 100644
--- a/examples/aishell/local/server.sh
+++ b/examples/aishell/local/server.sh
@@ -2,7 +2,7 @@
 # TODO: replace the model with a mandarin model
 
 if [[ $# != 1 ]];then
-    echo "usage: server.sh checkpoint_path"
+    echo "usage: $0 checkpoint_path"
     exit -1
 fi
 
diff --git a/examples/aishell/local/test.sh b/examples/aishell/local/test.sh
index 74015f5d5..0872ff21e 100644
--- a/examples/aishell/local/test.sh
+++ b/examples/aishell/local/test.sh
@@ -10,7 +10,7 @@ python3 -u ${BIN_DIR}/test.py \
 --device 'gpu' \
 --nproc 1 \
 --config conf/deepspeech2.yaml \
---checkpoint_path ${1}
+--output ckpt
 
 if [ $? -ne 0 ]; then
     echo "Failed in evaluation!"
diff --git a/examples/aishell/local/test_golden.sh b/examples/aishell/local/test_golden.sh
deleted file mode 100644
index 86abd38cb..000000000
--- a/examples/aishell/local/test_golden.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#! /usr/bin/env bash
-
-# download language model
-bash local/download_lm_ch.sh
-if [ $? -ne 0 ]; then
-    exit 1
-fi
-
-# download well-trained model
-bash local/download_model.sh
-if [ $? -ne 0 ]; then
-    exit 1
-fi
-
-# evaluate model
-CUDA_VISIBLE_DEVICES=0 \
-python3 -u ${BIN_DIR}/test.py \
---device 'gpu' \
---nproc 1 \
---config conf/deepspeech2.yaml \
---checkpoint_path data/pretrain/params.pdparams \
---opts data.mean_std_filepath data/pretrain/mean_std.npz \
---opts data.vocab_filepath data/pretrain/vocab.txt
-
-if [ $? -ne 0 ]; then
-    echo "Failed in evaluation!"
-    exit 1
-fi
-
-
-exit 0
diff --git a/examples/aishell/local/train.sh b/examples/aishell/local/train.sh
index 3e13a79e3..c286566a8 100644
--- a/examples/aishell/local/train.sh
+++ b/examples/aishell/local/train.sh
@@ -11,7 +11,7 @@ python3 -u ${BIN_DIR}/train.py \
 --device 'gpu' \
 --nproc ${ngpu} \
 --config conf/deepspeech2.yaml \
---output ckpt
+--output ckpt-${1}
 
 
 if [ $? -ne 0 ]; then
diff --git a/examples/aishell/run.sh b/examples/aishell/run.sh
index dc762df99..8beb6bf0f 100644
--- a/examples/aishell/run.sh
+++ b/examples/aishell/run.sh
@@ -10,7 +10,10 @@ bash ./local/data.sh
 CUDA_VISIBLE_DEVICES=0,1,2,3 bash ./local/train.sh
 
 # test model
-CUDA_VISIBLE_DEVICES=0 bash ./local/test.sh ckpt/checkpoints/step-3284
+CUDA_VISIBLE_DEVICES=0 bash ./local/test.sh
 
 # infer model
 CUDA_VISIBLE_DEVICES=0 bash ./local/infer.sh ckpt/checkpoints/step-3284
+
+# export model
+bash ./local/export.sh ckpt/checkpoints/step-3284 jit.model
\ No newline at end of file
diff --git a/examples/librispeech/local/infer.sh b/examples/librispeech/local/infer.sh
index 9ea39901f..6fc8d39fc 100644
--- a/examples/librispeech/local/infer.sh
+++ b/examples/librispeech/local/infer.sh
@@ -1,5 +1,10 @@
 #! /usr/bin/env bash
 
+if [[ $# != 1 ]];then
+    echo "usage: $0 ckpt-path"
+    exit -1
+fi
+
 # download language model
 bash local/download_lm_en.sh
 if [ $? -ne 0 ]; then
diff --git a/examples/librispeech/run.sh b/examples/librispeech/run.sh
index ff87d38bf..cf0f41edb 100644
--- a/examples/librispeech/run.sh
+++ b/examples/librispeech/run.sh
@@ -7,10 +7,13 @@ source path.sh
 bash ./local/data.sh
 
 # train model
-CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash ./local/train.sh
+CUDA_VISIBLE_DEVICES=0,1,2,3 bash ./local/train.sh
 
 # test model
 CUDA_VISIBLE_DEVICES=0 bash ./local/test.sh
 
 # infer model
-CUDA_VISIBLE_DEVICES=0 bash ./local/infer.sh
+CUDA_VISIBLE_DEVICES=0 bash ./local/infer.sh ckpt/checkpoints/step-3284
+
+# export model
+bash ./local/export.sh ckpt/checkpoints/step-3284 jit.model
\ No newline at end of file
diff --git a/examples/tiny/README.md b/examples/tiny/README.md
index c3bfdc9c4..0f96864c0 100644
--- a/examples/tiny/README.md
+++ b/examples/tiny/README.md
@@ -1,7 +1,8 @@
 # Tiny Example
 
 1. `source path.sh`
-2. `bash run.sh`
+2. Set `CUDA_VISIBLE_DEVICES` as you need.
+3. The demo script is `bash run.sh`. You can also run the commands separately as needed.
 
 ## Steps
 - Prepare the data
@@ -26,11 +27,7 @@
   bash local/infer.sh
   ```
 
-  `infer.sh` will show us some speech-to-text decoding results for several (default: 10) samples with the trained model. The performance might not be good now as the current model is only trained with a toy subset of LibriSpeech. To see the results with a better model, you can download a well-trained (trained for several days, with the complete LibriSpeech) model and do the inference:
-
-  ```bash
-  bash local/infer_golden.sh
-  ```
+  `infer.sh` will show us some speech-to-text decoding results for several (default: 10) samples with the trained model. The performance might not be good now as the current model is only trained with a toy subset of LibriSpeech. To see the results with a better model, you can download a well-trained (trained for several days, with the complete LibriSpeech) model and do the inference.
 
 - Evaluate an existing model
   ```bash
@@ -40,6 +37,15 @@
   `test.sh` will evaluate the model with Word Error Rate (or Character Error Rate) measurement.
 
   Similarly, you can also download a well-trained model and test its performance:
+
+- Export jit model
+
+  ```bash
+  bash local/export.sh ckpt_path saved_jit_model_path
+  ```
+
+- Tune hyperparameters
+
   ```bash
-  bash local/test_golden.sh
+  bash local/tune.sh
   ```
diff --git a/examples/tiny/local/infer.sh b/examples/tiny/local/infer.sh
index 3aff6b78b..1243c0d08 100644
--- a/examples/tiny/local/infer.sh
+++ b/examples/tiny/local/infer.sh
@@ -1,17 +1,21 @@
 #! /usr/bin/env bash
 
+if [[ $# != 1 ]];then
+    echo "usage: $0 ckpt-path"
+    exit -1
+fi
+
 # download language model
 bash local/download_lm_en.sh
 if [ $? -ne 0 ]; then
     exit 1
 fi
 
 
-CUDA_VISIBLE_DEVICES=0 \
 python3 -u ${BIN_DIR}/infer.py \
 --device 'gpu' \
 --nproc 1 \
 --config conf/deepspeech2.yaml \
---output ckpt
+--checkpoint_path ${1}
 
 if [ $? -ne 0 ]; then