pull/549/head
Hui Zhang 5 years ago
parent 29dbfa86ad
commit d4e0a4ebe4

@ -1,5 +1,9 @@
#! /usr/bin/env bash
if [[ $# != 1 ]];
echo "usage: $0 ckpt-path"
exit -1
fi
# download language model
bash local/download_lm_ch.sh

@ -1,31 +0,0 @@
#! /usr/bin/env bash

# Fetch the Mandarin language model required for decoding; stop on failure.
bash local/download_lm_ch.sh || exit 1

# Fetch the published well-trained acoustic model; stop on failure.
bash local/download_model.sh || exit 1

# Run single-GPU inference against the downloaded pretrained checkpoint,
# overriding the mean/std stats and vocabulary to match that checkpoint.
CUDA_VISIBLE_DEVICES=0 \
python3 -u ${BIN_DIR}/infer.py \
--device 'gpu' \
--nproc 1 \
--config conf/deepspeech2.yaml \
--checkpoint_path data/pretrain/params.pdparams \
--opts data.mean_std_filepath data/pretrain/mean_std.npz \
--opts data.vocab_filepath data/pretrain/vocab.txt \
|| { echo "Failed in inference!"; exit 1; }

exit 0

@ -2,7 +2,7 @@
# TODO: replace the model with a mandarin model
if [[ $# != 1 ]];then
echo "usage: server.sh checkpoint_path"
echo "usage: $1 checkpoint_path"
exit -1
fi

@ -10,7 +10,7 @@ python3 -u ${BIN_DIR}/test.py \
--device 'gpu' \
--nproc 1 \
--config conf/deepspeech2.yaml \
--checkpoint_path ${1}
--output ckpt
if [ $? -ne 0 ]; then
echo "Failed in evaluation!"

@ -1,31 +0,0 @@
#! /usr/bin/env bash

# Fetch the Mandarin language model required for scoring; stop on failure.
bash local/download_lm_ch.sh || exit 1

# Fetch the published well-trained acoustic model; stop on failure.
bash local/download_model.sh || exit 1

# Evaluate the downloaded pretrained checkpoint on a single GPU,
# overriding the mean/std stats and vocabulary to match that checkpoint.
CUDA_VISIBLE_DEVICES=0 \
python3 -u ${BIN_DIR}/test.py \
--device 'gpu' \
--nproc 1 \
--config conf/deepspeech2.yaml \
--checkpoint_path data/pretrain/params.pdparams \
--opts data.mean_std_filepath data/pretrain/mean_std.npz \
--opts data.vocab_filepath data/pretrain/vocab.txt \
|| { echo "Failed in evaluation!"; exit 1; }

exit 0

@ -11,7 +11,7 @@ python3 -u ${BIN_DIR}/train.py \
--device 'gpu' \
--nproc ${ngpu} \
--config conf/deepspeech2.yaml \
--output ckpt
--output ckpt-${1}
if [ $? -ne 0 ]; then

@ -10,7 +10,10 @@ bash ./local/data.sh
CUDA_VISIBLE_DEVICES=0,1,2,3 bash ./local/train.sh
# test model
CUDA_VISIBLE_DEVICES=0 bash ./local/test.sh ckpt/checkpoints/step-3284
CUDA_VISIBLE_DEVICES=0 bash ./local/test.sh
# infer model
CUDA_VISIBLE_DEVICES=0 bash ./local/infer.sh ckpt/checkpoints/step-3284
# export model
bash ./local/export.sh ckpt/checkpoints/step-3284 jit.model

@ -1,5 +1,10 @@
#! /usr/bin/env bash
if [[ $# != 1 ]];
echo "usage: $0 ckpt-path"
exit -1
fi
# download language model
bash local/download_lm_en.sh
if [ $? -ne 0 ]; then

@ -7,10 +7,13 @@ source path.sh
bash ./local/data.sh
# train model
CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash ./local/train.sh
CUDA_VISIBLE_DEVICES=0,1,2,3 bash ./local/train.sh
# test model
CUDA_VISIBLE_DEVICES=0 bash ./local/test.sh
# infer model
CUDA_VISIBLE_DEVICES=0 bash ./local/infer.sh
CUDA_VISIBLE_DEVICES=0 bash ./local/infer.sh ckpt/checkpoints/step-3284
# export model
bash ./local/export.sh ckpt/checkpoints/step-3284 jit.model

@ -1,7 +1,8 @@
# Tiny Example
1. `source path.sh`
2. `bash run.sh`
3. set `CUDA_VISIBLE_DEVICES` as you need.
2. demo script is `bash run.sh`. You can run the commands separately as needed.
## Steps
- Prepare the data
@ -26,11 +27,7 @@
bash local/infer.sh
```
`infer.sh` will show us some speech-to-text decoding results for several (default: 10) samples with the trained model. The performance might not be good now as the current model is only trained with a toy subset of LibriSpeech. To see the results with a better model, you can download a well-trained (trained for several days, with the complete LibriSpeech) model and do the inference:
```bash
bash local/infer_golden.sh
```
`infer.sh` will show us some speech-to-text decoding results for several (default: 10) samples with the trained model. The performance might not be good now as the current model is only trained with a toy subset of LibriSpeech. To see the results with a better model, you can download a well-trained (trained for several days, with the complete LibriSpeech) model and do the inference.
- Evaluate an existing model
@ -40,6 +37,15 @@
`test.sh` will evaluate the model with Word Error Rate (or Character Error Rate) measurement. Similarly, you can also download a well-trained model and test its performance:
- Export jit model
```bash
bash local/export.sh ckpt_path saved_jit_model_path
```
- Tune hyper-parameters
```bash
bash local/test_golden.sh
bash local/tune.sh
```

@ -1,17 +1,21 @@
#! /usr/bin/env bash
if [[ $# != 1 ]];
echo "usage: $0 ckpt-path"
exit -1
fi
# download language model
bash local/download_lm_en.sh
if [ $? -ne 0 ]; then
exit 1
fi
CUDA_VISIBLE_DEVICES=0 \
python3 -u ${BIN_DIR}/infer.py \
--device 'gpu' \
--nproc 1 \
--config conf/deepspeech2.yaml \
--output ckpt
--checkpoint_path ${1}
if [ $? -ne 0 ]; then

Loading…
Cancel
Save