parent
e5347c48ef
commit
e55b5baf66
@ -1 +1,2 @@
* s0 for deepspeech2
* s1 for u2
@ -1,22 +0,0 @@
#! /usr/bin/env bash

. ${MAIN_ROOT}/utils/utility.sh

DIR=data/pretrain
mkdir -p ${DIR}

URL='https://deepspeech.bj.bcebos.com/mandarin_models/aishell_model_fluid.tar.gz'
MD5=2bf0cc8b6d5da2a2a787b5cc36a496b5
TARGET=${DIR}/aishell_model_fluid.tar.gz

echo "Download Aishell model ..."
download $URL $MD5 $TARGET
if [ $? -ne 0 ]; then
    echo "Fail to download Aishell model!"
    exit 1
fi
tar -zxvf $TARGET -C ${DIR}

exit 0
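The deleted script depended on the download helper from ${MAIN_ROOT}/utils/utility.sh, which fetches the archive and verifies its MD5. A rough manual equivalent, a sketch only, assuming standard wget and md5sum are available and that URL, MD5, TARGET and DIR are set as above:

    # sketch: fetch the pretrained archive by hand and verify its checksum
    wget -c ${URL} -O ${TARGET}
    echo "${MD5}  ${TARGET}" | md5sum -c - || { echo "MD5 mismatch"; exit 1; }
    tar -zxvf ${TARGET} -C ${DIR}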
@ -0,0 +1,8 @@
[
    {
        "type": "shift",
        "params": {"min_shift_ms": -5,
                   "max_shift_ms": 5},
        "prob": 1.0
    }
]
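Since this augmentation spec is plain JSON, it can be sanity-checked before training. A minimal check, assuming python3 is available; the conf/augmentation.config path is an assumption, the diff does not show where the file lives:

    # assumed path; verify the augmentation spec parses as valid JSON
    python3 -m json.tool conf/augmentation.config > /dev/null && echo "augmentation config OK"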
@ -0,0 +1 @@
../../s0/local/data.sh
@ -0,0 +1 @@
../../s0/local/download_lm_ch.sh
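Both one-line entries above are symlinks back into the s0 recipe, so the u2 (s1) example reuses the same data preparation and language-model download scripts. They could be recreated with something like (a sketch, assuming the current directory is the s1 example root):

    # recreate the symlinks pointing s1 at the shared s0 scripts
    ln -s ../../s0/local/data.sh local/data.sh
    ln -s ../../s0/local/download_lm_ch.sh local/download_lm_ch.sh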
@ -0,0 +1,20 @@
#! /usr/bin/env bash

if [ $# != 2 ]; then
    echo "usage: export ckpt_path jit_model_path"
    exit -1
fi

python3 -u ${BIN_DIR}/export.py \
    --config conf/deepspeech2.yaml \
    --checkpoint_path ${1} \
    --export_path ${2}

if [ $? -ne 0 ]; then
    echo "Failed in export!"
    exit 1
fi

exit 0
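run.sh (added later in this diff) calls this export script with a checkpoint path and an output name; a typical invocation, reusing the paths shown there, is:

    # export a trained checkpoint to a jit model (paths taken from run.sh below)
    bash ./local/export.sh ckpt/checkpoints/step-3284 jit.model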
@ -0,0 +1,21 @@
#! /usr/bin/env bash

# download language model
bash local/download_lm_ch.sh
if [ $? -ne 0 ]; then
    exit 1
fi

python3 -u ${BIN_DIR}/test.py \
    --device 'gpu' \
    --nproc 1 \
    --config conf/deepspeech2.yaml \
    --output ckpt

if [ $? -ne 0 ]; then
    echo "Failed in evaluation!"
    exit 1
fi

exit 0
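The test script first downloads the Mandarin language model and then runs test.py as a single process; as run.sh below shows, it is usually pinned to one GPU:

    # evaluate the trained model on a single GPU (same call as in run.sh below)
    CUDA_VISIBLE_DEVICES=0 bash ./local/test.sh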
@ -0,0 +1,23 @@
#! /usr/bin/env bash

# train model
# if you wish to resume from an existing model, uncomment --init_from_pretrained_model
export FLAGS_sync_nccl_allreduce=0

ngpu=$(echo ${CUDA_VISIBLE_DEVICES} | python -c 'import sys; a = sys.stdin.read(); print(len(a.split(",")));')
echo "using $ngpu gpus..."

python3 -u ${BIN_DIR}/train.py \
    --device 'gpu' \
    --nproc ${ngpu} \
    --config conf/deepspeech2.yaml \
    --output ckpt-${1}

if [ $? -ne 0 ]; then
    echo "Failed in training!"
    exit 1
fi

exit 0
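The ngpu value is simply the number of comma-separated entries in CUDA_VISIBLE_DEVICES (note it still evaluates to 1 when the variable is empty), so the process count follows the device list. A typical multi-GPU call, matching run.sh below; the optional trailing argument becomes the ckpt-<tag> output suffix, and run.sh omits it:

    # train on four GPUs; an optional tag argument would be appended to the ckpt- output dir
    CUDA_VISIBLE_DEVICES=0,1,2,3 bash ./local/train.sh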
@ -0,0 +1,14 @@
export MAIN_ROOT=${PWD}/../../../

export PATH=${MAIN_ROOT}:${PWD}/tools:${PATH}
export LC_ALL=C

# Use UTF-8 in Python to avoid UnicodeDecodeError when LC_ALL=C
export PYTHONIOENCODING=UTF-8
export PYTHONPATH=${MAIN_ROOT}:${PYTHONPATH}

export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib/

MODEL=u2
export BIN_DIR=${MAIN_ROOT}/deepspeech/exps/${MODEL}/bin
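Sourcing path.sh pins BIN_DIR to the u2 experiment binaries under MAIN_ROOT. A quick way to confirm what it resolves to, a sketch assuming it is sourced from this example's directory:

    # inspect the environment that path.sh exports
    source path.sh
    echo "MAIN_ROOT=${MAIN_ROOT}"
    echo "BIN_DIR=${BIN_DIR}"   # expected to end in deepspeech/exps/u2/bin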
@ -0,0 +1,19 @@
#!/bin/bash

source path.sh
# only demos

# prepare data
bash ./local/data.sh

# train model
CUDA_VISIBLE_DEVICES=0,1,2,3 bash ./local/train.sh

# test model
CUDA_VISIBLE_DEVICES=0 bash ./local/test.sh

# infer model
CUDA_VISIBLE_DEVICES=0 bash ./local/infer.sh ckpt/checkpoints/step-3284

# export model
bash ./local/export.sh ckpt/checkpoints/step-3284 jit.model