diff --git a/deepspeech/exps/u2/bin/test.py b/deepspeech/exps/u2/bin/test.py
index a80106f74..4ab1c647e 100644
--- a/deepspeech/exps/u2/bin/test.py
+++ b/deepspeech/exps/u2/bin/test.py
@@ -53,4 +53,4 @@ if __name__ == "__main__":
     # Setting for profiling
     pr = cProfile.Profile()
     pr.runcall(main, config, args)
-    pr.dump_stats(os.path.join('.', 'test.profile'))
+    pr.dump_stats(os.path.join(args.output, 'test.profile'))
diff --git a/deepspeech/exps/u2/bin/train.py b/deepspeech/exps/u2/bin/train.py
index f23716781..9dd0041dd 100644
--- a/deepspeech/exps/u2/bin/train.py
+++ b/deepspeech/exps/u2/bin/train.py
@@ -56,4 +56,4 @@ if __name__ == "__main__":
     # Setting for profiling
     pr = cProfile.Profile()
     pr.runcall(main, config, args)
-    pr.dump_stats(os.path.join('.', 'train.profile'))
+    pr.dump_stats(os.path.join(args.output, 'train.profile'))
diff --git a/deepspeech/utils/layer_tools.py b/deepspeech/utils/layer_tools.py
index 429391874..67f3c9396 100644
--- a/deepspeech/utils/layer_tools.py
+++ b/deepspeech/utils/layer_tools.py
@@ -33,7 +33,7 @@ def summary(layer: nn.Layer, print_func=print):
     if print_func:
         num_elements = num_elements / 1024**2
         print_func(
-            f"Total parameters: {num_params}, {num_elements:.4f}M elements.")
+            f"Total parameters: {num_params}, {num_elements:.2f} M elements.")
 
 
 def print_grads(model, print_func=print):
@@ -57,7 +57,7 @@ def print_params(model, print_func=print):
         print_func(msg)
     if print_func:
         total = total / 1024**2
-        print_func(f"Total parameters: {num_params}, {total:.4f}M elements.")
+        print_func(f"Total parameters: {num_params}, {total:.2f} M elements.")
 
 
 def gradient_norm(layer: nn.Layer):
diff --git a/examples/tiny/s1/local/export.sh b/examples/tiny/s1/local/export.sh
index b83a13a98..fb0c3cfae 100755
--- a/examples/tiny/s1/local/export.sh
+++ b/examples/tiny/s1/local/export.sh
@@ -5,14 +5,24 @@ if [ $# != 3 ];then
     exit -1
 fi
 
+ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
+echo "using $ngpu gpus..."
+
 config_path=$1
 ckpt_path_prefix=$2
 jit_model_export_path=$3
 
+device=gpu
+if [ ${ngpu} == 0 ];then
+    device=cpu
+fi
+
 python3 -u ${BIN_DIR}/export.py \
+--device ${device} \
+--nproc ${ngpu} \
--config ${config_path} \
--checkpoint_path ${ckpt_path_prefix} \
---export_path ${jit_model_export_path}
+--export_path ${jit_model_export_path}
 
 
 if [ $? -ne 0 ]; then
diff --git a/examples/tiny/s1/local/train.sh b/examples/tiny/s1/local/train.sh
index 3ed533808..47645d4b5 100755
--- a/examples/tiny/s1/local/train.sh
+++ b/examples/tiny/s1/local/train.sh
@@ -10,6 +10,7 @@ echo "using $ngpu gpus..."
 
 config_path=$1
 ckpt_name=$2
+
 device=gpu
 if [ ngpu != 0 ];then
     device=cpu
diff --git a/examples/tiny/s1/run.sh b/examples/tiny/s1/run.sh
index 3b0da66ca..71cd45349 100644
--- a/examples/tiny/s1/run.sh
+++ b/examples/tiny/s1/run.sh
@@ -5,16 +5,16 @@ source path.sh
 source ${MAIN_ROOT}/utils/parse_options.sh || exit 1;
 
 # prepare data
-bash ./local/data.sh
+bash ./local/data.sh || exit -1
 
 # train model, all `ckpt` under `exp` dir
-CUDA_VISIBLE_DEVICES=0 ./local/train.sh conf/conformer.yaml test
-
-# test ckpt 1
-CUDA_VISIBLE_DEVICES=0 ./local/test.sh conf/conformer.yaml exp/test/checkpoints/1
+CUDA_VISIBLE_DEVICES=0 ./local/train.sh conf/conformer.yaml test || exit -1
 
 # avg 1 best model
 ./local/avg.sh exp/test/checkpoints 1
 
+# test ckpt 1
+CUDA_VISIBLE_DEVICES=0 ./local/test.sh conf/conformer.yaml exp/test/checkpoints/avg_1 || exit -1
+
 # export ckpt 1
-./local/export.sh conf/conformer.yaml exp/test/checkpoints/1 exp/test/checkpoints/1.jit.model
\ No newline at end of file
+CUDA_VISIBLE_DEVICES= ./local/export.sh conf/conformer.yaml exp/test/checkpoints/avg_1 exp/test/checkpoints/avg_1.jit.model
diff --git a/examples/tiny/s1/train.profile b/examples/tiny/s1/train.profile
deleted file mode 100644
index 08a9a8684..000000000
Binary files a/examples/tiny/s1/train.profile and /dev/null differ
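
Note (not part of the diff): with the entry points now dumping cProfile stats under args.output instead of the working directory, the profiles can be inspected with Python's standard pstats module. A minimal sketch, assuming the experiment output directory is exp/test (hypothetical path, not taken from the patch):

    import pstats

    # Load the stats file written by pr.dump_stats(...)
    stats = pstats.Stats("exp/test/train.profile")

    # Print the 20 most expensive calls, sorted by cumulative time
    stats.sort_stats("cumulative").print_stats(20)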