pull/552/head
Hui Zhang 5 years ago
parent 4c8c2178af
commit e2118016a0

@@ -62,14 +62,15 @@ class CTCLoss(nn.Layer):
"""Compute CTC loss.
Args:
-logits ([paddle.Tensor]): [description]
-ys_pad ([paddle.Tensor]): [description]
-hlens ([paddle.Tensor]): [description]
-ys_lens ([paddle.Tensor]): [description]
+logits ([paddle.Tensor]): [B, Tmax, D]
+ys_pad ([paddle.Tensor]): [B, Tmax]
+hlens ([paddle.Tensor]): [B]
+ys_lens ([paddle.Tensor]): [B]
Returns:
[paddle.Tensor]: scalar. If reduction is 'none', then (N), where N = \text{batch size}.
"""
+B = paddle.shape(logits)[0]
# warp-ctc need logits, and do softmax on logits by itself
# warp-ctc need activation with shape [T, B, V + 1]
# logits: (B, L, D) -> (L, B, D)
@@ -78,5 +79,5 @@ class CTCLoss(nn.Layer):
# wenet do batch-size average, deepspeech2 not do this
# Batch-size average
-# loss = loss / paddle.shape(logits)[1]
+# loss = loss / B
return loss
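
For reference, a minimal sketch of how the shapes documented above fit together when calling this layer. The import path and constructor arguments are assumptions for illustration, not part of this commit:

import paddle
# Hypothetical import path; adjust to wherever CTCLoss is defined in this repo.
from deepspeech.modules.loss import CTCLoss

B, Tmax, D = 4, 50, 29                            # batch size, max frames, vocab size (incl. blank)
logits = paddle.randn([B, Tmax, D])               # acoustic model output, [B, Tmax, D]
ys_pad = paddle.randint(1, D, shape=[B, Tmax])    # padded label ids, [B, Tmax]
hlens = paddle.full([B], Tmax, dtype='int64')     # frame lengths per utterance, [B]
ys_lens = paddle.full([B], 10, dtype='int64')     # label lengths per utterance, [B]

ctc_loss = CTCLoss()                              # constructor args (blank id, reduction) assumed default
loss = ctc_loss(logits, ys_pad, hlens, ys_lens)   # scalar unless reduction is 'none'
# The commented-out line in the hunk above would additionally average over the batch:
# loss = loss / B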

@@ -2,3 +2,4 @@ data
ckpt*
demo_cache
*.log
+log

@@ -1,6 +1,6 @@
#! /usr/bin/env bash
-if [[ $# != 1 ]];
+if [[ $# != 1 ]]; then
echo "usage: $0 ckpt-path"
exit -1
fi

@@ -7,7 +7,7 @@ source path.sh
bash ./local/data.sh
# train model
-CUDA_VISIBLE_DEVICES=0,1,2,3 bash ./local/train.sh
+CUDA_VISIBLE_DEVICES=0,1,2,3 bash ./local/train.sh baseline
# test model
CUDA_VISIBLE_DEVICES=0 bash ./local/test.sh
@@ -16,4 +16,4 @@ CUDA_VISIBLE_DEVICES=0 bash ./local/test.sh
CUDA_VISIBLE_DEVICES=0 bash ./local/infer.sh ckpt/checkpoints/step-3284
# export model
-bash ./local/export.sh ckpt/checkpoints/step-3284 jit.model
+bash ./local/export.sh ckpt/checkpoints/step-3284 jit.model

@@ -1,6 +1,6 @@
#! /usr/bin/env bash
-if [[ $# != 1 ]];
+if [[ $# != 1 ]];then
echo "usage: $0 ckpt-path"
exit -1
fi

@@ -11,7 +11,7 @@ python3 -u ${BIN_DIR}/train.py \
--device 'gpu' \
--nproc ${ngpu} \
--config conf/deepspeech2.yaml \
---output ckpt
+--output ckpt-${1}
if [ $? -ne 0 ]; then
echo "Failed in training!"

@@ -1,6 +1,6 @@
#! /usr/bin/env bash
-if [[ $# != 1 ]];
+if [[ $# != 1 ]];then
echo "usage: $0 ckpt-path"
exit -1
fi

@@ -6,7 +6,6 @@ if [ $? -ne 0 ]; then
exit 1
fi
-CUDA_VISIBLE_DEVICES=0 \
python3 -u ${BIN_DIR}/test.py \
--device 'gpu' \
--nproc 1 \

@@ -2,7 +2,6 @@
export FLAGS_sync_nccl_allreduce=0
-CUDA_VISIBLE_DEVICES=0 \
python3 -u ${BIN_DIR}/train.py \
--device 'gpu' \
--nproc 1 \
