From 9abf03bb6b5dbf8650c0da25bba196f0e147c3a2 Mon Sep 17 00:00:00 2001
From: Hui Zhang
Date: Sun, 26 Sep 2021 03:26:33 +0000
Subject: [PATCH] fix libri s1 transformer config

---
 examples/librispeech/s1/conf/transformer.yaml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/examples/librispeech/s1/conf/transformer.yaml b/examples/librispeech/s1/conf/transformer.yaml
index fe9cab06..e4a06767 100644
--- a/examples/librispeech/s1/conf/transformer.yaml
+++ b/examples/librispeech/s1/conf/transformer.yaml
@@ -8,7 +8,7 @@ data:
   min_output_len: 0.0 # tokens
   max_output_len: 400.0 # tokens
   min_output_input_ratio: 0.05
-  max_output_input_ratio: 10.0
+  max_output_input_ratio: 100.0
 
 collator:
   vocab_filepath: data/vocab.txt
@@ -16,7 +16,7 @@ collator:
   spm_model_prefix: 'data/bpe_unigram_5000'
   mean_std_filepath: ""
   augmentation_config: conf/augmentation.json
-  batch_size: 64
+  batch_size: 32
   raw_wav: True  # use raw_wav or kaldi feature
   specgram_type: fbank #linear, mfcc, fbank
   feat_dim: 80
@@ -75,7 +75,7 @@ model:
 
 training:
   n_epoch: 120
-  accum_grad: 2
+  accum_grad: 4
   global_grad_clip: 5.0
   optim: adam
   optim_conf:
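
Note on the training changes (an observation, not part of the patch): assuming the trainer accumulates gradients over accum_grad forward/backward passes before each optimizer step, the effective batch size per update is batch_size * accum_grad, so this change keeps it at 128 while halving the per-pass memory footprint. A minimal sketch of that arithmetic:

    # Sanity check of the effective batch size implied by the config change;
    # assumes effective_batch = batch_size * accum_grad (usual gradient-accumulation semantics).
    old_effective = 64 * 2  # before the patch: batch_size=64, accum_grad=2
    new_effective = 32 * 4  # after the patch:  batch_size=32, accum_grad=4
    assert old_effective == new_effective == 128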