Merge pull request #1225 from Jackwaterveg/new_config
[ASR][Config] refactor the train and test config
commit 4cab9f625b
@@ -0,0 +1,10 @@
chunk_batch_size: 32
error_rate_type: cer
decoding_method: ctc_beam_search
lang_model_path: data/lm/zh_giga.no_cna_cmn.prune01244.klm
alpha: 2.2 #1.9
beta: 4.3
beam_size: 300
cutoff_prob: 0.99
cutoff_top_n: 40
num_proc_bsearch: 10
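
This hunk (and the near-identical ones below) configures CTC beam-search decoding with an external KenLM language model. As a point of reference, here is a minimal sketch of consuming such a file; the loader, defaults, and file path are illustrative assumptions, not PaddleSpeech's actual config API.

import yaml

# Minimal sketch (assumed helper, not the PaddleSpeech API): read a decode
# config like the hunk above and fill in defaults for any missing knobs.
DEFAULTS = {
    "error_rate_type": "cer",        # cer for Mandarin recipes, wer for English
    "decoding_method": "ctc_beam_search",
    "alpha": 1.9,                    # language-model weight
    "beta": 4.3,                     # word-insertion bonus
    "beam_size": 300,
    "cutoff_prob": 0.99,             # keep the smallest token set with this much mass
    "cutoff_top_n": 40,              # ...and at most this many tokens per frame
    "num_proc_bsearch": 10,          # parallel beam-search workers
}

def load_decode_cfg(path):
    with open(path) as f:
        cfg = {**DEFAULTS, **(yaml.safe_load(f) or {})}
    assert 0.0 < cfg["cutoff_prob"] <= 1.0, "cutoff_prob is a probability mass"
    assert cfg["beam_size"] > 0
    return cfg

cfg = load_decode_cfg("decode.yaml")  # hypothetical path
print(cfg["beam_size"], cfg["alpha"], cfg["beta"])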
@@ -0,0 +1,10 @@
decode_batch_size: 128
error_rate_type: cer
decoding_method: ctc_beam_search
lang_model_path: data/lm/zh_giga.no_cna_cmn.prune01244.klm
alpha: 1.9
beta: 5.0
beam_size: 300
cutoff_prob: 0.99
cutoff_top_n: 40
num_proc_bsearch: 10
@@ -0,0 +1,11 @@
beam_size: 10
decode_batch_size: 128
error_rate_type: cer
decoding_method: attention # 'attention', 'ctc_greedy_search', 'ctc_prefix_beam_search', 'attention_rescoring'
ctc_weight: 0.5 # ctc weight for attention rescoring decode mode.
decoding_chunk_size: -1 # decoding chunk size. Defaults to -1.
                        # <0: for decoding, use full chunk.
                        # >0: for decoding, use fixed chunk size as set.
                        # 0: used for training, it's prohibited here.
num_decoding_left_chunks: -1 # number of left chunks for decoding. Defaults to -1.
simulate_streaming: False # simulate streaming inference. Defaults to False.
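
The decoding_chunk_size / num_decoding_left_chunks comments above describe chunk-wise attention for streaming-capable models. A toy interpretation of those rules (hypothetical helper, not PaddleSpeech code):

def visible_range(chunk_idx, num_frames, decoding_chunk_size, num_left_chunks):
    """Which encoder frames chunk `chunk_idx` may attend to (toy version)."""
    if decoding_chunk_size < 0:        # <0: decode over the full utterance
        return 0, num_frames
    if decoding_chunk_size == 0:       # 0 is a training-only setting
        raise ValueError("decoding_chunk_size=0 is prohibited for decoding")
    end = min((chunk_idx + 1) * decoding_chunk_size, num_frames)
    if num_left_chunks < 0:            # -1: unlimited left context
        start = 0
    else:                              # fixed number of left chunks
        start = max(0, (chunk_idx - num_left_chunks) * decoding_chunk_size)
    return start, end

print(visible_range(3, 1000, 16, 2))   # (16, 64): chunk 3 plus two left chunks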
@@ -0,0 +1,11 @@
beam_size: 10
decode_batch_size: 128
error_rate_type: cer
decoding_method: attention # 'attention', 'ctc_greedy_search', 'ctc_prefix_beam_search', 'attention_rescoring'
ctc_weight: 0.5 # ctc weight for attention rescoring decode mode.
decoding_chunk_size: -1 # decoding chunk size. Defaults to -1.
                        # <0: for decoding, use full chunk.
                        # >0: for decoding, use fixed chunk size as set.
                        # 0: used for training, it's prohibited here.
num_decoding_left_chunks: -1 # number of left chunks for decoding. Defaults to -1.
simulate_streaming: False # simulate streaming inference. Defaults to False.
@@ -0,0 +1,11 @@
decode_batch_size: 128
error_rate_type: cer
decoding_method: attention # 'attention', 'ctc_greedy_search', 'ctc_prefix_beam_search', 'attention_rescoring'
beam_size: 10
ctc_weight: 0.5 # ctc weight for attention rescoring decode mode.
decoding_chunk_size: -1 # decoding chunk size. Defaults to -1.
                        # <0: for decoding, use full chunk.
                        # >0: for decoding, use fixed chunk size as set.
                        # 0: used for training, it's prohibited here.
num_decoding_left_chunks: -1 # number of left chunks for decoding. Defaults to -1.
simulate_streaming: true # simulate streaming inference. Defaults to False.
@@ -0,0 +1,13 @@
decode_batch_size: 128
error_rate_type: cer
decoding_method: attention # 'attention', 'ctc_greedy_search', 'ctc_prefix_beam_search', 'attention_rescoring'
beam_size: 10
ctc_weight: 0.5 # ctc weight for attention rescoring decode mode.
decoding_chunk_size: -1 # decoding chunk size. Defaults to -1.
                        # <0: for decoding, use full chunk.
                        # >0: for decoding, use fixed chunk size as set.
                        # 0: used for training, it's prohibited here.
num_decoding_left_chunks: -1 # number of left chunks for decoding. Defaults to -1.
simulate_streaming: False # simulate streaming inference. Defaults to False.
@@ -0,0 +1,10 @@
decode_batch_size: 128
error_rate_type: wer
decoding_method: ctc_beam_search
lang_model_path: data/lm/common_crawl_00.prune01111.trie.klm
alpha: 1.9
beta: 0.3
beam_size: 500
cutoff_prob: 1.0
cutoff_top_n: 40
num_proc_bsearch: 8
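
In all of these ctc_beam_search hunks, alpha scales the language-model score and beta rewards word insertions, in the DeepSpeech2 style. A toy version of the combined hypothesis score, as an assumption consistent with the knob names rather than a quote of the implementation:

import math

def hypothesis_score(log_p_acoustic, log_p_lm, num_words, alpha=1.9, beta=0.3):
    # acoustic log-prob + weighted LM log-prob + per-word insertion bonus
    return log_p_acoustic + alpha * log_p_lm + beta * num_words

# beta offsets the LM's bias toward short outputs: a longer hypothesis with a
# plausible LM score can outrank a shorter, more acoustically likely one.
a = hypothesis_score(-12.0, math.log(1e-4), num_words=3)
b = hypothesis_score(-11.5, math.log(1e-7), num_words=2)
print(a > b)  # True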
@@ -0,0 +1,10 @@
decode_batch_size: 128
error_rate_type: wer
decoding_method: ctc_beam_search
lang_model_path: data/lm/common_crawl_00.prune01111.trie.klm
alpha: 1.9
beta: 0.3
beam_size: 500
cutoff_prob: 1.0
cutoff_top_n: 40
num_proc_bsearch: 8
@@ -0,0 +1,11 @@
decode_batch_size: 128
error_rate_type: wer
decoding_method: attention # 'attention', 'ctc_greedy_search', 'ctc_prefix_beam_search', 'attention_rescoring'
beam_size: 10
ctc_weight: 0.5 # ctc weight for attention rescoring decode mode.
decoding_chunk_size: -1 # decoding chunk size. Defaults to -1.
                        # <0: for decoding, use full chunk.
                        # >0: for decoding, use fixed chunk size as set.
                        # 0: used for training, it's prohibited here.
num_decoding_left_chunks: -1 # number of left chunks for decoding. Defaults to -1.
simulate_streaming: true # simulate streaming inference. Defaults to False.
@@ -0,0 +1,11 @@
decode_batch_size: 64
error_rate_type: wer
decoding_method: attention # 'attention', 'ctc_greedy_search', 'ctc_prefix_beam_search', 'attention_rescoring'
beam_size: 10
ctc_weight: 0.5 # ctc weight for attention rescoring decode mode.
decoding_chunk_size: -1 # decoding chunk size. Defaults to -1.
                        # <0: for decoding, use full chunk.
                        # >0: for decoding, use fixed chunk size as set.
                        # 0: used for training, it's prohibited here.
num_decoding_left_chunks: -1 # number of left chunks for decoding. Defaults to -1.
simulate_streaming: False # simulate streaming inference. Defaults to False.
@@ -0,0 +1,11 @@
decode_batch_size: 1
error_rate_type: wer
decoding_method: attention # 'attention', 'ctc_greedy_search', 'ctc_prefix_beam_search', 'attention_rescoring'
beam_size: 10
ctc_weight: 0.5 # ctc weight for attention rescoring decode mode.
decoding_chunk_size: -1 # decoding chunk size. Defaults to -1.
                        # <0: for decoding, use full chunk.
                        # >0: for decoding, use fixed chunk size as set.
                        # 0: used for training, it's prohibited here.
num_decoding_left_chunks: -1 # number of left chunks for decoding. Defaults to -1.
simulate_streaming: False # simulate streaming inference. Defaults to False.
@@ -0,0 +1,10 @@
decode_batch_size: 32
error_rate_type: cer
decoding_method: ctc_beam_search
lang_model_path: data/lm/zh_giga.no_cna_cmn.prune01244.klm
alpha: 2.6
beta: 5.0
beam_size: 300
cutoff_prob: 0.99
cutoff_top_n: 40
num_proc_bsearch: 8
@@ -0,0 +1,10 @@
decode_batch_size: 32
error_rate_type: wer
decoding_method: ctc_beam_search
lang_model_path: data/lm/common_crawl_00.prune01111.trie.klm
alpha: 1.4
beta: 0.35
beam_size: 500
cutoff_prob: 1.0
cutoff_top_n: 40
num_proc_bsearch: 8
@@ -0,0 +1,10 @@
decode_batch_size: 32
error_rate_type: wer
decoding_method: ctc_beam_search
lang_model_path: data/lm/common_crawl_00.prune01111.trie.klm
alpha: 2.5
beta: 0.3
beam_size: 500
cutoff_prob: 1.0
cutoff_top_n: 40
num_proc_bsearch: 8
@@ -0,0 +1,25 @@
process:
  # extract kaldi fbank from PCM
  - type: fbank_kaldi
    fs: 16000
    n_mels: 80
    n_shift: 160
    win_length: 400
    dither: 0.1
  - type: cmvn_json
    cmvn_path: data/mean_std.json
  # these three processes are a.k.a. SpecAugment
  - type: time_warp
    max_time_warp: 5
    inplace: true
    mode: PIL
  - type: freq_mask
    F: 30
    n_mask: 2
    inplace: true
    replace_with_zero: false
  - type: time_mask
    T: 40
    n_mask: 2
    inplace: true
    replace_with_zero: false
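
This preprocess pipeline extracts Kaldi-style fbank features, applies CMVN stats stored as JSON, then SpecAugment. Below is a sketch of how such a process list can drive a transform dispatcher; only the two masking steps are implemented, on a fake feature matrix, and none of this mirrors PaddleSpeech's actual transform classes.

import numpy as np

def freq_mask(x, F=30, n_mask=2, replace_with_zero=False, **_unused):
    x = x.copy()
    fill = 0.0 if replace_with_zero else x.mean()
    for _ in range(n_mask):
        f = np.random.randint(0, F + 1)                  # band width in mel bins
        f0 = np.random.randint(0, max(1, x.shape[1] - f))
        x[:, f0:f0 + f] = fill
    return x

def time_mask(x, T=40, n_mask=2, replace_with_zero=False, **_unused):
    x = x.copy()
    fill = 0.0 if replace_with_zero else x.mean()
    for _ in range(n_mask):
        t = np.random.randint(0, T + 1)                  # span length in frames
        t0 = np.random.randint(0, max(1, x.shape[0] - t))
        x[t0:t0 + t, :] = fill
    return x

TRANSFORMS = {"freq_mask": freq_mask, "time_mask": time_mask}

def apply_process(feats, process):
    for step in process:
        fn = TRANSFORMS.get(step["type"])                # fbank/cmvn/warp omitted here
        if fn is not None:
            feats = fn(feats, **{k: v for k, v in step.items() if k != "type"})
    return feats

feats = np.random.randn(200, 80)                         # 200 frames x 80 mel bins
out = apply_process(feats, [{"type": "freq_mask", "F": 30, "n_mask": 2},
                            {"type": "time_mask", "T": 40, "n_mask": 2}])
print(out.shape)  # (200, 80)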
@@ -0,0 +1,11 @@
batch_size: 5
error_rate_type: char-bleu
decoding_method: fullsentence # 'fullsentence', 'simultaneous'
beam_size: 10
word_reward: 0.7
decoding_chunk_size: -1 # decoding chunk size. Defaults to -1.
                        # <0: for decoding, use full chunk.
                        # >0: for decoding, use fixed chunk size as set.
                        # 0: used for training, it's prohibited here.
num_decoding_left_chunks: -1 # number of left chunks for decoding. Defaults to -1.
simulate_streaming: False # simulate streaming inference. Defaults to False.
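
error_rate_type: char-bleu indicates this speech-translation recipe is scored with character-level BLEU, which sidesteps word segmentation for Chinese targets. A hand-rolled toy version of that metric, for intuition only (the recipe itself presumably uses a proper BLEU implementation):

import math
from collections import Counter

def char_bleu(hyp, ref, max_n=4):
    """Toy character-level BLEU, unsmoothed (illustration, not the real metric)."""
    hyp, ref = list(hyp.replace(" ", "")), list(ref.replace(" ", ""))
    log_precisions = []
    for n in range(1, max_n + 1):
        h = Counter(tuple(hyp[i:i + n]) for i in range(len(hyp) - n + 1))
        r = Counter(tuple(ref[i:i + n]) for i in range(len(ref) - n + 1))
        overlap = sum((h & r).values())                  # clipped n-gram matches
        if overlap == 0:
            return 0.0
        log_precisions.append(math.log(overlap / sum(h.values())))
    brevity = min(1.0, math.exp(1 - len(ref) / max(len(hyp), 1)))
    return brevity * math.exp(sum(log_precisions) / max_n)

print(round(char_bleu("今天天气很好", "今天天气真好"), 3))  # 0.537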
@@ -0,0 +1,16 @@
process:
  # these three processes are a.k.a. SpecAugment
  - type: time_warp
    max_time_warp: 5
    inplace: true
    mode: PIL
  - type: freq_mask
    F: 30
    n_mask: 2
    inplace: true
    replace_with_zero: false
  - type: time_mask
    T: 40
    n_mask: 2
    inplace: true
    replace_with_zero: false
@@ -0,0 +1,12 @@
batch_size: 5
error_rate_type: char-bleu
decoding_method: fullsentence # 'fullsentence', 'simultaneous'
beam_size: 10
word_reward: 0.7
decoding_chunk_size: -1 # decoding chunk size. Defaults to -1.
                        # <0: for decoding, use full chunk.
                        # >0: for decoding, use fixed chunk size as set.
                        # 0: used for training, it's prohibited here.
num_decoding_left_chunks: -1 # number of left chunks for decoding. Defaults to -1.
simulate_streaming: False # simulate streaming inference. Defaults to False.
@@ -0,0 +1,11 @@
decode_batch_size: 64
error_rate_type: wer
decoding_method: attention # 'attention', 'ctc_greedy_search', 'ctc_prefix_beam_search', 'attention_rescoring'
beam_size: 10
ctc_weight: 0.5 # ctc weight for attention rescoring decode mode.
decoding_chunk_size: -1 # decoding chunk size. Defaults to -1.
                        # <0: for decoding, use full chunk.
                        # >0: for decoding, use fixed chunk size as set.
                        # 0: used for training, it's prohibited here.
num_decoding_left_chunks: -1 # number of left chunks for decoding. Defaults to -1.
simulate_streaming: False # simulate streaming inference. Defaults to False.
@@ -0,0 +1,10 @@
decode_batch_size: 128
error_rate_type: wer
decoding_method: ctc_beam_search
lang_model_path: data/lm/common_crawl_00.prune01111.trie.klm
alpha: 2.5
beta: 0.3
beam_size: 500
cutoff_prob: 1.0
cutoff_top_n: 40
num_proc_bsearch: 8
@@ -0,0 +1,10 @@
decode_batch_size: 128
error_rate_type: wer
decoding_method: ctc_beam_search
lang_model_path: data/lm/common_crawl_00.prune01111.trie.klm
alpha: 2.5
beta: 0.3
beam_size: 500
cutoff_prob: 1.0
cutoff_top_n: 40
num_proc_bsearch: 8
@@ -0,0 +1,11 @@
decode_batch_size: 8 #64
error_rate_type: wer
decoding_method: attention # 'attention', 'ctc_greedy_search', 'ctc_prefix_beam_search', 'attention_rescoring'
beam_size: 10
ctc_weight: 0.5 # ctc weight for attention rescoring decode mode.
decoding_chunk_size: -1 # decoding chunk size. Defaults to -1.
                        # <0: for decoding, use full chunk.
                        # >0: for decoding, use fixed chunk size as set.
                        # 0: used for training, it's prohibited here.
num_decoding_left_chunks: -1 # number of left chunks for decoding. Defaults to -1.
simulate_streaming: False # simulate streaming inference. Defaults to False.
@@ -0,0 +1,11 @@
decode_batch_size: 8 #64
error_rate_type: wer
decoding_method: attention # 'attention', 'ctc_greedy_search', 'ctc_prefix_beam_search', 'attention_rescoring'
beam_size: 10
ctc_weight: 0.5 # ctc weight for attention rescoring decode mode.
decoding_chunk_size: -1 # decoding chunk size. Defaults to -1.
                        # <0: for decoding, use full chunk.
                        # >0: for decoding, use fixed chunk size as set.
                        # 0: used for training, it's prohibited here.
num_decoding_left_chunks: -1 # number of left chunks for decoding. Defaults to -1.
simulate_streaming: False # simulate streaming inference. Defaults to False.
@@ -0,0 +1,11 @@
decode_batch_size: 128
error_rate_type: cer
decoding_method: attention # 'attention', 'ctc_greedy_search', 'ctc_prefix_beam_search', 'attention_rescoring'
beam_size: 10
ctc_weight: 0.5 # ctc weight for attention rescoring decode mode.
decoding_chunk_size: -1 # decoding chunk size. Defaults to -1.
                        # <0: for decoding, use full chunk.
                        # >0: for decoding, use fixed chunk size as set.
                        # 0: used for training, it's prohibited here.
num_decoding_left_chunks: -1 # number of left chunks for decoding. Defaults to -1.
simulate_streaming: False # simulate streaming inference. Defaults to False.
Some files were not shown because too many files have changed in this diff.