Merge pull request #1844 from zh794390558/doc

[asr] add asr1 config comment
Hui Zhang committed via GitHub, commit 30a6304607

@@ -10,7 +10,7 @@ encoder_conf:
     attention_heads: 4
     linear_units: 2048 # the number of units of position-wise feed forward
     num_blocks: 12 # the number of encoder blocks
-    dropout_rate: 0.1
+    dropout_rate: 0.1 # sublayer output dropout
     positional_dropout_rate: 0.1
     attention_dropout_rate: 0.0
     input_layer: conv2d # encoder input type, you can choose conv2d, conv2d6 and conv2d8
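
Note on the new `# sublayer output dropout` comment: in a Transformer block this is the dropout applied to each sublayer's output before the residual connection, as opposed to `attention_dropout_rate` (dropout on the attention weights) and `positional_dropout_rate` (dropout after the positional encoding). A minimal PyTorch sketch, with illustrative names rather than PaddleSpeech's actual classes:

```python
import torch
import torch.nn as nn

class EncoderSublayer(nn.Module):
    """Pre-norm residual wrapper: x + Dropout(sublayer(LayerNorm(x)))."""

    def __init__(self, d_model=256, dropout_rate=0.1):
        super().__init__()
        self.norm = nn.LayerNorm(d_model)
        # this is the knob the config calls dropout_rate
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, x, sublayer):
        return x + self.dropout(sublayer(self.norm(x)))

layer = EncoderSublayer()
x = torch.randn(8, 100, 256)           # (batch, time, d_model)
out = layer(x, nn.Linear(256, 256))    # e.g. a feed-forward sublayer
```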
@@ -30,7 +30,7 @@ decoder_conf:
     attention_heads: 4
     linear_units: 2048
     num_blocks: 6
-    dropout_rate: 0.1
+    dropout_rate: 0.1 # sublayer output dropout
     positional_dropout_rate: 0.1
     self_attention_dropout_rate: 0.0
     src_attention_dropout_rate: 0.0
@@ -39,7 +39,7 @@ model_conf:
     ctc_weight: 0.3
     lsm_weight: 0.1 # label smoothing option
     length_normalized_loss: false
-    init_type: 'kaiming_uniform'
+    init_type: 'kaiming_uniform' # !Warning: need to check convergence
 ###########################################
 # Data #
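
On `init_type: 'kaiming_uniform'`: the added warning flags that this initializer should be sanity-checked for training convergence. A rough PyTorch equivalent of applying Kaiming-uniform initialization across a model (PaddleSpeech wires its own initializer internally; this sketch only illustrates the scheme):

```python
import torch.nn as nn

def init_weights(module):
    # Kaiming-uniform on every Linear weight, zeros on biases
    if isinstance(module, nn.Linear):
        nn.init.kaiming_uniform_(module.weight, nonlinearity="relu")
        if module.bias is not None:
            nn.init.zeros_(module.bias)

model = nn.Sequential(nn.Linear(256, 2048), nn.ReLU(), nn.Linear(2048, 256))
model.apply(init_weights)  # re-initializes all Linear layers in place
```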

@@ -37,7 +37,7 @@ model_conf:
     ctc_weight: 0.3
     lsm_weight: 0.1 # label smoothing option
     length_normalized_loss: false
-    init_type: 'kaiming_uniform'
+    init_type: 'kaiming_uniform' # !Warning: need to check convergence
 ###########################################
 # Data #

@@ -10,7 +10,7 @@ encoder_conf:
     attention_heads: 4
     linear_units: 2048 # the number of units of position-wise feed forward
     num_blocks: 12 # the number of encoder blocks
-    dropout_rate: 0.1
+    dropout_rate: 0.1 # sublayer output dropout
     positional_dropout_rate: 0.1
     attention_dropout_rate: 0.0
     input_layer: conv2d # encoder input type, you can choose conv2d, conv2d6 and conv2d8
@@ -21,7 +21,7 @@ decoder_conf:
     attention_heads: 4
     linear_units: 2048
     num_blocks: 6
-    dropout_rate: 0.1
+    dropout_rate: 0.1 # sublayer output dropout
     positional_dropout_rate: 0.1
     self_attention_dropout_rate: 0.0
     src_attention_dropout_rate: 0.0
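
The decoder exposes three separate dropout knobs: `dropout_rate` (sublayer outputs), `self_attention_dropout_rate`, and `src_attention_dropout_rate` (dropout on the attention weights of the self- and cross-attention modules). A rough mapping onto stock PyTorch modules, for illustration only:

```python
import torch.nn as nn

d_model, heads = 256, 4
# dropout applied to the attention weights inside each attention module
self_attn = nn.MultiheadAttention(d_model, heads, dropout=0.0)  # self_attention_dropout_rate
src_attn = nn.MultiheadAttention(d_model, heads, dropout=0.0)   # src_attention_dropout_rate
# dropout applied to each sublayer's output before the residual add
residual_dropout = nn.Dropout(0.1)                              # dropout_rate
```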
