# https://yaml.org/type/float.html
###########################################
# Data                                    #
###########################################
train_manifest: data/manifest.train.tiny
dev_manifest: data/manifest.dev
test_manifest: data/manifest.test
min_input_len: 5.0 # frame
max_input_len: 3000.0 # frame
min_output_len: 0.0 # tokens
max_output_len: 400.0 # tokens
min_output_input_ratio: 0.01
max_output_input_ratio: 20.0
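# Filtering sketch (an assumption about the loader, not verified against it):
# an utterance is kept only if
#   min_input_len  <= num_frames <= max_input_len
#   min_output_len <= num_tokens <= max_output_len
#   min_output_input_ratio <= num_tokens / num_frames <= max_output_input_ratio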

###########################################
# Dataloader                              #
###########################################
vocab_filepath: data/lang_char/vocab.txt
unit_type: 'spm'
spm_model_prefix: data/lang_char/bpe_unigram_8000
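# With unit_type 'spm', the prefix above should resolve to the SentencePiece
# files bpe_unigram_8000.model / bpe_unigram_8000.vocab (standard SentencePiece
# naming; the exact lookup is the loader's concern).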
mean_std_filepath: ""
# augmentation_config: conf/augmentation.json
batch_size: 10
raw_wav: True # read raw wav files (True) or precomputed kaldi features (False)
spectrum_type: fbank # one of: linear, mfcc, fbank
feat_dim: 83
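# 83 dims commonly means 80-dim fbank plus 3-dim pitch in Kaldi/ESPnet-style
# recipes; whether this recipe actually appends pitch is an assumption.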
delta_delta: False
dither: 1.0
target_sample_rate: 16000
max_freq: None
n_fft: None
stride_ms: 10.0
window_ms: 25.0
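# With a 25 ms window and a 10 ms stride the frontend produces 100 frames per
# second, so max_input_len of 3000 frames caps utterances at roughly 30 s
# (assuming input lengths are counted in these feature frames).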
use_dB_normalization: True
target_dB: -20
random_seed: 0
keep_transcription_text: False
sortagrad: True
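# SortaGrad (an assumption about this implementation, following Deep Speech 2):
# sort utterances by length for the first epoch, then shuffle normally.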
shuffle_method: batch_shuffle
num_workers: 2

############################################
# Network Architecture                     #
############################################
cmvn_file: None
cmvn_file_type: "json"
# encoder related
encoder: transformer
encoder_conf:
    output_size: 256    # dimension of attention
    attention_heads: 4
    linear_units: 2048  # the number of units of position-wise feed forward
    num_blocks: 12      # the number of encoder blocks
    dropout_rate: 0.1
    positional_dropout_rate: 0.1
    attention_dropout_rate: 0.0
    input_layer: conv2d # encoder input type, you can choose conv2d, conv2d6 and conv2d8
    normalize_before: true
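# Assuming the standard conv2d frontend, two stride-2 convolutions subsample
# the input 4x in time, so a 100 frames/s input reaches the attention blocks
# at roughly 25 frames/s.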

# decoder related
decoder: transformer
decoder_conf:
    attention_heads: 4
    linear_units: 2048
    num_blocks: 6
    dropout_rate: 0.1
    positional_dropout_rate: 0.1
    self_attention_dropout_rate: 0.0
    src_attention_dropout_rate: 0.0

# hybrid CTC/attention
model_conf:
    asr_weight: 0.0
    ctc_weight: 0.0
    lsm_weight: 0.1     # label smoothing option
    length_normalized_loss: false
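# A hedged reading of these weights (the exact combination is defined by the
# trainer, not this file): ctc_weight mixes the CTC and attention losses, and
# asr_weight scales an auxiliary ASR objective; with both set to 0.0, training
# reduces to the pure attention loss.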

###########################################
# Training                                #
###########################################
n_epoch: 20
accum_grad: 2
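# Effective batch size is batch_size * accum_grad = 10 * 2 = 20 utterances per
# optimizer step (gradients accumulated over 2 forward/backward passes).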
global_grad_clip: 5.0
optim: adam
optim_conf:
    lr: 0.004
    weight_decay: 1.0e-06
scheduler: warmuplr
scheduler_conf:
    warmup_steps: 25000
    lr_decay: 1.0
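# warmuplr is typically the Noam-style schedule (an assumption about this
# implementation):
#   lr(step) = lr * warmup_steps**0.5 * min(step**-0.5, step * warmup_steps**-1.5)
# i.e. a linear ramp to the configured lr over 25000 steps, then 1/sqrt(step) decay.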
log_interval: 5
checkpoint:
    kbest_n: 50
    latest_n: 5
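# Presumably keeps the 50 best checkpoints by validation metric plus the 5 most
# recent ones; the exact selection metric is the trainer's concern.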