num_conv_layers=2,  # Number of stacking convolution layers.
num_rnn_layers=3,  # Number of stacking RNN layers.
rnn_layer_size=1024,  # RNN layer size (number of RNN cells).
use_gru=True,  # Use GRU if set True. Use simple RNN if set False.
share_rnn_weights=True  # Whether to share input-hidden weights between forward and backward directional RNNs. Notice that for GRU, weight sharing is not supported.
))
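# A minimal override sketch, assuming standard yacs CfgNode semantics (the
# keys below are the _C.model defaults defined above, not additions): any of
# these values can be replaced programmatically before training, e.g.
#
#   _C.model.merge_from_list(['use_gru', False, 'rnn_layer_size', 2048])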
_C.data = ManifestDataset.params()  # dataset defaults provided by ManifestDataset
_C.collator = SpeechCollator.params()  # collator defaults provided by SpeechCollator
_C.model = DeepSpeech2Model.params()  # model defaults provided by DeepSpeech2Model
_C.training = CN(
dict(
lr=5e-4,  # learning rate
lr_decay=1.0,  # learning rate decay
weight_decay=1e-6,  # weight decay coefficient
global_grad_clip=5.0,  # global gradient norm clipping threshold
n_epoch=50,  # number of training epochs
))
_C.training = DeepSpeech2Trainer.params()  # training defaults provided by DeepSpeech2Trainer
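# A minimal usage sketch, assuming standard yacs CfgNode semantics; the YAML
# path is hypothetical. The defaults assembled above are typically cloned and
# then overridden by an experiment config file:
#
#   config = _C.clone()
#   config.merge_from_file('conf/deepspeech2.yaml')  # hypothetical path
#   config.freeze()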
_C.decoding = CN(
dict(
alpha=2.5,  # Coefficient of the language model (LM) score for beam search.
beta=0.3,  # Coefficient of the word count (WC) score for beam search.
cutoff_prob=1.0,  # Cutoff probability for pruning.
cutoff_top_n=40,  # Cutoff number for pruning.
lang_model_path='models/lm/common_crawl_00.prune01111.trie.klm',  # Filepath of the language model.