Merge pull request #363 from xinghai-sun/default_value

Reset default values of batch_size and num_proc_data, and fix an invalid URL for DS2.
Xinghai Sun committed 7 years ago (via GitHub)
commit 9ac15f8bd8

@@ -18,7 +18,7 @@ python -u test.py \
 --trainer_count=8 \
 --beam_size=300 \
 --num_proc_bsearch=8 \
---num_proc_data=4 \
+--num_proc_data=8 \
 --num_conv_layers=2 \
 --num_rnn_layers=3 \
 --rnn_layer_size=1024 \

@@ -27,7 +27,7 @@ python -u test.py \
 --trainer_count=8 \
 --beam_size=300 \
 --num_proc_bsearch=8 \
---num_proc_data=4 \
+--num_proc_data=8 \
 --num_conv_layers=2 \
 --num_rnn_layers=3 \
 --rnn_layer_size=1024 \
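The jump from --num_proc_data=4 to 8 brings the preprocessing workers in line with the 8 trainers, so featurization is less likely to starve the GPUs during decoding. As a rough illustration only (the real DS2 DataGenerator is more involved, and extract_features below is a hypothetical stand-in), a num_proc_data-style flag typically sizes a worker pool like this:

    # Hypothetical sketch: fan featurization out over num_proc_data workers.
    from multiprocessing import Pool

    def extract_features(wav_path):
        # Stand-in for the real spectrogram computation.
        return wav_path, len(wav_path)

    def preprocess(wav_paths, num_proc_data=8):
        # One worker process per CPU requested via --num_proc_data.
        with Pool(processes=num_proc_data) as pool:
            return pool.map(extract_features, wav_paths)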

@@ -9,7 +9,7 @@ python -u train.py \
 --batch_size=64 \
 --trainer_count=8 \
 --num_passes=50 \
---num_proc_data=8 \
+--num_proc_data=16 \
 --num_conv_layers=2 \
 --num_rnn_layers=3 \
 --rnn_layer_size=1024 \

@@ -18,7 +18,7 @@ python -u test.py \
 --trainer_count=8 \
 --beam_size=500 \
 --num_proc_bsearch=8 \
---num_proc_data=4 \
+--num_proc_data=8 \
 --num_conv_layers=2 \
 --num_rnn_layers=3 \
 --rnn_layer_size=2048 \

@@ -27,7 +27,7 @@ python -u test.py \
 --trainer_count=8 \
 --beam_size=500 \
 --num_proc_bsearch=8 \
---num_proc_data=4 \
+--num_proc_data=8 \
 --num_conv_layers=2 \
 --num_rnn_layers=3 \
 --rnn_layer_size=2048 \

@@ -6,10 +6,10 @@ cd ../.. > /dev/null
 # if you wish to resume from an exists model, uncomment --init_model_path
 CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \
 python -u train.py \
---batch_size=512 \
+--batch_size=160 \
 --trainer_count=8 \
 --num_passes=50 \
---num_proc_data=8 \
+--num_proc_data=16 \
 --num_conv_layers=2 \
 --num_rnn_layers=3 \
 --rnn_layer_size=2048 \
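With trainer_count=8 and an even data-parallel split of the global batch across GPUs (an assumption about PaddlePaddle's usual behavior, not something this diff shows), the change from 512 to 160 drops the per-GPU minibatch from 64 to 20 samples, and with it the peak memory per device:

    # Per-GPU minibatch, assuming an even data-parallel split across trainers.
    trainer_count = 8
    for global_batch in (512, 160):
        print(global_batch, '->', global_batch // trainer_count, 'samples per GPU')
    # 512 -> 64 samples per GPU
    # 160 -> 20 samples per GPU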

@@ -6,7 +6,7 @@ cd ../.. > /dev/null
 CUDA_VISIBLE_DEVICES=0,1,2,3 \
 python -u tools/tune.py \
 --num_batches=-1 \
---batch_size=256 \
+--batch_size=128 \
 --trainer_count=8 \
 --beam_size=500 \
 --num_proc_bsearch=12 \

@@ -18,7 +18,7 @@ python -u test.py \
 --trainer_count=8 \
 --beam_size=500 \
 --num_proc_bsearch=8 \
---num_proc_data=4 \
+--num_proc_data=8 \
 --num_conv_layers=2 \
 --num_rnn_layers=3 \
 --rnn_layer_size=2048 \

@@ -27,7 +27,7 @@ python -u test.py \
 --trainer_count=8 \
 --beam_size=500 \
 --num_proc_bsearch=8 \
---num_proc_data=4 \
+--num_proc_data=8 \
 --num_conv_layers=2 \
 --num_rnn_layers=3 \
 --rnn_layer_size=2048 \

@@ -17,7 +17,7 @@ add_arg = functools.partial(add_arguments, argparser=parser)
 add_arg('num_samples', int, 10, "# of samples to infer.")
 add_arg('trainer_count', int, 8, "# of Trainers (CPUs or GPUs).")
 add_arg('beam_size', int, 500, "Beam search width.")
-add_arg('num_proc_bsearch', int, 12, "# of CPUs for beam search.")
+add_arg('num_proc_bsearch', int, 8, "# of CPUs for beam search.")
 add_arg('num_conv_layers', int, 2, "# of convolution layers.")
 add_arg('num_rnn_layers', int, 3, "# of recurrent layers.")
 add_arg('rnn_layer_size', int, 2048, "# of recurrent cells per layer.")
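Each add_arg call above is a functools.partial over a small add_arguments helper that forwards to argparse. The sketch below approximates that pattern; the helper body is an assumption, since only the add_arg call signature is visible in this diff:

    import argparse
    import distutils.util
    import functools

    def add_arguments(argname, type, default, help, argparser, **kwargs):
        # Accept "true"/"false"-style strings for bool flags.
        type = distutils.util.strtobool if type == bool else type
        argparser.add_argument(
            '--' + argname,
            default=default,
            type=type,
            help=help + ' Default: %(default)s.',
            **kwargs)

    parser = argparse.ArgumentParser(description=__doc__)
    add_arg = functools.partial(add_arguments, argparser=parser)
    add_arg('num_proc_bsearch', int, 8, "# of CPUs for beam search.")
    args = parser.parse_args()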

@@ -2,7 +2,7 @@
 . ../../utils/utility.sh
-URL='http://cloud.dlnel.org/filepub/?uuid=8e3cf742-2ff3-41ce-a49d-f6158cc06a23'
+URL='http://cloud.dlnel.org/filepub/?uuid=6020a634-5399-4423-b021-c5ed32680fff'
 MD5=2ef08f8b608a7c555592161fc14d81a6
 TARGET=./librispeech_model.tar.gz
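The MD5 and TARGET are unchanged, so the new uuid presumably serves the same librispeech_model.tar.gz; only the stale download link is swapped out. The sourced utility.sh pairs the URL with an MD5 check; the equivalent logic in Python (an illustration, not the repo's shell helper) would be:

    import hashlib
    import urllib.request

    def download_and_verify(url, target, md5):
        # Fetch the tarball, then hash it in 1 MiB chunks.
        urllib.request.urlretrieve(url, target)
        digest = hashlib.md5()
        with open(target, 'rb') as f:
            for chunk in iter(lambda: f.read(1 << 20), b''):
                digest.update(chunk)
        if digest.hexdigest() != md5:
            raise IOError('MD5 mismatch for %s' % target)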

@@ -17,8 +17,8 @@ add_arg = functools.partial(add_arguments, argparser=parser)
 add_arg('batch_size', int, 128, "Minibatch size.")
 add_arg('trainer_count', int, 8, "# of Trainers (CPUs or GPUs).")
 add_arg('beam_size', int, 500, "Beam search width.")
-add_arg('num_proc_bsearch', int, 12, "# of CPUs for beam search.")
-add_arg('num_proc_data', int, 4, "# of CPUs for data preprocessing.")
+add_arg('num_proc_bsearch', int, 8, "# of CPUs for beam search.")
+add_arg('num_proc_data', int, 8, "# of CPUs for data preprocessing.")
 add_arg('num_conv_layers', int, 2, "# of convolution layers.")
 add_arg('num_rnn_layers', int, 3, "# of recurrent layers.")
 add_arg('rnn_layer_size', int, 2048, "# of recurrent cells per layer.")

@@ -27,7 +27,8 @@ add_arg('num_batches', int, -1, "# of batches tuning on. "
 add_arg('batch_size', int, 256, "# of samples per batch.")
 add_arg('trainer_count', int, 8, "# of Trainers (CPUs or GPUs).")
 add_arg('beam_size', int, 500, "Beam search width.")
-add_arg('num_proc_bsearch', int, 12, "# of CPUs for beam search.")
+add_arg('num_proc_bsearch', int, 8, "# of CPUs for beam search.")
+add_arg('num_proc_data', int, 8, "# of CPUs for data preprocessing.")
 add_arg('num_conv_layers', int, 2, "# of convolution layers.")
 add_arg('num_rnn_layers', int, 3, "# of recurrent layers.")
 add_arg('rnn_layer_size', int, 2048, "# of recurrent cells per layer.")
@@ -86,7 +87,7 @@ def tune():
         mean_std_filepath=args.mean_std_path,
         augmentation_config='{}',
         specgram_type=args.specgram_type,
-        num_threads=1)
+        num_threads=args.num_proc_data)
     audio_data = paddle.layer.data(
         name="audio_spectrogram",

@@ -16,7 +16,7 @@ add_arg = functools.partial(add_arguments, argparser=parser)
 add_arg('batch_size', int, 256, "Minibatch size.")
 add_arg('trainer_count', int, 8, "# of Trainers (CPUs or GPUs).")
 add_arg('num_passes', int, 200, "# of training epochs.")
-add_arg('num_proc_data', int, 8, "# of CPUs for data preprocessing.")
+add_arg('num_proc_data', int, 16, "# of CPUs for data preprocessing.")
 add_arg('num_conv_layers', int, 2, "# of convolution layers.")
 add_arg('num_rnn_layers', int, 3, "# of recurrent layers.")
 add_arg('rnn_layer_size', int, 2048, "# of recurrent cells per layer.")
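One caveat: these Python defaults only apply when a flag is omitted, and the run_*.sh scripts above pass every value explicitly, so scripted runs are governed by the shell files while a bare python train.py picks up the new defaults. A minimal argparse demonstration:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--num_proc_data', type=int, default=16)

    print(parser.parse_args([]).num_proc_data)                     # 16 (default applies)
    print(parser.parse_args(['--num_proc_data=8']).num_proc_data)  # 8 (explicit flag wins)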
