Set the process daemon property and reset the default value of the num_proc_data argument.

pull/2/head
Xinghai Sun 7 years ago
parent 64ab19c165
commit be1fbc68a1

@@ -133,6 +133,7 @@ def xmap_readers_mp(mapper, reader, process_num, buffer_size, order=False):
     # start a read worker in a process
     target = order_read_worker if order else read_worker
     p = Process(target=target, args=(reader, in_queue))
+    p.daemon = True
     p.start()
     # start handle_workers with multiple processes
@@ -143,6 +144,7 @@ def xmap_readers_mp(mapper, reader, process_num, buffer_size, order=False):
         Process(target=target, args=args) for _ in xrange(process_num)
     ]
     for w in workers:
+        w.daemon = True
         w.start()
     # get results
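Setting daemon = True before start() means these workers are terminated automatically when the parent interpreter exits, so a crashed or finished training run no longer leaves orphaned reader/mapper processes behind. A minimal standalone sketch of that behavior (not the repository's code; the worker and queue names are illustrative):

    from multiprocessing import Process, Queue
    import time

    def reader_worker(q):
        # Endless producer: without daemon=True, this loop would keep
        # the program alive even after the main process has returned.
        while True:
            q.put('sample')
            time.sleep(1)

    if __name__ == '__main__':
        q = Queue()
        p = Process(target=reader_worker, args=(q,))
        p.daemon = True  # must be set before start(); daemons die with the parent
        p.start()
        print(q.get())   # read one item, then exit; the daemon is killed with us

Note that daemonic processes cannot spawn children of their own, which is fine here because each worker only reads from and writes to queues.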

@@ -9,7 +9,7 @@ python -u train.py \
 --batch_size=64 \
 --trainer_count=8 \
 --num_passes=50 \
---num_proc_data=12 \
+--num_proc_data=8 \
 --num_conv_layers=2 \
 --num_rnn_layers=3 \
 --rnn_layer_size=1024 \

@@ -9,7 +9,7 @@ python -u train.py \
 --batch_size=512 \
 --trainer_count=8 \
 --num_passes=50 \
---num_proc_data=12 \
+--num_proc_data=8 \
 --num_conv_layers=2 \
 --num_rnn_layers=3 \
 --rnn_layer_size=2048 \
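Both run scripts lower --num_proc_data from 12 to 8 to match the new defaults below. A hedged sketch of how such a flag is typically consumed (the reader and mapper names here are assumptions, not the repository's actual wiring):

    # Hypothetical wiring: args.num_proc_data caps the preprocessing workers
    # that xmap_readers_mp spawns (shown in the first hunk above).
    batch_reader = xmap_readers_mp(
        mapper=augment_and_featurize,    # assumed per-sample preprocessing fn
        reader=raw_manifest_reader,      # assumed raw-sample generator
        process_num=args.num_proc_data,  # the flag tuned in this commit
        buffer_size=1024)                # assumed queue depth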

@@ -18,7 +18,7 @@ add_arg('batch_size', int, 128, "Minibatch size.")
 add_arg('trainer_count', int, 8, "# of Trainers (CPUs or GPUs).")
 add_arg('beam_size', int, 500, "Beam search width.")
 add_arg('num_proc_bsearch', int, 12, "# of CPUs for beam search.")
-add_arg('num_proc_data', int, 12, "# of CPUs for data preprocessing.")
+add_arg('num_proc_data', int, 4, "# of CPUs for data preprocessing.")
 add_arg('num_conv_layers', int, 2, "# of convolution layers.")
 add_arg('num_rnn_layers', int, 3, "# of recurrent layers.")
 add_arg('rnn_layer_size', int, 2048, "# of recurrent cells per layer.")

@@ -16,7 +16,7 @@ add_arg = functools.partial(add_arguments, argparser=parser)
 add_arg('batch_size', int, 256, "Minibatch size.")
 add_arg('trainer_count', int, 8, "# of Trainers (CPUs or GPUs).")
 add_arg('num_passes', int, 200, "# of training epochs.")
-add_arg('num_proc_data', int, 12, "# of CPUs for data preprocessing.")
+add_arg('num_proc_data', int, 8, "# of CPUs for data preprocessing.")
 add_arg('num_conv_layers', int, 2, "# of convolution layers.")
 add_arg('num_rnn_layers', int, 3, "# of recurrent layers.")
 add_arg('rnn_layer_size', int, 2048, "# of recurrent cells per layer.")
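The add_arg helper seen in these hunks is a functools.partial over an add_arguments utility (visible in the last hunk's header). A sketch of what that utility plausibly looks like, inferred from the call sites; the exact signature is an assumption:

    import argparse
    import functools

    def add_arguments(argname, type, default, help, argparser, **kwargs):
        # Inferred helper: registers --<argname> with a typed default and
        # appends the default value to the help string.
        argparser.add_argument(
            '--' + argname, type=type, default=default,
            help=help + ' Default: %(default)s.', **kwargs)

    parser = argparse.ArgumentParser()
    add_arg = functools.partial(add_arguments, argparser=parser)
    add_arg('num_proc_data', int, 8, "# of CPUs for data preprocessing.")

    args = parser.parse_args(['--num_proc_data', '4'])
    print(args.num_proc_data)  # -> 4

The partial fixes argparser once so each call site stays a one-liner, which is why this commit only has to touch the default values.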
