@@ -17,27 +17,27 @@ from utils.utility import add_arguments, print_arguments
 parser = argparse.ArgumentParser(description=__doc__)
 add_arg = functools.partial(add_arguments, argparser=parser)
 # yapf: disable
 add_arg('num_batches',      int,    -1,     "# of batches tuning on. "
                                             "Default -1, on whole dev set.")
 add_arg('batch_size',       int,    256,    "# of samples per batch.")
 add_arg('trainer_count',    int,    8,      "# of Trainers (CPUs or GPUs).")
 add_arg('beam_size',        int,    500,    "Beam search width.")
 add_arg('num_proc_bsearch', int,    12,     "# of CPUs for beam search.")
 add_arg('num_conv_layers',  int,    2,      "# of convolution layers.")
 add_arg('num_rnn_layers',   int,    3,      "# of recurrent layers.")
 add_arg('rnn_layer_size',   int,    2048,   "# of recurrent cells per layer.")
 add_arg('num_alphas',       int,    45,     "# of alpha candidates for tuning.")
 add_arg('num_betas',        int,    8,      "# of beta candidates for tuning.")
 add_arg('alpha_from',       float,  1.0,    "Where alpha starts tuning from.")
 add_arg('alpha_to',         float,  3.2,    "Where alpha ends tuning with.")
 add_arg('beta_from',        float,  0.1,    "Where beta starts tuning from.")
 add_arg('beta_to',          float,  0.45,   "Where beta ends tuning with.")
 add_arg('cutoff_prob',      float,  1.0,    "Cutoff probability for pruning.")
 add_arg('cutoff_top_n',     int,    40,     "Cutoff number for pruning.")
 add_arg('use_gru',          bool,   False,  "Use GRUs instead of simple RNNs.")
 add_arg('use_gpu',          bool,   True,   "Use GPU or not.")
 add_arg('share_rnn_weights',bool,   True,   "Share input-hidden weights across "
                                             "bi-directional RNNs. Not for GRU.")
 add_arg('tune_manifest',    str,
         'data/librispeech/manifest.dev-clean',
         "Filepath of manifest to tune.")
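(For context: `add_arg` is a `functools.partial` over the `add_arguments` helper imported at the top of this hunk, so each call registers one argparse flag. A minimal runnable sketch of that pattern follows; the `strtobool` handling for bool flags is an assumption about `utils.utility`, not something this diff shows.)

    import argparse
    import functools
    from distutils.util import strtobool

    def add_arguments(argname, type, default, help, argparser):
        # Assumed behavior: bool flags are parsed with strtobool so that
        # e.g. "--use_gpu False" works from the shell; other types pass through.
        type = strtobool if type == bool else type
        argparser.add_argument("--" + argname, default=default, type=type, help=help)

    parser = argparse.ArgumentParser(description="tuning example")
    add_arg = functools.partial(add_arguments, argparser=parser)
    add_arg('beam_size', int, 500, "Beam search width.")
    args = parser.parse_args(['--beam_size', '300'])
    assert args.beam_size == 300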
@@ -140,13 +140,11 @@ def tune():
             for target, result in zip(target_transcripts, result_transcripts):
                 err_sum[index] += error_rate_func(target, result)
             err_ave[index] = err_sum[index] / num_ins
-            # print("alpha = %f, beta = %f, WER = %f" %
-            #       (alpha, beta, err_ave[index]))
             if index % 2 == 0:
                 sys.stdout.write('.')
                 sys.stdout.flush()
 
-        # output on-line tuning result at the the end of current batch
+        # output on-line tuning result at the end of current batch
         err_ave_min = min(err_ave)
         min_index = err_ave.index(err_ave_min)
         print("\nBatch %d [%d/?], current opt (alpha, beta) = (%s, %s), "
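(The two deleted lines above dropped a commented-out per-point print; `err_ave[index]` is already a running mean over the `num_ins` utterances decoded so far, so the batch-level summary below it suffices. A minimal sketch of the bookkeeping this hunk relies on, assuming `params_grid` is built with `numpy.linspace` from the flags defined earlier; the construction itself is not shown in this diff.)

    import itertools
    import numpy as np

    # Candidate grid from the defaults above: 45 alphas in [1.0, 3.2] and
    # 8 betas in [0.1, 0.45], i.e. 360 (alpha, beta) points in total.
    alphas = np.linspace(1.0, 3.2, 45)
    betas = np.linspace(0.1, 0.45, 8)
    params_grid = list(itertools.product(alphas, betas))

    # Per-point accumulators: err_sum[i] sums the error of every utterance
    # decoded so far with params_grid[i]; dividing by the utterance count
    # num_ins yields the running average err_ave[i] used in the loop above.
    err_sum = [0.0] * len(params_grid)
    err_ave = [0.0] * len(params_grid)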
@@ -156,7 +154,7 @@ def tune():
               args.error_rate_type, err_ave_min))
         cur_batch += 1
 
-    # output WER/CER at every point
+    # output WER/CER at every (alpha, beta)
     print("\nFinal %s:\n" % args.error_rate_type)
     for index in xrange(len(params_grid)):
         print("(alpha, beta) = (%s, %s), [%s] = %f"
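(For reference, a typical invocation of this tuning script with the flags defined above might look like the following; the script path is illustrative, and the manifest path is just the default from the argument list.)

    python tune.py \
        --use_gpu True \
        --trainer_count 8 \
        --num_alphas 45 --alpha_from 1.0 --alpha_to 3.2 \
        --num_betas 8 --beta_from 0.1 --beta_to 0.45 \
        --tune_manifest data/librispeech/manifest.dev-clean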