Re-style the config codes for tools in DS2.

pull/2/head
Xinghai Sun 7 years ago
parent 792129166a
commit 8b64ef29c8

@@ -13,8 +13,6 @@ from data_utils.data import DataGenerator
from model import DeepSpeech2Model
from data_utils.utils import read_manifest
parser = argparse.ArgumentParser(description=__doc__)
def add_arg(argname, type, default, help, **kwargs):
type = distutils.util.strtobool if type == bool else type
@@ -27,6 +25,7 @@ def add_arg(argname, type, default, help, **kwargs):
# yapf: disable
parser = argparse.ArgumentParser(description=__doc__)
add_arg('host_port', int, 8086, "Server's IP port.")
add_arg('beam_size', int, 500, "Beam search width.")
add_arg('num_conv_layers', int, 2, "# of convolution layers.")
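The add_arg helper this commit rolls out appears only in fragments across the hunks above and below. For reference, a minimal self-contained sketch of the pattern, assembled from the pieces shown in this diff (the exact bodies in the commit may differ slightly):

    import argparse
    import distutils.util

    parser = argparse.ArgumentParser(description=__doc__)

    def add_arg(argname, type, default, help, **kwargs):
        # Route bool through strtobool so "--flag True" / "--flag False"
        # parse correctly; plain bool() treats any non-empty string as True.
        type = distutils.util.strtobool if type == bool else type
        parser.add_argument(
            "--" + argname,
            default=default,
            type=type,
            help=help + ' Default: %(default)s.',
            **kwargs)

    # yapf: disable
    add_arg('beam_size', int, 500, "Beam search width.")
    # yapf: enable
    args = parser.parse_args()

The one-call-per-flag style keeps each option on a single aligned line, which is why yapf formatting is switched off around the block.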

@@ -5,15 +5,11 @@ from __future__ import print_function
import distutils.util
import argparse
import multiprocessing
import paddle.v2 as paddle
from data_utils.data import DataGenerator
from model import DeepSpeech2Model
from error_rate import wer, cer
NUM_CPU = multiprocessing.cpu_count() // 2
parser = argparse.ArgumentParser(description=__doc__)
def add_arg(argname, type, default, help, **kwargs):
type = distutils.util.strtobool if type == bool else type
@@ -26,11 +22,12 @@ def add_arg(argname, type, default, help, **kwargs):
# yapf: disable
parser = argparse.ArgumentParser(description=__doc__)
add_arg('batch_size', int, 128, "Minibatch size.")
add_arg('trainer_count', int, 8, "# of Trainers (CPUs or GPUs).")
add_arg('beam_size', int, 500, "Beam search width.")
add_arg('parallels_bsearch',int, NUM_CPU,"# of CPUs for beam search.")
add_arg('parallels_data', int, NUM_CPU,"# of CPUs for data preprocessing.")
add_arg('parallels_bsearch',int, 12, "# of CPUs for beam search.")
add_arg('parallels_data', int, 12, "# of CPUs for data preprocessing.")
add_arg('num_conv_layers', int, 2, "# of convolution layers.")
add_arg('num_rnn_layers', int, 3, "# of recurrent layers.")
add_arg('rnn_layer_size', int, 2048, "# of recurrent cells per layer.")
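A note on the bool branch in add_arg: distutils.util.strtobool returns 1 or 0 rather than True/False, and raises ValueError on unrecognized input. This is standard-library behavior:

    from distutils.util import strtobool

    # Accepted spellings (case-insensitive) for each truth value.
    for s in ('y', 'yes', 't', 'true', 'on', '1'):
        assert strtobool(s) == 1
    for s in ('n', 'no', 'f', 'false', 'off', '0'):
        assert strtobool(s) == 0
    # Anything else, e.g. strtobool('maybe'), raises ValueError.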

@@ -5,15 +5,11 @@ from __future__ import print_function
import argparse
import distutils.util
import multiprocessing
import paddle.v2 as paddle
from data_utils.data import DataGenerator
from model import DeepSpeech2Model
from error_rate import wer, cer
NUM_CPU = multiprocessing.cpu_count() // 2
parser = argparse.ArgumentParser(description=__doc__)
def add_arg(argname, type, default, help, **kwargs):
type = distutils.util.strtobool if type == bool else type
@@ -26,11 +22,11 @@ def add_arg(argname, type, default, help, **kwargs):
# yapf: disable
# overall configurations
parser = argparse.ArgumentParser(description=__doc__)
add_arg('num_samples', int, 10, "# of samples to infer.")
add_arg('trainer_count', int, 8, "# of Trainers (CPUs or GPUs).")
add_arg('beam_size', int, 500, "Beam search width.")
add_arg('parallels_bsearch',int, NUM_CPU,"# of CPUs for beam search.")
add_arg('parallels_bsearch',int, 12, "# of CPUs for beam search.")
add_arg('num_conv_layers', int, 2, "# of convolution layers.")
add_arg('num_rnn_layers', int, 3, "# of recurrent layers.")
add_arg('rnn_layer_size', int, 2048, "# of recurrent cells per layer.")

@@ -14,26 +14,31 @@ import os.path
import _init_paths
from data_utils import utils
def add_arg(argname, type, default, help, **kwargs):
type = distutils.util.strtobool if type == bool else type
parser.add_argument(
"--" + argname,
default=default,
type=type,
help=help + ' Default: %(default)s.',
**kwargs)
# yapf: disable
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--manifest_paths",
type=str,
help="Manifest paths for building vocabulary."
"You can provide multiple manifest files.",
nargs='+',
required=True)
parser.add_argument(
"--count_threshold",
default=0,
type=int,
help="Characters whose counts are below the threshold will be truncated. "
"(default: %(default)i)")
parser.add_argument(
"--vocab_path",
default='datasets/vocab/zh_vocab.txt',
type=str,
help="File path to write the vocabulary. (default: %(default)s)")
add_arg('count_threshold', int, 0, "Truncation threshold for char counts.")
add_arg('vocab_path', str,
'datasets/vocab/zh_vocab.txt',
"Filepath to write the vocabulary.")
add_arg('manifest_paths', str,
None,
"Filepaths of manifests for building vocabulary. "
"You can provide multiple manifest files.",
nargs='+',
required=True)
args = parser.parse_args()
# yapf: enable
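Because manifest_paths is declared with nargs='+' and required=True, the restyled flag still accepts several manifest files in one invocation. A quick illustration with an explicit argv (the .dev path is hypothetical):

    args = parser.parse_args([
        '--manifest_paths', 'datasets/manifest.train', 'datasets/manifest.dev',
        '--count_threshold', '10',
    ])
    print(args.manifest_paths)
    # ['datasets/manifest.train', 'datasets/manifest.dev']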
def count_manifest(counter, manifest_path):
@@ -43,7 +48,16 @@ def count_manifest(counter, manifest_path):
counter.update(char)
def print_arguments(args):
print("----------- Configuration Arguments -----------")
for arg, value in sorted(vars(args).iteritems()):
print("%s: %s" % (arg, value))
print("------------------------------------------------")
def main():
print_arguments(args)
counter = Counter()
for manifest_path in args.manifest_paths:
count_manifest(counter, manifest_path)
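count_manifest itself is shown only in fragments above. A minimal sketch of what such a character tally does, assuming utils.read_manifest (imported earlier in this diff) yields one dict per utterance with a 'text' transcript field:

    from collections import Counter
    from data_utils import utils

    def count_manifest(counter, manifest_path):
        # Tally per-character frequencies over every transcript.
        for line_json in utils.read_manifest(manifest_path):
            for char in line_json['text']:
                counter.update(char)

    counter = Counter()
    count_manifest(counter, 'datasets/manifest.train')
    # counter.most_common() now lists characters by frequency, ready for
    # the count_threshold truncation above.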

@@ -9,43 +9,45 @@ from data_utils.normalizer import FeatureNormalizer
from data_utils.augmentor.augmentation import AugmentationPipeline
from data_utils.featurizer.audio_featurizer import AudioFeaturizer
parser = argparse.ArgumentParser(
description='Computing mean and stddev for feature normalizer.')
parser.add_argument(
"--specgram_type",
default='linear',
type=str,
help="Feature type of audio data: 'linear' (power spectrum)"
" or 'mfcc'. (default: %(default)s)")
parser.add_argument(
"--manifest_path",
default='datasets/manifest.train',
type=str,
help="Manifest path for computing normalizer's mean and stddev."
"(default: %(default)s)")
parser.add_argument(
"--num_samples",
default=2000,
type=int,
help="Number of samples for computing mean and stddev. "
"(default: %(default)s)")
parser.add_argument(
"--augmentation_config",
default='{}',
type=str,
help="Augmentation configuration in json-format. "
"(default: %(default)s)")
parser.add_argument(
"--output_file",
default='mean_std.npz',
type=str,
help="Filepath to write mean and std to (.npz)."
"(default: %(default)s)")
def add_arg(argname, type, default, help, **kwargs):
type = distutils.util.strtobool if type == bool else type
parser.add_argument(
"--" + argname,
default=default,
type=type,
help=help + ' Default: %(default)s.',
**kwargs)
# yapf: disable
parser = argparse.ArgumentParser(description=__doc__)
add_arg('num_samples', int, 2000, "# of samples for computing statistics.")
add_arg('specgram_type', str,
'linear',
"Audio feature type. Options: linear, mfcc.",
choices=['linear', 'mfcc'])
add_arg('manifest_path', str,
'datasets/manifest.train',
"Filepath of manifest to compute normalizer's mean and stddev.")
add_arg('output_path', str,
'mean_std.npz',
"Filepath of write mean and stddev to (.npz).")
args = parser.parse_args()
# yapf: enable
def print_arguments(args):
print("----------- Configuration Arguments -----------")
for arg, value in sorted(vars(args).iteritems()):
print("%s: %s" % (arg, value))
print("------------------------------------------------")
def main():
augmentation_pipeline = AugmentationPipeline(args.augmentation_config)
print_arguments(args)
augmentation_pipeline = AugmentationPipeline('{}')
audio_featurizer = AudioFeaturizer(specgram_type=args.specgram_type)
def augment_and_featurize(audio_segment):
@@ -57,7 +59,7 @@ def main():
manifest_path=args.manifest_path,
featurize_func=augment_and_featurize,
num_samples=args.num_samples)
normalizer.write_to_file(args.output_file)
normalizer.write_to_file(args.output_path)
if __name__ == '__main__':
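FeatureNormalizer is defined outside this diff. As a rough sketch of the statistics it writes to the .npz file, assuming each featurized utterance arrives as a (num_frames, feature_dim) array:

    import numpy as np

    def compute_mean_std(feature_arrays):
        # Pool frames from all sampled utterances, then reduce over time.
        frames = np.concatenate(feature_arrays, axis=0)
        return np.mean(frames, axis=0), np.std(frames, axis=0)

    # mean, std = compute_mean_std(features)
    # np.savez('mean_std.npz', mean=mean, std=std)  # matches the .npz default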

@@ -5,14 +5,10 @@ from __future__ import print_function
import argparse
import distutils.util
import multiprocessing
import paddle.v2 as paddle
from model import DeepSpeech2Model
from data_utils.data import DataGenerator
NUM_CPU = multiprocessing.cpu_count() // 2
parser = argparse.ArgumentParser(description=__doc__)
def add_arg(argname, type, default, help, **kwargs):
type = distutils.util.strtobool if type == bool else type
@@ -25,10 +21,11 @@ def add_arg(argname, type, default, help, **kwargs):
# yapf: disable
parser = argparse.ArgumentParser(description=__doc__)
add_arg('batch_size', int, 256, "Minibatch size.")
add_arg('trainer_count', int, 8, "# of Trainers (CPUs or GPUs).")
add_arg('num_passes', int, 200, "# of training epochs.")
add_arg('parallels_data', int, NUM_CPU,"# of CPUs for data preprocessing.")
add_arg('parallels_data', int, 12, "# of CPUs for data preprocessing.")
add_arg('num_conv_layers', int, 2, "# of convolution layers.")
add_arg('num_rnn_layers', int, 3, "# of recurrent layers.")
add_arg('rnn_layer_size', int, 2048, "# of recurrent cells per layer.")

@@ -6,15 +6,11 @@ from __future__ import print_function
import numpy as np
import distutils.util
import argparse
import multiprocessing
import paddle.v2 as paddle
from data_utils.data import DataGenerator
from model import DeepSpeech2Model
from error_rate import wer
NUM_CPU = multiprocessing.cpu_count() // 2
parser = argparse.ArgumentParser(description=__doc__)
def add_arg(argname, type, default, help, **kwargs):
type = distutils.util.strtobool if type == bool else type
@@ -27,10 +23,11 @@ def add_arg(argname, type, default, help, **kwargs):
# yapf: disable
parser = argparse.ArgumentParser(description=__doc__)
add_arg('num_samples', int, 100, "# of samples to infer.")
add_arg('trainer_count', int, 8, "# of Trainers (CPUs or GPUs).")
add_arg('beam_size', int, 500, "Beam search width.")
add_arg('parallels_bsearch',int, NUM_CPU,"# of CPUs for beam search.")
add_arg('parallels_bsearch',int, 12, "# of CPUs for beam search.")
add_arg('num_conv_layers', int, 2, "# of convolution layers.")
add_arg('num_rnn_layers', int, 3, "# of recurrent layers.")
add_arg('rnn_layer_size', int, 2048, "# of recurrent cells per layer.")
@@ -73,6 +70,7 @@ args = parser.parse_args()
# yapf: enable
def tune():
"""Tune parameters alpha and beta on one minibatch."""
if not args.num_alphas >= 0:
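The tune() fragment guards num_alphas before building its search grid. A plausible sketch of the grid search this check protects, with hypothetical bounds and flag names (num_betas and the numeric ranges are assumptions, not shown in this hunk):

    import numpy as np

    if not args.num_alphas >= 0:
        raise ValueError("num_alphas must be non-negative!")

    # Hypothetical sweep: try every (alpha, beta) pair and keep the one
    # with the lowest WER on the held-out minibatch.
    params_grid = [(alpha, beta)
                   for alpha in np.linspace(0.1, 0.45, args.num_alphas)
                   for beta in np.linspace(0.05, 0.3, args.num_betas)]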
