commit
05288cd381
@ -0,0 +1,17 @@
|
|||||||
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
from .batch_beam_search import BatchBeamSearch
|
||||||
|
from .beam_search import beam_search
|
||||||
|
from .beam_search import BeamSearch
|
||||||
|
from .beam_search import Hypothesis
|
@ -0,0 +1,17 @@
|
|||||||
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
|
||||||
|
class BatchBeamSearch():
|
||||||
|
pass
|
@ -0,0 +1,195 @@
|
|||||||
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
"""V2 backend for `asr_recog.py` using py:class:`decoders.beam_search.BeamSearch`."""
|
||||||
|
import json
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
import jsonlines
|
||||||
|
import paddle
|
||||||
|
import yaml
|
||||||
|
from yacs.config import CfgNode
|
||||||
|
|
||||||
|
from .beam_search import BatchBeamSearch
|
||||||
|
from .beam_search import BeamSearch
|
||||||
|
from .scorers.length_bonus import LengthBonus
|
||||||
|
from .scorers.scorer_interface import BatchScorerInterface
|
||||||
|
from .utils import add_results_to_json
|
||||||
|
from deepspeech.exps import dynamic_import_tester
|
||||||
|
from deepspeech.io.reader import LoadInputsAndTargets
|
||||||
|
from deepspeech.models.asr_interface import ASRInterface
|
||||||
|
from deepspeech.utils.log import Log
|
||||||
|
# from espnet.asr.asr_utils import get_model_conf
|
||||||
|
# from espnet.asr.asr_utils import torch_load
|
||||||
|
# from espnet.nets.lm_interface import dynamic_import_lm
|
||||||
|
|
||||||
|
logger = Log(__name__).getlog()
|
||||||
|
|
||||||
|
# NOTE: you need this func to generate our sphinx doc
|
||||||
|
|
||||||
|
|
||||||
|
def load_trained_model(args):
|
||||||
|
args.nprocs = args.ngpu
|
||||||
|
confs = CfgNode()
|
||||||
|
confs.set_new_allowed(True)
|
||||||
|
confs.merge_from_file(args.model_conf)
|
||||||
|
class_obj = dynamic_import_tester(args.model_name)
|
||||||
|
exp = class_obj(confs, args)
|
||||||
|
with exp.eval():
|
||||||
|
exp.setup()
|
||||||
|
exp.restore()
|
||||||
|
char_list = exp.args.char_list
|
||||||
|
model = exp.model
|
||||||
|
return model, char_list, exp, confs
|
||||||
|
|
||||||
|
|
||||||
|
def recog_v2(args):
|
||||||
|
"""Decode with custom models that implements ScorerInterface.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
args (namespace): The program arguments.
|
||||||
|
See py:func:`bin.asr_recog.get_parser` for details
|
||||||
|
|
||||||
|
"""
|
||||||
|
logger.warning("experimental API for custom LMs is selected by --api v2")
|
||||||
|
if args.batchsize > 1:
|
||||||
|
raise NotImplementedError("multi-utt batch decoding is not implemented")
|
||||||
|
if args.streaming_mode is not None:
|
||||||
|
raise NotImplementedError("streaming mode is not implemented")
|
||||||
|
if args.word_rnnlm:
|
||||||
|
raise NotImplementedError("word LM is not implemented")
|
||||||
|
|
||||||
|
# set_deterministic(args)
|
||||||
|
model, char_list, exp, confs = load_trained_model(args)
|
||||||
|
assert isinstance(model, ASRInterface)
|
||||||
|
|
||||||
|
load_inputs_and_targets = LoadInputsAndTargets(
|
||||||
|
mode="asr",
|
||||||
|
load_output=False,
|
||||||
|
sort_in_input_length=False,
|
||||||
|
preprocess_conf=confs.collator.augmentation_config
|
||||||
|
if args.preprocess_conf is None else args.preprocess_conf,
|
||||||
|
preprocess_args={"train": False},
|
||||||
|
)
|
||||||
|
|
||||||
|
if args.rnnlm:
|
||||||
|
lm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)
|
||||||
|
# NOTE: for a compatibility with less than 0.5.0 version models
|
||||||
|
lm_model_module = getattr(lm_args, "model_module", "default")
|
||||||
|
lm_class = dynamic_import_lm(lm_model_module, lm_args.backend)
|
||||||
|
lm = lm_class(len(char_list), lm_args)
|
||||||
|
torch_load(args.rnnlm, lm)
|
||||||
|
lm.eval()
|
||||||
|
else:
|
||||||
|
lm = None
|
||||||
|
|
||||||
|
if args.ngram_model:
|
||||||
|
from .scorers.ngram import NgramFullScorer
|
||||||
|
from .scorers.ngram import NgramPartScorer
|
||||||
|
|
||||||
|
if args.ngram_scorer == "full":
|
||||||
|
ngram = NgramFullScorer(args.ngram_model, char_list)
|
||||||
|
else:
|
||||||
|
ngram = NgramPartScorer(args.ngram_model, char_list)
|
||||||
|
else:
|
||||||
|
ngram = None
|
||||||
|
|
||||||
|
scorers = model.scorers() # decoder
|
||||||
|
scorers["lm"] = lm
|
||||||
|
scorers["ngram"] = ngram
|
||||||
|
scorers["length_bonus"] = LengthBonus(len(char_list))
|
||||||
|
weights = dict(
|
||||||
|
decoder=1.0 - args.ctc_weight,
|
||||||
|
ctc=args.ctc_weight,
|
||||||
|
lm=args.lm_weight,
|
||||||
|
ngram=args.ngram_weight,
|
||||||
|
length_bonus=args.penalty,
|
||||||
|
)
|
||||||
|
beam_search = BeamSearch(
|
||||||
|
beam_size=args.beam_size,
|
||||||
|
vocab_size=len(char_list),
|
||||||
|
weights=weights,
|
||||||
|
scorers=scorers,
|
||||||
|
sos=model.sos,
|
||||||
|
eos=model.eos,
|
||||||
|
token_list=char_list,
|
||||||
|
pre_beam_score_key=None if args.ctc_weight == 1.0 else "full",
|
||||||
|
)
|
||||||
|
|
||||||
|
# TODO(karita): make all scorers batchfied
|
||||||
|
if args.batchsize == 1:
|
||||||
|
non_batch = [
|
||||||
|
k for k, v in beam_search.full_scorers.items()
|
||||||
|
if not isinstance(v, BatchScorerInterface)
|
||||||
|
]
|
||||||
|
if len(non_batch) == 0:
|
||||||
|
beam_search.__class__ = BatchBeamSearch
|
||||||
|
logger.info("BatchBeamSearch implementation is selected.")
|
||||||
|
else:
|
||||||
|
logger.warning(f"As non-batch scorers {non_batch} are found, "
|
||||||
|
f"fall back to non-batch implementation.")
|
||||||
|
|
||||||
|
if args.ngpu > 1:
|
||||||
|
raise NotImplementedError("only single GPU decoding is supported")
|
||||||
|
if args.ngpu == 1:
|
||||||
|
device = "gpu:0"
|
||||||
|
else:
|
||||||
|
device = "cpu"
|
||||||
|
paddle.set_device(device)
|
||||||
|
dtype = getattr(paddle, args.dtype)
|
||||||
|
logger.info(f"Decoding device={device}, dtype={dtype}")
|
||||||
|
model.to(device=device, dtype=dtype)
|
||||||
|
model.eval()
|
||||||
|
beam_search.to(device=device, dtype=dtype)
|
||||||
|
beam_search.eval()
|
||||||
|
|
||||||
|
# read json data
|
||||||
|
js = []
|
||||||
|
with jsonlines.open(args.recog_json, "r") as reader:
|
||||||
|
for item in reader:
|
||||||
|
js.append(item)
|
||||||
|
# jsonlines to dict, key by 'utt', value by jsonline
|
||||||
|
js = {item['utt']: item for item in js}
|
||||||
|
|
||||||
|
new_js = {}
|
||||||
|
with paddle.no_grad():
|
||||||
|
with jsonlines.open(args.result_label, "w") as f:
|
||||||
|
for idx, name in enumerate(js.keys(), 1):
|
||||||
|
logger.info(f"({idx}/{len(js.keys())}) decoding " + name)
|
||||||
|
batch = [(name, js[name])]
|
||||||
|
feat = load_inputs_and_targets(batch)[0][0]
|
||||||
|
logger.info(f'feat: {feat.shape}')
|
||||||
|
enc = model.encode(paddle.to_tensor(feat).to(dtype))
|
||||||
|
logger.info(f'eout: {enc.shape}')
|
||||||
|
nbest_hyps = beam_search(x=enc,
|
||||||
|
maxlenratio=args.maxlenratio,
|
||||||
|
minlenratio=args.minlenratio)
|
||||||
|
nbest_hyps = [
|
||||||
|
h.asdict()
|
||||||
|
for h in nbest_hyps[:min(len(nbest_hyps), args.nbest)]
|
||||||
|
]
|
||||||
|
new_js[name] = add_results_to_json(js[name], nbest_hyps,
|
||||||
|
char_list)
|
||||||
|
|
||||||
|
item = new_js[name]['output'][0] # 1-best
|
||||||
|
ref = item['text']
|
||||||
|
rec_text = item['rec_text'].replace('▁',
|
||||||
|
' ').replace('<eos>',
|
||||||
|
'').strip()
|
||||||
|
rec_tokenid = list(map(int, item['rec_tokenid'].split()))
|
||||||
|
f.write({
|
||||||
|
"utt": name,
|
||||||
|
"refs": [ref],
|
||||||
|
"hyps": [rec_text],
|
||||||
|
"hyps_tokenid": [rec_tokenid],
|
||||||
|
})
|
@ -0,0 +1,376 @@
|
|||||||
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
"""End-to-end speech recognition model decoding script."""
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import random
|
||||||
|
import sys
|
||||||
|
from distutils.util import strtobool
|
||||||
|
|
||||||
|
import configargparse
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
from .recog import recog_v2
|
||||||
|
|
||||||
|
|
||||||
|
def get_parser():
|
||||||
|
"""Get default arguments."""
|
||||||
|
parser = configargparse.ArgumentParser(
|
||||||
|
description="Transcribe text from speech using "
|
||||||
|
"a speech recognition model on one CPU or GPU",
|
||||||
|
config_file_parser_class=configargparse.YAMLConfigFileParser,
|
||||||
|
formatter_class=configargparse.ArgumentDefaultsHelpFormatter, )
|
||||||
|
parser.add(
|
||||||
|
'--model-name',
|
||||||
|
type=str,
|
||||||
|
default='u2_kaldi',
|
||||||
|
help='model name, e.g: deepspeech2, u2, u2_kaldi, u2_st')
|
||||||
|
# general configuration
|
||||||
|
parser.add("--config", is_config_file=True, help="Config file path")
|
||||||
|
parser.add(
|
||||||
|
"--config2",
|
||||||
|
is_config_file=True,
|
||||||
|
help="Second config file path that overwrites the settings in `--config`",
|
||||||
|
)
|
||||||
|
parser.add(
|
||||||
|
"--config3",
|
||||||
|
is_config_file=True,
|
||||||
|
help="Third config file path that overwrites the settings "
|
||||||
|
"in `--config` and `--config2`", )
|
||||||
|
|
||||||
|
parser.add_argument("--ngpu", type=int, default=0, help="Number of GPUs")
|
||||||
|
parser.add_argument(
|
||||||
|
"--dtype",
|
||||||
|
choices=("float16", "float32", "float64"),
|
||||||
|
default="float32",
|
||||||
|
help="Float precision (only available in --api v2)", )
|
||||||
|
parser.add_argument("--debugmode", type=int, default=1, help="Debugmode")
|
||||||
|
parser.add_argument("--seed", type=int, default=1, help="Random seed")
|
||||||
|
parser.add_argument(
|
||||||
|
"--verbose", "-V", type=int, default=2, help="Verbose option")
|
||||||
|
parser.add_argument(
|
||||||
|
"--batchsize",
|
||||||
|
type=int,
|
||||||
|
default=1,
|
||||||
|
help="Batch size for beam search (0: means no batch processing)", )
|
||||||
|
parser.add_argument(
|
||||||
|
"--preprocess-conf",
|
||||||
|
type=str,
|
||||||
|
default=None,
|
||||||
|
help="The configuration file for the pre-processing", )
|
||||||
|
parser.add_argument(
|
||||||
|
"--api",
|
||||||
|
default="v2",
|
||||||
|
choices=["v2"],
|
||||||
|
help="Beam search APIs "
|
||||||
|
"v2: Experimental API. It supports any models that implements ScorerInterface.",
|
||||||
|
)
|
||||||
|
# task related
|
||||||
|
parser.add_argument(
|
||||||
|
"--recog-json", type=str, help="Filename of recognition data (json)")
|
||||||
|
parser.add_argument(
|
||||||
|
"--result-label",
|
||||||
|
type=str,
|
||||||
|
required=True,
|
||||||
|
help="Filename of result label data (json)", )
|
||||||
|
# model (parameter) related
|
||||||
|
parser.add_argument(
|
||||||
|
"--model",
|
||||||
|
type=str,
|
||||||
|
required=True,
|
||||||
|
help="Model file parameters to read")
|
||||||
|
parser.add_argument(
|
||||||
|
"--model-conf", type=str, default=None, help="Model config file")
|
||||||
|
parser.add_argument(
|
||||||
|
"--num-spkrs",
|
||||||
|
type=int,
|
||||||
|
default=1,
|
||||||
|
choices=[1, 2],
|
||||||
|
help="Number of speakers in the speech", )
|
||||||
|
parser.add_argument(
|
||||||
|
"--num-encs",
|
||||||
|
default=1,
|
||||||
|
type=int,
|
||||||
|
help="Number of encoders in the model.")
|
||||||
|
# search related
|
||||||
|
parser.add_argument(
|
||||||
|
"--nbest", type=int, default=1, help="Output N-best hypotheses")
|
||||||
|
parser.add_argument("--beam-size", type=int, default=1, help="Beam size")
|
||||||
|
parser.add_argument(
|
||||||
|
"--penalty", type=float, default=0.0, help="Incertion penalty")
|
||||||
|
parser.add_argument(
|
||||||
|
"--maxlenratio",
|
||||||
|
type=float,
|
||||||
|
default=0.0,
|
||||||
|
help="""Input length ratio to obtain max output length.
|
||||||
|
If maxlenratio=0.0 (default), it uses a end-detect function
|
||||||
|
to automatically find maximum hypothesis lengths.
|
||||||
|
If maxlenratio<0.0, its absolute value is interpreted
|
||||||
|
as a constant max output length""", )
|
||||||
|
parser.add_argument(
|
||||||
|
"--minlenratio",
|
||||||
|
type=float,
|
||||||
|
default=0.0,
|
||||||
|
help="Input length ratio to obtain min output length", )
|
||||||
|
parser.add_argument(
|
||||||
|
"--ctc-weight",
|
||||||
|
type=float,
|
||||||
|
default=0.0,
|
||||||
|
help="CTC weight in joint decoding")
|
||||||
|
parser.add_argument(
|
||||||
|
"--weights-ctc-dec",
|
||||||
|
type=float,
|
||||||
|
action="append",
|
||||||
|
help="ctc weight assigned to each encoder during decoding."
|
||||||
|
"[in multi-encoder mode only]", )
|
||||||
|
parser.add_argument(
|
||||||
|
"--ctc-window-margin",
|
||||||
|
type=int,
|
||||||
|
default=0,
|
||||||
|
help="""Use CTC window with margin parameter to accelerate
|
||||||
|
CTC/attention decoding especially on GPU. Smaller magin
|
||||||
|
makes decoding faster, but may increase search errors.
|
||||||
|
If margin=0 (default), this function is disabled""", )
|
||||||
|
# transducer related
|
||||||
|
parser.add_argument(
|
||||||
|
"--search-type",
|
||||||
|
type=str,
|
||||||
|
default="default",
|
||||||
|
choices=["default", "nsc", "tsd", "alsd", "maes"],
|
||||||
|
help="""Type of beam search implementation to use during inference.
|
||||||
|
Can be either: default beam search ("default"),
|
||||||
|
N-Step Constrained beam search ("nsc"), Time-Synchronous Decoding ("tsd"),
|
||||||
|
Alignment-Length Synchronous Decoding ("alsd") or
|
||||||
|
modified Adaptive Expansion Search ("maes").""", )
|
||||||
|
parser.add_argument(
|
||||||
|
"--nstep",
|
||||||
|
type=int,
|
||||||
|
default=1,
|
||||||
|
help="""Number of expansion steps allowed in NSC beam search or mAES
|
||||||
|
(nstep > 0 for NSC and nstep > 1 for mAES).""", )
|
||||||
|
parser.add_argument(
|
||||||
|
"--prefix-alpha",
|
||||||
|
type=int,
|
||||||
|
default=2,
|
||||||
|
help="Length prefix difference allowed in NSC beam search or mAES.", )
|
||||||
|
parser.add_argument(
|
||||||
|
"--max-sym-exp",
|
||||||
|
type=int,
|
||||||
|
default=2,
|
||||||
|
help="Number of symbol expansions allowed in TSD.", )
|
||||||
|
parser.add_argument(
|
||||||
|
"--u-max",
|
||||||
|
type=int,
|
||||||
|
default=400,
|
||||||
|
help="Length prefix difference allowed in ALSD.", )
|
||||||
|
parser.add_argument(
|
||||||
|
"--expansion-gamma",
|
||||||
|
type=float,
|
||||||
|
default=2.3,
|
||||||
|
help="Allowed logp difference for prune-by-value method in mAES.", )
|
||||||
|
parser.add_argument(
|
||||||
|
"--expansion-beta",
|
||||||
|
type=int,
|
||||||
|
default=2,
|
||||||
|
help="""Number of additional candidates for expanded hypotheses
|
||||||
|
selection in mAES.""", )
|
||||||
|
parser.add_argument(
|
||||||
|
"--score-norm",
|
||||||
|
type=strtobool,
|
||||||
|
nargs="?",
|
||||||
|
default=True,
|
||||||
|
help="Normalize final hypotheses' score by length", )
|
||||||
|
parser.add_argument(
|
||||||
|
"--softmax-temperature",
|
||||||
|
type=float,
|
||||||
|
default=1.0,
|
||||||
|
help="Penalization term for softmax function.", )
|
||||||
|
# rnnlm related
|
||||||
|
parser.add_argument(
|
||||||
|
"--rnnlm", type=str, default=None, help="RNNLM model file to read")
|
||||||
|
parser.add_argument(
|
||||||
|
"--rnnlm-conf",
|
||||||
|
type=str,
|
||||||
|
default=None,
|
||||||
|
help="RNNLM model config file to read")
|
||||||
|
parser.add_argument(
|
||||||
|
"--word-rnnlm",
|
||||||
|
type=str,
|
||||||
|
default=None,
|
||||||
|
help="Word RNNLM model file to read")
|
||||||
|
parser.add_argument(
|
||||||
|
"--word-rnnlm-conf",
|
||||||
|
type=str,
|
||||||
|
default=None,
|
||||||
|
help="Word RNNLM model config file to read", )
|
||||||
|
parser.add_argument(
|
||||||
|
"--word-dict", type=str, default=None, help="Word list to read")
|
||||||
|
parser.add_argument(
|
||||||
|
"--lm-weight", type=float, default=0.1, help="RNNLM weight")
|
||||||
|
# ngram related
|
||||||
|
parser.add_argument(
|
||||||
|
"--ngram-model",
|
||||||
|
type=str,
|
||||||
|
default=None,
|
||||||
|
help="ngram model file to read")
|
||||||
|
parser.add_argument(
|
||||||
|
"--ngram-weight", type=float, default=0.1, help="ngram weight")
|
||||||
|
parser.add_argument(
|
||||||
|
"--ngram-scorer",
|
||||||
|
type=str,
|
||||||
|
default="part",
|
||||||
|
choices=("full", "part"),
|
||||||
|
help="""if the ngram is set as a part scorer, similar with CTC scorer,
|
||||||
|
ngram scorer only scores topK hypethesis.
|
||||||
|
if the ngram is set as full scorer, ngram scorer scores all hypthesis
|
||||||
|
the decoding speed of part scorer is musch faster than full one""",
|
||||||
|
)
|
||||||
|
# streaming related
|
||||||
|
parser.add_argument(
|
||||||
|
"--streaming-mode",
|
||||||
|
type=str,
|
||||||
|
default=None,
|
||||||
|
choices=["window", "segment"],
|
||||||
|
help="""Use streaming recognizer for inference.
|
||||||
|
`--batchsize` must be set to 0 to enable this mode""", )
|
||||||
|
parser.add_argument(
|
||||||
|
"--streaming-window", type=int, default=10, help="Window size")
|
||||||
|
parser.add_argument(
|
||||||
|
"--streaming-min-blank-dur",
|
||||||
|
type=int,
|
||||||
|
default=10,
|
||||||
|
help="Minimum blank duration threshold", )
|
||||||
|
parser.add_argument(
|
||||||
|
"--streaming-onset-margin", type=int, default=1, help="Onset margin")
|
||||||
|
parser.add_argument(
|
||||||
|
"--streaming-offset-margin", type=int, default=1, help="Offset margin")
|
||||||
|
# non-autoregressive related
|
||||||
|
# Mask CTC related. See https://arxiv.org/abs/2005.08700 for the detail.
|
||||||
|
parser.add_argument(
|
||||||
|
"--maskctc-n-iterations",
|
||||||
|
type=int,
|
||||||
|
default=10,
|
||||||
|
help="Number of decoding iterations."
|
||||||
|
"For Mask CTC, set 0 to predict 1 mask/iter.", )
|
||||||
|
parser.add_argument(
|
||||||
|
"--maskctc-probability-threshold",
|
||||||
|
type=float,
|
||||||
|
default=0.999,
|
||||||
|
help="Threshold probability for CTC output", )
|
||||||
|
# quantize model related
|
||||||
|
parser.add_argument(
|
||||||
|
"--quantize-config",
|
||||||
|
nargs="*",
|
||||||
|
help="Quantize config list. E.g.: --quantize-config=[Linear,LSTM,GRU]",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--quantize-dtype",
|
||||||
|
type=str,
|
||||||
|
default="qint8",
|
||||||
|
help="Dtype dynamic quantize")
|
||||||
|
parser.add_argument(
|
||||||
|
"--quantize-asr-model",
|
||||||
|
type=bool,
|
||||||
|
default=False,
|
||||||
|
help="Quantize asr model", )
|
||||||
|
parser.add_argument(
|
||||||
|
"--quantize-lm-model",
|
||||||
|
type=bool,
|
||||||
|
default=False,
|
||||||
|
help="Quantize lm model", )
|
||||||
|
return parser
|
||||||
|
|
||||||
|
|
||||||
|
def main(args):
|
||||||
|
"""Run the main decoding function."""
|
||||||
|
parser = get_parser()
|
||||||
|
parser.add_argument(
|
||||||
|
"--output", metavar="CKPT_DIR", help="path to save checkpoint.")
|
||||||
|
parser.add_argument(
|
||||||
|
"--checkpoint_path", type=str, help="path to load checkpoint")
|
||||||
|
parser.add_argument("--dict-path", type=str, help="path to load checkpoint")
|
||||||
|
args = parser.parse_args(args)
|
||||||
|
|
||||||
|
if args.ngpu == 0 and args.dtype == "float16":
|
||||||
|
raise ValueError(
|
||||||
|
f"--dtype {args.dtype} does not support the CPU backend.")
|
||||||
|
|
||||||
|
# logging info
|
||||||
|
if args.verbose == 1:
|
||||||
|
logging.basicConfig(
|
||||||
|
level=logging.INFO,
|
||||||
|
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
|
||||||
|
)
|
||||||
|
elif args.verbose == 2:
|
||||||
|
logging.basicConfig(
|
||||||
|
level=logging.DEBUG,
|
||||||
|
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
logging.basicConfig(
|
||||||
|
level=logging.WARN,
|
||||||
|
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
|
||||||
|
)
|
||||||
|
logging.warning("Skip DEBUG/INFO messages")
|
||||||
|
logging.info(args)
|
||||||
|
|
||||||
|
# check CUDA_VISIBLE_DEVICES
|
||||||
|
if args.ngpu > 0:
|
||||||
|
cvd = os.environ.get("CUDA_VISIBLE_DEVICES")
|
||||||
|
if cvd is None:
|
||||||
|
logging.warning("CUDA_VISIBLE_DEVICES is not set.")
|
||||||
|
elif args.ngpu != len(cvd.split(",")):
|
||||||
|
logging.error("#gpus is not matched with CUDA_VISIBLE_DEVICES.")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
# TODO(mn5k): support of multiple GPUs
|
||||||
|
if args.ngpu > 1:
|
||||||
|
logging.error("The program only supports ngpu=1.")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
# display PYTHONPATH
|
||||||
|
logging.info("python path = " + os.environ.get("PYTHONPATH", "(None)"))
|
||||||
|
|
||||||
|
# seed setting
|
||||||
|
random.seed(args.seed)
|
||||||
|
np.random.seed(args.seed)
|
||||||
|
logging.info("set random seed = %d" % args.seed)
|
||||||
|
|
||||||
|
# validate rnn options
|
||||||
|
if args.rnnlm is not None and args.word_rnnlm is not None:
|
||||||
|
logging.error(
|
||||||
|
"It seems that both --rnnlm and --word-rnnlm are specified. "
|
||||||
|
"Please use either option.")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
# recog
|
||||||
|
if args.num_spkrs == 1:
|
||||||
|
if args.num_encs == 1:
|
||||||
|
# Experimental API that supports custom LMs
|
||||||
|
if args.api == "v2":
|
||||||
|
from deepspeech.decoders.recog import recog_v2
|
||||||
|
recog_v2(args)
|
||||||
|
else:
|
||||||
|
raise ValueError("Only support --api v2")
|
||||||
|
else:
|
||||||
|
if args.api == "v2":
|
||||||
|
raise NotImplementedError(
|
||||||
|
f"--num-encs {args.num_encs} > 1 is not supported in --api v2"
|
||||||
|
)
|
||||||
|
elif args.num_spkrs == 2:
|
||||||
|
raise ValueError("asr_mix not supported.")
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main(sys.argv[1:])
|
@ -0,0 +1,19 @@
|
|||||||
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
import sys
|
||||||
|
|
||||||
|
from deepspeech.decoders.recog_bin import main
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main(sys.argv[1:])
|
@ -0,0 +1,161 @@
|
|||||||
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
"""ASR Interface module."""
|
||||||
|
import argparse
|
||||||
|
|
||||||
|
from deepspeech.utils.dynamic_import import dynamic_import
|
||||||
|
|
||||||
|
|
||||||
|
class ASRInterface:
|
||||||
|
"""ASR Interface for ESPnet model implementation."""
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def add_arguments(parser):
|
||||||
|
"""Add arguments to parser."""
|
||||||
|
return parser
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def build(cls, idim: int, odim: int, **kwargs):
|
||||||
|
"""Initialize this class with python-level args.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
idim (int): The number of an input feature dim.
|
||||||
|
odim (int): The number of output vocab.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
ASRinterface: A new instance of ASRInterface.
|
||||||
|
|
||||||
|
"""
|
||||||
|
args = argparse.Namespace(**kwargs)
|
||||||
|
return cls(idim, odim, args)
|
||||||
|
|
||||||
|
def forward(self, xs, ilens, ys, olens):
|
||||||
|
"""Compute loss for training.
|
||||||
|
|
||||||
|
:param xs: batch of padded source sequences paddle.Tensor (B, Tmax, idim)
|
||||||
|
:param ilens: batch of lengths of source sequences (B), paddle.Tensor
|
||||||
|
:param ys: batch of padded target sequences paddle.Tensor (B, Lmax)
|
||||||
|
:param olens: batch of lengths of target sequences (B), paddle.Tensor
|
||||||
|
:return: loss value
|
||||||
|
:rtype: paddle.Tensor
|
||||||
|
"""
|
||||||
|
raise NotImplementedError("forward method is not implemented")
|
||||||
|
|
||||||
|
def recognize(self, x, recog_args, char_list=None, rnnlm=None):
|
||||||
|
"""Recognize x for evaluation.
|
||||||
|
|
||||||
|
:param ndarray x: input acouctic feature (B, T, D) or (T, D)
|
||||||
|
:param namespace recog_args: argment namespace contraining options
|
||||||
|
:param list char_list: list of characters
|
||||||
|
:param paddle.nn.Layer rnnlm: language model module
|
||||||
|
:return: N-best decoding results
|
||||||
|
:rtype: list
|
||||||
|
"""
|
||||||
|
raise NotImplementedError("recognize method is not implemented")
|
||||||
|
|
||||||
|
def recognize_batch(self, x, recog_args, char_list=None, rnnlm=None):
|
||||||
|
"""Beam search implementation for batch.
|
||||||
|
|
||||||
|
:param paddle.Tensor x: encoder hidden state sequences (B, Tmax, Henc)
|
||||||
|
:param namespace recog_args: argument namespace containing options
|
||||||
|
:param list char_list: list of characters
|
||||||
|
:param paddle.nn.Module rnnlm: language model module
|
||||||
|
:return: N-best decoding results
|
||||||
|
:rtype: list
|
||||||
|
"""
|
||||||
|
raise NotImplementedError("Batch decoding is not supported yet.")
|
||||||
|
|
||||||
|
def calculate_all_attentions(self, xs, ilens, ys):
|
||||||
|
"""Calculate attention.
|
||||||
|
|
||||||
|
:param list xs: list of padded input sequences [(T1, idim), (T2, idim), ...]
|
||||||
|
:param ndarray ilens: batch of lengths of input sequences (B)
|
||||||
|
:param list ys: list of character id sequence tensor [(L1), (L2), (L3), ...]
|
||||||
|
:return: attention weights (B, Lmax, Tmax)
|
||||||
|
:rtype: float ndarray
|
||||||
|
"""
|
||||||
|
raise NotImplementedError(
|
||||||
|
"calculate_all_attentions method is not implemented")
|
||||||
|
|
||||||
|
def calculate_all_ctc_probs(self, xs, ilens, ys):
|
||||||
|
"""Calculate CTC probability.
|
||||||
|
|
||||||
|
:param list xs_pad: list of padded input sequences [(T1, idim), (T2, idim), ...]
|
||||||
|
:param ndarray ilens: batch of lengths of input sequences (B)
|
||||||
|
:param list ys: list of character id sequence tensor [(L1), (L2), (L3), ...]
|
||||||
|
:return: CTC probabilities (B, Tmax, vocab)
|
||||||
|
:rtype: float ndarray
|
||||||
|
"""
|
||||||
|
raise NotImplementedError(
|
||||||
|
"calculate_all_ctc_probs method is not implemented")
|
||||||
|
|
||||||
|
@property
|
||||||
|
def attention_plot_class(self):
|
||||||
|
"""Get attention plot class."""
|
||||||
|
from espnet.asr.asr_utils import PlotAttentionReport
|
||||||
|
|
||||||
|
return PlotAttentionReport
|
||||||
|
|
||||||
|
@property
|
||||||
|
def ctc_plot_class(self):
|
||||||
|
"""Get CTC plot class."""
|
||||||
|
from espnet.asr.asr_utils import PlotCTCReport
|
||||||
|
|
||||||
|
return PlotCTCReport
|
||||||
|
|
||||||
|
def get_total_subsampling_factor(self):
|
||||||
|
"""Get total subsampling factor."""
|
||||||
|
raise NotImplementedError(
|
||||||
|
"get_total_subsampling_factor method is not implemented")
|
||||||
|
|
||||||
|
def encode(self, feat):
|
||||||
|
"""Encode feature in `beam_search` (optional).
|
||||||
|
|
||||||
|
Args:
|
||||||
|
x (numpy.ndarray): input feature (T, D)
|
||||||
|
Returns:
|
||||||
|
paddle.Tensor: encoded feature (T, D)
|
||||||
|
"""
|
||||||
|
raise NotImplementedError("encode method is not implemented")
|
||||||
|
|
||||||
|
def scorers(self):
|
||||||
|
"""Get scorers for `beam_search` (optional).
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
dict[str, ScorerInterface]: dict of `ScorerInterface` objects
|
||||||
|
|
||||||
|
"""
|
||||||
|
raise NotImplementedError("decoders method is not implemented")
|
||||||
|
|
||||||
|
|
||||||
|
predefined_asr = {
|
||||||
|
"transformer": "deepspeech.models.u2:U2Model",
|
||||||
|
"conformer": "deepspeech.models.u2:U2Model",
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def dynamic_import_asr(module):
|
||||||
|
"""Import ASR models dynamically.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
module (str): asr name. e.g., transformer, conformer
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
type: ASR class
|
||||||
|
|
||||||
|
"""
|
||||||
|
model_class = dynamic_import(module, predefined_asr)
|
||||||
|
assert issubclass(model_class,
|
||||||
|
ASRInterface), f"{module} does not implement ASRInterface"
|
||||||
|
return model_class
|
@ -1,9 +1,14 @@
|
|||||||
# LibriSpeech

## Transformer

| Model | Params | Config | Augmentation | Test Set | Decode Method | Loss | WER % |
| --- | --- | --- | --- | --- | --- | --- | --- |
| transformer | 32.52 M | conf/transformer.yaml | spec_aug | test-clean | attention | 6.395054340362549 | 4.2 |
| transformer | 32.52 M | conf/transformer.yaml | spec_aug | test-clean | ctc_greedy_search | 6.395054340362549 | 5.0 |
| transformer | 32.52 M | conf/transformer.yaml | spec_aug | test-clean | ctc_prefix_beam_search | 6.395054340362549 | |
| transformer | 32.52 M | conf/transformer.yaml | spec_aug | test-clean | attention_rescore | 6.395054340362549 | |

| Test Set | Decode Method | #Snt | #Wrd | Corr | Sub | Del | Ins | Err | S.Err |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| test-clean | attention | 2620 | 52576 | 96.4 | 2.5 | 1.1 | 0.4 | 4.0 | 34.7 |
| test-clean | ctc_greedy_search | 2620 | 52576 | 95.9 | 3.7 | 0.4 | 0.5 | 4.6 | 48.0 |
| test-clean | ctc_prefix_beamsearch | 2620 | 52576 | 95.9 | 3.7 | 0.4 | 0.5 | 4.6 | 47.6 |
| test-clean | attention_rescore | 2620 | 52576 | 96.8 | 2.9 | 0.3 | 0.4 | 3.7 | 38.0 |
| test-clean | join_ctc_w/o_lm | 2620 | 52576 | 97.2 | 2.6 | 0.3 | 0.4 | 3.2 | 34.9 |
|
@ -0,0 +1,7 @@
|
|||||||
|
batchsize: 0
|
||||||
|
beam-size: 60
|
||||||
|
ctc-weight: 0.0
|
||||||
|
lm-weight: 0.0
|
||||||
|
maxlenratio: 0.0
|
||||||
|
minlenratio: 0.0
|
||||||
|
penalty: 0.0
|
@ -0,0 +1,7 @@
|
|||||||
|
batchsize: 0
|
||||||
|
beam-size: 60
|
||||||
|
ctc-weight: 0.4
|
||||||
|
lm-weight: 0.6
|
||||||
|
maxlenratio: 0.0
|
||||||
|
minlenratio: 0.0
|
||||||
|
penalty: 0.0
|
@ -0,0 +1,7 @@
|
|||||||
|
batchsize: 0
|
||||||
|
beam-size: 60
|
||||||
|
ctc-weight: 0.4
|
||||||
|
lm-weight: 0.0
|
||||||
|
maxlenratio: 0.0
|
||||||
|
minlenratio: 0.0
|
||||||
|
penalty: 0.0
|
@ -0,0 +1,109 @@
|
|||||||
|
#!/bin/bash
# Decode a trained ASR checkpoint with espnet-style recog.py and score
# the hypotheses with sclite. Parameters below can be overridden on the
# command line via utils/parse_options.sh (e.g. --ckpt_prefix ...).

set -e

expdir=exp
datadir=data
nj=32          # number of parallel decoding jobs
tag=           # optional suffix appended to the decode directory name

# decode config
decode_config=conf/decode/decode.yaml

# lm params (external RNNLM; currently unused — see the commented
# --rnnlm flag further down)
lang_model=rnnlm.model.best
lmexpdir=exp/train_rnnlm_pytorch_lm_transformer_cosine_batchsize32_lr1e-4_layer16_unigram5000_ngpu4/
lmtag='nolm'

# NOTE(review): the second assignment deliberately narrows decoding to
# test-clean only; the full list above is kept for reference.
recog_set="test-clean test-other dev-clean dev-other"
recog_set="test-clean"

# bpemode (unigram or bpe)
nbpe=5000
bpemode=unigram
bpeprefix="data/bpe_${bpemode}_${nbpe}"
bpemodel=${bpeprefix}.model

# bin params
config_path=conf/transformer.yaml
dict=data/bpe_unigram_5000_units.txt
ckpt_prefix=

source ${MAIN_ROOT}/utils/parse_options.sh || exit 1;

# --ckpt_prefix is mandatory: everything else has a usable default.
if [ -z ${ckpt_prefix} ]; then
    echo "usage: $0 --ckpt_prefix ckpt_prefix"
    exit 1
fi

# Count visible GPUs from CUDA_VISIBLE_DEVICES (comma-separated ids).
ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
echo "using $ngpu gpus..."

# The checkpoint lives two levels below the experiment directory.
ckpt_dir=$(dirname `dirname ${ckpt_prefix}`)
echo "ckpt dir: ${ckpt_dir}"

ckpt_tag=$(basename ${ckpt_prefix})
echo "ckpt tag: ${ckpt_tag}"

# Streaming (chunk) configs are recognized by their file-name pattern.
chunk_mode=false
if [[ ${config_path} =~ ^.*chunk_.*yaml$ ]];then
    chunk_mode=true
fi
echo "chunk mode: ${chunk_mode}"
echo "decode conf: ${decode_config}"

# download language model
#bash local/download_lm_en.sh
#if [ $? -ne 0 ]; then
#    exit 1
#fi

pids=() # initialize pids

for dmethd in join_ctc; do
(
    echo "${dmethd} decoding"
    for rtask in ${recog_set}; do
    (
        echo "${rtask} dataset"
        # Decode output dir encodes dataset, method, config, LM tag,
        # checkpoint tag and the optional user tag.
        decode_dir=${ckpt_dir}/decode/decode_${rtask/-/_}_${dmethd}_$(basename ${config_path%.*})_${lmtag}_${ckpt_tag}_${tag}
        feat_recog_dir=${datadir}
        mkdir -p ${decode_dir}
        mkdir -p ${feat_recog_dir}

        # split data
        split_json.sh manifest.${rtask} ${nj}

        #### use CPU for decoding
        ngpu=0

        # set batchsize 0 to disable batch decoding
        ${decode_cmd} JOB=1:${nj} ${decode_dir}/log/decode.JOB.log \
            python3 -u ${BIN_DIR}/recog.py \
                --api v2 \
                --config ${decode_config} \
                --ngpu ${ngpu} \
                --batchsize 0 \
                --checkpoint_path ${ckpt_prefix} \
                --dict-path ${dict} \
                --recog-json ${feat_recog_dir}/split${nj}/JOB/manifest.${rtask} \
                --result-label ${decode_dir}/data.JOB.json \
                --model-conf ${config_path} \
                --model ${ckpt_prefix}.pdparams

        #--rnnlm ${lmexpdir}/${lang_model} \

        # Score with sclite (WER reporting disabled; CER/BPE-level only).
        score_sclite.sh --bpe ${nbpe} --bpemodel ${bpemodel} --wer false ${decode_dir} ${dict}

    ) &
    pids+=($!) # store background pids
    # NOTE(review): this wait loop sits INSIDE the recog-set loop, so each
    # dataset's subshell is awaited before the next launches — effectively
    # serial. If parallelism across datasets was intended, the wait belongs
    # after `done`; confirm against the upstream espnet recipe.
    i=0; for pid in "${pids[@]}"; do wait ${pid} || ((++i)); done
    [ ${i} -gt 0 ] && echo "$0: ${i} background jobs are failed." || true
    done
)
done

echo "Finished"

exit 0
|
@ -1,42 +1,43 @@
|
|||||||
|
ConfigArgParse
|
||||||
coverage
|
coverage
|
||||||
editdistance
|
editdistance
|
||||||
|
g2p_en
|
||||||
|
g2pM
|
||||||
gpustat
|
gpustat
|
||||||
|
h5py
|
||||||
|
inflect
|
||||||
|
jieba
|
||||||
jsonlines
|
jsonlines
|
||||||
kaldiio
|
kaldiio
|
||||||
|
librosa
|
||||||
|
llvmlite
|
||||||
loguru
|
loguru
|
||||||
|
matplotlib
|
||||||
|
nltk
|
||||||
|
numba
|
||||||
|
numpy==1.20.0
|
||||||
|
pandas
|
||||||
|
phkit
|
||||||
Pillow
|
Pillow
|
||||||
|
praatio~=4.1
|
||||||
pre-commit
|
pre-commit
|
||||||
pybind11
|
pybind11
|
||||||
|
pypinyin
|
||||||
|
pyworld
|
||||||
resampy==0.2.2
|
resampy==0.2.2
|
||||||
sacrebleu
|
sacrebleu
|
||||||
scipy==1.2.1
|
scipy==1.2.1
|
||||||
sentencepiece
|
sentencepiece
|
||||||
snakeviz
|
snakeviz
|
||||||
|
soundfile~=0.10
|
||||||
sox
|
sox
|
||||||
tensorboardX
|
tensorboardX
|
||||||
textgrid
|
textgrid
|
||||||
|
timer
|
||||||
tqdm
|
tqdm
|
||||||
typeguard
|
typeguard
|
||||||
visualdl==2.2.0
|
|
||||||
yacs
|
|
||||||
numpy==1.20.0
|
|
||||||
numba
|
|
||||||
nltk
|
|
||||||
inflect
|
|
||||||
librosa
|
|
||||||
unidecode
|
unidecode
|
||||||
llvmlite
|
visualdl==2.2.0
|
||||||
matplotlib
|
|
||||||
pandas
|
|
||||||
soundfile~=0.10
|
|
||||||
g2p_en
|
|
||||||
pypinyin
|
|
||||||
webrtcvad
|
webrtcvad
|
||||||
g2pM
|
yacs
|
||||||
praatio~=4.1
|
|
||||||
h5py
|
|
||||||
timer
|
|
||||||
pyworld
|
|
||||||
jieba
|
|
||||||
phkit
|
|
||||||
yq
|
yq
|
||||||
|
Loading…
Reference in new issue