Merge pull request #2000 from yt605155624/fix_chunk

[TTS]rename chunk to block in streaming tts, test=tts
TianYuan 3 years ago committed by GitHub
commit 1a6df85f97

@@ -90,7 +90,7 @@ def parse_args():
         default=False,
         help="whether use streaming acoustic model")
     parser.add_argument(
-        "--chunk_size", type=int, default=42, help="chunk size of am streaming")
+        "--block_size", type=int, default=42, help="block size of am streaming")
     parser.add_argument(
         "--pad_size", type=int, default=12, help="pad size of am streaming")
@@ -169,7 +169,7 @@ def main():
     N = 0
     T = 0
-    chunk_size = args.chunk_size
+    block_size = args.block_size
     pad_size = args.pad_size
     get_tone_ids = False
     for utt_id, sentence in sentences:
@@ -189,7 +189,7 @@ def main():
             am_encoder_infer_predictor, input=phones)
         if args.am_streaming:
-            hss = get_chunks(orig_hs, chunk_size, pad_size)
+            hss = get_chunks(orig_hs, block_size, pad_size)
             chunk_num = len(hss)
             mel_list = []
             for i, hs in enumerate(hss):
@@ -211,7 +211,7 @@ def main():
                     sub_mel = sub_mel[pad_size:]
                 else:
                     # the last few blocks may also not have enough padding on the right
-                    sub_mel = sub_mel[pad_size:(chunk_size + pad_size) -
+                    sub_mel = sub_mel[pad_size:(block_size + pad_size) -
                                       sub_mel.shape[0]]
                 mel_list.append(sub_mel)
             mel = np.concatenate(mel_list, axis=0)
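
Note on the else branch above: a non-final block always carries the full pad_size frames of left context but possibly fewer on the right, so (block_size + pad_size) - sub_mel.shape[0] is a negative offset that cuts exactly the right-side padding the block actually received, leaving block_size frames. A minimal NumPy sketch of that rule (the sizes are made-up example values, not part of the PR):

import numpy as np

block_size, pad_size, n_mels = 42, 12, 80

# A middle block that got full padding on both sides: block_size + 2 * pad_size frames.
full = np.zeros((block_size + 2 * pad_size, n_mels))
trimmed = full[pad_size:(block_size + pad_size) - full.shape[0]]    # [12:-12]
print(trimmed.shape[0])  # 42 -> exactly block_size frames remain

# A block near the end that only got 5 frames of right padding.
short = np.zeros((block_size + pad_size + 5, n_mels))
trimmed = short[pad_size:(block_size + pad_size) - short.shape[0]]  # [12:-5]
print(trimmed.shape[0])  # 42 -> still block_size frames; only the real padding is cut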

@@ -97,7 +97,7 @@ def ort_predict(args):
     T = 0
     merge_sentences = True
     get_tone_ids = False
-    chunk_size = args.chunk_size
+    block_size = args.block_size
     pad_size = args.pad_size
     for utt_id, sentence in sentences:
@@ -115,7 +115,7 @@ def ort_predict(args):
         orig_hs = am_encoder_infer_sess.run(
             None, input_feed={'text': phone_ids})
         if args.am_streaming:
-            hss = get_chunks(orig_hs[0], chunk_size, pad_size)
+            hss = get_chunks(orig_hs[0], block_size, pad_size)
             chunk_num = len(hss)
             mel_list = []
             for i, hs in enumerate(hss):
@@ -139,7 +139,7 @@ def ort_predict(args):
                     sub_mel = sub_mel[pad_size:]
                 else:
                     # the last few blocks may also not have enough padding on the right
-                    sub_mel = sub_mel[pad_size:(chunk_size + pad_size) -
+                    sub_mel = sub_mel[pad_size:(block_size + pad_size) -
                                       sub_mel.shape[0]]
                 mel_list.append(sub_mel)
             mel = np.concatenate(mel_list, axis=0)
@@ -236,7 +236,7 @@ def parse_args():
         default=False,
         help="whether use streaming acoustic model")
     parser.add_argument(
-        "--chunk_size", type=int, default=42, help="chunk size of am streaming")
+        "--block_size", type=int, default=42, help="block size of am streaming")
     parser.add_argument(
         "--pad_size", type=int, default=12, help="pad size of am streaming")

@@ -75,13 +75,13 @@ def denorm(data, mean, std):
     return data * std + mean
 
 
-def get_chunks(data, chunk_size: int, pad_size: int):
+def get_chunks(data, block_size: int, pad_size: int):
     data_len = data.shape[1]
     chunks = []
-    n = math.ceil(data_len / chunk_size)
+    n = math.ceil(data_len / block_size)
     for i in range(n):
-        start = max(0, i * chunk_size - pad_size)
-        end = min((i + 1) * chunk_size + pad_size, data_len)
+        start = max(0, i * block_size - pad_size)
+        end = min((i + 1) * block_size + pad_size, data_len)
         chunks.append(data[:, start:end, :])
     return chunks
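
For reference, the renamed get_chunks still yields overlapping slices of block_size frames plus up to pad_size frames of context on each side. A quick sanity check with dummy data (example shapes only, not part of the PR):

import math

import numpy as np

def get_chunks(data, block_size: int, pad_size: int):
    # same logic as the function in the hunk above
    data_len = data.shape[1]
    chunks = []
    n = math.ceil(data_len / block_size)
    for i in range(n):
        start = max(0, i * block_size - pad_size)
        end = min((i + 1) * block_size + pad_size, data_len)
        chunks.append(data[:, start:end, :])
    return chunks

# 100 encoder frames with the default --block_size=42 and --pad_size=12.
hs = np.zeros((1, 100, 256))          # (batch, frames, hidden); hidden size is arbitrary here
blocks = get_chunks(hs, 42, 12)
print([b.shape[1] for b in blocks])   # [54, 66, 28] -> frames 0-53, 30-95, 72-99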

@@ -133,7 +133,7 @@ def evaluate(args):
     N = 0
     T = 0
-    chunk_size = args.chunk_size
+    block_size = args.block_size
     pad_size = args.pad_size
     for utt_id, sentence in sentences:
@@ -153,7 +153,7 @@ def evaluate(args):
         # acoustic model
         orig_hs = am_encoder_infer(phone_ids)
         if args.am_streaming:
-            hss = get_chunks(orig_hs, chunk_size, pad_size)
+            hss = get_chunks(orig_hs, block_size, pad_size)
             chunk_num = len(hss)
             mel_list = []
             for i, hs in enumerate(hss):
@@ -171,7 +171,7 @@ def evaluate(args):
                     sub_mel = sub_mel[pad_size:]
                 else:
                     # the last few blocks may also not have enough padding on the right
-                    sub_mel = sub_mel[pad_size:(chunk_size + pad_size) -
+                    sub_mel = sub_mel[pad_size:(block_size + pad_size) -
                                       sub_mel.shape[0]]
                 mel_list.append(sub_mel)
             mel = paddle.concat(mel_list, axis=0)
@@ -277,7 +277,7 @@ def parse_args():
         default=False,
         help="whether use streaming acoustic model")
     parser.add_argument(
-        "--chunk_size", type=int, default=42, help="chunk size of am streaming")
+        "--block_size", type=int, default=42, help="block size of am streaming")
     parser.add_argument(
         "--pad_size", type=int, default=12, help="pad size of am streaming")
