parent
9bf5471613
commit
4a6d04f960
@ -0,0 +1,53 @@
|
||||
#!/bin/bash

# End-to-end singing-voice synthesis for DiffSinger trained on Opencpop.
# Usage: synthesize_e2e.sh <config_path> <train_output_path> <ckpt_name>
# NOTE(review): assumes BIN_DIR is exported by the calling script and points
# at the diffsinger bin directory — confirm against run.sh.
config_path=$1
train_output_path=$2
ckpt_name=$3

# Stage selection: stage 0 = pwgan vocoder, stage 1 = hifigan vocoder.
stage=0
stop_stage=0

# pwgan
if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
    # FLAGS_* are PaddlePaddle GPU-memory settings, scoped to this one command
    # via the env-prefix form (no export).
    FLAGS_allocator_strategy=naive_best_fit \
    FLAGS_fraction_of_gpu_memory_to_use=0.01 \
    python3 ${BIN_DIR}/../synthesize_e2e.py \
        --am=diffsinger_opencpop \
        --am_config=${config_path} \
        --am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \
        --am_stat=dump/train/speech_stats.npy \
        --voc=pwgan_opencpop \
        --voc_config=pwgan_opencpop_ckpt_1.4.0/default.yaml \
        --voc_ckpt=pwgan_opencpop_ckpt_1.4.0/snapshot_iter_100000.pdz \
        --voc_stat=pwgan_opencpop_ckpt_1.4.0/feats_stats.npy \
        --lang=sing \
        --text=${BIN_DIR}/../sentences_sing.txt \
        --output_dir=${train_output_path}/test_e2e \
        --phones_dict=dump/phone_id_map.txt \
        --speech_stretchs=dump/train/speech_stretchs.npy \
        --pinyin_phone=./pinyin_to_phone.txt
fi

# for more GAN Vocoders
# hifigan
if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
    echo "in hifigan syn_e2e"
    FLAGS_allocator_strategy=naive_best_fit \
    FLAGS_fraction_of_gpu_memory_to_use=0.01 \
    python3 ${BIN_DIR}/../synthesize_e2e.py \
        --am=diffsinger_opencpop \
        --am_config=${config_path} \
        --am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \
        --am_stat=dump/train/speech_stats.npy \
        --voc=hifigan_opencpop \
        --voc_config=hifigan_opencpop_ckpt_1.4.0/default.yaml \
        --voc_ckpt=hifigan_opencpop_ckpt_1.4.0/snapshot_iter_625000.pdz \
        --voc_stat=hifigan_opencpop_ckpt_1.4.0/feats_stats.npy \
        --lang=sing \
        --text=${BIN_DIR}/../sentences_sing.txt \
        --output_dir=${train_output_path}/test_e2e \
        --phones_dict=dump/phone_id_map.txt \
        --speech_stretchs=dump/train/speech_stretchs.npy \
        --pinyin_phone=./pinyin_to_phone.txt
fi
|
@ -0,0 +1,2 @@
|
||||
{"utt_id": "2093003457", "input_type": "word", "text": "小酒窝长睫毛AP是你最美的记号", "notes": "C#4/Db4 | F#4/Gb4 | G#4/Ab4 | A#4/Bb4 F#4/Gb4 | F#4/Gb4 C#4/Db4 | C#4/Db4 | rest | C#4/Db4 | A#4/Bb4 | G#4/Ab4 | A#4/Bb4 | G#4/Ab4 | F4 | C#4/Db4", "note_durs": "0.407140 | 0.376190 | 0.242180 | 0.509550 0.183420 | 0.315400 0.235020 | 0.361660 | 0.223070 | 0.377270 | 0.340550 | 0.299620 | 0.344510 | 0.283770 | 0.323390 | 0.360340"}
|
||||
{"utt_id": "2093003458", "input_type": "phoneme", "phones": "w o m ei t ian sh ui ui b u u zh ao AP x iang n ian n i d e w ei x iao iao AP" , "notes": "C#4/Db4 C#4/Db4 D#4/Eb4 D#4/Eb4 F4 F4 F#4/Gb4 F#4/Gb4 D#4/Eb4 D#4/Eb4 D#4/Eb4 A#3/Bb3 A#3/Bb3 A#3/Bb3 rest F#4/Gb4 F#4/Gb4 F4 F4 F#4/Gb4 F#4/Gb4 F4 F4 G#4/Ab4 G#4/Ab4 D#4/Eb4 D#4/Eb4 C#4/Db4 rest", "note_durs": "0.221750 0.221750 0.414460 0.414460 0.223160 0.223160 0.430900 0.430900 0.335990 0.269270 0.269270 0.289060 0.522690 0.522690 0.355060 0.397130 0.397130 0.247690 0.247690 0.406720 0.406720 0.246830 0.246830 0.307540 0.307540 0.429910 0.429910 0.519130 0.342300", "is_slurs": "0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0"}
|
@ -0,0 +1,191 @@
|
||||
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import re
from typing import Dict
from typing import List
from typing import Tuple

import librosa
import numpy as np
import paddle
import ToJyutping
from pypinyin import lazy_pinyin
from pypinyin import pinyin
from pypinyin import Style

from paddlespeech.t2s.frontend.zh_normalization.text_normlization import TextNormalizer
|
||||
|
||||
# Mandarin pinyin initials (syllable onsets); includes the glides 'y'/'w'.
# NOTE(review): neither INITIALS nor FINALS is referenced in the visible code
# of this module — possibly kept for external importers; confirm before removal.
INITIALS = [
    'zh', 'ch', 'sh', 'b', 'p', 'm', 'f', 'd', 't', 'n', 'l', 'g', 'k', 'h',
    'j', 'q', 'x', 'r', 'z', 'c', 's', 'y', 'w'
]
# Mandarin pinyin finals (syllable rimes); 'v' stands in for 'ü'.
FINALS = [
    'a', 'ai', 'an', 'ang', 'ao', 'e', 'ei', 'en', 'eng', 'er', 'i', 'ia',
    'ian', 'iang', 'iao', 'ie', 'in', 'ing', 'iong', 'iu', 'ng', 'o', 'ong',
    'ou', 'u', 'ua', 'uai', 'uan', 'uang', 'ui', 'un', 'uo', 'v', 'van', 've',
    'vn'
]
|
||||
|
||||
|
||||
class SingFrontend():
    """Frontend for singing voice synthesis (SVS).

    Converts lyric text (or pre-split phonemes) plus musical note/duration
    information into the id sequences consumed by an SVS acoustic model.
    """

    def __init__(self, pinyin_phone_path: str, phone_vocab_path: str):
        """SVS Frontend

        Args:
            pinyin_phone_path (str): pinyin to phone file path, a 'pinyin|phones' (like: ba|b a ) pair per line.
            phone_vocab_path (str): phone to phone id file path, a 'phone phone id' (like: a 4 ) pair per line.
        """
        # Chinese and ASCII punctuation stripped from lyrics in get_phones().
        self.punc = '[:,;。?!“”‘’\':,;.?!]'

        # pinyin -> phone-string mapping; AP (breath) and SP (silence) map to
        # themselves so they survive the lookup unchanged.
        self.pinyin_phones = {'AP': 'AP', 'SP': 'SP'}
        if pinyin_phone_path:
            with open(pinyin_phone_path, 'rt', encoding='utf-8') as f:
                for line in f:
                    fields = [
                        x.strip() for x in line.split('|') if x.strip() != ''
                    ]
                    self.pinyin_phones[fields[0]] = fields[1]

        # phone -> integer id mapping.
        self.vocab_phones = {}
        if phone_vocab_path:
            with open(phone_vocab_path, 'rt', encoding='utf-8') as f:
                for line in f:
                    # loop variable renamed from `id` to avoid shadowing the builtin
                    phn, phn_id = line.strip().split()
                    self.vocab_phones[phn] = int(phn_id)

    def get_phones(self, sentence: str) -> List[str]:
        """Convert a lyric sentence into per-syllable phone strings.

        Args:
            sentence (str): input lyric sentence.

        Returns:
            List[str]: one space-separated phone string per pinyin syllable;
                unknown syllables are mapped to 'SP'.

        Example:
            sentence = "你好"
            phones = ['n i', 'h ao']
        """
        # remove all punc
        sentence = re.sub(self.punc, "", sentence)

        # Pypinyin can't solve polyphonic words: rewrite known contexts of 长
        # to 常 so lazy_pinyin picks the intended 'chang' reading.
        sentence = sentence.replace('最长', '最常').replace('长睫毛', '常睫毛') \
            .replace('那么长', '那么常').replace('多长', '多常') \
            .replace('很长', '很常')

        # lyric -> pinyin
        pinyins = lazy_pinyin(sentence, strict=False)
        # replace unk word with SP
        pinyins = [
            py if py in self.pinyin_phones else "SP" for py in pinyins
        ]
        phones = [
            self.pinyin_phones[py.strip()] for py in pinyins
            if py.strip() in self.pinyin_phones
        ]

        return phones

    def get_note_info(self, note_info: str) -> List[str]:
        """Split a '|'-separated note/duration string into a stripped list,
        dropping empty fields."""
        return [x.strip() for x in note_info.split('|') if x.strip() != '']

    def process(
            self,
            phones: List[str],
            notes: List[str],
            note_durs: List[str],
    ) -> Tuple[List[str], List[str], List[str], List[int]]:
        """Align word-level note info to phone level.

        Each entry of the three inputs is a space-separated string for one
        word. Every phone of a word receives the word's first note/duration;
        any extra notes on the word become slurred repetitions of the word's
        last phone (is_slur == 1).

        Args:
            phones (List[str]): per-word phone strings, e.g. 'h ao'.
            notes (List[str]): per-word note strings, e.g. 'D4 E4'.
            note_durs (List[str]): per-word note-duration strings, e.g. '0.3 0.2'.

        Returns:
            Tuple[List[str], List[str], List[str], List[int]]:
                (new_phones, new_notes, new_note_durs, is_slurs), all aligned
                to phone level and of equal length.
        """
        new_phones = []
        new_notes = []
        new_note_durs = []
        is_slurs = []
        assert len(phones) == len(notes) == len(
            note_durs
        ), "Please check the input, text, notes, note_durs should be the same length."
        for i in range(len(phones)):
            word_phones = phones[i].split()
            word_notes = notes[i].split()
            word_durs = note_durs[i].split()

            # every phone of the word carries the word's first note/duration
            for phn in word_phones:
                new_phones.append(phn)
                new_notes.append(word_notes[0])
                new_note_durs.append(word_durs[0])
                is_slurs.append(0)

            # extra notes on this word: slur by repeating the last phone
            # (inner index renamed to `j` — original reused `i`, shadowing
            # the outer loop variable)
            if len(word_notes) > 1:
                for j in range(1, len(word_notes)):
                    new_phones.append(word_phones[-1])
                    new_notes.append(word_notes[j])
                    new_note_durs.append(word_durs[j])
                    is_slurs.append(1)

        return new_phones, new_notes, new_note_durs, is_slurs

    def get_input_ids(self, svs_input: Dict[str, str],
                      to_tensor: bool=True) -> Dict[str, List[paddle.Tensor]]:
        """convert input to int/float.

        Args:
            svs_input (Dict[str, str]): if input_type is 'phoneme', keys
                phones, notes, note_durs and is_slurs are needed;
                if input_type is 'word', keys text, notes and note_durs
                are needed.
            to_tensor (bool, optional): whether to convert to Tensor. Defaults to True.

        Returns:
            Dict[str, List[paddle.Tensor]]: result include phone_ids, note_ids, note_durs, is_slurs.
        """
        result = {}
        input_type = svs_input['input_type']
        if input_type == 'phoneme':
            assert "phones" in svs_input and "notes" in svs_input and "note_durs" in svs_input and "is_slurs" in svs_input, \
                "When input_type is phoneme, phones, notes, note_durs, is_slurs should be in the svs_input."
            phones = svs_input["phones"].split()
            notes = svs_input["notes"].split()
            note_durs = svs_input["note_durs"].split()
            is_slurs = svs_input["is_slurs"].split()
            assert len(phones) == len(notes) == len(note_durs) == len(
                is_slurs
            ), "Please check the input, phones, notes, note_durs is_slurs should be the same length."
        elif input_type == "word":
            assert "text" in svs_input and "notes" in svs_input and "note_durs" in svs_input, \
                "When input_type is word, text, notes, note_durs, should be in the svs_input."
            phones = self.get_phones(svs_input['text'])
            notes = self.get_note_info(svs_input['notes'])
            note_durs = self.get_note_info(svs_input['note_durs'])
            phones, notes, note_durs, is_slurs = self.process(
                phones=phones, notes=notes, note_durs=note_durs)
        else:
            # original fell through to a NameError on `phones`; fail clearly
            raise ValueError(
                f"input_type should be 'phoneme' or 'word', but got {input_type}"
            )

        phone_ids = np.array([self.vocab_phones[phn] for phn in phones],
                             np.int64)
        # 'rest' maps to note id 0; others (e.g. 'C#4/Db4') use the MIDI
        # number of the part before '/'.
        note_ids = np.array([
            librosa.note_to_midi(note.split("/")[0]) if note != 'rest' else 0
            for note in notes
        ], np.int64)
        note_durs = np.array(note_durs, np.float32)
        is_slurs = np.array(is_slurs, np.int64)

        if to_tensor:
            phone_ids = paddle.to_tensor(phone_ids)
            note_ids = paddle.to_tensor(note_ids)
            note_durs = paddle.to_tensor(note_durs)
            is_slurs = paddle.to_tensor(is_slurs)

        # each value is wrapped in a list: a batch of one utterance
        result['phone_ids'] = [phone_ids]
        result['note_ids'] = [note_ids]
        result['note_durs'] = [note_durs]
        result['is_slurs'] = [is_slurs]

        return result
|
Loading…
Reference in new issue