Merge branch 'PaddlePaddle:develop' into test

pull/2354/head
commit 43af22e4e6
Author: Zhao Yuting (committed by GitHub)

@@ -613,7 +613,7 @@ PaddleSpeech supports a series of most popular models. They are summarized in [r
</td>
</tr>
<tr>
<td rowspan="3">Voice Cloning</td>
<td rowspan="4">Voice Cloning</td>
<td>GE2E</td>
<td >Librispeech, etc.</td>
<td>
@@ -633,13 +633,20 @@ PaddleSpeech supports a series of most popular models. They are summarized in [r
<td>
<a href = "./examples/aishell3/vc1">ge2e-fastspeech2-aishell3</a>
</td>
</tr>
<tr>
<td>GE2E + VITS</td>
<td>AISHELL-3</td>
<td>
<a href = "./examples/aishell3/vits-vc">ge2e-vits-aishell3</a>
</td>
</tr>
<tr>
<td rowspan="3">End-to-End</td>
<td>VITS</td>
<td >CSMSC</td>
<td>CSMSC / AISHELL-3</td>
<td>
<a href = "./examples/csmsc/vits">VITS-csmsc</a>
<a href = "./examples/csmsc/vits">VITS-csmsc</a> / <a href = "./examples/aishell3/vits">VITS-aishell3</a>
</td>
</tr>
</tbody>

@@ -608,7 +608,7 @@ PaddleSpeech's **speech synthesis** mainly consists of three modules: text frontend, acoustic model, and vocoder
</td>
</tr>
<tr>
<td rowspan="3">声音克隆</td>
<td rowspan="4">声音克隆</td>
<td>GE2E</td>
<td >Librispeech, etc.</td>
<td>
@@ -629,13 +629,20 @@ PaddleSpeech's **speech synthesis** mainly consists of three modules: text frontend, acoustic model, and vocoder
<a href = "./examples/aishell3/vc1">ge2e-fastspeech2-aishell3</a>
</td>
</tr>
<tr>
<td>GE2E + VITS</td>
<td>AISHELL-3</td>
<td>
<a href = "./examples/aishell3/vits-vc">ge2e-vits-aishell3</a>
</td>
</tr>
<tr>
<td rowspan="3">端到端</td>
<td>VITS</td>
<td >CSMSC</td>
<td>CSMSC / AISHELL-3</td>
<td>
<a href = "./examples/csmsc/vits">VITS-csmsc</a>
<a href = "./examples/csmsc/vits">VITS-csmsc</a> / <a href = "./examples/aishell3/vits">VITS-aishell3</a>
</td>
</tr>
</tbody>

@@ -26,7 +26,8 @@ def get_audios(path):
"""
supported_formats = [".wav", ".mp3", ".ogg", ".flac", ".m4a"]
return [
item for sublist in [[os.path.join(dir, file) for file in files]
item
for sublist in [[os.path.join(dir, file) for file in files]
for dir, _, files in list(os.walk(path))]
for item in sublist if os.path.splitext(item)[1] in supported_formats
]

@@ -3,40 +3,40 @@
# 2. Receive recorded audio and return the recognition result
# 3. Receive the ASR result and return the NLP dialogue result
# 4. Receive the NLP dialogue result and return TTS audio
import argparse
import base64
import yaml
import os
import json
import datetime
import json
import os
from typing import List
import aiofiles
import librosa
import soundfile as sf
import numpy as np
import argparse
import uvicorn
import aiofiles
from typing import Optional, List
from pydantic import BaseModel
from fastapi import FastAPI, Header, File, UploadFile, Form, Cookie, WebSocket, WebSocketDisconnect
from fastapi import FastAPI
from fastapi import File
from fastapi import Form
from fastapi import UploadFile
from fastapi import WebSocket
from fastapi import WebSocketDisconnect
from fastapi.responses import StreamingResponse
from starlette.responses import FileResponse
from starlette.middleware.cors import CORSMiddleware
from starlette.requests import Request
from starlette.websockets import WebSocketState as WebSocketState
from pydantic import BaseModel
from src.AudioManeger import AudioMannger
from src.util import *
from src.robot import Robot
from src.WebsocketManeger import ConnectionManager
from src.SpeechBase.vpr import VPR
from src.util import *
from src.WebsocketManeger import ConnectionManager
from starlette.middleware.cors import CORSMiddleware
from starlette.requests import Request
from starlette.responses import FileResponse
from starlette.websockets import WebSocketState as WebSocketState
from paddlespeech.server.engine.asr.online.python.asr_engine import PaddleASRConnectionHanddler
from paddlespeech.server.utils.audio_process import float2pcm
# Parse the configuration
parser = argparse.ArgumentParser(
prog='PaddleSpeechDemo', add_help=True)
parser = argparse.ArgumentParser(prog='PaddleSpeechDemo', add_help=True)
parser.add_argument(
"--port",
@@ -60,39 +60,41 @@ ie_model_path = "source/model"
UPLOAD_PATH = "source/vpr"
WAV_PATH = "source/wav"
base_sources = [
UPLOAD_PATH, WAV_PATH
]
base_sources = [UPLOAD_PATH, WAV_PATH]
for path in base_sources:
os.makedirs(path, exist_ok=True)
# Initialization
app = FastAPI()
chatbot = Robot(asr_config, tts_config, asr_init_path, ie_model_path=ie_model_path)
chatbot = Robot(
asr_config, tts_config, asr_init_path, ie_model_path=ie_model_path)
manager = ConnectionManager()
aumanager = AudioMannger(chatbot)
aumanager.init()
vpr = VPR(db_path, dim=192, top_k=5)
# Service configuration
class NlpBase(BaseModel):
chat: str
class TtsBase(BaseModel):
text: str
class Audios:
def __init__(self) -> None:
self.audios = b""
audios = Audios()
######################################################################
########################### ASR Service #################################
#####################################################################
# Receive a file and return the ASR result
# Upload a file
@app.post("/asr/offline")
@@ -101,7 +103,8 @@ async def speech2textOffline(files: List[UploadFile]):
asr_res = ""
for file in files[:1]:
# Generate a timestamp
now_name = "asr_offline_" + datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav"
now_name = "asr_offline_" + datetime.datetime.strftime(
datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav"
out_file_path = os.path.join(WAV_PATH, now_name)
async with aiofiles.open(out_file_path, 'wb') as out_file:
content = await file.read() # async read
@@ -110,10 +113,9 @@ async def speech2textOffline(files: List[UploadFile]):
# Return the ASR result
asr_res = chatbot.speech2text(out_file_path)
return SuccessRequest(result=asr_res)
# else:
# return ErrorRequest(message="文件不是.wav格式")
return ErrorRequest(message="上传文件为空")
# Receive a file and force-convert the wav to 16 kHz, int16
@app.post("/asr/offlinefile")
async def speech2textOfflineFile(files: List[UploadFile]):
@@ -121,7 +123,8 @@ async def speech2textOfflineFile(files: List[UploadFile]):
asr_res = ""
for file in files[:1]:
# Generate a timestamp
now_name = "asr_offline_" + datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav"
now_name = "asr_offline_" + datetime.datetime.strftime(
datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav"
out_file_path = os.path.join(WAV_PATH, now_name)
async with aiofiles.open(out_file_path, 'wb') as out_file:
content = await file.read() # async read
@@ -140,16 +143,12 @@ async def speech2textOfflineFile(files: List[UploadFile]):
# Return the ASR result
asr_res = chatbot.speech2text(out_file_path)
response_res = {
"asr_result": asr_res,
"wav_base64": wav_base64
}
response_res = {"asr_result": asr_res, "wav_base64": wav_base64}
return SuccessRequest(result=response_res)
return ErrorRequest(message="上传文件为空")
# Streaming receive test
@app.post("/asr/online1")
async def speech2textOnlineRecive(files: List[UploadFile]):
@@ -161,6 +160,7 @@ async def speech2textOnlineRecive(files: List[UploadFile]):
print(f"audios长度变化: {len(audios.audios)}")
return SuccessRequest(message="接收成功")
# Measure the ambient noise level
@app.post("/asr/collectEnv")
async def collectEnv(files: List[UploadFile]):
@@ -171,6 +171,7 @@ async def collectEnv(files: List[UploadFile]):
vad_ = aumanager.vad_threshold
return SuccessRequest(result=vad_, message="采集环境噪音成功")
# Stop recording
@app.get("/asr/stopRecord")
async def stopRecord():
@@ -179,6 +180,7 @@ async def stopRecord():
print("Online录音暂停")
return SuccessRequest(message="停止成功")
# Resume recording
@app.get("/asr/resumeRecord")
async def resumeRecord():
@@ -210,9 +212,9 @@ async def websocket_endpoint(websocket: WebSocket):
# print(f"用户-{user}-离开")
# ASR for online recognition
# ASR for streaming recognition
@app.websocket('/ws/asr/onlineStream')
async def websocket_endpoint(websocket: WebSocket):
async def websocket_endpoint_online(websocket: WebSocket):
"""PaddleSpeech Online ASR Server api
Args:
@@ -298,10 +300,12 @@ async def websocket_endpoint(websocket: WebSocket):
except WebSocketDisconnect:
pass
######################################################################
########################### NLP Service #################################
#####################################################################
@app.post("/nlp/chat")
async def chatOffline(nlp_base: NlpBase):
chat = nlp_base.chat
@@ -311,6 +315,7 @@ async def chatOffline(nlp_base:NlpBase):
res = chatbot.chat(chat)
return SuccessRequest(result=res)
@app.post("/nlp/ie")
async def ieOffline(nlp_base: NlpBase):
nlp_text = nlp_base.chat
@@ -320,17 +325,20 @@ async def ieOffline(nlp_base:NlpBase):
res = chatbot.ie(nlp_text)
return SuccessRequest(result=res)
######################################################################
########################### TTS Service #################################
#####################################################################
@app.post("/tts/offline")
async def text2speechOffline(tts_base: TtsBase):
text = tts_base.text
if not text:
return ErrorRequest(message="文本为空")
else:
now_name = "tts_"+ datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav"
now_name = "tts_" + datetime.datetime.strftime(
datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav"
out_file_path = os.path.join(WAV_PATH, now_name)
# Save to a file, then encode it as base64 for transmission
chatbot.text2speech(text, outpath=out_file_path)
@@ -339,12 +347,14 @@ async def text2speechOffline(tts_base:TtsBase):
base_str = base64.b64encode(data_bin)
return SuccessRequest(result=base_str)
# HTTP streaming TTS
@app.post("/tts/online")
async def stream_tts(request_body: TtsBase):
text = request_body.text
return StreamingResponse(chatbot.text2speechStreamBytes(text=text))
# WebSocket streaming TTS
@app.websocket("/ws/tts/online")
async def stream_ttsWS(websocket: WebSocket):
@@ -356,17 +366,11 @@ async def stream_ttsWS(websocket: WebSocket):
if text:
for sub_wav in chatbot.text2speechStream(text=text):
# print("发送sub wav: ", len(sub_wav))
res = {
"wav": sub_wav,
"done": False
}
res = {"wav": sub_wav, "done": False}
await websocket.send_json(res)
# End of stream
res = {
"wav": sub_wav,
"done": True
}
res = {"wav": sub_wav, "done": True}
await websocket.send_json(res)
# manager.disconnect(websocket)
@@ -396,7 +400,8 @@ async def vpr_enroll(table_name: str=None,
return {'status': False, 'msg': "spk_id can not be None"}
# Save the upload data to server.
content = await audio.read()
now_name = "vpr_enroll_" + datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav"
now_name = "vpr_enroll_" + datetime.datetime.strftime(
datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav"
audio_path = os.path.join(UPLOAD_PATH, now_name)
with open(audio_path, "wb+") as f:
@@ -415,7 +420,8 @@ async def vpr_recog(request: Request,
# try:
# Save the upload data to server.
content = await audio.read()
now_name = "vpr_query_" + datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav"
now_name = "vpr_query_" + datetime.datetime.strftime(
datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav"
query_audio_path = os.path.join(UPLOAD_PATH, now_name)
with open(query_audio_path, "wb+") as f:
f.write(content)
@@ -425,8 +431,6 @@ async def vpr_recog(request: Request,
# Sort results by distance metric, closest distances first
res = sorted(res.items(), key=lambda item: item[1][1], reverse=True)
return res
# except Exception as e:
# return {'status': False, 'msg': e}, 400
@app.post('/vpr/del')
@@ -471,6 +475,7 @@ async def vpr_database64(vprId: int):
except Exception as e:
return {'status': False, 'msg': e}, 400
@app.get('/vpr/data')
async def vpr_data(vprId: int):
# Get the audio file from path by spk_id in MySQL
@@ -482,11 +487,6 @@ async def vpr_data(vprId: int):
except Exception as e:
return {'status': False, 'msg': e}, 400
if __name__ == '__main__':
uvicorn.run(app=app, host='0.0.0.0', port=port)

@@ -1,14 +1,13 @@
aiofiles
faiss-cpu
fastapi
librosa
numpy
paddlenlp
paddlepaddle
paddlespeech
pydantic
scikit_learn
python-multipart
SoundFile
starlette
uvicorn
paddlepaddle
paddlespeech
paddlenlp
faiss-cpu
python-multipart

@@ -1,15 +1,19 @@
import imp
from queue import Queue
import numpy as np
import datetime
import os
import wave
import random
import datetime
import numpy as np
from .util import randName
class AudioMannger:
def __init__(self, robot, frame_length=160, frame=10, data_width=2, vad_default = 300):
def __init__(self,
robot,
frame_length=160,
frame=10,
data_width=2,
vad_default=300):
# binary PCM stream
self.audios = b''
self.asr_result = ""
@@ -20,7 +24,8 @@ class AudioMannger:
os.makedirs(self.file_dir, exist_ok=True)
self.vad_deafult = vad_default
self.vad_threshold = vad_default
self.vad_threshold_path = os.path.join(self.file_dir, "vad_threshold.npy")
self.vad_threshold_path = os.path.join(self.file_dir,
"vad_threshold.npy")
# 10 ms per frame
self.frame_length = frame_length
@@ -37,14 +42,11 @@ class AudioMannger:
self.max_silence_cnt = 4
self.is_pause = False # flag for pausing/resuming the recording
def init(self):
if os.path.exists(self.vad_threshold_path):
# the saved average-loudness file exists
self.vad_threshold = np.load(self.vad_threshold_path)
def clear_audio(self):
# clear the accumulated PCM chunks and the ASR result
self.audios = b''
@@ -52,7 +54,6 @@
def clear_asr(self):
self.asr_result = ""
def compute_chunk_volume(self, start_index, pcm_bins):
# compute the mean energy over one frame-length window
pcm_bin = pcm_bins[start_index:start_index + self.window_length]
@@ -63,13 +64,13 @@
x = np.abs(x)
return np.mean(x)
def is_speech(self, start_index, pcm_bins):
# return False if the start index is out of range
if start_index > len(pcm_bins):
return False
# check whether the frames from this start are silence
energy = self.compute_chunk_volume(start_index=start_index, pcm_bins=pcm_bins)
energy = self.compute_chunk_volume(
start_index=start_index, pcm_bins=pcm_bins)
# print(energy)
if energy > self.vad_threshold:
return True
@@ -80,7 +81,8 @@
max_energy = 0
start = 0
while start < len(pcm_bins):
energy = self.compute_chunk_volume(start_index=start, pcm_bins=pcm_bins)
energy = self.compute_chunk_volume(
start_index=start, pcm_bins=pcm_bins)
if energy > max_energy:
max_energy = energy
start += self.window_length
@@ -110,7 +112,11 @@
print("录音停止")
# save the buffered audio as a wav file and feed it to ASR
if len(self.audios) > 2 * 16000:
file_path = os.path.join(self.file_dir, "asr_" + datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d%H%M%S') + randName() + ".wav")
file_path = os.path.join(
self.file_dir,
"asr_" + datetime.datetime.strftime(
datetime.datetime.now(),
'%Y%m%d%H%M%S') + randName() + ".wav")
self.save_audio(file_path=file_path)
self.asr_result = self.robot.speech2text(file_path)
self.clear_audio()
@@ -145,6 +151,3 @@
def resume(self):
self.is_pause = False

@@ -1,13 +1,10 @@
from re import sub
import numpy as np
import paddle
import librosa
import soundfile
from paddlespeech.server.engine.asr.online.python.asr_engine import ASREngine
from paddlespeech.server.engine.asr.online.python.asr_engine import PaddleASRConnectionHanddler
from paddlespeech.server.utils.config import get_config
def readWave(samples):
x_len = len(samples)
@@ -31,14 +28,17 @@ def readWave(samples):
class ASR:
def __init__(self, config_path, ) -> None:
def __init__(
self,
config_path, ) -> None:
self.config = get_config(config_path)['asr_online']
self.engine = ASREngine()
self.engine.init(self.config)
self.connection_handler = PaddleASRConnectionHanddler(self.engine)
def offlineASR(self, samples, sample_rate=16000):
x_chunk, x_chunk_lens = self.engine.preprocess(samples=samples, sample_rate=sample_rate)
x_chunk, x_chunk_lens = self.engine.preprocess(
samples=samples, sample_rate=sample_rate)
self.engine.run(x_chunk, x_chunk_lens)
result = self.engine.postprocess()
self.engine.reset()
@@ -58,5 +58,3 @@ class ASR:
asr_results = self.connection_handler.get_result()
self.connection_handler.reset()
return asr_results

@@ -1,14 +1,16 @@
from paddlenlp import Taskflow
class NLP:
def __init__(self, ie_model_path=None):
schema = ["时间", "出发地", "目的地", "费用"]
if ie_model_path:
self.ie_model = Taskflow("information_extraction",
schema=schema, task_path=ie_model_path)
self.ie_model = Taskflow(
"information_extraction",
schema=schema,
task_path=ie_model_path)
else:
self.ie_model = Taskflow("information_extraction",
schema=schema)
self.ie_model = Taskflow("information_extraction", schema=schema)
self.dialogue_model = Taskflow("dialogue")
@@ -19,5 +21,3 @@ class NLP:
def ie(self, text):
result = self.ie_model(text)
return result

@@ -1,8 +1,8 @@
import base64
import sqlite3
import os
import sqlite3
import numpy as np
from pkg_resources import resource_stream
def dict_factory(cursor, row):
@@ -11,6 +11,7 @@ def dict_factory(cursor, row):
d[col[0]] = row[idx]
return d
class DataBase(object):
def __init__(self, db_path: str):
db_path = os.path.realpath(db_path)
@@ -50,11 +51,12 @@ class DataBase(object):
if not os.path.exists(wav_path):
return None, "wav not exists"
else:
sql = f"""
sql = """
insert into
vprtable (username, vector, wavpath)
values (?, ?, ?)
"""
try:
self.cursor.execute(sql, (username, vector_base64, wav_path))
self.conn.commit()
@@ -75,6 +77,7 @@ class DataBase(object):
sql = f"""
SELECT * from vprtable WHERE `id` = {vpr_id}
"""
result = self.cursor.execute(sql).fetchall()
return result
@@ -82,6 +85,7 @@
sql = f"""
SELECT * from vprtable WHERE `username` = '{username}'
"""
result = self.cursor.execute(sql).fetchall()
return result
@@ -89,20 +93,23 @@
sql = f"""
DELETE from vprtable WHERE `username`='{username}'
"""
self.cursor.execute(sql)
self.conn.commit()
def drop_all(self):
sql = f"""
sql = """
DELETE from vprtable
"""
self.cursor.execute(sql)
self.conn.commit()
def drop_table(self):
sql = f"""
sql = """
DROP TABLE vprtable
"""
self.cursor.execute(sql)
self.conn.commit()
@@ -113,4 +120,3 @@
b = base64.b64decode(vector_base64)
vc = np.frombuffer(b, dtype=dtype)
return vc

@@ -5,18 +5,19 @@
# 2. Load the model
# 3. End-to-end inference
# 4. Streaming inference
import base64
import math
import logging
import math
import numpy as np
from paddlespeech.server.utils.onnx_infer import get_sess
from paddlespeech.t2s.frontend.zh_frontend import Frontend
from paddlespeech.server.utils.util import denorm, get_chunks
from paddlespeech.server.engine.tts.online.onnx.tts_engine import TTSEngine
from paddlespeech.server.utils.audio_process import float2pcm
from paddlespeech.server.utils.config import get_config
from paddlespeech.server.utils.util import denorm
from paddlespeech.server.utils.util import get_chunks
from paddlespeech.t2s.frontend.zh_frontend import Frontend
from paddlespeech.server.engine.tts.online.onnx.tts_engine import TTSEngine
class TTS:
def __init__(self, config_path):
@@ -54,15 +55,12 @@ class TTS:
merge_sentences = False
input_ids = self.frontend.get_input_ids(
text,
merge_sentences=merge_sentences,
get_tone_ids=get_tone_ids)
text, merge_sentences=merge_sentences, get_tone_ids=get_tone_ids)
phone_ids = input_ids["phone_ids"]
wav_list = []
for i in range(len(phone_ids)):
orig_hs = self.engine.executor.am_encoder_infer_sess.run(
None, input_feed={'text': phone_ids[i].numpy()}
)
None, input_feed={'text': phone_ids[i].numpy()})
hs = orig_hs[0]
am_decoder_output = self.engine.executor.am_decoder_sess.run(
None, input_feed={'xs': hs})
@@ -74,7 +72,8 @@
am_output_data = am_decoder_output + np.transpose(
am_postnet_output[0], (0, 2, 1))
normalized_mel = am_output_data[0][0]
mel = denorm(normalized_mel, self.engine.executor.am_mu, self.engine.executor.am_std)
mel = denorm(normalized_mel, self.engine.executor.am_mu,
self.engine.executor.am_std)
wav = self.engine.executor.voc_sess.run(
output_names=None, input_feed={'logmel': mel})[0]
wav_list.append(wav)
@@ -88,9 +87,7 @@
# front
input_ids = self.frontend.get_input_ids(
text,
merge_sentences=merge_sentences,
get_tone_ids=get_tone_ids)
text, merge_sentences=merge_sentences, get_tone_ids=get_tone_ids)
phone_ids = input_ids["phone_ids"]
for i in range(len(phone_ids)):
@@ -105,14 +102,15 @@
mel = mel[0]
# voc streaming
mel_chunks = get_chunks(mel, self.config.voc_block, self.config.voc_pad, "voc")
mel_chunks = get_chunks(mel, self.config.voc_block,
self.config.voc_pad, "voc")
voc_chunk_num = len(mel_chunks)
for i, mel_chunk in enumerate(mel_chunks):
sub_wav = self.executor.voc_sess.run(
output_names=None, input_feed={'logmel': mel_chunk})
sub_wav = self.depadding(sub_wav[0], voc_chunk_num, i,
self.config.voc_block, self.config.voc_pad,
self.config.voc_upsample)
sub_wav = self.depadding(
sub_wav[0], voc_chunk_num, i, self.config.voc_block,
self.config.voc_pad, self.config.voc_upsample)
yield self.after_process(sub_wav)
@@ -130,7 +128,8 @@
end = min(self.config.voc_block + self.config.voc_pad, mel_len)
# streaming am
hss = get_chunks(orig_hs, self.config.am_block, self.config.am_pad, "am")
hss = get_chunks(orig_hs, self.config.am_block,
self.config.am_pad, "am")
am_chunk_num = len(hss)
for i, hs in enumerate(hss):
am_decoder_output = self.executor.am_decoder_sess.run(
@@ -147,7 +146,8 @@
sub_mel = denorm(normalized_mel, self.executor.am_mu,
self.executor.am_std)
sub_mel = self.depadding(sub_mel, am_chunk_num, i,
self.config.am_block, self.config.am_pad, 1)
self.config.am_block,
self.config.am_pad, 1)
if i == 0:
mel_streaming = sub_mel
@@ -165,23 +165,22 @@
output_names=None, input_feed={'logmel': voc_chunk})
sub_wav = self.depadding(
sub_wav[0], voc_chunk_num, voc_chunk_id,
self.config.voc_block, self.config.voc_pad, self.config.voc_upsample)
self.config.voc_block, self.config.voc_pad,
self.config.voc_upsample)
yield self.after_process(sub_wav)
voc_chunk_id += 1
start = max(
0, voc_chunk_id * self.config.voc_block - self.config.voc_pad)
end = min(
(voc_chunk_id + 1) * self.config.voc_block + self.config.voc_pad,
mel_len)
start = max(0, voc_chunk_id * self.config.voc_block -
self.config.voc_pad)
end = min((voc_chunk_id + 1) * self.config.voc_block +
self.config.voc_pad, mel_len)
else:
logging.error(
"Only support fastspeech2_csmsc or fastspeech2_cnndecoder_csmsc on streaming tts."
)
def streamTTSBytes(self, text):
for wav in self.engine.executor.infer(
text=text,
@@ -192,7 +191,6 @@
wav_bytes = wav.tobytes() # to bytes
yield wav_bytes
def after_process(self, wav):
# for tvm
wav = float2pcm(wav) # float32 to int16
@@ -203,7 +201,3 @@
def streamTTS_TVM(self, text):
# optimize with TVM
pass

@@ -1,11 +1,13 @@
# The VPR demo uses neither MySQL nor Milvus; it is only for the Docker demo
import logging
import faiss
from matplotlib import use
import numpy as np
from .sql_helper import DataBase
from .vpr_encode import get_audio_embedding
class VPR:
def __init__(self, db_path, dim, top_k) -> None:
# initialization
@@ -34,7 +36,8 @@ class VPR:
if len(vc.shape) == 1:
vc = np.expand_dims(vc, axis=0)
# build the index from the database
self.index_ip.add_with_ids(vc, np.array((idx,)).astype('int64'))
self.index_ip.add_with_ids(vc, np.array(
(idx, )).astype('int64'))
logging.info("faiss 构建完毕")
def faiss_enroll(self, idx, vc):
@@ -63,7 +66,8 @@ class VPR:
D, I = self.index_ip.search(emb_search, self.top_k)
D = D.tolist()[0]
I = I.tolist()[0]
return [(round(D[i] * 100, 2 ), I[i]) for i in range(len(D)) if I[i] != -1]
return [(round(D[i] * 100, 2), I[i]) for i in range(len(D))
if I[i] != -1]
else:
logging.error("识别失败")
return None
@@ -104,7 +108,6 @@ class VPR:
res = self.db.select_by_id(vpr_idx)
return res[0]['wavpath']
def vpr_data(self, idx):
# fetch the record for the given ID
res = self.db.select_by_id(idx)
@@ -115,4 +118,3 @@
self.db.drop_table()
# clear the faiss index
self.index_ip.reset()

@@ -1,9 +1,12 @@
from paddlespeech.cli.vector import VectorExecutor
import numpy as np
import logging
import numpy as np
from paddlespeech.cli.vector import VectorExecutor
vector_executor = VectorExecutor()
def get_audio_embedding(path):
"""
Use vpr_inference to generate embedding of audio
@@ -16,5 +19,3 @@ def get_audio_embedding(path):
except Exception as e:
logging.error(f"Error with embedding:{e}")
return None

@@ -2,6 +2,7 @@ from typing import List
from fastapi import WebSocket
class ConnectionManager:
def __init__(self):
# holds the active WebSocket connection objects

@@ -1,15 +1,18 @@
from paddlespeech.cli.asr.infer import ASRExecutor
import soundfile as sf
import os
import librosa
import soundfile as sf
from src.SpeechBase.asr import ASR
from src.SpeechBase.tts import TTS
from src.SpeechBase.nlp import NLP
from src.SpeechBase.tts import TTS
from paddlespeech.cli.asr.infer import ASRExecutor
class Robot:
def __init__(self, asr_config, tts_config,asr_init_path,
def __init__(self,
asr_config,
tts_config,
asr_init_path,
ie_model_path=None) -> None:
self.nlp = NLP(ie_model_path=ie_model_path)
self.asr = ASR(config_path=asr_config)
@@ -22,7 +25,6 @@ class Robot:
self.asr_name = "conformer_wenetspeech"
self.warm_up_asrmodel(asr_init_path)
def warm_up_asrmodel(self, asr_init_path):
if not os.path.exists(asr_init_path):
path_dir = os.path.dirname(asr_init_path)
@@ -34,9 +36,12 @@
self.text2speech(text, asr_init_path)
# initialize the ASR model
self.asr_model(asr_init_path, model=self.asr_name,lang='zh',
sample_rate=16000, force_yes=True)
self.asr_model(
asr_init_path,
model=self.asr_name,
lang='zh',
sample_rate=16000,
force_yes=True)
def speech2text(self, audio_file):
self.asr_model.preprocess(self.asr_name, audio_file)
@@ -46,8 +51,7 @@
def text2speech(self, text, outpath):
wav = self.tts.offlineTTS(text)
sf.write(
outpath, wav, samplerate=self.tts_sample_rate)
sf.write(outpath, wav, samplerate=self.tts_sample_rate)
res = wav
return res
@@ -66,5 +70,3 @@
def ie(self, text):
result = self.nlp.ie(text)
return result

@@ -1,18 +1,13 @@
import random
def randName(n=5):
return "".join(random.sample('zyxwvutsrqponmlkjihgfedcba', n))
def SuccessRequest(result=None, message="ok"):
return {
"code": 0,
"result":result,
"message": message
}
return {"code": 0, "result": result, "message": message}
def ErrorRequest(result=None, message="error"):
return {
"code": -1,
"result":result,
"message": message
}
return {"code": -1, "result": result, "message": message}

@@ -1,12 +1,6 @@
myst-parser
numpydoc
recommonmark>=0.5.0
sphinx
sphinx-autobuild
sphinx-markdown-tables
sphinx_rtd_theme
paddlepaddle>=2.2.2
braceexpand
colorlog
editdistance
fastapi
g2p_en
g2pM
h5py
@@ -14,40 +8,45 @@ inflect
jieba
jsonlines
kaldiio
keyboard
librosa==0.8.1
loguru
matplotlib
myst-parser
nara_wpe
numpydoc
onnxruntime==1.10.0
opencc
pandas
paddlenlp
paddlepaddle>=2.2.2
paddlespeech_feat
pandas
pathos == 0.2.8
pattern_singleton
Pillow>=9.0.0
praatio==5.0.0
prettytable
pypinyin
pypinyin-dict
python-dateutil
pyworld==0.2.12
recommonmark>=0.5.0
resampy==0.2.2
sacrebleu
scipy
sentencepiece~=0.1.96
soundfile~=0.10
sphinx
sphinx-autobuild
sphinx-markdown-tables
sphinx_rtd_theme
textgrid
timer
tqdm
typeguard
uvicorn
visualdl
webrtcvad
websockets
yacs~=0.1.8
prettytable
zhon
colorlog
pathos == 0.2.8
fastapi
websockets
keyboard
uvicorn
pattern_singleton
braceexpand

@@ -20,10 +20,11 @@
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
import recommonmark.parser
import sphinx_rtd_theme
import sys
import os
sys.path.insert(0, os.path.abspath('../..'))
autodoc_mock_imports = ["soundfile", "librosa"]

@@ -44,8 +44,8 @@ fi
if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
# install paddle2onnx
version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
if [[ -z "$version" || ${version} != '0.9.8' ]]; then
pip install paddle2onnx==0.9.8
if [[ -z "$version" || ${version} != '1.0.0' ]]; then
pip install paddle2onnx==1.0.0
fi
./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_aishell3
# considering the balance between speed and quality, we recommend that you use hifigan as vocoder

@@ -0,0 +1,154 @@
# VITS with AISHELL-3
This example contains code used to train a [VITS](https://arxiv.org/abs/2106.06103) model with [AISHELL-3](http://www.aishelltech.com/aishell_3). The trained model can be used for the voice cloning task. We follow the model structure of [Transfer Learning from Speaker Verification to Multispeaker Text-To-Speech Synthesis](https://arxiv.org/pdf/1806.04558.pdf). The general steps are as follows:
1. Speaker Encoder: we use speaker verification to train a speaker encoder. The datasets used for this task are different from those used for `VITS` itself because transcriptions are not needed, so we can use more data; see [ge2e](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/other/ge2e).
2. Synthesizer and Vocoder: we use the trained speaker encoder to generate a speaker embedding for each sentence in AISHELL-3. This embedding is an extra input of `VITS` that is concatenated with the encoder outputs (see the sketch below). The vocoder is part of `VITS` due to its special structure.
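The snippet below is a minimal numpy sketch of that fusion idea, not the model code from this repo: the shapes, the tiling over time, and the absence of a learned projection layer are all illustrative assumptions.

```python
import numpy as np

def fuse_speaker_embedding(encoder_out: np.ndarray, spk_emb: np.ndarray) -> np.ndarray:
    # Tile the utterance-level embedding over time, then concatenate it
    # onto every frame of the text-encoder output.
    tiled = np.repeat(spk_emb[None, :], encoder_out.shape[0], axis=0)  # (T, D)
    return np.concatenate([encoder_out, tiled], axis=-1)               # (T, H + D)

encoder_out = np.random.randn(100, 192).astype(np.float32)  # T=100 frames, H=192 channels
spk_emb = np.random.randn(256).astype(np.float32)           # stand-in for one dump/embed/**/*.npy
print(fuse_speaker_embedding(encoder_out, spk_emb).shape)   # (100, 448)
```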
## Dataset
### Download and Extract
Download AISHELL-3 from its [Official Website](http://www.aishelltech.com/aishell_3) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/data_aishell3`.
### Get MFA Result and Extract
We use [MFA2.x](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) to get phonemes for VITS; the durations from MFA are not needed here.
You can download the alignment from [aishell3_alignment_tone.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/AISHELL-3/with_tone/aishell3_alignment_tone.tar.gz), or train your own MFA model by following the [mfa example](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/other/mfa) (which uses MFA1.x for now) in our repo.
## Pretrained GE2E Model
We use pretrained GE2E model to generate speaker embedding for each sentence.
Download pretrained GE2E model from here [ge2e_ckpt_0.3.zip](https://bj.bcebos.com/paddlespeech/Parakeet/released_models/ge2e/ge2e_ckpt_0.3.zip), and `unzip` it.
## Get Started
Assume the path to the dataset is `~/datasets/data_aishell3`.
Assume the path to the MFA result of AISHELL-3 is `./aishell3_alignment_tone`.
Assume the path to the pretrained ge2e model is `./ge2e_ckpt_0.3`.
Run the command below to
1. **source path**.
2. preprocess the dataset.
3. train the model.
4. synthesize waveform from `metadata.jsonl`.
5. start a voice cloning inference.
```bash
./run.sh
```
You can choose a range of stages you want to run, or set `stage` equal to `stop-stage` to use only one stage, for example, running the following command will only preprocess the dataset.
```bash
./run.sh --stage 0 --stop-stage 0
```
### Data Preprocessing
```bash
CUDA_VISIBLE_DEVICES=${gpus} ./local/preprocess.sh ${conf_path} ${add_blank} ${ge2e_ckpt_path}
```
When it is done, a `dump` folder is created in the current directory. The structure of the `dump` folder is listed below.
```text
dump
├── dev
│   ├── norm
│   └── raw
├── embed
│ ├── SSB0005
│ ├── SSB0009
│ ├── ...
│ └── ...
├── phone_id_map.txt
├── speaker_id_map.txt
├── test
│   ├── norm
│   └── raw
└── train
├── feats_stats.npy
├── norm
└── raw
```
The `embed` folder contains the generated speaker embedding for each sentence in AISHELL-3; it mirrors the directory structure of the wav files, and each embedding is stored in `.npy` format.
Computing the utterance embeddings can take x hours.
The dataset is split into 3 parts, namely `train`, `dev`, and `test`, each of which contains a `norm` and `raw` subfolder. The raw folder contains the wave and linear spectrogram of each utterance, while the norm folder contains the normalized ones. The statistics used to normalize features are computed from the training set, which is located in `dump/train/feats_stats.npy`.
Also, there is a `metadata.jsonl` in each subfolder. It is a table-like file that contains phones, text_lengths, feats, feats_lengths, the path of linear spectrogram features, the path of raw waves, the speaker, and the id of each utterance.
The preprocessing step is very similar to that of [vits](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/vits), but there is one more `ge2e/inference` step here.
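If you want to peek at the generated metadata, a few lines of `jsonlines` (which appears in the repo requirements) are enough; note that the key names printed here are assumptions inferred from the field list above, not verified names.

```python
import jsonlines

# Inspect the first record of the training metadata produced by preprocessing.
with jsonlines.open("dump/train/norm/metadata.jsonl") as reader:
    record = next(iter(reader))
print(sorted(record.keys()))  # expect keys like phones, feats, feats_lengths, speaker, utt_id
```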
### Model Training
```bash
CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path}
```
The training step is very similar to that of [vits](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/vits), but we should set `--voice-cloning=True` when calling `${BIN_DIR}/train.py`.
### Synthesizing
`./local/synthesize.sh` calls `${BIN_DIR}/synthesize.py`, which can synthesize waveform from `metadata.jsonl`.
```bash
CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name}
```
```text
usage: synthesize.py [-h] [--config CONFIG] [--ckpt CKPT]
[--phones_dict PHONES_DICT] [--speaker_dict SPEAKER_DICT]
[--voice-cloning VOICE_CLONING] [--ngpu NGPU]
[--test_metadata TEST_METADATA] [--output_dir OUTPUT_DIR]
Synthesize with VITS
optional arguments:
-h, --help show this help message and exit
--config CONFIG Config of VITS.
--ckpt CKPT Checkpoint file of VITS.
--phones_dict PHONES_DICT
phone vocabulary file.
--speaker_dict SPEAKER_DICT
speaker id map file.
--voice-cloning VOICE_CLONING
whether training voice cloning model.
--ngpu NGPU if ngpu == 0, use cpu.
--test_metadata TEST_METADATA
test metadata.
--output_dir OUTPUT_DIR
output dir.
```
The synthesizing step is very similar to that of [vits](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/vits), but we should set `--voice-cloning=True` when calling `${BIN_DIR}/../synthesize.py`.
### Voice Cloning
Assume there are some reference audios in `./ref_audio`
```text
ref_audio
├── 001238.wav
├── LJ015-0254.wav
└── audio_self_test.mp3
```
`./local/voice_cloning.sh` calls `${BIN_DIR}/voice_cloning.py`
```bash
CUDA_VISIBLE_DEVICES=${gpus} ./local/voice_cloning.sh ${conf_path} ${train_output_path} ${ckpt_name} ${ge2e_params_path} ${add_blank} ${ref_audio_dir}
```
If you want to convert a source speaker's audio file to the reference speaker's voice, run:
```bash
CUDA_VISIBLE_DEVICES=${gpus} ./local/voice_cloning.sh ${conf_path} ${train_output_path} ${ckpt_name} ${ge2e_params_path} ${add_blank} ${ref_audio_dir} ${src_audio_path}
```
<!-- TODO display these after we trained the model -->
<!--
## Pretrained Model
The pretrained model can be downloaded here:
- [vits_vc_aishell3_ckpt_1.1.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/vits/vits_vc_aishell3_ckpt_1.1.0.zip) (add_blank=true)
The VITS checkpoint contains the files listed below.
(There is no need for `speaker_id_map.txt` here.)
```text
vits_vc_aishell3_ckpt_1.1.0
├── default.yaml # default config used to train vits
├── phone_id_map.txt # phone vocabulary file when training vits
└── snapshot_iter_333000.pdz # model parameters and optimizer states
```
P.S. This checkpoint is not good enough yet; a better one is still being trained.
-->

@@ -0,0 +1,185 @@
# This configuration was tested on 4 GPUs (V100) with 32 GB of GPU
# memory. It takes around 2 weeks to finish the training,
# but a 100k-iteration model should already generate reasonable results.
###########################################################
# FEATURE EXTRACTION SETTING #
###########################################################
fs: 22050 # sr
n_fft: 1024 # FFT size (samples).
n_shift: 256 # Hop size (samples). 12.5ms
win_length: null # Window length (samples). 50ms
# If set to null, it will be the same as fft_size.
window: "hann" # Window function.
##########################################################
# TTS MODEL SETTING #
##########################################################
model:
# generator related
generator_type: vits_generator
generator_params:
hidden_channels: 192
spk_embed_dim: 256
global_channels: 256
segment_size: 32
text_encoder_attention_heads: 2
text_encoder_ffn_expand: 4
text_encoder_blocks: 6
text_encoder_positionwise_layer_type: "conv1d"
text_encoder_positionwise_conv_kernel_size: 3
text_encoder_positional_encoding_layer_type: "rel_pos"
text_encoder_self_attention_layer_type: "rel_selfattn"
text_encoder_activation_type: "swish"
text_encoder_normalize_before: True
text_encoder_dropout_rate: 0.1
text_encoder_positional_dropout_rate: 0.0
text_encoder_attention_dropout_rate: 0.1
use_macaron_style_in_text_encoder: True
use_conformer_conv_in_text_encoder: False
text_encoder_conformer_kernel_size: -1
decoder_kernel_size: 7
decoder_channels: 512
decoder_upsample_scales: [8, 8, 2, 2]
decoder_upsample_kernel_sizes: [16, 16, 4, 4]
decoder_resblock_kernel_sizes: [3, 7, 11]
decoder_resblock_dilations: [[1, 3, 5], [1, 3, 5], [1, 3, 5]]
use_weight_norm_in_decoder: True
posterior_encoder_kernel_size: 5
posterior_encoder_layers: 16
posterior_encoder_stacks: 1
posterior_encoder_base_dilation: 1
posterior_encoder_dropout_rate: 0.0
use_weight_norm_in_posterior_encoder: True
flow_flows: 4
flow_kernel_size: 5
flow_base_dilation: 1
flow_layers: 4
flow_dropout_rate: 0.0
use_weight_norm_in_flow: True
use_only_mean_in_flow: True
stochastic_duration_predictor_kernel_size: 3
stochastic_duration_predictor_dropout_rate: 0.5
stochastic_duration_predictor_flows: 4
stochastic_duration_predictor_dds_conv_layers: 3
# discriminator related
discriminator_type: hifigan_multi_scale_multi_period_discriminator
discriminator_params:
scales: 1
scale_downsample_pooling: "AvgPool1D"
scale_downsample_pooling_params:
kernel_size: 4
stride: 2
padding: 2
scale_discriminator_params:
in_channels: 1
out_channels: 1
kernel_sizes: [15, 41, 5, 3]
channels: 128
max_downsample_channels: 1024
max_groups: 16
bias: True
downsample_scales: [2, 2, 4, 4, 1]
nonlinear_activation: "leakyrelu"
nonlinear_activation_params:
negative_slope: 0.1
use_weight_norm: True
use_spectral_norm: False
follow_official_norm: False
periods: [2, 3, 5, 7, 11]
period_discriminator_params:
in_channels: 1
out_channels: 1
kernel_sizes: [5, 3]
channels: 32
downsample_scales: [3, 3, 3, 3, 1]
max_downsample_channels: 1024
bias: True
nonlinear_activation: "leakyrelu"
nonlinear_activation_params:
negative_slope: 0.1
use_weight_norm: True
use_spectral_norm: False
# others
sampling_rate: 22050 # needed in the inference for saving wav
cache_generator_outputs: True # whether to cache generator outputs in the training
###########################################################
# LOSS SETTING #
###########################################################
# loss function related
generator_adv_loss_params:
average_by_discriminators: False # whether to average loss value by #discriminators
loss_type: mse # loss type, "mse" or "hinge"
discriminator_adv_loss_params:
average_by_discriminators: False # whether to average loss value by #discriminators
loss_type: mse # loss type, "mse" or "hinge"
feat_match_loss_params:
average_by_discriminators: False # whether to average loss value by #discriminators
average_by_layers: False # whether to average loss value by #layers of each discriminator
include_final_outputs: True # whether to include final outputs for loss calculation
mel_loss_params:
fs: 22050 # must be the same as the training data
fft_size: 1024 # fft points
hop_size: 256 # hop size
win_length: null # window length
window: hann # window type
num_mels: 80 # number of Mel basis
fmin: 0 # minimum frequency for Mel basis
fmax: null # maximum frequency for Mel basis
log_base: null # null represents natural log
###########################################################
# ADVERSARIAL LOSS SETTING #
###########################################################
lambda_adv: 1.0 # loss scaling coefficient for adversarial loss
lambda_mel: 45.0 # loss scaling coefficient for Mel loss
lambda_feat_match: 2.0 # loss scaling coefficient for feat match loss
lambda_dur: 1.0 # loss scaling coefficient for duration loss
lambda_kl: 1.0 # loss scaling coefficient for KL divergence loss
# others
sampling_rate: 22050 # needed in the inference for saving wav
cache_generator_outputs: True # whether to cache generator outputs in the training
###########################################################
# DATA LOADER SETTING #
###########################################################
batch_size: 50 # Batch size.
num_workers: 4 # Number of workers in DataLoader.
##########################################################
# OPTIMIZER & SCHEDULER SETTING #
##########################################################
# optimizer setting for generator
generator_optimizer_params:
beta1: 0.8
beta2: 0.99
epsilon: 1.0e-9
weight_decay: 0.0
generator_scheduler: exponential_decay
generator_scheduler_params:
learning_rate: 2.0e-4
gamma: 0.999875
# optimizer setting for discriminator
discriminator_optimizer_params:
beta1: 0.8
beta2: 0.99
epsilon: 1.0e-9
weight_decay: 0.0
discriminator_scheduler: exponential_decay
discriminator_scheduler_params:
learning_rate: 2.0e-4
gamma: 0.999875
generator_first: False # whether to start updating generator first
##########################################################
# OTHER TRAINING SETTING #
##########################################################
num_snapshots: 10 # max number of snapshots to keep while training
train_max_steps: 350000 # Number of training steps. == total_iters / ngpus, total_iters = 1000000
save_interval_steps: 1000 # Interval steps to save checkpoint.
eval_interval_steps: 250 # Interval steps to evaluate the network.
seed: 777 # random seed number

@@ -0,0 +1,79 @@
#!/bin/bash
stage=0
stop_stage=100
config_path=$1
add_blank=$2
ge2e_ckpt_path=$3
# gen speaker embedding
if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
python3 ${MAIN_ROOT}/paddlespeech/vector/exps/ge2e/inference.py \
--input=~/datasets/data_aishell3/train/wav/ \
--output=dump/embed \
--checkpoint_path=${ge2e_ckpt_path}
fi
# copy from tts3/preprocess
if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
# get durations from MFA's result
echo "Generate durations.txt from MFA results ..."
python3 ${MAIN_ROOT}/utils/gen_duration_from_textgrid.py \
--inputdir=./aishell3_alignment_tone \
--output durations.txt \
--config=${config_path}
fi
if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
# extract features
echo "Extract features ..."
python3 ${BIN_DIR}/preprocess.py \
--dataset=aishell3 \
--rootdir=~/datasets/data_aishell3/ \
--dumpdir=dump \
--dur-file=durations.txt \
--config=${config_path} \
--num-cpu=20 \
--cut-sil=True \
--spk_emb_dir=dump/embed
fi
if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
# get features' stats(mean and std)
echo "Get features' stats ..."
python3 ${MAIN_ROOT}/utils/compute_statistics.py \
--metadata=dump/train/raw/metadata.jsonl \
--field-name="feats"
fi
if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
# normalize and convert phone/speaker to id, dev and test should use train's stats
echo "Normalize ..."
python3 ${BIN_DIR}/normalize.py \
--metadata=dump/train/raw/metadata.jsonl \
--dumpdir=dump/train/norm \
--feats-stats=dump/train/feats_stats.npy \
--phones-dict=dump/phone_id_map.txt \
--speaker-dict=dump/speaker_id_map.txt \
--add-blank=${add_blank} \
--skip-wav-copy
python3 ${BIN_DIR}/normalize.py \
--metadata=dump/dev/raw/metadata.jsonl \
--dumpdir=dump/dev/norm \
--feats-stats=dump/train/feats_stats.npy \
--phones-dict=dump/phone_id_map.txt \
--speaker-dict=dump/speaker_id_map.txt \
--add-blank=${add_blank} \
--skip-wav-copy
python3 ${BIN_DIR}/normalize.py \
--metadata=dump/test/raw/metadata.jsonl \
--dumpdir=dump/test/norm \
--feats-stats=dump/train/feats_stats.npy \
--phones-dict=dump/phone_id_map.txt \
--speaker-dict=dump/speaker_id_map.txt \
--add-blank=${add_blank} \
--skip-wav-copy
fi

@@ -0,0 +1,19 @@
#!/bin/bash
config_path=$1
train_output_path=$2
ckpt_name=$3
stage=0
stop_stage=0
if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
FLAGS_allocator_strategy=naive_best_fit \
FLAGS_fraction_of_gpu_memory_to_use=0.01 \
python3 ${BIN_DIR}/synthesize.py \
--config=${config_path} \
--ckpt=${train_output_path}/checkpoints/${ckpt_name} \
--phones_dict=dump/phone_id_map.txt \
--test_metadata=dump/test/norm/metadata.jsonl \
--output_dir=${train_output_path}/test \
--voice-cloning=True
fi

@@ -0,0 +1,18 @@
#!/bin/bash
config_path=$1
train_output_path=$2
# install monotonic_align
cd ${MAIN_ROOT}/paddlespeech/t2s/models/vits/monotonic_align
python3 setup.py build_ext --inplace
cd -
python3 ${BIN_DIR}/train.py \
--train-metadata=dump/train/norm/metadata.jsonl \
--dev-metadata=dump/dev/norm/metadata.jsonl \
--config=${config_path} \
--output-dir=${train_output_path} \
--ngpu=4 \
--phones-dict=dump/phone_id_map.txt \
--voice-cloning=True

@@ -0,0 +1,22 @@
#!/bin/bash
config_path=$1
train_output_path=$2
ckpt_name=$3
ge2e_params_path=$4
add_blank=$5
ref_audio_dir=$6
src_audio_path=$7
FLAGS_allocator_strategy=naive_best_fit \
FLAGS_fraction_of_gpu_memory_to_use=0.01 \
python3 ${BIN_DIR}/voice_cloning.py \
--config=${config_path} \
--ckpt=${train_output_path}/checkpoints/${ckpt_name} \
--ge2e_params_path=${ge2e_params_path} \
--phones_dict=dump/phone_id_map.txt \
--text="凯莫瑞安联合体的经济崩溃迫在眉睫。" \
--audio-path=${src_audio_path} \
--input-dir=${ref_audio_dir} \
--output-dir=${train_output_path}/vc_syn \
--add-blank=${add_blank}

@@ -0,0 +1,13 @@
#!/bin/bash
export MAIN_ROOT=`realpath ${PWD}/../../../`
export PATH=${MAIN_ROOT}:${MAIN_ROOT}/utils:${PATH}
export LC_ALL=C
export PYTHONDONTWRITEBYTECODE=1
# Use UTF-8 in Python to avoid UnicodeDecodeError when LC_ALL=C
export PYTHONIOENCODING=UTF-8
export PYTHONPATH=${MAIN_ROOT}:${PYTHONPATH}
MODEL=vits
export BIN_DIR=${MAIN_ROOT}/paddlespeech/t2s/exps/${MODEL}

@@ -0,0 +1,45 @@
#!/bin/bash
set -e
source path.sh
gpus=0,1,2,3
stage=0
stop_stage=100
conf_path=conf/default.yaml
train_output_path=exp/default
ckpt_name=snapshot_iter_153.pdz
add_blank=true
ref_audio_dir=ref_audio
src_audio_path=''
# do not include ".pdparams" here
ge2e_ckpt_path=./ge2e_ckpt_0.3/step-3000000
# include ".pdparams" here
ge2e_params_path=${ge2e_ckpt_path}.pdparams
# with the following command, you can choose the stage range you want to run
# such as `./run.sh --stage 0 --stop-stage 0`
# this cannot be mixed with `$1`, `$2` ...
source ${MAIN_ROOT}/utils/parse_options.sh || exit 1
if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
# prepare data
CUDA_VISIBLE_DEVICES=${gpus} ./local/preprocess.sh ${conf_path} ${add_blank} ${ge2e_ckpt_path} || exit -1
fi
if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
# train model, all `ckpt` under `train_output_path/checkpoints/` dir
CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path} || exit -1
fi
if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1
fi
if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
CUDA_VISIBLE_DEVICES=${gpus} ./local/voice_cloning.sh ${conf_path} ${train_output_path} ${ckpt_name} \
${ge2e_params_path} ${add_blank} ${ref_audio_dir} ${src_audio_path} || exit -1
fi

@@ -0,0 +1,202 @@
# VITS with AISHELL-3
This example contains code used to train a [VITS](https://arxiv.org/abs/2106.06103) model with [AISHELL-3](http://www.aishelltech.com/aishell_3).
AISHELL-3 is a large-scale and high-fidelity multi-speaker Mandarin speech corpus that could be used to train multi-speaker Text-to-Speech (TTS) systems.
We use AISHELL-3 to train a multi-speaker VITS model here.
## Dataset
### Download and Extract
Download AISHELL-3 from its [Official Website](http://www.aishelltech.com/aishell_3) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/data_aishell3`.
### Get MFA Result and Extract
We use [MFA2.x](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) to get phonemes for VITS; the durations from MFA are not needed here.
You can download the alignment from [aishell3_alignment_tone.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/AISHELL-3/with_tone/aishell3_alignment_tone.tar.gz), or train your own MFA model by following the [mfa example](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/other/mfa) (which uses MFA1.x for now) in our repo.
## Get Started
Assume the path to the dataset is `~/datasets/data_aishell3`.
Assume the path to the MFA result of AISHELL-3 is `./aishell3_alignment_tone`.
Run the command below to
1. **source path**.
2. preprocess the dataset.
3. train the model.
4. synthesize wavs.
- synthesize waveform from `metadata.jsonl`.
- synthesize waveform from a text file.
```bash
./run.sh
```
You can choose a range of stages you want to run, or set `stage` equal to `stop-stage` to use only one stage, for example, running the following command will only preprocess the dataset.
```bash
./run.sh --stage 0 --stop-stage 0
```
### Data Preprocessing
```bash
./local/preprocess.sh ${conf_path}
```
When it is done, a `dump` folder is created in the current directory. The structure of the `dump` folder is listed below.
```text
dump
├── dev
│   ├── norm
│   └── raw
├── phone_id_map.txt
├── speaker_id_map.txt
├── test
│   ├── norm
│   └── raw
└── train
├── feats_stats.npy
├── norm
└── raw
```
The dataset is split into 3 parts, namely `train`, `dev`, and `test`, each of which contains a `norm` and `raw` subfolder. The raw folder contains wave and linear spectrogram of each utterance, while the norm folder contains normalized ones. The statistics used to normalize features are computed from the training set, which is located in `dump/train/feats_stats.npy`.
Also, there is a `metadata.jsonl` in each subfolder. It is a table-like file that contains phones, text_lengths, feats, feats_lengths, the path of linear spectrogram features, the path of raw waves, speaker, and the id of each utterance.
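A quick way to sanity-check the normalization statistics mentioned above is to load `dump/train/feats_stats.npy` directly; the stacked mean/scale layout assumed below is a guess at how the stats file is organized, so verify it before relying on it.

```python
import numpy as np

stats = np.load("dump/train/feats_stats.npy")
# Assumed layout: two stacked rows, the feature-wise mean and the scale (std).
mean, scale = stats[0], stats[1]
print(stats.shape, mean.shape, scale.shape)
```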
### Model Training
```bash
CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path}
```
`./local/train.sh` calls `${BIN_DIR}/train.py`.
Here's the complete help message.
```text
usage: train.py [-h] [--config CONFIG] [--train-metadata TRAIN_METADATA]
[--dev-metadata DEV_METADATA] [--output-dir OUTPUT_DIR]
[--ngpu NGPU] [--phones-dict PHONES_DICT]
[--speaker-dict SPEAKER_DICT] [--voice-cloning VOICE_CLONING]
Train a VITS model.
optional arguments:
-h, --help show this help message and exit
--config CONFIG config file to overwrite default config.
--train-metadata TRAIN_METADATA
training data.
--dev-metadata DEV_METADATA
dev data.
--output-dir OUTPUT_DIR
output dir.
--ngpu NGPU if ngpu == 0, use cpu.
--phones-dict PHONES_DICT
phone vocabulary file.
--speaker-dict SPEAKER_DICT
speaker id map file for multiple speaker model.
--voice-cloning VOICE_CLONING
whether training voice cloning model.
```
1. `--config` is a config file in yaml format to overwrite the default config, which can be found at `conf/default.yaml`.
2. `--train-metadata` and `--dev-metadata` should be the metadata file in the normalized subfolder of `train` and `dev` in the `dump` folder.
3. `--output-dir` is the directory to save the results of the experiment. Checkpoints are saved in `checkpoints/` inside this directory.
4. `--ngpu` is the number of gpus to use, if ngpu == 0, use cpu.
5. `--phones-dict` is the path of the phone vocabulary file.
6. `--speaker-dict` is the path of the speaker id map file when training a multi-speaker VITS.
### Synthesizing
`./local/synthesize.sh` calls `${BIN_DIR}/synthesize.py`, which can synthesize waveform from `metadata.jsonl`.
```bash
CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name}
```
```text
usage: synthesize.py [-h] [--config CONFIG] [--ckpt CKPT]
[--phones_dict PHONES_DICT] [--speaker_dict SPEAKER_DICT]
[--voice-cloning VOICE_CLONING] [--ngpu NGPU]
[--test_metadata TEST_METADATA] [--output_dir OUTPUT_DIR]
Synthesize with VITS
optional arguments:
-h, --help show this help message and exit
--config CONFIG Config of VITS.
--ckpt CKPT Checkpoint file of VITS.
--phones_dict PHONES_DICT
phone vocabulary file.
--speaker_dict SPEAKER_DICT
speaker id map file.
--voice-cloning VOICE_CLONING
whether training voice cloning model.
--ngpu NGPU if ngpu == 0, use cpu.
--test_metadata TEST_METADATA
test metadata.
--output_dir OUTPUT_DIR
output dir.
```
`./local/synthesize_e2e.sh` calls `${BIN_DIR}/synthesize_e2e.py`, which can synthesize waveform from text file.
```bash
CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh ${conf_path} ${train_output_path} ${ckpt_name}
```
```text
usage: synthesize_e2e.py [-h] [--config CONFIG] [--ckpt CKPT]
[--phones_dict PHONES_DICT]
[--speaker_dict SPEAKER_DICT] [--spk_id SPK_ID]
[--lang LANG]
[--inference_dir INFERENCE_DIR] [--ngpu NGPU]
[--text TEXT] [--output_dir OUTPUT_DIR]
Synthesize with VITS
optional arguments:
-h, --help show this help message and exit
--config CONFIG Config of VITS.
--ckpt CKPT Checkpoint file of VITS.
--phones_dict PHONES_DICT
phone vocabulary file.
--speaker_dict SPEAKER_DICT
speaker id map file.
--spk_id SPK_ID spk id for multi speaker acoustic model
--lang LANG Choose model language. zh or en
--inference_dir INFERENCE_DIR
dir to save inference models
--ngpu NGPU if ngpu == 0, use cpu.
--text TEXT text to synthesize, a 'utt_id sentence' pair per line.
--output_dir OUTPUT_DIR
output dir.
```
1. `--config`, `--ckpt`, `--phones_dict` and `--speaker_dict` are arguments for the acoustic model, which correspond to the 4 files in the VITS pretrained model.
2. `--lang` is the model language, which can be `zh` or `en`.
3. `--test_metadata` should be the metadata file in the normalized subfolder of `test` in the `dump` folder.
4. `--text` is the text file, which contains sentences to synthesize (an illustrative file is shown after this list).
5. `--output_dir` is the directory to save synthesized audio files.
6. `--ngpu` is the number of gpus to use, if ngpu == 0, use cpu.
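For reference, here is an illustrative `--text` input file in the `utt_id sentence` one-pair-per-line format: the first sentence is borrowed from the `voice_cloning.sh` script earlier in this commit, and the second is invented for illustration.

```text
001 凯莫瑞安联合体的经济崩溃迫在眉睫。
002 大家好,欢迎使用语音合成服务。
```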
<!-- TODO display these after we trained the model -->
<!--
## Pretrained Model
The pretrained model can be downloaded here:
- [vits_aishell3_ckpt_1.1.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/vits/vits_aishell3_ckpt_1.1.0.zip) (add_blank=true)
VITS checkpoint contains files listed below.
```text
vits_aishell3_ckpt_1.1.0
├── default.yaml # default config used to train vits
├── phone_id_map.txt # phone vocabulary file when training vits
├── speaker_id_map.txt # speaker id map file when training a multi-speaker vits
└── snapshot_iter_333000.pdz # model parameters and optimizer states
```
P.S. This checkpoint is not good enough yet; a better one is still being trained.
You can use the following scripts to synthesize for `${BIN_DIR}/../sentences.txt` using pretrained VITS.
```bash
source path.sh
add_blank=true
FLAGS_allocator_strategy=naive_best_fit \
FLAGS_fraction_of_gpu_memory_to_use=0.01 \
python3 ${BIN_DIR}/synthesize_e2e.py \
--config=vits_aishell3_ckpt_1.1.0/default.yaml \
--ckpt=vits_aishell3_ckpt_1.1.0/snapshot_iter_333000.pdz \
--phones_dict=vits_aishell3_ckpt_1.1.0/phone_id_map.txt \
--speaker_dict=vits_aishell3_ckpt_1.1.0/speaker_id_map.txt \
--output_dir=exp/default/test_e2e \
--text=${BIN_DIR}/../sentences.txt \
--add-blank=${add_blank}
```
-->

@@ -0,0 +1,184 @@
# This configuration was tested on 4 GPUs (V100) with 32 GB of GPU
# memory. It takes around 2 weeks to finish the training,
# but a 100k-iteration model should already generate reasonable results.
###########################################################
# FEATURE EXTRACTION SETTING #
###########################################################
fs: 22050 # sr
n_fft: 1024 # FFT size (samples).
n_shift: 256 # Hop size (samples). 12.5ms
win_length: null # Window length (samples). 50ms
# If set to null, it will be the same as fft_size.
window: "hann" # Window function.
##########################################################
# TTS MODEL SETTING #
##########################################################
model:
# generator related
generator_type: vits_generator
generator_params:
hidden_channels: 192
global_channels: 256
segment_size: 32
text_encoder_attention_heads: 2
text_encoder_ffn_expand: 4
text_encoder_blocks: 6
text_encoder_positionwise_layer_type: "conv1d"
text_encoder_positionwise_conv_kernel_size: 3
text_encoder_positional_encoding_layer_type: "rel_pos"
text_encoder_self_attention_layer_type: "rel_selfattn"
text_encoder_activation_type: "swish"
text_encoder_normalize_before: True
text_encoder_dropout_rate: 0.1
text_encoder_positional_dropout_rate: 0.0
text_encoder_attention_dropout_rate: 0.1
use_macaron_style_in_text_encoder: True
use_conformer_conv_in_text_encoder: False
text_encoder_conformer_kernel_size: -1
decoder_kernel_size: 7
decoder_channels: 512
decoder_upsample_scales: [8, 8, 2, 2]
decoder_upsample_kernel_sizes: [16, 16, 4, 4]
decoder_resblock_kernel_sizes: [3, 7, 11]
decoder_resblock_dilations: [[1, 3, 5], [1, 3, 5], [1, 3, 5]]
use_weight_norm_in_decoder: True
posterior_encoder_kernel_size: 5
posterior_encoder_layers: 16
posterior_encoder_stacks: 1
posterior_encoder_base_dilation: 1
posterior_encoder_dropout_rate: 0.0
use_weight_norm_in_posterior_encoder: True
flow_flows: 4
flow_kernel_size: 5
flow_base_dilation: 1
flow_layers: 4
flow_dropout_rate: 0.0
use_weight_norm_in_flow: True
use_only_mean_in_flow: True
stochastic_duration_predictor_kernel_size: 3
stochastic_duration_predictor_dropout_rate: 0.5
stochastic_duration_predictor_flows: 4
stochastic_duration_predictor_dds_conv_layers: 3
# discriminator related
discriminator_type: hifigan_multi_scale_multi_period_discriminator
discriminator_params:
scales: 1
scale_downsample_pooling: "AvgPool1D"
scale_downsample_pooling_params:
kernel_size: 4
stride: 2
padding: 2
scale_discriminator_params:
in_channels: 1
out_channels: 1
kernel_sizes: [15, 41, 5, 3]
channels: 128
max_downsample_channels: 1024
max_groups: 16
bias: True
downsample_scales: [2, 2, 4, 4, 1]
nonlinear_activation: "leakyrelu"
nonlinear_activation_params:
negative_slope: 0.1
use_weight_norm: True
use_spectral_norm: False
follow_official_norm: False
periods: [2, 3, 5, 7, 11]
period_discriminator_params:
in_channels: 1
out_channels: 1
kernel_sizes: [5, 3]
channels: 32
downsample_scales: [3, 3, 3, 3, 1]
max_downsample_channels: 1024
bias: True
nonlinear_activation: "leakyrelu"
nonlinear_activation_params:
negative_slope: 0.1
use_weight_norm: True
use_spectral_norm: False
# others
sampling_rate: 22050 # needed in the inference for saving wav
cache_generator_outputs: True # whether to cache generator outputs in the training
###########################################################
# LOSS SETTING #
###########################################################
# loss function related
generator_adv_loss_params:
average_by_discriminators: False # whether to average loss value by #discriminators
loss_type: mse # loss type, "mse" or "hinge"
discriminator_adv_loss_params:
average_by_discriminators: False # whether to average loss value by #discriminators
loss_type: mse # loss type, "mse" or "hinge"
feat_match_loss_params:
average_by_discriminators: False # whether to average loss value by #discriminators
average_by_layers: False # whether to average loss value by #layers of each discriminator
include_final_outputs: True # whether to include final outputs for loss calculation
mel_loss_params:
fs: 22050 # must be the same as the training data
fft_size: 1024 # fft points
hop_size: 256 # hop size
win_length: null # window length
window: hann # window type
num_mels: 80 # number of Mel basis
fmin: 0 # minimum frequency for Mel basis
fmax: null # maximum frequency for Mel basis
log_base: null # null represent natural log
###########################################################
# ADVERSARIAL LOSS SETTING #
###########################################################
lambda_adv: 1.0 # loss scaling coefficient for adversarial loss
lambda_mel: 45.0 # loss scaling coefficient for Mel loss
lambda_feat_match: 2.0 # loss scaling coefficient for feat match loss
lambda_dur: 1.0 # loss scaling coefficient for duration loss
lambda_kl: 1.0 # loss scaling coefficient for KL divergence loss
# others
sampling_rate: 22050 # needed in the inference for saving wav
cache_generator_outputs: True # whether to cache generator outputs in the training
###########################################################
# DATA LOADER SETTING #
###########################################################
batch_size: 50 # Batch size.
num_workers: 4 # Number of workers in DataLoader.
##########################################################
# OPTIMIZER & SCHEDULER SETTING #
##########################################################
# optimizer setting for generator
generator_optimizer_params:
beta1: 0.8
beta2: 0.99
epsilon: 1.0e-9
weight_decay: 0.0
generator_scheduler: exponential_decay
generator_scheduler_params:
learning_rate: 2.0e-4
gamma: 0.999875
# optimizer setting for discriminator
discriminator_optimizer_params:
beta1: 0.8
beta2: 0.99
epsilon: 1.0e-9
weight_decay: 0.0
discriminator_scheduler: exponential_decay
discriminator_scheduler_params:
learning_rate: 2.0e-4
gamma: 0.999875
generator_first: False # whether to start updating generator first
##########################################################
# OTHER TRAINING SETTING #
##########################################################
num_snapshots: 10 # max number of snapshots to keep while training
train_max_steps: 350000 # Number of training steps per process. == total_iters / ngpus (i.e. 1400000 total iters on 4 GPUs)
save_interval_steps: 1000 # Interval steps to save checkpoint.
eval_interval_steps: 250 # Interval steps to evaluate the network.
seed: 777 # random seed number
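A quick way to sanity-check a config like the one above is to load it with yacs, which is what the training entry points in this repo use (see the `CfgNode` imports elsewhere in this PR). A minimal sketch, assuming the file is saved as `conf/default.yaml`:

```python
# Minimal sketch: load the training config with yacs and derive the frame
# timing used by feature extraction. The path is illustrative.
import yaml
from yacs.config import CfgNode

with open("conf/default.yaml") as f:
    config = CfgNode(yaml.safe_load(f))

hop_ms = config.n_shift / config.fs * 1000      # ~11.6 ms at fs=22050
win_length = config.win_length or config.n_fft  # null -> same as n_fft
print(config.model.generator_type, hop_ms, win_length)
```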

@ -0,0 +1,69 @@
#!/bin/bash
stage=0
stop_stage=100
config_path=$1
add_blank=$2
# copy from tts3/preprocess
if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
# get durations from MFA's result
echo "Generate durations.txt from MFA results ..."
python3 ${MAIN_ROOT}/utils/gen_duration_from_textgrid.py \
--inputdir=./aishell3_alignment_tone \
--output durations.txt \
--config=${config_path}
fi
if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
# extract features
echo "Extract features ..."
python3 ${BIN_DIR}/preprocess.py \
--dataset=aishell3 \
--rootdir=~/datasets/data_aishell3/ \
--dumpdir=dump \
--dur-file=durations.txt \
--config=${config_path} \
--num-cpu=20 \
--cut-sil=True
fi
if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
# get features' stats (mean and std)
echo "Get features' stats ..."
python3 ${MAIN_ROOT}/utils/compute_statistics.py \
--metadata=dump/train/raw/metadata.jsonl \
--field-name="feats"
fi
if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
# normalize and convert phone/speaker to id; dev and test should use train's stats
echo "Normalize ..."
python3 ${BIN_DIR}/normalize.py \
--metadata=dump/train/raw/metadata.jsonl \
--dumpdir=dump/train/norm \
--feats-stats=dump/train/feats_stats.npy \
--phones-dict=dump/phone_id_map.txt \
--speaker-dict=dump/speaker_id_map.txt \
--add-blank=${add_blank} \
--skip-wav-copy
python3 ${BIN_DIR}/normalize.py \
--metadata=dump/dev/raw/metadata.jsonl \
--dumpdir=dump/dev/norm \
--feats-stats=dump/train/feats_stats.npy \
--phones-dict=dump/phone_id_map.txt \
--speaker-dict=dump/speaker_id_map.txt \
--add-blank=${add_blank} \
--skip-wav-copy
python3 ${BIN_DIR}/normalize.py \
--metadata=dump/test/raw/metadata.jsonl \
--dumpdir=dump/test/norm \
--feats-stats=dump/train/feats_stats.npy \
--phones-dict=dump/phone_id_map.txt \
--speaker-dict=dump/speaker_id_map.txt \
--add-blank=${add_blank} \
--skip-wav-copy
fi
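Stage 2 above streams every training utterance once to compute global feature statistics, which stage 3 then applies to train, dev, and test alike. A rough sketch of what `compute_statistics.py` boils down to, assuming each `metadata.jsonl` line is a JSON object whose `feats` field points to an `.npy` file (the field layout is an assumption here):

```python
# Rough sketch of stage 2: accumulate a global per-dimension mean/std over
# all training features and save them in the (2, dims) layout stage 3 expects.
import json

import numpy as np

count, s1, s2 = 0, 0.0, 0.0
with open("dump/train/raw/metadata.jsonl") as f:
    for line in f:
        feats = np.load(json.loads(line)["feats"])  # (frames, dims)
        count += feats.shape[0]
        s1 += feats.sum(axis=0)
        s2 += (feats ** 2).sum(axis=0)

mean = s1 / count
std = np.sqrt(s2 / count - mean ** 2)  # population std
np.save("dump/train/feats_stats.npy", np.stack([mean, std]))
```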

@ -0,0 +1,19 @@
#!/bin/bash
config_path=$1
train_output_path=$2
ckpt_name=$3
stage=0
stop_stage=0
if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
FLAGS_allocator_strategy=naive_best_fit \
FLAGS_fraction_of_gpu_memory_to_use=0.01 \
python3 ${BIN_DIR}/synthesize.py \
--config=${config_path} \
--ckpt=${train_output_path}/checkpoints/${ckpt_name} \
--phones_dict=dump/phone_id_map.txt \
--speaker_dict=dump/speaker_id_map.txt \
--test_metadata=dump/test/norm/metadata.jsonl \
--output_dir=${train_output_path}/test
fi

@ -0,0 +1,24 @@
#!/bin/bash
config_path=$1
train_output_path=$2
ckpt_name=$3
add_blank=$4
stage=0
stop_stage=0
if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
FLAGS_allocator_strategy=naive_best_fit \
FLAGS_fraction_of_gpu_memory_to_use=0.01 \
python3 ${BIN_DIR}/synthesize_e2e.py \
--config=${config_path} \
--ckpt=${train_output_path}/checkpoints/${ckpt_name} \
--phones_dict=dump/phone_id_map.txt \
--speaker_dict=dump/speaker_id_map.txt \
--spk_id=0 \
--output_dir=${train_output_path}/test_e2e \
--text=${BIN_DIR}/../sentences.txt \
--add-blank=${add_blank}
fi
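The `--add-blank` flag threaded through these scripts follows the usual VITS trick of interspersing a blank token between phone ids, which helps alignment. A minimal sketch of the idea (the actual blank id used by the frontend is an assumption here):

```python
# Sketch of what --add-blank typically does in a VITS-style frontend.
def intersperse(ids, blank_id=0):
    """[5, 9, 13] -> [0, 5, 0, 9, 0, 13, 0]"""
    out = [blank_id] * (2 * len(ids) + 1)
    out[1::2] = ids
    return out

print(intersperse([5, 9, 13]))  # [0, 5, 0, 9, 0, 13, 0]
```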

@ -0,0 +1,18 @@
#!/bin/bash
config_path=$1
train_output_path=$2
# install monotonic_align
cd ${MAIN_ROOT}/paddlespeech/t2s/models/vits/monotonic_align
python3 setup.py build_ext --inplace
cd -
python3 ${BIN_DIR}/train.py \
--train-metadata=dump/train/norm/metadata.jsonl \
--dev-metadata=dump/dev/norm/metadata.jsonl \
--config=${config_path} \
--output-dir=${train_output_path} \
--ngpu=4 \
--phones-dict=dump/phone_id_map.txt \
--speaker-dict=dump/speaker_id_map.txt
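The `setup.py build_ext` step compiles the Cython monotonic alignment search (MAS) used during VITS training. For intuition only, a rough numpy sketch of what MAS computes; the real extension has a different interface and is far faster:

```python
import numpy as np

def maximum_path(value):
    """Monotonic path over a (text, mel) log-likelihood matrix `value`
    maximizing the total score; returns a 0/1 alignment matrix."""
    t_x, t_y = value.shape                 # requires t_x <= t_y
    q = np.full((t_x, t_y), -np.inf)       # best cumulative score
    q[0, 0] = value[0, 0]
    for y in range(1, t_y):
        for x in range(min(y + 1, t_x)):
            stay = q[x, y - 1]             # keep the same text token
            move = q[x - 1, y - 1] if x > 0 else -np.inf
            q[x, y] = value[x, y] + max(stay, move)
    path = np.zeros((t_x, t_y), dtype=np.int64)
    x = t_x - 1
    for y in range(t_y - 1, -1, -1):       # backtrack from the end
        path[x, y] = 1
        if x > 0 and (x == y or q[x - 1, y - 1] > q[x, y - 1]):
            x -= 1
    return path
```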

@ -0,0 +1,13 @@
#!/bin/bash
export MAIN_ROOT=`realpath ${PWD}/../../../`
export PATH=${MAIN_ROOT}:${MAIN_ROOT}/utils:${PATH}
export LC_ALL=C
export PYTHONDONTWRITEBYTECODE=1
# Use UTF-8 in Python to avoid UnicodeDecodeError when LC_ALL=C
export PYTHONIOENCODING=UTF-8
export PYTHONPATH=${MAIN_ROOT}:${PYTHONPATH}
MODEL=vits
export BIN_DIR=${MAIN_ROOT}/paddlespeech/t2s/exps/${MODEL}

@ -0,0 +1,36 @@
#!/bin/bash
set -e
source path.sh
gpus=0,1,2,3
stage=0
stop_stage=100
conf_path=conf/default.yaml
train_output_path=exp/default
ckpt_name=snapshot_iter_153.pdz
add_blank=true
# With the following command, you can choose the stage range you want to run,
# e.g. `./run.sh --stage 0 --stop-stage 0`.
# This cannot be mixed with positional arguments `$1`, `$2`, ...
source ${MAIN_ROOT}/utils/parse_options.sh || exit 1
if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
# prepare data
./local/preprocess.sh ${conf_path} ${add_blank} || exit -1
fi
if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
# train model, all `ckpt` under `train_output_path/checkpoints/` dir
CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path} || exit -1
fi
if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1
fi
if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh ${conf_path} ${train_output_path} ${ckpt_name} ${add_blank} || exit -1
fi

@ -46,8 +46,8 @@ fi
if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
# install paddle2onnx
version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
if [[ -z "$version" || ${version} != '0.9.8' ]]; then
pip install paddle2onnx==0.9.8
if [[ -z "$version" || ${version} != '1.0.0' ]]; then
pip install paddle2onnx==1.0.0
fi
./local/paddle2onnx.sh ${train_output_path} inference inference_onnx speedyspeech_csmsc
# considering the balance between speed and quality, we recommend that you use hifigan as vocoder
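This `pip list | grep | awk` dance, repeated across the ONNX export scripts below, pins paddle2onnx to 1.0.0. If you ever script the same check in Python, the standard library can replace the output parsing (a sketch, not part of this PR; requires Python >= 3.8):

```python
# Equivalent version pin using importlib.metadata instead of parsing pip output.
import subprocess
import sys
from importlib.metadata import PackageNotFoundError, version

try:
    installed = version("paddle2onnx")
except PackageNotFoundError:
    installed = None

if installed != "1.0.0":
    subprocess.check_call(
        [sys.executable, "-m", "pip", "install", "paddle2onnx==1.0.0"])
```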

@ -46,8 +46,8 @@ fi
if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
# install paddle2onnx
version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
if [[ -z "$version" || ${version} != '0.9.8' ]]; then
pip install paddle2onnx==0.9.8
if [[ -z "$version" || ${version} != '1.0.0' ]]; then
pip install paddle2onnx==1.0.0
fi
./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_csmsc
# considering the balance between speed and quality, we recommend that you use hifigan as vocoder

@ -59,8 +59,8 @@ fi
if [ ${stage} -le 7 ] && [ ${stop_stage} -ge 7 ]; then
# install paddle2onnx
version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
if [[ -z "$version" || ${version} != '0.9.8' ]]; then
pip install paddle2onnx==0.9.8
if [[ -z "$version" || ${version} != '1.0.0' ]]; then
pip install paddle2onnx==1.0.0
fi
./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_csmsc
# considering the balance between speed and quality, we recommend that you use hifigan as vocoder
@ -79,8 +79,8 @@ fi
if [ ${stage} -le 9 ] && [ ${stop_stage} -ge 9 ]; then
# install paddle2onnx
version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
if [[ -z "$version" || ${version} != '0.9.8' ]]; then
pip install paddle2onnx==0.9.8
if [[ -z "$version" || ${version} != '1.0.0' ]]; then
pip install paddle2onnx==1.0.0
fi
# streaming acoustic model
./local/paddle2onnx.sh ${train_output_path} inference_streaming inference_onnx_streaming fastspeech2_csmsc_am_encoder_infer

@ -3,7 +3,7 @@
set -e
source path.sh
gpus=0,1
gpus=0,1,2,3
stage=0
stop_stage=100

@ -1,13 +1,15 @@
import argparse
import os
def process_sentence(line):
if line == '': return ''
if line == '':
return ''
res = line[0]
for i in range(1, len(line)):
res += (' ' + line[i])
return res
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Input filename")
parser.add_argument('-input_file')

@ -46,8 +46,8 @@ fi
if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
# install paddle2onnx
version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
if [[ -z "$version" || ${version} != '0.9.8' ]]; then
pip install paddle2onnx==0.9.8
if [[ -z "$version" || ${version} != '1.0.0' ]]; then
pip install paddle2onnx==1.0.0
fi
./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_ljspeech
# considering the balance between speed and quality, we recommend that you use hifigan as vocoder

@ -12,13 +12,13 @@ Run the command below to get the results of the test.
./run.sh
```
The `avg WER` of g2p is: 0.024169315564825305
The `avg WER` of g2p is: 0.024075726733983775
```text
,--------------------------------------------------------------------.
| ./exp/g2p/text.g2p |
|--------------------------------------------------------------------|
| SPKR | # Snt # Wrd | Corr Sub Del Ins Err S.Err |
| Sum/Avg| 9996 299181 | 97.6 2.4 0.0 0.0 2.4 49.2 |
| Sum/Avg| 9996 299181 | 97.6 2.4 0.0 0.0 2.4 49.0 |
`--------------------------------------------------------------------'
```

@ -17,15 +17,14 @@ from pathlib import Path
from typing import Union
import yaml
from paddle import distributed as dist
from yacs.config import CfgNode
from paddlespeech.t2s.exps.fastspeech2.train import train_sp
from local.check_oov import get_check_result
from local.extract import extract_feature
from local.label_process import get_single_label
from local.prepare_env import generate_finetune_env
from paddle import distributed as dist
from yacs.config import CfgNode
from paddlespeech.t2s.exps.fastspeech2.train import train_sp
from utils.gen_duration_from_textgrid import gen_duration_from_textgrid
DICT_EN = 'tools/aligner/cmudict-0.7b'

@ -44,8 +44,8 @@ fi
if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
# install paddle2onnx
version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
if [[ -z "$version" || ${version} != '0.9.8' ]]; then
pip install paddle2onnx==0.9.8
if [[ -z "$version" || ${version} != '1.0.0' ]]; then
pip install paddle2onnx==1.0.0
fi
./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_vctk
# considering the balance between speed and quality, we recommend that you use hifigan as vocoder

@ -47,8 +47,8 @@ fi
if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
# install paddle2onnx
version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}')
if [[ -z "$version" || ${version} != '0.9.8' ]]; then
pip install paddle2onnx==0.9.8
if [[ -z "$version" || ${version} != '1.0.0' ]]; then
pip install paddle2onnx==1.0.0
fi
./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_mix
# considering the balance between speed and quality, we recommend that you use hifigan as vocoder

@ -14,5 +14,3 @@
import _locale
_locale._getdefaultlocale = (lambda *args: ['en_US', 'utf8'])

@ -14,12 +14,12 @@
from . import compliance
from . import datasets
from . import features
from . import text
from . import transform
from . import streamdata
from . import functional
from . import io
from . import metric
from . import sox_effects
from . import streamdata
from . import text
from . import transform
from .backends import load
from .backends import save

@ -4,67 +4,66 @@
# Modified from https://github.com/webdataset/webdataset
#
# flake8: noqa
from .cache import (
cached_tarfile_samples,
cached_tarfile_to_samples,
lru_cleanup,
pipe_cleaner,
)
from .compat import WebDataset, WebLoader, FluidWrapper
from .extradatasets import MockDataset, with_epoch, with_length
from .filters import (
associate,
batched,
decode,
detshuffle,
extract_keys,
getfirst,
info,
map,
map_dict,
map_tuple,
pipelinefilter,
rename,
rename_keys,
audio_resample,
select,
shuffle,
slice,
to_tuple,
transform_with,
unbatched,
xdecode,
audio_data_filter,
audio_tokenize,
audio_resample,
audio_compute_fbank,
audio_spec_aug,
sort,
audio_padding,
audio_cmvn,
placeholder,
)
from .handlers import (
ignore_and_continue,
ignore_and_stop,
reraise_exception,
warn_and_continue,
warn_and_stop,
)
from .cache import cached_tarfile_samples
from .cache import cached_tarfile_to_samples
from .cache import lru_cleanup
from .cache import pipe_cleaner
from .compat import FluidWrapper
from .compat import WebDataset
from .compat import WebLoader
from .extradatasets import MockDataset
from .extradatasets import with_epoch
from .extradatasets import with_length
from .filters import associate
from .filters import audio_cmvn
from .filters import audio_compute_fbank
from .filters import audio_data_filter
from .filters import audio_padding
from .filters import audio_resample
from .filters import audio_spec_aug
from .filters import audio_tokenize
from .filters import batched
from .filters import decode
from .filters import detshuffle
from .filters import extract_keys
from .filters import getfirst
from .filters import info
from .filters import map
from .filters import map_dict
from .filters import map_tuple
from .filters import pipelinefilter
from .filters import placeholder
from .filters import rename
from .filters import rename_keys
from .filters import select
from .filters import shuffle
from .filters import slice
from .filters import sort
from .filters import to_tuple
from .filters import transform_with
from .filters import unbatched
from .filters import xdecode
from .handlers import ignore_and_continue
from .handlers import ignore_and_stop
from .handlers import reraise_exception
from .handlers import warn_and_continue
from .handlers import warn_and_stop
from .mix import RandomMix
from .mix import RoundRobin
from .pipeline import DataPipeline
from .shardlists import (
MultiShardSample,
ResampledShards,
SimpleShardList,
non_empty,
resampled,
shardspec,
single_node_only,
split_by_node,
split_by_worker,
)
from .tariterators import tarfile_samples, tarfile_to_samples
from .utils import PipelineStage, repeatedly
from .writer import ShardWriter, TarWriter, numpy_dumps
from .mix import RandomMix, RoundRobin
from .shardlists import MultiShardSample
from .shardlists import non_empty
from .shardlists import resampled
from .shardlists import ResampledShards
from .shardlists import shardspec
from .shardlists import SimpleShardList
from .shardlists import single_node_only
from .shardlists import split_by_node
from .shardlists import split_by_worker
from .tariterators import tarfile_samples
from .tariterators import tarfile_to_samples
from .utils import PipelineStage
from .utils import repeatedly
from .writer import numpy_dumps
from .writer import ShardWriter
from .writer import TarWriter
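With these re-exports in place, the package can be driven in the usual webdataset style. A hedged usage sketch (the shard pattern and the `paddlespeech.audio.streamdata` import path are assumptions; `map` is part of the fluent interface defined in `compat.py`):

```python
from paddlespeech.audio.streamdata import WebDataset

def tag(sample):            # any per-sample transform
    sample["__seen__"] = True
    return sample

ds = WebDataset("dump/shard-{000000..000003}.tar").map(tag)
for sample in ds:           # iterating drives the whole pipeline
    print(sorted(sample.keys()))
    break
```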

@ -5,18 +5,19 @@
# See the LICENSE file for licensing terms (BSD-style).
# Modified from https://github.com/webdataset/webdataset
#
"""Automatically decode webdataset samples."""
import io, json, os, pickle, re, tempfile
import io
import json
import os
import pickle
import re
import tempfile
from functools import partial
import numpy as np
"""Extensions passed on to the image decoder."""
image_extensions = "jpg jpeg png ppm pgm pbm pnm".split()
################################################################
# handle basic datatypes
################################################################
@ -268,7 +269,6 @@ def imagehandler(imagespec, extensions=image_extensions):
################################################################
# torch video
################################################################
'''
def torch_video(key, data):
"""Decode video using the torchvideo library.
@ -289,7 +289,6 @@ def torch_video(key, data):
return torchvision.io.read_video(fname, pts_unit="sec")
'''
################################################################
# paddlespeech.audio
################################################################
@ -359,7 +358,6 @@ def gzfilter(key, data):
# decode entire training samples
################################################################
default_pre_handlers = [gzfilter]
default_post_handlers = [basichandlers]
@ -387,7 +385,8 @@ class Decoder:
pre = default_pre_handlers
if post is None:
post = default_post_handlers
assert all(callable(h) for h in handlers), f"one of {handlers} not callable"
assert all(callable(h)
for h in handlers), f"one of {handlers} not callable"
assert all(callable(h) for h in pre), f"one of {pre} not callable"
assert all(callable(h) for h in post), f"one of {post} not callable"
self.handlers = pre + handlers + post
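The reflowed assertions above only enforce that everything in `pre + handlers + post` is callable. For reference, a hedged sketch of the handler convention this Decoder inherits from upstream webdataset: each handler receives a dot-prefixed key plus raw bytes, and returns a decoded value or None to defer to the next handler:

```python
def utf8_text(key, data):
    """Decode *.txt entries; defer everything else to later handlers."""
    if key.endswith(".txt"):
        return data.decode("utf-8").strip()
    return None

# Decoder([utf8_text]) can then be mapped over sample dicts, which is what
# the fluent decode(...) method in compat.py arranges.
```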

@ -2,7 +2,10 @@
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# See the LICENSE file for licensing terms (BSD-style).
# Modified from https://github.com/webdataset/webdataset
import itertools, os, random, re, sys
import os
import random
import re
import sys
from urllib.parse import urlparse
from . import filters
@ -69,8 +72,7 @@ def get_file_cached(
cache_size=-1,
cache_dir=None,
url_to_name=pipe_cleaner,
verbose=False,
):
verbose=False, ):
if cache_size == -1:
cache_size = default_cache_size
if cache_dir is None:
@ -114,8 +116,7 @@ def cached_url_opener(
url_to_name=pipe_cleaner,
validator=check_tar_format,
verbose=False,
always=False,
):
always=False, ):
"""Given a stream of url names (packaged in `dict(url=url)`), yield opened streams."""
verbose = verbose or verbose_cache
for sample in data:
@ -132,8 +133,7 @@ def cached_url_opener(
cache_size=cache_size,
cache_dir=cache_dir,
url_to_name=url_to_name,
verbose=verbose,
)
verbose=verbose, )
if verbose:
print("# opening %s" % dest, file=sys.stderr)
assert os.path.exists(dest)
@ -143,9 +143,8 @@ def cached_url_opener(
data = f.read(200)
os.remove(dest)
raise ValueError(
"%s (%s) is not a tar archive, but a %s, contains %s"
% (dest, url, ftype, repr(data))
)
"%s (%s) is not a tar archive, but a %s, contains %s" %
(dest, url, ftype, repr(data)))
try:
stream = open(dest, "rb")
sample.update(stream=stream)
@ -172,8 +171,7 @@ def cached_tarfile_samples(
cache_dir=None,
verbose=False,
url_to_name=pipe_cleaner,
always=False,
):
always=False, ):
streams = cached_url_opener(
src,
handler=handler,
@ -181,8 +179,7 @@ def cached_tarfile_samples(
cache_dir=cache_dir,
verbose=verbose,
url_to_name=url_to_name,
always=always,
)
always=always, )
samples = tar_file_and_group_expander(streams, handler=handler)
return samples

@ -2,17 +2,17 @@
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# See the LICENSE file for licensing terms (BSD-style).
# Modified from https://github.com/webdataset/webdataset
from dataclasses import dataclass
from itertools import islice
from typing import List
import braceexpand, yaml
import yaml
from . import autodecode
from . import cache, filters, shardlists, tariterators
from . import cache
from . import filters
from . import shardlists
from . import tariterators
from .filters import reraise_exception
from .paddle_utils import DataLoader
from .paddle_utils import IterableDataset
from .pipeline import DataPipeline
from .paddle_utils import DataLoader, IterableDataset
class FluidInterface:
@ -26,7 +26,8 @@ class FluidInterface:
return self.compose(filters.unbatched())
def listed(self, batchsize, partial=True):
return self.compose(filters.batched(), batchsize=batchsize, collation_fn=None)
return self.compose(
filters.batched(), batchsize=batchsize, collation_fn=None)
def unlisted(self):
return self.compose(filters.unlisted())
@ -43,9 +44,19 @@ class FluidInterface:
def map(self, f, handler=reraise_exception):
return self.compose(filters.map(f, handler=handler))
def decode(self, *args, pre=None, post=None, only=None, partial=False, handler=reraise_exception):
handlers = [autodecode.ImageHandler(x) if isinstance(x, str) else x for x in args]
decoder = autodecode.Decoder(handlers, pre=pre, post=post, only=only, partial=partial)
def decode(self,
*args,
pre=None,
post=None,
only=None,
partial=False,
handler=reraise_exception):
handlers = [
autodecode.ImageHandler(x) if isinstance(x, str) else x
for x in args
]
decoder = autodecode.Decoder(
handlers, pre=pre, post=post, only=only, partial=partial)
return self.map(decoder, handler=handler)
def map_dict(self, handler=reraise_exception, **kw):
@ -102,6 +113,7 @@ class FluidInterface:
def audio_cmvn(self, cmvn_file):
return self.compose(filters.audio_cmvn(cmvn_file))
class WebDataset(DataPipeline, FluidInterface):
"""Small fluid-interface wrapper for DataPipeline."""
@ -116,13 +128,13 @@ class WebDataset(DataPipeline, FluidInterface):
cache_dir=None,
detshuffle=False,
nodesplitter=shardlists.single_node_only,
verbose=False,
):
verbose=False, ):
super().__init__()
if isinstance(urls, IterableDataset):
assert not resampled
self.append(urls)
elif isinstance(urls, str) and (urls.endswith(".yaml") or urls.endswith(".yml")):
elif isinstance(urls, str) and (urls.endswith(".yaml") or
urls.endswith(".yml")):
with (open(urls)) as stream:
spec = yaml.safe_load(stream)
assert "datasets" in spec
@ -152,9 +164,7 @@ class WebDataset(DataPipeline, FluidInterface):
handler=handler,
verbose=verbose,
cache_size=cache_size,
cache_dir=cache_dir,
)
)
cache_dir=cache_dir, ))
class FluidWrapper(DataPipeline, FluidInterface):

@ -5,20 +5,10 @@
# See the LICENSE file for licensing terms (BSD-style).
# Modified from https://github.com/webdataset/webdataset
#
"""Train PyTorch models directly from POSIX tar archive.
Code works locally or over HTTP connections.
"""
import itertools as itt
import os
import random
import sys
import braceexpand
from . import utils
from .paddle_utils import IterableDataset
from .utils import PipelineStage
@ -63,8 +53,7 @@ class repeatedly(IterableDataset, PipelineStage):
return utils.repeatedly(
source,
nepochs=self.nepochs,
nbatches=self.nbatches,
)
nbatches=self.nbatches, )
class with_epoch(IterableDataset):

@ -3,7 +3,6 @@
# This file is part of the WebDataset library.
# See the LICENSE file for licensing terms (BSD-style).
#
# Modified from https://github.com/webdataset/webdataset
# Modified from wenet(https://github.com/wenet-e2e/wenet)
"""A collection of iterators for data transformations.
@ -12,28 +11,29 @@ These functions are plain iterator functions. You can find curried versions
in webdataset.filters, and you can find IterableDataset wrappers in
webdataset.processing.
"""
import io
from fnmatch import fnmatch
import itertools
import os
import random
import re
import itertools, os, random, sys, time
from functools import reduce, wraps
import sys
import time
from fnmatch import fnmatch
from functools import reduce
import numpy as np
import paddle
from . import autodecode
from . import utils
from .paddle_utils import PaddleTensor
from .utils import PipelineStage
from .. import backends
from ..compliance import kaldi
import paddle
from ..transform.cmvn import GlobalCMVN
from ..utils.tensor_utils import pad_sequence
from ..transform.spec_augment import time_warp
from ..transform.spec_augment import time_mask
from ..transform.spec_augment import freq_mask
from ..transform.spec_augment import time_mask
from ..transform.spec_augment import time_warp
from ..utils.tensor_utils import pad_sequence
from .utils import PipelineStage
class FilterFunction(object):
"""Helper class for currying pipeline stages.
@ -159,10 +159,12 @@ def transform_with(sample, transformers):
result[i] = f(sample[i])
return result
###
# Iterators
###
def _info(data, fmt=None, n=3, every=-1, width=50, stream=sys.stderr, name=""):
"""Print information about the samples that are passing through.
@ -278,10 +280,16 @@ def _log_keys(data, logfile=None):
log_keys = pipelinefilter(_log_keys)
def _minedecode(x):
if isinstance(x, str):
return autodecode.imagehandler(x)
else:
return x
def _decode(data, *args, handler=reraise_exception, **kw):
"""Decode data based on the decoding functions given as arguments."""
decoder = lambda x: autodecode.imagehandler(x) if isinstance(x, str) else x
decoder = _minedecode
handlers = [decoder(x) for x in args]
f = autodecode.Decoder(handlers, **kw)
@ -325,15 +333,24 @@ def _rename(data, handler=reraise_exception, keep=True, **kw):
for sample in data:
try:
if not keep:
yield {k: getfirst(sample, v, missing_is_error=True) for k, v in kw.items()}
yield {
k: getfirst(sample, v, missing_is_error=True)
for k, v in kw.items()
}
else:
def listify(v):
return v.split(";") if isinstance(v, str) else v
to_be_replaced = {x for v in kw.values() for x in listify(v)}
result = {k: v for k, v in sample.items() if k not in to_be_replaced}
result.update({k: getfirst(sample, v, missing_is_error=True) for k, v in kw.items()})
result = {
k: v
for k, v in sample.items() if k not in to_be_replaced
}
result.update({
k: getfirst(sample, v, missing_is_error=True)
for k, v in kw.items()
})
yield result
except Exception as exn:
if handler(exn):
@ -381,7 +398,11 @@ def _map_dict(data, handler=reraise_exception, **kw):
map_dict = pipelinefilter(_map_dict)
def _to_tuple(data, *args, handler=reraise_exception, missing_is_error=True, none_is_error=None):
def _to_tuple(data,
*args,
handler=reraise_exception,
missing_is_error=True,
none_is_error=None):
"""Convert dict samples to tuples."""
if none_is_error is None:
none_is_error = missing_is_error
@ -390,7 +411,10 @@ def _to_tuple(data, *args, handler=reraise_exception, missing_is_error=True, non
for sample in data:
try:
result = tuple([getfirst(sample, f, missing_is_error=missing_is_error) for f in args])
result = tuple([
getfirst(sample, f, missing_is_error=missing_is_error)
for f in args
])
if none_is_error and any(x is None for x in result):
raise ValueError(f"to_tuple {args} got {sample.keys()}")
yield result
@ -463,19 +487,28 @@ rsample = pipelinefilter(_rsample)
slice = pipelinefilter(itertools.islice)
def _extract_keys(source, *patterns, duplicate_is_error=True, ignore_missing=False):
def _extract_keys(source,
*patterns,
duplicate_is_error=True,
ignore_missing=False):
for sample in source:
result = []
for pattern in patterns:
pattern = pattern.split(";") if isinstance(pattern, str) else pattern
matches = [x for x in sample.keys() if any(fnmatch("." + x, p) for p in pattern)]
pattern = pattern.split(";") if isinstance(pattern,
str) else pattern
matches = [
x for x in sample.keys()
if any(fnmatch("." + x, p) for p in pattern)
]
if len(matches) == 0:
if ignore_missing:
continue
else:
raise ValueError(f"Cannot find {pattern} in sample keys {sample.keys()}.")
raise ValueError(
f"Cannot find {pattern} in sample keys {sample.keys()}.")
if len(matches) > 1 and duplicate_is_error:
raise ValueError(f"Multiple sample keys {sample.keys()} match {pattern}.")
raise ValueError(
f"Multiple sample keys {sample.keys()} match {pattern}.")
value = sample[matches[0]]
result.append(value)
yield tuple(result)
@ -484,7 +517,12 @@ def _extract_keys(source, *patterns, duplicate_is_error=True, ignore_missing=Fal
extract_keys = pipelinefilter(_extract_keys)
def _rename_keys(source, *args, keep_unselected=False, must_match=True, duplicate_is_error=True, **kw):
def _rename_keys(source,
*args,
keep_unselected=False,
must_match=True,
duplicate_is_error=True,
**kw):
renamings = [(pattern, output) for output, pattern in args]
renamings += [(pattern, output) for output, pattern in kw.items()]
for sample in source:
@ -504,11 +542,15 @@ def _rename_keys(source, *args, keep_unselected=False, must_match=True, duplicat
continue
if new_name in new_sample:
if duplicate_is_error:
raise ValueError(f"Duplicate value in sample {sample.keys()} after rename.")
raise ValueError(
f"Duplicate value in sample {sample.keys()} after rename."
)
continue
new_sample[new_name] = value
if must_match and not all(matched.values()):
raise ValueError(f"Not all patterns ({matched}) matched sample keys ({sample.keys()}).")
raise ValueError(
f"Not all patterns ({matched}) matched sample keys ({sample.keys()})."
)
yield new_sample
@ -541,7 +583,8 @@ def find_decoder(decoders, path):
if fname.startswith("__"):
return lambda x: x
for pattern, fun in decoders[::-1]:
if fnmatch(fname.lower(), pattern) or fnmatch("." + fname.lower(), pattern):
if fnmatch(fname.lower(), pattern) or fnmatch("." + fname.lower(),
pattern):
return fun
return None
@ -551,8 +594,7 @@ def _xdecode(
*args,
must_decode=True,
defaults=default_decoders,
**kw,
):
**kw, ):
decoders = list(defaults) + list(args)
decoders += [("*." + k, v) for k, v in kw.items()]
for sample in source:
@ -575,8 +617,8 @@ def _xdecode(
new_sample[path] = value
yield new_sample
xdecode = pipelinefilter(_xdecode)
xdecode = pipelinefilter(_xdecode)
def _audio_data_filter(source,
@ -613,7 +655,8 @@ def _audio_data_filter(source,
assert 'wav' in sample
assert 'label' in sample
# sample['wav'] is a paddle.Tensor; by default we have 100 frames per second
num_frames = sample['wav'].shape[1] / sample['sample_rate'] * (1000 / frame_shift)
num_frames = sample['wav'].shape[1] / sample['sample_rate'] * (
1000 / frame_shift)
if num_frames < min_length:
continue
if num_frames > max_length:
@ -629,8 +672,10 @@ def _audio_data_filter(source,
continue
yield sample
audio_data_filter = pipelinefilter(_audio_data_filter)
def _audio_tokenize(source,
symbol_table,
bpe_model=None,
@ -693,8 +738,10 @@ def _audio_tokenize(source,
sample['label'] = label
yield sample
audio_tokenize = pipelinefilter(_audio_tokenize)
def _audio_resample(source, resample_rate=16000):
""" Resample data.
Inplace operation.
@ -713,13 +760,17 @@ def _audio_resample(source, resample_rate=16000):
waveform = sample['wav']
if sample_rate != resample_rate:
sample['sample_rate'] = resample_rate
sample['wav'] = paddle.to_tensor(backends.soundfile_backend.resample(
waveform.numpy(), src_sr = sample_rate, target_sr = resample_rate
))
sample['wav'] = paddle.to_tensor(
backends.soundfile_backend.resample(
waveform.numpy(),
src_sr=sample_rate,
target_sr=resample_rate))
yield sample
audio_resample = pipelinefilter(_audio_resample)
def _audio_compute_fbank(source,
num_mel_bins=80,
frame_length=25,
@ -746,7 +797,8 @@ def _audio_compute_fbank(source,
waveform = sample['wav']
waveform = waveform * (1 << 15)
# Only keep fname, feat, label
mat = kaldi.fbank(waveform,
mat = kaldi.fbank(
waveform,
n_mels=num_mel_bins,
frame_length=frame_length,
frame_shift=frame_shift,
@ -758,7 +810,9 @@ def _audio_compute_fbank(source,
audio_compute_fbank = pipelinefilter(_audio_compute_fbank)
def _audio_spec_aug(source,
def _audio_spec_aug(
source,
max_w=5,
w_inplace=True,
w_mode="PIL",
@ -794,11 +848,22 @@ def _audio_spec_aug(source,
x = sample['feat']
x = x.numpy()
x = time_warp(x, max_time_warp=max_w, inplace=w_inplace, mode=w_mode)
x = freq_mask(x, F = max_f, n_mask = num_f_mask, inplace = f_inplace, replace_with_zero = f_replace_with_zero)
x = time_mask(x, T = max_t, n_mask = num_t_mask, inplace = t_inplace, replace_with_zero = t_replace_with_zero)
x = freq_mask(
x,
F=max_f,
n_mask=num_f_mask,
inplace=f_inplace,
replace_with_zero=f_replace_with_zero)
x = time_mask(
x,
T=max_t,
n_mask=num_t_mask,
inplace=t_inplace,
replace_with_zero=t_replace_with_zero)
sample['feat'] = paddle.to_tensor(x, dtype=paddle.float32)
yield sample
audio_spec_aug = pipelinefilter(_audio_spec_aug)
@ -829,8 +894,10 @@ def _sort(source, sort_size=500):
for x in buf:
yield x
sort = pipelinefilter(_sort)
def _batched(source, batch_size=16):
""" Static batch the data by `batch_size`
@ -850,8 +917,10 @@ def _batched(source, batch_size=16):
if len(buf) > 0:
yield buf
batched = pipelinefilter(_batched)
def dynamic_batched(source, max_frames_in_batch=12000):
""" Dynamic batch the data until the total frames in batch
reach `max_frames_in_batch`
@ -892,8 +961,8 @@ def _audio_padding(source):
"""
for sample in source:
assert isinstance(sample, list)
feats_length = paddle.to_tensor([x['feat'].shape[0] for x in sample],
dtype="int64")
feats_length = paddle.to_tensor(
[x['feat'].shape[0] for x in sample], dtype="int64")
order = paddle.argsort(feats_length, descending=True)
feats_lengths = paddle.to_tensor(
[sample[i]['feat'].shape[0] for i in order], dtype="int64")
@ -902,20 +971,20 @@ def _audio_padding(source):
sorted_labels = [
paddle.to_tensor(sample[i]['label'], dtype="int32") for i in order
]
label_lengths = paddle.to_tensor([x.shape[0] for x in sorted_labels],
dtype="int64")
padded_feats = pad_sequence(sorted_feats,
batch_first=True,
padding_value=0)
padding_labels = pad_sequence(sorted_labels,
batch_first=True,
padding_value=-1)
label_lengths = paddle.to_tensor(
[x.shape[0] for x in sorted_labels], dtype="int64")
padded_feats = pad_sequence(
sorted_feats, batch_first=True, padding_value=0)
padding_labels = pad_sequence(
sorted_labels, batch_first=True, padding_value=-1)
yield (sorted_keys, padded_feats, feats_lengths, padding_labels,
label_lengths)
audio_padding = pipelinefilter(_audio_padding)
def _audio_cmvn(source, cmvn_file):
global_cmvn = GlobalCMVN(cmvn_file)
for batch in source:
@ -926,10 +995,13 @@ def _audio_cmvn(source, cmvn_file):
yield (sorted_keys, padded_feats, feats_lengths, padding_labels,
label_lengths)
audio_cmvn = pipelinefilter(_audio_cmvn)
def _placeholder(source):
for data in source:
yield data
placeholder = pipelinefilter(_placeholder)
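All of the `foo = pipelinefilter(_foo)` assignments above use the same currying trick: a generator `f(source, *args, **kw)` becomes `foo(*args, **kw)`, which returns a stage you can call on (or compose into) a data source. A self-contained sketch of the pattern:

```python
def pipelinefilter(f):
    """Turn f(source, *args, **kw) into foo(*args, **kw) -> stage(source)."""
    def curried(*args, **kw):
        def stage(source):
            return f(source, *args, **kw)
        return stage
    return curried

def _batched(source, batch_size=16):
    buf = []
    for sample in source:
        buf.append(sample)
        if len(buf) >= batch_size:
            yield buf
            buf = []
    if buf:
        yield buf

batched = pipelinefilter(_batched)
print(list(batched(2)(range(5))))  # [[0, 1], [2, 3], [4]]
```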

@ -3,12 +3,12 @@
# This file is part of the WebDataset library.
# See the LICENSE file for licensing terms (BSD-style).
#
"""Open URLs by calling subcommands."""
import os, sys, re
from subprocess import PIPE, Popen
import os
import re
import sys
from subprocess import PIPE
from subprocess import Popen
from urllib.parse import urlparse
# global used for printing additional node information during verbose output
@ -37,8 +37,7 @@ class Pipe:
timeout=7200.0,
ignore_errors=False,
ignore_status=[],
**kw,
):
**kw, ):
"""Create an IO Pipe."""
self.ignore_errors = ignore_errors
self.ignore_status = [0] + ignore_status
@ -75,8 +74,7 @@ class Pipe:
if verbose:
print(
f"pipe exit [{self.status} {os.getpid()}:{self.proc.pid}] {self.args} {info}",
file=sys.stderr,
)
file=sys.stderr, )
if self.status not in self.ignore_status and not self.ignore_errors:
raise Exception(f"{self.args}: exit {self.status} (read) {info}")
@ -114,9 +112,11 @@ class Pipe:
self.close()
def set_options(
obj, timeout=None, ignore_errors=None, ignore_status=None, handler=None
):
def set_options(obj,
timeout=None,
ignore_errors=None,
ignore_status=None,
handler=None):
"""Set options for Pipes.
This function can be called on any stream. It will set pipe options only
@ -168,16 +168,14 @@ def gopen_pipe(url, mode="rb", bufsize=8192):
mode=mode,
shell=True,
bufsize=bufsize,
ignore_status=[141],
) # skipcq: BAN-B604
ignore_status=[141], ) # skipcq: BAN-B604
elif mode[0] == "w":
return Pipe(
cmd,
mode=mode,
shell=True,
bufsize=bufsize,
ignore_status=[141],
) # skipcq: BAN-B604
ignore_status=[141], ) # skipcq: BAN-B604
else:
raise ValueError(f"{mode}: unknown mode")
@ -196,8 +194,7 @@ def gopen_curl(url, mode="rb", bufsize=8192):
mode=mode,
shell=True,
bufsize=bufsize,
ignore_status=[141, 23],
) # skipcq: BAN-B604
ignore_status=[141, 23], ) # skipcq: BAN-B604
elif mode[0] == "w":
cmd = f"curl -s -L -T - '{url}'"
return Pipe(
@ -205,8 +202,7 @@ def gopen_curl(url, mode="rb", bufsize=8192):
mode=mode,
shell=True,
bufsize=bufsize,
ignore_status=[141, 26],
) # skipcq: BAN-B604
ignore_status=[141, 26], ) # skipcq: BAN-B604
else:
raise ValueError(f"{mode}: unknown mode")
@ -226,15 +222,13 @@ def gopen_htgs(url, mode="rb", bufsize=8192):
mode=mode,
shell=True,
bufsize=bufsize,
ignore_status=[141, 23],
) # skipcq: BAN-B604
ignore_status=[141, 23], ) # skipcq: BAN-B604
elif mode[0] == "w":
raise ValueError(f"{mode}: cannot write")
else:
raise ValueError(f"{mode}: unknown mode")
def gopen_gsutil(url, mode="rb", bufsize=8192):
"""Open a URL with `curl`.
@ -249,8 +243,7 @@ def gopen_gsutil(url, mode="rb", bufsize=8192):
mode=mode,
shell=True,
bufsize=bufsize,
ignore_status=[141, 23],
) # skipcq: BAN-B604
ignore_status=[141, 23], ) # skipcq: BAN-B604
elif mode[0] == "w":
cmd = f"gsutil cp - '{url}'"
return Pipe(
@ -258,13 +251,11 @@ def gopen_gsutil(url, mode="rb", bufsize=8192):
mode=mode,
shell=True,
bufsize=bufsize,
ignore_status=[141, 26],
) # skipcq: BAN-B604
ignore_status=[141, 26], ) # skipcq: BAN-B604
else:
raise ValueError(f"{mode}: unknown mode")
def gopen_error(url, *args, **kw):
"""Raise a value error.
@ -285,8 +276,7 @@ gopen_schemes = dict(
ftps=gopen_curl,
scp=gopen_curl,
gs=gopen_gsutil,
htgs=gopen_htgs,
)
htgs=gopen_htgs, )
def gopen(url, mode="rb", bufsize=8192, **kw):

@ -3,7 +3,6 @@
# This file is part of the WebDataset library.
# See the LICENSE file for licensing terms (BSD-style).
#
"""Pluggable exception handlers.
These are functions that take an exception as an argument and then return...
@ -14,8 +13,8 @@ These are functions that take an exception as an argument and then return...
They are used as handler= arguments in much of the library.
"""
import time, warnings
import time
import warnings
def reraise_exception(exn):

@ -5,17 +5,12 @@
# See the LICENSE file for licensing terms (BSD-style).
# Modified from https://github.com/webdataset/webdataset
#
"""Classes for mixing samples from multiple sources."""
import itertools, os, random, time, sys
from functools import reduce, wraps
import random
import numpy as np
from . import autodecode, utils
from .paddle_utils import PaddleTensor, IterableDataset
from .utils import PipelineStage
from .paddle_utils import IterableDataset
def round_robin_shortest(*sources):

@ -5,12 +5,11 @@
# See the LICENSE file for licensing terms (BSD-style).
# Modified from https://github.com/webdataset/webdataset
#
"""Mock implementations of paddle interfaces when paddle is not available."""
try:
from paddle.io import DataLoader, IterableDataset
from paddle.io import DataLoader
from paddle.io import IterableDataset
except ModuleNotFoundError:
class IterableDataset:
@ -22,12 +21,3 @@ except ModuleNotFoundError:
"""Empty implementation of DataLoader when paddle is not available."""
pass
try:
from paddle import Tensor as PaddleTensor
except ModuleNotFoundError:
class TorchTensor:
"""Empty implementation of PaddleTensor when paddle is not available."""
pass

@ -3,15 +3,12 @@
# See the LICENSE file for licensing terms (BSD-style).
# Modified from https://github.com/webdataset/webdataset
#%%
import copy, os, random, sys, time
from dataclasses import dataclass
import copy
import sys
from itertools import islice
from typing import List
import braceexpand, yaml
from .handlers import reraise_exception
from .paddle_utils import DataLoader, IterableDataset
from .paddle_utils import DataLoader
from .paddle_utils import IterableDataset
from .utils import PipelineStage
@ -22,8 +19,7 @@ def add_length_method(obj):
Combined = type(
obj.__class__.__name__ + "_Length",
(obj.__class__, IterableDataset),
{"__len__": length},
)
{"__len__": length}, )
obj.__class__ = Combined
return obj

@ -4,28 +4,30 @@
# This file is part of the WebDataset library.
# See the LICENSE file for licensing terms (BSD-style).
#
# Modified from https://github.com/webdataset/webdataset
"""Train PyTorch models directly from POSIX tar archive.
Code works locally or over HTTP connections.
"""
import os, random, sys, time
from dataclasses import dataclass, field
import os
import random
import sys
import time
from dataclasses import dataclass
from dataclasses import field
from itertools import islice
from typing import List
import braceexpand, yaml
import braceexpand
import yaml
from . import utils
from ..utils.log import Logger
from .filters import pipelinefilter
from .paddle_utils import IterableDataset
logger = Logger(__name__)
from ..utils.log import Logger
logger = Logger(__name__)
def expand_urls(urls):
if isinstance(urls, str):
urllist = urls.split("::")
@ -64,7 +66,8 @@ class SimpleShardList(IterableDataset):
def split_by_node(src, group=None):
rank, world_size, worker, num_workers = utils.paddle_worker_info(group=group)
rank, world_size, worker, num_workers = utils.paddle_worker_info(
group=group)
logger.info(f"world_size:{world_size}, rank:{rank}")
if world_size > 1:
for s in islice(src, rank, None, world_size):
@ -75,9 +78,11 @@ def split_by_node(src, group=None):
def single_node_only(src, group=None):
rank, world_size, worker, num_workers = utils.paddle_worker_info(group=group)
rank, world_size, worker, num_workers = utils.paddle_worker_info(
group=group)
if world_size > 1:
raise ValueError("input pipeline needs to be reconfigured for multinode training")
raise ValueError(
"input pipeline needs to be reconfigured for multinode training")
for s in src:
yield s
@ -104,7 +109,8 @@ def resampled_(src, n=sys.maxsize):
rng = random.Random(seed)
print("# resampled loading", file=sys.stderr)
items = list(src)
print(f"# resampled got {len(items)} samples, yielding {n}", file=sys.stderr)
print(
f"# resampled got {len(items)} samples, yielding {n}", file=sys.stderr)
for i in range(n):
yield rng.choice(items)
@ -118,7 +124,9 @@ def non_empty(src):
yield s
count += 1
if count == 0:
raise ValueError("pipeline stage received no data at all and this was declared as an error")
raise ValueError(
"pipeline stage received no data at all and this was declared as an error"
)
@dataclass
@ -138,10 +146,6 @@ def expand(s):
return os.path.expanduser(os.path.expandvars(s))
class MultiShardSample(IterableDataset):
def __init__(self, fname):
"""Construct a shardlist from multiple sources using a YAML spec."""
self.epoch = -1
class MultiShardSample(IterableDataset):
def __init__(self, fname):
"""Construct a shardlist from multiple sources using a YAML spec."""
@ -156,20 +160,23 @@ class MultiShardSample(IterableDataset):
else:
with open(fname) as stream:
spec = yaml.safe_load(stream)
assert set(spec.keys()).issubset(set("prefix datasets buckets".split())), list(spec.keys())
assert set(spec.keys()).issubset(
set("prefix datasets buckets".split())), list(spec.keys())
prefix = expand(spec.get("prefix", ""))
self.sources = []
for ds in spec["datasets"]:
assert set(ds.keys()).issubset(set("buckets name shards resample choose".split())), list(
ds.keys()
)
assert set(ds.keys()).issubset(
set("buckets name shards resample choose".split())), list(
ds.keys())
buckets = ds.get("buckets", spec.get("buckets", []))
if isinstance(buckets, str):
buckets = [buckets]
buckets = [expand(s) for s in buckets]
if buckets == []:
buckets = [""]
assert len(buckets) == 1, f"{buckets}: FIXME support for multiple buckets unimplemented"
assert len(
buckets
) == 1, f"{buckets}: FIXME support for multiple buckets unimplemented"
bucket = buckets[0]
name = ds.get("name", "@" + bucket)
urls = ds["shards"]
@ -177,15 +184,19 @@ class MultiShardSample(IterableDataset):
urls = [urls]
# urls = [u for url in urls for u in braceexpand.braceexpand(url)]
urls = [
prefix + os.path.join(bucket, u) for url in urls for u in braceexpand.braceexpand(expand(url))
prefix + os.path.join(bucket, u)
for url in urls for u in braceexpand.braceexpand(expand(url))
]
resample = ds.get("resample", -1)
nsample = ds.get("choose", -1)
if nsample > len(urls):
raise ValueError(f"perepoch {nsample} must be no greater than the number of shards")
raise ValueError(
f"perepoch {nsample} must be no greater than the number of shards"
)
if (nsample > 0) and (resample > 0):
raise ValueError("specify only one of perepoch or choose")
entry = MSSource(name=name, urls=urls, perepoch=nsample, resample=resample)
entry = MSSource(
name=name, urls=urls, perepoch=nsample, resample=resample)
self.sources.append(entry)
print(f"# {name} {len(urls)} {nsample}", file=sys.stderr)
@ -231,8 +242,7 @@ class ResampledShards(IterableDataset):
urls,
nshards=sys.maxsize,
worker_seed=None,
deterministic=False,
):
deterministic=False, ):
"""Sample shards from the shard list with replacement.
:param urls: a list of URLs as a Python list or brace notation string
@ -252,7 +262,8 @@ class ResampledShards(IterableDataset):
if self.deterministic:
seed = utils.make_seed(self.worker_seed(), self.epoch)
else:
seed = utils.make_seed(self.worker_seed(), self.epoch, os.getpid(), time.time_ns(), os.urandom(4))
seed = utils.make_seed(self.worker_seed(), self.epoch,
os.getpid(), time.time_ns(), os.urandom(4))
if os.environ.get("WDS_SHOW_SEED", "0") == "1":
print(f"# ResampledShards seed {seed}")
self.rng = random.Random(seed)
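The reflowed `split_by_node` earlier in this file reduces to strided slicing over the shard list, one stride per rank. A tiny illustration:

```python
from itertools import islice

shards = [f"shard-{i:06d}.tar" for i in range(6)]
world_size = 2
for rank in range(world_size):
    print(rank, list(islice(shards, rank, None, world_size)))
# 0 ['shard-000000.tar', 'shard-000002.tar', 'shard-000004.tar']
# 1 ['shard-000001.tar', 'shard-000003.tar', 'shard-000005.tar']
```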

@ -3,13 +3,12 @@
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# This file is part of the WebDataset library.
# See the LICENSE file for licensing terms (BSD-style).
# Modified from https://github.com/webdataset/webdataset
# Modified from wenet(https://github.com/wenet-e2e/wenet)
"""Low level iteration functions for tar archives."""
import random, re, tarfile
import random
import re
import tarfile
import braceexpand
@ -27,6 +26,7 @@ import numpy as np
AUDIO_FORMAT_SETS = set(['flac', 'mp3', 'm4a', 'ogg', 'opus', 'wav', 'wma'])
def base_plus_ext(path):
"""Split off all file extensions.
@ -47,12 +47,8 @@ def valid_sample(sample):
:param sample: sample to be checked
"""
return (
sample is not None
and isinstance(sample, dict)
and len(list(sample.keys())) > 0
and not sample.get("__bad__", False)
)
return (sample is not None and isinstance(sample, dict) and
len(list(sample.keys())) > 0 and not sample.get("__bad__", False))
# FIXME: UNUSED
@ -86,9 +82,9 @@ def url_opener(data, handler=reraise_exception, **kw):
break
def tar_file_iterator(
fileobj, skip_meta=r"__[^/]*__($|/)", handler=reraise_exception
):
def tar_file_iterator(fileobj,
skip_meta=r"__[^/]*__($|/)",
handler=reraise_exception):
"""Iterate over tar file, yielding filename, content pairs for the given tar stream.
:param fileobj: byte stream suitable for tarfile
@ -103,11 +99,8 @@ def tar_file_iterator(
continue
if fname is None:
continue
if (
"/" not in fname
and fname.startswith(meta_prefix)
and fname.endswith(meta_suffix)
):
if ("/" not in fname and fname.startswith(meta_prefix) and
fname.endswith(meta_suffix)):
# skipping metadata for now
continue
if skip_meta is not None and re.match(skip_meta, fname):
@ -118,8 +111,10 @@ def tar_file_iterator(
assert pos > 0
prefix, postfix = name[:pos], name[pos + 1:]
if postfix == 'wav':
waveform, sample_rate = paddlespeech.audio.load(stream.extractfile(tarinfo), normal=False)
result = dict(fname=prefix, wav=waveform, sample_rate = sample_rate)
waveform, sample_rate = paddlespeech.audio.load(
stream.extractfile(tarinfo), normal=False)
result = dict(
fname=prefix, wav=waveform, sample_rate=sample_rate)
else:
txt = stream.extractfile(tarinfo).read().decode('utf8').strip()
result = dict(fname=prefix, txt=txt)
@ -135,9 +130,10 @@ def tar_file_iterator(
break
del stream
def tar_file_and_group_iterator(
fileobj, skip_meta=r"__[^/]*__($|/)", handler=reraise_exception
):
def tar_file_and_group_iterator(fileobj,
skip_meta=r"__[^/]*__($|/)",
handler=reraise_exception):
""" Expand a stream of open tar files into a stream of tar file contents.
It also groups files that share the same prefix.
@ -167,8 +163,11 @@ def tar_file_and_group_iterator(
if postfix == 'txt':
example['txt'] = file_obj.read().decode('utf8').strip()
elif postfix in AUDIO_FORMAT_SETS:
waveform, sample_rate = paddlespeech.audio.load(file_obj, normal=False)
waveform = paddle.to_tensor(np.expand_dims(np.array(waveform),0), dtype=paddle.float32)
waveform, sample_rate = paddlespeech.audio.load(
file_obj, normal=False)
waveform = paddle.to_tensor(
np.expand_dims(np.array(waveform), 0),
dtype=paddle.float32)
example['wav'] = waveform
example['sample_rate'] = sample_rate
@ -176,7 +175,8 @@ def tar_file_and_group_iterator(
example[postfix] = file_obj.read()
except Exception as exn:
if hasattr(exn, "args") and len(exn.args) > 0:
exn.args = (exn.args[0] + " @ " + str(fileobj),) + exn.args[1:]
exn.args = (exn.args[0] + " @ " + str(fileobj),
) + exn.args[1:]
if handler(exn):
continue
else:
@ -189,6 +189,7 @@ def tar_file_and_group_iterator(
yield example
stream.close()
def tar_file_expander(data, handler=reraise_exception):
"""Expand a stream of open tar files into a stream of tar file contents.
@ -200,9 +201,8 @@ def tar_file_expander(data, handler=reraise_exception):
assert isinstance(source, dict)
assert "stream" in source
for sample in tar_file_iterator(source["stream"]):
assert (
isinstance(sample, dict) and "data" in sample and "fname" in sample
)
assert (isinstance(sample, dict) and "data" in sample and
"fname" in sample)
sample["__url__"] = url
yield sample
except Exception as exn:
@ -213,8 +213,6 @@ def tar_file_expander(data, handler=reraise_exception):
break
def tar_file_and_group_expander(data, handler=reraise_exception):
"""Expand a stream of open tar files into a stream of tar file contents.
@ -226,9 +224,8 @@ def tar_file_and_group_expander(data, handler=reraise_exception):
assert isinstance(source, dict)
assert "stream" in source
for sample in tar_file_and_group_iterator(source["stream"]):
assert (
isinstance(sample, dict) and "wav" in sample and "txt" in sample and "fname" in sample
)
assert (isinstance(sample, dict) and "wav" in sample and
"txt" in sample and "fname" in sample)
sample["__url__"] = url
yield sample
except Exception as exn:
@ -239,7 +236,11 @@ def tar_file_and_group_expander(data, handler=reraise_exception):
break
def group_by_keys(data, keys=base_plus_ext, lcase=True, suffixes=None, handler=None):
def group_by_keys(data,
keys=base_plus_ext,
lcase=True,
suffixes=None,
handler=None):
"""Return function over iterator that groups key, value pairs into samples.
:param keys: function that splits the key into key and extension (base_plus_ext)
@ -254,8 +255,8 @@ def group_by_keys(data, keys=base_plus_ext, lcase=True, suffixes=None, handler=N
print(
prefix,
suffix,
current_sample.keys() if isinstance(current_sample, dict) else None,
)
current_sample.keys()
if isinstance(current_sample, dict) else None, )
if prefix is None:
continue
if lcase:
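`group_by_keys` relies on `base_plus_ext` (defined near the top of this file) to split a tar member path into a sample key and an extension, so members sharing a basename collapse into one sample. A small illustration using the upstream webdataset regex:

```python
import re

def base_plus_ext(path):
    """Split 'dir/utt001.wav' into ('dir/utt001', 'wav')."""
    m = re.match(r"^((?:.*/|)[^.]+)[.]([^/]*)$", path)
    if not m:
        return None, None
    return m.group(1), m.group(2)

print(base_plus_ext("data/utt001.wav"))  # ('data/utt001', 'wav')
print(base_plus_ext("data/utt001.txt"))  # ('data/utt001', 'txt')
# -> both members group into one sample for 'data/utt001'
```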

@ -4,22 +4,23 @@
# This file is part of the WebDataset library.
# See the LICENSE file for licensing terms (BSD-style).
#
# Modified from https://github.com/webdataset/webdataset
"""Miscellaneous utility functions."""
import importlib
import itertools as itt
import os
import re
import sys
from typing import Any, Callable, Iterator, Optional, Union
from typing import Any
from typing import Callable
from typing import Iterator
from typing import Union
from ..utils.log import Logger
logger = Logger(__name__)
def make_seed(*args):
seed = 0
for arg in args:
@ -54,9 +55,9 @@ def lookup_sym(sym: str, modules: list):
return None
def repeatedly0(
loader: Iterator, nepochs: int = sys.maxsize, nbatches: int = sys.maxsize
):
def repeatedly0(loader: Iterator,
nepochs: int=sys.maxsize,
nbatches: int=sys.maxsize):
"""Repeatedly returns batches from a DataLoader."""
for epoch in range(nepochs):
for sample in itt.islice(loader, nbatches):
@ -73,8 +74,7 @@ def repeatedly(
nepochs: int=None,
nbatches: int=None,
nsamples: int=None,
batchsize: Callable[..., int] = guess_batchsize,
):
batchsize: Callable[..., int]=guess_batchsize, ):
"""Repeatedly yield samples from an iterator."""
epoch = 0
batch = 0
@ -93,6 +93,7 @@ def repeatedly(
if nepochs is not None and epoch >= nepochs:
return
def paddle_worker_info(group=None):
"""Return node and worker info for PyTorch and some distributed environments."""
rank = 0
@ -116,7 +117,7 @@ def paddle_worker_info(group=None):
else:
try:
from paddle.io import get_worker_info
worker_info = paddle.io.get_worker_info()
worker_info = get_worker_info()
if worker_info is not None:
worker = worker_info.id
num_workers = worker_info.num_workers
@ -126,6 +127,7 @@ def paddle_worker_info(group=None):
return rank, world_size, worker, num_workers
def paddle_worker_seed(group=None):
"""Compute a distinct, deterministic RNG seed for each worker and node."""
rank, world_size, worker, num_workers = paddle_worker_info(group=group)

@ -5,11 +5,17 @@
# See the LICENSE file for licensing terms (BSD-style).
# Modified from https://github.com/webdataset/webdataset
#
"""Classes and functions for writing tar files and WebDataset files."""
import io, json, pickle, re, tarfile, time
from typing import Any, Callable, Optional, Union
import io
import json
import pickle
import re
import tarfile
import time
from typing import Any
from typing import Callable
from typing import Optional
from typing import Union
import numpy as np
@ -67,6 +73,7 @@ def bytestr(data: Any):
return data.encode("ascii")
return str(data).encode("ascii")
def paddle_dumps(data: Any):
"""Dump data into a bytestring using paddle.dumps.
@ -82,6 +89,7 @@ def paddle_dumps(data: Any):
paddle.save(data, stream)
return stream.getvalue()
def numpy_dumps(data: np.ndarray):
"""Dump data into a bytestring using numpy npy format.
@ -139,9 +147,8 @@ def add_handlers(d, keys, value):
def make_handlers():
"""Create a list of handlers for encoding data."""
handlers = {}
add_handlers(
handlers, "cls cls2 class count index inx id", lambda x: str(x).encode("ascii")
)
add_handlers(handlers, "cls cls2 class count index inx id",
lambda x: str(x).encode("ascii"))
add_handlers(handlers, "txt text transcript", lambda x: x.encode("utf-8"))
add_handlers(handlers, "html htm", lambda x: x.encode("utf-8"))
add_handlers(handlers, "pyd pickle", pickle.dumps)
@ -152,7 +159,8 @@ def make_handlers():
add_handlers(handlers, "json jsn", lambda x: json.dumps(x).encode("utf-8"))
add_handlers(handlers, "mp msgpack msg", mp_dumps)
add_handlers(handlers, "cbor", cbor_dumps)
add_handlers(handlers, "jpg jpeg img image", lambda data: imageencoder(data, "jpg"))
add_handlers(handlers, "jpg jpeg img image",
lambda data: imageencoder(data, "jpg"))
add_handlers(handlers, "png", lambda data: imageencoder(data, "png"))
add_handlers(handlers, "pbm", lambda data: imageencoder(data, "pbm"))
add_handlers(handlers, "pgm", lambda data: imageencoder(data, "pgm"))
@ -192,7 +200,8 @@ def encode_based_on_extension(sample: dict, handlers: dict):
:param handlers: handlers for encoding
"""
return {
k: encode_based_on_extension1(v, k, handlers) for k, v in list(sample.items())
k: encode_based_on_extension1(v, k, handlers)
for k, v in list(sample.items())
}
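A hedged usage sketch of the extension-keyed handlers above (the "__key__" field is assumed to pass through unencoded):

handlers = make_handlers()
sample = {"__key__": "utt0001", "txt": "hello world", "cls": 3}
encoded = encode_based_on_extension(sample, handlers)
# encoded["txt"] == b"hello world"; encoded["cls"] == b"3"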
@ -265,8 +274,7 @@ class TarWriter:
mode: int=0o0444,
compress: Optional[bool]=None,
encoder: Union[None, bool, Callable]=True,
keep_meta: bool = False,
):
keep_meta: bool=False, ):
"""Create a tar writer.
:param fileobj: stream to write data to
@ -330,8 +338,7 @@ class TarWriter:
continue
if not isinstance(v, (bytes, bytearray, memoryview)):
raise ValueError(
f"{k} doesn't map to a bytes after encoding ({type(v)})"
)
f"{k} doesn't map to a bytes after encoding ({type(v)})")
key = obj["__key__"]
for k in sorted(obj.keys()):
if k == "__key__":
@ -349,7 +356,8 @@ class TarWriter:
ti.uname = self.user
ti.gname = self.group
if not isinstance(v, (bytes, bytearray, memoryview)):
raise ValueError(f"converter didn't yield bytes: {k}, {type(v)}")
raise ValueError(
f"converter didn't yield bytes: {k}, {type(v)}")
stream = io.BytesIO(v)
self.tarstream.addfile(ti, stream)
total += ti.size
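A minimal usage sketch for TarWriter, assuming the usual webdataset-style context-manager interface; the shard name is illustrative:

with TarWriter("shard-000000.tar") as sink:
    sink.write({"__key__": "utt0001",
                "txt": "hello",
                "json": {"speaker": "S1"}})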
@ -366,8 +374,7 @@ class ShardWriter:
maxsize: float=3e9,
post: Optional[Callable]=None,
start_shard: int=0,
**kw,
):
**kw, ):
"""Create a ShardWriter.
:param pattern: output file pattern
@ -400,8 +407,7 @@ class ShardWriter:
self.fname,
self.count,
"%.1f GB" % (self.size / 1e9),
self.total,
)
self.total, )
self.shard += 1
stream = open(self.fname, "wb")
self.tarstream = TarWriter(stream, **self.kw)
@ -413,11 +419,8 @@ class ShardWriter:
:param obj: sample to be written
"""
if (
self.tarstream is None
or self.count >= self.maxcount
or self.size >= self.maxsize
):
if (self.tarstream is None or self.count >= self.maxcount or
self.size >= self.maxsize):
self.next_stream()
size = self.tarstream.write(obj)
self.count += 1
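ShardWriter.write() rolls over to a fresh tar once maxcount or maxsize is reached; a hedged sketch (pattern and counts illustrative):

writer = ShardWriter("shard-%06d.tar", maxcount=2)
for i in range(5):
    writer.write({"__key__": "utt%04d" % i, "txt": "sample %d" % i})
writer.close()  # yields shard-000000..000002.tar with 2 + 2 + 1 samples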

@ -17,6 +17,7 @@ from typing import Union
import sentencepiece as spm
from ..utils.log import Logger
from .utility import BLANK
from .utility import EOS
from .utility import load_dict
@ -24,7 +25,6 @@ from .utility import MASKCTC
from .utility import SOS
from .utility import SPACE
from .utility import UNK
from ..utils.log import Logger
logger = Logger(__name__)

@ -12,15 +12,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from espnet(https://github.com/espnet/espnet)
import io
import os
import h5py
import librosa
import numpy
import numpy as np
import scipy
import soundfile
import io
import os
import h5py
import numpy as np
class SoundHDF5File():
"""Collecting sound files to a HDF5 file
@ -109,6 +110,7 @@ class SoundHDF5File():
def close(self):
self.file.close()
class SpeedPerturbation():
"""SpeedPerturbation
@ -558,4 +560,3 @@ class RIRConvolve():
[scipy.convolve(x, r, mode="same") for r in rir], axis=-1)
else:
return scipy.convolve(x, rir, mode="same")
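A hedged usage sketch of the SoundHDF5File container above, assuming the espnet-style (array, rate) item interface:

import numpy as np

f = SoundHDF5File("dump.h5", "w")
f["utt0001"] = (np.zeros(16000, dtype=np.float32), 16000)
data, rate = f["utt0001"]
f.close()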

@ -14,6 +14,7 @@
# Modified from espnet(https://github.com/espnet/espnet)
"""Spec Augment module for preprocessing i.e., data augmentation"""
import random
import numpy
from PIL import Image

@ -114,6 +114,7 @@ if not hasattr(paddle.Tensor, 'new_full'):
paddle.Tensor.new_full = new_full
paddle.static.Variable.new_full = new_full
def contiguous(xs: paddle.Tensor) -> paddle.Tensor:
return xs

@ -25,8 +25,6 @@ import paddle
from paddle import distributed as dist
from paddlespeech.s2t.frontend.featurizer import TextFeaturizer
from paddlespeech.s2t.io.dataloader import BatchDataLoader
from paddlespeech.s2t.io.dataloader import StreamDataLoader
from paddlespeech.s2t.io.dataloader import DataLoaderFactory
from paddlespeech.s2t.models.u2 import U2Model
from paddlespeech.s2t.training.optimizer import OptimizerFactory
@ -109,7 +107,8 @@ class U2Trainer(Trainer):
def valid(self):
self.model.eval()
if not self.use_streamdata:
logger.info(f"Valid Total Examples: {len(self.valid_loader.dataset)}")
logger.info(
f"Valid Total Examples: {len(self.valid_loader.dataset)}")
valid_losses = defaultdict(list)
num_seen_utts = 1
total_loss = 0.0
@ -136,7 +135,8 @@ class U2Trainer(Trainer):
msg += "epoch: {}, ".format(self.epoch)
msg += "step: {}, ".format(self.iteration)
if not self.use_streamdata:
msg += "batch: {}/{}, ".format(i + 1, len(self.valid_loader))
msg += "batch: {}/{}, ".format(i + 1,
len(self.valid_loader))
msg += ', '.join('{}: {:>.6f}'.format(k, v)
for k, v in valid_dump.items())
logger.info(msg)
@ -157,7 +157,8 @@ class U2Trainer(Trainer):
self.before_train()
if not self.use_streamdata:
logger.info(f"Train Total Examples: {len(self.train_loader.dataset)}")
logger.info(
f"Train Total Examples: {len(self.train_loader.dataset)}")
while self.epoch < self.config.n_epoch:
with Timer("Epoch-Train Time Cost: {}"):
self.model.train()
@ -225,14 +226,18 @@ class U2Trainer(Trainer):
config = self.config.clone()
self.use_streamdata = config.get("use_stream_data", False)
if self.train:
self.train_loader = DataLoaderFactory.get_dataloader('train', config, self.args)
self.valid_loader = DataLoaderFactory.get_dataloader('valid', config, self.args)
self.train_loader = DataLoaderFactory.get_dataloader(
'train', config, self.args)
self.valid_loader = DataLoaderFactory.get_dataloader(
'valid', config, self.args)
logger.info("Setup train/valid Dataloader!")
else:
decode_batch_size = config.get('decode', dict()).get(
'decode_batch_size', 1)
self.test_loader = DataLoaderFactory.get_dataloader('test', config, self.args)
self.align_loader = DataLoaderFactory.get_dataloader('align', config, self.args)
self.test_loader = DataLoaderFactory.get_dataloader('test', config,
self.args)
self.align_loader = DataLoaderFactory.get_dataloader(
'align', config, self.args)
logger.info("Setup test/align Dataloader!")
def setup_model(self):

@ -105,7 +105,8 @@ class U2Trainer(Trainer):
def valid(self):
self.model.eval()
if not self.use_streamdata:
logger.info(f"Valid Total Examples: {len(self.valid_loader.dataset)}")
logger.info(
f"Valid Total Examples: {len(self.valid_loader.dataset)}")
valid_losses = defaultdict(list)
num_seen_utts = 1
total_loss = 0.0
@ -133,7 +134,8 @@ class U2Trainer(Trainer):
msg += "epoch: {}, ".format(self.epoch)
msg += "step: {}, ".format(self.iteration)
if not self.use_streamdata:
msg += "batch: {}/{}, ".format(i + 1, len(self.valid_loader))
msg += "batch: {}/{}, ".format(i + 1,
len(self.valid_loader))
msg += ', '.join('{}: {:>.6f}'.format(k, v)
for k, v in valid_dump.items())
logger.info(msg)
@ -153,7 +155,8 @@ class U2Trainer(Trainer):
self.before_train()
if not self.use_streamdata:
logger.info(f"Train Total Examples: {len(self.train_loader.dataset)}")
logger.info(
f"Train Total Examples: {len(self.train_loader.dataset)}")
while self.epoch < self.config.n_epoch:
with Timer("Epoch-Train Time Cost: {}"):
self.model.train()
@ -165,8 +168,8 @@ class U2Trainer(Trainer):
msg += "epoch: {}, ".format(self.epoch)
msg += "step: {}, ".format(self.iteration)
if not self.use_streamdata:
msg += "batch : {}/{}, ".format(batch_index + 1,
len(self.train_loader))
msg += "batch : {}/{}, ".format(
batch_index + 1, len(self.train_loader))
msg += "lr: {:>.8f}, ".format(self.lr_scheduler())
msg += "data time: {:>.3f}s, ".format(dataload_time)
self.train_batch(batch_index, batch, msg)
@ -204,21 +207,24 @@ class U2Trainer(Trainer):
self.use_streamdata = config.get("use_stream_data", False)
if self.train:
config = self.config.clone()
self.train_loader = DataLoaderFactory.get_dataloader('train', config, self.args)
self.train_loader = DataLoaderFactory.get_dataloader(
'train', config, self.args)
config = self.config.clone()
config['preprocess_config'] = None
self.valid_loader = DataLoaderFactory.get_dataloader('valid', config, self.args)
self.valid_loader = DataLoaderFactory.get_dataloader(
'valid', config, self.args)
logger.info("Setup train/valid Dataloader!")
else:
config = self.config.clone()
config['preprocess_config'] = None
self.test_loader = DataLoaderFactory.get_dataloader('test', config, self.args)
self.test_loader = DataLoaderFactory.get_dataloader('test', config,
self.args)
config = self.config.clone()
config['preprocess_config'] = None
self.align_loader = DataLoaderFactory.get_dataloader('align', config, self.args)
self.align_loader = DataLoaderFactory.get_dataloader(
'align', config, self.args)
logger.info("Setup test/align Dataloader!")
def setup_model(self):
config = self.config

@ -121,7 +121,8 @@ class U2STTrainer(Trainer):
def valid(self):
self.model.eval()
if not self.use_streamdata:
logger.info(f"Valid Total Examples: {len(self.valid_loader.dataset)}")
logger.info(
f"Valid Total Examples: {len(self.valid_loader.dataset)}")
valid_losses = defaultdict(list)
num_seen_utts = 1
total_loss = 0.0
@ -155,7 +156,8 @@ class U2STTrainer(Trainer):
msg += "epoch: {}, ".format(self.epoch)
msg += "step: {}, ".format(self.iteration)
if not self.use_streamdata:
msg += "batch: {}/{}, ".format(i + 1, len(self.valid_loader))
msg += "batch: {}/{}, ".format(i + 1,
len(self.valid_loader))
msg += ', '.join('{}: {:>.6f}'.format(k, v)
for k, v in valid_dump.items())
logger.info(msg)
@ -175,7 +177,8 @@ class U2STTrainer(Trainer):
self.before_train()
if not self.use_streamdata:
logger.info(f"Train Total Examples: {len(self.train_loader.dataset)}")
logger.info(
f"Train Total Examples: {len(self.train_loader.dataset)}")
while self.epoch < self.config.n_epoch:
with Timer("Epoch-Train Time Cost: {}"):
self.model.train()
@ -248,14 +251,16 @@ class U2STTrainer(Trainer):
config['load_transcript'] = load_transcript
self.use_streamdata = config.get("use_stream_data", False)
if self.train:
self.train_loader = DataLoaderFactory.get_dataloader('train', config, self.args)
self.valid_loader = DataLoaderFactory.get_dataloader('valid', config, self.args)
self.train_loader = DataLoaderFactory.get_dataloader(
'train', config, self.args)
self.valid_loader = DataLoaderFactory.get_dataloader(
'valid', config, self.args)
logger.info("Setup train/valid Dataloader!")
else:
self.test_loader = DataLoaderFactory.get_dataloader('test', config, self.args)
self.test_loader = DataLoaderFactory.get_dataloader('test', config,
self.args)
logger.info("Setup test Dataloader!")
def setup_model(self):
config = self.config
model_conf = config

@ -22,17 +22,16 @@ import paddle
from paddle.io import BatchSampler
from paddle.io import DataLoader
from paddle.io import DistributedBatchSampler
from yacs.config import CfgNode
import paddlespeech.audio.streamdata as streamdata
from paddlespeech.audio.text.text_featurizer import TextFeaturizer
from paddlespeech.s2t.io.batchfy import make_batchset
from paddlespeech.s2t.io.converter import CustomConverter
from paddlespeech.s2t.io.dataset import TransformDataset
from paddlespeech.s2t.io.reader import LoadInputsAndTargets
from paddlespeech.s2t.utils.log import Log
import paddlespeech.audio.streamdata as streamdata
from paddlespeech.audio.text.text_featurizer import TextFeaturizer
from yacs.config import CfgNode
__all__ = ["BatchDataLoader", "StreamDataLoader"]
logger = Log(__name__).getlog()
@ -61,6 +60,7 @@ def batch_collate(x):
"""
return x[0]
def read_preprocess_cfg(preprocess_conf_file):
augment_conf = dict()
preprocess_cfg = CfgNode(new_allowed=True)
@ -84,6 +84,7 @@ def read_preprocess_cfg(preprocess_conf_file):
augment_conf['t_replace_with_zero'] = process['replace_with_zero']
return augment_conf
class StreamDataLoader():
def __init__(self,
manifest_file: str,
@ -131,10 +132,14 @@ class StreamDataLoader():
world_size = paddle.distributed.get_world_size()
except Exception as e:
logger.warning(e)
logger.warninig("can not get world_size using paddle.distributed.get_world_size(), use world_size=1")
assert(len(shardlist) >= world_size, "the length of shard list should >= number of gpus/xpus/...")
logger.warning(
    "cannot get world_size using paddle.distributed.get_world_size(), use world_size=1"
)
assert len(shardlist) >= world_size, \
"the length of shard list should >= number of gpus/xpus/..."
update_n_iter_processes = int(max(min(len(shardlist)/world_size - 1, self.n_iter_processes), 0))
update_n_iter_processes = int(
max(min(len(shardlist) / world_size - 1, self.n_iter_processes), 0))
logger.info(f"update_n_iter_processes {update_n_iter_processes}")
if update_n_iter_processes != self.n_iter_processes:
self.n_iter_processes = update_n_iter_processes
@ -142,44 +147,50 @@ class StreamDataLoader():
if self.dist_sampler:
base_dataset = streamdata.DataPipeline(
streamdata.SimpleShardList(shardlist),
streamdata.split_by_node if train_mode else streamdata.placeholder(),
streamdata.SimpleShardList(shardlist), streamdata.split_by_node
if train_mode else streamdata.placeholder(),
streamdata.split_by_worker,
streamdata.tarfile_to_samples(streamdata.reraise_exception)
)
streamdata.tarfile_to_samples(streamdata.reraise_exception))
else:
base_dataset = streamdata.DataPipeline(
streamdata.SimpleShardList(shardlist),
streamdata.split_by_worker,
streamdata.tarfile_to_samples(streamdata.reraise_exception)
)
streamdata.tarfile_to_samples(streamdata.reraise_exception))
self.dataset = base_dataset.append_list(
streamdata.audio_tokenize(symbol_table),
streamdata.audio_data_filter(frame_shift=frame_shift, max_length=maxlen_in, min_length=minlen_in, token_max_length=maxlen_out, token_min_length=minlen_out),
streamdata.audio_data_filter(
frame_shift=frame_shift,
max_length=maxlen_in,
min_length=minlen_in,
token_max_length=maxlen_out,
token_min_length=minlen_out),
streamdata.audio_resample(resample_rate=resample_rate),
streamdata.audio_compute_fbank(num_mel_bins=num_mel_bins, frame_length=frame_length, frame_shift=frame_shift, dither=dither),
streamdata.audio_spec_aug(**augment_conf) if train_mode else streamdata.placeholder(), # num_t_mask=2, num_f_mask=2, max_t=40, max_f=30, max_w=80)
streamdata.audio_compute_fbank(
num_mel_bins=num_mel_bins,
frame_length=frame_length,
frame_shift=frame_shift,
dither=dither),
streamdata.audio_spec_aug(**augment_conf)
if train_mode else streamdata.placeholder(
), # num_t_mask=2, num_f_mask=2, max_t=40, max_f=30, max_w=80)
streamdata.shuffle(shuffle_size),
streamdata.sort(sort_size=sort_size),
streamdata.batched(batch_size),
streamdata.audio_padding(),
streamdata.audio_cmvn(cmvn_file)
)
streamdata.audio_cmvn(cmvn_file))
if paddle.__version__ >= '2.3.2':
self.loader = streamdata.WebLoader(
self.dataset,
num_workers=self.n_iter_processes,
prefetch_factor=self.prefetch_factor,
batch_size=None
)
batch_size=None)
else:
self.loader = streamdata.WebLoader(
self.dataset,
num_workers=self.n_iter_processes,
batch_size=None
)
batch_size=None)
def __iter__(self):
return self.loader.__iter__()
@ -188,7 +199,9 @@ class StreamDataLoader():
return self.__iter__()
def __len__(self):
logger.info("Stream dataloader does not support calculate the length of the dataset")
logger.info(
    "Stream dataloader does not support calculating the length of the dataset"
)
return -1
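The constructor above chains streamdata stages into one pipeline; a trimmed-down sketch keeping only the shard-reading stages (shard name illustrative):

import paddlespeech.audio.streamdata as streamdata

dataset = streamdata.DataPipeline(
    streamdata.SimpleShardList(["shard-000000.tar"]),
    streamdata.split_by_worker,
    streamdata.tarfile_to_samples(streamdata.reraise_exception))
for sample in dataset:  # dict of tar-member extension -> bytes, plus "__key__"
    break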
@ -358,7 +371,9 @@ class DataLoaderFactory():
config['maxlen_out'] = float('inf')
config['dist_sampler'] = False
else:
raise KeyError("not valid mode type!!, please input one of 'train, valid, test, align'")
raise KeyError(
    "invalid mode type, please input one of 'train', 'valid', 'test', 'align'"
)
return StreamDataLoader(
manifest_file=config.manifest,
train_mode=config.train_mode,
@ -380,8 +395,7 @@ class DataLoaderFactory():
prefetch_factor=config.prefetch_factor,
dist_sampler=config.dist_sampler,
cmvn_file=config.cmvn_file,
vocab_filepath=config.vocab_filepath,
)
vocab_filepath=config.vocab_filepath, )
else:
if mode == 'train':
config['manifest'] = config.train_manifest
@ -427,7 +441,9 @@ class DataLoaderFactory():
config['dist_sampler'] = False
config['shortest_first'] = False
else:
raise KeyError("not valid mode type!!, please input one of 'train, valid, test, align'")
raise KeyError(
    "invalid mode type, please input one of 'train', 'valid', 'test', 'align'"
)
return BatchDataLoader(
json_file=config.manifest,
@ -450,4 +466,3 @@ class DataLoaderFactory():
num_encs=config.num_encs,
dist_sampler=config.dist_sampler,
shortest_first=config.shortest_first)

@ -18,7 +18,6 @@ Unified Streaming and Non-streaming Two-pass End-to-end Model for Speech Recogni
"""
import time
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
@ -26,6 +25,8 @@ import paddle
from paddle import jit
from paddle import nn
from paddlespeech.audio.utils.tensor_utils import add_sos_eos
from paddlespeech.audio.utils.tensor_utils import th_accuracy
from paddlespeech.s2t.frontend.utility import IGNORE_ID
from paddlespeech.s2t.frontend.utility import load_cmvn
from paddlespeech.s2t.modules.cmvn import GlobalCMVN
@ -38,8 +39,6 @@ from paddlespeech.s2t.modules.mask import subsequent_mask
from paddlespeech.s2t.utils import checkpoint
from paddlespeech.s2t.utils import layer_tools
from paddlespeech.s2t.utils.log import Log
from paddlespeech.audio.utils.tensor_utils import add_sos_eos
from paddlespeech.audio.utils.tensor_utils import th_accuracy
from paddlespeech.s2t.utils.utility import UpdateConfig
__all__ = ["U2STModel", "U2STInferModel"]
@ -435,8 +434,8 @@ class U2STBaseModel(nn.Layer):
paddle.Tensor: new conformer cnn cache required for next chunk, with
same shape as the original cnn_cache.
"""
return self.encoder.forward_chunk(
xs, offset, required_cache_size, att_cache, cnn_cache)
return self.encoder.forward_chunk(xs, offset, required_cache_size,
att_cache, cnn_cache)
# @jit.to_static
def ctc_activation(self, xs: paddle.Tensor) -> paddle.Tensor:

@ -11,9 +11,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import paddle
from paddle import nn
import math
"""
To align initializers between paddle and torch,
the APIs below set a default initializer whose priority is higher than the global initializer.
@ -81,10 +82,18 @@ class Linear(nn.Linear):
name=None):
if weight_attr is None:
if global_init_type == "kaiming_uniform":
weight_attr = paddle.ParamAttr(initializer=nn.initializer.KaimingUniform(fan_in=None, negative_slope=math.sqrt(5), nonlinearity='leaky_relu'))
weight_attr = paddle.ParamAttr(
initializer=nn.initializer.KaimingUniform(
fan_in=None,
negative_slope=math.sqrt(5),
nonlinearity='leaky_relu'))
if bias_attr is None:
if global_init_type == "kaiming_uniform":
bias_attr = paddle.ParamAttr(initializer=nn.initializer.KaimingUniform(fan_in=None, negative_slope=math.sqrt(5), nonlinearity='leaky_relu'))
bias_attr = paddle.ParamAttr(
initializer=nn.initializer.KaimingUniform(
fan_in=None,
negative_slope=math.sqrt(5),
nonlinearity='leaky_relu'))
super(Linear, self).__init__(in_features, out_features, weight_attr,
bias_attr, name)
@ -104,10 +113,18 @@ class Conv1D(nn.Conv1D):
data_format='NCL'):
if weight_attr is None:
if global_init_type == "kaiming_uniform":
weight_attr = paddle.ParamAttr(initializer=nn.initializer.KaimingUniform(fan_in=None, negative_slope=math.sqrt(5), nonlinearity='leaky_relu'))
weight_attr = paddle.ParamAttr(
initializer=nn.initializer.KaimingUniform(
fan_in=None,
negative_slope=math.sqrt(5),
nonlinearity='leaky_relu'))
if bias_attr is None:
if global_init_type == "kaiming_uniform":
bias_attr = paddle.ParamAttr(initializer=nn.initializer.KaimingUniform(fan_in=None, negative_slope=math.sqrt(5), nonlinearity='leaky_relu'))
bias_attr = paddle.ParamAttr(
initializer=nn.initializer.KaimingUniform(
fan_in=None,
negative_slope=math.sqrt(5),
nonlinearity='leaky_relu'))
super(Conv1D, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
groups, padding_mode, weight_attr, bias_attr, data_format)
@ -128,10 +145,18 @@ class Conv2D(nn.Conv2D):
data_format='NCHW'):
if weight_attr is None:
if global_init_type == "kaiming_uniform":
weight_attr = paddle.ParamAttr(initializer=nn.initializer.KaimingUniform(fan_in=None, negative_slope=math.sqrt(5), nonlinearity='leaky_relu'))
weight_attr = paddle.ParamAttr(
initializer=nn.initializer.KaimingUniform(
fan_in=None,
negative_slope=math.sqrt(5),
nonlinearity='leaky_relu'))
if bias_attr is None:
if global_init_type == "kaiming_uniform":
bias_attr = paddle.ParamAttr(initializer=nn.initializer.KaimingUniform(fan_in=None, negative_slope=math.sqrt(5), nonlinearity='leaky_relu'))
bias_attr = paddle.ParamAttr(
initializer=nn.initializer.KaimingUniform(
fan_in=None,
negative_slope=math.sqrt(5),
nonlinearity='leaky_relu'))
super(Conv2D, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
groups, padding_mode, weight_attr, bias_attr, data_format)
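The same kaiming-uniform default, pulled out of the three layer subclasses above into a standalone sketch:

import math
import paddle
from paddle import nn

kaiming = paddle.ParamAttr(initializer=nn.initializer.KaimingUniform(
    fan_in=None, negative_slope=math.sqrt(5), nonlinearity='leaky_relu'))
layer = nn.Linear(80, 256, weight_attr=kaiming, bias_attr=kaiming)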

@ -255,6 +255,7 @@ class BaseEncoder(nn.Layer):
xs,
att_mask,
pos_emb,
mask_pad=paddle.ones([0, 0, 0], dtype=paddle.bool),
att_cache=att_cache[i:i + 1] if elayers > 0 else att_cache,
cnn_cache=cnn_cache[i:i + 1]
if paddle.shape(cnn_cache)[0] > 0 else cnn_cache, )

@ -195,8 +195,7 @@ class ConformerEncoderLayer(nn.Layer):
x: paddle.Tensor,
mask: paddle.Tensor,
pos_emb: paddle.Tensor,
mask_pad: paddle.
Tensor, # paddle.ones([0, 0, 0], dtype=paddle.bool)
mask_pad: paddle.Tensor, #paddle.ones([0, 0, 0],dtype=paddle.bool)
att_cache: paddle.Tensor, # paddle.zeros([0, 0, 0, 0])
cnn_cache: paddle.Tensor, # paddle.zeros([0, 0, 0, 0])
) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor, paddle.Tensor]:

@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
class DefaultInitializerContext(object):
"""

@ -103,7 +103,9 @@ class OnlineCTCEndpoint:
assert self.num_frames_decoded >= self.trailing_silence_frames
assert self.frame_shift_in_ms > 0
decoding_something = (self.num_frames_decoded > self.trailing_silence_frames) and decoding_something
decoding_something = (
self.num_frames_decoded > self.trailing_silence_frames
) and decoding_something
utterance_length = self.num_frames_decoded * self.frame_shift_in_ms
trailing_silence = self.trailing_silence_frames * self.frame_shift_in_ms
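Worked numbers for the frame arithmetic above (values illustrative):

num_frames_decoded, trailing_silence_frames, frame_shift_in_ms = 120, 80, 10
decoding_something = num_frames_decoded > trailing_silence_frames  # True
utterance_length = num_frames_decoded * frame_shift_in_ms  # 1200 ms
trailing_silence = trailing_silence_frames * frame_shift_in_ms  # 800 ms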

@ -21,12 +21,12 @@ import paddle
from numpy import float32
from yacs.config import CfgNode
from paddlespeech.audio.transform.transformation import Transformation
from paddlespeech.cli.asr.infer import ASRExecutor
from paddlespeech.cli.log import logger
from paddlespeech.resource import CommonTaskResource
from paddlespeech.s2t.frontend.featurizer.text_featurizer import TextFeaturizer
from paddlespeech.s2t.modules.ctc import CTCDecoder
from paddlespeech.audio.transform.transformation import Transformation
from paddlespeech.s2t.utils.utility import UpdateConfig
from paddlespeech.server.engine.base_engine import BaseEngine
from paddlespeech.server.utils import onnx_infer

@ -21,10 +21,10 @@ import paddle
from numpy import float32
from yacs.config import CfgNode
from paddlespeech.audio.transform.transformation import Transformation
from paddlespeech.cli.asr.infer import ASRExecutor
from paddlespeech.cli.log import logger
from paddlespeech.resource import CommonTaskResource
from paddlespeech.audio.transform.transformation import Transformation
from paddlespeech.s2t.frontend.featurizer.text_featurizer import TextFeaturizer
from paddlespeech.s2t.modules.ctc import CTCDecoder
from paddlespeech.s2t.utils.utility import UpdateConfig

@ -476,8 +476,12 @@ class PaddleASRConnectionHanddler:
# forward chunk
(y, self.att_cache,
self.cnn_cache) = self.model.encoder.forward_chunk(
chunk_xs, self.offset, required_cache_size, self.att_cache,
self.cnn_cache, paddle.ones([0, 0, 0], dtype=paddle.bool))
chunk_xs,
self.offset,
required_cache_size,
att_cache=self.att_cache,
cnn_cache=self.cnn_cache,
att_mask=paddle.ones([0, 0, 0], dtype=paddle.bool))
outputs.append(y)
# update the global offset, in decoding frame unit

@ -68,11 +68,13 @@ class ASREngine(BaseEngine):
return False
self.executor._init_from_path(
model_type = self.config.model, lang = self.config.lang, sample_rate = self.config.sample_rate,
cfg_path = self.config.cfg_path, decode_method = self.config.decode_method,
model_type=self.config.model,
lang=self.config.lang,
sample_rate=self.config.sample_rate,
cfg_path=self.config.cfg_path,
decode_method=self.config.decode_method,
ckpt_path=self.config.ckpt_path)
logger.info("Initialize ASR server engine successfully on device: %s." %
(self.device))
return True

@ -483,3 +483,58 @@ def vits_single_spk_batch_fn(examples):
"speech": speech
}
return batch
def vits_multi_spk_batch_fn(examples):
"""
Returns:
Dict[str, Any]:
- text (Tensor): Text index tensor (B, T_text).
- text_lengths (Tensor): Text length tensor (B,).
- feats (Tensor): Feature tensor (B, T_feats, aux_channels).
- feats_lengths (Tensor): Feature length tensor (B,).
- speech (Tensor): Speech waveform tensor (B, T_wav).
- spk_id (Optional[Tensor]): Speaker index tensor (B,) or (B, 1).
- spk_emb (Optional[Tensor]): Speaker embedding tensor (B, spk_embed_dim).
"""
# fields = ["text", "text_lengths", "feats", "feats_lengths", "speech", "spk_id"/"spk_emb"]
text = [np.array(item["text"], dtype=np.int64) for item in examples]
feats = [np.array(item["feats"], dtype=np.float32) for item in examples]
speech = [np.array(item["wave"], dtype=np.float32) for item in examples]
text_lengths = [
np.array(item["text_lengths"], dtype=np.int64) for item in examples
]
feats_lengths = [
np.array(item["feats_lengths"], dtype=np.int64) for item in examples
]
text = batch_sequences(text)
feats = batch_sequences(feats)
speech = batch_sequences(speech)
# convert each batch to paddle.Tensor
text = paddle.to_tensor(text)
feats = paddle.to_tensor(feats)
text_lengths = paddle.to_tensor(text_lengths)
feats_lengths = paddle.to_tensor(feats_lengths)
batch = {
"text": text,
"text_lengths": text_lengths,
"feats": feats,
"feats_lengths": feats_lengths,
"speech": speech
}
# spk_emb has a higher priority than spk_id
if "spk_emb" in examples[0]:
spk_emb = [
np.array(item["spk_emb"], dtype=np.float32) for item in examples
]
spk_emb = batch_sequences(spk_emb)
spk_emb = paddle.to_tensor(spk_emb)
batch["spk_emb"] = spk_emb
elif "spk_id" in examples[0]:
spk_id = [np.array(item["spk_id"], dtype=np.int64) for item in examples]
spk_id = paddle.to_tensor(spk_id)
batch["spk_id"] = spk_id
return batch
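A hedged check of the collate priority above: when a sample carries both fields, only "spk_emb" survives into the batch (shapes illustrative):

examples = [{
    "text": [1, 2], "text_lengths": 2,
    "feats": np.zeros((5, 80), np.float32), "feats_lengths": 5,
    "wave": np.zeros((400, ), np.float32),
    "spk_emb": np.zeros((256, ), np.float32), "spk_id": 0,
}]
batch = vits_multi_spk_batch_fn(examples)
assert "spk_emb" in batch and "spk_id" not in batch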

@ -1,8 +1,9 @@
import paddle
import math
import numpy as np
from paddle.io import BatchSampler
class ErnieSATSampler(BatchSampler):
"""Sampler that restricts data loading to a subset of the dataset.
In such a case, each process can pass a DistributedBatchSampler instance
@ -110,8 +111,8 @@ class ErnieSATSampler(BatchSampler):
subsampled_indices.extend(indices[i:i + self.batch_size])
indices = indices[len(indices) - last_batch_size:]
subsampled_indices.extend(indices[
self.local_rank * last_local_batch_size:(
subsampled_indices.extend(
indices[self.local_rank * last_local_batch_size:(
self.local_rank + 1) * last_local_batch_size])
return subsampled_indices

@ -25,7 +25,6 @@ from paddle import DataParallel
from paddle import distributed as dist
from paddle import nn
from paddle.io import DataLoader
from paddle.io import DistributedBatchSampler
from paddle.optimizer import Adam
from yacs.config import CfgNode

@ -11,32 +11,35 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import os
from pathlib import Path
from typing import Dict
from typing import List
from typing import Union
import os
import numpy as np
import paddle
import yaml
from yacs.config import CfgNode
import hashlib
from paddlespeech.t2s.exps.syn_utils import get_am_inference
from paddlespeech.t2s.exps.syn_utils import get_voc_inference
def _get_user():
return os.path.expanduser('~').split('/')[-1]
def str2md5(string):
md5_val = hashlib.md5(string.encode('utf8')).hexdigest()
return md5_val
def get_tmp_name(text: str):
return _get_user() + '_' + str(os.getpid()) + '_' + str2md5(text)
def get_dict(dictfile: str):
word2phns_dict = {}
with open(dictfile, 'r') as fid:

@ -298,8 +298,8 @@ def am_to_static(am_inference,
am_name = am[:am.rindex('_')]
am_dataset = am[am.rindex('_') + 1:]
if am_name == 'fastspeech2':
if am_dataset in {"aishell3", "vctk", "mix"
} and speaker_dict is not None:
if am_dataset in {"aishell3", "vctk",
"mix"} and speaker_dict is not None:
am_inference = jit.to_static(
am_inference,
input_spec=[
@ -311,8 +311,8 @@ def am_to_static(am_inference,
am_inference, input_spec=[InputSpec([-1], dtype=paddle.int64)])
elif am_name == 'speedyspeech':
if am_dataset in {"aishell3", "vctk", "mix"
} and speaker_dict is not None:
if am_dataset in {"aishell3", "vctk",
"mix"} and speaker_dict is not None:
am_inference = jit.to_static(
am_inference,
input_spec=[

@ -15,6 +15,7 @@ import argparse
from pathlib import Path
import jsonlines
import numpy as np
import paddle
import soundfile as sf
import yaml
@ -23,6 +24,7 @@ from yacs.config import CfgNode
from paddlespeech.t2s.datasets.data_table import DataTable
from paddlespeech.t2s.models.vits import VITS
from paddlespeech.t2s.utils import str2bool
def evaluate(args):
@ -40,8 +42,26 @@ def evaluate(args):
print(config)
fields = ["utt_id", "text"]
converters = {}
spk_num = None
if args.speaker_dict is not None:
print("multiple speaker vits!")
with open(args.speaker_dict, 'rt') as f:
spk_id = [line.strip().split() for line in f.readlines()]
spk_num = len(spk_id)
fields += ["spk_id"]
elif args.voice_cloning:
print("Evaluating voice cloning!")
fields += ["spk_emb"]
else:
print("single speaker vits!")
print("spk_num:", spk_num)
test_dataset = DataTable(data=test_metadata, fields=fields)
test_dataset = DataTable(
data=test_metadata,
fields=fields,
converters=converters, )
with open(args.phones_dict, "r") as f:
phn_id = [line.strip().split() for line in f.readlines()]
@ -49,6 +69,7 @@ def evaluate(args):
print("vocab_size:", vocab_size)
odim = config.n_fft // 2 + 1
config["model"]["generator_params"]["spks"] = spk_num
vits = VITS(idim=vocab_size, odim=odim, **config["model"])
vits.set_state_dict(paddle.load(args.ckpt)["main_params"])
@ -65,7 +86,15 @@ def evaluate(args):
phone_ids = paddle.to_tensor(datum["text"])
with timer() as t:
with paddle.no_grad():
out = vits.inference(text=phone_ids)
spk_emb = None
spk_id = None
# multi speaker
if args.voice_cloning and "spk_emb" in datum:
spk_emb = paddle.to_tensor(np.load(datum["spk_emb"]))
elif "spk_id" in datum:
spk_id = paddle.to_tensor(datum["spk_id"])
out = vits.inference(
text=phone_ids, sids=spk_id, spembs=spk_emb)
wav = out["wav"]
wav = wav.numpy()
N += wav.size
@ -90,6 +119,13 @@ def parse_args():
'--ckpt', type=str, default=None, help='Checkpoint file of VITS.')
parser.add_argument(
"--phones_dict", type=str, default=None, help="phone vocabulary file.")
parser.add_argument(
"--speaker_dict", type=str, default=None, help="speaker id map file.")
parser.add_argument(
"--voice-cloning",
type=str2bool,
default=False,
help="whether training voice cloning model.")
# other
parser.add_argument(
"--ngpu", type=int, default=1, help="if ngpu == 0, use cpu.")

@ -42,12 +42,23 @@ def evaluate(args):
# frontend
frontend = get_frontend(lang=args.lang, phones_dict=args.phones_dict)
spk_num = None
if args.speaker_dict is not None:
print("multiple speaker vits!")
with open(args.speaker_dict, 'rt') as f:
spk_id = [line.strip().split() for line in f.readlines()]
spk_num = len(spk_id)
else:
print("single speaker vits!")
print("spk_num:", spk_num)
with open(args.phones_dict, "r") as f:
phn_id = [line.strip().split() for line in f.readlines()]
vocab_size = len(phn_id)
print("vocab_size:", vocab_size)
odim = config.n_fft // 2 + 1
config["model"]["generator_params"]["spks"] = spk_num
vits = VITS(idim=vocab_size, odim=odim, **config["model"])
vits.set_state_dict(paddle.load(args.ckpt)["main_params"])
@ -78,7 +89,10 @@ def evaluate(args):
flags = 0
for i in range(len(phone_ids)):
part_phone_ids = phone_ids[i]
out = vits.inference(text=part_phone_ids)
spk_id = None
if spk_num is not None:
spk_id = paddle.to_tensor(args.spk_id)
out = vits.inference(text=part_phone_ids, sids=spk_id)
wav = out["wav"]
if flags == 0:
wav_all = wav
@ -109,6 +123,13 @@ def parse_args():
'--ckpt', type=str, default=None, help='Checkpoint file of VITS.')
parser.add_argument(
"--phones_dict", type=str, default=None, help="phone vocabulary file.")
parser.add_argument(
"--speaker_dict", type=str, default=None, help="speaker id map file.")
parser.add_argument(
'--spk_id',
type=int,
default=0,
help='spk id for multi speaker acoustic model')
# other
parser.add_argument(
'--lang',

@ -28,6 +28,7 @@ from paddle.io import DistributedBatchSampler
from paddle.optimizer import Adam
from yacs.config import CfgNode
from paddlespeech.t2s.datasets.am_batch_fn import vits_multi_spk_batch_fn
from paddlespeech.t2s.datasets.am_batch_fn import vits_single_spk_batch_fn
from paddlespeech.t2s.datasets.data_table import DataTable
from paddlespeech.t2s.models.vits import VITS
@ -43,6 +44,7 @@ from paddlespeech.t2s.training.extensions.visualizer import VisualDL
from paddlespeech.t2s.training.optimizer import scheduler_classes
from paddlespeech.t2s.training.seeding import seed_everything
from paddlespeech.t2s.training.trainer import Trainer
from paddlespeech.t2s.utils import str2bool
def train_sp(args, config):
@ -72,6 +74,23 @@ def train_sp(args, config):
"wave": np.load,
"feats": np.load,
}
spk_num = None
if args.speaker_dict is not None:
print("multiple speaker vits!")
collate_fn = vits_multi_spk_batch_fn
with open(args.speaker_dict, 'rt') as f:
spk_id = [line.strip().split() for line in f.readlines()]
spk_num = len(spk_id)
fields += ["spk_id"]
elif args.voice_cloning:
print("Training voice cloning!")
collate_fn = vits_multi_spk_batch_fn
fields += ["spk_emb"]
converters["spk_emb"] = np.load
else:
print("single speaker vits!")
collate_fn = vits_single_spk_batch_fn
print("spk_num:", spk_num)
# construct dataset for training and validation
with jsonlines.open(args.train_metadata, 'r') as reader:
@ -100,18 +119,16 @@ def train_sp(args, config):
drop_last=False)
print("samplers done!")
train_batch_fn = vits_single_spk_batch_fn
train_dataloader = DataLoader(
train_dataset,
batch_sampler=train_sampler,
collate_fn=train_batch_fn,
collate_fn=collate_fn,
num_workers=config.num_workers)
dev_dataloader = DataLoader(
dev_dataset,
batch_sampler=dev_sampler,
collate_fn=train_batch_fn,
collate_fn=collate_fn,
num_workers=config.num_workers)
print("dataloaders done!")
@ -121,6 +138,7 @@ def train_sp(args, config):
print("vocab_size:", vocab_size)
odim = config.n_fft // 2 + 1
config["model"]["generator_params"]["spks"] = spk_num
model = VITS(idim=vocab_size, odim=odim, **config["model"])
gen_parameters = model.generator.parameters()
dis_parameters = model.discriminator.parameters()
@ -240,6 +258,17 @@ def main():
"--ngpu", type=int, default=1, help="if ngpu == 0, use cpu.")
parser.add_argument(
"--phones-dict", type=str, default=None, help="phone vocabulary file.")
parser.add_argument(
"--speaker-dict",
type=str,
default=None,
help="speaker id map file for multiple speaker model.")
parser.add_argument(
"--voice-cloning",
type=str2bool,
default=False,
help="whether training voice cloning model.")
args = parser.parse_args()

@ -0,0 +1,213 @@
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from pathlib import Path
import librosa
import numpy as np
import paddle
import soundfile as sf
import yaml
from yacs.config import CfgNode
from paddlespeech.t2s.datasets.get_feats import LinearSpectrogram
from paddlespeech.t2s.exps.syn_utils import get_frontend
from paddlespeech.t2s.models.vits import VITS
from paddlespeech.t2s.utils import str2bool
from paddlespeech.vector.exps.ge2e.audio_processor import SpeakerVerificationPreprocessor
from paddlespeech.vector.models.lstm_speaker_encoder import LSTMSpeakerEncoder
def voice_cloning(args):
# Init body.
with open(args.config) as f:
config = CfgNode(yaml.safe_load(f))
print("========Args========")
print(yaml.safe_dump(vars(args)))
print("========Config========")
print(config)
# speaker encoder
spec_extractor = LinearSpectrogram(
n_fft=config.n_fft,
hop_length=config.n_shift,
win_length=config.win_length,
window=config.window)
p = SpeakerVerificationPreprocessor(
sampling_rate=16000,
audio_norm_target_dBFS=-30,
vad_window_length=30,
vad_moving_average_width=8,
vad_max_silence_length=6,
mel_window_length=25,
mel_window_step=10,
n_mels=40,
partial_n_frames=160,
min_pad_coverage=0.75,
partial_overlap_ratio=0.5)
print("Audio Processor Done!")
speaker_encoder = LSTMSpeakerEncoder(
n_mels=40, num_layers=3, hidden_size=256, output_size=256)
speaker_encoder.set_state_dict(paddle.load(args.ge2e_params_path))
speaker_encoder.eval()
print("GE2E Done!")
frontend = get_frontend(lang=args.lang, phones_dict=args.phones_dict)
print("frontend done!")
with open(args.phones_dict, "r") as f:
phn_id = [line.strip().split() for line in f.readlines()]
vocab_size = len(phn_id)
print("vocab_size:", vocab_size)
odim = config.n_fft // 2 + 1
vits = VITS(idim=vocab_size, odim=odim, **config["model"])
vits.set_state_dict(paddle.load(args.ckpt)["main_params"])
vits.eval()
output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
input_dir = Path(args.input_dir)
if args.audio_path == "":
args.audio_path = None
if args.audio_path is None:
sentence = args.text
merge_sentences = True
add_blank = args.add_blank
if args.lang == 'zh':
input_ids = frontend.get_input_ids(
sentence, merge_sentences=merge_sentences, add_blank=add_blank)
elif args.lang == 'en':
input_ids = frontend.get_input_ids(
sentence, merge_sentences=merge_sentences)
phone_ids = input_ids["phone_ids"][0]
else:
wav, _ = librosa.load(str(args.audio_path), sr=config.fs)
feats = paddle.to_tensor(spec_extractor.get_linear_spectrogram(wav))
mel_sequences = p.extract_mel_partials(
p.preprocess_wav(args.audio_path))
with paddle.no_grad():
spk_emb_src = speaker_encoder.embed_utterance(
paddle.to_tensor(mel_sequences))
for name in os.listdir(input_dir):
utt_id = name.split(".")[0]
ref_audio_path = input_dir / name
mel_sequences = p.extract_mel_partials(p.preprocess_wav(ref_audio_path))
# print("mel_sequences: ", mel_sequences.shape)
with paddle.no_grad():
spk_emb = speaker_encoder.embed_utterance(
paddle.to_tensor(mel_sequences))
# print("spk_emb shape: ", spk_emb.shape)
with paddle.no_grad():
if args.audio_path is None:
out = vits.inference(text=phone_ids, spembs=spk_emb)
else:
out = vits.voice_conversion(
feats=feats, spembs_src=spk_emb_src, spembs_tgt=spk_emb)
wav = out["wav"]
sf.write(
str(output_dir / (utt_id + ".wav")),
wav.numpy(),
samplerate=config.fs)
print(f"{utt_id} done!")
# Randomly generate a speaker embedding with values in [0, 0.2); 256 is the dim of spk_emb
random_spk_emb = np.random.rand(256) * 0.2
random_spk_emb = paddle.to_tensor(random_spk_emb, dtype='float32')
utt_id = "random_spk_emb"
with paddle.no_grad():
if args.audio_path is None:
out = vits.inference(text=phone_ids, spembs=random_spk_emb)
else:
out = vits.voice_conversion(
feats=feats, spembs_src=spk_emb_src, spembs_tgt=random_spk_emb)
wav = out["wav"]
sf.write(
str(output_dir / (utt_id + ".wav")), wav.numpy(), samplerate=config.fs)
print(f"{utt_id} done!")
def parse_args():
# parse args and config
parser = argparse.ArgumentParser(description="")
parser.add_argument(
'--config', type=str, default=None, help='Config of VITS.')
parser.add_argument(
'--ckpt', type=str, default=None, help='Checkpoint file of VITS.')
parser.add_argument(
"--phones_dict", type=str, default=None, help="phone vocabulary file.")
parser.add_argument(
"--text",
type=str,
default="每当你觉得,想要批评什么人的时候,你切要记着,这个世界上的人,并非都具备你禀有的条件。",
help="text to synthesize, a line")
parser.add_argument(
'--lang',
type=str,
default='zh',
help='Choose model language. zh or en')
parser.add_argument(
"--audio-path",
type=str,
default=None,
help="audio as content to synthesize")
parser.add_argument(
"--ge2e_params_path", type=str, help="ge2e params path.")
parser.add_argument(
"--ngpu", type=int, default=1, help="if ngpu=0, use cpu.")
parser.add_argument(
"--input-dir",
type=str,
help="input dir of *.wav, the sample rate will be resample to 16k.")
parser.add_argument("--output-dir", type=str, help="output dir.")
parser.add_argument(
"--add-blank",
type=str2bool,
default=True,
help="whether to add blank between phones")
args = parser.parse_args()
return args
def main():
args = parse_args()
if args.ngpu == 0:
paddle.set_device("cpu")
elif args.ngpu > 0:
paddle.set_device("gpu")
else:
print("ngpu should >= 0 !")
voice_cloning(args)
if __name__ == "__main__":
main()

@ -1,2 +1 @@
from paddlespeech.t2s.frontend.g2pw.onnx_api import G2PWOnnxConverter

@ -61,8 +61,11 @@ class MixFrontend():
return False
def is_end(self, before_char, after_char) -> bool:
if ((self.is_alphabet(before_char) or before_char == " ") and
(self.is_alphabet(after_char) or after_char == " ")):
flag = 0
for char in (before_char, after_char):
if self.is_alphabet(char) or char == " ":
flag += 1
if flag == 2:
return True
else:
return False

@ -84,9 +84,7 @@ class ToneSandhi():
if j - 1 >= 0 and item == word[j - 1] and pos[0] in {"n", "v", "a"}:
finals[j] = finals[j][:-1] + "5"
ge_idx = word.find("")
if (len(word) > 1 and word[-1] in "吧呢啊呐噻嘛吖嗨呐哦哒滴哩哟喽啰耶喔诶") or (
len(word) > 1 and word[-2] in '好是帅酷棒衰烂臭狗糗' and
word[-1] == '') or (len(word) == 1 and word[-1] in "额嗯"):
if len(word) >= 1 and word[-1] in "吧呢啊呐噻嘛吖嗨呐哦哒滴哩哟喽啰耶喔诶":
finals[-1] = finals[-1][:-1] + "5"
elif len(word) >= 1 and word[-1] in "的地得":
finals[-1] = finals[-1][:-1] + "5"

@ -522,6 +522,82 @@ class VITSGenerator(nn.Layer):
return wav.squeeze(1), attn.squeeze(1), dur.squeeze(1)
def voice_conversion(
self,
feats: paddle.Tensor=None,
feats_lengths: paddle.Tensor=None,
sids_src: Optional[paddle.Tensor]=None,
sids_tgt: Optional[paddle.Tensor]=None,
spembs_src: Optional[paddle.Tensor]=None,
spembs_tgt: Optional[paddle.Tensor]=None,
lids: Optional[paddle.Tensor]=None, ) -> paddle.Tensor:
"""Run voice conversion.
Args:
feats (Tensor): Feature tensor (B, aux_channels, T_feats,).
feats_lengths (Tensor): Feature length tensor (B,).
sids_src (Optional[Tensor]): Speaker index tensor of source feature (B,) or (B, 1).
sids_tgt (Optional[Tensor]): Speaker index tensor of target feature (B,) or (B, 1).
spembs_src (Optional[Tensor]): Speaker embedding tensor of source feature (B, spk_embed_dim).
spembs_tgt (Optional[Tensor]): Speaker embedding tensor of target feature (B, spk_embed_dim).
lids (Optional[Tensor]): Language index tensor (B,) or (B, 1).
Returns:
Tensor: Generated waveform tensor (B, T_wav).
"""
# encoder
g_src = None
g_tgt = None
if self.spks is not None:
# (B, global_channels, 1)
g_src = self.global_emb(
paddle.reshape(sids_src, [-1])).unsqueeze(-1)
g_tgt = self.global_emb(
paddle.reshape(sids_tgt, [-1])).unsqueeze(-1)
if self.spk_embed_dim is not None:
# (B, global_channels, 1)
g_src_ = self.spemb_proj(
F.normalize(spembs_src.unsqueeze(0))).unsqueeze(-1)
if g_src is None:
g_src = g_src_
else:
g_src = g_src + g_src_
# (B, global_channels, 1)
g_tgt_ = self.spemb_proj(
F.normalize(spembs_tgt.unsqueeze(0))).unsqueeze(-1)
if g_tgt is None:
g_tgt = g_tgt_
else:
g_tgt = g_tgt + g_tgt_
if self.langs is not None:
# (B, global_channels, 1)
g_ = self.lang_emb(paddle.reshape(lids, [-1])).unsqueeze(-1)
if g_src is None:
g_src = g_
else:
g_src = g_src + g_
if g_tgt is None:
g_tgt = g_
else:
g_tgt = g_tgt + g_
# forward posterior encoder
z, m_q, logs_q, y_mask = self.posterior_encoder(
feats, feats_lengths, g=g_src)
# forward flow
# (B, H, T_feats)
z_p = self.flow(z, y_mask, g=g_src)
# decoder
z_hat = self.flow(z_p, y_mask, g=g_tgt, inverse=True)
wav = self.decoder(z_hat * y_mask, g=g_tgt)
return wav.squeeze(1)
def _generate_path(self, dur: paddle.Tensor,
mask: paddle.Tensor) -> paddle.Tensor:
"""Generate path a.k.a. monotonic attention.

@ -381,7 +381,7 @@ class VITS(nn.Layer):
if use_teacher_forcing:
assert feats is not None
feats = feats[None].transpose([0, 2, 1])
feats_lengths = paddle.to_tensor([paddle.shape(feats)[2]])
feats_lengths = paddle.to_tensor(paddle.shape(feats)[2])
wav, att_w, dur = self.generator.inference(
text=text,
text_lengths=text_lengths,
@ -406,3 +406,43 @@ class VITS(nn.Layer):
max_len=max_len, )
return dict(
wav=paddle.reshape(wav, [-1]), att_w=att_w[0], duration=dur[0])
def voice_conversion(
self,
feats: paddle.Tensor,
sids_src: Optional[paddle.Tensor]=None,
sids_tgt: Optional[paddle.Tensor]=None,
spembs_src: Optional[paddle.Tensor]=None,
spembs_tgt: Optional[paddle.Tensor]=None,
lids: Optional[paddle.Tensor]=None, ) -> paddle.Tensor:
"""Run voice conversion.
Args:
feats (Tensor): Feature tensor (T_feats, aux_channels).
sids_src (Optional[Tensor]): Speaker index tensor of source feature (1,).
sids_tgt (Optional[Tensor]): Speaker index tensor of target feature (1,).
spembs_src (Optional[Tensor]): Speaker embedding tensor of source feature (spk_embed_dim,).
spembs_tgt (Optional[Tensor]): Speaker embedding tensor of target feature (spk_embed_dim,).
lids (Optional[Tensor]): Language index tensor (1,).
Returns:
Dict[str, Tensor]:
* wav (Tensor): Generated waveform tensor (T_wav,).
"""
assert feats is not None
feats = feats[None].transpose([0, 2, 1])
feats_lengths = paddle.to_tensor(paddle.shape(feats)[2])
sids_none = sids_src is None and sids_tgt is None
spembs_none = spembs_src is None and spembs_tgt is None
assert not sids_none or not spembs_none
wav = self.generator.voice_conversion(
feats,
feats_lengths,
sids_src,
sids_tgt,
spembs_src,
spembs_tgt,
lids, )
return dict(wav=paddle.reshape(wav, [-1]))
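A hedged usage sketch of VITS.voice_conversion() above; spk_embed_dim is assumed to be 256 and the feats layout follows the docstring:

feats = paddle.randn([200, 513])  # (T_feats, aux_channels), illustrative
spemb_src = paddle.randn([256])
spemb_tgt = paddle.randn([256])
out = vits.voice_conversion(
    feats=feats, spembs_src=spemb_src, spembs_tgt=spemb_tgt)
wav = out["wav"]  # (T_wav, )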

@ -111,6 +111,8 @@ class VITSUpdater(StandardUpdater):
text_lengths=batch["text_lengths"],
feats=batch["feats"],
feats_lengths=batch["feats_lengths"],
sids=batch.get("spk_id", None),
spembs=batch.get("spk_emb", None),
forward_generator=turn == "generator")
# Generator
if turn == "generator":
@ -268,6 +270,8 @@ class VITSEvaluator(StandardEvaluator):
text_lengths=batch["text_lengths"],
feats=batch["feats"],
feats_lengths=batch["feats_lengths"],
sids=batch.get("spk_id", None),
spembs=batch.get("spk_emb", None),
forward_generator=turn == "generator")
# Generator
if turn == "generator":

@ -24,10 +24,11 @@ from paddle.nn import Layer
from paddle.optimizer import Optimizer
from timer import timer
from paddlespeech.t2s.datasets.sampler import ErnieSATSampler
from paddlespeech.t2s.training.reporter import report
from paddlespeech.t2s.training.updater import UpdaterBase
from paddlespeech.t2s.training.updater import UpdaterState
from paddlespeech.t2s.datasets.sampler import ErnieSATSampler
class StandardUpdater(UpdaterBase):
"""An example of over-simplification. Things may not be that simple, but

@ -77,12 +77,7 @@ base = [
"pybind11",
]
server = [
"fastapi",
"uvicorn",
"pattern_singleton",
"websockets"
]
server = ["fastapi", "uvicorn", "pattern_singleton", "websockets"]
requirements = {
"install":

@ -490,18 +490,10 @@ class SymbolicShapeInference:
def _onnx_infer_single_node(self, node):
# skip onnx shape inference for some ops, as they are handled in _infer_*
skip_infer = node.op_type in [
'If', 'Loop', 'Scan', 'SplitToSequence', 'ZipMap', \
# contrib ops
'Attention', 'BiasGelu', \
'EmbedLayerNormalization', \
'FastGelu', 'Gelu', 'LayerNormalization', \
'LongformerAttention', \
'SkipLayerNormalization', \
'PythonOp'
'If', 'Loop', 'Scan', 'SplitToSequence', 'ZipMap', 'Attention',
'BiasGelu', 'EmbedLayerNormalization', 'FastGelu', 'Gelu',
'LayerNormalization', 'LongformerAttention',
'SkipLayerNormalization', 'PythonOp'
]
if not skip_infer:
@ -514,8 +506,8 @@ class SymbolicShapeInference:
if (get_opset(self.out_mp_) >= 9) and node.op_type in ['Unsqueeze']:
initializers = [
self.initializers_[name] for name in node.input
if (name in self.initializers_ and
name not in self.graph_inputs_)
if (name in self.initializers_ and name not in
self.graph_inputs_)
]
# run single node inference with self.known_vi_ shapes
@ -601,8 +593,8 @@ class SymbolicShapeInference:
for o in symbolic_shape_inference.out_mp_.graph.output
]
subgraph_new_symbolic_dims = set([
d for s in subgraph_shapes if s for d in s
if type(d) == str and not d in self.symbolic_dims_
d for s in subgraph_shapes
if s for d in s if type(d) == str and not d in self.symbolic_dims_
])
new_dims = {}
for d in subgraph_new_symbolic_dims:
@ -729,8 +721,9 @@ class SymbolicShapeInference:
for d, s in zip(sympy_shape[-rank:], strides)
]
total_pads = [
max(0, (k - s) if r == 0 else (k - r)) for k, s, r in
zip(effective_kernel_shape, strides, residual)
max(0, (k - s) if r == 0 else (k - r))
for k, s, r in zip(effective_kernel_shape, strides,
residual)
]
except TypeError: # sympy may throw TypeError: cannot determine truth value of Relational
total_pads = [
@ -1276,8 +1269,9 @@ class SymbolicShapeInference:
if pads is not None:
assert len(pads) == 2 * rank
new_sympy_shape = [
d + pad_up + pad_down for d, pad_up, pad_down in
zip(sympy_shape, pads[:rank], pads[rank:])
d + pad_up + pad_down
for d, pad_up, pad_down in zip(sympy_shape, pads[:rank], pads[
rank:])
]
self._update_computed_dims(new_sympy_shape)
else:
@ -1590,8 +1584,8 @@ class SymbolicShapeInference:
scales = list(scales)
new_sympy_shape = [
sympy.simplify(sympy.floor(d * (end - start) * scale))
for d, start, end, scale in
zip(input_sympy_shape, roi_start, roi_end, scales)
for d, start, end, scale in zip(input_sympy_shape,
roi_start, roi_end, scales)
]
self._update_computed_dims(new_sympy_shape)
else:
@ -2204,8 +2198,9 @@ class SymbolicShapeInference:
# topological sort nodes, note there might be dead nodes so we check if all graph outputs are reached to terminate
sorted_nodes = []
sorted_known_vi = set([
i.name for i in list(self.out_mp_.graph.input) +
list(self.out_mp_.graph.initializer)
i.name
for i in list(self.out_mp_.graph.input) + list(
self.out_mp_.graph.initializer)
])
if any([o.name in sorted_known_vi for o in self.out_mp_.graph.output]):
# Loop/Scan will have some graph output in graph inputs, so don't do topological sort
