{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Story Talker"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 用 PaddleOCR 识别图片中的文字"
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {
"scrolled": true
},
"outputs": [
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAC7QAAAkECAIAAAAe3ECHAAABVmlDQ1BJQ0MgUHJvZmlsZQAAeJxjYGBSSSwoyGFhYGDIzSspCnJ3UoiIjFJgf8jADoS8DGIMConJxQWOAQE+QCUMMBoVfLvGwAiiL+uCzDolNbVJtV7A12Km8NWLr0SbMNWjAK6U1OJkIP0HiFOTC4pKGBgYU4Bs5fKSAhC7A8gWKQI6CsieA2KnQ9gbQOwkCPsIWE1IkDOQfQPIVkjOSASawfgDyNZJQhJPR2JD7QUBbpfM4oKcxEqFAGMCriUDlKRWlIBo5/yCyqLM9IwSBUdgKKUqeOYl6+koGBkYmjMwgMIcovpzIDgsGcXOIMSa7zMw2O7/////boSY134Gho1AnVw7EWIaFgwMgtwMDCd2FiQWJYKFmIGYKS2NgeHTcgYG3kgGBuELQD3RxWnGRmB5Rh4nBgbWe///f1ZjYGCfzMDwd8L//78X/f//dzFQ8x0GhgN5ABUhZe5sUv9jAAAAnGVYSWZNTQAqAAAACAAEARoABQAAAAEAAAA+ARsABQAAAAEAAABGASgAAwAAAAEAAgAAh2kABAAAAAEAAABOAAAAAAAAAEgAAAABAAAASAAAAAEABpAAAAcAAAAEMDIyMZEBAAcAAAAEAQIDAKAAAAcAAAAEMDEwMKACAAQAAAABAAALtKADAAQAAAABAAAJBKQGAAMAAAABAAAAAAAAAADLhdh3AAEAAElEQVR4nOz9XbMkSZIdBp6jZu4RN29mVXVXd0/P9HwBGGAwJARLcEAQBAmCfCAp+yEr+7KPuyuyb/vL+AdWyJflAhAsCGAXXJKzQgADDOa7pz9muqurMvPeCHdTPfugZh4R9yMrs7qyuronTbJuxY0b4W5ubm6mevToUUqrAwLtsNrLW9zeLvZynV7+8OV3Pv74O5988sM/+N73/1//47/6B//ot377t/7QnwP27G//J3/3//J/+j//7/93/9uf/8a14oXZbfjz25fP3UU92U3vT7v3yBksIGQCQBJvqwUASAAgIEIe7l5KIQkjtp+AJLK8tZ68az/7TRKZk01QQPJ1PR6PtdZSSqkVLDCCBUBAhrc389+1d+1d+yKalLuYAAAByGMlSRIwgJCN1+/a59lyxO+toXF6IQHKGwQAfrz8wPhc3H0nG3c7AEQBjDD0f/dbjJ/1jfr/56WN4X+t7U6P/2l8XRIQksDtxgmAcfeGHTvI3dvCFkSYBA9FOxwOZJERtdZpKvu91QozxB7AOGlIogIASJAQRw/HJHkTe1LS27SEf9x2/7bc7asANkGIoBRtdXdE5HUVm2yqsAIrkImf5VoDz3OVpbYR7hNA1NljCPbX7ax7FuOMBM+7r3Fx/aC5XPB8TYizn7j3Or813jmbHqf+5NX2dwQEGEBAjsueX7ZttYmLF1K/Ibz8Sh+WuPOtUJACJXmemhCAaDeKVfEi2ktrL6LdoC30pa2HQqfBzETSJnKCWEySJIogSSuwSjOUCphIyETLngRhyq2PJFkMJGzMozg5ZdskcSwkzax/zAPyvjjLYzQK/Zgxdl0AEe5rNIcCHmAgnHJAgudzWlUAkyCHmtoa3po83FdEQ3jeMrm3NVprJCVFwN2jqTV3d4TMzASySEKfVQZgLubu7oqIHKns2bIcrKDWOk/7WucIHG6Px+PxWf3A3aO15bi2ZW0tonmAH/3w493VftrvaKUpYJzmeZ7nau7ux+O6LEtrjVZ3u920m1mKiCDEGMMtAj/67g9aW2yqT55eF5sO7t7CytXV9dNSdqVWkorW/BixCG7V1vW4LAcwSimlFEOhl/W2LbfL8XYBcLWb9vvZJgPUloVCtFgPx+VwBFDnqe6mMLgkgjSSCMYa7l7nfT+sGYxA3s9GhCQzm6zUOlPIMb8VzDiVUgpLYZ2slILCUgpMMKEAFiigSQyEctKWWmutVouZBRggZEGDDEYzS/d/2j0hCll6PwFANA+tpOc7EQYVqAJ0E3L3ESUC+SSZew7XZGYKuvu6ru4e1tAv0uXH0JoXe/3k2eHl8Y//4M/+7W9/+zt/9MMf/eDm5fPmiwfx3gfTf/Sf/MYHH6Lslm9+65vG3X73td3uq7v9V588+Wqdn4LVOJdpqmWu7/9lWC3zFacd6hVKhRUAkufzDggEhvGpvuZt++h21W/JNM31zYEGOLAijqEFsUpOE+IlSUKIRb74uoQfEauiAVEoM+YzmHPapw/MKurM8gRlB+yBvbADClC3y8NYFIjbh+2JXCdzpYKdvdMe+DByvX1oiLgDLOcGOD08Aj/G2CpvlVaoIZbQynDI3RvRLFb4AX7AeqP1NnzF+h15i3YMP0YcLFaD5wQEICvBamVXpollR7MSE4CxysW2dpkZth32sv+S5BFq0Twi4IFQIQ0d92Aexl0R7fZwOBwOty8ln2udSjVBEZT5si6HY6zNKMjd1wjfrYflcFxvD+12fXl7uBGuf+lXP/iNv/7v/q//D3r289h9nXgfmiAEwC8APdEKCDgijoijdIAa0WJdDYFwyEMrYpFcilpr+Co/eiwRC+iGnMDIbULiyU+kSt05LWql1WpzKVdABYEmhP7kt3//v/u//4N/8T//7q/8+m/+H/+v/7cP/8Jf0vxmeKm8W6C5YgGIiGL9LYK//S/+xT/4f/79P/y9P/z617/+q7/8K//Ff/6f5kYNKNqyHF8uxxtfb2J96e3W221bXxxe/Oj5i4+8HUvh06dPJbHUebraXT3bX71X6jVtt3vylVKvUHe0qXUbxWDc756NtVcoVmtFNZCCBBKMPtlMgEu7j3/w2//T//iP/uHf/63/+Z9/9KPvfePnv/q3/s5v/nt/829+9ed+8en737h675vgNaIAAIEQJvaxfVstcGYuvqINT+ANn315/g9skgsNXKkVXIkGNegIb9CqaAzF4RZyxBJ+iDggDtAirIV9kgQKZLSZtIDxyVfMqtmMUnPVEpEGXdppoNFmswpUwLT5szpdTgfJZbkBEufIxvn1nq+rd8Zh/Ko385f1yBP/EG6fZ18e+vhdQ/3sTzfQ+c1Vt9IRUAANsUCBWCCXxLiVJAURDIdWKCCPdaFc4QwpWq6ZkuCfgC4ewIOwCE2SRbF6Bc3GCTAggk1aBQ9eQWQxs1psgk20vXFavRBVqBjWdXpUpRwVsYbCQZtqncu0R53hEkxGjS/I8uZtN2K4PjTILKcHDSrABFi/g7oC8nEzKN+0uOvLffanj2/83Uf260dbzoezff/ixWgnvKLPkDj7pOFsc0x/Cq37dLYCK7BCt+IBuKEOwlGxUg655FCfUQZEfIz0YSxoDkomQoIBBZgQO2hP7YEZMNjXTh2LhljDb00H+A1wA78RFqKhBCiQKIEINMXafG3RpOaQx9qW4/Fwu7TWzOo07UophJmWiJC3iBCcZElPS3DXurqv4U0AyUKU6r7deonpJUVoXbyUSWHL0toKsrTmy7FdX19LWtvxeDwuyyJqmqZabb/f21RrtXQNrHKe52ma2nr8+JNPopSvf
P1rTz/4Soiwsrt66mFlejLtntj+GfZXmK9QK2jCV5jjph24h54AOXQVCBBCKB0GTAHMY/7o8rFHN5H7Tr39yZRWNNFX+Dsz59I3B4CPgTS/c4uLS0//fLYbJLVPhFWxUC2iKRoVFkGt5AotiCV8jXATAcP6HGmSqT/ZfRlH6MGVsi3B4TszX5V8J11qwMafCwDyqnv0mkIlQMBEzLudjJLAYmWGqmCk6cxOJgmUHvCCKb+bO0h/fBiWyOpEVKASFaoiE3F96AI+tYXgHMCgcGf97wPOewjJ+GW7LwHAzv8qhxqwAg6toPePJdQAqHm/NI/19lAUiiPjCL8Jv4GOQluX75Kk1VKqWWWZzCrI8H40oYUWU4BOsgXBiai0K3BPzGaVLOt6lBwmMzMzWTFWmjW7IkxWaLNxAqfgHKjTfA1O4py7RsDIQpC4HZe3gdsFKEKu/HZnY1Uu/hDSxzohXQ5sbtdp0AIT+9tBCGzAAh3W5396+LM/iU++7598/+Pv/O73v/17Lz/50/1Un+2rmbG
"text/plain": [
"<PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=2996x2308 at 0x7FE9DD8FC470>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"from PIL import Image\n",
"img_path = 'source/frog_prince.jpg'\n",
"im = Image.open(img_path)\n",
"im.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 使用 TTS 合成的音频"
]
},
{
"cell_type": "code",
"execution_count": 49,
"metadata": {
"scrolled": true
},
"outputs": [
{
"data": {
"text/html": [
"\n",
" <audio controls=\"controls\" >\n",
" <source src=\"data:audio/x-wav;base64,UklGRmR+BQBXQVZFZm10IBAAAAABAAEAwF0AAIC7AAACABAAZGF0YUB+BQDj/9//3//h/+D/5P/k/+T/5v/m/+b/5//r/+z/6//r/+r/7P/s/+7/7//v/+//7//v//H/8f/x//D/8P/x//P/9f/1//T/9P/2//f/9//3//j/9//3//n/+P/4//j/+P/6//r/+v/6//v/+v/6//n/+v/6//r/+//6//n/+P/6//3//P/7//v//P/8//v//P/7//r/+P/5//r/+f/6//r/+f/7//v/+v/7//z/+v/7//v//P/7//v/+//8//v/+//8//z//P/9//z//P/9//3//f/8//7//v/+//7/AAD//wAAAAABAAAAAQABAAIAAgAAAAIAAQACAAEAAgABAAEAAgABAAEAAQACAAIAAgABAAEAAQAAAAEAAAABAAAAAAD//wAA//8AAAAA//8AAP//AAD/////AAD///7////+//3//f/8//3//f/9//3//v/9//3//f/9//3//v/8//3//f/9//7//f/9//z//f/8//3//P/9//z//P/8//z//P/7//v/+v/6//r/+v/6//r/+v/6//n/+v/5//n/+f/5//j/+f/5//n/+f/5//n/+f/5//n/+f/5//n/+f/5//n/+f/5//n/+v/5//r/+v/6//r/+v/6//r/+//7//v/+//6//v/+v/7//v/+v/7//v/+v/7//v/+//7//r/+//7//3//f/+///////+/////v/+//7//v///////v///////////wAAAAD//wAAAAD//wAAAAAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAP//AAAAAAEAAAABAAAAAQAAAAAAAAAAAAAAAAABAAAAAQABAAEAAAABAAAA//8AAAAA//8AAAAAAQAAAAEA//8BAAAA//8AAAAAAgAAAAEAAAABAAEAAAABAAEAAQABAAAAAQABAAEAAQACAAIAAQABAAEAAQABAAIAAQABAAIAAQABAAAAAQACAAIAAgACAAEAAQABAAEAAgACAAMAAgACAAMAAgACAAIAAgACAAMABAACAAIAAgACAAIAAwADAAIAAwADAAIAAwADAAMAAwADAAIAAgADAAIAAwADAAMAAwAEAAQABAADAAQABAADAAMAAwADAAMAAQACAAMAAQACAAIAAgABAAIAAgABAAIAAQACAAIAAwACAAMAAgADAAMAAwACAAIAAgACAAMAAwADAAMAAwADAAQABAADAAMAAwACAAIAAgACAAIAAQACAAIAAQABAAEAAgABAAEAAgABAAIAAgABAAEAAAAAAAEAAgACAAMAAwADAAMAAgACAAIAAgACAAEAAgAEAAQAAgAEAAMAAwACAAEAAQABAAIAAgADAAIAAwABAAIAAwAEAAMAAwAEAAMAAwADAAQAAwAFAAMAAwACAAIAAQABAAEAAAACAAEAAQABAAEAAAABAAEAAAAAAAAAAAAAAAEAAQAAAP7//v///wEAAAACAAIAAAAAAP///////wAAAQAAAAEAAQAAAAAAAAAAAP/////+//7////+//7////+//3//f/9//7//f/9//7//v/8//3//v/9//z//f/8//z//f/9//7//f/9//3//v/9/////v/+//7//v/+//7//v/9//7//v////7////9/////v/9/////v/+//3//f/9//z//f/8//7//f/9//7//P/8//z//P/8//z//P/8//3//P/8/////v/+//7//v/+//3//P/+/////////wAA//////7//f/+//7//v/+/////v/+//7//v/+//7//v/9//3//v/+//7//v/+//3//f/9//3//v////z//f/9//z/+//+//z/+//7//r//P/8//z//f/8//v/+//7//v//P/7//v/+//7//v//P/8//z/+//7//r/+v/6//z//P/9//3/+//7//r/+//8//z/+//6//z/+v/8//v/+//6//r/+f/6//r/+v/5//n/+//7//v/+v/7//n/+P/4//j/+f/3//f/+P/5//v/+v/7//r/+v/6//r/+v/6//v//P/9//7//f////7//f/9//7//v/+/////v/9//////////7/AQABAAAA//////3//v/+//7//v/+/////v/////////////////+//3//v/+//3//////wAAAQADAAIAAAABAP///v8AAAIABAADAAMAAAAAAP//AAABAAIAAgABAAEAAAABAP/////+//7//////wAAAQAAAP///f/9//7//////wAA/////////v/+//3//f/9//7/AAABAAAAAAD+//3///8BAAEAAQACAAEAAAABAAAAAQAAAP/////+//7///8BAP//AQABAAIAAQABAAAAAAD/////AAD///7/////////AQACAAIAAAD///7//v///wAAAwADAAIAAgABAAAAAAABAAAA///9////AAABAAEAAgABAAAAAQABAAAAAQD///7///8AAAIAAwADAAIAAQABAAAAAAAAAAAAAQABAAQAAQACAP///////wAAAAABAAIABAAEAAQAAwADAAIAAQAFAAUABQADAAEAAQACAAMABgAFAAMAAwADAAQABQAGAAQABAADAAMABQAIAAgACQAIAAUABAACAAIAAwAEAAYABwAIAAkACgAJAAcAAwAAAP//AgAHAAoACgAKAAgABgAFAAQAAwABAAUACwANAAsACAAFAAUABgAHAAYAAgD7//z/AgAGAAcACwAJAP//+//9/wIABwALAAoACwAHAAEAAQAEAAYACAAJAAkACgALAA0ADAALAAsABgD/////BQAOABgAGQAUAAsAAAD6//z/AgAKABAAFAASABEACwADAPz//f/+//v/AgAOABAADgAOAAsABgAFAAcABgAMABAADQAHAAQAAwAFAA4ADgABAAAA/v/1//b///8VACIAKAAkABsACwD1/+z/6P/h/+f/CQAlADwAQwAyAAgA3v/R/+n/FAAmACwAJQALAAAAAgASABYAFAAIAP//AgAIAA4AFgARAAYABwAQAAoAAwALABcAKwAmABAA+//w/+H/8f8TADkAUgBUADsAAwDM/7v/0P/u/wIAJABbAGcARgAVAOj/v/+q/8D/+f9OAHIAXQBHABgA1v+w/8T/2f/z/zEAYwBkADwA9/+3/5v/tv/0/yoAdwCDADEA/f8FAPr/nf92/+L/IwAvAEAAVAAsAPD/4f/2/+//3P/l//z/BgDz/yMAXQBJAAMAzf+d/6P/7v8+AFYAMQAPAPv/2f+k/7X/9v8fAB8ARwBnABsA+v8EAA8AAgDu//b/CQDy/7b/lf/O/x4AQQBSAFMAUADn/5b/v/8IAPX/zv/k/yUAOAAmACsAHAADAAwAVQAtAH7/V/+4//f/NACTALQAhQAhAND/pf+Z//v/SgAIAPD/MABEABEAEQA9AB8A2//N/woAMQA
oADcANQAKAB0AIADi/47/bv/M/20A/wDwAGUAuf8A/+v+zf8jAcMBVQEAAF3+9f3i/l8AlAHKAfQA3P9C/2z/4/8BAMD/of8LAKIAFQHVANT/Dv8w/+T/eQC4ANwAtwDO/9n+5P7c/60A0wDxALwAHgC//8f/XwAzAEL/PP+m/6//ZQAxASABuADQ/+f++P7e/90ABQF4AHz/D//Q/2MAswCqABwAg/9D/7L/4wAxAYAAGgDE/0H/Qv/0/2wAjgD7AO0A4/8X/1L/UwDAADcAqf/O/y4AGQBFAOYAbgCI/7j//v+R/2L/owAtAQYAwf84AFwA6v9g/zb/4f80AXYBOwBJ//f+Sf89ANYAjgEgAXf/a/4G/9z/SAAHAU4BwwC//yj/c/9kAOMA3P/N/h7/YgCdAZcBJwHIAOH/Bv9S/sv9x/7fAKAC9QKTAZf/ef5+/vb+kP8OAMUAjwFqAbYA+v/E//r/2f9b/+3+2f9cAUABFQASAA4APv9F/1oATgGHA
" Your browser does not support the audio element.\n",
" </audio>\n",
" "
],
"text/plain": [
"<IPython.lib.display.Audio object>"
]
},
"execution_count": 49,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import IPython.display as dp\n",
"dp.Audio(\"source/ocr.wav\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<font size=4>具体实现代码请参考: https://github.com/DeepSpeech/demos/story_talker/run.sh<font>"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# 元宇宙来袭,构造你的虚拟人!"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 使用 PaddleGAN 合成的唇形视频"
]
},
{
"cell_type": "code",
"execution_count": 28,
"metadata": {
"scrolled": true
},
"outputs": [
{
"data": {
"text/html": [
"\n",
"<video controls width=\"650\" height=\"365\" src=\"output/tts_lips.mp4\">animation</video>\n"
],
"text/plain": [
"<IPython.core.display.HTML object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"from IPython.display import HTML\n",
"html_str = '''\n",
"<video controls width=\"650\" height=\"365\" src=\"{}\">animation</video>\n",
"'''.format(\"output/tts_lips.mp4\")\n",
"dp.display(HTML(html_str))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<font size=4>具体实现代码请参考: https://github.com/DeepSpeech/demos/metaverse/run.sh<font>"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# 前言\n",
"<br></br>\n",
"近年来,随着深度学习算法上的进步以及不断丰厚的硬件资源条件,**文本转语音Text-To-Speech, TTS** 技术在智能语音助手、虚拟娱乐等领域得到了广泛的应用。本教程将结合背景知识让用户能够使用PaddlePaddle完成文本转语音任务并结合光学字符识别Optical Character RecognitionOCR、自然语言处理Natural Language ProcessingNLP等技术“听”书、让名人开口说话。\n",
"\n",
"<br></br>\n",
"## 背景知识\n",
"<br></br>\n",
"为了更好地了解文本转语音任务的要素,我们先简要地回顾一下文本转语音的发展历史。如果你对此已经有所了解,或希望能尽快使用代码实现,请跳至第二章。\n",
"\n",
"<br></br>\n",
"### 定义\n",
"<br></br>\n",
"<!----\n",
"Note: \n",
"1.此句抄自 [李沐Dive into Dive Learning](https://zh-v2.d2l.ai/chapter_introduction/index.html)\n",
"2.修改参考A survey on Neural Speech Sysnthesis.\n",
"---> \n",
"<font size=4> 文本转语音又称语音合成Speech Sysnthesis指的是将一段文本按照一定需求转化成对应的音频这种特性决定了的输出数据比输入输入长得多。文本转语音是一项包含了语义学、声学、数字信号处理以及机器学习的等多项学科的交叉任务。虽然辨识低质量音频文件的内容对人类来说很容易但这对计算机来说并非易事。\n",
"</font>\n",
"\n",
"> Note: 这里可以提供一下资料出处嘛? 2021/11/09\n",
"<br></br>\n",
"\n",
"按照不同的应用需求,更广义的语音合成研究包括:\n",
"- <font size=4>语音转换Voice Transformation/Conversion</font>\n",
" - 说话人转换\n",
" - 语音到歌唱转换Speech to Singing\n",
" - 语音情感转换\n",
" - 口音转换\n",
"- <font size=4>歌唱合成 Singing Synthesis</font>\n",
" - <font size=4>歌词到歌唱转换Text/Lyric to Singing</font>\n",
"- <font size=4>可视语音合成Visual Speech Synthesis</font>\n",
"\n",
"<br></br>\n",
"### 发展历史\n",
"<br></br>\n",
"<!--\n",
"以下摘自维基百科 https://en.wikipedia.org/wiki/Speech_synthesis\n",
"--->\n",
"#### 机械式语音合成19世纪及以前\n",
"在第二次工业革命之前语音的合成主要以机械式的音素合成为主。1779年德裔丹麦科学家 Christian Gottlieb Kratzenstein 建造了人类的声道模型使其可以产生五个长元音。1791年 Wolfgang von Kempelen 添加了唇和舌的模型,使其能够发出辅音和元音。\n",
"#### 电子语音合成20世纪30年代\n",
"贝尔实验室于20世纪30年代发明了声码器Vocoder将语音自动分解为音调和共振此项技术由 Homer Dudley 改进为键盘式合成器并于 1939年纽约世界博览会展出。\n",
"#### 电子语音合成\n",
"第一台基于计算机的语音合成系统起源于 20 世纪 50 年代。1961 年IBM 的 John Larry Kelly以及 Louis Gerstman 使用 IBM 704 计算机合成语音,成为贝尔实验室最著名的成就之一。 1975年第一代语音合成系统之一 —— MUSAMUltichannel Speaking Automation问世其由一个独立的硬件和配套的软件组成。1978年发行的第二个版本也可以进行无伴奏演唱。90 年代的主流是采用 MIT 和贝尔实验室的系统,并结合自然语言处理模型。\n",
"> Note: 这里插一张timeline图\n",
"#### 当前的主流方法\n",
"\n",
"- <font size=4>基于统计参数的语音合成</font>\n",
" - <font size=4>隐马尔可夫模型Hidden Markov Model,HMM</font>\n",
" - <font size=4>深度学习网络Deep Neural NetworkDNN</font>\n",
"- <font size=4>波形拼接语音合成</font>\n",
" \n",
"- <font size=4>混合方法</font>\n",
" - <font size=4>参数轨迹指导的波形拼接</font>\n",
"- <font size=4>端到端神经网络语音合成</font>\n",
" - <font size=4>声学模型 + 声码器</font>\n",
" - <font size=4>“完全”端到端方法</font>\n",
"\n",
"<br></br>\n",
"## 基于深度学习的语音合成技术\n",
"<br></br>\n",
"### 语音合成基本知识\n",
"<br></br>\n",
"![信号处理流水线](source/signal_pipeline.png)\n",
"<br></br>\n",
"<font size=4>语音合成流水线包含 <font color=\"#ff0000\">**文本前端Text Frontend**</font> 、<font color=\"#ff0000\">**声学模型Acoustic Model**</font> 和 <font color=\"#ff0000\">**声码器Vocoder**</font> 三个主要模块:</font>\n",
"- <font size=4>通过文本前端模块将原始文本转换为字符/音素。</font>\n",
"- <font size=4>通过声学模型将字符/音素转换为声学特征如线性频谱图、mel 频谱图、LPC 特征等。</font>\n",
"- <font size=4>通过声码器将声学特征转换为波形。</font>\n",
"<br></br>\n",
"<img style=\"float: center;\" src=\"source/tts_pipeline.png\" width=\"85%\"/>"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# 实践\n",
"<br></br>\n",
"<font size=4>环境安装请参考: https://github.com/PaddlePaddle/DeepSpeech/blob/develop/docs/source/install.md</font>\n",
"\n",
"<br></br>\n",
"\n",
"<font size=4>使用 **PaddleSpeech** 提供的预训练模型合成一句中文。</font>\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## step 0 准备"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 获取预训练模型"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"collapsed": true,
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"mkdir: cannot create directory 'download': File exists\n",
"--2021-11-06 13:50:44-- https://paddlespeech.bj.bcebos.com/Parakeet/pwg_baker_ckpt_0.4.zip\n",
"Connecting to 172.19.56.199:3128... connected.\n",
"Proxy request sent, awaiting response... 200 OK\n",
"Length: 15774206 (15M) [application/zip]\n",
"Saving to: 'download/pwg_baker_ckpt_0.4.zip'\n",
"\n",
"pwg_baker_ckpt_0.4. 100%[===================>] 15.04M 454KB/s in 42s \n",
"\n",
"2021-11-06 13:51:30 (364 KB/s) - 'download/pwg_baker_ckpt_0.4.zip' saved [15774206/15774206]\n",
"\n",
"Archive: download/pwg_baker_ckpt_0.4.zip\n",
" creating: download/pwg_baker_ckpt_0.4/\n",
" inflating: download/pwg_baker_ckpt_0.4/pwg_default.yaml \n",
" inflating: download/pwg_baker_ckpt_0.4/pwg_snapshot_iter_400000.pdz \n",
" inflating: download/pwg_baker_ckpt_0.4/pwg_stats.npy \n",
"--2021-11-06 13:51:31-- https://paddlespeech.bj.bcebos.com/Parakeet/fastspeech2_nosil_baker_ckpt_0.4.zip\n",
"Connecting to 172.19.56.199:3128... connected.\n",
"Proxy request sent, awaiting response... 200 OK\n",
"Length: 488622795 (466M) [application/octet-stream]\n",
"Saving to: 'download/fastspeech2_nosil_baker_ckpt_0.4.zip'\n",
"\n",
"fastspeech2_nosil_b 100%[===================>] 465.99M 1.82MB/s in 4m 33s \n",
"\n",
"2021-11-06 13:56:06 (1.71 MB/s) - 'download/fastspeech2_nosil_baker_ckpt_0.4.zip' saved [488622795/488622795]\n",
"\n",
"Archive: download/fastspeech2_nosil_baker_ckpt_0.4.zip\n",
" creating: download/fastspeech2_nosil_baker_ckpt_0.4/\n",
" inflating: download/fastspeech2_nosil_baker_ckpt_0.4/phone_id_map.txt \n",
" inflating: download/fastspeech2_nosil_baker_ckpt_0.4/speech_stats.npy \n",
" inflating: download/fastspeech2_nosil_baker_ckpt_0.4/default.yaml \n",
" inflating: download/fastspeech2_nosil_baker_ckpt_0.4/snapshot_iter_76000.pdz \n"
]
}
],
"source": [
"!mkdir download\n",
"!wget -P download https://paddlespeech.bj.bcebos.com/Parakeet/pwg_baker_ckpt_0.4.zip\n",
"!unzip -d download download/pwg_baker_ckpt_0.4.zip\n",
"!wget -P download https://paddlespeech.bj.bcebos.com/Parakeet/fastspeech2_nosil_baker_ckpt_0.4.zip\n",
"!unzip -d download download/fastspeech2_nosil_baker_ckpt_0.4.zip"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 查看预训练模型的结构"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[01;34mdownload/pwg_baker_ckpt_0.4\u001b[00m\n",
"|-- pwg_default.yaml\n",
"|-- pwg_snapshot_iter_400000.pdz\n",
"`-- pwg_stats.npy\n",
"\n",
"0 directories, 3 files\n",
"\u001b[01;34mdownload/fastspeech2_nosil_baker_ckpt_0.4\u001b[00m\n",
"|-- default.yaml\n",
"|-- phone_id_map.txt\n",
"|-- snapshot_iter_76000.pdz\n",
"`-- speech_stats.npy\n",
"\n",
"0 directories, 4 files\n"
]
}
],
"source": [
"!tree download/pwg_baker_ckpt_0.4\n",
"!tree download/fastspeech2_nosil_baker_ckpt_0.4"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 导入 Python 包"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"%load_ext autoreload\n",
"%autoreload 2\n",
"import logging\n",
"import sys\n",
"import warnings\n",
"warnings.filterwarnings('ignore')\n",
"# PaddleSpeech 项目根目录放到 python 路径中\n",
"sys.path.insert(0,\"../../../\")"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/yuantian01/yt_py37/lib/python3.7/site-packages/scipy/linalg/__init__.py:212: DeprecationWarning: The module numpy.dual is deprecated. Instead of using dual, use the functions directly from numpy or scipy.\n",
" from numpy.dual import register_func\n",
"/home/yuantian01/yt_py37/lib/python3.7/site-packages/scipy/special/orthogonal.py:81: DeprecationWarning: `np.int` is a deprecated alias for the builtin `int`. To silence this warning, use `int` by itself. Doing this will not modify any behavior and is safe. When replacing `np.int`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. If you wish to review your current use, check the release note link for additional information.\n",
"Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n",
" from numpy import (exp, inf, pi, sqrt, floor, sin, cos, around, int,\n",
"/home/yuantian01/yt_py37/lib/python3.7/site-packages/librosa/core/constantq.py:1059: DeprecationWarning: `np.complex` is a deprecated alias for the builtin `complex`. To silence this warning, use `complex` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.complex128` here.\n",
"Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n",
" dtype=np.complex,\n",
"/home/yuantian01/yt_py37/lib/python3.7/site-packages/scipy/io/matlab/mio5.py:98: DeprecationWarning: `np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.bool_` here.\n",
"Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n",
" from .mio5_utils import VarReader5\n"
]
}
],
"source": [
"import argparse\n",
"import os\n",
"from pathlib import Path\n",
"import IPython.display as dp\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"import paddle\n",
"import soundfile as sf\n",
"import yaml\n",
"from paddlespeech.t2s.frontend.zh_frontend import Frontend\n",
"from paddlespeech.t2s.models.fastspeech2 import FastSpeech2\n",
"from paddlespeech.t2s.models.fastspeech2 import FastSpeech2Inference\n",
"from paddlespeech.t2s.models.parallel_wavegan import PWGGenerator\n",
"from paddlespeech.t2s.models.parallel_wavegan import PWGInference\n",
"from paddlespeech.t2s.modules.normalizer import ZScore\n",
"from yacs.config import CfgNode"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 设置预训练模型的路径"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"========Config========\n",
"batch_size: 64\n",
"f0max: 400\n",
"f0min: 80\n",
"fmax: 7600\n",
"fmin: 80\n",
"fs: 24000\n",
"max_epoch: 1000\n",
"model:\n",
" adim: 384\n",
" aheads: 2\n",
" decoder_normalize_before: True\n",
" dlayers: 4\n",
" dunits: 1536\n",
" duration_predictor_chans: 256\n",
" duration_predictor_kernel_size: 3\n",
" duration_predictor_layers: 2\n",
" elayers: 4\n",
" encoder_normalize_before: True\n",
" energy_embed_dropout: 0.0\n",
" energy_embed_kernel_size: 1\n",
" energy_predictor_chans: 256\n",
" energy_predictor_dropout: 0.5\n",
" energy_predictor_kernel_size: 3\n",
" energy_predictor_layers: 2\n",
" eunits: 1536\n",
" init_dec_alpha: 1.0\n",
" init_enc_alpha: 1.0\n",
" init_type: xavier_uniform\n",
" pitch_embed_dropout: 0.0\n",
" pitch_embed_kernel_size: 1\n",
" pitch_predictor_chans: 256\n",
" pitch_predictor_dropout: 0.5\n",
" pitch_predictor_kernel_size: 5\n",
" pitch_predictor_layers: 5\n",
" positionwise_conv_kernel_size: 3\n",
" positionwise_layer_type: conv1d\n",
" postnet_chans: 256\n",
" postnet_filts: 5\n",
" postnet_layers: 5\n",
" reduction_factor: 1\n",
" stop_gradient_from_energy_predictor: False\n",
" stop_gradient_from_pitch_predictor: True\n",
" transformer_dec_attn_dropout_rate: 0.2\n",
" transformer_dec_dropout_rate: 0.2\n",
" transformer_dec_positional_dropout_rate: 0.2\n",
" transformer_enc_attn_dropout_rate: 0.2\n",
" transformer_enc_dropout_rate: 0.2\n",
" transformer_enc_positional_dropout_rate: 0.2\n",
" use_masking: True\n",
" use_scaled_pos_enc: True\n",
"n_fft: 2048\n",
"n_mels: 80\n",
"n_shift: 300\n",
"num_snapshots: 5\n",
"num_workers: 4\n",
"optimizer:\n",
" learning_rate: 0.001\n",
" optim: adam\n",
"seed: 10086\n",
"updater:\n",
" use_masking: True\n",
"win_length: 1200\n",
"window: hann\n",
"---------------------\n",
"allow_cache: True\n",
"batch_max_steps: 25500\n",
"batch_size: 6\n",
"discriminator_grad_norm: 1\n",
"discriminator_optimizer_params:\n",
" epsilon: 1e-06\n",
" weight_decay: 0.0\n",
"discriminator_params:\n",
" bias: True\n",
" conv_channels: 64\n",
" in_channels: 1\n",
" kernel_size: 3\n",
" layers: 10\n",
" nonlinear_activation: LeakyReLU\n",
" nonlinear_activation_params:\n",
" negative_slope: 0.2\n",
" out_channels: 1\n",
" use_weight_norm: True\n",
"discriminator_scheduler_params:\n",
" gamma: 0.5\n",
" learning_rate: 5e-05\n",
" step_size: 200000\n",
"discriminator_train_start_steps: 100000\n",
"eval_interval_steps: 1000\n",
"fmax: 7600\n",
"fmin: 80\n",
"fs: 24000\n",
"generator_grad_norm: 10\n",
"generator_optimizer_params:\n",
" epsilon: 1e-06\n",
" weight_decay: 0.0\n",
"generator_params:\n",
" aux_channels: 80\n",
" aux_context_window: 2\n",
" bias: True\n",
" dropout: 0.0\n",
" freq_axis_kernel_size: 1\n",
" gate_channels: 128\n",
" in_channels: 1\n",
" interpolate_mode: nearest\n",
" kernel_size: 3\n",
" layers: 30\n",
" nonlinear_activation: None\n",
" nonlinear_activation_params:\n",
" \n",
" out_channels: 1\n",
" residual_channels: 64\n",
" skip_channels: 64\n",
" stacks: 3\n",
" upsample_scales: [4, 5, 3, 5]\n",
" use_causal_conv: False\n",
" use_weight_norm: True\n",
"generator_scheduler_params:\n",
" gamma: 0.5\n",
" learning_rate: 0.0001\n",
" step_size: 200000\n",
"lambda_adv: 4.0\n",
"n_fft: 2048\n",
"n_mels: 80\n",
"n_shift: 300\n",
"num_save_intermediate_results: 4\n",
"num_snapshots: 10\n",
"num_workers: 4\n",
"pin_memory: True\n",
"remove_short_samples: True\n",
"save_interval_steps: 5000\n",
"seed: 42\n",
"stft_loss_params:\n",
" fft_sizes: [1024, 2048, 512]\n",
" hop_sizes: [120, 240, 50]\n",
" win_lengths: [600, 1200, 240]\n",
" window: hann\n",
"top_db: 60\n",
"train_max_steps: 400000\n",
"trim_frame_length: 2048\n",
"trim_hop_length: 512\n",
"trim_silence: False\n",
"win_length: 1200\n",
"window: hann\n"
]
}
],
"source": [
"fastspeech2_config = \"download/fastspeech2_nosil_baker_ckpt_0.4/default.yaml\"\n",
"fastspeech2_checkpoint = \"download/fastspeech2_nosil_baker_ckpt_0.4/snapshot_iter_76000.pdz\"\n",
"fastspeech2_stat = \"download/fastspeech2_nosil_baker_ckpt_0.4/speech_stats.npy\"\n",
"pwg_config = \"download/pwg_baker_ckpt_0.4/pwg_default.yaml\"\n",
"pwg_checkpoint = \"download/pwg_baker_ckpt_0.4/pwg_snapshot_iter_400000.pdz\"\n",
"pwg_stat = \"download/pwg_baker_ckpt_0.4/pwg_stats.npy\"\n",
"phones_dict = \"download/fastspeech2_nosil_baker_ckpt_0.4/phone_id_map.txt\"\n",
"# 读取 conf 文件并结构化\n",
"with open(fastspeech2_config) as f:\n",
" fastspeech2_config = CfgNode(yaml.safe_load(f))\n",
"with open(pwg_config) as f:\n",
" pwg_config = CfgNode(yaml.safe_load(f))\n",
"print(\"========Config========\")\n",
"print(fastspeech2_config)\n",
"print(\"---------------------\")\n",
"print(pwg_config)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## step1 文本前端\n",
"<br></br>\n",
"\n",
"<font size=4>一个文本前端模块主要包含</font>:\n",
"- <font size=4>分段Text Segmentation</font>\n",
"\n",
"- <font size=4>文本正则化Text Normalization, TN</font>\n",
"\n",
"- <font size=4>分词Word Segmentation, 主要是在中文中)</font>\n",
"\n",
"- <font size=4>词性标注Part-of-Speech, PoS</font>\n",
"- <font size=4>韵律预测Prosody</font>\n",
"- <font size=4>字音转换Grapheme-to-PhonemeG2P</font>\n",
"<br></br>\n",
"<font size=2>Grapheme: **语言**书写系统的最小有意义单位; Phoneme: 区分单词的最小**语音**单位)</font>\n",
" - <font size=4>多音字Polyphone</font>\n",
" - <font size=4>变调Tone Sandhi</font>\n",
" - <font size=4>“一”、“不”变调</font>\n",
" - <font size=4>三声变调</font>\n",
" - <font size=4>轻声变调</font>\n",
" - <font size=4>儿化音</font>\n",
" - <font size=4>方言</font>\n",
"- ...\n",
"<br></br>\n",
"\n",
"<font size=4>(输入给声学模型之前,还需要把音素序列转换为 id</font>\n",
"\n",
"<br></br>\n",
"<font size=4>其中最重要的模块是<font color=\"#ff0000\"> 文本正则化 </font>模块和<font color=\"#ff0000\"> 字音转换TTS 中更常用 G2P代指 </font>模块。</font>\n",
"\n",
"<br></br>\n",
"\n",
"<font size=4>各模块输出示例:</font>\n",
"```text\n",
"• Text: 全国一共有112所211高校\n",
"• Text Normalization: 全国一共有一百一十二所二一一高校\n",
"• Word Segmentation: 全国/一共/有/一百一十二/所/二一一/高校/\n",
"• G2P注意此句中“一”的读音:\n",
" quan2 guo2 yi2 gong4 you3 yi4 bai3 yi1 shi2 er4 suo3 er4 yao1 yao1 gao1 xiao4\n",
" (可以进一步把声母和韵母分开)\n",
" q uan2 g uo2 y i2 g ong4 y ou3 y i4 b ai3 y i1 sh i2 er4 s uo3 er4 y ao1 y ao1 g ao1 x iao4\n",
" (把音调和声韵母分开)\n",
" q uan g uo y i g ong y ou y i b ai y i sh i er s uo er y ao y ao g ao x iao\n",
" 0 2 0 2 0 2 0 4 0 3 ...\n",
"• Prosody (prosodic words #1, prosodic phrases #2, intonation phrases #3, sentence #4):\n",
" 全国#2一共有#2一百#1一十二所#2二一一#1高校#4\n",
" (分词的结果一般是固定的,但是不同人习惯不同,可能有不同的韵律)\n",
"```\n",
"\n",
"<br></br>\n",
"<font size=4>文本前端模块的设计需要融入很多专业的或经验性的知识,人类在读文本的时候可以自然而然地读出正确的发音,但是这些计算机都是不知道的!</font>\n",
"\n",
"<br></br>\n",
"<font size=4>分词:</font>\n",
"```text\n",
"我也想过过过儿过过的生活\n",
"我也想/过过/过儿/过过的/生活\n",
"\n",
"货拉拉拉不拉拉布拉多\n",
"货拉拉/拉不拉/拉布拉多\n",
"\n",
"南京市长江大桥\n",
"南京市长/江大桥\n",
"南京市/长江大桥\n",
"```\n",
"<font size=4>变调和儿化音:</font>\n",
"```\n",
"你要不要和我们一起出去玩?\n",
"你要不2声要和我们一4声起出去玩\n",
"\n",
"不好,我要一个人出去。\n",
"不4声我要一2声个人出去。\n",
"\n",
"(以下每个词的所有字都是三声的,请你读一读,体会一下在读的时候,是否每个字都被读成了三声?)\n",
"纸老虎、虎骨酒、展览馆、岂有此理、手表厂有五种好产品\n",
"```\n",
"<font size=4>多音字(通常需要先正确分词):</font>\n",
"```text\n",
"人要行,干一行行一行,一行行行行行;\n",
"人要是不行,干一行不行一行,一行不行行行不行。\n",
"\n",
"佟大为妻子产下一女\n",
"\n",
"海水朝朝朝朝朝朝朝落\n",
"浮云长长长长长长长消\n",
"```\n",
"<br></br>\n",
"\n",
"<font size=4>PaddleSpeech TTS 文本前端解决方案:</font>\n",
"- <font size=4>文本正则: 规则</font>\n",
"- <font size=4>G2P:</font>\n",
" - <font size=4>多音字模块: pypinyin/g2pM</font>\n",
" - <font size=4>变调模块: 用分词 + 规则</font>\n",
"\n",
"<br></br>\n",
"<font size=4>相关 examples:\n",
" \n",
"https://github.com/PaddlePaddle/DeepSpeech/tree/develop/examples/other/tn\n",
"https://github.com/PaddlePaddle/DeepSpeech/tree/develop/examples/other/g2p</font>\n",
"\n",
"<br></br>\n",
"<font size=4>(未来计划推出基于深度学习的文本前端模块)</font>"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 构造文本前端对象"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Frontend done!\n"
]
}
],
"source": [
"# 传入 phones_dict 会把相应的 phones 转换成 phone_ids\n",
"frontend = Frontend(phone_vocab_path=phones_dict)\n",
"print(\"Frontend done!\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 调用文本前端"
]
},
{
"cell_type": "code",
"execution_count": 119,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"----------------------------\n",
"text norm results:\n",
"['你好,', '欢迎使用百度飞桨框架进行深度学习研究!']\n",
"----------------------------\n",
"g2p results:\n",
"[['n', 'i1', 'h', 'ao1', 'sp', 'h', 'uan1', 'ing1', 'sh', 'iii1', 'iong1', 'b', 'ai1', 'd', 'u1', 'f', 'ei1', 'j', 'iang1', 'k', 'uang1', 'j', 'ia1', 'j', 'in1', 'x', 'ing1', 'sh', 'en1', 'd', 'u1', 'x', 've1', 'x', 'i1', 'ian1', 'j', 'iou1', 'sp']]\n",
"----------------------------\n",
"phone_ids:\n",
"Tensor(shape=[39], dtype=int64, place=CUDAPlace(0), stop_gradient=True,\n",
" [155, 72 , 71 , 27 , 179, 71 , 199, 125, 177, 113, 135, 37 , 7 , 40 ,\n",
" 183, 69 , 46 , 151, 87 , 152, 204, 151, 77 , 151, 120, 260, 125, 177,\n",
" 51 , 40 , 183, 260, 250, 260, 72 , 82 , 151, 140, 179])\n"
]
}
],
"source": [
"input = \"你好,欢迎使用百度飞桨框架进行深度学习研究!\"\n",
"# text norm 时会进行分句merge_sentences 表示把分句的结果合成一条\n",
"# 可以把 merge_sentences 设置为 False, 多个子句并行调用声学模型和声码器提升合成速度\n",
"input_ids = frontend.get_input_ids(input, merge_sentences=True, print_info=True,robot=True)\n",
"# 由于 merge_sentences=True, input_ids[\"phone_ids\"][0] 即表示整句的 phone_ids\n",
"phone_ids = input_ids[\"phone_ids\"][0]\n",
"print(\"phone_ids:\")\n",
"print(phone_ids)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## step1+ 文本前端深度学习化\n",
"<br></br>\n",
"<img style=\"float: center;\" src=\"source/text_frontend_struct.png\" width=\"100%\"/>"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## step2 声学模型\n",
"<br></br>\n",
"<font size=4>声学模型将字符/音素转换为声学特征如线性频谱图、mel 频谱图、LPC 特征等,声学特征以 “帧” 为单位,一般一帧是 10ms 左右,一个音素一般对应 5~20 帧左右, 声学模型需要解决的是 <font color=\"#ff0000\">“不等长序列间的映射问题”</font>,“不等长”是指,同一个人发不同音素的持续时间不同,同一个人在不同时刻说同一句话的语速可能不同,对应各个音素的持续时间不同,不同人说话的特色不同,对应各个音素的持续时间不同。这是一个困难的“一对多”问题。</font>\n",
"```\n",
"# 卡尔普陪外孙玩滑梯\n",
"000001|baker_corpus|sil 20 k 12 a2 4 er2 10 p 12 u3 12 p 9 ei2 9 uai4 15 s 11 uen1 12 uan2 14 h 10 ua2 11 t 15 i1 16 sil 20\n",
"```\n",
"\n",
"<font size=4>声学模型主要分为自回归模型和非自回归模型,其中自回归模型在 `t` 时刻的预测需要依赖 `t-1` 时刻的输出作为输入,预测时间长,但是音质相对较好,非自回归模型不存在预测上的依赖关系,预测时间快,音质相对较差。</font>\n",
"\n",
"<br></br>\n",
"<font size=4>主流声学模型发展的脉络:</font>\n",
"- <font size=4>自回归模型:</font>\n",
" - <font size=4>Tacotron</font>\n",
" - <font size=4>Tacotron2</font>\n",
" - <font size=4>Transformer TTS</font>\n",
"- <font size=4>非自回归模型:</font>\n",
" - <font size=4>FastSpeech</font>\n",
" - <font size=4>SpeedySpeech</font>\n",
" - <font size=4>FastPitch</font>\n",
" - <font size=4>FastSpeech2</font>\n",
" - ...\n",
" \n",
"<br></br>\n",
"<font size=4>在本教程中,我们使用 `FastSpeech2` 作为声学模型。<font>\n",
"![FastSpeech2](source/fastspeech2.png)\n",
"<font size=4>PaddleSpeech TTS 实现的 FastSpeech2 与论文不同的地方在于,我们使用的的是 phone 级别的 `pitch` 和 `energy`(与 FastPitch 类似)。<font>\n",
"![FastPitch](source/fastpitch.png)\n",
"<font size=4>更多关于声学模型的发展及改进的介绍: https://paddlespeech.readthedocs.io/en/latest/tts/models_introduction.html<font>"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 初始化声学模型 FastSpeech2"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"vocab_size: 268\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/yuantian01/yt_py37/lib/python3.7/site-packages/paddle/framework/io.py:415: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\n",
" if isinstance(obj, collections.Iterable) and not isinstance(obj, (\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"FastSpeech2 done!\n"
]
}
],
"source": [
"with open(phones_dict, \"r\") as f:\n",
" phn_id = [line.strip().split() for line in f.readlines()]\n",
"vocab_size = len(phn_id)\n",
"print(\"vocab_size:\", vocab_size)\n",
"odim = fastspeech2_config.n_mels\n",
"model = FastSpeech2(\n",
" idim=vocab_size, odim=odim, **fastspeech2_config[\"model\"])\n",
"# 预训练好的参数赋值给模型\n",
"model.set_state_dict(paddle.load(fastspeech2_checkpoint)[\"main_params\"])\n",
"# 推理阶段不启用 batch norm 和 dropout\n",
"model.eval()\n",
"# 读取数据预处理阶段数据集的均值和标准差\n",
"stat = np.load(fastspeech2_stat)\n",
"mu, std = stat\n",
"mu = paddle.to_tensor(mu)\n",
"std = paddle.to_tensor(std)\n",
"fastspeech2_normalizer = ZScore(mu, std)\n",
"# 构造包含 normalize 的新模型\n",
"fastspeech2_inference = FastSpeech2Inference(fastspeech2_normalizer, model)\n",
"fastspeech2_inference.eval()\n",
"print(\"FastSpeech2 done!\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 调用声学模型"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"shepe of mel (n_frames x n_mels):\n",
"[347, 80]\n"
]
},
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAk8AAAGoCAYAAABfbgHJAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/MnkTPAAAACXBIWXMAAAsTAAALEwEAmpwYAAEAAElEQVR4nOz9e9BtXZcXBv3GnGs/5/3e7+vur+luOt1NE64SKhiicolVVgQhFUKstKG0Q+IFCZEYy1hRq4QkiEii1VqWAcuUsW1FiAZpiRFSYkgFpaKWJDQkQC5FGS6dbuhu+v69t/M8e805/GNc51xr7Wc/5/Ke89B71NlnP3vvteaa1zF+4zLHJGbGjW50oxvd6EY3utGNrqPyritwoxvd6EY3utGNbvSc6AaebnSjG93oRje60Y2eQDfwdKMb3ehGN7rRjW70BLqBpxvd6EY3utGNbnSjJ9ANPN3oRje60Y1udKMbPYFu4OlGN7rRjW50oxvd6Al0A083utFfZ0REv4KIvv9d1+NGN7rRjf56pRt4utGN3iMior9MRA9E9PXT9/8WETER/aw38IxvI6J/m4i+QkQ/QkT/DyL62a9b7iPPZCL6eW/zGTe60Y1u9HnRDTzd6EbvH/0lAH+/fSCi/yiAD99EwQpgfh+A/y6ArwHwswH8swDamyj/Neq1vOHy6pss70Y3utGNMt3A041u9P7RPw/gv5I+/wYI4HEiohdE9D8jov+QiH6IiP45IvrCFWX/rQD+EjP/MRb6iJn/RWb+D7Xc30FEf5CI/gARfUREf5qIfnF67jcT0b9IRD9MRH+JiP5b6bdKRP8EEf0FvfdPEdG3EtG/rpf8GSL6mIj+PnMtEtFvIaIfBPB7tE2/i4j+qr5+FxG9SOX/94joB/S3fyhbs4jof09E/ysi+iNE9AmAX0lEf7da7L5CRN9HRL8jlfWz9P7fqL/9OBH914nolxLRnyWinyCi/+V1w3WjG93opxrdwNONbvT+0Z8A8NVE9AvVgvLrAfwfpmu+A8B/BAKGfh6AbwHw268o+08D+JuI6J8hol9JRF/auebbAPyfAfw0AP8CgP8rEZ2IqAD4lwH8GX3erwLwjxHR36n3/XcgFrNfC+CrAfyDAD5l5r9df//FzPwlZv4D+vlv0Gf8jQB+M4B/EsDfpm36xQB+GYDfBgBE9Gu0/F+t7f0VO/X+BwD8jwB8FYD/N4BPICD0ywD+bgD/CBH956Z7fjmAnw/g7wPwu7QOvxrA3wzg24noP73znBvd6EY/xekGnm50o/eTzPr0dwD49wH8FfuBiAgCNv7bzPxjzPwRgP8xBGRdJGb+ixDg8S0AvhvAj6jVJoOoP8XMf5CZzwD+5wA+gICaXwrgG5j5dzLzg5b1v0nP/YcA/DZm/vNq1fozzPyjF6rTAfwPmPmemT8D8F8E8DuZ+a8x8w8D+B8C+C/rtd8O4Pcw87/LzJ8C+B075f0hZv7/MHNn5pfM/MeZ+c/p5z8L4PcDmMHQP6XX/qsQsPX79fl/BcD/C8B/7GKH3uhGN/opSW80zuBGN7rRG6N/HsC/DolJ+n3Tb98AiYH6U4KjAAAE4Ko4H2b+ExAwAiL6pQD+AMTi8o/rJd+Xru26c++bATCAbyain0jFVQjIAIBvBfAXrqmD0g8z88v0+ZsBfG/6/L36nf32Pem378OWhu+I6JdDLHS/CMAdgBcQi1qmH0p/f7bzec8yd6Mb3einON0sTze60XtIzPy9kMDxXwvg/zL9/CMQwf43M/OX9fU1zPxkQc/Mf1LL/0Xp62+1P9RV9zMA/FUIOPlL6ZlfZuavYuZfq5d/H4Cf+5THT5//KsSFZ/Qz9TsA+AGtx6aOF8r7FwD8YQDfysxfA+Cfg4DMG93oRjd6LbqBpxvd6P2l3wTgP8PMn+QvmblD3GX/DBH9dAAgom9JsUeHRET/KSL6r6X7/iYAfw8kzsroP0FEv053wP1jAO71938TwEca5P0FDRD/RWq9AoDvAvBPEdHPJ6G/hYi+Tn/7IQA/55Hq/X4Av42IvkFTNfx2RKzXdwP4jRoH9iGA//5jbYXEPv0YM78kol8GiYm60Y1udKPXpht4utGN3lNi5r/AzN9z8PNvAfAfAPgTRPQVAP8agF9wRbE/AQFLf46IPgbwrwD4lwD8T9M1fwgSQP3jkJijX8fMZ2ZuAP6z0B17EAvYd0FSHgASH/XdAP5VAF8B8L8FYDsAfweA36u72L79oG7/NMQ192cB/DlIcPs/DQDM/H8H8L8A8P+0dus99xfa+t8A8DuJ6CMIEPvuC9fe6EY3utHVRMyzpftGN7rRT1XS7fw/j5n/S++6LpeIiH4hgH8HwAtmXt91fW50oxv91KKb5elGN7rRsyAi+ns1F9TXAvifAPiXb8DpRje60bugG3i60Y1u9FzoHwbw1yA7+hqAf+TdVudGN7rRT1W6ue1udKMb3ehGN7rRjZ5AN8vTjW50oxvd6EY3utET6FkkybyjF/wBfQlEBJQCLBW8FPRTQT8BvQIoABMkiwupNc0+A5IBhiEXmbHNrp3v2yO7L9/rv+kX+jv5s+Lv+Tv/vjOoA+gM6gz0DnQGOF7ZOuh9UAp4qeh3BK4ELgCXqBen+s3N4twve9/tZcLh6e/cZ3M/z9cj+py6/lkALB21dhRiFK0kTbd2JjATOhP6WkArgRpALYqe2+uPfKQ93i89TRmePndO48Y+ZlwJ7UVBu5vaPj8zj3kfy0Z+JgPloYNaB8A4f/UJ/S6Nabpv09bc7DTPAMS6yHWi9PBOUi/Eb8P1+dqpYfWuYykNlRgM4H5d0NcCNPLxGeZ/j36gVec76/vQCEK/K+gLoZ8Arvs1GNp52CFxLbHMG+vHdgfgg46lNiylg5nw0Cp6K8CZ4v5iBaTnAtFRnH62cbZ2aru5An0B+MRY7hqW2lHQwSCce0VrBdwI6DT1ua4dYqAAy6nhVBoWknXTmdBBaL1I3dciY5qPec7zbC66AlwZtEgH+dCTsR8CNwKtif9NKretR+rWdh54zvoFQl8AFI72eIVSH6ZxsrXElYHKKEVf2tHMwht6L1G/vLZy84v0e1k6ltqxUIPllmUmNJb+a1oW2lSnS5nB0rhv5uIkI0rmW5D5x3eMeup4UVeQjufaC9ZWwavOh0vl5jpM/eiX577teawY1GIdfvbwE3hon33uedD+zl/5Rf7RH3v9c8n/1J+9/6PM/GveQJWuomcBnj7AF/G3vfi7QMsC+vBD4Bu+Fudv+CI++/o7fPJNFfdfBtoXGO0Fo98xeBFGg8rAopKhE3AuKC+LL7K+APyiA5VBpw5auiyqvAKZwB3o5yoM1UBUGa+hVRZdWYFyJpR7+ZtWoN4D9UEERjnL3/XMKGfG8rJj+bShfraifHYGffIS9PIBfP8AnB/AD2dwi4lFd3egDz4AffELaN/wNfj0W76A+6+qOH8JOH9RhA1Ihe4knIm1+hXgBeDCDpr6It+jsAvsAYD5gpP2c2XwAvS7Dtz1YKgMYT5db+4AsTK2lbB8SuhVxou+7
h5f/ppP8MW7M14sK2oSCKxC4bPzCffrgk/vT/j0Rz/E6ccWnH6ScPcVOCPnonWP4UC/izbxzOxZxoWaMLTyAJQzUHx82D8vLzvKmWXsWke5byifPmD9mi/gJ3/OB/jobyRwjTJ5AXplBy3UgLISygNw+hio91Y2UBp739YHxhe//1OUn/wUtDb81V/zzfjkZzLWDztQgfpJccZrAs/GJ4C4AKFyr8KTgH7HAhJ0XFnXBev8rZ8W1JdyHxPAJ+tPFoG3MDitB1LhzpXx5W/6Cr7xqz7Cl0736Ez4Cz/29fjKj3wR9cdPOH1E0b9nXQP3jHoPnD7tuPuJFcunK8pnK8r9GWgdKKIY8Knis2/+Ij79hgWffSPh/ssxJzMQpSZzysFfFnY2f9M41Hvg7icZ9UEu+8rPAfjnfopv+NqP8I0ffoSX7YS/8pNfg49+8guoP/gCYOk/Np6S0WsHSOc5NV3zvvaB5SX0nbF8xrj/MuHl1xE++xkrvuFbfxw//Ysf48PlAQ9twQ9+8lX4sa98iPP
"text/plain": [
"<Figure size 648x432 with 2 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"with paddle.no_grad():\n",
" mel = fastspeech2_inference(phone_ids)\n",
"print(\"shepe of mel (n_frames x n_mels):\")\n",
"print(mel.shape)\n",
"# 绘制声学模型输出的 mel 频谱\n",
"fig, ax = plt.subplots(figsize=(9, 6))\n",
"im = ax.imshow(mel.T, aspect='auto',origin='lower')\n",
"fig.colorbar(im, ax=ax)\n",
"plt.title('Mel Spectrogram')\n",
"plt.xlabel('Time')\n",
"plt.ylabel('Frequency')\n",
"plt.tight_layout()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## step3 声码器\n",
"<br></br>\n",
"<font size=4>声码器将声学特征转换为波形。声码器需要解决的是 <font color=\"#ff0000\">“信息缺失的补全问题”</font>。信息缺失是指,在音频波形转换为频谱图的时候,存在**相位信息**的缺失,在频谱图转换为 mel 频谱图的时候,存在**频域压缩**导致的信息缺失假设音频的采样率是16kHZ, 一帧的音频有 10ms也就是说1s 的音频有 16000 个采样点,而 1s 中包含 100 帧,每一帧有 160 个采样点,声码器的作用就是将一个频谱帧变成音频波形的 160 个采样点,所以声码器中一般会包含**上采样**模块。<font>\n",
" \n",
"<br></br>\n",
"<font size=4>与声学模型类似,声码器也分为自回归模型和非自回归模型, 更细致的分类如下:<font>\n",
"\n",
"- <font size=4>Autoregression<font>\n",
" - <font size=4>WaveNet<font>\n",
" - <font size=4>WaveRNN<font>\n",
" - <font size=4>LPCNet<font>\n",
"- <font size=4>Flow<font>\n",
" - <font size=4>WaveFlow<font>\n",
" - <font size=4>WaveGlow<font>\n",
" - <font size=4>FloWaveNet<font>\n",
" - <font size=4>Parallel WaveNet<font>\n",
"- <font size=4>GAN<font>\n",
" - <font size=4>WaveGAN<font>\n",
" - <font size=4>arallel WaveGAN<font>\n",
" - <font size=4>MelGAN<font>\n",
" - <font size=4>HiFi-GAN<font>\n",
"- <font size=4>VAE\n",
" - <font size=4>Wave-VAE<font>\n",
"- <font size=4>Diffusion<font>\n",
" - <font size=4>WaveGrad<font>\n",
" - <font size=4>DiffWave<font>\n",
"\n",
"<br></br>\n",
"<font size=4>PaddleSpeech TTS 主要实现了百度的 `WaveFlow` 和一些主流的 GAN Vocoder, 在本教程中,我们使用 `Parallel WaveGAN` 作为声码器。<font>\n",
"\n",
"<br></br> \n",
"<img style=\"float: center;\" src=\"source/pwgan.png\" width=\"75%\"/> \n",
"\n",
"<br></br>\n",
"<font size=4>各 GAN Vocoder 的生成器和判别器的 Loss 的区别如下表格所示:<font>\n",
" \n",
"Model | Generator Loss |Discriminator Loss\n",
":-------------:| :------------:| :-----\n",
"Parallel Wave GAN| adversial loss <br> Feature Matching | Multi-Scale Discriminator |\n",
"Mel GAN |adversial loss <br> Multi-resolution STFT loss | adversial loss|\n",
"Multi-Band Mel GAN | adversial loss <br> full band Multi-resolution STFT loss <br> sub band Multi-resolution STFT loss |Multi-Scale Discriminator|\n",
"HiFi GAN |adversial loss <br> Feature Matching <br> Mel-Spectrogram Loss | Multi-Scale Discriminator <br> Multi-Period Discriminato |\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 初始化声码器 Parallel WaveGAN"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Parallel WaveGAN done!\n"
]
}
],
"source": [
"vocoder = PWGGenerator(**pwg_config[\"generator_params\"])\n",
"# 预训练好的参数赋值给模型\n",
"vocoder.set_state_dict(paddle.load(pwg_checkpoint)[\"generator_params\"])\n",
"vocoder.remove_weight_norm()\n",
"# 推理阶段不启用 batch norm 和 dropout\n",
"vocoder.eval()\n",
"# 读取数据预处理阶段数据集的均值和标准差\n",
"stat = np.load(pwg_stat)\n",
"mu, std = stat\n",
"mu = paddle.to_tensor(mu)\n",
"std = paddle.to_tensor(std)\n",
"pwg_normalizer = ZScore(mu, std)\n",
"# 构造包含 normalize 的新模型\n",
"pwg_inference = PWGInference(pwg_normalizer, vocoder)\n",
"pwg_inference.eval()\n",
"print(\"Parallel WaveGAN done!\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 调用声码器"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"scrolled": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"shepe of wav (time x n_channels):\n",
"[104100, 1]\n"
]
},
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAoAAAAGoCAYAAADW2lTlAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/MnkTPAAAACXBIWXMAAAsTAAALEwEAmpwYAABeHUlEQVR4nO3dd5QUZdYG8OfCkKNIjiOSJEgaERUQFBHBnHV1za67nzkthjVhQF3DGtacs64JBEFEJCNRcg4CkkVynJn7/dE90NPTobq7qt6q6ud3zhymu2u6LhOqb7/hXlFVEBEREVH2KGU6ACIiIiJyFxNAIiIioizDBJCIiIgoyzABJCIiIsoyTACJiIiIsgwTQCIiIqIswwSQiMhhIlJHRMaKyA4RecZ0PERETACJKNBE5B4R+T7qviVx7rvYoTCuB7AZQFVVvcOhcxARWcYEkIiCbiyA40WkNACISD0AZQB0jLqvWfhYJzQBMF/TqLwvIjkOxENEWY4JIBEF3VSEEr4O4dvdAYwGsCjqvmUAThWRBeGp2uUi8reiJwnff3rE7RwR2SQincK3u4rIRBHZKiKzRKRn+P53AVwB4G4R2SkivUWknIg8LyJrwx/Pi0i58PE9RWSNiPxTRNYDeEdEHhKRL0Tkw3Bsc0SkRXh0c6OIrBaRPg59/4gogJgAElGgqep+AL8A6BG+qweAcQDGR903FsBGAKcDqArgKgDPFSV4AD4BcEnEU58KYLOqzhCRBgCGAngUQA0AdwL4UkRqqeqVAD4C8JSqVlbVHwHcB6ArQgloewBdANwf8dx1w8/TBKHpYwA4A8AHAA4DMBPACISu4Q0APALgtbS+QUSUlZgAElE2GINDyV53hBLAcVH3jVHVoaq6TEPGAPgh/BgAfAzgTBGpGL59KUJJIQBcBmCYqg5T1UJVHQlgGoB+ceL5C4BHVHWjqm4C8DCAyyMeLwTwoKruU9U94fvGqeoIVc0H8AWAWgAGqeoBAJ8CyBWR6ql+Y4goOzEBJKJsMBZANxGpAaCWqi4BMBGhtYE1ALQFMFZEThORySKyRUS2IpTA1QQAVV0KYAGAM8JJ4JkIJYVAaKTugvD079bw13YDUC9OPPUB/BZx+7fwfUU2qereqK/ZEPH5HoRGHwsibgNA5WTfCCIiAODiYiLKBpMAVANwHYAJAKCq20Vkbfi+teGPeQD+CuBbVT0gIt8AkIjnKZoGLoXQpo6l4ftXA/hAVa+zGM9ahJLGeeHbjcP3FUl5swgRUSo4AkhEgReeRp0G4HaEpn6LjA/fNxZAWQDlAGwCkC8ipwGI3ljxafi+v+PQ6B8AfIjQyOCpIlJaRMqHN3M0jBPSJwDuF5FaIlITwAPh5yAicgUTQCLKFmMA1EYo6SsyLnzfWFXdAeBmAJ8D+BOhNX6DI59AVdchNJp4PIDPIu5fDeAsAPcilECuBnAX4l9jH0UoIZ0NYA6AGeH7iIhcIWmUpSIiIiIiH+MIIBEREVGWYQJIRERElGWYABIRERFlGSaARERERFkmcHUAa9asqbm5uabDICIiIjJu+vTpm1W1VvT9gUsAc3NzMW3aNNNhEBERERknIr/Fup9TwERERERZhgkgERERUZZhAkhERESUZZgAEhEREWUZJoBEREREWYYJIBEREVGWYQJIRERElGWYABIRERFlGSaARERERFmGCSARERFRlmECSERERJRlmAASERERZRkmgERERERZhgkgERERUZZhAkhEaXl25GK8PHqp6TCIiCgNOaYDICJ/emHUEgBAn9Z10LxOFcPREBFRKjgCSEQZufN/s02HQEREKWICSERERJRlmAASERERZRkmgERERERZhgkgERERUZZhAkhERESUZZgAEhEREWUZJoBElLIPJq00HQIREWWACSARpezDyatMh0BERBlgAkhERESUZZgAEhEREWUZJoBElBlV0xEQEVGKmAASERERZRkmgESUkv35hVi0YYfpMIiIKANMAIkoJdv2HDAdAhERZYgJIBEREVGWYQJIRERElGWYABJRRmat2Ya9BwpMh0FERClgAkhEGduwfa/pEIiIKAVMAImIiIiyDBPAgPvPj0twwwfTTYdBREREHpJjOgByzoGCQjz342LTYRAREZHHcAQwwN6buNJ0CERERORBTAADbOe+fNMhEBERkQcxASQiIiLKMkYTQBHpKyKLRGSpiAyIc8yFIjJfROaJyMdux0hExT00eJ7pEIiIKEPGEkARKQ3gZQCnAWgN4BIRaR11THMA9wA4QVXbALjV7TiJqLihc9aVuO+MF8cbiISIiNJlcgSwC4ClqrpcVfcD+BTAWVHHXAfgZVX9EwBUdaPLMRKRBdv3cr0pEZGfmEwAGwBYHXF7Tfi+SC0AtBCRCSIyWUT6xnoiEbleRKaJyLRNmzY5FC4RERFRMHh9E0gOgOYAegK4BMAbIlI9+iBVfV1V81Q1r1atWu5G6GHP/7jk4Ofbdh8wGAkRERF5ickE8HcAjSJuNwzfF2kNgMGqekBVVwBYjFBCSCm69v2ppkMgIiIijzCZAE4F0FxEjhCRsgAuBjA46phvEBr9g4jURGhKeLmLMQbGsk27TIdAREREHmEsAVTVfAA3AhgBYAGAz1V1nog8IiJnhg8bAeAPEZkPYDSAu1T1DzMRExEREQWD0V7AqjoMwLCo+x6I+FwB3B7+ICIiIiIbeH0TCBERERHZjAkgEdnihVFLkh9ERESewAQwS2zZtd90CBRw709aaToEIiKyiAlgFlm5mTuBKTM/zFtvOgQiIrIBE8Assr+g0HQI5HMzV281HQIREdmACSARERFRlmECmEVGzt9gOgQiIiLyACaAWeTpEYtMh0BEREQewASQiIiIKMswASQiIiLKMkwAs8zeAwWmQyAiIiLDmABmmUeHzjcdAgWUqukIiIjIKiaAWWbl5t2mQyAiIiLDmAASERERZRkmgAE1dPY60yFQAInpAIiIyBZMAAPq7QkrTIdAREREHsUEkIiIiCjLMAEkIiIiyjJMAImIiIiyDBNAIiIioizDBDCgtuzaH/P+ZZt2uhwJEREReQ0TwIBasXlXzPvXbduLfflsB0fp+e/Py+I+xkYgRET+wQQwCxUU8qWaiIgomzEBJCJbbNm1Hzv2HjAdBhERWcAEkHzl+znr0Oe5MSjkKKYn/bxok+kQiIjIAiaA5Cu3fPYrFm/YiRV/xF7jSERERMkxASRf2Z9fCAC44/NZhiMhIiLyLyaAWeiVBDs5/YIbWYiIiNLHBDALvfjTUtMhEBERkUFMAImIiIiyDBNAIiIioizDBJB8ac7v20yHQERE5FtMAInINtyaQ0TkD0wAs9TG7XtNh0BERESGMAHMUvd8Ncd0CERERGQIE8AAyi8oTH4M6+gRERFlLSaAAfTo0AWmQyAiIiIPYwIYQOOWbDIdAhEREXkYE8AsNWYxk0QiIqJsxQQwi23dvd90CERERGQAE8AstnHHPtMhEBERkQFMALNYn+fGmg6BiIiIDGACSL6xeSdHLE36dMqqpMe8P
3Gl84EQEVHGjCaAItJXRBaJyFIRGZDguPNEREUkz834yFvGL9lsOoSs9vrY5UmPmfbbny5EQkREmTKWAIpIaQAvAzgNQGsAl4hI6xjHVQFwC4Bf3I2QiIiIKJhMjgB2AbBUVZer6n4AnwI4K8ZxAwE8CYDNax3w5rjkozpetXbrHtMhEBER+ZLJBLABgNURt9eE7ztIRDoBaKSqQxM9kYhcLyLTRGTapk2sb5eKz6auTn6QR81bu910CERERL7k2U0gIlIKwLMA7kh2rKq+rqp5qppXq1Yt54MjT7ju/WmmQyAiIvIlkwng7wAaRdxuGL6vSBUAbQH8LCIrAXQFMJgbQbIXdwETERHZw2QCOBVAcxE5QkTKArgYwOCiB1V1m6rWVNVcVc0FMBnAmarKYZ8s9ejQBaZDICIiCgRjCaCq5gO4EcAIAAsAfK6q80TkERE501RcREREREGXY/LkqjoMwLCo+x6Ic2xPN2IKAhExHQIFEX+tiIgCw7ObQCh9SzfuNB0CEREReRgTQCIiIqI
"text/plain": [
"<Figure size 648x432 with 1 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"with paddle.no_grad():\n",
" wav = pwg_inference(mel)\n",
"print(\"shepe of wav (time x n_channels):\")\n",
"print(wav.shape)\n",
"# 绘制声码器输出的波形图\n",
"wave_data = wav.numpy().T\n",
"time = np.arange(0, wave_data.shape[1]) * (1.0 / fastspeech2_config.fs)\n",
"fig, ax = plt.subplots(figsize=(9, 6))\n",
"plt.plot(time, wave_data[0])\n",
"plt.title('Waveform')\n",
"plt.xlabel('Time (seconds)')\n",
"plt.ylabel('Amplitude (normed)')\n",
"plt.tight_layout()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 播放音频"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {
"scrolled": true
},
"outputs": [
{
"data": {
"text/html": [
"\n",
" <audio controls=\"controls\" >\n",
" <source src=\"data:audio/wav;base64,UklGRmwtAwBXQVZFZm10IBAAAAABAAEAwF0AAIC7AAACABAAZGF0YUgtAwDS/9D/0P/T/9L/1f/X/9b/2v/b/9n/2v/g/+P/5P/j/+L/4//m/+n/6//r/+z/7f/t/+3/7//t/+//7//w//D/9P/0//L/8v/0//T/9//3//f/9v/2//f/+P/4//f/9v/4//f/+P/4//j/9//3//b/9v/3//f/9f/2//b/9P/1/wEAAAD///3//P/+//7//v/+//z//P/7//v/+//7//z/+//7//v//P/7//z/+//7//z/+//7//v//P/7//z/+v/9//z/+v/8//v/+//8//v//P/9//z//f/8//7//v/+/wAA//8AAAAAAAAAAAAAAAD//////v/+//////8AAP///P/7//v/+//7//r/+v/5//f/9v/1//X/9P/0//T/9f/1//T/9P/z//P/8//z//P/9P/z//P/8v/y//L/8v/y//H/8v/y//D/8P/x//D/8f/x//H/8f/v//H/8P/w//D/8f/w//D/8P/v//D/8f/y//D/8v/y//L/8P/x//H/8P/1//T/9P/z//L/8//y//L/8f/y//L/8v/y//L/8v/x//L/8v/y//L/8f/y//P/8v/z//P/9f/z//P/9P/z//T/8//z//T/8//0//P/9f/0//T/9P/1//P/9P/1//X/9f/2//b/9//2//j/9//3//j/9//3//f/9//3//f/9//4/wAAAAAAAAAA/////////v/9//3//P/7//z//P/8//z//P/8//v/+//9//v//P/9//v//P/7//v/+v/6//z/+//5//r/+v/6//r/+//7//r/+v/7//v/+v/6//v/+//7//v/+//8//v//P/8//v/+//7//z/+//9//v/+//7//3//P/9//v/+//8//3//f/8//3//P/9//3//P/8//3//P/9//z//f/8//z//f/8//3//P/8//7//f/+/////v/+//3//v/9//3//f/9//z//P/9//z//f/+//3//v/+//3//v////7/AAD+/wAAAAAAAP7//v////7///8AAP//AAD///7////+/////v/+//3//v/9//3//v/9//z//f/+//7//P/9//v//P/8//z//f/+//7//f/+//v////+/wAA/v8AAP/////+//7//v/+//7//f/+//7//f/+//7//v////7///8AAP///f8AAAAAAAD//wAAAAD////////+//7//////////f////7////+////AAD///7////+//3//v/+//7//f/+//3//f/9//3//v/+//7//v////7///8AAAAAAAAAAP///////wAAAAABAAAAAQAAAAIAAAACAAEAAgAAAAAAAAAAAP////8AAP///v/+/////f/+//3//P/7//v/+v/6//n/+v/6//n/+v/5//n/+f/6//n/+f/5//r/+P/5//r/+v/6//v/+f/6//v/+f/5//n/+f/5//f/9//2//X/9v/3//X/9v/2//f/9f/2//f/9//2//X/9//1//b/9v/1//X/9P/2//T/9P/0//X/9P/2//b/9v/2//f/9//0//f/9v/2//f/9//2//j/+P/3//j/+v/5//j/+f/5//j/+f/4//n/+f/4//f/+f/3//j/+P/5//n/+f/4//f/+P/2//j/+P/3//j/+P/5//j/+f/5//j/+P/4//f/+f/3//j/+P/4//j/+P/4//j/+P/4//j/+P/3//j/+f/4//r/+v/5//r/+v/6//r/+//5//r/+v/5//v/+//7//r/+//7//n/+//7//v//P/9//z//f/8//3//f/7//z//P/9//z//f/+//z//f/8//z//P/9//z//f/9//z//f/+//7//f/+/////f/+/////v/+/////v///////v////7//v8AAAAAAAD//wAA//8AAAAAAAD//wAAAAD+//////////7////+/////v////7////+///////9//3//v//////AAAAAAAAAAD/////AAD///7/AAD+//3///8AAP///v8AAAAA////////AAAAAAAA//8AAAAAAAAAAAAAAAD//wAAAAD//wAA//8AAAAA//8BAAEAAAAAAAEAAAD//wAAAAD+/wAAAAD9/wAA//8AAAAAAAAAAAAA/f/9//3////+//3////9//v//f/8//z/+//6//3/+v/6//v//P/5//r/+//8//r/+v/6//r/+f/4//r/+//6//r/+v/6//v/+v/6//v/+//7//v/+f/6//r//f/8//v//f/+//z//P/9//7//v/9//////////3//v/9//3////+////////////AAD+/wAA//8AAP/////+//3//v/+//7//f/+//7////9//7////8//3////8//z//f/+//7/+//9//3////7//3//f/8//7//P/+//3////+//7//f/9//7////9//7////9//7////+//7///8AAP3/AAAAAP//AAAAAP//AAD+/wAAAAD//wAA//////7//P8AAP7///8AAP7/AwD9//z////+//7//v///////P/7/wAA/f/9/wAA//8BAP7//v8AAP///v///wAAAgD6/wAAAAD///////8DAP7/AAAAAAEA//8AAAAA/v8AAP3//v/+//7//P/8/wAA+f/+//3//v////v//f/9//r/+P/7//j/9v/6//b/9v/4//r/9P/3//b/8//6//L/9v/3//X/9P/y//n/8P/1//P/8//3//L/8//y//P/9P/2//X/9v/0//X/9v/z//X/9f/3//L/9f/z//j/9//y//v/9f/7//P/+P/9//L//v/0//z/+f/0/wAA9v/+//j////4//b//P/1////9//4//7/+f/4//b/9f/4//T/+P/1//X/+//q//r/9//x//n/9//4//T/8P/1//j/8P/x//X/9v/z/+//9v/4//b/9v/0//r/9//0//v/+f/4//r/+f////f/+f8CAAAA+/8AAPr//v////j/AAD7/wAA+v/5//n/+f/6//r//P/1/wAA9v/2//z/8v/9//f/+P/5//j/+v/5//7/8v/5//f/9P/x//X////s////6//3/wMA6v8FAPv/+/8BAPr/AAACAAIACwAKAAAACgABAAYABwD5/xYAAAABAAoAAAAVAAMAEwAOAAUADgAAAAsAAAAOAAQAAQALAP//CwABAAsACgAIAAgAAAASAAMABAALAAEAEwAFAAoAEQAIABYADQATABgACAAZABMAFgATABUAHgAhAB4AGgAnAB0AJQAjABwAJAAfABsAKQAgAB0AKAAmAC4AIgAfACYAEQAoABEAHQAhAA4AIAARACAADQAYABAAFAATAAcAGwAIAAsADAACAA4A/P8GABEA+/8MAAAACQAJAPv/DgD9/xEABAABABUAAgASAAoAEAAQABYACAAQABUADQAbAA8AEgAaA
A8AGgAhABQAKAAZACIAIwASACoAHAAaACEAFAAqABQAHQAfABYAKAARABgAFQAQABMADgANABIACwAOAAoABQABAAoAAAAHAAYAAAACAP7////3/wcA3/8AAOz/4v///9v/+P/r/+v/8v/w/+b/5//4/+b/6f/0/+T/6f/4/+X/+f/u/+b/8//s/+v/5//0/+P/8P/8/+//+//5/+T/+v/0/+L/CgDd/+v/AADT/wAA5//n//b/1//7/9b/7f/n/9H//f/R/+7/8P/N//j/z//m/+T/0f/y/9T/6//Z/93/9v/Z/+n/5//l//P/2v/2/+z/4f/s/9r/8f/c/+H/1f/Y/+L/zf/a/9n/4//Q/9//0v/a/97/y//j/8D/3v/O/8H/1v/O/9f/vv/k/8X/y//n/7//8v/g/9T/4P/H/8v/vP+7/7D/wf+2/6//uv+6/67/n/+u/6n/q/+h/6T/qv+4/7f/0f/E/+L/5P/w/wkA/P8oABoAOwBSAEwAcgB6AIs
" Your browser does not support the audio element.\n",
" </audio>\n",
" "
],
"text/plain": [
"<IPython.lib.display.Audio object>"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"dp.Audio(wav.numpy().T, rate=fastspeech2_config.fs)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 保存音频"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"mkdir: cannot create directory 'output': File exists\r\n"
]
}
],
"source": [
"!mkdir output\n",
"sf.write(\n",
" \"output/output.wav\",\n",
" wav.numpy(),\n",
" samplerate=fastspeech2_config.fs)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 实验代码,非教程部分\n",
"# change pitch\n",
"\n",
"with paddle.no_grad():\n",
" mel, d_outs, p_outs, e_outs = fastspeech2_inference(phone_ids, output_dpe=True)\n",
" wav = pwg_inference(mel)\n",
"dp.Audio(wav.numpy().T, rate=fastspeech2_config.fs)"
]
},
{
"cell_type": "code",
"execution_count": 42,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"\n",
" <audio controls=\"controls\" >\n",
" <source src=\"data:audio/wav;base64,UklGRmwtAwBXQVZFZm10IBAAAAABAAEAwF0AAIC7AAACABAAZGF0YUgtAwDQ/8v/y//M/8v/0f/T/9H/1f/W/9T/1v/d/9//3//e/97/4P/j/+T/5f/l/+b/5v/n/+j/5v/o/+j/5//q/+v/7f/t/+v/7P/u/+7/7//w//L/8v/y//P/8//z//H/8//0//X/9P/1//T/9P/z//P/9f/z//T/9P/0//T/8//0//3//f/9//r//P/8//z//P/9//v/+v/6//r/+f/7//r/+f/8//n//P/7//v//f/8//3//P/9//7//f/8//3//f8AAP7///8AAP//AAAAAAAAAAABAP//AAACAAEAAQADAAEAAwADAAMAAgADAAMAAgADAAIAAgABAAAAAgAAAAAA/P/8//v/+//9//z/+v/5//n/9//2//b/9f/1//T/8//y//P/8v/y//P/8v/y//H/8//x//H/8f/y//H/7//w/+//7v/u//D/8P/v//D/7//v/+//7//u/+3/7v/v/+//7//u//D/7//v/+//7f/u/+7/7f/u/+7/7f/u/+z/7v/x/+//7//v//D/7//t/+//7//u/+//7//v/+3/7f/v//D/7v/w/+//8P/x/+//8P/v/+//8P/w//D/8P/v//H/8f/x//D/8f/x//L/8v/x//H/8v/w//H/8f/x//L/8//y//P/8//y//L/9f/0//T/9f/2//P/9P/2//T/9P/z//z//P/+//3//v/7//r/+v/4//n/+f/5//n/+P/3//j/9//4//n/+f/6//j/+P/3//f/+P/4//j/+f/6//r/+v/6//r/+v/6//r//P/7//z/+v/8//z//P/8//z/+//8//v//f/9//z/+//8//v//P/7//z//P/6//z/+//8//v//P/8//v/+//7//v/+//7//v/+//7//z//P/9//z//f/7//3//P/7//3//P/9//r//f/8//7//v/+//3//////////////////////////v///wAA///+//////8AAAAA//8AAAAAAAAAAAEAAAABAAAAAAAAAAEAAQAAAAEAAAAAAAAAAAAAAAEAAQAAAAAAAAABAAEAAgADAAIAAgAAAAIAAgACAAMAAwAEAAMAAgAEAAMAAwADAAMAAwAEAAMABQAEAAUABQAFAAQABQAEAAUAAwAEAAMABAADAAMABAAEAAIABAADAAQAAwAEAAUABAADAAYAAwAEAAMAAwACAAEAAgADAAIAAgACAAIAAgAAAAMAAQAAAAAAAAD//wAAAAAAAAAAAAAAAP//AAAAAP//AAAAAAAAAAAAAAAAAAD//wAA//8AAAAA/////wAAAAD+/wAAAAAAAAAAAAAAAAEAAAD//wAAAAD//wAA/v////3//v/9//3//f/8//z/+v/7//v/+f/5//r/9//5//f/9v/4//X/9v/1//X/9f/1//X/8//0//P/8//0//X/8v/z//T/8v/z//P/8f/y//L/8f/y//L/8v/z//T/8P/x//L/8f/y//H/8//z//L/8f/0//L/8P/y//H/8v/y//D/8//z//L/8f/z//P/8//z//T/9P/z//P/8v/y//T/9P/1//T/9f/1//X/9f/z//b/9f/2//f/+P/3//f/9//5//f/9//4//j/+f/6//n/+v/6//r/+v/5//r/+v/5//r/+//6//r/+v/7//v/+v/6//r/+v/7//r/+v/8//z/+//7//z/+//7//z//P/7//v/+//5//v/+f/6//v/+//7//z/+//8//v//f/8//3/+//8//7/+v/7//3//P/9//3//P/9//z//v/9//z/+//9//7//v/8//z///////z//f/9//3//P/9//3//P/9//7//v/9//3//v/7//7//P/+//z//f/8//7//P/8//7//v/+///////+//7//v/+//7///8AAP7//v////3/+//7//3//P/9//3/+//8//3/+//8//z//P/6//v//P/7//r//P/8//n/+//8//z/+//8//v//f/+//v//P/9//v//v/9//3//P/9//z/+//9//3//P/8//7/////////AAAAAAAAAAABAP//AAABAAIAAQACAAIAAAACAAEAAwADAAIAAwAFAAYABAAHAAUABgAGAAMABQAFAAMAAwAEAAEAAwAEAAMAAgADAAAAAgADAAIAAQACAAIAAAABAAAAAQAAAP7/AAAAAAAA/f/+/////f/+//7/+//6//r/+//6//n/+P/4//j/+f/5//n/+P/5//n/+f/3//r/+f/5//j/+f/5//f/+v/4//v/+v/5//v/+//+//r/+//+//z//v///////f8AAP//AAD/////AAAAAP///v8AAP7//f/+/////v/+//7//v////////8BAP7////6/////v/6//7//P////v/+v/8//z/+//6//v/+P/6//f/+v/5//j/+//3//r/9v/4//j/9v/0//f/9P/1//P/7//2//D/8v/u//H/7v/u/+3/7v/w/+z/7//u//P/7f/0//X/8f/4//L/+P/1//P/9v/z//n/8v/3//X/9P/2//L/+P/2//T/9//4//j/+f/3//j//f/2//z/+f/2//n/+P/2//T/9//1//f/8//0//j/8//1//X/9v/1//H/9f/w//T/7v/x//H/6//w/+z/8P/r/+7/7P/y/+7/6v/z/+r/7v/s/+v/8P/p/+3/6//v/+r/6f/t/+v/6v/r/+7/6//w/+3/8P/z/+j/7//x/+3/7f/s//H/7P/t/+z/7//z/+r/8P/z//D/7f/0/+7/8P/z/+z/8//x/+r/8P/w/+X/7v/r/+f/7v/n/+v/7f/s/+j/7//p/+z/7f/r//D/6//p/+3/7v/q/+z/8P/v//D/7//x//T/9f/q//r/9P/n//7/7//v//X/7P/8//D/7//2//L/9v/x//n/8f/v//r/7f/y//D/7v/0/+7/7f/v//L/7v/v/+7/8f/v/+3/8P/v/+//8f/u/+7/8v/o//T/7v/n//f/6P/u//H/5//t/+z/7P/t/+j/6//t/+v/6P/q/+//5f/r/+r/5f/u/+X/6f/w/+L/6v/x/+L/7v/p/+v/6v/q/+v/4v/x/9//6P/q/9//7P/k/+T/5P/p/+P/5f/q/+D/6P/l/+P/5v/g/+j/4v/m/+L/4//y/97/6f/q/+L/8v/o//P/9P/n//z/9f/y//r/+v/6//b//v/x/////P/3/wEA9v////j/AgD1//T/AQDx//7/9f/y//z/9//v//r//P/x//P/9//2//L/+f/3//T//v/0//P/BwDu//v/AgDt/wwA8P/4////9/8CAO7/BAD9//T/BQD3//n//v/x//7/AgDw//r/AQD0//j//f/1/wMA9//5//7/9/8JAPP/BgD1//b/BgDo/wMA9//v/wEA8P/r/wQA4f/3//L/4v///9//9P/i/+r/7v/n/+j/5P/r/
+D/4//k//P/0v/0/+v/3f8AANz/+f/0/+z/+//4//r/8v8AAPv/+f8AAPX//v////X/AQD5//n/AQD3/////v/4//z/+v/5//b/+f/1//f/9v/s//j/7P/v//D/5f/v/+z/7f/3/+v/9P/0/+X/+v/x//b/+f/v//L/8//4/woAEwAlABgAKAAuACwAJwAiACAAFwAaABkAFwAbABwAGwAZAB8AIAAhACwAHgAbACQAHwATAD8ADAAtACsAHgA4AAkAOgANACEADgAAAAgA2//x/9v/x/+8/6f/pP+h/4X/kv+D/3D/jv9u/3X/cv9s/2X/Zf9q/0X/Yv9H/zz/a/9D/17/dv9h/3H/l/+k/5j/4v/P//H/DADn/ykAIgAqACwAZABkAHMAvQClAOUALQH7AG0BZwFwAcIBhgHOAZsBsQGnAYwBqwFUAV0BSQH9AAIBvACwALQATQBtAA0ADADO/1n/of/r/tj+ZP73/ez9Av0y/Zf8Qvw6/Iv71fth+y7
" Your browser does not support the audio element.\n",
" </audio>\n",
" "
],
"text/plain": [
"<IPython.lib.display.Audio object>"
]
},
"execution_count": 42,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# 实验代码,非教程部分\n",
"# 变速,变调,机械音\n",
"# 机械音:输入的 token 都变成一声pitch 变成 zeroduration 变成\n",
"# 输入一个参数搞定\n",
"\n",
"phones = ['n', 'i1', 'h', 'ao1', 'sp', 'h', 'uan1', 'ing1', 'sh', 'iii1', 'iong1', 'b', 'ai1', 'd', 'u1', 'f', 'ei1', 'j', 'iang1', 'k', 'uang1', 'j', 'ia1', 'j', 'in1', 'x', 'ing1', 'sh', 'en1', 'd', 'u1', 'x', 've1', 'x', 'i1', 'ian1', 'j', 'iou1', 'sp']\n",
"phone_dict = {}\n",
"with open(phones_dict, \"r\") as f:\n",
" phn_id = [line.strip().split() for line in f.readlines()]\n",
"for phn, id in phn_id:\n",
" phone_dict[phn]=int(id)\n",
" \n",
"vocab_size = len(phn_id)\n",
"phone_ids = [phone_dict[phn] for phn in phones]\n",
"phone_ids = paddle.to_tensor(phone_ids)\n",
"pitch_stats_path=\"/home/yuantian01/myPaddle/Parakeet/examples/fastspeech2/baker/dump/train/pitch_stats.npy\"\n",
"pitch_mean, pitch_std = np.load(pitch_stats_path)\n",
"# print(\"p_outs:\", p_outs)\n",
"# pitch 变换为 Hz 表示\n",
"# 在数据预处理的时候,是先求 log 再减均值除方差\n",
"\n",
"# 变换为童声\n",
"# p = p * 1.5\n",
"# p 变为均值\n",
"# p = np.full(p.shape, p.mean())\n",
"# p = p -50\n",
"# p = np.zeros(p.shape)\n",
"# 变换为 fastspeech2 的输入\n",
"# p = (np.log(p) - pitch_mean)/pitch_std\n",
"# d = paddle.full(d_outs.shape, d_outs.mean())\n",
"# e = paddle.full(e_outs.shape, e_outs.mean())\n",
"# d 是可以控制的p 要对输出的控制,需要输入 pitch 的均值方差文件\n",
"# p = paddle.to_tensor(p)\n",
"with paddle.no_grad():\n",
" mel = fastspeech2_inference(phone_ids, d_ratio=1)\n",
" wav = pwg_inference(mel)\n",
"dp.Audio(wav.numpy().T, rate=fastspeech2_config.fs)\n",
" \n"
]
},
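  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<font size=3>The commented-out lines above rely on the pitch normalization used at preprocessing time: pitch is log-transformed, then standardized with the training-set mean and std loaded from `pitch_stats.npy`. Below is a minimal sketch (not part of the original tutorial) of that transform and its inverse, assuming a normalized pitch array `p` predicted by the model and the `pitch_mean` / `pitch_std` values loaded above.</font>\n",
    "\n",
    "```python\n",
    "import numpy as np\n",
    "\n",
    "# Forward transform used at preprocessing time: log, then standardize.\n",
    "# (Voiced frames only; unvoiced frames with zero pitch need special handling.)\n",
    "def normalize_pitch(p_hz, pitch_mean, pitch_std):\n",
    "    return (np.log(p_hz) - pitch_mean) / pitch_std\n",
    "\n",
    "# Inverse transform: undo the standardization, then exponentiate back to Hz.\n",
    "def denormalize_pitch(p_norm, pitch_mean, pitch_std):\n",
    "    return np.exp(p_norm * pitch_std + pitch_mean)\n",
    "\n",
    "# Example: raise the pitch by 50% in the Hz domain (a rough \"child voice\"),\n",
    "# then map it back to the model's normalized input space.\n",
    "# p_child = normalize_pitch(denormalize_pitch(p, pitch_mean, pitch_std) * 1.5,\n",
    "#                           pitch_mean, pitch_std)\n",
    "```"
   ]
  },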
{
"cell_type": "code",
"execution_count": 121,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"sf.write(\n",
" \"output/ocr.wav\",\n",
" wav.numpy(),\n",
" samplerate=fastspeech2_config.fs)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## step4 FastSpeech2 进阶 — 变调变速\n",
"<br></br>\n",
"<font size=3>FastSpeech2 模型可以个性化地调节音素时长、音调和能量,通过一些简单的调节就可以获得一些有意思的效果<font>"
]
},
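  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<font size=3>As a rough illustration (a sketch, not part of the original tutorial), speed can be controlled by scaling the predicted durations at inference time. The call below reuses `fastspeech2_inference`, `pwg_inference`, `phone_ids`, and the `d_ratio` argument from the experimental cell earlier in this notebook; outside that cell, this exact signature is an assumption rather than a documented PaddleSpeech API.</font>\n",
    "\n",
    "```python\n",
    "import paddle\n",
    "import IPython.display as dp\n",
    "\n",
    "def synthesize_with_speed(phone_ids, speed=1.0):\n",
    "    # Assuming d_ratio scales the predicted durations, d_ratio = 1 / speed\n",
    "    # makes the utterance `speed` times faster (speed > 1 -> faster speech).\n",
    "    with paddle.no_grad():\n",
    "        mel = fastspeech2_inference(phone_ids, d_ratio=1.0 / speed)\n",
    "        wav = pwg_inference(mel)\n",
    "    return wav\n",
    "\n",
    "# wav_fast = synthesize_with_speed(phone_ids, speed=1.2)\n",
    "# dp.Audio(wav_fast.numpy().T, rate=fastspeech2_config.fs)\n",
    "```"
   ]
  },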
{
"cell_type": "code",
"execution_count": 120,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"原始音频\n"
]
},
{
"data": {
"text/html": [
"\n",
" <audio controls=\"controls\" >\n",
" <source src=\"https://paddlespeech.bj.bcebos.com/Parakeet/docs/demos/parakeet_espnet_fs2_pwg_demo/tn_g2p/espent/001.wav\" type=\"audio/x-wav\" />\n",
" Your browser does not support the audio element.\n",
" </audio>\n",
" "
],
"text/plain": [
"<IPython.lib.display.Audio object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"speed x 1.2\n"
]
},
{
"data": {
"text/html": [
"\n",
" <audio controls=\"controls\" >\n",
" <source src=\"https://paddlespeech.bj.bcebos.com/Parakeet/docs/demos/parakeet_espnet_fs2_pwg_demo/tn_g2p/espent/002.wav\" type=\"audio/x-wav\" />\n",
" Your browser does not support the audio element.\n",
" </audio>\n",
" "
],
"text/plain": [
"<IPython.lib.display.Audio object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"speed x 0.8\n"
]
},
{
"data": {
"text/html": [
"\n",
" <audio controls=\"controls\" >\n",
" <source src=\"https://paddlespeech.bj.bcebos.com/Parakeet/docs/demos/parakeet_espnet_fs2_pwg_demo/tn_g2p/espent/002.wav\" type=\"audio/x-wav\" />\n",
" Your browser does not support the audio element.\n",
" </audio>\n",
" "
],
"text/plain": [
"<IPython.lib.display.Audio object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"speed x 1.2(童声)\n"
]
},
{
"data": {
"text/html": [
"\n",
" <audio controls=\"controls\" >\n",
" <source src=\"https://paddlespeech.bj.bcebos.com/Parakeet/docs/demos/parakeet_espnet_fs2_pwg_demo/tn_g2p/espent/002.wav\" type=\"audio/x-wav\" />\n",
" Your browser does not support the audio element.\n",
" </audio>\n",
" "
],
"text/plain": [
"<IPython.lib.display.Audio object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# jupyter 在一个 cell 显示多个音频\n",
"print(\"原始音频\")\n",
"dp.display(dp.Audio(url=\"https://paddlespeech.bj.bcebos.com/Parakeet/docs/demos/parakeet_espnet_fs2_pwg_demo/tn_g2p/espent/001.wav\"))\n",
"print(\"speed x 1.2\")\n",
"dp.display(dp.Audio(url=\"https://paddlespeech.bj.bcebos.com/Parakeet/docs/demos/parakeet_espnet_fs2_pwg_demo/tn_g2p/espent/002.wav\"))\n",
"print(\"speed x 0.8\")\n",
"dp.display(dp.Audio(url=\"https://paddlespeech.bj.bcebos.com/Parakeet/docs/demos/parakeet_espnet_fs2_pwg_demo/tn_g2p/espent/002.wav\"))\n",
"print(\"speed x 1.2(童声)\")\n",
"dp.display(dp.Audio(url=\"https://paddlespeech.bj.bcebos.com/Parakeet/docs/demos/parakeet_espnet_fs2_pwg_demo/tn_g2p/espent/002.wav\"))\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<font size=4>具体实现代码请参考: https://github.com/DeepSpeech/demos/style_fs2/run.sh<font>"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<br></br>\n",
"# 用 PaddleSpeech 训练 TTS 模型\n",
"<br></br>\n",
"<font size=3>PaddleSpeech 的 examples 是按照 数据集/模型 的结构安排的:<font>\n",
"```text\n",
"examples \n",
"|-- aishell3\n",
"| |-- README.md\n",
"| |-- tts3\n",
"| `-- vc0\n",
"|-- csmsc\n",
"| |-- README.md\n",
"| |-- tts2\n",
"| |-- tts3\n",
"| |-- voc1\n",
"| `-- voc3\n",
"```\n",
"<font size=3>我们在每个数据集的 README.md 介绍了子目录和模型的对应关系, 在 TTS 中有如下对应关系:<font>\n",
"```text\n",
"tts0 - Tactron2\n",
"tts1 - TransformerTTS\n",
"tts2 - SpeedySpeech\n",
"tts3 - FastSpeech2\n",
"voc0 - WaveFlow\n",
"voc1 - Parallel WaveGAN\n",
"voc2 - MelGAN\n",
"voc3 - MultiBand MelGAN\n",
"```\n",
"<br></br>\n",
"## 基于 CSMCS 数据集训练 FastSpeech2 模型\n",
"```bash\n",
"git clone https://github.com/PaddlePaddle/DeepSpeech.git\n",
"cd examples/csmsc/tts\n",
"```\n",
"<font size=3>根据 README.md, 下载 CSMCS 数据集和其对应的强制对齐文件, 并放置在对应的位置<font>\n",
"```bash\n",
"./run.sh\n",
"```\n",
"<font size=3>`run.sh` 中包含预处理、训练、合成、静态图推理等步骤:</font>\n",
"\n",
"```bash\n",
"#!/bin/bash\n",
"set -e\n",
"source path.sh\n",
"gpus=0,1\n",
"stage=0\n",
"stop_stage=100\n",
"conf_path=conf/default.yaml\n",
"train_output_path=exp/default\n",
"ckpt_name=snapshot_iter_153.pdz\n",
"\n",
"# with the following command, you can choice the stage range you want to run\n",
"# such as `./run.sh --stage 0 --stop-stage 0`\n",
"# this can not be mixed use with `$1`, `$2` ...\n",
"source ${MAIN_ROOT}/utils/parse_options.sh || exit 1\n",
"\n",
"if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then\n",
" # prepare data\n",
" bash ./local/preprocess.sh ${conf_path} || exit -1\n",
"fi\n",
"if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then\n",
" # train model, all `ckpt` under `train_output_path/checkpoints/` dir\n",
" CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path} || exit -1\n",
"fi\n",
"if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then\n",
" # synthesize, vocoder is pwgan\n",
" CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1\n",
"fi\n",
"if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then\n",
" # synthesize_e2e, vocoder is pwgan\n",
" CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1\n",
"fi\n",
"if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then\n",
" # inference with static model\n",
" CUDA_VISIBLE_DEVICES=${gpus} ./local/inference.sh ${train_output_path} || exit -1\n",
"fi\n",
"```\n",
"<br></br>\n",
"## 基于 CSMCS 数据集训练 Parallel WaveGAN 模型\n",
"```bash\n",
"git clone https://github.com/PaddlePaddle/DeepSpeech.git\n",
"cd examples/csmsc/voc1\n",
"```\n",
"<font size=3>根据 README.md, 下载 CSMCS 数据集和其对应的强制对齐文件, 并放置在对应的位置<font>\n",
"```bash\n",
"./run.sh\n",
"```\n",
"<font size=3>`run.sh` 中包含预处理、训练、合成等步骤:</font>\n",
"```bash\n",
"#!/bin/bash\n",
"set -e\n",
"source path.sh\n",
"gpus=0,1\n",
"stage=0\n",
"stop_stage=100\n",
"conf_path=conf/default.yaml\n",
"train_output_path=exp/default\n",
"ckpt_name=snapshot_iter_5000.pdz\n",
"\n",
"# with the following command, you can choice the stage range you want to run\n",
"# such as `./run.sh --stage 0 --stop-stage 0`\n",
"# this can not be mixed use with `$1`, `$2` ...\n",
"source ${MAIN_ROOT}/utils/parse_options.sh || exit 1\n",
"\n",
"if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then\n",
" # prepare data\n",
" ./local/preprocess.sh ${conf_path} || exit -1\n",
"fi\n",
"if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then\n",
" # train model, all `ckpt` under `train_output_path/checkpoints/` dir\n",
" CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path} || exit -1\n",
"fi\n",
"if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then\n",
" # synthesize\n",
" CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1\n",
"fi\n",
"```"
]
},
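  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<font size=3>Before launching `./run.sh`, it can help to inspect the training configuration it passes to each stage. Below is a minimal sketch (an assumption-laden illustration, not part of the official recipe): the key names printed here are guesses about `conf/default.yaml`; check the actual file in the example directory.</font>\n",
    "\n",
    "```python\n",
    "# Inspect a few hyperparameters from the recipe's config file.\n",
    "# The keys fs, batch_size, and max_epoch are assumptions about conf/default.yaml.\n",
    "import yaml\n",
    "\n",
    "with open(\"conf/default.yaml\") as f:\n",
    "    config = yaml.safe_load(f)\n",
    "\n",
    "for key in (\"fs\", \"batch_size\", \"max_epoch\"):\n",
    "    print(key, \"=\", config.get(key, \"<not set>\"))\n",
    "```"
   ]
  },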
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# FAQ\n",
"\n",
"- <font size=3>需要注意的问题<font>\n",
"- <font size=3>经验与分享<font>\n",
"- <font size=3>用户的其他问题<font>"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# 作业\n",
"<font size=4>在 CSMSC 数据集上利用 FastSpeech2 和 Parallel WaveGAN 实现一个 TTS 系统<font>"
]
},
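  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<font size=3>For the inference side of the homework, the objects already built in this notebook can be reused. Below is a minimal sketch, assuming `phone_ids`, `fastspeech2_inference`, `pwg_inference`, and `fastspeech2_config` are prepared as in the earlier cells; the training side follows the `run.sh` scripts above.</font>\n",
    "\n",
    "```python\n",
    "import paddle\n",
    "import soundfile as sf\n",
    "\n",
    "with paddle.no_grad():\n",
    "    mel = fastspeech2_inference(phone_ids)  # acoustic model: phoneme ids -> mel spectrogram\n",
    "    wav = pwg_inference(mel)                # vocoder: mel spectrogram -> waveform\n",
    "\n",
    "# Save the waveform at the sample rate from the FastSpeech2 config.\n",
    "sf.write(\"output/homework.wav\", wav.numpy(), samplerate=fastspeech2_config.fs)\n",
    "```"
   ]
  },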
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# 关注 PaddleSpeech\n",
"<font size=3>https://github.com/PaddlePaddle/DeepSpeech/<font>"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.7.0 64-bit ('yt_py37_develop': venv)",
"language": "python",
"name": "python37064bitytpy37developvenv88cd689abeac41d886f9210a708a170b"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.0"
},
"toc": {
"base_numbering": 1,
"nav_menu": {},
"number_sections": true,
"sideBar": true,
"skip_h1_title": false,
"title_cell": "Table of Contents",
"title_sidebar": "Contents",
"toc_cell": false,
"toc_position": {
"height": "calc(100% - 180px)",
"left": "10px",
"top": "150px",
"width": "263.594px"
},
"toc_section_display": true,
"toc_window_display": true
}
},
"nbformat": 4,
"nbformat_minor": 4
}