diff --git a/.flake8 b/.flake8
index 6b50de7ed..ae15ad2be 100644
--- a/.flake8
+++ b/.flake8
@@ -33,7 +33,7 @@ filename =
# Specify a list of codes to ignore.
ignore =
W503
- E252,E262,E127,E265,E126,E266,E241,E261,E128,E125
+ E252,E262,E127,E265,E126,E266,E241,E261,E128,E125,E129
W291,W293,W605
E203,E305,E402,E501,E721,E741,F403,F405,F821,F841,F999,W503,W504,C408,E302,W291,E303,
# shebang has extra meaning in fbcode lints, so I think it's not worth trying
diff --git a/README.md b/README.md
index 59c61f776..49e40624d 100644
--- a/README.md
+++ b/README.md
@@ -19,11 +19,9 @@
@@ -159,6 +157,9 @@ Via the easy-to-use, efficient, flexible and scalable implementation, our vision
- 🧩 *Cascaded models application*: as an extension of the typical traditional audio tasks, we combine the workflows of the aforementioned tasks with other fields like Natural language processing (NLP) and Computer Vision (CV).
### Recent Update
+- 👑 2022.10.11: Add [Wav2vec2ASR](./examples/librispeech/asr3), wav2vec2.0 fine-tuning for ASR on LibriSpeech.
+- 🔥 2022.09.26: Add Voice Cloning, TTS finetune, and ERNIE-SAT in [PaddleSpeech Web Demo](./demos/speech_web).
+- ⚡ 2022.09.09: Add AISHELL-3 Voice Cloning [example](./examples/aishell3/vc2) with ECAPA-TDNN speaker encoder.
- ⚡ 2022.08.25: Release TTS [finetune](./examples/other/tts_finetune/tts3) example.
- 🔥 2022.08.22: Add ERNIE-SAT models: [ERNIE-SAT-vctk](./examples/vctk/ernie_sat)、[ERNIE-SAT-aishell3](./examples/aishell3/ernie_sat)、[ERNIE-SAT-zh_en](./examples/aishell3_vctk/ernie_sat).
- 🔥 2022.08.15: Add [g2pW](https://github.com/GitYCC/g2pW) into TTS Chinese Text Frontend.
@@ -178,17 +179,17 @@ Via the easy-to-use, efficient, flexible and scalable implementation, our vision
- Scan the QR code below with your WeChat to access the official technical exchange group and get the bonus (more than 20 GB of learning materials, such as papers, code and videos) and the live link of the lessons. We look forward to your participation.
-
+
## Installation
-We strongly recommend our users to install PaddleSpeech in **Linux** with *python>=3.7* and *paddlepaddle>=2.3.1*.
+We strongly recommend installing PaddleSpeech in **Linux** with *python>=3.7* and *paddlepaddle>=2.4rc*.
### **Dependency Introduction**
+ gcc >= 4.8.5
-+ paddlepaddle >= 2.3.1
++ paddlepaddle >= 2.4rc
+ python >= 3.7
+ OS support: Linux(recommend), Windows, Mac OSX
@@ -197,6 +198,13 @@ PaddleSpeech depends on paddlepaddle. For installation, please refer to the offi
```bash
pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple
```
+You can also specify the version of paddlepaddle or install the develop version.
+```bash
+# install the 2.3.1 version. Note: 2.3.1 is only an example; choose any version that meets PaddleSpeech's minimum paddlepaddle requirement
+pip install paddlepaddle==2.3.1 -i https://mirror.baidu.com/pypi/simple
+# install develop version
+pip install paddlepaddle==0.0.0 -f https://www.paddlepaddle.org.cn/whl/linux/cpu-mkl/develop.html
+```
There are two quick installation methods for PaddleSpeech: pip installation and source-code compilation (recommended).
### pip install
@@ -705,7 +713,7 @@ PaddleSpeech supports a series of most popular models. They are summarized in [r
Speaker Verification
- VoxCeleb12
+ VoxCeleb1/2
ECAPA-TDNN
ecapa-tdnn-voxceleb12
@@ -714,6 +722,31 @@ PaddleSpeech supports a series of most popular models. They are summarized in [r
+
+
+**Speaker Diarization**
+
+
+
+
+ Task
+ Dataset
+ Model Type
+ Example
+
+
+
+
+ Speaker Diarization
+ AMI
+ ECAPA-TDNN + AHC / SC
+
+ ecapa-tdnn-ami
+
+
+
+
+
**Punctuation Restoration**
@@ -767,6 +800,7 @@ Normally, [Speech SoTA](https://paperswithcode.com/area/speech), [Audio SoTA](ht
- [Text-to-Speech](#TextToSpeech)
- [Audio Classification](#AudioClassification)
- [Speaker Verification](#SpeakerVerification)
+ - [Speaker Diarization](#SpeakerDiarization)
- [Punctuation Restoration](#PunctuationRestoration)
- [Community](#Community)
- [Welcome to contribute](#contribution)
diff --git a/README_cn.md b/README_cn.md
index 070a656a2..bf3ff4dfd 100644
--- a/README_cn.md
+++ b/README_cn.md
@@ -19,13 +19,11 @@
@@ -164,23 +162,11 @@
- 🧩 级联模型应用: 作为传统语音任务的扩展,我们结合了自然语言处理、计算机视觉等任务,实现更接近实际需求的产业级应用。
-### 近期活动
-
- ❗️重磅❗️飞桨智慧金融行业系列直播课
-✅ 覆盖智能风控、智能运维、智能营销、智能客服四大金融主流场景
-
-📆 9月6日-9月29日每周二、四19:00
-+ 智慧金融行业深入洞察
-+ 8节理论+实践精品直播课
-+ 10+真实产业场景范例教学及实践
-+ 更有免费算力+结业证书等礼品等你来拿
-扫码报名码住直播链接,与行业精英深度交流
-
-
-
-
-
+
### 近期更新
+- 👑 2022.10.11: 新增 [Wav2vec2ASR](./examples/librispeech/asr3), 在 LibriSpeech 上针对ASR任务对wav2vec2.0 的fine-tuning.
+- 🔥 2022.09.26: 新增 Voice Cloning, TTS finetune 和 ERNIE-SAT 到 [PaddleSpeech 网页应用](./demos/speech_web)。
+- ⚡ 2022.09.09: 新增基于 ECAPA-TDNN 声纹模型的 AISHELL-3 Voice Cloning [示例](./examples/aishell3/vc2)。
- ⚡ 2022.08.25: 发布 TTS [finetune](./examples/other/tts_finetune/tts3) 示例。
- 🔥 2022.08.22: 新增 ERNIE-SAT 模型: [ERNIE-SAT-vctk](./examples/vctk/ernie_sat)、[ERNIE-SAT-aishell3](./examples/aishell3/ernie_sat)、[ERNIE-SAT-zh_en](./examples/aishell3_vctk/ernie_sat)。
- 🔥 2022.08.15: 将 [g2pW](https://github.com/GitYCC/g2pW) 引入 TTS 中文文本前端。
@@ -199,13 +185,13 @@
### 🔥 加入技术交流群获取入群福利
- - 3 日直播课链接: 深度解读 PP-TTS、PP-ASR、PP-VPR 三项核心语音系统关键技术
+ - 3 日直播课链接: 深度解读 【一句话语音合成】【小样本语音合成】【定制化语音识别】语音交互技术
- 20G 学习大礼包:视频课程、前沿论文与学习资料
微信扫描二维码关注公众号,点击“马上报名”填写问卷加入官方交流群,获得更高效的问题答疑,与各行各业开发者充分交流,期待您的加入。
-
+
@@ -215,7 +201,7 @@
### 相关依赖
+ gcc >= 4.8.5
-+ paddlepaddle >= 2.3.1
++ paddlepaddle >= 2.4rc
+ python >= 3.7
+ linux(推荐), mac, windows
@@ -224,7 +210,13 @@ PaddleSpeech 依赖于 paddlepaddle,安装可以参考[ paddlepaddle 官网](h
```shell
pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple
```
-
+你也可以安装指定版本的paddlepaddle,或者安装 develop 版本。
+```bash
+# 安装2.3.1版本. 注意:2.3.1只是一个示例,请按照对paddlepaddle的最小依赖进行选择。
+pip install paddlepaddle==2.3.1 -i https://mirror.baidu.com/pypi/simple
+# 安装 develop 版本
+pip install paddlepaddle==0.0.0 -f https://www.paddlepaddle.org.cn/whl/linux/cpu-mkl/develop.html
+```
PaddleSpeech 快速安装方式有两种,一种是 pip 安装,一种是源码编译(推荐)。
### pip 安装
@@ -717,8 +709,8 @@ PaddleSpeech 的 **语音合成** 主要包含三个模块:文本前端、声
- Speaker Verification
- VoxCeleb12
+ 声纹识别
+ VoxCeleb1/2
ECAPA-TDNN
ecapa-tdnn-voxceleb12
@@ -727,6 +719,31 @@ PaddleSpeech 的 **语音合成** 主要包含三个模块:文本前端、声
+
+
+**说话人日志**
+
+
+
+
+ 任务
+ 数据集
+ 模型类型
+ 脚本
+
+
+
+
+ 说话人日志
+ AMI
+ ECAPA-TDNN + AHC / SC
+
+ ecapa-tdnn-ami
+
+
+
+
+
**标点恢复**
@@ -786,6 +803,7 @@ PaddleSpeech 的 **语音合成** 主要包含三个模块:文本前端、声
- [语音合成](#语音合成模型)
- [声音分类](#声音分类模型)
- [声纹识别](#声纹识别模型)
+ - [说话人日志](#说话人日志模型)
- [标点恢复](#标点恢复模型)
- [技术交流群](#技术交流群)
- [欢迎贡献](#欢迎贡献)
diff --git a/demos/speech_server/README.md b/demos/speech_server/README.md
index e400f7e74..7e7d4b2c5 100644
--- a/demos/speech_server/README.md
+++ b/demos/speech_server/README.md
@@ -13,7 +13,7 @@ For service interface definition, please check:
### 1. Installation
see [installation](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/docs/source/install.md).
-It is recommended to use **paddlepaddle 2.3.1** or above.
+It is recommended to use **paddlepaddle 2.4rc** or above.
You can choose one way from easy, medium and hard to install paddlespeech.
diff --git a/demos/speech_server/README_cn.md b/demos/speech_server/README_cn.md
index 628468c83..594928281 100644
--- a/demos/speech_server/README_cn.md
+++ b/demos/speech_server/README_cn.md
@@ -14,7 +14,7 @@
### 1. 安装
请看 [安装文档](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/docs/source/install.md).
-推荐使用 **paddlepaddle 2.3.1** 或以上版本。
+推荐使用 **paddlepaddle 2.4rc** 或以上版本。
你可以从简单,中等,困难 几种方式中选择一种方式安装 PaddleSpeech。
diff --git a/demos/speech_web/README.md b/demos/speech_web/README.md
index e8c59ea8b..572781ab6 100644
--- a/demos/speech_web/README.md
+++ b/demos/speech_web/README.md
@@ -21,14 +21,14 @@ Paddle Speech Demo 是一个以 PaddleSpeech 的语音交互功能为主体开
+ 小数据微调:基于小数据集的微调方案,内置用12句话标贝中文女声微调示例,你也可以通过一键重置,录制自己的声音,注意在安静环境下录制,效果会更好。你可以在 [【Finetune your own AM based on FastSpeech2 with AISHELL-3】](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/other/tts_finetune/tts3)中尝试使用自己的数据集进行微调。
-+ ENIRE-SAT:语言-语音跨模态大模型 ENIRE-SAT 可视化展示示例,支持个性化合成,跨语言语音合成(音频为中文则输入英文文本进行合成),语音编辑(修改音频文字中间的结果)功能。 ENIRE-SAT 更多实现细节,可以参考:
++ ERNIE-SAT:语言-语音跨模态大模型 ERNIE-SAT 可视化展示示例,支持个性化合成,跨语言语音合成(音频为中文则输入英文文本进行合成),语音编辑(修改音频文字中间的结果)功能。 ERNIE-SAT 更多实现细节,可以参考:
+ [【ERNIE-SAT with AISHELL-3 dataset】](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/ernie_sat)
+ [【ERNIE-SAT with AISHELL3 and VCTK datasets】](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3_vctk/ernie_sat)
+ [【ERNIE-SAT with VCTK dataset】](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/vctk/ernie_sat)
运行效果:
- 
+ 
@@ -36,6 +36,7 @@ Paddle Speech Demo 是一个以 PaddleSpeech 的语音交互功能为主体开
### 后端环境安装
```bash
+# 需要先安装 PaddleSpeech
cd speech_server
pip install -r requirements.txt -i https://mirror.baidu.com/pypi/simple
cd ../
@@ -44,6 +45,8 @@ cd ../
### 前端环境安装
前端依赖 `node.js` ,需要提前安装,确保 `npm` 可用,`npm` 测试版本 `8.3.1`,建议下载[官网](https://nodejs.org/en/)稳定版的 `node.js`
+如果因为网络问题无法下载依赖库,可以参考下方 FAQ 部分的 `npm / yarn 配置淘宝镜像源`。
+
```bash
# 进入前端目录
cd web_client
@@ -70,7 +73,7 @@ mkdir -p source/model
cd source/model
# 下载IE模型
wget https://bj.bcebos.com/paddlenlp/applications/speech-cmd-analysis/finetune/model_state.pdparams
-cd ../../
+cd ../../../
```
#### 启动后端服务
@@ -84,6 +87,10 @@ python main.py --port 8010
### 启动 `vc.py` 后端服务
+参照下面的步骤自行配置项目所需环境。
+
+Aistudio 在线体验小样本合成后端功能:[【PaddleSpeech进阶】PaddleSpeech小样本合成方案体验](https://aistudio.baidu.com/aistudio/projectdetail/4573549?sUid=2470186&shared=1&ts=1664174385948)
+
#### 下载相关模型和音频
```bash
@@ -172,8 +179,19 @@ cd web_client
yarn dev --port 8011
```
-默认配置下,前端中配置的后台地址信息是 localhost,确保后端服务器和打开页面的游览器在同一台机器上,不在一台机器的配置方式见下方的 FAQ:【后端如果部署在其它机器或者别的端口如何修改】
+默认配置下,前端配置的后台地址信息是 `localhost`,请确保后端服务器和打开页面的浏览器在同一台机器上;不在同一台机器时的配置方式见下方的 FAQ:【后端如果部署在其它机器或者别的端口如何修改】
+
+#### 关于前端的一些说明
+
+为了方便后期的维护,这里并没有给出打包好的 HTML 文件,而是 Vue3 的项目,使用 `yarn dev --port 8011` 的方式启动测试,方便大家 debug,相当于是启动了一个前端服务器。
+
+比如我们在本机启动这个前端服务(运行 `yarn dev --port 8011` ),就可以在浏览器中通过 `http://localhost:8011` 访问前端页面。
+
+如果我们在其它服务器上(例如:`*.*.*.*` )启动这个前端服务(运行 `yarn dev --port 8011` ),就可以在浏览器中通过 `http://*.*.*.*:8011` 访问前端页面。
+
+那前端跟后端是什么关系呢?两者是独立的,只要前端能够通过代理访问到后端的接口,就没有问题。你可以在 A 机器上部署后端服务,然后在 B 机器上部署前端服务。我们在 `./web_client/vite.config.js` 中将 `/api` 映射到 `http://localhost:8010`,你可以把它配置成任意你想要访问的后端地址。
+
+当以 `*.*.*.*` 这类 IP 地址的形式访问前端页面时,由于浏览器的安全限制,会禁止录音,需要重新配置浏览器的安全策略,可以看下面 FAQ 部分: [【前端以IP地址的形式访问,无法录音】]
## FAQ
@@ -210,12 +228,24 @@ ASR_SOCKET_RECORD: 'ws://localhost:8010/ws/asr/onlineStream', // Stream ASR 接
TTS_SOCKET_RECORD: 'ws://localhost:8010/ws/tts/online', // Stream TTS 接口
```
-#### Q:后端以IP地址的形式,前端无法录音
+#### Q:前端以IP地址的形式访问,无法录音
A:这里主要是浏览器安全策略的限制,需要配置浏览器后重启。浏览器修改配置可参考[使用js-audio-recorder报浏览器不支持getUserMedia](https://blog.csdn.net/YRY_LIKE_YOU/article/details/113745273)
chrome设置地址: chrome://flags/#unsafely-treat-insecure-origin-as-secure
+#### Q: npm / yarn 配置淘宝镜像源
+
+A: 配置淘宝镜像源,详细可以参考 [【yarn npm 设置淘宝镜像】](https://www.jianshu.com/p/f6f43e8f9d6b)
+
+```bash
+# npm 配置淘宝镜像源
+npm config set registry https://registry.npmmirror.com
+
+# yarn 配置淘宝镜像源
+yarn config set registry http://registry.npm.taobao.org/
+```
+
## 参考资料
vue实现录音参考资料:https://blog.csdn.net/qq_41619796/article/details/107865602#t1
diff --git a/demos/speech_web/speech_server/src/ernie_sat.py b/demos/speech_web/speech_server/src/ernie_sat.py
index b74dd8e3f..02e1ed9d9 100644
--- a/demos/speech_web/speech_server/src/ernie_sat.py
+++ b/demos/speech_web/speech_server/src/ernie_sat.py
@@ -1,5 +1,6 @@
import os
+from .util import get_ngpu
from .util import MAIN_ROOT
from .util import run_cmd
@@ -171,6 +172,7 @@ class SAT:
output_name: str,
source_lang: str,
target_lang: str):
+ ngpu = get_ngpu()
cmd = f"""
FLAGS_allocator_strategy=naive_best_fit \
FLAGS_fraction_of_gpu_memory_to_use=0.01 \
@@ -189,7 +191,8 @@ class SAT:
--voc_config={voc_config} \
--voc_ckpt={voc_ckpt} \
--voc_stat={voc_stat} \
- --output_name={output_name}
+ --output_name={output_name} \
+ --ngpu={ngpu}
"""
return cmd
diff --git a/demos/speech_web/speech_server/src/finetune.py b/demos/speech_web/speech_server/src/finetune.py
index d7a440f9a..6ca99251b 100644
--- a/demos/speech_web/speech_server/src/finetune.py
+++ b/demos/speech_web/speech_server/src/finetune.py
@@ -1,5 +1,6 @@
import os
+from .util import get_ngpu
from .util import MAIN_ROOT
from .util import run_cmd
@@ -38,7 +39,7 @@ class FineTune:
dump_dir = os.path.join(exp_dir, 'dump')
output_dir = os.path.join(exp_dir, 'exp')
lang = "zh"
- ngpu = 1
+ ngpu = get_ngpu()
cmd = f"""
# check oov
@@ -91,7 +92,7 @@ class FineTune:
output_dir = os.path.join(exp_dir, 'exp')
text_path = os.path.join(exp_dir, 'sentences.txt')
lang = "zh"
- ngpu = 1
+ ngpu = get_ngpu()
model_path = f"{output_dir}/checkpoints"
ckpt = find_max_ckpt(model_path)
@@ -117,7 +118,8 @@ class FineTune:
--output_dir={out_wav_dir} \
--phones_dict={dump_dir}/phone_id_map.txt \
--speaker_dict={dump_dir}/speaker_id_map.txt \
- --spk_id=0
+ --spk_id=0 \
+ --ngpu={ngpu}
"""
out_path = os.path.join(out_wav_dir, f"{wav_name}.wav")
diff --git a/demos/speech_web/speech_server/src/ge2e_clone.py b/demos/speech_web/speech_server/src/ge2e_clone.py
index d90013b98..83c2b3f35 100644
--- a/demos/speech_web/speech_server/src/ge2e_clone.py
+++ b/demos/speech_web/speech_server/src/ge2e_clone.py
@@ -1,6 +1,7 @@
import os
import shutil
+from .util import get_ngpu
from .util import MAIN_ROOT
from .util import run_cmd
@@ -30,11 +31,12 @@ class VoiceCloneGE2E():
ref_audio_dir = os.path.realpath("tmp_dir/ge2e")
if os.path.exists(ref_audio_dir):
shutil.rmtree(ref_audio_dir)
- else:
- os.makedirs(ref_audio_dir, exist_ok=True)
- shutil.copy(input_wav, ref_audio_dir)
+
+ os.makedirs(ref_audio_dir, exist_ok=True)
+ shutil.copy(input_wav, ref_audio_dir)
output_dir = os.path.dirname(out_wav)
+ ngpu = get_ngpu()
cmd = f"""
python3 {self.BIN_DIR}/voice_cloning.py \
@@ -50,7 +52,8 @@ class VoiceCloneGE2E():
--text="{text}" \
--input-dir={ref_audio_dir} \
--output-dir={output_dir} \
- --phones-dict={self.phones_dict}
+ --phones-dict={self.phones_dict} \
+ --ngpu={ngpu}
"""
output_name = os.path.join(output_dir, full_file_name)
diff --git a/demos/speech_web/speech_server/src/tdnn_clone.py b/demos/speech_web/speech_server/src/tdnn_clone.py
index c24b9b077..53c5a3816 100644
--- a/demos/speech_web/speech_server/src/tdnn_clone.py
+++ b/demos/speech_web/speech_server/src/tdnn_clone.py
@@ -1,6 +1,7 @@
import os
import shutil
+from .util import get_ngpu
from .util import MAIN_ROOT
from .util import run_cmd
@@ -27,11 +28,11 @@ class VoiceCloneTDNN():
ref_audio_dir = os.path.realpath("tmp_dir/tdnn")
if os.path.exists(ref_audio_dir):
shutil.rmtree(ref_audio_dir)
- else:
- os.makedirs(ref_audio_dir, exist_ok=True)
- shutil.copy(input_wav, ref_audio_dir)
+ os.makedirs(ref_audio_dir, exist_ok=True)
+ shutil.copy(input_wav, ref_audio_dir)
output_dir = os.path.dirname(out_wav)
+ ngpu = get_ngpu()
cmd = f"""
python3 {self.BIN_DIR}/voice_cloning.py \
@@ -47,7 +48,8 @@ class VoiceCloneTDNN():
--input-dir={ref_audio_dir} \
--output-dir={output_dir} \
--phones-dict={self.phones_dict} \
- --use_ecapa=True
+ --use_ecapa=True \
+ --ngpu={ngpu}
"""
output_name = os.path.join(output_dir, full_file_name)
diff --git a/demos/speech_web/speech_server/src/util.py b/demos/speech_web/speech_server/src/util.py
index a69e6c42f..0188f0280 100644
--- a/demos/speech_web/speech_server/src/util.py
+++ b/demos/speech_web/speech_server/src/util.py
@@ -2,10 +2,19 @@ import os
import random
import subprocess
+import paddle
+
NOW_FILE_PATH = os.path.dirname(__file__)
MAIN_ROOT = os.path.realpath(os.path.join(NOW_FILE_PATH, "../../../../"))
+def get_ngpu():
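+ # return 1 when paddle detects a GPU device, otherwise 0 (run the pipelines on CPU)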
+ if paddle.device.get_device() == "cpu":
+ return 0
+ else:
+ return 1
+
+
def randName(n=5):
return "".join(random.sample('zyxwvutsrqponmlkjihgfedcba', n))
diff --git a/demos/speech_web/speech_server/vc.py b/demos/speech_web/speech_server/vc.py
index 99e56b404..d035c02a4 100644
--- a/demos/speech_web/speech_server/vc.py
+++ b/demos/speech_web/speech_server/vc.py
@@ -281,15 +281,18 @@ async def VcCloneG2P(base: VcBaseText):
if base.func == 'ge2e':
wavName = base.wavName
wavPath = os.path.join(VC_OUT_PATH, wavName)
- vc_model.vc(
+ wavPath = vc_model.vc(
text=base.text, input_wav=base.wavPath, out_wav=wavPath)
else:
wavName = base.wavName
wavPath = os.path.join(VC_OUT_PATH, wavName)
- vc_model_tdnn.vc(
+ wavPath = vc_model_tdnn.vc(
text=base.text, input_wav=base.wavPath, out_wav=wavPath)
- res = {"wavName": wavName, "wavPath": wavPath}
- return SuccessRequest(result=res)
+ if wavPath:
+ res = {"wavName": wavName, "wavPath": wavPath}
+ return SuccessRequest(result=res)
+ else:
+ return ErrorRequest(message="克隆失败,检查克隆脚本是否有效")
except Exception as e:
print(e)
return ErrorRequest(message="克隆失败,合成过程报错")
diff --git a/demos/speech_web/web_client/src/components/Experience.vue b/demos/speech_web/web_client/src/components/Experience.vue
index 4f32faf95..ca0e1440f 100644
--- a/demos/speech_web/web_client/src/components/Experience.vue
+++ b/demos/speech_web/web_client/src/components/Experience.vue
@@ -7,7 +7,7 @@ import VPRT from './SubMenu/VPR/VPRT.vue'
import IET from './SubMenu/IE/IET.vue'
import VoiceCloneT from './SubMenu/VoiceClone/VoiceClone.vue'
-import ENIRE_SATT from './SubMenu/ENIRE_SAT/ENIRE_SAT.vue'
+import ERNIE_SATT from './SubMenu/ERNIE_SAT/ERNIE_SAT.vue'
import FineTuneT from './SubMenu/FineTune/FineTune.vue'
@@ -47,8 +47,8 @@ import FineTuneT from './SubMenu/FineTune/FineTune.vue'
-
-
+
+
diff --git a/demos/speech_web/web_client/src/components/SubMenu/ASR/RealTime/RealTime.vue b/demos/speech_web/web_client/src/components/SubMenu/ASR/RealTime/RealTime.vue
index 761a5c11f..5494bb8f8 100644
--- a/demos/speech_web/web_client/src/components/SubMenu/ASR/RealTime/RealTime.vue
+++ b/demos/speech_web/web_client/src/components/SubMenu/ASR/RealTime/RealTime.vue
@@ -58,9 +58,6 @@ export default {
mounted () {
this.wsUrl = apiURL.ASR_SOCKET_RECORD
this.ws = new WebSocket(this.wsUrl)
- if(this.ws.readyState === this.ws.CONNECTING){
- this.$message.success("实时识别 Websocket 连接成功")
- }
var _that = this
this.ws.addEventListener('message', function (event) {
var temp = JSON.parse(event.data);
@@ -78,7 +75,7 @@ export default {
// 检查 websocket 状态
// debugger
if(this.ws.readyState != this.ws.OPEN){
- this.$message.error("websocket 链接失败,请检查链接地址是否正确")
+ this.$message.error("websocket 连接失败,请检查 Websocket 后端服务是否正确开启")
return
}
diff --git a/demos/speech_web/web_client/src/components/SubMenu/ChatBot/Chat.vue b/demos/speech_web/web_client/src/components/SubMenu/ChatBot/Chat.vue
deleted file mode 100644
index 9d356fc80..000000000
--- a/demos/speech_web/web_client/src/components/SubMenu/ChatBot/Chat.vue
+++ /dev/null
@@ -1,298 +0,0 @@
-
-
-
语音聊天
-
- {{ recoText }}
-
- {{ envText }}
-
- 清空聊天
-
-
-
-
-
{{Result}}
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/demos/speech_web/web_client/src/components/SubMenu/ChatBot/ChatT.vue b/demos/speech_web/web_client/src/components/SubMenu/ChatBot/ChatT.vue
index c37c083ff..6db847706 100644
--- a/demos/speech_web/web_client/src/components/SubMenu/ChatBot/ChatT.vue
+++ b/demos/speech_web/web_client/src/components/SubMenu/ChatBot/ChatT.vue
@@ -91,6 +91,10 @@ export default {
methods: {
// 开始录音
startRecorder(){
+ if(this.ws.readyState != this.ws.OPEN){
+ this.$message.error("websocket 连接失败,请检查 Websocket 后端服务是否正确开启")
+ return
+ }
this.allResultList = []
if(!this.onReco){
this.asrResult = this.speakingText
diff --git a/demos/speech_web/web_client/src/components/SubMenu/ENIRE_SAT/ENIRE_SAT.vue b/demos/speech_web/web_client/src/components/SubMenu/ERNIE_SAT/ERNIE_SAT.vue
similarity index 99%
rename from demos/speech_web/web_client/src/components/SubMenu/ENIRE_SAT/ENIRE_SAT.vue
rename to demos/speech_web/web_client/src/components/SubMenu/ERNIE_SAT/ERNIE_SAT.vue
index e1a4f2343..4a0aa2c63 100644
--- a/demos/speech_web/web_client/src/components/SubMenu/ENIRE_SAT/ENIRE_SAT.vue
+++ b/demos/speech_web/web_client/src/components/SubMenu/ERNIE_SAT/ERNIE_SAT.vue
@@ -98,7 +98,7 @@
播放
- 播放
+ 播放
下载
下载
diff --git a/demos/speech_web/web_client/src/components/SubMenu/FineTune/FineTune.vue b/demos/speech_web/web_client/src/components/SubMenu/FineTune/FineTune.vue
index 895dd586d..abf203ae8 100644
--- a/demos/speech_web/web_client/src/components/SubMenu/FineTune/FineTune.vue
+++ b/demos/speech_web/web_client/src/components/SubMenu/FineTune/FineTune.vue
@@ -80,7 +80,7 @@
- 播放
+ 播放
播放
下载
下载
@@ -126,7 +126,7 @@
expPath: '',
wav: '',
wav_base64: '',
- ttsText: '',
+ ttsText: '欢迎使用飞桨语音套件',
cloneWav: '',
onEnrollRec: 0, // 录音状态
diff --git a/demos/speech_web/web_client/src/components/SubMenu/IE/IE.vue b/demos/speech_web/web_client/src/components/SubMenu/IE/IE.vue
deleted file mode 100644
index c7dd04e9d..000000000
--- a/demos/speech_web/web_client/src/components/SubMenu/IE/IE.vue
+++ /dev/null
@@ -1,125 +0,0 @@
-
-
-
信息抽取体验
- {{ recoText }}
- 识别结果: {{ asrResultOffline }}
- 时间:{{ time }}
- 出发地:{{ outset }}
- 目的地:{{ destination }}
- 费用:{{ amount }}
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/demos/speech_web/web_client/src/components/SubMenu/TTS/TTST.vue b/demos/speech_web/web_client/src/components/SubMenu/TTS/TTST.vue
index 353221f7b..ef5591783 100644
--- a/demos/speech_web/web_client/src/components/SubMenu/TTS/TTST.vue
+++ b/demos/speech_web/web_client/src/components/SubMenu/TTS/TTST.vue
@@ -228,6 +228,10 @@ export default {
},
// 基于WS的流式合成
async getTtsChunkWavWS(){
+ if(this.ws.readyState != this.ws.OPEN){
+ this.$message.error("websocket 连接失败,请检查 Websocket 后端服务是否正确开启")
+ return
+ }
// 初始化 chunks
chunks = []
chunk_index = 0
diff --git a/demos/speech_web/web_client/src/components/SubMenu/VPR/VPR.vue b/demos/speech_web/web_client/src/components/SubMenu/VPR/VPR.vue
deleted file mode 100644
index 1fe71e4d8..000000000
--- a/demos/speech_web/web_client/src/components/SubMenu/VPR/VPR.vue
+++ /dev/null
@@ -1,178 +0,0 @@
-
-
-
-
声纹识别展示
-
- {{ recoText }}
- 注册
- 识别
-
-
-
声纹得分结果
-
-
-
-
-
-
-
声纹数据列表
-
-
-
-
-
-
-
-
-
-
-
- Delete
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/demos/speech_web/web_client/src/components/SubMenu/VPR/VPRT.vue b/demos/speech_web/web_client/src/components/SubMenu/VPR/VPRT.vue
index e398da00c..47eb41df5 100644
--- a/demos/speech_web/web_client/src/components/SubMenu/VPR/VPRT.vue
+++ b/demos/speech_web/web_client/src/components/SubMenu/VPR/VPRT.vue
@@ -214,14 +214,17 @@ export default {
let formData = new FormData()
formData.append('spk_id', this.enrollSpkId)
formData.append('audio', this.wav)
-
+
const result = await vprEnroll(formData)
+ if (!result){
+ this.$message.error("请检查后端服务是否正确开启")
+ return
+ }
if(result.data.status){
this.$message.success("声纹注册成功")
} else {
this.$message.error(result.data.msg)
}
- // console.log(result)
this.GetList()
this.wav = ''
this.randomSpkId()
diff --git a/demos/speech_web/web_client/src/components/SubMenu/VoiceClone/VoiceClone.vue b/demos/speech_web/web_client/src/components/SubMenu/VoiceClone/VoiceClone.vue
index 1e380d288..afa572417 100644
--- a/demos/speech_web/web_client/src/components/SubMenu/VoiceClone/VoiceClone.vue
+++ b/demos/speech_web/web_client/src/components/SubMenu/VoiceClone/VoiceClone.vue
@@ -71,7 +71,7 @@
- 播放
+ 播放
播放
下载
下载
@@ -270,6 +270,7 @@ export default {
} else if (this.nowIndex >= this.vcDatas.length){
return this.$message.error("当前序号不可以超过音频个数")
}
+ this.cloneWav = ""
let func = ''
if(this.func_radio === '1'){
func = 'ge2e'
@@ -289,12 +290,12 @@ export default {
}
);
this.g2pOnSys = 0
- if(!result.data.code){
+ if(result.data.code == 0){
this.cloneWav = result.data.result
console.log("clone wav: ", this.cloneWav)
- this.$message.success("音色克隆成功")
+ this.$message.success("音频合成成功")
} else {
- this.$message.error(result.data.msg)
+ this.$message.error("音频合成失败,请检查后台错误后重试!")
}
},
// 播放表格
diff --git a/demos/streaming_asr_server/README.md b/demos/streaming_asr_server/README.md
index a97486757..5eef82866 100644
--- a/demos/streaming_asr_server/README.md
+++ b/demos/streaming_asr_server/README.md
@@ -14,7 +14,7 @@ Streaming ASR server only support `websocket` protocol, and doesn't support `htt
### 1. Installation
see [installation](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/docs/source/install.md).
-It is recommended to use **paddlepaddle 2.3.1** or above.
+It is recommended to use **paddlepaddle 2.4rc** or above.
You can choose one way from easy, medium and hard to install paddlespeech.
diff --git a/demos/streaming_asr_server/README_cn.md b/demos/streaming_asr_server/README_cn.md
index 267367729..1902a2fa9 100644
--- a/demos/streaming_asr_server/README_cn.md
+++ b/demos/streaming_asr_server/README_cn.md
@@ -14,7 +14,7 @@
### 1. 安装
安装 PaddleSpeech 的详细过程请看 [安装文档](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/docs/source/install.md)。
-推荐使用 **paddlepaddle 2.3.1** 或以上版本。
+推荐使用 **paddlepaddle 2.4rc** 或以上版本。
你可以从简单,中等,困难 几种方式中选择一种方式安装 PaddleSpeech。
diff --git a/demos/streaming_asr_server/conf/application.yaml b/demos/streaming_asr_server/conf/application.yaml
index a89d312ab..d446e13b6 100644
--- a/demos/streaming_asr_server/conf/application.yaml
+++ b/demos/streaming_asr_server/conf/application.yaml
@@ -21,7 +21,7 @@ engine_list: ['asr_online']
################################### ASR #########################################
################### speech task: asr; engine_type: online #######################
asr_online:
- model_type: 'conformer_online_wenetspeech'
+ model_type: 'conformer_u2pp_online_wenetspeech'
am_model: # the pdmodel file of am static model [optional]
am_params: # the pdiparams file of am static model [optional]
lang: 'zh'
diff --git a/demos/streaming_tts_server/README.md b/demos/streaming_tts_server/README.md
index 15448a46f..ca5d6f1f8 100644
--- a/demos/streaming_tts_server/README.md
+++ b/demos/streaming_tts_server/README.md
@@ -13,7 +13,7 @@ For service interface definition, please check:
### 1. Installation
see [installation](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/docs/source/install.md).
-It is recommended to use **paddlepaddle 2.3.1** or above.
+It is recommended to use **paddlepaddle 2.4rc** or above.
You can choose one way from easy, medium and hard to install paddlespeech.
diff --git a/demos/streaming_tts_server/README_cn.md b/demos/streaming_tts_server/README_cn.md
index b99155bca..125f37033 100644
--- a/demos/streaming_tts_server/README_cn.md
+++ b/demos/streaming_tts_server/README_cn.md
@@ -12,7 +12,7 @@
### 1. 安装
请看 [安装文档](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/docs/source/install.md).
-推荐使用 **paddlepaddle 2.3.1** 或以上版本。
+推荐使用 **paddlepaddle 2.4rc** 或以上版本。
你可以从简单,中等,困难 几种方式中选择一种方式安装 PaddleSpeech。
diff --git a/demos/streaming_tts_serving_fastdeploy/README.md b/demos/streaming_tts_serving_fastdeploy/README.md
new file mode 100644
index 000000000..3e983a06d
--- /dev/null
+++ b/demos/streaming_tts_serving_fastdeploy/README.md
@@ -0,0 +1,67 @@
+([简体中文](./README_cn.md)|English)
+
+# Streaming Speech Synthesis Service
+
+## Introduction
+This demo shows how to start the streaming speech synthesis service and how to access it.
+
+The `Server` must be started inside the docker container, while the `Client` does not have to run in docker.
+
+**The `streaming_tts_serving` directory under this article's path (`$PWD`) contains the model's configuration and code, and needs to be mapped into the docker container before use.**
+
+## Usage
+### 1. Server
+#### 1.1 Docker
+
+```bash
+docker pull registry.baidubce.com/paddlepaddle/fastdeploy_serving_cpu_only:22.09
+docker run -dit --net=host --name fastdeploy --shm-size="1g" -v $PWD:/models registry.baidubce.com/paddlepaddle/fastdeploy_serving_cpu_only:22.09
+docker exec -it -u root fastdeploy bash
+```
+
+#### 1.2 Installation(inside the docker)
+```bash
+apt-get install build-essential python3-dev libssl-dev libffi-dev libxml2 libxml2-dev libxslt1-dev zlib1g-dev libsndfile1 language-pack-zh-hans wget zip
+pip3 install paddlespeech
+export LC_ALL="zh_CN.UTF-8"
+export LANG="zh_CN.UTF-8"
+export LANGUAGE="zh_CN:zh:en_US:en"
+```
+
+#### 1.3 Download models(inside the docker)
+```bash
+cd /models/streaming_tts_serving/1
+wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_cnndecoder_csmsc_streaming_onnx_1.0.0.zip
+wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/mb_melgan/mb_melgan_csmsc_onnx_0.2.0.zip
+unzip fastspeech2_cnndecoder_csmsc_streaming_onnx_1.0.0.zip
+unzip mb_melgan_csmsc_onnx_0.2.0.zip
+```
+**For convenience, we recommend using the `-v` option of `docker run` (as in step 1.1) to map `$PWD` (the `streaming_tts_serving` directory together with the model's configuration and code) to the docker path `/models`. You can also use other methods, but whichever you use, the final model directory and structure inside docker should look like the following figure.**
+
+<p align="center">
+  <img src="./tree.png" />
+</p>
+
+#### 1.4 Start the server(inside the docker)
+
+```bash
+fastdeployserver --model-repository=/models --model-control-mode=explicit --load-model=streaming_tts_serving
+```
+Arguments:
+ - `model-repository`(required): Path of model storage.
+ - `model-control-mode`(required): The mode of loading the model. At present, you can use 'explicit'.
+ - `load-model`(required): Name of the model to be loaded.
+ - `http-port`(optional): Port for http service. Default: `8000`. This is not used in our example.
+ - `grpc-port`(optional): Port for grpc service. Default: `8001`.
+ - `metrics-port`(optional): Port for metrics service. Default: `8002`. This is not used in our example.
+
+### 2. Client
+#### 2.1 Installation
+```bash
+pip3 install tritonclient[all]
+```
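+
+Before sending requests, you can optionally check that the gRPC endpoint is live and that the model was loaded. Below is a minimal sketch using `tritonclient`; the address and model name follow the defaults used in this demo:
+
+```python
+import tritonclient.grpc as grpcclient
+
+# the gRPC port is 8001 by default; adjust it if you changed --grpc-port
+client = grpcclient.InferenceServerClient(url="localhost:8001")
+print("server live: ", client.is_server_live())
+print("server ready:", client.is_server_ready())
+print("model ready: ", client.is_model_ready("streaming_tts_serving"))
+```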
+
+#### 2.2 Send request
+```bash
+python3 /models/streaming_tts_serving/stream_client.py
+```
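+
+If you want to listen to the synthesized result, the float32 chunks returned in `OUTPUT_0` can be concatenated and written to a wav file. The sketch below is only an illustration (the `save_wav` helper is not part of the demo, and it assumes the 24 kHz sample rate used by the CSMSC models downloaded above):
+
+```python
+import wave
+
+import numpy as np
+
+
+def save_wav(chunks, path="out.wav", sample_rate=24000):
+    """Concatenate float32 audio chunks (values in [-1, 1]) and write 16-bit PCM."""
+    audio = np.concatenate([np.asarray(c).reshape(-1) for c in chunks])
+    pcm16 = (np.clip(audio, -1.0, 1.0) * 32767.0).astype(np.int16)
+    with wave.open(path, "wb") as f:
+        f.setnchannels(1)             # mono
+        f.setsampwidth(2)             # 2 bytes per sample -> int16
+        f.setframerate(sample_rate)
+        f.writeframes(pcm16.tobytes())
+
+
+# For example, collect every `data_item.as_numpy('OUTPUT_0')` into a list inside the
+# receive loop of stream_client.py, then call save_wav(collected_chunks) after it ends.
+```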
diff --git a/demos/streaming_tts_serving_fastdeploy/README_cn.md b/demos/streaming_tts_serving_fastdeploy/README_cn.md
new file mode 100644
index 000000000..7edd32830
--- /dev/null
+++ b/demos/streaming_tts_serving_fastdeploy/README_cn.md
@@ -0,0 +1,67 @@
+(简体中文|[English](./README.md))
+
+# 流式语音合成服务
+
+## 介绍
+
+本文介绍了使用FastDeploy搭建流式语音合成服务的方法。
+
+`服务端`必须在 docker 内启动,而`客户端`则不要求运行在 docker 容器内。
+
+**本文所在路径`($PWD)下的streaming_tts_serving里包含模型的配置和代码`(服务端会加载模型和代码以启动服务),需要将其映射到docker中使用。**
+
+## 使用
+### 1. 服务端
+#### 1.1 Docker
+```bash
+docker pull registry.baidubce.com/paddlepaddle/fastdeploy_serving_cpu_only:22.09
+docker run -dit --net=host --name fastdeploy --shm-size="1g" -v $PWD:/models registry.baidubce.com/paddlepaddle/fastdeploy_serving_cpu_only:22.09
+docker exec -it -u root fastdeploy bash
+```
+
+#### 1.2 安装(在docker内)
+```bash
+apt-get install build-essential python3-dev libssl-dev libffi-dev libxml2 libxml2-dev libxslt1-dev zlib1g-dev libsndfile1 language-pack-zh-hans wget zip
+pip3 install paddlespeech
+export LC_ALL="zh_CN.UTF-8"
+export LANG="zh_CN.UTF-8"
+export LANGUAGE="zh_CN:zh:en_US:en"
+```
+
+#### 1.3 下载模型(在docker内)
+```bash
+cd /models/streaming_tts_serving/1
+wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_cnndecoder_csmsc_streaming_onnx_1.0.0.zip
+wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/mb_melgan/mb_melgan_csmsc_onnx_0.2.0.zip
+unzip fastspeech2_cnndecoder_csmsc_streaming_onnx_1.0.0.zip
+unzip mb_melgan_csmsc_onnx_0.2.0.zip
+```
+**为了方便用户使用,我们推荐使用 1.1 中 `docker run` 命令的 `-v` 选项,将 `$PWD`(streaming_tts_serving 及其中包含的模型配置和代码)映射到 docker 内的 `/models` 路径。用户也可以使用其他办法,但无论使用哪种方法,最终 docker 内的模型目录及结构如下图所示。**
+
+<p align="center">
+  <img src="./tree.png" />
+</p>
+
+#### 1.4 启动服务端(在docker内)
+```bash
+fastdeployserver --model-repository=/models --model-control-mode=explicit --load-model=streaming_tts_serving
+```
+
+参数:
+ - `model-repository`(required): 整套模型streaming_tts_serving存放的路径.
+ - `model-control-mode`(required): 模型加载的方式,现阶段, 使用'explicit'即可.
+ - `load-model`(required): 需要加载的模型的名称.
+ - `http-port`(optional): HTTP服务的端口号. 默认: `8000`. 本示例中未使用该端口.
+ - `grpc-port`(optional): GRPC服务的端口号. 默认: `8001`.
+ - `metrics-port`(optional): 服务端指标的端口号. 默认: `8002`. 本示例中未使用该端口.
+
+### 2. 客户端
+#### 2.1 安装
+```bash
+pip3 install tritonclient[all]
+```
+
+#### 2.2 发送请求
+```bash
+python3 /models/streaming_tts_serving/stream_client.py
+```
diff --git a/demos/streaming_tts_serving_fastdeploy/streaming_tts_serving/1/model.py b/demos/streaming_tts_serving_fastdeploy/streaming_tts_serving/1/model.py
new file mode 100644
index 000000000..46473fdb2
--- /dev/null
+++ b/demos/streaming_tts_serving_fastdeploy/streaming_tts_serving/1/model.py
@@ -0,0 +1,289 @@
+import codecs
+import json
+import math
+import sys
+import threading
+import time
+
+import numpy as np
+import onnxruntime as ort
+import triton_python_backend_utils as pb_utils
+
+from paddlespeech.server.utils.util import denorm
+from paddlespeech.server.utils.util import get_chunks
+from paddlespeech.t2s.frontend.zh_frontend import Frontend
+
+voc_block = 36
+voc_pad = 14
+am_block = 72
+am_pad = 12
+voc_upsample = 300
+
+# model and resource paths
+dir_name = "/models/streaming_tts_serving/1/"
+phones_dict = dir_name + "fastspeech2_cnndecoder_csmsc_streaming_onnx_1.0.0/phone_id_map.txt"
+am_stat_path = dir_name + "fastspeech2_cnndecoder_csmsc_streaming_onnx_1.0.0/speech_stats.npy"
+
+onnx_am_encoder = dir_name + "fastspeech2_cnndecoder_csmsc_streaming_onnx_1.0.0/fastspeech2_csmsc_am_encoder_infer.onnx"
+onnx_am_decoder = dir_name + "fastspeech2_cnndecoder_csmsc_streaming_onnx_1.0.0/fastspeech2_csmsc_am_decoder.onnx"
+onnx_am_postnet = dir_name + "fastspeech2_cnndecoder_csmsc_streaming_onnx_1.0.0/fastspeech2_csmsc_am_postnet.onnx"
+onnx_voc_melgan = dir_name + "mb_melgan_csmsc_onnx_0.2.0/mb_melgan_csmsc.onnx"
+
+frontend = Frontend(phone_vocab_path=phones_dict, tone_vocab_path=None)
+am_mu, am_std = np.load(am_stat_path)
+
+# run inference on CPU
+providers = ['CPUExecutionProvider']
+
+# configure onnxruntime session options
+sess_options = ort.SessionOptions()
+
+# create the onnxruntime inference sessions
+am_encoder_infer_sess = ort.InferenceSession(
+ onnx_am_encoder, providers=providers, sess_options=sess_options)
+am_decoder_sess = ort.InferenceSession(
+ onnx_am_decoder, providers=providers, sess_options=sess_options)
+am_postnet_sess = ort.InferenceSession(
+ onnx_am_postnet, providers=providers, sess_options=sess_options)
+voc_melgan_sess = ort.InferenceSession(
+ onnx_voc_melgan, providers=providers, sess_options=sess_options)
+
+
+def depadding(data, chunk_num, chunk_id, block, pad, upsample):
+ """
+ Streaming inference removes the result of pad inference
+ """
+ front_pad = min(chunk_id * block, pad)
+ # first chunk
+ if chunk_id == 0:
+ data = data[:block * upsample]
+ # last chunk
+ elif chunk_id == chunk_num - 1:
+ data = data[front_pad * upsample:]
+ # middle chunk
+ else:
+ data = data[front_pad * upsample:(front_pad + block) * upsample]
+
+ return data
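+
+# For example, with voc_block=36, voc_pad=14 and voc_upsample=300 (the values above),
+# an interior vocoder chunk is fed pad + block + pad mel frames, and depadding keeps
+# only the samples that belong to this chunk:
+#   chunk 0       -> data[:36 * 300]                 (nothing to drop on the left)
+#   middle chunks -> data[14 * 300:(14 + 36) * 300]  (drop both padded regions)
+#   last chunk    -> data[14 * 300:]                 (keep the remaining tail)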
+
+
+class TritonPythonModel:
+ """Your Python model must use the same class name. Every Python model
+ that is created must have "TritonPythonModel" as the class name.
+ """
+
+ def initialize(self, args):
+ """`initialize` is called only once when the model is being loaded.
+ Implementing `initialize` function is optional. This function allows
+ the model to initialize any state associated with this model.
+ Parameters
+ ----------
+ args : dict
+ Both keys and values are strings. The dictionary keys and values are:
+ * model_config: A JSON string containing the model configuration
+ * model_instance_kind: A string containing model instance kind
+ * model_instance_device_id: A string containing model instance device ID
+ * model_repository: Model repository path
+ * model_version: Model version
+ * model_name: Model name
+ """
+ sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())
+ print(sys.getdefaultencoding())
+ # You must parse model_config. JSON string is not parsed here
+ self.model_config = model_config = json.loads(args['model_config'])
+ print("model_config:", self.model_config)
+
+ using_decoupled = pb_utils.using_decoupled_model_transaction_policy(
+ model_config)
+
+ if not using_decoupled:
+ raise pb_utils.TritonModelException(
+ """the model `{}` can generate any number of responses per request,
+ enable decoupled transaction policy in model configuration to
+ serve this model""".format(args['model_name']))
+
+ self.input_names = []
+ for input_config in self.model_config["input"]:
+ self.input_names.append(input_config["name"])
+ print("input:", self.input_names)
+
+ self.output_names = []
+ self.output_dtype = []
+ for output_config in self.model_config["output"]:
+ self.output_names.append(output_config["name"])
+ dtype = pb_utils.triton_string_to_numpy(output_config["data_type"])
+ self.output_dtype.append(dtype)
+ print("output:", self.output_names)
+
+ # To keep track of response threads so that we can delay
+ # finalizing the model until all response threads
+ # have completed.
+ self.inflight_thread_count = 0
+ self.inflight_thread_count_lck = threading.Lock()
+
+ def execute(self, requests):
+ """`execute` must be implemented in every Python model. `execute`
+ function receives a list of pb_utils.InferenceRequest as the only
+ argument. This function is called when an inference is requested
+ for this model. Depending on the batching configuration (e.g. Dynamic
+ Batching) used, `requests` may contain multiple requests. Every
+ Python model, must create one pb_utils.InferenceResponse for every
+ pb_utils.InferenceRequest in `requests`. If there is an error, you can
+ set the error argument when creating a pb_utils.InferenceResponse.
+ Parameters
+ ----------
+ requests : list
+ A list of pb_utils.InferenceRequest
+ Returns
+ -------
+ list
+ A list of pb_utils.InferenceResponse. The length of this list must
+ be the same as `requests`
+ """
+
+ # This model does not support batching, so 'request_count' should always
+ # be 1.
+ if len(requests) != 1:
+ raise pb_utils.TritonModelException(
+ "unsupported batch size " + str(len(requests)))
+
+ input_data = []
+ for idx in range(len(self.input_names)):
+ data = pb_utils.get_input_tensor_by_name(requests[0],
+ self.input_names[idx])
+ data = data.as_numpy()
+ data = data[0].decode('utf-8')
+ input_data.append(data)
+ text = input_data[0]
+
+ # Start a separate thread to send the responses for the request. The
+ # sending back the responses is delegated to this thread.
+ thread = threading.Thread(
+ target=self.response_thread,
+ args=(requests[0].get_response_sender(), text))
+ thread.daemon = True
+ with self.inflight_thread_count_lck:
+ self.inflight_thread_count += 1
+
+ thread.start()
+ # Unlike in non-decoupled model transaction policy, execute function
+ # here returns no response. A return from this function only notifies
+ # Triton that the model instance is ready to receive another request. As
+ # we are not waiting for the response thread to complete here, it is
+ possible that at any given time the model may be processing multiple
+ # requests. Depending upon the request workload, this may lead to a lot
+ # of requests being processed by a single model instance at a time. In
+ # real-world models, the developer should be mindful of when to return
+ # from execute and be willing to accept next request.
+ return None
+
+ def response_thread(self, response_sender, text):
+ input_ids = frontend.get_input_ids(
+ text, merge_sentences=False, get_tone_ids=False)
+ phone_ids = input_ids["phone_ids"]
+ for i in range(len(phone_ids)):
+ part_phone_ids = phone_ids[i].numpy()
+ voc_chunk_id = 0
+
+ orig_hs = am_encoder_infer_sess.run(
+ None, input_feed={'text': part_phone_ids})
+ orig_hs = orig_hs[0]
+
+ # streaming voc chunk info
+ mel_len = orig_hs.shape[1]
+ voc_chunk_num = math.ceil(mel_len / voc_block)
+ start = 0
+ end = min(voc_block + voc_pad, mel_len)
+
+ # streaming am
+ hss = get_chunks(orig_hs, am_block, am_pad, "am")
+ am_chunk_num = len(hss)
+ for i, hs in enumerate(hss):
+ am_decoder_output = am_decoder_sess.run(
+ None, input_feed={'xs': hs})
+ am_postnet_output = am_postnet_sess.run(
+ None,
+ input_feed={
+ 'xs': np.transpose(am_decoder_output[0], (0, 2, 1))
+ })
+ am_output_data = am_decoder_output + np.transpose(
+ am_postnet_output[0], (0, 2, 1))
+ normalized_mel = am_output_data[0][0]
+
+ sub_mel = denorm(normalized_mel, am_mu, am_std)
+ sub_mel = depadding(sub_mel, am_chunk_num, i, am_block, am_pad,
+ 1)
+
+ if i == 0:
+ mel_streaming = sub_mel
+ else:
+ mel_streaming = np.concatenate(
+ (mel_streaming, sub_mel), axis=0)
+
+ # streaming voc
+ # once streaming AM inference has produced more mel frames than the current vocoder chunk needs, run streaming vocoder inference
+ while (mel_streaming.shape[0] >= end and
+ voc_chunk_id < voc_chunk_num):
+ voc_chunk = mel_streaming[start:end, :]
+
+ sub_wav = voc_melgan_sess.run(
+ output_names=None, input_feed={'logmel': voc_chunk})
+ sub_wav = depadding(sub_wav[0], voc_chunk_num, voc_chunk_id,
+ voc_block, voc_pad, voc_upsample)
+
+ output_np = np.array(sub_wav, dtype=self.output_dtype[0])
+ out_tensor1 = pb_utils.Tensor(self.output_names[0],
+ output_np)
+
+ status = 0 if voc_chunk_id != (voc_chunk_num - 1) else 1
+ output_status = np.array(
+ [status], dtype=self.output_dtype[1])
+ out_tensor2 = pb_utils.Tensor(self.output_names[1],
+ output_status)
+
+ inference_response = pb_utils.InferenceResponse(
+ output_tensors=[out_tensor1, out_tensor2])
+
+ #yield sub_wav
+ response_sender.send(inference_response)
+
+ voc_chunk_id += 1
+ start = max(0, voc_chunk_id * voc_block - voc_pad)
+ end = min((voc_chunk_id + 1) * voc_block + voc_pad, mel_len)
+
+ # We must close the response sender to indicate to Triton that we are
+ # done sending responses for the corresponding request. We can't use the
+ # response sender after closing it. The response sender is closed by
+ # setting the TRITONSERVER_RESPONSE_COMPLETE_FINAL.
+ response_sender.send(
+ flags=pb_utils.TRITONSERVER_RESPONSE_COMPLETE_FINAL)
+
+ with self.inflight_thread_count_lck:
+ self.inflight_thread_count -= 1
+
+ def finalize(self):
+ """`finalize` is called only once when the model is being unloaded.
+ Implementing `finalize` function is OPTIONAL. This function allows
+ the model to perform any necessary clean ups before exit.
+ Here we will wait for all response threads to complete sending
+ responses.
+ """
+ print('Finalize invoked')
+
+ inflight_threads = True
+ cycles = 0
+ logging_time_sec = 5
+ sleep_time_sec = 0.1
+ cycle_to_log = (logging_time_sec / sleep_time_sec)
+ while inflight_threads:
+ with self.inflight_thread_count_lck:
+ inflight_threads = (self.inflight_thread_count != 0)
+ if (cycles % cycle_to_log == 0):
+ print(
+ f"Waiting for {self.inflight_thread_count} response threads to complete..."
+ )
+ if inflight_threads:
+ time.sleep(sleep_time_sec)
+ cycles += 1
+
+ print('Finalize complete...')
diff --git a/demos/streaming_tts_serving_fastdeploy/streaming_tts_serving/config.pbtxt b/demos/streaming_tts_serving_fastdeploy/streaming_tts_serving/config.pbtxt
new file mode 100644
index 000000000..e63721d1c
--- /dev/null
+++ b/demos/streaming_tts_serving_fastdeploy/streaming_tts_serving/config.pbtxt
@@ -0,0 +1,33 @@
+name: "streaming_tts_serving"
+backend: "python"
+max_batch_size: 0
+model_transaction_policy {
+ decoupled: True
+}
+input [
+ {
+ name: "INPUT_0"
+ data_type: TYPE_STRING
+ dims: [ 1 ]
+ }
+]
+
+output [
+ {
+ name: "OUTPUT_0"
+ data_type: TYPE_FP32
+ dims: [ -1, 1 ]
+ },
+ {
+ name: "status"
+ data_type: TYPE_BOOL
+ dims: [ 1 ]
+ }
+]
+
+instance_group [
+ {
+ count: 1
+ kind: KIND_CPU
+ }
+]
diff --git a/demos/streaming_tts_serving_fastdeploy/streaming_tts_serving/stream_client.py b/demos/streaming_tts_serving_fastdeploy/streaming_tts_serving/stream_client.py
new file mode 100644
index 000000000..e7f120b7d
--- /dev/null
+++ b/demos/streaming_tts_serving_fastdeploy/streaming_tts_serving/stream_client.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+import argparse
+import queue
+import sys
+from functools import partial
+
+import numpy as np
+import tritonclient.grpc as grpcclient
+from tritonclient.utils import *
+
+FLAGS = None
+
+
+class UserData:
+ def __init__(self):
+ self._completed_requests = queue.Queue()
+
+
+# Define the callback function. Note the last two parameters should be
+# result and error. InferenceServerClient would provide the results of an
+# inference as grpcclient.InferResult in result. For successful
+# inference, error will be None, otherwise it will be an object of
+# tritonclientutils.InferenceServerException holding the error details
+def callback(user_data, result, error):
+ if error:
+ user_data._completed_requests.put(error)
+ else:
+ user_data._completed_requests.put(result)
+
+
+def async_stream_send(triton_client, values, request_id, model_name):
+
+ infer_inputs = []
+ outputs = []
+ for idx, data in enumerate(values):
+ data = np.array([data.encode('utf-8')], dtype=np.object_)
+ infer_input = grpcclient.InferInput('INPUT_0', [len(data)], "BYTES")
+ infer_input.set_data_from_numpy(data)
+ infer_inputs.append(infer_input)
+
+ outputs.append(grpcclient.InferRequestedOutput('OUTPUT_0'))
+ # Issue the asynchronous sequence inference.
+ triton_client.async_stream_infer(
+ model_name=model_name,
+ inputs=infer_inputs,
+ outputs=outputs,
+ request_id=request_id)
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ '-v',
+ '--verbose',
+ action="store_true",
+ required=False,
+ default=False,
+ help='Enable verbose output')
+ parser.add_argument(
+ '-u',
+ '--url',
+ type=str,
+ required=False,
+ default='localhost:8001',
+ help='Inference server URL and its gRPC port. Default is localhost:8001.')
+
+ FLAGS = parser.parse_args()
+
+ # The streaming_tts_serving model takes a single text input and streams
+ # back audio chunks plus a status flag over the decoupled gRPC stream.
+ model_name = "streaming_tts_serving"
+
+ values = ["哈哈哈哈"]
+
+ request_id = "0"
+
+ string_result0_list = []
+
+ user_data = UserData()
+
+ # It is advisable to use client object within with..as clause
+ # when sending streaming requests. This ensures the client
+ # is closed when the block inside with exits.
+ with grpcclient.InferenceServerClient(
+ url=FLAGS.url, verbose=FLAGS.verbose) as triton_client:
+ try:
+ # Establish stream
+ triton_client.start_stream(callback=partial(callback, user_data))
+ # Now send the inference sequences...
+ async_stream_send(triton_client, values, request_id, model_name)
+ except InferenceServerException as error:
+ print(error)
+ sys.exit(1)
+
+ # Retrieve results...
+ recv_count = 0
+ result_dict = {}
+ status = True
+ while True:
+ data_item = user_data._completed_requests.get()
+ if type(data_item) == InferenceServerException:
+ raise data_item
+ else:
+ this_id = data_item.get_response().id
+ if this_id not in result_dict.keys():
+ result_dict[this_id] = []
+ result_dict[this_id].append((recv_count, data_item))
+ sub_wav = data_item.as_numpy('OUTPUT_0')
+ status = data_item.as_numpy('status')
+ print('sub_wav = ', sub_wav, 'sub_wav.shape = ', sub_wav.shape)
+ print('status = ', status)
+ if status[0] == 1:
+ break
+ recv_count += 1
+
+ print("PASS: stream_client")
diff --git a/demos/streaming_tts_serving_fastdeploy/tree.png b/demos/streaming_tts_serving_fastdeploy/tree.png
new file mode 100644
index 000000000..b8d61686a
Binary files /dev/null and b/demos/streaming_tts_serving_fastdeploy/tree.png differ
diff --git a/docker/ubuntu16-gpu/Dockerfile b/docker/ubuntu16-gpu/Dockerfile
index f275471ee..a8c11e37b 100644
--- a/docker/ubuntu16-gpu/Dockerfile
+++ b/docker/ubuntu16-gpu/Dockerfile
@@ -62,7 +62,7 @@ RUN mkdir -p ~/.pip && echo "[global]" > ~/.pip/pip.conf && \
echo "index-url=https://mirror.baidu.com/pypi/simple" >> ~/.pip/pip.conf && \
echo "trusted-host=mirror.baidu.com" >> ~/.pip/pip.conf && \
python3 -m pip install --upgrade pip && \
- pip install paddlepaddle-gpu==2.3.1.post112 -f https://www.paddlepaddle.org.cn/whl/linux/mkl/avx/stable.html && \
+ pip install paddlepaddle-gpu==2.4.0rc0.post112 -f https://www.paddlepaddle.org.cn/whl/linux/mkl/avx/stable.html && \
rm -rf ~/.cache/pip
RUN git clone https://github.com/PaddlePaddle/PaddleSpeech.git && cd PaddleSpeech && \
diff --git a/docs/source/install.md b/docs/source/install.md
index 6a9ff3bc8..1e6c1c48b 100644
--- a/docs/source/install.md
+++ b/docs/source/install.md
@@ -61,6 +61,13 @@ Then you can use the following commands:
pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple
pip install paddlespeech -i https://pypi.tuna.tsinghua.edu.cn/simple
```
+You can also specify the version of paddlepaddle or install the develop version.
+```bash
+# install the 2.3.1 version. Note: 2.3.1 is only an example; choose any version that meets PaddleSpeech's minimum paddlepaddle requirement
+pip install paddlepaddle==2.3.1 -i https://mirror.baidu.com/pypi/simple
+# install develop version
+pip install paddlepaddle==0.0.0 -f https://www.paddlepaddle.org.cn/whl/linux/cpu-mkl/develop.html
+```
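+After installing paddlepaddle, you can run a quick sanity check; `paddle.utils.run_check()` runs a small program and reports whether the installation works on your machine:
+```python
+import paddle
+
+print(paddle.__version__)  # e.g. 2.3.1, or 0.0.0 for the develop build
+paddle.utils.run_check()   # prints a success message if paddlepaddle works
+```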
> If you encounter problem with downloading **nltk_data** while using paddlespeech, it maybe due to your poor network, we suggest you download the [nltk_data](https://paddlespeech.bj.bcebos.com/Parakeet/tools/nltk_data.tar.gz) provided by us, and extract it to your `${HOME}`.
> If you fail to install paddlespeech-ctcdecoders, you only can not use deepspeech2 model inference. For other models, it doesn't matter.
@@ -117,9 +124,14 @@ conda install -y -c gcc_linux-64=8.4.0 gxx_linux-64=8.4.0
```
(Tip: Do not use the last command if you want to install the **Hard** way.)
### Install PaddlePaddle
-You can choose the `PaddlePaddle` version based on your system. For example, for CUDA 10.2, CuDNN7.5 install paddlepaddle-gpu 2.3.1:
+You can choose the `PaddlePaddle` version based on your system. For example, for CUDA 10.2 and cuDNN 7.6, install paddlepaddle-gpu 2.4rc:
+```bash
+# Note: 2.4rc is only an example; choose any version that meets PaddleSpeech's minimum paddlepaddle requirement
+python3 -m pip install paddlepaddle-gpu==2.4.0rc0 -i https://mirror.baidu.com/pypi/simple
+```
+You can also install the develop version of paddlepaddle. For example, for CUDA 10.2 and cuDNN 7.6, install the paddlepaddle-gpu develop build:
```bash
-python3 -m pip install paddlepaddle-gpu==2.3.1 -i https://mirror.baidu.com/pypi/simple
+python3 -m pip install paddlepaddle-gpu==0.0.0.post102 -f https://www.paddlepaddle.org.cn/whl/linux/gpu/develop.html
```
### Install PaddleSpeech
You can install `paddlespeech` by the following command,then you can use the `ready-made` examples in `paddlespeech` :
@@ -180,9 +192,14 @@ Some users may fail to install `kaldiio` due to the default download source, you
```bash
pip install pytest-runner -i https://pypi.tuna.tsinghua.edu.cn/simple
```
-Make sure you have GPU and the paddlepaddle version is right. For example, for CUDA 10.2, CuDNN7.5 install paddle 2.3.1:
+Make sure you have a GPU and the right paddlepaddle version. For example, for CUDA 10.2 and cuDNN 7.6, install paddle 2.4rc:
+```bash
+# Note: 2.4rc is only an example; choose any version that meets PaddleSpeech's minimum paddlepaddle requirement
+python3 -m pip install paddlepaddle-gpu==2.4.0rc0 -i https://mirror.baidu.com/pypi/simple
+```
+You can also install the develop version of paddlepaddle. For example, for CUDA 10.2 and cuDNN 7.6, install the paddlepaddle-gpu develop build:
```bash
-python3 -m pip install paddlepaddle-gpu==2.3.1 -i https://mirror.baidu.com/pypi/simple
+python3 -m pip install paddlepaddle-gpu==0.0.0.post102 -f https://www.paddlepaddle.org.cn/whl/linux/gpu/develop.html
```
### Install PaddleSpeech in Developing Mode
```bash
diff --git a/docs/source/install_cn.md b/docs/source/install_cn.md
index 9f49ebad6..ebc0cf7a2 100644
--- a/docs/source/install_cn.md
+++ b/docs/source/install_cn.md
@@ -56,7 +56,14 @@ pip install pytest-runner -i https://pypi.tuna.tsinghua.edu.cn/simple
然后你可以使用如下命令:
```bash
pip install paddlepaddle -i https://mirror.baidu.com/pypi/simple
-pip install paddlespeech -i https://pypi.tuna.tsinghua.edu.cn/simple
+pip install paddlespeech -i https://pypi.tuna.tsinghua.edu.cn/simple
+```
+你也可以安装指定版本的paddlepaddle,或者安装 develop 版本。
+```bash
+# 安装2.3.1版本. 注意:2.3.1只是一个示例,请按照对paddlepaddle的最小依赖进行选择。
+pip install paddlepaddle==2.3.1 -i https://mirror.baidu.com/pypi/simple
+# 安装 develop 版本
+pip install paddlepaddle==0.0.0 -f https://www.paddlepaddle.org.cn/whl/linux/cpu-mkl/develop.html
```
> 如果您在使用 paddlespeech 的过程中遇到关于下载 **nltk_data** 的问题,可能是您的网络不佳,我们建议您下载我们提供的 [nltk_data](https://paddlespeech.bj.bcebos.com/Parakeet/tools/nltk_data.tar.gz) 并解压缩到您的 `${HOME}` 目录下。
@@ -111,9 +118,14 @@ conda install -y -c gcc_linux-64=8.4.0 gxx_linux-64=8.4.0
```
(提示: 如果你想使用**困难**方式完成安装,请不要使用最后一条命令)
### 安装 PaddlePaddle
-你可以根据系统配置选择 PaddlePaddle 版本,例如系统使用 CUDA 10.2, CuDNN7.5 ,你可以安装 paddlepaddle-gpu 2.3.1:
+你可以根据系统配置选择 PaddlePaddle 版本,例如系统使用 CUDA 10.2, CuDNN7.6,你可以安装 paddlepaddle-gpu 2.4rc:
+```bash
+# 注意:2.4rc 只是一个示例,请按照对paddlepaddle的最小依赖进行选择。
+python3 -m pip install paddlepaddle-gpu==2.4.0rc0 -i https://mirror.baidu.com/pypi/simple
+```
+你也可以安装 develop 版本的PaddlePaddle. 例如系统使用 CUDA 10.2, CuDNN7.6 ,你可以安装 paddlepaddle-gpu develop:
```bash
-python3 -m pip install paddlepaddle-gpu==2.3.1 -i https://mirror.baidu.com/pypi/simple
+python3 -m pip install paddlepaddle-gpu==0.0.0.post102 -f https://www.paddlepaddle.org.cn/whl/linux/gpu/develop.html
```
### 安装 PaddleSpeech
最后安装 `paddlespeech`,这样你就可以使用 `paddlespeech` 中已有的 examples:
@@ -168,13 +180,18 @@ conda activate tools/venv
conda install -y -c conda-forge sox libsndfile swig bzip2 libflac bc
```
### 安装 PaddlePaddle
-请确认你系统是否有 GPU,并且使用了正确版本的 paddlepaddle。例如系统使用 CUDA 10.2, CuDNN7.5 ,你可以安装 paddlepaddle-gpu 2.3.1:
+请确认你系统是否有 GPU,并且使用了正确版本的 paddlepaddle。例如系统使用 CUDA 10.2, CuDNN7.6 ,你可以安装 paddlepaddle-gpu 2.4rc:
+```bash
+python3 -m pip install paddlepaddle-gpu==2.4.0rc0 -i https://mirror.baidu.com/pypi/simple
+```
+你也可以安装 develop 版本的PaddlePaddle. 例如系统使用 CUDA 10.2, CuDNN7.6 ,你可以安装 paddlepaddle-gpu develop:
```bash
-python3 -m pip install paddlepaddle-gpu==2.3.1 -i https://mirror.baidu.com/pypi/simple
+python3 -m pip install paddlepaddle-gpu==0.0.0.post102 -f https://www.paddlepaddle.org.cn/whl/linux/gpu/develop.html
```
### 用开发者模式安装 PaddleSpeech
部分用户系统由于默认源的问题,安装中会出现 kaldiio 安装出错的问题,建议首先安装 pytest-runner:
```bash
+# 注意:2.4rc 只是一个示例,请按照对paddlepaddle的最小依赖进行选择。
pip install pytest-runner -i https://pypi.tuna.tsinghua.edu.cn/simple
```
然后安装 PaddleSpeech:
diff --git a/docs/source/reference.md b/docs/source/reference.md
index 0d36d96f7..9a47a2302 100644
--- a/docs/source/reference.md
+++ b/docs/source/reference.md
@@ -28,6 +28,8 @@ We borrowed a lot of code from these repos to build `model` and `engine`, thanks
* [speechbrain](https://github.com/speechbrain/speechbrain/blob/develop/LICENSE)
- Apache-2.0 License
- ECAPA-TDNN SV model
+- ASR with CTC and pre-trained wav2vec2 models.
+
* [chainer](https://github.com/chainer/chainer/blob/master/LICENSE)
- MIT License
@@ -43,3 +45,7 @@ We borrowed a lot of code from these repos to build `model` and `engine`, thanks
* [g2pW](https://github.com/GitYCC/g2pW/blob/master/LICENCE)
- Apache-2.0 license
+
* [transformers](https://github.com/huggingface/transformers)
+- Apache-2.0 License
+- Wav2vec2.0
diff --git a/docs/source/released_model.md b/docs/source/released_model.md
index d6691812e..4e76da033 100644
--- a/docs/source/released_model.md
+++ b/docs/source/released_model.md
@@ -9,6 +9,7 @@ Acoustic Model | Training Data | Token-based | Size | Descriptions | CER | WER |
[Ds2 Online Aishell ASR0 Model](https://paddlespeech.bj.bcebos.com/s2t/aishell/asr0/asr0_deepspeech2_online_aishell_fbank161_ckpt_0.2.1.model.tar.gz) | Aishell Dataset | Char-based | 491 MB | 2 Conv + 5 LSTM layers | 0.0666 |-| 151 h | [D2 Online Aishell ASR0](../../examples/aishell/asr0) | onnx/inference/python |
[Ds2 Offline Aishell ASR0 Model](https://paddlespeech.bj.bcebos.com/s2t/aishell/asr0/asr0_deepspeech2_offline_aishell_ckpt_1.0.1.model.tar.gz)| Aishell Dataset | Char-based | 1.4 GB | 2 Conv + 5 bidirectional LSTM layers| 0.0554 |-| 151 h | [Ds2 Offline Aishell ASR0](../../examples/aishell/asr0) | inference/python |
[Conformer Online Wenetspeech ASR1 Model](https://paddlespeech.bj.bcebos.com/s2t/wenetspeech/asr1/asr1_chunk_conformer_wenetspeech_ckpt_1.0.0a.model.tar.gz) | WenetSpeech Dataset | Char-based | 457 MB | Encoder:Conformer, Decoder:Transformer, Decoding method: Attention rescoring| 0.11 (test\_net) 0.1879 (test\_meeting) |-| 10000 h |- | python |
+[Conformer U2PP Online Wenetspeech ASR1 Model](https://paddlespeech.bj.bcebos.com/s2t/wenetspeech/asr1/asr1_chunk_conformer_u2pp_wenetspeech_ckpt_1.1.4.model.tar.gz) | WenetSpeech Dataset | Char-based | 476 MB | Encoder:Conformer, Decoder:BiTransformer, Decoding method: Attention rescoring| 0.047198 (aishell test\_-1) 0.059212 (aishell test\_16) |-| 10000 h |- | python |
[Conformer Online Aishell ASR1 Model](https://paddlespeech.bj.bcebos.com/s2t/aishell/asr1/asr1_chunk_conformer_aishell_ckpt_0.2.0.model.tar.gz) | Aishell Dataset | Char-based | 189 MB | Encoder:Conformer, Decoder:Transformer, Decoding method: Attention rescoring| 0.0544 |-| 151 h | [Conformer Online Aishell ASR1](../../examples/aishell/asr1) | python |
[Conformer Offline Aishell ASR1 Model](https://paddlespeech.bj.bcebos.com/s2t/aishell/asr1/asr1_conformer_aishell_ckpt_1.0.1.model.tar.gz) | Aishell Dataset | Char-based | 189 MB | Encoder:Conformer, Decoder:Transformer, Decoding method: Attention rescoring | 0.0460 |-| 151 h | [Conformer Offline Aishell ASR1](../../examples/aishell/asr1) | python |
[Transformer Aishell ASR1 Model](https://paddlespeech.bj.bcebos.com/s2t/aishell/asr1/asr1_transformer_aishell_ckpt_0.1.1.model.tar.gz) | Aishell Dataset | Char-based | 128 MB | Encoder:Transformer, Decoder:Transformer, Decoding method: Attention rescoring | 0.0523 || 151 h | [Transformer Aishell ASR1](../../examples/aishell/asr1) | python |
@@ -17,6 +18,12 @@ Acoustic Model | Training Data | Token-based | Size | Descriptions | CER | WER |
[Transformer Librispeech ASR1 Model](https://paddlespeech.bj.bcebos.com/s2t/librispeech/asr1/asr1_transformer_librispeech_ckpt_0.1.1.model.tar.gz) | Librispeech Dataset | subword-based | 131 MB | Encoder:Transformer, Decoder:Transformer, Decoding method: Attention rescoring |-| 0.0381 | 960 h | [Transformer Librispeech ASR1](../../examples/librispeech/asr1) | python |
[Transformer Librispeech ASR2 Model](https://paddlespeech.bj.bcebos.com/s2t/librispeech/asr2/asr2_transformer_librispeech_ckpt_0.1.1.model.tar.gz) | Librispeech Dataset | subword-based | 131 MB | Encoder:Transformer, Decoder:Transformer, Decoding method: JoinCTC w/ LM |-| 0.0240 | 960 h | [Transformer Librispeech ASR2](../../examples/librispeech/asr2) | python |
+### Self-Supervised Pre-trained Model
+Model | Pre-Train Method | Pre-Train Data | Finetune Data | Size | Descriptions | CER | WER | Example Link |
+:-------------:| :------------:| :-----: | -----: | :-----: |:-----:| :-----: | :-----: | :-----: |
+[Wav2vec2-large-960h-lv60-self Model](https://paddlespeech.bj.bcebos.com/wav2vec/wav2vec2-large-960h-lv60-self.pdparams) | wav2vec2 | Librispeech and LV-60k Dataset (53,000 h) | - | 1.18 GB | Pre-trained Wav2vec2.0 Model | - | - | - |
+[Wav2vec2ASR-large-960h-librispeech Model](https://paddlespeech.bj.bcebos.com/s2t/librispeech/asr3/wav2vec2ASR-large-960h-librispeech_ckpt_1.3.0.model.tar.gz) | wav2vec2 | Librispeech and LV-60k Dataset (53,000 h) | Librispeech (960 h) | 1.18 GB | Encoder: Wav2vec2.0, Decoder: CTC, Decoding method: Greedy search | - | 0.0189 | [Wav2vecASR Librispeech ASR3](../../examples/librispeech/asr3) |
+
### Language Model based on NGram
Language Model | Training Data | Token-based | Size | Descriptions
:------------:| :------------:|:------------: | :------------: | :------------:
diff --git a/examples/aishell/asr0/local/train.sh b/examples/aishell/asr0/local/train.sh
index 256b30d22..2b71b7f76 100755
--- a/examples/aishell/asr0/local/train.sh
+++ b/examples/aishell/asr0/local/train.sh
@@ -26,6 +26,10 @@ if [ ${seed} != 0 ]; then
export FLAGS_cudnn_deterministic=True
fi
+# the default memory allocator strategy may cause GPU training to hang
+# because no OOM error is raised when memory is exhausted
+export FLAGS_allocator_strategy=naive_best_fit
+
if [ ${ngpu} == 0 ]; then
python3 -u ${BIN_DIR}/train.py \
--ngpu ${ngpu} \
diff --git a/examples/aishell/asr1/local/train.sh b/examples/aishell/asr1/local/train.sh
index f514de303..bfa8dd97d 100755
--- a/examples/aishell/asr1/local/train.sh
+++ b/examples/aishell/asr1/local/train.sh
@@ -35,6 +35,10 @@ echo ${ips_config}
mkdir -p exp
+# the default memory allocator strategy may cause GPU training to hang
+# because no OOM error is raised when memory is exhausted
+export FLAGS_allocator_strategy=naive_best_fit
+
if [ ${ngpu} == 0 ]; then
python3 -u ${BIN_DIR}/train.py \
--ngpu ${ngpu} \
diff --git a/examples/iwslt2012/punc0/conf/ernie-3.0-base.yaml b/examples/iwslt2012/punc0/conf/ernie-3.0-base.yaml
new file mode 100644
index 000000000..845b13fd8
--- /dev/null
+++ b/examples/iwslt2012/punc0/conf/ernie-3.0-base.yaml
@@ -0,0 +1,44 @@
+###########################################################
+# DATA SETTING #
+###########################################################
+dataset_type: Ernie
+train_path: data/iwslt2012_zh/train.txt
+dev_path: data/iwslt2012_zh/dev.txt
+test_path: data/iwslt2012_zh/test.txt
+batch_size: 64
+num_workers: 2
+data_params:
+ pretrained_token: ernie-3.0-base-zh
+ punc_path: data/iwslt2012_zh/punc_vocab
+ seq_len: 100
+
+
+###########################################################
+# MODEL SETTING #
+###########################################################
+model_type: ErnieLinear
+model:
+ pretrained_token: ernie-3.0-base-zh
+ num_classes: 4
+
+###########################################################
+# OPTIMIZER SETTING #
+###########################################################
+optimizer_params:
+ weight_decay: 1.0e-6 # weight decay coefficient.
+
+scheduler_params:
+ learning_rate: 1.0e-5 # learning rate.
+ gamma: 0.9999 # scheduler gamma; must be in (0.0, 1.0), and closer to 1.0 is better.
+
+###########################################################
+# TRAINING SETTING #
+###########################################################
+max_epoch: 20
+
+###########################################################
+# OTHER SETTING #
+###########################################################
+num_snapshots: 10 # max number of snapshots to keep while training
+seed: 42 # random seed for paddle, random, and np.random
diff --git a/examples/iwslt2012/punc0/conf/ernie-3.0-medium.yaml b/examples/iwslt2012/punc0/conf/ernie-3.0-medium.yaml
new file mode 100644
index 000000000..392ba011c
--- /dev/null
+++ b/examples/iwslt2012/punc0/conf/ernie-3.0-medium.yaml
@@ -0,0 +1,44 @@
+###########################################################
+# DATA SETTING #
+###########################################################
+dataset_type: Ernie
+train_path: data/iwslt2012_zh/train.txt
+dev_path: data/iwslt2012_zh/dev.txt
+test_path: data/iwslt2012_zh/test.txt
+batch_size: 64
+num_workers: 2
+data_params:
+ pretrained_token: ernie-3.0-medium-zh
+ punc_path: data/iwslt2012_zh/punc_vocab
+ seq_len: 100
+
+
+###########################################################
+# MODEL SETTING #
+###########################################################
+model_type: ErnieLinear
+model:
+ pretrained_token: ernie-3.0-medium-zh
+ num_classes: 4
+
+###########################################################
+# OPTIMIZER SETTING #
+###########################################################
+optimizer_params:
+ weight_decay: 1.0e-6 # weight decay coefficient.
+
+scheduler_params:
+ learning_rate: 1.0e-5 # learning rate.
+ gamma: 0.9999 # scheduler gamma; must be in (0.0, 1.0), and closer to 1.0 is better.
+
+###########################################################
+# TRAINING SETTING #
+###########################################################
+max_epoch: 20
+
+###########################################################
+# OTHER SETTING #
+###########################################################
+num_snapshots: 10 # max number of snapshots to keep while training
+seed: 42 # random seed for paddle, random, and np.random
diff --git a/examples/iwslt2012/punc0/conf/ernie-3.0-mini.yaml b/examples/iwslt2012/punc0/conf/ernie-3.0-mini.yaml
new file mode 100644
index 000000000..c57fd94a8
--- /dev/null
+++ b/examples/iwslt2012/punc0/conf/ernie-3.0-mini.yaml
@@ -0,0 +1,44 @@
+###########################################################
+# DATA SETTING #
+###########################################################
+dataset_type: Ernie
+train_path: data/iwslt2012_zh/train.txt
+dev_path: data/iwslt2012_zh/dev.txt
+test_path: data/iwslt2012_zh/test.txt
+batch_size: 64
+num_workers: 2
+data_params:
+ pretrained_token: ernie-3.0-mini-zh
+ punc_path: data/iwslt2012_zh/punc_vocab
+ seq_len: 100
+
+
+###########################################################
+# MODEL SETTING #
+###########################################################
+model_type: ErnieLinear
+model:
+ pretrained_token: ernie-3.0-mini-zh
+ num_classes: 4
+
+###########################################################
+# OPTIMIZER SETTING #
+###########################################################
+optimizer_params:
+ weight_decay: 1.0e-6 # weight decay coefficient.
+
+scheduler_params:
+ learning_rate: 1.0e-5 # learning rate.
+ gamma: 0.9999 # scheduler gamma; must be in (0.0, 1.0), and closer to 1.0 is better.
+
+###########################################################
+# TRAINING SETTING #
+###########################################################
+max_epoch: 20
+
+###########################################################
+# OTHER SETTING #
+###########################################################
+num_snapshots: 10 # max number of snapshots to keep while training
+seed: 42 # random seed for paddle, random, and np.random
diff --git a/examples/iwslt2012/punc0/conf/ernie-3.0-nano-zh.yaml b/examples/iwslt2012/punc0/conf/ernie-3.0-nano-zh.yaml
new file mode 100644
index 000000000..a7a84c4c1
--- /dev/null
+++ b/examples/iwslt2012/punc0/conf/ernie-3.0-nano-zh.yaml
@@ -0,0 +1,44 @@
+###########################################################
+# DATA SETTING #
+###########################################################
+dataset_type: Ernie
+train_path: data/iwslt2012_zh/train.txt
+dev_path: data/iwslt2012_zh/dev.txt
+test_path: data/iwslt2012_zh/test.txt
+batch_size: 64
+num_workers: 2
+data_params:
+ pretrained_token: ernie-3.0-nano-zh
+ punc_path: data/iwslt2012_zh/punc_vocab
+ seq_len: 100
+
+
+###########################################################
+# MODEL SETTING #
+###########################################################
+model_type: ErnieLinear
+model:
+ pretrained_token: ernie-3.0-nano-zh
+ num_classes: 4
+
+###########################################################
+# OPTIMIZER SETTING #
+###########################################################
+optimizer_params:
+ weight_decay: 1.0e-6 # weight decay coefficient.
+
+scheduler_params:
+ learning_rate: 1.0e-5 # learning rate.
+ gamma: 0.9999 # scheduler gamma; must be in (0.0, 1.0), and closer to 1.0 is better.
+
+###########################################################
+# TRAINING SETTING #
+###########################################################
+max_epoch: 20
+
+###########################################################
+# OTHER SETTING #
+###########################################################
+num_snapshots: 10 # max number of snapshots to keep while training
+seed: 42 # random seed for paddle, random, and np.random
diff --git a/examples/iwslt2012/punc0/conf/ernie-tiny.yaml b/examples/iwslt2012/punc0/conf/ernie-tiny.yaml
new file mode 100644
index 000000000..6a5b7fee2
--- /dev/null
+++ b/examples/iwslt2012/punc0/conf/ernie-tiny.yaml
@@ -0,0 +1,44 @@
+###########################################################
+# DATA SETTING #
+###########################################################
+dataset_type: Ernie
+train_path: data/iwslt2012_zh/train.txt
+dev_path: data/iwslt2012_zh/dev.txt
+test_path: data/iwslt2012_zh/test.txt
+batch_size: 64
+num_workers: 2
+data_params:
+ pretrained_token: ernie-tiny
+ punc_path: data/iwslt2012_zh/punc_vocab
+ seq_len: 100
+
+
+###########################################################
+# MODEL SETTING #
+###########################################################
+model_type: ErnieLinear
+model:
+ pretrained_token: ernie-tiny
+ num_classes: 4
+
+###########################################################
+# OPTIMIZER SETTING #
+###########################################################
+optimizer_params:
+ weight_decay: 1.0e-6 # weight decay coefficient.
+
+scheduler_params:
+ learning_rate: 1.0e-5 # learning rate.
+ gamma: 0.9999 # scheduler gamma; must be in (0.0, 1.0), and closer to 1.0 is better.
+
+###########################################################
+# TRAINING SETTING #
+###########################################################
+max_epoch: 20
+
+###########################################################
+# OTHER SETTING #
+###########################################################
+num_snapshots: 10 # max number of snapshots to keep while training
+seed: 42 # random seed for paddle, random, and np.random
diff --git a/examples/librispeech/README.md b/examples/librispeech/README.md
index 74441fd09..9fcbde97a 100644
--- a/examples/librispeech/README.md
+++ b/examples/librispeech/README.md
@@ -3,7 +3,7 @@
* asr0 - deepspeech2 Streaming/Non-Streaming
* asr1 - transformer/conformer Streaming/Non-Streaming
* asr2 - transformer/conformer Streaming/Non-Streaming with Kaldi feature
-
+* asr3 - wav2vecASR, ASR model with pre-trained wav2vec2 and CTC
## Data
| Data Subset | Duration in Seconds |
diff --git a/examples/librispeech/asr0/local/train.sh b/examples/librispeech/asr0/local/train.sh
index 71659e28d..bb41fd554 100755
--- a/examples/librispeech/asr0/local/train.sh
+++ b/examples/librispeech/asr0/local/train.sh
@@ -26,6 +26,10 @@ if [ ${seed} != 0 ]; then
export FLAGS_cudnn_deterministic=True
fi
+# the default memory allocator strategy may cause GPU training to hang
+# because no OOM error is raised when memory is exhausted
+export FLAGS_allocator_strategy=naive_best_fit
+
if [ ${ngpu} == 0 ]; then
python3 -u ${BIN_DIR}/train.py \
--ngpu ${ngpu} \
diff --git a/examples/librispeech/asr1/local/train.sh b/examples/librispeech/asr1/local/train.sh
index f729ed22c..e274b9133 100755
--- a/examples/librispeech/asr1/local/train.sh
+++ b/examples/librispeech/asr1/local/train.sh
@@ -29,6 +29,10 @@ fi
# export FLAGS_cudnn_exhaustive_search=true
# export FLAGS_conv_workspace_size_limit=4000
+# the default memory allocator strategy may cause GPU training to hang
+# because no OOM error is raised when memory is exhausted
+export FLAGS_allocator_strategy=naive_best_fit
+
if [ ${ngpu} == 0 ]; then
python3 -u ${BIN_DIR}/train.py \
--ngpu ${ngpu} \
diff --git a/examples/librispeech/asr2/local/train.sh b/examples/librispeech/asr2/local/train.sh
index 1f414ad41..c2f2d4b65 100755
--- a/examples/librispeech/asr2/local/train.sh
+++ b/examples/librispeech/asr2/local/train.sh
@@ -26,6 +26,10 @@ if [ ${seed} != 0 ]; then
export FLAGS_cudnn_deterministic=True
fi
+# the default memory allocator strategy may cause GPU training to hang
+# because no OOM error is raised when memory is exhausted
+export FLAGS_allocator_strategy=naive_best_fit
+
if [ ${ngpu} == 0 ]; then
python3 -u ${BIN_DIR}/train.py \
--ngpu ${ngpu} \
diff --git a/examples/librispeech/asr3/README.md b/examples/librispeech/asr3/README.md
new file mode 100644
index 000000000..f99beb338
--- /dev/null
+++ b/examples/librispeech/asr3/README.md
@@ -0,0 +1,197 @@
+# Wav2vec2ASR with Librispeech
+This example contains code used to finetune the [wav2vec2.0](https://arxiv.org/pdf/2006.11477.pdf) model with the [LibriSpeech dataset](http://www.openslr.org/resources/12).
+## Overview
+All the scripts you need are in `run.sh`. There are several stages in `run.sh`, and each stage has its function.
+| Stage | Function |
+|:---- |:----------------------------------------------------------- |
+| 0 | Process data. It includes: (1) Download the dataset (2) Calculate the CMVN of the train dataset (3) Get the vocabulary file (4) Get the manifest files of the train, development and test dataset (5) Download the pretrained wav2vec2 model |
+| 1 | Train the model |
+| 2 | Get the final model by averaging the top-k models; setting k = 1 means choosing the best model |
+| 3 | Test the final model performance |
+| 4 | Infer the single audio file |
+
+
+You can choose to run a range of stages by setting `stage` and `stop_stage`.
+
+For example, if you want to execute the code in stage 2 and stage 3, you can run this script:
+```bash
+bash run.sh --stage 2 --stop_stage 3
+```
+Or you can set `stage` equal to `stop_stage` to run only one stage.
+For example, if you only want to run `stage 0`, you can use the script below:
+```bash
+bash run.sh --stage 0 --stop_stage 0
+```
+The document below will describe the scripts in `run.sh` in detail.
+## The Environment Variables
+`path.sh` contains the environment variables.
+```bash
+. ./path.sh
+. ./cmd.sh
+```
+These scripts need to be run first. Another script is also needed:
+```bash
+source ${MAIN_ROOT}/utils/parse_options.sh
+```
+It enables passing options to the shell scripts in the form `--variable value`.
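+
+A minimal sketch of how `parse_options.sh` behaves (this script body is only for illustration): each variable must be given a default value before the script is sourced, and the command line can then override it.
+```bash
+#!/bin/bash
+stage=0          # defaults must be defined before parse_options.sh is sourced
+stop_stage=100
+source ${MAIN_ROOT}/utils/parse_options.sh
+echo "stage=${stage} stop_stage=${stop_stage}"
+# `bash this_script.sh --stage 2 --stop_stage 3` prints: stage=2 stop_stage=3
+```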
+## The Local Variables
+Some local variables are set in `run.sh`.
+`gpus` denotes the GPU IDs you want to use. If you set `gpus=`, only the CPU is used.
+`stage` denotes the number of the stage you want to start from in the experiments.
+`stop_stage` denotes the number of the stage you want to end at in the experiments.
+`conf_path` denotes the config path of the model.
+`avg_num` denotes the number K of top-K models you want to average to get the final model.
+`audio_file` denotes the file path of the single audio file you want to infer in stage 4.
+`ckpt` denotes the checkpoint prefix of the model, e.g. "wav2vec2ASR".
+
+You can set the local variables (except `ckpt`) when you use `run.sh`.
+
+For example, you can set the `gpus` and `avg_num` when you use the command line:
+```bash
+bash run.sh --gpus 0,1 --avg_num 20
+```
+## Stage 0: Data Processing
+To use this example, you need to process the data first, and you can use stage 0 in `run.sh` to do this. The code is shown below:
+```bash
+ if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
+ # prepare data
+ bash ./local/data.sh || exit -1
+ fi
+```
+Stage 0 is for processing the data.
+
+If you only want to process the data, you can run:
+```bash
+bash run.sh --stage 0 --stop_stage 0
+```
+You can also just run these scripts in your command line.
+```bash
+. ./path.sh
+. ./cmd.sh
+bash ./local/data.sh
+```
+After processing the data, the `data` directory will look like this:
+```bash
+data/
+|-- dev.meta
+|-- lang_char
+|   |-- bpe_unigram_5000.model
+|   |-- bpe_unigram_5000.vocab
+| `-- vocab.txt
+|-- manifest.dev
+|-- manifest.dev.raw
+|-- manifest.test
+|-- manifest.test.raw
+|-- manifest.train
+|-- manifest.train.raw
+|-- mean_std.json
+|-- test.meta
+`-- train.meta
+```
+
+Stage 0 also downloads the pre-trained [wav2vec2](https://paddlespeech.bj.bcebos.com/wav2vec/wav2vec2-large-960h-lv60-self.pdparams) model.
+```bash
+mkdir -p exp/wav2vec2
+wget -P exp/wav2vec2 https://paddlespeech.bj.bcebos.com/wav2vec/wav2vec2-large-960h-lv60-self.pdparams
+```
+## Stage 1: Model Training
+If you want to train the model, you can use stage 1 in `run.sh`. The code is shown below:
+```bash
+if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
+ # train model, all `ckpt` under `exp` dir
+ CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${ckpt}
+ fi
+```
+If you want to train the model, you can use the script below to execute stage 0 and stage 1:
+```bash
+bash run.sh --stage 0 --stop_stage 1
+```
+or you can run these scripts in the command line (using only the CPU).
+```bash
+. ./path.sh
+. ./cmd.sh
+bash ./local/data.sh
+CUDA_VISIBLE_DEVICES= ./local/train.sh conf/wav2vec2ASR.yaml wav2vec2ASR
+```
+## Stage 2: Top-k Models Averaging
+After training the model, we need to get the final model for testing and inference. In every epoch, the model checkpoint is saved, so we can either choose the best model based on the validation loss or sort the checkpoints and average the parameters of the top-k models to get the final model. We can use stage 2 to do this, and the code is shown below. Note: we only train one epoch for wav2vec2ASR, so `avg_num` is set to 1.
+```bash
+ if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
+ # avg n best model
+ avg.sh best exp/${ckpt}/checkpoints ${avg_num}
+ fi
+```
+The `avg.sh` script is in `../../../utils/`, which is defined in `path.sh`.
+If you want to get the final model, you can use the script below to execute stage 0, stage 1, and stage 2:
+```bash
+bash run.sh --stage 0 --stop_stage 2
+```
+or you can run these scripts in the command line (using only the CPU).
+
+```bash
+. ./path.sh
+. ./cmd.sh
+bash ./local/data.sh
+CUDA_VISIBLE_DEVICES= ./local/train.sh conf/wav2vec2ASR.yaml wav2vec2ASR
+avg.sh best exp/wav2vec2ASR/checkpoints 1
+```
+## Stage 3: Model Testing
+The test stage evaluates the model performance. The code of the test stage is shown below:
+```bash
+ if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
+ # test ckpt avg_n
+ CUDA_VISIBLE_DEVICES=0 ./local/test.sh ${conf_path} ${decode_conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} || exit -1
+ fi
+```
+If you want to train a model and test it, you can use the script below to execute stage 0, stage 1, stage 2, and stage 3:
+```bash
+bash run.sh --stage 0 --stop_stage 3
+```
+or you can run these scripts in the command line (using only the CPU).
+```bash
+. ./path.sh
+. ./cmd.sh
+bash ./local/data.sh
+CUDA_VISIBLE_DEVICES= ./local/train.sh conf/wav2vec2ASR.yaml wav2vec2ASR
+avg.sh best exp/wav2vec2ASR/checkpoints 1
+CUDA_VISIBLE_DEVICES= ./local/test.sh conf/wav2vec2ASR.yaml conf/tuning/decode.yaml exp/wav2vec2ASR/checkpoints/avg_1
+```
+## Pretrained Model
+You can get the pretrained wav2vec2ASR model from [released_model.md](../../../docs/source/released_model.md).
+
+Use the `tar` command to unpack the model, and then you can use the scripts below to test it.
+
+For example:
+```bash
+wget https://paddlespeech.bj.bcebos.com/s2t/librispeech/asr3/wav2vec2ASR-large-960h-librispeech_ckpt_1.3.0.model.tar.gz
+tar xzvf wav2vec2ASR-large-960h-librispeech_ckpt_1.3.0.model.tar.gz
+source path.sh
+# If you have already processed the data and generated the manifest files, you can skip the following 2 steps
+bash local/data.sh --stage -1 --stop_stage -1
+bash local/data.sh --stage 2 --stop_stage 2
+CUDA_VISIBLE_DEVICES= ./local/test.sh conf/wav2vec2ASR.yaml conf/tuning/decode.yaml exp/wav2vec2ASR/checkpoints/avg_1
+```
+The performance of the released model is shown in [RESULTS.md](./RESULTS.md).
+
+
+## Stage 4: Single Audio File Inference
+In some situations, you may want to use the trained model to run inference on a single audio file. You can use stage 4. The code is shown below:
+```bash
+ if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
+ # test a single .wav file
+ CUDA_VISIBLE_DEVICES=0 ./local/test_wav.sh ${conf_path} ${decode_conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} ${audio_file} || exit -1
+ fi
+```
+You can train the model by yourself using ```bash run.sh --stage 0 --stop_stage 3```, or you can download the pretrained model with the script below:
+```bash
+wget https://paddlespeech.bj.bcebos.com/s2t/librispeech/asr3/wav2vec2ASR-large-960h-librispeech_ckpt_1.3.0.model.tar.gz
+tar xzvf wav2vec2ASR-large-960h-librispeech_ckpt_1.3.0.model.tar.gz
+```
+You can download the audio demo:
+```bash
+wget -nc https://paddlespeech.bj.bcebos.com/datasets/single_wav/en/demo_002_en.wav -P data/
+```
+You need to prepare an audio file or use the audio demo above; please confirm that the sample rate of the audio is 16 kHz. You can get the result for the audio demo by running the script below.
+```bash
+CUDA_VISIBLE_DEVICES= ./local/test_wav.sh conf/wav2vec2ASR.yaml conf/tuning/decode.yaml exp/wav2vec2ASR/checkpoints/avg_1 data/demo_002_en.wav
+```
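+If you are not sure about the sample rate of your audio, a quick optional check (assuming `sox` is installed) is:
+```bash
+# prints the sample rate in Hz; the demo file should report 16000
+sox --i -r data/demo_002_en.wav
+```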
diff --git a/examples/librispeech/asr3/RESULTS.md b/examples/librispeech/asr3/RESULTS.md
new file mode 100644
index 000000000..1c5626d9e
--- /dev/null
+++ b/examples/librispeech/asr3/RESULTS.md
@@ -0,0 +1,8 @@
+# LibriSpeech
+
+## Wav2VecASR
+train: Epoch 1, 1*V100-32G, batch size: 10
+
+| Model | Params | Config | Augmentation| Test set | Decode method | WER |
+| --- | --- | --- | --- | --- | --- | --- |
+| wav2vec2ASR | 302.86 M | conf/wav2vec2ASR.yaml | spec_aug | test-clean | greedy search | 0.018887 |
diff --git a/examples/librispeech/asr3/cmd.sh b/examples/librispeech/asr3/cmd.sh
new file mode 100644
index 000000000..7b70ef5e0
--- /dev/null
+++ b/examples/librispeech/asr3/cmd.sh
@@ -0,0 +1,89 @@
+# ====== About run.pl, queue.pl, slurm.pl, and ssh.pl ======
+# Usage: <cmd>.pl [options] JOB=1:<nj> <log> <command...>
+# e.g.
+# run.pl --mem 4G JOB=1:10 echo.JOB.log echo JOB
+#
+# Options:
+# --time : Limit the maximum time to execute.
+# --mem : Limit the maximum memory usage.
+# --max-jobs-run : Limit the number of parallel jobs. This is ignored for non-array jobs.
+# --num-threads : Specify the number of CPU cores.
+# --gpu : Specify the number of GPU devices.
+# --config: Change the configuration file from default.
+#
+# "JOB=1:10" is used for "array jobs" and it can control the number of parallel jobs.
+# The left string of "=", i.e. "JOB", is replaced by <N> (the N-th job) in the command and the log file name,
+# e.g. "echo JOB" is changed to "echo 3" for the 3rd job and "echo 8" for 8th job respectively.
+# Note that the number must start with a positive number, so you can't use "JOB=0:10" for example.
+#
+# run.pl, queue.pl, slurm.pl, and ssh.pl have unified interface, not depending on its backend.
+# These options are mapping to specific options for each backend and
+# it is configured by "conf/queue.conf" and "conf/slurm.conf" by default.
+# If jobs failed, your configuration might be wrong for your environment.
+#
+#
+# The official documentation for run.pl, queue.pl, slurm.pl, and ssh.pl:
+# "Parallelization in Kaldi": http://kaldi-asr.org/doc/queue.html
+# =========================================================~
+
+
+# Select the backend used by run.sh from "local", "sge", "slurm", or "ssh"
+cmd_backend='local'
+
+# Local machine, without any Job scheduling system
+if [ "${cmd_backend}" = local ]; then
+
+ # The other usage
+ export train_cmd="run.pl"
+ # Used for "*_train.py": "--gpu" is appended optionally by run.sh
+ export cuda_cmd="run.pl"
+ # Used for "*_recog.py"
+ export decode_cmd="run.pl"
+
+# "qsub" (SGE, Torque, PBS, etc.)
+elif [ "${cmd_backend}" = sge ]; then
+ # The default setting is written in conf/queue.conf.
+ # You must change "-q g.q" for the "queue" for your environment.
+ # To know the "queue" names, type "qhost -q"
+ # Note that to use "--gpu *", you have to setup "complex_value" for the system scheduler.
+
+ export train_cmd="queue.pl"
+ export cuda_cmd="queue.pl"
+ export decode_cmd="queue.pl"
+
+# "sbatch" (Slurm)
+elif [ "${cmd_backend}" = slurm ]; then
+ # The default setting is written in conf/slurm.conf.
+ # You must change "-p cpu" and "-p gpu" for the "partition" for your environment.
+ # To know the "partition" names, type "sinfo".
+ # You can use "--gpu * " by default for slurm and it is interpreted as "--gres gpu:*"
+ # The devices are allocated exclusively using "${CUDA_VISIBLE_DEVICES}".
+
+ export train_cmd="slurm.pl"
+ export cuda_cmd="slurm.pl"
+ export decode_cmd="slurm.pl"
+
+elif [ "${cmd_backend}" = ssh ]; then
+ # You have to create ".queue/machines" to specify the host to execute jobs.
+ # e.g. .queue/machines
+ # host1
+ # host2
+ # host3
+ # Assuming you can log in to them without a password, i.e. you have to set up ssh keys.
+
+ export train_cmd="ssh.pl"
+ export cuda_cmd="ssh.pl"
+ export decode_cmd="ssh.pl"
+
+# This is an example of specifying several unique options in the JHU CLSP cluster setup.
+# Users can modify/add their own command options according to their cluster environments.
+elif [ "${cmd_backend}" = jhu ]; then
+
+ export train_cmd="queue.pl --mem 2G"
+ export cuda_cmd="queue-freegpu.pl --mem 2G --gpu 1 --config conf/gpu.conf"
+ export decode_cmd="queue.pl --mem 4G"
+
+else
+ echo "$0: Error: Unknown cmd_backend=${cmd_backend}" 1>&2
+ return 1
+fi
diff --git a/examples/librispeech/asr3/conf/preprocess.yaml b/examples/librispeech/asr3/conf/preprocess.yaml
new file mode 100644
index 000000000..4a908a83b
--- /dev/null
+++ b/examples/librispeech/asr3/conf/preprocess.yaml
@@ -0,0 +1,4 @@
+process:
+ # use raw audio
+ - type: wav_process
+ dither: 0.0
diff --git a/examples/librispeech/asr3/conf/tuning/decode.yaml b/examples/librispeech/asr3/conf/tuning/decode.yaml
new file mode 100644
index 000000000..2ba393264
--- /dev/null
+++ b/examples/librispeech/asr3/conf/tuning/decode.yaml
@@ -0,0 +1,4 @@
+decode_batch_size: 1
+error_rate_type: wer
+decoding_method: ctc_greedy_search # 'ctc_greedy_search', 'ctc_prefix_beam_search'
+beam_size: 10
diff --git a/examples/librispeech/asr3/conf/wav2vec2ASR.yaml b/examples/librispeech/asr3/conf/wav2vec2ASR.yaml
new file mode 100644
index 000000000..b19881b70
--- /dev/null
+++ b/examples/librispeech/asr3/conf/wav2vec2ASR.yaml
@@ -0,0 +1,120 @@
+############################################
+# Network Architecture #
+############################################
+freeze_wav2vec2: True
+normalize_wav: True
+output_norm: True
+dnn_blocks: 2
+dnn_neurons: 1024
+blank_id: 0
+ctc_dropout_rate: 0.0
+wav2vec2_params_path: "exp/wav2vec2/wav2vec2-large-960h-lv60-self.pdparams"
+
+############################################
+# Wav2Vec2.0 #
+############################################
+vocab_size: 32
+hidden_size: 1024
+num_hidden_layers: 24
+num_attention_heads: 16
+intermediate_size: 4096
+hidden_act: "gelu"
+hidden_dropout: 0.1
+activation_dropout: 0.1
+attention_dropout: 0.1
+feat_proj_dropout: 0.1
+feat_quantizer_dropout: 0.0
+final_dropout: 0.1
+layerdrop: 0.1
+initializer_range: 0.02
+layer_norm_eps: 1e-5
+feat_extract_norm: "layer"
+feat_extract_activation: "gelu"
+conv_dim: [512, 512, 512, 512, 512, 512, 512]
+conv_stride: [5, 2, 2, 2, 2, 2, 2]
+conv_kernel: [10, 3, 3, 3, 3, 2, 2]
+conv_bias: True
+num_conv_pos_embeddings: 128
+num_conv_pos_embedding_groups: 16
+do_stable_layer_norm: True
+apply_spec_augment: False
+mask_time_prob: 0.05
+mask_time_length: 10
+mask_time_min_masks: 2
+mask_feature_prob: 0.0
+mask_feature_length: 10
+mask_feature_min_masks: 0
+num_codevectors_per_group: 320
+num_codevector_groups: 2
+contrastive_logits_temperature: 0.1
+num_negatives: 100
+codevector_dim: 256
+proj_codevector_dim: 256
+diversity_loss_weight: 0.1
+ctc_loss_reduction: "sum"
+ctc_zero_infinity: False
+use_weighted_layer_sum: False
+pad_token_id: 0
+bos_token_id: 1
+eos_token_id: 2
+add_adapter: False
+adapter_kernel_size: 3
+adapter_stride: 2
+num_adapter_layers: 3
+output_hidden_size: None
+
+###########################################
+# Data #
+###########################################
+train_manifest: data/manifest.train
+dev_manifest: data/manifest.dev
+test_manifest: data/manifest.test-clean
+
+
+###########################################
+# Dataloader #
+###########################################
+vocab_filepath: data/lang_char/vocab.txt
+unit_type: 'char'
+mean_std_filepath: ""
+preprocess_config: conf/preprocess.yaml
+sortagrad: -1 # Feed samples from shortest to longest; -1: enabled for all epochs, 0: disabled, other: enabled for 'other' epochs
+batch_size: 10 # Different batch_size may cause large differences in results
+maxlen_in: 51200000000 # if input length > maxlen-in, the batch size is automatically reduced
+maxlen_out: 1500000 # if output length > maxlen-out, the batch size is automatically reduced
+minibatches: 0 # for debug
+batch_count: auto
+batch_bins: 0
+batch_frames_in: 0
+batch_frames_out: 0
+batch_frames_inout: 0
+num_workers: 0
+subsampling_factor: 1
+num_encs: 1
+dist_sampler: True
+shortest_first: True
+return_lens_rate: True
+
+
+###########################################
+# Training #
+###########################################
+n_epoch: 1
+accum_grad: 1
+global_grad_clip: 3.0
+model_optim: adadelta
+model_optim_conf:
+ lr: 0.9
+ epsilon: 1.0e-6
+ rho: 0.95
+scheduler: constantlr
+scheduler_conf:
+ warmup_steps: 25000
+ lr_decay: 1.0
+log_interval: 1
+checkpoint:
+ kbest_n: 50
+ latest_n: 5
+augment: True
+
+
diff --git a/examples/librispeech/asr3/local/data.sh b/examples/librispeech/asr3/local/data.sh
new file mode 100644
index 000000000..8495a4ab6
--- /dev/null
+++ b/examples/librispeech/asr3/local/data.sh
@@ -0,0 +1,110 @@
+#!/bin/bash
+
+stage=-1
+stop_stage=100
+
+unit_type=char
+dict_dir=data/lang_char
+
+source ${MAIN_ROOT}/utils/parse_options.sh
+
+mkdir -p data
+mkdir -p ${dict_dir}
+TARGET_DIR=${MAIN_ROOT}/dataset
+mkdir -p ${TARGET_DIR}
+
+if [ ${stage} -le -1 ] && [ ${stop_stage} -ge -1 ]; then
+ # download data, generate manifests
+ python3 ${TARGET_DIR}/librispeech/librispeech.py \
+ --manifest_prefix="data/manifest" \
+ --target_dir="${TARGET_DIR}/librispeech" \
+ --full_download="True"
+
+ if [ $? -ne 0 ]; then
+ echo "Prepare LibriSpeech failed. Terminated."
+ exit 1
+ fi
+
+ for set in train-clean-100 train-clean-360 train-other-500 dev-clean dev-other test-clean test-other; do
+ mv data/manifest.${set} data/manifest.${set}.raw
+ done
+
+ rm -rf data/manifest.train.raw data/manifest.dev.raw data/manifest.test.raw
+ for set in train-clean-100 train-clean-360 train-other-500; do
+ cat data/manifest.${set}.raw >> data/manifest.train.raw
+ done
+
+ for set in dev-clean dev-other; do
+ cat data/manifest.${set}.raw >> data/manifest.dev.raw
+ done
+
+ for set in test-clean test-other; do
+ cat data/manifest.${set}.raw >> data/manifest.test.raw
+ done
+fi
+
+if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
+ # compute mean and stddev for normalizer
+ num_workers=$(nproc)
+ python3 ${MAIN_ROOT}/utils/compute_mean_std.py \
+ --manifest_path="data/manifest.train.raw" \
+ --num_samples=2000 \
+ --spectrum_type="fbank" \
+ --feat_dim=161 \
+ --delta_delta=false \
+ --sample_rate=16000 \
+ --stride_ms=10 \
+ --window_ms=25 \
+ --use_dB_normalization=False \
+ --num_workers=${num_workers} \
+ --output_path="data/mean_std.json"
+
+ if [ $? -ne 0 ]; then
+ echo "Compute mean and stddev failed. Terminated."
+ exit 1
+ fi
+fi
+
+if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
+ # build vocabulary
+ python3 ${MAIN_ROOT}/utils/build_vocab.py \
+ --unit_type ${unit_type} \
+ --count_threshold=0 \
+ --vocab_path="${dict_dir}/vocab.txt" \
+ --manifest_paths="data/manifest.train.raw"
+
+ if [ $? -ne 0 ]; then
+ echo "Build vocabulary failed. Terminated."
+ exit 1
+ fi
+fi
+
+if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
+ # format manifest with tokenids, vocab size
+ for set in train dev test dev-clean dev-other test-clean test-other; do
+ {
+ python3 ${MAIN_ROOT}/utils/format_data.py \
+ --cmvn_path "data/mean_std.json" \
+ --unit_type ${unit_type} \
+ --vocab_path="${dict_dir}/vocab.txt" \
+ --manifest_path="data/manifest.${set}.raw" \
+ --output_path="data/manifest.${set}"
+
+ if [ $? -ne 0 ]; then
+ echo "Format manifest.${set} failed. Terminated."
+ exit 1
+ fi
+ }&
+ done
+ wait
+fi
+
+echo "LibriSpeech Data preparation done."
+
+if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
+ mkdir -p exp/wav2vec2
+ echo "Pretrained wav2vec2 model download"
+ wget -P exp/wav2vec2 https://paddlespeech.bj.bcebos.com/wav2vec/wav2vec2-large-960h-lv60-self.pdparams
+fi
+
+exit 0
\ No newline at end of file
diff --git a/examples/librispeech/asr3/local/test.sh b/examples/librispeech/asr3/local/test.sh
new file mode 100644
index 000000000..ccc0d84de
--- /dev/null
+++ b/examples/librispeech/asr3/local/test.sh
@@ -0,0 +1,84 @@
+#!/bin/bash
+
+set -e
+
+ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
+echo "using $ngpu gpus..."
+
+expdir=exp
+datadir=data
+
+train_set=train_960
+recog_set="test-clean test-other dev-clean dev-other"
+recog_set="test-clean"
+
+config_path=$1
+decode_config_path=$2
+ckpt_prefix=$3
+
+source ${MAIN_ROOT}/utils/parse_options.sh || exit 1;
+
+# download language model
+#bash local/download_lm_en.sh
+#if [ $? -ne 0 ]; then
+# exit 1
+#fi
+
+python3 utils/format_rsl.py \
+ --origin_ref data/manifest.test-clean.raw \
+ --trans_ref data/manifest.test-clean.text
+
+
+for type in ctc_greedy_search; do
+ echo "decoding ${type}"
+ batch_size=16
+ python3 -u ${BIN_DIR}/test.py \
+ --ngpu ${ngpu} \
+ --config ${config_path} \
+ --decode_cfg ${decode_config_path} \
+ --result_file ${ckpt_prefix}.${type}.rsl \
+ --checkpoint_path ${ckpt_prefix} \
+ --opts decode.decoding_method ${type} \
+ --opts decode.decode_batch_size ${batch_size}
+
+ if [ $? -ne 0 ]; then
+ echo "Failed in evaluation!"
+ exit 1
+ fi
+ python3 utils/format_rsl.py \
+ --origin_hyp ${ckpt_prefix}.${type}.rsl \
+ --trans_hyp ${ckpt_prefix}.${type}.rsl.text
+
+ python3 utils/compute-wer.py --char=1 --v=1 \
+ data/manifest.test-clean.text ${ckpt_prefix}.${type}.rsl.text > ${ckpt_prefix}.${type}.error
+ echo "decoding ${type} done."
+done
+
+for type in ctc_prefix_beam_search; do
+ echo "decoding ${type}"
+ batch_size=1
+ python3 -u ${BIN_DIR}/test.py \
+ --ngpu ${ngpu} \
+ --config ${config_path} \
+ --decode_cfg ${decode_config_path} \
+ --result_file ${ckpt_prefix}.${type}.rsl \
+ --checkpoint_path ${ckpt_prefix} \
+ --opts decode.decoding_method ${type} \
+ --opts decode.decode_batch_size ${batch_size}
+
+ if [ $? -ne 0 ]; then
+ echo "Failed in evaluation!"
+ exit 1
+ fi
+ python3 utils/format_rsl.py \
+ --origin_hyp ${ckpt_prefix}.${type}.rsl \
+ --trans_hyp ${ckpt_prefix}.${type}.rsl.text
+
+ python3 utils/compute-wer.py --char=1 --v=1 \
+ data/manifest.test-clean.text ${ckpt_prefix}.${type}.rsl.text > ${ckpt_prefix}.${type}.error
+ echo "decoding ${type} done."
+done
+
+echo "Finished"
+
+exit 0
diff --git a/examples/librispeech/asr3/local/test_wav.sh b/examples/librispeech/asr3/local/test_wav.sh
new file mode 100644
index 000000000..fdf3589f4
--- /dev/null
+++ b/examples/librispeech/asr3/local/test_wav.sh
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+if [ $# != 4 ];then
+ echo "usage: ${0} config_path decode_config_path ckpt_path_prefix audio_file"
+ exit -1
+fi
+
+ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
+echo "using $ngpu gpus..."
+
+config_path=$1
+decode_config_path=$2
+ckpt_prefix=$3
+audio_file=$4
+
+mkdir -p data
+wget -nc https://paddlespeech.bj.bcebos.com/datasets/single_wav/en/demo_002_en.wav -P data/
+if [ $? -ne 0 ]; then
+ exit 1
+fi
+
+if [ ! -f ${audio_file} ]; then
+ echo "Please input the right audio_file path"
+ exit 1
+fi
+
+chunk_mode=false
+if [[ ${config_path} =~ ^.*chunk_.*yaml$ ]];then
+ chunk_mode=true
+fi
+
+# download language model
+#bash local/download_lm_ch.sh
+#if [ $? -ne 0 ]; then
+# exit 1
+#fi
+
+for type in ctc_greedy_search; do
+ echo "decoding ${type}"
+ batch_size=1
+ output_dir=${ckpt_prefix}
+ mkdir -p ${output_dir}
+ python3 -u ${BIN_DIR}/test_wav.py \
+ --ngpu ${ngpu} \
+ --config ${config_path} \
+ --decode_cfg ${decode_config_path} \
+ --result_file ${output_dir}/${type}.rsl \
+ --checkpoint_path ${ckpt_prefix} \
+ --opts decode.decoding_method ${type} \
+ --opts decode.decode_batch_size ${batch_size} \
+ --audio_file ${audio_file}
+
+ if [ $? -ne 0 ]; then
+ echo "Failed in evaluation!"
+ exit 1
+ fi
+done
+exit 0
diff --git a/examples/librispeech/asr3/local/train.sh b/examples/librispeech/asr3/local/train.sh
new file mode 100644
index 000000000..6913ed17e
--- /dev/null
+++ b/examples/librispeech/asr3/local/train.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+if [ $# -lt 2 ] || [ $# -gt 3 ];then
+ echo "usage: CUDA_VISIBLE_DEVICES=0 ${0} config_path ckpt_name ips(optional)"
+ exit -1
+fi
+
+ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
+echo "using $ngpu gpus..."
+
+config_path=$1
+ckpt_name=$2
+ips=$3
+
+if [ ! $ips ];then
+ ips_config=
+else
+ ips_config="--ips="${ips}
+fi
+
+mkdir -p exp
+
+# seed may break model convergence
+seed=1998
+if [ ${seed} != 0 ]; then
+ export FLAGS_cudnn_deterministic=True
+fi
+
+# export FLAGS_cudnn_exhaustive_search=true
+# export FLAGS_conv_workspace_size_limit=4000
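+# the default memory allocator strategy may cause GPU training to hang
+# because no OOM error is raised when memory is exhausted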
+export FLAGS_allocator_strategy=naive_best_fit
+if [ ${ngpu} == 0 ]; then
+python3 -u ${BIN_DIR}/train.py \
+--ngpu ${ngpu} \
+--config ${config_path} \
+--output exp/${ckpt_name} \
+--seed ${seed}
+else
+python3 -m paddle.distributed.launch --gpus=${CUDA_VISIBLE_DEVICES} ${ips_config} ${BIN_DIR}/train.py \
+--ngpu ${ngpu} \
+--config ${config_path} \
+--output exp/${ckpt_name} \
+--seed ${seed}
+fi
+
+if [ ${seed} != 0 ]; then
+ unset FLAGS_cudnn_deterministic
+fi
+
+if [ $? -ne 0 ]; then
+ echo "Failed in training!"
+ exit 1
+fi
+
+exit 0
diff --git a/examples/librispeech/asr3/path.sh b/examples/librispeech/asr3/path.sh
new file mode 100644
index 000000000..f47178382
--- /dev/null
+++ b/examples/librispeech/asr3/path.sh
@@ -0,0 +1,15 @@
+export MAIN_ROOT=`realpath ${PWD}/../../../`
+
+export PATH=${MAIN_ROOT}:${MAIN_ROOT}/tools/sctk/bin:${PWD}/utils:${PATH}
+export LC_ALL=C
+
+export PYTHONDONTWRITEBYTECODE=1
+# Use UTF-8 in Python to avoid UnicodeDecodeError when LC_ALL=C
+export PYTHONIOENCODING=UTF-8
+export PYTHONPATH=${MAIN_ROOT}:${PYTHONPATH}
+
+export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib/
+
+
+MODEL=wav2vec2
+export BIN_DIR=${MAIN_ROOT}/paddlespeech/s2t/exps/${MODEL}/bin
diff --git a/examples/librispeech/asr3/run.sh b/examples/librispeech/asr3/run.sh
new file mode 100644
index 000000000..3b1abb11b
--- /dev/null
+++ b/examples/librispeech/asr3/run.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+set -e
+
+. ./path.sh || exit 1;
+. ./cmd.sh || exit 1;
+
+gpus=0
+stage=0
+stop_stage=0
+conf_path=conf/wav2vec2ASR.yaml
+ips= #xx.xx.xx.xx,xx.xx.xx.xx
+decode_conf_path=conf/tuning/decode.yaml
+avg_num=1
+dict_path=data/lang_char/vocab.txt
+
+. ${MAIN_ROOT}/utils/parse_options.sh || exit 1;
+
+audio_file=data/demo_002_en.wav
+
+avg_ckpt=avg_${avg_num}
+ckpt=$(basename ${conf_path} | awk -F'.' '{print $1}')
+echo "checkpoint name ${ckpt}"
+
+if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
+ # prepare data
+ bash ./local/data.sh || exit -1
+fi
+
+if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
+ # train model, all `ckpt` under `exp` dir
+ CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${ckpt} ${ips}
+fi
+
+if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
+ # avg n best model
+ avg.sh best exp/${ckpt}/checkpoints ${avg_num}
+fi
+
+if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
+ # greedy search decoder
+ CUDA_VISIBLE_DEVICES=${gpus} ./local/test.sh ${conf_path} ${decode_conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} || exit -1
+fi
+
+if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
+ # test a single .wav file
+ CUDA_VISIBLE_DEVICES=${gpus} ./local/test_wav.sh ${conf_path} ${decode_conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} ${audio_file} || exit -1
+fi
diff --git a/examples/librispeech/asr3/utils b/examples/librispeech/asr3/utils
new file mode 120000
index 000000000..973afe674
--- /dev/null
+++ b/examples/librispeech/asr3/utils
@@ -0,0 +1 @@
+../../../utils
\ No newline at end of file
diff --git a/examples/other/tts_finetune/tts3/README.md b/examples/other/tts_finetune/tts3/README.md
index ceb8e7970..fa691764c 100644
--- a/examples/other/tts_finetune/tts3/README.md
+++ b/examples/other/tts_finetune/tts3/README.md
@@ -7,7 +7,7 @@ For more information on training Fastspeech2 with AISHELL-3, You can refer [exam
## Prepare
### Download Pretrained model
Assume the path to the model is `./pretrained_models`.
-If you want to finetune Chinese data, you need to download Fastspeech2 pretrained model with AISHELL-3: [fastspeech2_aishell3_ckpt_1.1.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_aishell3_ckpt_1.1.0.zip) for finetuning. Download HiFiGAN pretrained model with aishell3: [hifigan_aishell3_ckpt_0.2.0](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/hifigan/hifigan_aishell3_ckpt_0.2.0.zip) for synthesis.
+If you want to finetune Chinese pretrained model, you need to download Fastspeech2 pretrained model with AISHELL-3: [fastspeech2_aishell3_ckpt_1.1.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_aishell3_ckpt_1.1.0.zip) for finetuning. Download HiFiGAN pretrained model with aishell3: [hifigan_aishell3_ckpt_0.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/hifigan/hifigan_aishell3_ckpt_0.2.0.zip) for synthesis.
```bash
mkdir -p pretrained_models && cd pretrained_models
@@ -21,7 +21,7 @@ cd ../
```
-If you want to finetune English data, you need to download Fastspeech2 pretrained model with VCTK: [fastspeech2_vctk_ckpt_1.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_vctk_ckpt_1.2.0.zip) for finetuning. Download HiFiGAN pretrained model with VCTK: [hifigan_vctk_ckpt_0.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/hifigan/hifigan_vctk_ckpt_0.2.0.zip) for synthesis.
+If you want to finetune English pretrained model, you need to download Fastspeech2 pretrained model with VCTK: [fastspeech2_vctk_ckpt_1.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/fastspeech2/fastspeech2_vctk_ckpt_1.2.0.zip) for finetuning. Download HiFiGAN pretrained model with VCTK: [hifigan_vctk_ckpt_0.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/hifigan/hifigan_vctk_ckpt_0.2.0.zip) for synthesis.
```bash
mkdir -p pretrained_models && cd pretrained_models
@@ -34,6 +34,59 @@ unzip hifigan_vctk_ckpt_0.2.0.zip
cd ../
```
+If you want to finetune Chinese-English Mixed pretrained model, you need to download Fastspeech2 pretrained model with mix datasets: [fastspeech2_mix_ckpt_1.2.0.zip](https://paddlespeech.bj.bcebos.com/t2s/chinse_english_mixed/models/fastspeech2_mix_ckpt_1.2.0.zip) for finetuning. Download HiFiGAN pretrained model with aishell3: [hifigan_aishell3_ckpt_0.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/hifigan/hifigan_aishell3_ckpt_0.2.0.zip) for synthesis.
+
+```bash
+mkdir -p pretrained_models && cd pretrained_models
+# pretrained fastspeech2 model
+wget https://paddlespeech.bj.bcebos.com/t2s/chinse_english_mixed/models/fastspeech2_mix_ckpt_1.2.0.zip
+unzip fastspeech2_mix_ckpt_1.2.0.zip
+# pretrained hifigan model
+wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/hifigan/hifigan_aishell3_ckpt_0.2.0.zip
+unzip hifigan_aishell3_ckpt_0.2.0.zip
+cd ../
+```
+
+### Prepare your data
+Assume the path to the dataset is `./input`, which contains a speaker folder. The speaker folder contains audio files (`*.wav`) and a label file (`labels.txt`). The audio files are in wav format. The format of the label file is: utt_id|pronunciation.
+
+If you want to finetune the Chinese pretrained model, you need to prepare Chinese data. A Chinese label example:
+```
+000001|ka2 er2 pu3 pei2 wai4 sun1 wan2 hua2 ti1
+```
+
+Here is an example with the first 200 utterances of CSMSC.
+
+```bash
+mkdir -p input && cd input
+wget https://paddlespeech.bj.bcebos.com/datasets/csmsc_mini.zip
+unzip csmsc_mini.zip
+cd ../
+```
+
+If you want to finetune the English pretrained model, you need to prepare English data. An English label example:
+```
+LJ001-0001|Printing, in the only sense with which we are at present concerned, differs from most if not from all the arts and crafts represented in the Exhibition
+```
+
+Here is an example with the first 200 utterances of LJSpeech.
+
+```bash
+mkdir -p input && cd input
+wget https://paddlespeech.bj.bcebos.com/datasets/ljspeech_mini.zip
+unzip ljspeech_mini.zip
+cd ../
+```
+
+If you want to finetune the Chinese-English mixed pretrained model, you need to prepare Chinese or English data. Here is an example with the first 12 utterances of SSB0005 (a speaker from AISHELL-3).
+
+```bash
+mkdir -p input && cd input
+wget https://paddlespeech.bj.bcebos.com/datasets/SSB0005_mini.zip
+unzip SSB0005_mini.zip
+cd ../
+```
+
### Download MFA tools and pretrained model
Assume the path to the MFA tool is `./tools`. Download [MFA](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner/releases/download/v1.0.1/montreal-forced-aligner_linux.tar.gz).
@@ -46,7 +99,7 @@ cp montreal-forced-aligner/lib/libpython3.6m.so.1.0 montreal-forced-aligner/lib/
mkdir -p aligner && cd aligner
```
-If you want to finetune Chinese data, you need to download pretrained MFA models with aishell3: [aishell3_model.zip](https://paddlespeech.bj.bcebos.com/MFA/ernie_sat/aishell3_model.zip) and unzip it.
+If you want to get mfa result of Chinese data, you need to download pretrained MFA models with aishell3: [aishell3_model.zip](https://paddlespeech.bj.bcebos.com/MFA/ernie_sat/aishell3_model.zip) and unzip it.
```bash
# pretrained mfa model for Chinese data
@@ -56,30 +109,17 @@ wget https://paddlespeech.bj.bcebos.com/MFA/AISHELL-3/with_tone/simple.lexicon
cd ../../
```
-If you want to finetune English data, you need to download pretrained MFA models with vctk: [vctk_model.zip](https://paddlespeech.bj.bcebos.com/MFA/ernie_sat/vctk_model.zip) and unzip it.
+If you want to get mfa result of English data, you need to download pretrained MFA models with vctk: [vctk_model.zip](https://paddlespeech.bj.bcebos.com/MFA/ernie_sat/vctk_model.zip) and unzip it.
```bash
-# pretrained mfa model for Chinese data
+# pretrained mfa model for English data
wget https://paddlespeech.bj.bcebos.com/MFA/ernie_sat/vctk_model.zip
unzip vctk_model.zip
wget https://paddlespeech.bj.bcebos.com/MFA/LJSpeech-1.1/cmudict-0.7b
cd ../../
```
-### Prepare your data
-Assume the path to the dataset is `./input` which contains a speaker folder. Speaker folder contains audio files (*.wav) and label file (labels.txt). The format of the audio file is wav. The format of the label file is: utt_id|pronunciation.
-
-If you want to finetune Chinese data, Chinese label example: 000001|ka2 er2 pu3 pei2 wai4 sun1 wan2 hua2 ti1
-Here is an example of the first 200 data of csmsc.
-
-```bash
-mkdir -p input && cd input
-wget https://paddlespeech.bj.bcebos.com/datasets/csmsc_mini.zip
-unzip csmsc_mini.zip
-cd ../
-```
-
-When "Prepare" done. The structure of the current directory is listed below.
+When "Prepare" is done, the structure of the current directory is similar to the following.
```text
├── input
│ ├── csmsc_mini
@@ -119,56 +159,6 @@ When "Prepare" done. The structure of the current directory is listed below.
```
-If you want to finetune English data, English label example: LJ001-0001|Printing, in the only sense with which we are at present concerned, differs from most if not from all the arts and crafts represented in the Exhibition
-Here is an example of the first 200 data of ljspeech.
-
-```bash
-mkdir -p input && cd input
-wget https://paddlespeech.bj.bcebos.com/datasets/ljspeech_mini.zip
-unzip ljspeech_mini.zip
-cd ../
-```
-
-When "Prepare" done. The structure of the current directory is listed below.
-```text
-├── input
-│ ├── ljspeech_mini
-│ │ ├── LJ001-0001.wav
-│ │ ├── LJ001-0002.wav
-│ │ ├── LJ001-0003.wav
-│ │ ├── ...
-│ │ ├── LJ002-0014.wav
-│ │ ├── labels.txt
-│ └── ljspeech_mini.zip
-├── pretrained_models
-│ ├── fastspeech2_vctk_ckpt_1.2.0
-│ │ ├── default.yaml
-│ │ ├── energy_stats.npy
-│ │ ├── phone_id_map.txt
-│ │ ├── pitch_stats.npy
-│ │ ├── snapshot_iter_66200.pdz
-│ │ ├── speaker_id_map.txt
-│ │ └── speech_stats.npy
-│ ├── fastspeech2_vctk_ckpt_1.2.0.zip
-│ ├── hifigan_vctk_ckpt_0.2.0
-│ │ ├── default.yaml
-│ │ ├── feats_stats.npy
-│ │ └── snapshot_iter_2500000.pdz
-│ └── hifigan_vctk_ckpt_0.2.0.zip
-└── tools
- ├── aligner
- │ ├── vctk_model
- │ ├── vctk_model.zip
- │ └── cmudict-0.7b
- ├── montreal-forced-aligner
- │ ├── bin
- │ ├── lib
- │ └── pretrained_models
- └── montreal-forced-aligner_linux.tar.gz
- ...
-
-```
-
### Set finetune.yaml
`conf/finetune.yaml` contains some configurations for fine-tuning. You can try various options to get a better result. The value of `frozen_layers` can be changed according to `conf/fastspeech2_layers.txt`, which lists the model layers of fastspeech2.
@@ -180,7 +170,7 @@ Arguments:
## Get Started
-For Chinese data finetune, execute `./run.sh`. For English data finetune, execute `./run_en.sh`.
+For finetuning the Chinese pretrained model, execute `./run.sh`. For finetuning the English pretrained model, execute `./run_en.sh`. For finetuning the Chinese-English mixed pretrained model, execute `./run_mix.sh`.
Run the command below to
1. **source path**.
2. finetune the model.
diff --git a/examples/other/tts_finetune/tts3/local/extract_feature.py b/examples/other/tts_finetune/tts3/local/extract_feature.py
index 3277db531..daa3dacc7 100644
--- a/examples/other/tts_finetune/tts3/local/extract_feature.py
+++ b/examples/other/tts_finetune/tts3/local/extract_feature.py
@@ -56,13 +56,15 @@ def get_stats(pretrained_model_dir: Path):
def get_map(duration_file: Union[str, Path],
dump_dir: Path,
- pretrained_model_dir: Path):
+ pretrained_model_dir: Path,
+ replace_spkid: int=0):
"""get phone map and speaker map, save on dump_dir
Args:
duration_file (str): durantions.txt
dump_dir (Path): dump dir
pretrained_model_dir (Path): pretrained model dir
+ replace_spkid (int): the first speaker id in the pretrained speaker map to be replaced by the new speakers
"""
# copy phone map file from pretrained model path
phones_dict = dump_dir / "phone_id_map.txt"
@@ -75,14 +77,24 @@ def get_map(duration_file: Union[str, Path],
speakers = sorted(list(speaker_set))
num = len(speakers)
speaker_dict = dump_dir / "speaker_id_map.txt"
- with open(speaker_dict, 'w') as f, open(pretrained_model_dir /
- "speaker_id_map.txt", 'r') as fr:
- for i, spk in enumerate(speakers):
- f.write(spk + ' ' + str(i) + '\n')
+ spk_dict = {}
+ # get raw spkid-spk dict
+ with open(pretrained_model_dir / "speaker_id_map.txt", 'r') as fr:
for line in fr.readlines():
- spk_id = line.strip().split(" ")[-1]
- if int(spk_id) >= num:
- f.write(line)
+ spk = line.strip().split(" ")[0]
+ spk_id = line.strip().split(" ")[1]
+ spk_dict[spk_id] = spk
+
+ # replace spk on spkid-spk dict
+ assert replace_spkid + num - 1 < len(
+ spk_dict), "Please set correct replace spk id."
+ for i, spk in enumerate(speakers):
+ spk_dict[str(replace_spkid + i)] = spk
+
+ # write a new spk map file
+ with open(speaker_dict, 'w') as f:
+ for spk_id in spk_dict.keys():
+ f.write(spk_dict[spk_id] + ' ' + spk_id + '\n')
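+ # e.g. with replace_spkid=174 and a single new speaker "SSB0005", the entry
+ # for id 174 is written to the new speaker_id_map.txt as "SSB0005 174"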
vocab_phones = {}
with open(phones_dict, 'rt') as f:
@@ -206,10 +218,11 @@ def extract_feature(duration_file: str,
config,
input_dir: Path,
dump_dir: Path,
- pretrained_model_dir: Path):
+ pretrained_model_dir: Path,
+ replace_spkid: int=0):
- sentences, vocab_phones, vocab_speaker = get_map(duration_file, dump_dir,
- pretrained_model_dir)
+ sentences, vocab_phones, vocab_speaker = get_map(
+ duration_file, dump_dir, pretrained_model_dir, replace_spkid)
mel_extractor, pitch_extractor, energy_extractor = get_extractor(config)
wav_files = sorted(list((input_dir).rglob("*.wav")))
@@ -315,6 +328,9 @@ if __name__ == '__main__':
default="./pretrained_models/fastspeech2_aishell3_ckpt_1.1.0",
help="Path to pretrained model")
+ parser.add_argument(
+ "--replace_spkid", type=int, default=0, help="replace spk id")
+
args = parser.parse_args()
input_dir = Path(args.input_dir).expanduser()
@@ -332,4 +348,5 @@ if __name__ == '__main__':
config=config,
input_dir=input_dir,
dump_dir=dump_dir,
- pretrained_model_dir=pretrained_model_dir)
+ pretrained_model_dir=pretrained_model_dir,
+ replace_spkid=args.replace_spkid)
diff --git a/examples/other/tts_finetune/tts3/local/finetune.py b/examples/other/tts_finetune/tts3/local/finetune.py
index 496c2355b..814497aaa 100644
--- a/examples/other/tts_finetune/tts3/local/finetune.py
+++ b/examples/other/tts_finetune/tts3/local/finetune.py
@@ -131,10 +131,10 @@ def train_sp(args, config):
converters=converters, )
# collate function and dataloader
-
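+ # the finetuning dataset can be very small, so cap the batch size at the
+ # dataset size; otherwise drop_last=True below could drop every sample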
+ train_batch_size = min(len(train_metadata), config.batch_size)
train_sampler = DistributedBatchSampler(
train_dataset,
- batch_size=config.batch_size,
+ batch_size=train_batch_size,
shuffle=True,
drop_last=True)
diff --git a/examples/other/tts_finetune/tts3/run.sh b/examples/other/tts_finetune/tts3/run.sh
index 1faa2b46d..ed1705f8c 100755
--- a/examples/other/tts_finetune/tts3/run.sh
+++ b/examples/other/tts_finetune/tts3/run.sh
@@ -15,6 +15,7 @@ output_dir=./exp/default
lang=zh
ngpu=1
finetune_config=./conf/finetune.yaml
+replace_spkid=0
ckpt=snapshot_iter_96699
@@ -62,7 +63,8 @@ if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
--duration_file="./durations.txt" \
--input_dir=${new_dir} \
--dump_dir=${dump_dir} \
- --pretrained_model_dir=${pretrained_model_dir}
+ --pretrained_model_dir=${pretrained_model_dir} \
+ --replace_spkid=$replace_spkid
fi
# create finetune env
@@ -102,5 +104,5 @@ if [ ${stage} -le 6 ] && [ ${stop_stage} -ge 6 ]; then
--output_dir=./test_e2e/ \
--phones_dict=${dump_dir}/phone_id_map.txt \
--speaker_dict=${dump_dir}/speaker_id_map.txt \
- --spk_id=0
+ --spk_id=$replace_spkid
fi
diff --git a/examples/other/tts_finetune/tts3/run_en.sh b/examples/other/tts_finetune/tts3/run_en.sh
index e8551667e..765274e85 100755
--- a/examples/other/tts_finetune/tts3/run_en.sh
+++ b/examples/other/tts_finetune/tts3/run_en.sh
@@ -14,6 +14,7 @@ output_dir=./exp/default
lang=en
ngpu=1
finetune_config=./conf/finetune.yaml
+replace_spkid=0
ckpt=snapshot_iter_66300
@@ -61,7 +62,8 @@ if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
--duration_file="./durations.txt" \
--input_dir=${new_dir} \
--dump_dir=${dump_dir} \
- --pretrained_model_dir=${pretrained_model_dir}
+ --pretrained_model_dir=${pretrained_model_dir} \
+ --replace_spkid=$replace_spkid
fi
# create finetune env
@@ -101,5 +103,5 @@ if [ ${stage} -le 6 ] && [ ${stop_stage} -ge 6 ]; then
--output_dir=./test_e2e/ \
--phones_dict=${dump_dir}/phone_id_map.txt \
--speaker_dict=${dump_dir}/speaker_id_map.txt \
- --spk_id=0
+ --spk_id=$replace_spkid
fi
diff --git a/examples/other/tts_finetune/tts3/run_mix.sh b/examples/other/tts_finetune/tts3/run_mix.sh
new file mode 100644
index 000000000..71008ef5b
--- /dev/null
+++ b/examples/other/tts_finetune/tts3/run_mix.sh
@@ -0,0 +1,110 @@
+#!/bin/bash
+
+set -e
+source path.sh
+
+
+input_dir=./input/SSB0005_mini
+newdir_name="newdir"
+new_dir=${input_dir}/${newdir_name}
+pretrained_model_dir=./pretrained_models/fastspeech2_mix_ckpt_1.2.0
+mfa_tools=./tools
+mfa_dir=./mfa_result
+dump_dir=./dump
+output_dir=./exp/default
+lang=zh
+ngpu=1
+finetune_config=./conf/finetune.yaml
+replace_spkid=174 # csmsc: 174, ljspeech: 175, aishell3: 0~173, vctk: 176
+
+ckpt=snapshot_iter_99300
+
+gpus=1
+CUDA_VISIBLE_DEVICES=${gpus}
+stage=0
+stop_stage=100
+
+
+# with the following command, you can choose the stage range you want to run
+# such as `./run.sh --stage 0 --stop-stage 0`
+# this cannot be mixed with positional arguments `$1`, `$2` ...
+source ${MAIN_ROOT}/utils/parse_options.sh || exit 1
+
+# check oov
+if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
+ echo "check oov"
+ python3 local/check_oov.py \
+ --input_dir=${input_dir} \
+ --pretrained_model_dir=${pretrained_model_dir} \
+ --newdir_name=${newdir_name} \
+ --lang=${lang}
+fi
+
+# get mfa result
+if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
+ echo "get mfa result"
+ python3 local/get_mfa_result.py \
+ --input_dir=${new_dir} \
+ --mfa_dir=${mfa_dir} \
+ --lang=${lang}
+fi
+
+# generate durations.txt
+if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
+ echo "generate durations.txt"
+ python3 local/generate_duration.py \
+ --mfa_dir=${mfa_dir}
+fi
+
+# extract feature
+if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
+ echo "extract feature"
+ python3 local/extract_feature.py \
+ --duration_file="./durations.txt" \
+ --input_dir=${new_dir} \
+ --dump_dir=${dump_dir} \
+ --pretrained_model_dir=${pretrained_model_dir} \
+ --replace_spkid=$replace_spkid
+
+fi
+
+# create finetune env
+if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
+ echo "create finetune env"
+ python3 local/prepare_env.py \
+ --pretrained_model_dir=${pretrained_model_dir} \
+ --output_dir=${output_dir}
+fi
+
+# finetune
+if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
+ echo "finetune..."
+ python3 local/finetune.py \
+ --pretrained_model_dir=${pretrained_model_dir} \
+ --dump_dir=${dump_dir} \
+ --output_dir=${output_dir} \
+ --ngpu=${ngpu} \
+ --epoch=100 \
+ --finetune_config=${finetune_config}
+fi
+
+# synthesize e2e
+if [ ${stage} -le 6 ] && [ ${stop_stage} -ge 6 ]; then
+ echo "in hifigan syn_e2e"
+ python3 ${BIN_DIR}/../synthesize_e2e.py \
+ --am=fastspeech2_aishell3 \
+ --am_config=${pretrained_model_dir}/default.yaml \
+ --am_ckpt=${output_dir}/checkpoints/${ckpt}.pdz \
+ --am_stat=${pretrained_model_dir}/speech_stats.npy \
+ --voc=hifigan_aishell3 \
+ --voc_config=pretrained_models/hifigan_aishell3_ckpt_0.2.0/default.yaml \
+ --voc_ckpt=pretrained_models/hifigan_aishell3_ckpt_0.2.0/snapshot_iter_2500000.pdz \
+ --voc_stat=pretrained_models/hifigan_aishell3_ckpt_0.2.0/feats_stats.npy \
+ --lang=mix \
+ --text=${BIN_DIR}/../sentences_mix.txt \
+ --output_dir=./test_e2e/ \
+ --phones_dict=${dump_dir}/phone_id_map.txt \
+ --speaker_dict=${dump_dir}/speaker_id_map.txt \
+ --spk_id=$replace_spkid
+fi
+
diff --git a/examples/timit/asr1/local/train.sh b/examples/timit/asr1/local/train.sh
index 661407582..1088c7ffa 100755
--- a/examples/timit/asr1/local/train.sh
+++ b/examples/timit/asr1/local/train.sh
@@ -19,6 +19,10 @@ if [ ${seed} != 0 ]; then
export FLAGS_cudnn_deterministic=True
fi
+# the default memory allocator strategy may cause GPU training to hang
+# instead of raising OOM when memory is exhausted
+export FLAGS_allocator_strategy=naive_best_fit
+
if [ ${ngpu} == 0 ]; then
python3 -u ${BIN_DIR}/train.py \
--ngpu ${ngpu} \
diff --git a/examples/tiny/asr0/local/train.sh b/examples/tiny/asr0/local/train.sh
index 8b67902fe..e233a0c0a 100755
--- a/examples/tiny/asr0/local/train.sh
+++ b/examples/tiny/asr0/local/train.sh
@@ -32,6 +32,10 @@ fi
mkdir -p exp
+# the default memory allocator strategy may cause GPU training to hang
+# instead of raising OOM when memory is exhausted
+export FLAGS_allocator_strategy=naive_best_fit
+
if [ ${ngpu} == 0 ]; then
python3 -u ${BIN_DIR}/train.py \
--ngpu ${ngpu} \
diff --git a/examples/tiny/asr1/local/train.sh b/examples/tiny/asr1/local/train.sh
index 459f2e218..fbfb41f6f 100755
--- a/examples/tiny/asr1/local/train.sh
+++ b/examples/tiny/asr1/local/train.sh
@@ -34,6 +34,10 @@ fi
mkdir -p exp
+# the default memory allocator strategy may cause GPU training to hang
+# instead of raising OOM when memory is exhausted
+export FLAGS_allocator_strategy=naive_best_fit
+
if [ ${ngpu} == 0 ]; then
python3 -u ${BIN_DIR}/train.py \
--ngpu ${ngpu} \
diff --git a/examples/wenetspeech/asr1/README.md b/examples/wenetspeech/asr1/README.md
index c08b94e29..5a516f8ea 100644
--- a/examples/wenetspeech/asr1/README.md
+++ b/examples/wenetspeech/asr1/README.md
@@ -12,3 +12,36 @@ show model.tar.gz
```
tar tf model.tar.gz
```
+
+Another way is to create the tarball yourself:
+
+```bash
+tar cvzf asr1_chunk_conformer_u2_wenetspeech_ckpt_1.1.0.model.tar.gz model.yaml conf/tuning/ conf/chunk_conformer.yaml conf/preprocess.yaml data/mean_std.json exp/chunk_conformer/checkpoints/
+```
+
+## Export Static Model
+
+>> Need Paddle >= 2.4
+
+>> `data/test_meeting/data.list`
+>> {"input": [{"name": "input1", "shape": [3.2230625, 80], "feat": "/home/PaddleSpeech/dataset/aishell/data_aishell/wav/test/S0764/BAC009S0764W0163.wav", "filetype": "sound"}], "output": [{"name": "target1", "shape": [9, 5538], "text": "\u697c\u5e02\u8c03\u63a7\u5c06\u53bb\u5411\u4f55\u65b9", "token": "\u697c \u5e02 \u8c03 \u63a7 \u5c06 \u53bb \u5411 \u4f55 \u65b9", "tokenid": "1891 1121 3502 1543 1018 477 528 163 1657"}], "utt": "BAC009S0764W0163", "utt2spk": "S0764"}
+
+>> Test Wav:
+>> wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav
+### U2 chunk conformer
+>> UniDecoder (unidirectional decoder)
+>> Make sure `reverse_weight` in config is `0.0`
+>> https://paddlespeech.bj.bcebos.com/s2t/wenetspeech/asr1/asr1_chunk_conformer_u2_wenetspeech_ckpt_1.1.0.model.tar.gz
+```
+tar zxvf asr1_chunk_conformer_u2_wenetspeech_ckpt_1.1.0.model.tar.gz
+./local/export.sh conf/chunk_conformer.yaml exp/chunk_conformer/checkpoints/avg_10 ./export.ji
+```
+
+### U2++ chunk conformer
+>> BiDecoder
+>> https://paddlespeech.bj.bcebos.com/s2t/wenetspeech/asr1/asr1_chunk_conformer_u2pp_wenetspeech_ckpt_1.1.0.model.tar.gz
+>> Make sure `reverse_weight` in config is not `0.0`
+
+```
+./local/export.sh conf/chunk_conformer_u2pp.yaml exp/chunk_conformer/checkpoints/avg_10 ./export.ji
+```
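
For reference, the exported program can be loaded back through `paddle.jit.layer.Layer`, which is also how the dy2static check later in this PR validates the export. A hedged sketch; the export path and the dummy chunk shape below are placeholders, not values fixed by this README:

```python
import paddle
from paddle.jit.layer import Layer

# load the jit program produced by local/export.sh (path is a placeholder)
layer = Layer()
layer.load('./export', paddle.CPUPlace())

# run one streaming encoder step on a dummy 67-frame, 80-dim fbank chunk
xs = paddle.full([1, 67, 80], 0.1, dtype='float32')
offset = paddle.to_tensor([0], dtype='int32')
att_cache = paddle.zeros([0, 0, 0, 0])
cnn_cache = paddle.zeros([0, 0, 0, 0])

forward_encoder_chunk = getattr(layer, 'forward_encoder_chunk')
ys, att_cache, cnn_cache = forward_encoder_chunk(xs, offset, att_cache, cnn_cache)
print(ys.shape)
```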
diff --git a/examples/wenetspeech/asr1/conf/chunk_conformer.yaml b/examples/wenetspeech/asr1/conf/chunk_conformer.yaml
new file mode 100644
index 000000000..d2f43d873
--- /dev/null
+++ b/examples/wenetspeech/asr1/conf/chunk_conformer.yaml
@@ -0,0 +1,101 @@
+############################################
+# Network Architecture #
+############################################
+cmvn_file:
+cmvn_file_type: "json"
+# encoder related
+encoder: conformer
+encoder_conf:
+ output_size: 512 # dimension of attention
+ attention_heads: 8
+ linear_units: 2048 # the number of units of position-wise feed forward
+ num_blocks: 12 # the number of encoder blocks
+ dropout_rate: 0.1
+ positional_dropout_rate: 0.1
+ attention_dropout_rate: 0.0
+ input_layer: conv2d # encoder input type, you can choose conv2d, conv2d6 or conv2d8
+ normalize_before: True
+ use_cnn_module: True
+ cnn_module_kernel: 15
+ activation_type: swish
+ pos_enc_layer_type: rel_pos
+ selfattention_layer_type: rel_selfattn
+ causal: true
+ use_dynamic_chunk: true
+ cnn_module_norm: 'layer_norm' # using nn.LayerNorm makes model converge faster
+ use_dynamic_left_chunk: false
+# decoder related
+decoder: transformer
+decoder_conf:
+ attention_heads: 8
+ linear_units: 2048
+ num_blocks: 6
+ dropout_rate: 0.1
+ positional_dropout_rate: 0.1
+ self_attention_dropout_rate: 0.0
+ src_attention_dropout_rate: 0.0
+
+# hybrid CTC/attention
+model_conf:
+ ctc_weight: 0.3
+ lsm_weight: 0.1 # label smoothing option
+ reverse_weight: 0.0 # unidecoder
+ length_normalized_loss: false
+ init_type: 'kaiming_uniform'
+
+# https://yaml.org/type/float.html
+###########################################
+# Data #
+###########################################
+train_manifest: data/train_l/data.list
+dev_manifest: data/dev/data.list
+test_manifest: data/test_meeting/data.list
+
+###########################################
+# Dataloader #
+###########################################
+use_streaming_data: True
+unit_type: 'char'
+vocab_filepath: data/lang_char/vocab.txt
+preprocess_config: conf/preprocess.yaml
+spm_model_prefix: ''
+feat_dim: 80
+stride_ms: 10.0
+window_ms: 25.0
+sortagrad: 0 # Feed samples from shortest to longest ; -1: enabled for all epochs, 0: disabled, other: enabled for 'other' epochs
+batch_size: 32
+do_filter: True
+maxlen_in: 1200 # if do_filter == False && input length > maxlen-in, batchsize is automatically reduced
+maxlen_out: 100 # if do_filter == False && output length > maxlen-out, batchsize is automatically reduced
+minlen_in: 10
+minlen_out: 0
+minibatches: 0 # for debug
+batch_count: auto
+batch_bins: 0
+batch_frames_in: 0
+batch_frames_out: 0
+batch_frames_inout: 0
+num_workers: 0
+subsampling_factor: 1
+num_encs: 1
+
+
+###########################################
+# Training #
+###########################################
+n_epoch: 26
+accum_grad: 32
+global_grad_clip: 5.0
+dist_sampler: True
+log_interval: 1
+checkpoint:
+ kbest_n: 50
+ latest_n: 5
+optim: adam
+optim_conf:
+ lr: 0.001
+ weight_decay: 1.0e-6
+scheduler: warmuplr
+scheduler_conf:
+ warmup_steps: 5000
+ lr_decay: 1.0
diff --git a/examples/wenetspeech/asr1/conf/chunk_conformer_u2pp.yaml b/examples/wenetspeech/asr1/conf/chunk_conformer_u2pp.yaml
new file mode 100644
index 000000000..2bb2006b5
--- /dev/null
+++ b/examples/wenetspeech/asr1/conf/chunk_conformer_u2pp.yaml
@@ -0,0 +1,100 @@
+############################################
+# Network Architecture #
+############################################
+cmvn_file:
+cmvn_file_type: "json"
+# encoder related
+encoder: conformer
+encoder_conf:
+ output_size: 512 # dimension of attention
+ attention_heads: 8
+ linear_units: 2048 # the number of units of position-wise feed forward
+ num_blocks: 12 # the number of encoder blocks
+ dropout_rate: 0.1
+ positional_dropout_rate: 0.1
+ attention_dropout_rate: 0.1
+ input_layer: conv2d # encoder input type, you can choose conv2d, conv2d6 or conv2d8
+ normalize_before: True
+ use_cnn_module: True
+ cnn_module_kernel: 15
+ activation_type: swish
+ pos_enc_layer_type: rel_pos
+ selfattention_layer_type: rel_selfattn
+ causal: true
+ use_dynamic_chunk: true
+ cnn_module_norm: 'layer_norm' # using nn.LayerNorm makes model converge faster
+ use_dynamic_left_chunk: false
+# decoder related
+decoder: bitransformer
+decoder_conf:
+ attention_heads: 8
+ linear_units: 2048
+ num_blocks: 3 # the number of encoder blocks
+ r_num_blocks: 3 #only for bitransformer
+ dropout_rate: 0.1
+ positional_dropout_rate: 0.1
+ self_attention_dropout_rate: 0.1
+ src_attention_dropout_rate: 0.1
+
+# hybrid CTC/attention
+model_conf:
+ ctc_weight: 0.3
+ lsm_weight: 0.1 # label smoothing option
+ length_normalized_loss: false
+ reverse_weight: 0.3 # only for bitransformer decoder
+ init_type: 'kaiming_uniform' # !Warning: required for convergence
+
+###########################################
+# Data #
+###########################################
+train_manifest: data/train_l/data.list
+dev_manifest: data/dev/data.list
+test_manifest: data/test_meeting/data.list
+
+###########################################
+# Dataloader #
+###########################################
+use_stream_data: True
+vocab_filepath: data/lang_char/vocab.txt
+unit_type: 'char'
+preprocess_config: conf/preprocess.yaml
+spm_model_prefix: ''
+feat_dim: 80
+stride_ms: 10.0
+window_ms: 25.0
+sortagrad: 0 # Feed samples from shortest to longest ; -1: enabled for all epochs, 0: disabled, other: enabled for 'other' epochs
+batch_size: 32
+do_filter: True
+maxlen_in: 1200 # if do_filter == False && input length > maxlen-in, batchsize is automatically reduced
+maxlen_out: 100 # if do_filter == False && output length > maxlen-out, batchsize is automatically reduced
+minlen_in: 10
+minlen_out: 0
+minibatches: 0 # for debug
+batch_count: auto
+batch_bins: 0
+batch_frames_in: 0
+batch_frames_out: 0
+batch_frames_inout: 0
+num_workers: 0
+subsampling_factor: 1
+num_encs: 1
+
+###########################################
+# Training #
+###########################################
+n_epoch: 150
+accum_grad: 8
+global_grad_clip: 5.0
+dist_sampler: False
+optim: adam
+optim_conf:
+ lr: 0.002
+ weight_decay: 1.0e-6
+scheduler: warmuplr
+scheduler_conf:
+ warmup_steps: 25000
+ lr_decay: 1.0
+log_interval: 100
+checkpoint:
+ kbest_n: 50
+ latest_n: 5
diff --git a/examples/wenetspeech/asr1/conf/preprocess.yaml b/examples/wenetspeech/asr1/conf/preprocess.yaml
index f7f4c58d5..c7ccc522d 100644
--- a/examples/wenetspeech/asr1/conf/preprocess.yaml
+++ b/examples/wenetspeech/asr1/conf/preprocess.yaml
@@ -5,7 +5,7 @@ process:
n_mels: 80
n_shift: 160
win_length: 400
- dither: 0.1
+ dither: 1.0
- type: cmvn_json
cmvn_path: data/mean_std.json
# these three processes are a.k.a. SpecAugument
diff --git a/examples/wenetspeech/asr1/conf/tuning/chunk_decode.yaml b/examples/wenetspeech/asr1/conf/tuning/chunk_decode.yaml
new file mode 100644
index 000000000..6945ed6eb
--- /dev/null
+++ b/examples/wenetspeech/asr1/conf/tuning/chunk_decode.yaml
@@ -0,0 +1,12 @@
+beam_size: 10
+decoding_method: attention # 'attention', 'ctc_greedy_search', 'ctc_prefix_beam_search', 'attention_rescoring'
+ctc_weight: 0.5 # ctc weight for attention rescoring decode mode.
+reverse_weight: 0.3 # reverse weight for attention rescoring decode mode.
+decoding_chunk_size: 16 # decoding chunk size. Defaults to -1.
+ # <0: for decoding, use full chunk.
+ # >0: for decoding, use fixed chunk size as set.
+ # 0: used for training, it's prohibited here.
+num_decoding_left_chunks: -1 # number of left chunks for decoding. Defaults to -1.
+simulate_streaming: True # simulate streaming inference. Defaults to False.
+decode_batch_size: 128
+error_rate_type: cer
diff --git a/examples/wenetspeech/asr1/conf/tuning/decode.yaml b/examples/wenetspeech/asr1/conf/tuning/decode.yaml
index 6924bfa63..4015e9836 100644
--- a/examples/wenetspeech/asr1/conf/tuning/decode.yaml
+++ b/examples/wenetspeech/asr1/conf/tuning/decode.yaml
@@ -1,11 +1,12 @@
-decode_batch_size: 128
-error_rate_type: cer
-decoding_method: attention # 'attention', 'ctc_greedy_search', 'ctc_prefix_beam_search', 'attention_rescoring'
beam_size: 10
+decoding_method: attention # 'attention', 'ctc_greedy_search', 'ctc_prefix_beam_search', 'attention_rescoring'
ctc_weight: 0.5 # ctc weight for attention rescoring decode mode.
+reverse_weight: 0.3 # reverse weight for attention rescoring decode mode.
decoding_chunk_size: -1 # decoding chunk size. Defaults to -1.
# <0: for decoding, use full chunk.
# >0: for decoding, use fixed chunk size as set.
# 0: used for training, it's prohibited here.
num_decoding_left_chunks: -1 # number of left chunks for decoding. Defaults to -1.
-simulate_streaming: False # simulate streaming inference. Defaults to False.
\ No newline at end of file
+simulate_streaming: False # simulate streaming inference. Defaults to False.
+decode_batch_size: 128
+error_rate_type: cer
diff --git a/examples/wenetspeech/asr1/local/export.sh b/examples/wenetspeech/asr1/local/export.sh
index 6b646b469..1f89afd6b 100755
--- a/examples/wenetspeech/asr1/local/export.sh
+++ b/examples/wenetspeech/asr1/local/export.sh
@@ -12,9 +12,14 @@ config_path=$1
ckpt_path_prefix=$2
jit_model_export_path=$3
+
+# export can not use the streaming DataLoader, so set use_stream_data to False
+# u2: reverse_weight should be 0.0
+# u2pp: reverse_weight should be same with config file. e.g. 0.3
python3 -u ${BIN_DIR}/export.py \
--ngpu ${ngpu} \
--config ${config_path} \
+--opts use_stream_data False \
--checkpoint_path ${ckpt_path_prefix} \
--export_path ${jit_model_export_path}
diff --git a/examples/wenetspeech/asr1/local/quant.sh b/examples/wenetspeech/asr1/local/quant.sh
new file mode 100755
index 000000000..9dfea9045
--- /dev/null
+++ b/examples/wenetspeech/asr1/local/quant.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+if [ $# != 4 ];then
+ echo "usage: ${0} config_path decode_config_path ckpt_path_prefix audio_file"
+ exit -1
+fi
+
+ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
+echo "using $ngpu gpus..."
+
+config_path=$1
+decode_config_path=$2
+ckpt_prefix=$3
+audio_file=$4
+
+mkdir -p data
+wget -nc https://paddlespeech.bj.bcebos.com/datasets/single_wav/zh/demo_01_03.wav -P data/
+if [ $? -ne 0 ]; then
+ exit 1
+fi
+
+if [ ! -f ${audio_file} ]; then
+ echo "Plase input the right audio_file path"
+ exit 1
+fi
+
+
+chunk_mode=false
+if [[ ${config_path} =~ ^.*chunk_.*yaml$ ]];then
+ chunk_mode=true
+fi
+
+# download language model
+#bash local/download_lm_ch.sh
+#if [ $? -ne 0 ]; then
+# exit 1
+#fi
+
+for type in attention_rescoring; do
+ echo "decoding ${type}"
+ batch_size=1
+ output_dir=${ckpt_prefix}
+ mkdir -p ${output_dir}
+ python3 -u ${BIN_DIR}/quant.py \
+ --ngpu ${ngpu} \
+ --config ${config_path} \
+ --decode_cfg ${decode_config_path} \
+ --result_file ${output_dir}/${type}.rsl \
+ --checkpoint_path ${ckpt_prefix} \
+ --opts decode.decoding_method ${type} \
+ --opts decode.decode_batch_size ${batch_size} \
+ --audio_file ${audio_file}
+
+ if [ $? -ne 0 ]; then
+ echo "Failed in evaluation!"
+ exit 1
+ fi
+done
+exit 0
diff --git a/examples/wenetspeech/asr1/local/train.sh b/examples/wenetspeech/asr1/local/train.sh
index 01af00b61..6813d270c 100755
--- a/examples/wenetspeech/asr1/local/train.sh
+++ b/examples/wenetspeech/asr1/local/train.sh
@@ -35,6 +35,10 @@ echo ${ips_config}
mkdir -p exp
+# the default memory allocator strategy may cause GPU training to hang
+# instead of raising OOM when memory is exhausted
+export FLAGS_allocator_strategy=naive_best_fit
+
if [ ${ngpu} == 0 ]; then
python3 -u ${BIN_DIR}/train.py \
--ngpu ${ngpu} \
diff --git a/paddlespeech/audio/compliance/kaldi.py b/paddlespeech/audio/compliance/kaldi.py
index 538be0196..eb92ec1f2 100644
--- a/paddlespeech/audio/compliance/kaldi.py
+++ b/paddlespeech/audio/compliance/kaldi.py
@@ -74,16 +74,16 @@ def _feature_window_function(
window_size: int,
blackman_coeff: float,
dtype: int, ) -> Tensor:
- if window_type == HANNING:
+ if window_type == "hann":
return get_window('hann', window_size, fftbins=False, dtype=dtype)
- elif window_type == HAMMING:
+ elif window_type == "hamming":
return get_window('hamming', window_size, fftbins=False, dtype=dtype)
- elif window_type == POVEY:
+ elif window_type == "povey":
return get_window(
'hann', window_size, fftbins=False, dtype=dtype).pow(0.85)
- elif window_type == RECTANGULAR:
+ elif window_type == "rect":
return paddle.ones([window_size], dtype=dtype)
- elif window_type == BLACKMAN:
+ elif window_type == "blackman":
a = 2 * math.pi / (window_size - 1)
window_function = paddle.arange(window_size, dtype=dtype)
return (blackman_coeff - 0.5 * paddle.cos(a * window_function) +
@@ -216,7 +216,7 @@ def spectrogram(waveform: Tensor,
sr: int=16000,
snip_edges: bool=True,
subtract_mean: bool=False,
- window_type: str=POVEY) -> Tensor:
+ window_type: str="povey") -> Tensor:
"""Compute and return a spectrogram from a waveform. The output is identical to Kaldi's.
Args:
@@ -236,7 +236,7 @@ def spectrogram(waveform: Tensor,
snip_edges (bool, optional): Drop samples in the end of waveform that cann't fit a singal frame when it
is set True. Otherwise performs reflect padding to the end of waveform. Defaults to True.
subtract_mean (bool, optional): Whether to subtract mean of feature files. Defaults to False.
- window_type (str, optional): Choose type of window for FFT computation. Defaults to POVEY.
+ window_type (str, optional): Choose type of window for FFT computation. Defaults to "povey".
Returns:
Tensor: A spectrogram tensor with shape `(m, padded_window_size // 2 + 1)` where m is the number of frames
@@ -357,10 +357,13 @@ def _get_mel_banks(num_bins: int,
('Bad values in options: vtln-low {} and vtln-high {}, versus '
'low-freq {} and high-freq {}'.format(vtln_low, vtln_high, low_freq, high_freq))
- bin = paddle.arange(num_bins).unsqueeze(1)
+ bin = paddle.arange(num_bins, dtype=paddle.float32).unsqueeze(1)
+ # left_mel = mel_low_freq + bin * mel_freq_delta # (num_bins, 1)
+ # center_mel = mel_low_freq + (bin + 1.0) * mel_freq_delta # (num_bins, 1)
+ # right_mel = mel_low_freq + (bin + 2.0) * mel_freq_delta # (num_bins, 1)
left_mel = mel_low_freq + bin * mel_freq_delta # (num_bins, 1)
- center_mel = mel_low_freq + (bin + 1.0) * mel_freq_delta # (num_bins, 1)
- right_mel = mel_low_freq + (bin + 2.0) * mel_freq_delta # (num_bins, 1)
+ center_mel = left_mel + mel_freq_delta
+ right_mel = center_mel + mel_freq_delta
if vtln_warp_factor != 1.0:
left_mel = _vtln_warp_mel_freq(vtln_low, vtln_high, low_freq, high_freq,
@@ -373,7 +376,8 @@ def _get_mel_banks(num_bins: int,
center_freqs = _inverse_mel_scale(center_mel) # (num_bins)
# (1, num_fft_bins)
- mel = _mel_scale(fft_bin_width * paddle.arange(num_fft_bins)).unsqueeze(0)
+ mel = _mel_scale(fft_bin_width * paddle.arange(
+ num_fft_bins, dtype=paddle.float32)).unsqueeze(0)
# (num_bins, num_fft_bins)
up_slope = (mel - left_mel) / (center_mel - left_mel)
@@ -418,11 +422,11 @@ def fbank(waveform: Tensor,
vtln_high: float=-500.0,
vtln_low: float=100.0,
vtln_warp: float=1.0,
- window_type: str=POVEY) -> Tensor:
+ window_type: str="povey") -> Tensor:
"""Compute and return filter banks from a waveform. The output is identical to Kaldi's.
Args:
- waveform (Tensor): A waveform tensor with shape `(C, T)`.
+ waveform (Tensor): A waveform tensor with shape `(C, T)`. `C` is in the range [0,1].
blackman_coeff (float, optional): Coefficient for Blackman window.. Defaults to 0.42.
channel (int, optional): Select the channel of waveform. Defaults to -1.
dither (float, optional): Dithering constant . Defaults to 0.0.
@@ -448,7 +452,7 @@ def fbank(waveform: Tensor,
vtln_high (float, optional): High inflection point in piecewise linear VTLN warping function. Defaults to -500.0.
vtln_low (float, optional): Low inflection point in piecewise linear VTLN warping function. Defaults to 100.0.
vtln_warp (float, optional): Vtln warp factor. Defaults to 1.0.
- window_type (str, optional): Choose type of window for FFT computation. Defaults to POVEY.
+ window_type (str, optional): Choose type of window for FFT computation. Defaults to "povey".
Returns:
Tensor: A filter banks tensor with shape `(m, n_mels)`.
@@ -472,7 +476,8 @@ def fbank(waveform: Tensor,
# (n_mels, padded_window_size // 2)
mel_energies, _ = _get_mel_banks(n_mels, padded_window_size, sr, low_freq,
high_freq, vtln_low, vtln_high, vtln_warp)
- mel_energies = mel_energies.astype(dtype)
+ # mel_energies = mel_energies.astype(dtype)
+ assert mel_energies.dtype == dtype
# (n_mels, padded_window_size // 2 + 1)
mel_energies = paddle.nn.functional.pad(
@@ -537,7 +542,7 @@ def mfcc(waveform: Tensor,
vtln_high: float=-500.0,
vtln_low: float=100.0,
vtln_warp: float=1.0,
- window_type: str=POVEY) -> Tensor:
+ window_type: str="povey") -> Tensor:
"""Compute and return mel frequency cepstral coefficients from a waveform. The output is
identical to Kaldi's.
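
The `_get_mel_banks` rewrite only changes how the band edges are computed, not their values: since `left_mel = mel_low_freq + bin * mel_freq_delta`, the original `center_mel = mel_low_freq + (bin + 1) * mel_freq_delta` and `right_mel = mel_low_freq + (bin + 2) * mel_freq_delta` are exactly `left_mel + mel_freq_delta` and `center_mel + mel_freq_delta`. A quick numerical check; the mel constants below are arbitrary assumptions for illustration:

```python
import numpy as np

mel_low_freq = 20.0      # arbitrary example values
mel_freq_delta = 15.0
num_bins = 8
bins = np.arange(num_bins, dtype=np.float32).reshape(-1, 1)

# original formulation
left_old = mel_low_freq + bins * mel_freq_delta
center_old = mel_low_freq + (bins + 1.0) * mel_freq_delta
right_old = mel_low_freq + (bins + 2.0) * mel_freq_delta

# incremental formulation used in this PR
left_new = mel_low_freq + bins * mel_freq_delta
center_new = left_new + mel_freq_delta
right_new = center_new + mel_freq_delta

np.testing.assert_allclose(center_old, center_new)
np.testing.assert_allclose(right_old, right_new)
print("band edges match")
```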
diff --git a/paddlespeech/audio/transform/spectrogram.py b/paddlespeech/audio/transform/spectrogram.py
index 864f3f994..cba60cfdb 100644
--- a/paddlespeech/audio/transform/spectrogram.py
+++ b/paddlespeech/audio/transform/spectrogram.py
@@ -382,6 +382,36 @@ class LogMelSpectrogramKaldi():
return mat
+class WavProcess():
+ def __init__(self, dither=0.0):
+ """
+ Args:
+ dither (float): Dithering constant
+
+ Returns:
+ """
+
+ self.dither = dither
+
+ def __call__(self, x, train):
+ """
+ Args:
+ x (np.ndarray): raw waveform with shape (Ti,)
+ train (bool): True for train mode.
+
+ Raises:
+ ValueError: multi-channel input (Ti, C) is not supported
+
+ Returns:
+ np.ndarray: waveform with shape (T, 1)
+ """
+ dither = self.dither if train else 0.0
+ if x.ndim != 1:
+ raise ValueError("x with shape [Time, Channel] is not supported")
+ waveform = np.expand_dims(x, -1)
+ return waveform
+
+
class LogMelSpectrogramKaldi_decay():
def __init__(
self,
diff --git a/paddlespeech/audio/transform/transformation.py b/paddlespeech/audio/transform/transformation.py
index d24d6437c..e2f66dbf2 100644
--- a/paddlespeech/audio/transform/transformation.py
+++ b/paddlespeech/audio/transform/transformation.py
@@ -41,6 +41,7 @@ import_alias = dict(
utterance_cmvn="paddlespeech.audio.transform.cmvn:UtteranceCMVN",
fbank="paddlespeech.audio.transform.spectrogram:LogMelSpectrogram",
spectrogram="paddlespeech.audio.transform.spectrogram:Spectrogram",
+ wav_process="paddlespeech.audio.transform.spectrogram:WavProcess",
stft="paddlespeech.audio.transform.spectrogram:Stft",
istft="paddlespeech.audio.transform.spectrogram:IStft",
stft2fbank="paddlespeech.audio.transform.spectrogram:Stft2LogMelSpectrogram",
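
A small sketch of the new transform in isolation (the 16 kHz one-second length is an arbitrary assumption); with the `wav_process` alias registered above, the same step can also be referenced by name from a preprocess config:

```python
import numpy as np
from paddlespeech.audio.transform.spectrogram import WavProcess

x = np.random.randn(16000).astype(np.float32)  # 1-D waveform, shape (Ti,)
proc = WavProcess(dither=0.0)
wav = proc(x, train=False)
print(wav.shape)  # (16000, 1): the raw waveform reshaped for the model
```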
diff --git a/paddlespeech/audio/utils/tensor_utils.py b/paddlespeech/audio/utils/tensor_utils.py
index e9008f174..93883c94d 100644
--- a/paddlespeech/audio/utils/tensor_utils.py
+++ b/paddlespeech/audio/utils/tensor_utils.py
@@ -152,8 +152,8 @@ def add_sos_eos(ys_pad: paddle.Tensor, sos: int, eos: int,
# return pad_sequence(ys_in, padding_value=eos).transpose([1,0]), pad_sequence(ys_out, padding_value=ignore_id).transpose([1,0])
B = ys_pad.shape[0]
- _sos = paddle.ones([B, 1], dtype=ys_pad.dtype) * sos
- _eos = paddle.ones([B, 1], dtype=ys_pad.dtype) * eos
+ _sos = paddle.full([B, 1], sos, dtype=ys_pad.dtype)
+ _eos = paddle.full([B, 1], eos, dtype=ys_pad.dtype)
ys_in = paddle.cat([_sos, ys_pad], dim=1)
mask_pad = (ys_in == ignore_id)
ys_in = ys_in.masked_fill(mask_pad, eos)
@@ -279,8 +279,8 @@ def st_reverse_pad_list(ys_pad: paddle.Tensor,
# >>> tensor([[3, 2, 1],
# >>> [4, 8, 9],
# >>> [2, 2, 2]])
- eos = paddle.full([1], eos, dtype=r_hyps.dtype)
- r_hyps = paddle.where(seq_mask, r_hyps, eos)
+ _eos = paddle.full([1], eos, dtype=r_hyps.dtype)
+ r_hyps = paddle.where(seq_mask, r_hyps, _eos)
# >>> r_hyps
# >>> tensor([[3, 2, 1],
# >>> [4, 8, 9],
diff --git a/paddlespeech/cli/README.md b/paddlespeech/cli/README.md
index 19c822040..e6e216c0b 100644
--- a/paddlespeech/cli/README.md
+++ b/paddlespeech/cli/README.md
@@ -42,3 +42,7 @@
```bash
paddlespeech text --task punc --input 今天的天气真不错啊你下午有空吗我想约你一起去吃饭
```
+- Faster Punctuation Restoration
+ ```bash
+ paddlespeech text --task punc --input 今天的天气真不错啊你下午有空吗我想约你一起去吃饭 --model ernie_linear_p3_wudao_fast
+ ```
diff --git a/paddlespeech/cli/README_cn.md b/paddlespeech/cli/README_cn.md
index 4b15d6c7b..6464c598c 100644
--- a/paddlespeech/cli/README_cn.md
+++ b/paddlespeech/cli/README_cn.md
@@ -43,3 +43,7 @@
```bash
paddlespeech text --task punc --input 今天的天气真不错啊你下午有空吗我想约你一起去吃饭
```
+- 快速标点恢复
+ ```bash
+ paddlespeech text --task punc --input 今天的天气真不错啊你下午有空吗我想约你一起去吃饭 --model ernie_linear_p3_wudao_fast
+ ```
diff --git a/paddlespeech/cli/asr/infer.py b/paddlespeech/cli/asr/infer.py
index 7296776f9..437f64631 100644
--- a/paddlespeech/cli/asr/infer.py
+++ b/paddlespeech/cli/asr/infer.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
+import io
import os
import sys
import time
@@ -51,7 +52,7 @@ class ASRExecutor(BaseExecutor):
self.parser.add_argument(
'--model',
type=str,
- default='conformer_wenetspeech',
+ default='conformer_u2pp_wenetspeech',
choices=[
tag[:tag.index('-')]
for tag in self.task_resource.pretrained_models.keys()
@@ -229,6 +230,8 @@ class ASRExecutor(BaseExecutor):
audio_file = input
if isinstance(audio_file, (str, os.PathLike)):
logger.debug("Preprocess audio_file:" + audio_file)
+ elif isinstance(audio_file, io.BytesIO):
+ audio_file.seek(0)
# Get the object for feature extraction
if "deepspeech2" in model_type or "conformer" in model_type or "transformer" in model_type:
@@ -352,6 +355,8 @@ class ASRExecutor(BaseExecutor):
if not os.path.isfile(audio_file):
logger.error("Please input the right audio file path")
return False
+ elif isinstance(audio_file, io.BytesIO):
+ audio_file.seek(0)
logger.debug("checking the audio file format......")
try:
@@ -465,7 +470,7 @@ class ASRExecutor(BaseExecutor):
@stats_wrapper
def __call__(self,
audio_file: os.PathLike,
- model: str='conformer_wenetspeech',
+ model: str='conformer_u2pp_wenetspeech',
lang: str='zh',
sample_rate: int=16000,
config: os.PathLike=None,
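
With the default switched to `conformer_u2pp_wenetspeech`, a plain Python call now picks up the U2++ checkpoint automatically, and `io.BytesIO` objects are additionally accepted by the preprocess/check steps for in-memory audio. A minimal sketch; the wav filename is a placeholder:

```python
from paddlespeech.cli.asr.infer import ASRExecutor

asr = ASRExecutor()
# uses the new default model conformer_u2pp_wenetspeech
result = asr(audio_file="zh.wav", lang="zh", sample_rate=16000)
print(result)
```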
diff --git a/paddlespeech/cli/text/infer.py b/paddlespeech/cli/text/infer.py
index 24b8c9c25..ff822f674 100644
--- a/paddlespeech/cli/text/infer.py
+++ b/paddlespeech/cli/text/infer.py
@@ -20,10 +20,13 @@ from typing import Optional
from typing import Union
import paddle
+import yaml
+from yacs.config import CfgNode
from ..executor import BaseExecutor
from ..log import logger
from ..utils import stats_wrapper
+from paddlespeech.text.models.ernie_linear import ErnieLinear
__all__ = ['TextExecutor']
@@ -139,6 +142,66 @@ class TextExecutor(BaseExecutor):
self.model.eval()
+ # init models trained with the new trainer
+ def _init_from_path_new(self,
+ task: str='punc',
+ model_type: str='ernie_linear_p7_wudao',
+ lang: str='zh',
+ cfg_path: Optional[os.PathLike]=None,
+ ckpt_path: Optional[os.PathLike]=None,
+ vocab_file: Optional[os.PathLike]=None):
+ if hasattr(self, 'model'):
+ logger.debug('Model had been initialized.')
+ return
+
+ self.task = task
+
+ if cfg_path is None or ckpt_path is None or vocab_file is None:
+ tag = '-'.join([model_type, task, lang])
+ self.task_resource.set_task_model(tag, version=None)
+ self.cfg_path = os.path.join(
+ self.task_resource.res_dir,
+ self.task_resource.res_dict['cfg_path'])
+ self.ckpt_path = os.path.join(
+ self.task_resource.res_dir,
+ self.task_resource.res_dict['ckpt_path'])
+ self.vocab_file = os.path.join(
+ self.task_resource.res_dir,
+ self.task_resource.res_dict['vocab_file'])
+ else:
+ self.cfg_path = os.path.abspath(cfg_path)
+ self.ckpt_path = os.path.abspath(ckpt_path)
+ self.vocab_file = os.path.abspath(vocab_file)
+
+ model_name = model_type[:model_type.rindex('_')]
+
+ if self.task == 'punc':
+ # punc list
+ self._punc_list = []
+ with open(self.vocab_file, 'r') as f:
+ for line in f:
+ self._punc_list.append(line.strip())
+
+ # model
+ with open(self.cfg_path) as f:
+ config = CfgNode(yaml.safe_load(f))
+ self.model = ErnieLinear(**config["model"])
+
+ _, tokenizer_class = self.task_resource.get_model_class(model_name)
+ state_dict = paddle.load(self.ckpt_path)
+ self.model.set_state_dict(state_dict["main_params"])
+ self.model.eval()
+
+ # tokenizer: the fast version uses ernie-3.0-mini-zh, the slow version uses ernie-1.0
+ if 'fast' not in model_type:
+ self.tokenizer = tokenizer_class.from_pretrained('ernie-1.0')
+ else:
+ self.tokenizer = tokenizer_class.from_pretrained(
+ 'ernie-3.0-mini-zh')
+
+ else:
+ raise NotImplementedError
+
def _clean_text(self, text):
text = text.lower()
text = re.sub('[^A-Za-z0-9\u4e00-\u9fa5]', '', text)
@@ -179,7 +242,7 @@ class TextExecutor(BaseExecutor):
else:
raise NotImplementedError
- def postprocess(self) -> Union[str, os.PathLike]:
+ def postprocess(self, isNewTrainer: bool=False) -> Union[str, os.PathLike]:
"""
Output postprocess and return human-readable results such as texts and audio files.
"""
@@ -192,13 +255,13 @@ class TextExecutor(BaseExecutor):
input_ids[1:seq_len - 1])
labels = preds[1:seq_len - 1].tolist()
assert len(tokens) == len(labels)
-
+ if isNewTrainer:
+ self._punc_list = [0] + self._punc_list
text = ''
for t, l in zip(tokens, labels):
text += t
if l != 0: # Non punc.
text += self._punc_list[l]
-
return text
else:
raise NotImplementedError
@@ -255,10 +318,20 @@ class TextExecutor(BaseExecutor):
"""
Python API to call an executor.
"""
- paddle.set_device(device)
- self._init_from_path(task, model, lang, config, ckpt_path, punc_vocab)
- self.preprocess(text)
- self.infer()
- res = self.postprocess() # Retrieve result of text task.
-
+ # old version models
+ if model in ['ernie_linear_p7_wudao', 'ernie_linear_p3_wudao']:
+ paddle.set_device(device)
+ self._init_from_path(task, model, lang, config, ckpt_path,
+ punc_vocab)
+ self.preprocess(text)
+ self.infer()
+ res = self.postprocess() # Retrieve result of text task.
+ # new version models use the new infer path
+ else:
+ paddle.set_device(device)
+ self._init_from_path_new(task, model, lang, config, ckpt_path,
+ punc_vocab)
+ self.preprocess(text)
+ self.infer()
+ res = self.postprocess(isNewTrainer=True)
return res
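
The dispatch above means the new fast model only needs a different `model` argument from the Python API as well. A sketch assuming the usual CLI-style keyword names (`text`, `task`, `model`, `lang`):

```python
from paddlespeech.cli.text.infer import TextExecutor

text_executor = TextExecutor()
# routed through _init_from_path_new + postprocess(isNewTrainer=True)
result = text_executor(
    text="今天的天气真不错啊你下午有空吗我想约你一起去吃饭",
    task="punc",
    model="ernie_linear_p3_wudao_fast",
    lang="zh")
print(result)
```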
diff --git a/paddlespeech/resource/model_alias.py b/paddlespeech/resource/model_alias.py
index 9c76dd4b3..f5ec655b7 100644
--- a/paddlespeech/resource/model_alias.py
+++ b/paddlespeech/resource/model_alias.py
@@ -25,6 +25,8 @@ model_alias = {
"deepspeech2online": ["paddlespeech.s2t.models.ds2:DeepSpeech2Model"],
"conformer": ["paddlespeech.s2t.models.u2:U2Model"],
"conformer_online": ["paddlespeech.s2t.models.u2:U2Model"],
+ "conformer_u2pp": ["paddlespeech.s2t.models.u2:U2Model"],
+ "conformer_u2pp_online": ["paddlespeech.s2t.models.u2:U2Model"],
"transformer": ["paddlespeech.s2t.models.u2:U2Model"],
"wenetspeech": ["paddlespeech.s2t.models.u2:U2Model"],
@@ -51,6 +53,10 @@ model_alias = {
"paddlespeech.text.models:ErnieLinear",
"paddlenlp.transformers:ErnieTokenizer"
],
+ "ernie_linear_p3_wudao": [
+ "paddlespeech.text.models:ErnieLinear",
+ "paddlenlp.transformers:ErnieTokenizer"
+ ],
# ---------------------------------
# -------------- TTS --------------
diff --git a/paddlespeech/resource/pretrained_models.py b/paddlespeech/resource/pretrained_models.py
index f049879a3..efd6bb3f2 100644
--- a/paddlespeech/resource/pretrained_models.py
+++ b/paddlespeech/resource/pretrained_models.py
@@ -68,6 +68,46 @@ asr_dynamic_pretrained_models = {
'',
},
},
+ "conformer_u2pp_wenetspeech-zh-16k": {
+ '1.1': {
+ 'url':
+ 'https://paddlespeech.bj.bcebos.com/s2t/wenetspeech/asr1/asr1_chunk_conformer_u2pp_wenetspeech_ckpt_1.1.3.model.tar.gz',
+ 'md5':
+ '662b347e1d2131b7a4dc5398365e2134',
+ 'cfg_path':
+ 'model.yaml',
+ 'ckpt_path':
+ 'exp/chunk_conformer_u2pp/checkpoints/avg_10',
+ 'model':
+ 'exp/chunk_conformer_u2pp/checkpoints/avg_10.pdparams',
+ 'params':
+ 'exp/chunk_conformer_u2pp/checkpoints/avg_10.pdparams',
+ 'lm_url':
+ '',
+ 'lm_md5':
+ '',
+ },
+ },
+ "conformer_u2pp_online_wenetspeech-zh-16k": {
+ '1.1': {
+ 'url':
+ 'https://paddlespeech.bj.bcebos.com/s2t/wenetspeech/asr1/asr1_chunk_conformer_u2pp_wenetspeech_ckpt_1.1.4.model.tar.gz',
+ 'md5':
+ '3100fc1eac5779486cab859366992d0b',
+ 'cfg_path':
+ 'model.yaml',
+ 'ckpt_path':
+ 'exp/chunk_conformer_u2pp/checkpoints/avg_10',
+ 'model':
+ 'exp/chunk_conformer_u2pp/checkpoints/avg_10.pdparams',
+ 'params':
+ 'exp/chunk_conformer_u2pp/checkpoints/avg_10.pdparams',
+ 'lm_url':
+ '',
+ 'lm_md5':
+ '',
+ },
+ },
"conformer_online_multicn-zh-16k": {
'1.0': {
'url':
@@ -529,7 +569,7 @@ text_dynamic_pretrained_models = {
'ckpt/model_state.pdparams',
'vocab_file':
'punc_vocab.txt',
- },
+ }
},
"ernie_linear_p3_wudao-punc-zh": {
'1.0': {
@@ -543,8 +583,22 @@ text_dynamic_pretrained_models = {
'ckpt/model_state.pdparams',
'vocab_file':
'punc_vocab.txt',
- },
+ }
},
+ "ernie_linear_p3_wudao_fast-punc-zh": {
+ '1.0': {
+ 'url':
+ 'https://paddlespeech.bj.bcebos.com/text/ernie_linear_p3_wudao_fast-punc-zh.tar.gz',
+ 'md5':
+ 'c93f9594119541a5dbd763381a751d08',
+ 'cfg_path':
+ 'ckpt/model_config.json',
+ 'ckpt_path':
+ 'ckpt/model_state.pdparams',
+ 'vocab_file':
+ 'punc_vocab.txt',
+ }
+ }
}
# ---------------------------------
diff --git a/paddlespeech/s2t/__init__.py b/paddlespeech/s2t/__init__.py
index 5fe2e16b9..6663bcf87 100644
--- a/paddlespeech/s2t/__init__.py
+++ b/paddlespeech/s2t/__init__.py
@@ -22,7 +22,6 @@ from paddle.nn import functional as F
from paddlespeech.s2t.utils.log import Log
-#TODO(Hui Zhang): remove fluid import
logger = Log(__name__).getlog()
########### hack logging #############
@@ -167,13 +166,17 @@ def broadcast_shape(shp1, shp2):
def masked_fill(xs: paddle.Tensor,
mask: paddle.Tensor,
value: Union[float, int]):
+ # the arithmetic version below would produce nan when value is `inf`:
+ # mask = mask.astype(xs.dtype)
+ # return xs * (1.0 - mask) + mask * value
+
bshape = broadcast_shape(xs.shape, mask.shape)
mask.stop_gradient = True
- tmp = paddle.ones(shape=[len(bshape)], dtype='int32')
- for index in range(len(bshape)):
- tmp[index] = bshape[index]
- mask = mask.broadcast_to(tmp)
- trues = paddle.ones_like(xs) * value
+ # tmp = paddle.ones(shape=[len(bshape)], dtype='int32')
+ # for index in range(len(bshape)):
+ # tmp[index] = bshape[index]
+ mask = mask.broadcast_to(bshape)
+ trues = paddle.full_like(xs, fill_value=value)
xs = paddle.where(mask, trues, xs)
return xs
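
A tiny check of the rewritten helper, broadcasting a `(1, 3)` mask over a `(2, 3)` tensor and filling with `-inf` (shapes and values are arbitrary):

```python
import paddle
from paddlespeech.s2t import masked_fill

xs = paddle.to_tensor([[1.0, 2.0, 3.0],
                       [4.0, 5.0, 6.0]])
mask = paddle.to_tensor([[True, False, True]])  # broadcast over the batch dim

out = masked_fill(xs, mask, -float('inf'))
print(out)
# [[-inf, 2., -inf],
#  [-inf, 5., -inf]]
```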
diff --git a/paddlespeech/s2t/exps/u2/bin/quant.py b/paddlespeech/s2t/exps/u2/bin/quant.py
new file mode 100644
index 000000000..c38134c57
--- /dev/null
+++ b/paddlespeech/s2t/exps/u2/bin/quant.py
@@ -0,0 +1,224 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Evaluation for U2 model."""
+import os
+import sys
+from pathlib import Path
+
+import paddle
+import soundfile
+from paddleslim import PTQ
+from yacs.config import CfgNode
+
+from paddlespeech.audio.transform.transformation import Transformation
+from paddlespeech.s2t.frontend.featurizer.text_featurizer import TextFeaturizer
+from paddlespeech.s2t.models.u2 import U2Model
+from paddlespeech.s2t.training.cli import default_argument_parser
+from paddlespeech.s2t.utils.log import Log
+from paddlespeech.s2t.utils.utility import UpdateConfig
+logger = Log(__name__).getlog()
+
+
+class U2Infer():
+ def __init__(self, config, args):
+ self.args = args
+ self.config = config
+ self.audio_file = args.audio_file
+
+ self.preprocess_conf = config.preprocess_config
+ self.preprocess_args = {"train": False}
+ self.preprocessing = Transformation(self.preprocess_conf)
+ self.text_feature = TextFeaturizer(
+ unit_type=config.unit_type,
+ vocab=config.vocab_filepath,
+ spm_model_prefix=config.spm_model_prefix)
+
+ paddle.set_device('gpu' if self.args.ngpu > 0 else 'cpu')
+
+ # model
+ model_conf = config
+ with UpdateConfig(model_conf):
+ model_conf.input_dim = config.feat_dim
+ model_conf.output_dim = self.text_feature.vocab_size
+ model = U2Model.from_config(model_conf)
+ self.model = model
+ self.model.eval()
+ self.ptq = PTQ()
+ self.model = self.ptq.quantize(model)
+
+ # load model
+ params_path = self.args.checkpoint_path + ".pdparams"
+ model_dict = paddle.load(params_path)
+ self.model.set_state_dict(model_dict)
+
+ def run(self):
+ check(args.audio_file)
+
+ with paddle.no_grad():
+ # read
+ audio, sample_rate = soundfile.read(
+ self.audio_file, dtype="int16", always_2d=True)
+ audio = audio[:, 0]
+ logger.info(f"audio shape: {audio.shape}")
+
+ # fbank
+ feat = self.preprocessing(audio, **self.preprocess_args)
+ logger.info(f"feat shape: {feat.shape}")
+
+ ilen = paddle.to_tensor(feat.shape[0])
+ xs = paddle.to_tensor(feat, dtype='float32').unsqueeze(0)
+ decode_config = self.config.decode
+ logger.info(f"decode cfg: {decode_config}")
+ reverse_weight = getattr(decode_config, 'reverse_weight', 0.0)
+ result_transcripts = self.model.decode(
+ xs,
+ ilen,
+ text_feature=self.text_feature,
+ decoding_method=decode_config.decoding_method,
+ beam_size=decode_config.beam_size,
+ ctc_weight=decode_config.ctc_weight,
+ decoding_chunk_size=decode_config.decoding_chunk_size,
+ num_decoding_left_chunks=decode_config.num_decoding_left_chunks,
+ simulate_streaming=decode_config.simulate_streaming,
+ reverse_weight=reverse_weight)
+ rsl = result_transcripts[0][0]
+ utt = Path(self.audio_file).name
+ logger.info(f"hyp: {utt} {rsl}")
+ # print(self.model)
+ # print(self.model.forward_encoder_chunk)
+
+ logger.info("-------------start quant ----------------------")
+ batch_size = 1
+ feat_dim = 80
+ model_size = 512
+ num_left_chunks = -1
+ reverse_weight = 0.3
+ logger.info(
+ f"U2 Export Model Params: batch_size {batch_size}, feat_dim {feat_dim}, model_size {model_size}, num_left_chunks {num_left_chunks}, reverse_weight {reverse_weight}"
+ )
+
+ # ######################## self.model.forward_feature ############
+ # input_spec = [
+ # # (T,), int16
+ # paddle.static.InputSpec(shape=[None], dtype='int16'),
+ # ]
+ # self.model.forward_feature = paddle.jit.to_static(
+ # self.model.forward_feature, input_spec=input_spec)
+
+ ######################### self.model.forward_encoder_chunk ############
+ input_spec = [
+ # xs, (B, T, D)
+ paddle.static.InputSpec(
+ shape=[batch_size, None, feat_dim], dtype='float32'),
+ # offset, int, but need be tensor
+ paddle.static.InputSpec(shape=[1], dtype='int32'),
+ # required_cache_size, int
+ num_left_chunks,
+ # att_cache
+ paddle.static.InputSpec(
+ shape=[None, None, None, None], dtype='float32'),
+ # cnn_cache
+ paddle.static.InputSpec(
+ shape=[None, None, None, None], dtype='float32')
+ ]
+ self.model.forward_encoder_chunk = paddle.jit.to_static(
+ self.model.forward_encoder_chunk, input_spec=input_spec)
+
+ ######################### self.model.ctc_activation ########################
+ input_spec = [
+ # encoder_out, (B,T,D)
+ paddle.static.InputSpec(
+ shape=[batch_size, None, model_size], dtype='float32')
+ ]
+ self.model.ctc_activation = paddle.jit.to_static(
+ self.model.ctc_activation, input_spec=input_spec)
+
+ ######################### self.model.forward_attention_decoder ########################
+ input_spec = [
+ # hyps, (B, U)
+ paddle.static.InputSpec(shape=[None, None], dtype='int64'),
+ # hyps_lens, (B,)
+ paddle.static.InputSpec(shape=[None], dtype='int64'),
+ # encoder_out, (B,T,D)
+ paddle.static.InputSpec(
+ shape=[batch_size, None, model_size], dtype='float32'),
+ reverse_weight
+ ]
+ self.model.forward_attention_decoder = paddle.jit.to_static(
+ self.model.forward_attention_decoder, input_spec=input_spec)
+ ################################################################################
+
+ # jit save
+ logger.info(f"export save: {self.args.export_path}")
+ config = {
+ 'is_static': True,
+ 'combine_params': True,
+ 'skip_forward': True
+ }
+ self.ptq.save_quantized_model(self.model, self.args.export_path)
+ # paddle.jit.save(
+ # self.model,
+ # self.args.export_path,
+ # combine_params=True,
+ # skip_forward=True)
+
+
+def check(audio_file):
+ if not os.path.isfile(audio_file):
+ print("Please input the right audio file path")
+ sys.exit(-1)
+
+ logger.info("checking the audio file format......")
+ try:
+ sig, sample_rate = soundfile.read(audio_file)
+ except Exception as e:
+ logger.error(str(e))
+ logger.error(
+ "can not open the wav file, please check the audio file format")
+ sys.exit(-1)
+ logger.info("The sample rate is %d" % sample_rate)
+ assert (sample_rate == 16000)
+ logger.info("The audio file format is right")
+
+
+def main(config, args):
+ U2Infer(config, args).run()
+
+
+if __name__ == "__main__":
+ parser = default_argument_parser()
+ # save asr result to
+ parser.add_argument(
+ "--result_file", type=str, help="path of save the asr result")
+ parser.add_argument(
+ "--audio_file", type=str, help="path of the input audio file")
+ parser.add_argument(
+ "--export_path",
+ type=str,
+ default='export',
+ help="path of the input audio file")
+ args = parser.parse_args()
+
+ config = CfgNode(new_allowed=True)
+
+ if args.config:
+ config.merge_from_file(args.config)
+ if args.decode_cfg:
+ decode_confs = CfgNode(new_allowed=True)
+ decode_confs.merge_from_file(args.decode_cfg)
+ config.decode = decode_confs
+ if args.opts:
+ config.merge_from_list(args.opts)
+ config.freeze()
+ main(config, args)
diff --git a/paddlespeech/s2t/exps/u2/bin/test.py b/paddlespeech/s2t/exps/u2/bin/test.py
index f14d804f1..b13fd0d3f 100644
--- a/paddlespeech/s2t/exps/u2/bin/test.py
+++ b/paddlespeech/s2t/exps/u2/bin/test.py
@@ -20,8 +20,6 @@ from paddlespeech.s2t.exps.u2.model import U2Tester as Tester
from paddlespeech.s2t.training.cli import default_argument_parser
from paddlespeech.s2t.utils.utility import print_arguments
-# TODO(hui zhang): dynamic load
-
def main_sp(config, args):
exp = Tester(config, args)
diff --git a/paddlespeech/s2t/exps/u2/bin/test_wav.py b/paddlespeech/s2t/exps/u2/bin/test_wav.py
index 4588def0b..d12ea3646 100644
--- a/paddlespeech/s2t/exps/u2/bin/test_wav.py
+++ b/paddlespeech/s2t/exps/u2/bin/test_wav.py
@@ -40,7 +40,6 @@ class U2Infer():
self.preprocess_conf = config.preprocess_config
self.preprocess_args = {"train": False}
self.preprocessing = Transformation(self.preprocess_conf)
- self.reverse_weight = getattr(config.model_conf, 'reverse_weight', 0.0)
self.text_feature = TextFeaturizer(
unit_type=config.unit_type,
vocab=config.vocab_filepath,
@@ -69,7 +68,6 @@ class U2Infer():
# read
audio, sample_rate = soundfile.read(
self.audio_file, dtype="int16", always_2d=True)
-
audio = audio[:, 0]
logger.info(f"audio shape: {audio.shape}")
@@ -78,8 +76,10 @@ class U2Infer():
logger.info(f"feat shape: {feat.shape}")
ilen = paddle.to_tensor(feat.shape[0])
- xs = paddle.to_tensor(feat, dtype='float32').unsqueeze(axis=0)
+ xs = paddle.to_tensor(feat, dtype='float32').unsqueeze(0)
decode_config = self.config.decode
+ logger.info(f"decode cfg: {decode_config}")
+ reverse_weight = getattr(decode_config, 'reverse_weight', 0.0)
result_transcripts = self.model.decode(
xs,
ilen,
@@ -90,7 +90,7 @@ class U2Infer():
decoding_chunk_size=decode_config.decoding_chunk_size,
num_decoding_left_chunks=decode_config.num_decoding_left_chunks,
simulate_streaming=decode_config.simulate_streaming,
- reverse_weight=self.reverse_weight)
+ reverse_weight=reverse_weight)
rsl = result_transcripts[0][0]
utt = Path(self.audio_file).name
logger.info(f"hyp: {utt} {result_transcripts[0][0]}")
diff --git a/paddlespeech/s2t/exps/u2/model.py b/paddlespeech/s2t/exps/u2/model.py
index a13a6385e..5b7654d4a 100644
--- a/paddlespeech/s2t/exps/u2/model.py
+++ b/paddlespeech/s2t/exps/u2/model.py
@@ -316,7 +316,6 @@ class U2Tester(U2Trainer):
vocab=self.config.vocab_filepath,
spm_model_prefix=self.config.spm_model_prefix)
self.vocab_list = self.text_feature.vocab_list
- self.reverse_weight = getattr(config.model_conf, 'reverse_weight', 0.0)
def id2token(self, texts, texts_len, text_feature):
""" ord() id to chr() chr """
@@ -338,6 +337,7 @@ class U2Tester(U2Trainer):
errors_sum, len_refs, num_ins = 0.0, 0, 0
errors_func = error_rate.char_errors if decode_config.error_rate_type == 'cer' else error_rate.word_errors
error_rate_func = error_rate.cer if decode_config.error_rate_type == 'cer' else error_rate.wer
+ reverse_weight = getattr(decode_config, 'reverse_weight', 0.0)
start_time = time.time()
target_transcripts = self.id2token(texts, texts_len, self.text_feature)
@@ -352,7 +352,7 @@ class U2Tester(U2Trainer):
decoding_chunk_size=decode_config.decoding_chunk_size,
num_decoding_left_chunks=decode_config.num_decoding_left_chunks,
simulate_streaming=decode_config.simulate_streaming,
- reverse_weight=self.reverse_weight)
+ reverse_weight=reverse_weight)
decode_time = time.time() - start_time
for utt, target, result, rec_tids in zip(
@@ -464,20 +464,120 @@ class U2Tester(U2Trainer):
infer_model = U2InferModel.from_pretrained(self.test_loader,
self.config.clone(),
self.args.checkpoint_path)
+ batch_size = 1
feat_dim = self.test_loader.feat_dim
- input_spec = [
- paddle.static.InputSpec(shape=[1, None, feat_dim],
- dtype='float32'), # audio, [B,T,D]
- paddle.static.InputSpec(shape=[1],
- dtype='int64'), # audio_length, [B]
- ]
- return infer_model, input_spec
+ model_size = self.config.encoder_conf.output_size
+ num_left_chunks = -1
+ logger.info(
+ f"U2 Export Model Params: batch_size {batch_size}, feat_dim {feat_dim}, model_size {model_size}, num_left_chunks {num_left_chunks}"
+ )
+
+ return infer_model, (batch_size, feat_dim, model_size, num_left_chunks)
@paddle.no_grad()
def export(self):
infer_model, input_spec = self.load_inferspec()
- assert isinstance(input_spec, list), type(input_spec)
infer_model.eval()
- static_model = paddle.jit.to_static(infer_model, input_spec=input_spec)
- logger.info(f"Export code: {static_model.forward.code}")
- paddle.jit.save(static_model, self.args.export_path)
+ paddle.set_device('cpu')
+
+ assert isinstance(input_spec, (list, tuple)), type(input_spec)
+ batch_size, feat_dim, model_size, num_left_chunks = input_spec
+
+ ######################## infer_model.forward_feature ############
+ input_spec = [
+ # (T,), int16
+ paddle.static.InputSpec(shape=[None], dtype='int16'),
+ ]
+ infer_model.forward_feature = paddle.jit.to_static(
+ infer_model.forward_feature, input_spec=input_spec)
+
+ ######################### infer_model.forward_encoder_chunk ############
+ input_spec = [
+ # xs, (B, T, D)
+ paddle.static.InputSpec(
+ shape=[batch_size, None, feat_dim], dtype='float32'),
+ # offset, int, but need be tensor
+ paddle.static.InputSpec(shape=[1], dtype='int32'),
+ # required_cache_size, int
+ num_left_chunks,
+ # att_cache
+ paddle.static.InputSpec(
+ shape=[None, None, None, None], dtype='float32'),
+ # cnn_cache
+ paddle.static.InputSpec(
+ shape=[None, None, None, None], dtype='float32')
+ ]
+ infer_model.forward_encoder_chunk = paddle.jit.to_static(
+ infer_model.forward_encoder_chunk, input_spec=input_spec)
+
+ ######################### infer_model.ctc_activation ########################
+ input_spec = [
+ # encoder_out, (B,T,D)
+ paddle.static.InputSpec(
+ shape=[batch_size, None, model_size], dtype='float32')
+ ]
+ infer_model.ctc_activation = paddle.jit.to_static(
+ infer_model.ctc_activation, input_spec=input_spec)
+
+ ######################### infer_model.forward_attention_decoder ########################
+ reverse_weight = 0.3
+ input_spec = [
+ # hyps, (B, U)
+ paddle.static.InputSpec(shape=[None, None], dtype='int64'),
+ # hyps_lens, (B,)
+ paddle.static.InputSpec(shape=[None], dtype='int64'),
+ # encoder_out, (B,T,D)
+ paddle.static.InputSpec(
+ shape=[batch_size, None, model_size], dtype='float32'),
+ reverse_weight
+ ]
+ infer_model.forward_attention_decoder = paddle.jit.to_static(
+ infer_model.forward_attention_decoder, input_spec=input_spec)
+
+ # jit save
+ logger.info(f"export save: {self.args.export_path}")
+ paddle.jit.save(
+ infer_model,
+ self.args.export_path,
+ combine_params=True,
+ skip_forward=True)
+
+ # test dy2static
+ def flatten(out):
+ if isinstance(out, paddle.Tensor):
+ return [out]
+
+ flatten_out = []
+ for var in out:
+ if isinstance(var, (list, tuple)):
+ flatten_out.extend(flatten(var))
+ else:
+ flatten_out.append(var)
+ return flatten_out
+
+ # forward_encoder_chunk dygraph
+ xs1 = paddle.full([1, 67, 80], 0.1, dtype='float32')
+ offset = paddle.to_tensor([0], dtype='int32')
+ required_cache_size = num_left_chunks
+ att_cache = paddle.zeros([0, 0, 0, 0])
+ cnn_cache = paddle.zeros([0, 0, 0, 0])
+ xs_d, att_cache_d, cnn_cache_d = infer_model.forward_encoder_chunk(
+ xs1, offset, required_cache_size, att_cache, cnn_cache)
+
+ # load static model
+ from paddle.jit.layer import Layer
+ layer = Layer()
+ logger.info(f"load export model: {self.args.export_path}")
+ layer.load(self.args.export_path, paddle.CPUPlace())
+
+ # forward_encoder_chunk static
+ xs1 = paddle.full([1, 67, 80], 0.1, dtype='float32')
+ offset = paddle.to_tensor([0], dtype='int32')
+ att_cache = paddle.zeros([0, 0, 0, 0])
+ cnn_cache = paddle.zeros([0, 0, 0, 0])
+ func = getattr(layer, 'forward_encoder_chunk')
+ xs_s, att_cache_s, cnn_cache_s = func(xs1, offset, att_cache, cnn_cache)
+ np.testing.assert_allclose(xs_d, xs_s, atol=1e-5)
+ np.testing.assert_allclose(att_cache_d, att_cache_s, atol=1e-4)
+ np.testing.assert_allclose(cnn_cache_d, cnn_cache_s, atol=1e-4)
+ # logger.info(f"forward_encoder_chunk output: {xs_s}")
diff --git a/paddlespeech/s2t/exps/u2_st/bin/test.py b/paddlespeech/s2t/exps/u2_st/bin/test.py
index 1d70a3103..c07c95bd5 100644
--- a/paddlespeech/s2t/exps/u2_st/bin/test.py
+++ b/paddlespeech/s2t/exps/u2_st/bin/test.py
@@ -20,8 +20,6 @@ from paddlespeech.s2t.exps.u2_st.model import U2STTester as Tester
from paddlespeech.s2t.training.cli import default_argument_parser
from paddlespeech.s2t.utils.utility import print_arguments
-# TODO(hui zhang): dynamic load
-
def main_sp(config, args):
exp = Tester(config, args)
diff --git a/paddlespeech/s2t/exps/wav2vec2/bin/__init__.py b/paddlespeech/s2t/exps/wav2vec2/bin/__init__.py
new file mode 100644
index 000000000..185a92b8d
--- /dev/null
+++ b/paddlespeech/s2t/exps/wav2vec2/bin/__init__.py
@@ -0,0 +1,13 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/paddlespeech/s2t/exps/wav2vec2/bin/test.py b/paddlespeech/s2t/exps/wav2vec2/bin/test.py
new file mode 100644
index 000000000..d1a6fd405
--- /dev/null
+++ b/paddlespeech/s2t/exps/wav2vec2/bin/test.py
@@ -0,0 +1,64 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Evaluation for wav2vec2.0 model."""
+import cProfile
+
+from yacs.config import CfgNode
+
+from paddlespeech.s2t.exps.wav2vec2.model import Wav2Vec2ASRTester as Tester
+from paddlespeech.s2t.training.cli import default_argument_parser
+from paddlespeech.s2t.utils.utility import print_arguments
+
+
+def main_sp(config, args):
+ exp = Tester(config, args)
+ with exp.eval():
+ exp.setup()
+ exp.run_test()
+
+
+def main(config, args):
+ main_sp(config, args)
+
+
+if __name__ == "__main__":
+ parser = default_argument_parser()
+ # save asr result to
+ parser.add_argument(
+ '--dict-path', type=str, default=None, help='dict path.')
+ parser.add_argument(
+ "--result_file", type=str, help="path of save the asr result")
+ args = parser.parse_args()
+ print_arguments(args, globals())
+
+ # https://yaml.org/type/float.html
+ config = CfgNode(new_allowed=True)
+ if args.config:
+ config.merge_from_file(args.config)
+ if args.decode_cfg:
+ decode_confs = CfgNode(new_allowed=True)
+ decode_confs.merge_from_file(args.decode_cfg)
+ config.decode = decode_confs
+ if args.opts:
+ config.merge_from_list(args.opts)
+ config.freeze()
+ print(config)
+ if args.dump_config:
+ with open(args.dump_config, 'w') as f:
+ print(config, file=f)
+
+ # Setting for profiling
+ pr = cProfile.Profile()
+ pr.runcall(main, config, args)
+ pr.dump_stats('test.profile')
diff --git a/paddlespeech/s2t/exps/wav2vec2/bin/test_wav.py b/paddlespeech/s2t/exps/wav2vec2/bin/test_wav.py
new file mode 100644
index 000000000..3a537bce5
--- /dev/null
+++ b/paddlespeech/s2t/exps/wav2vec2/bin/test_wav.py
@@ -0,0 +1,118 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Evaluation for wav2vec2.0 model."""
+import os
+import sys
+from pathlib import Path
+
+import paddle
+import soundfile
+from yacs.config import CfgNode
+
+from paddlespeech.s2t.frontend.featurizer.text_featurizer import TextFeaturizer
+from paddlespeech.s2t.models.wav2vec2.wav2vec2_ASR import Wav2vec2ASR
+from paddlespeech.s2t.training.cli import default_argument_parser
+from paddlespeech.s2t.utils.log import Log
+from paddlespeech.s2t.utils.utility import UpdateConfig
+logger = Log(__name__).getlog()
+
+
+class Wav2vec2Infer():
+ def __init__(self, config, args):
+ self.args = args
+ self.config = config
+ self.audio_file = args.audio_file
+
+ self.text_feature = TextFeaturizer(
+ unit_type=config.unit_type, vocab=config.vocab_filepath)
+ paddle.set_device('gpu' if self.args.ngpu > 0 else 'cpu')
+
+ # model
+ model_conf = config
+ with UpdateConfig(model_conf):
+ model_conf.output_dim = self.text_feature.vocab_size
+ model = Wav2vec2ASR.from_config(model_conf)
+ self.model = model
+ self.model.eval()
+
+ # load model
+ params_path = self.args.checkpoint_path + ".pdparams"
+ model_dict = paddle.load(params_path)
+ self.model.set_state_dict(model_dict)
+
+ def run(self):
+        check(self.audio_file)
+
+ with paddle.no_grad():
+ # read
+ audio, _ = soundfile.read(
+ self.audio_file, dtype="int16", always_2d=True)
+ logger.info(f"audio shape: {audio.shape}")
+
+ xs = paddle.to_tensor(audio, dtype='float32').unsqueeze(axis=0)
+ decode_config = self.config.decode
+ result_transcripts, result_tokenids = self.model.decode(
+ xs,
+ text_feature=self.text_feature,
+ decoding_method=decode_config.decoding_method,
+ beam_size=decode_config.beam_size)
+ rsl = result_transcripts[0]
+ utt = Path(self.audio_file).name
+ logger.info(f"hyp: {utt} {rsl}")
+ return rsl
+
+
+def check(audio_file):
+    if not os.path.isfile(audio_file):
+        logger.error("Please input a valid audio file path")
+        sys.exit(-1)
+
+    logger.info("checking the audio file format ...")
+    try:
+        sig, sample_rate = soundfile.read(audio_file)
+    except Exception as e:
+        logger.error(str(e))
+        logger.error(
+            "cannot open the wav file, please check the audio file format")
+        sys.exit(-1)
+    logger.info("The sample rate is %d" % sample_rate)
+    assert sample_rate == 16000, "the sample rate of the audio file must be 16 kHz"
+    logger.info("The audio file format is correct")
+
+
+def main(config, args):
+ Wav2vec2Infer(config, args).run()
+
+
+if __name__ == "__main__":
+ parser = default_argument_parser()
+    # save asr result to this file
+    parser.add_argument(
+        "--result_file", type=str, help="path to save the asr result")
+ parser.add_argument(
+ "--audio_file", type=str, help="path of the input audio file")
+ args = parser.parse_args()
+
+ config = CfgNode(new_allowed=True)
+
+ if args.config:
+ config.merge_from_file(args.config)
+ if args.decode_cfg:
+ decode_confs = CfgNode(new_allowed=True)
+ decode_confs.merge_from_file(args.decode_cfg)
+ config.decode = decode_confs
+ if args.opts:
+ config.merge_from_list(args.opts)
+ config.freeze()
+ main(config, args)
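`check()` above rejects anything that is not 16 kHz, so audio at another rate has to be resampled before `test_wav.py` is invoked. A rough sketch using only `numpy` and `soundfile` (plain linear interpolation, good enough for a smoke test; the file names are placeholders, and a dedicated resampler is preferable for real use):

```python
# Rough sketch: resample a wav to the 16 kHz expected by test_wav.py.
import numpy as np
import soundfile

TARGET_SR = 16000


def resample_to_16k(in_path: str, out_path: str):
    audio, sr = soundfile.read(in_path, dtype="float32", always_2d=True)
    if sr != TARGET_SR:
        n_out = int(round(audio.shape[0] * TARGET_SR / sr))
        t_in = np.linspace(0.0, 1.0, num=audio.shape[0], endpoint=False)
        t_out = np.linspace(0.0, 1.0, num=n_out, endpoint=False)
        # linear interpolation per channel
        audio = np.stack(
            [np.interp(t_out, t_in, audio[:, ch]) for ch in range(audio.shape[1])],
            axis=1)
    soundfile.write(out_path, audio.astype("float32"), TARGET_SR)


# e.g. resample_to_16k("input_44k.wav", "input_16k.wav")  # hypothetical paths
```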
diff --git a/paddlespeech/s2t/exps/wav2vec2/bin/train.py b/paddlespeech/s2t/exps/wav2vec2/bin/train.py
new file mode 100644
index 000000000..b2edecca1
--- /dev/null
+++ b/paddlespeech/s2t/exps/wav2vec2/bin/train.py
@@ -0,0 +1,54 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Trainer for wav2vec2.0 model."""
+import cProfile
+import os
+
+from yacs.config import CfgNode
+
+from paddlespeech.s2t.exps.wav2vec2.model import Wav2Vec2ASRTrainer as Trainer
+from paddlespeech.s2t.training.cli import default_argument_parser
+from paddlespeech.s2t.utils.utility import print_arguments
+
+
+def main_sp(config, args):
+ exp = Trainer(config, args)
+ exp.setup()
+ exp.run()
+
+
+def main(config, args):
+ main_sp(config, args)
+
+
+if __name__ == "__main__":
+ parser = default_argument_parser()
+ args = parser.parse_args()
+ print_arguments(args, globals())
+
+ # https://yaml.org/type/float.html
+ config = CfgNode(new_allowed=True)
+ if args.config:
+ config.merge_from_file(args.config)
+ if args.opts:
+ config.merge_from_list(args.opts)
+ config.freeze()
+ if args.dump_config:
+ with open(args.dump_config, 'w') as f:
+ print(config, file=f)
+
+ # Setting for profiling
+ pr = cProfile.Profile()
+ pr.runcall(main, config, args)
+ pr.dump_stats(os.path.join(args.output, 'train.profile'))
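train.py (like test.py above) builds its configuration by merging a YAML file and command-line `--opts` overrides into a yacs `CfgNode`, then freezes it; `--dump_config` writes the merged snapshot to a file. A small self-contained sketch of that merge order, with made-up keys purely for illustration (the real keys come from the example configs):

```python
# Sketch of the yacs merge order used by the wav2vec2 entry points:
# defaults -> YAML file -> command-line opts -> freeze.
from yacs.config import CfgNode

config = CfgNode(new_allowed=True)  # new_allowed lets the YAML introduce new keys
config.n_epoch = 1                  # illustrative default (hypothetical key)
config.accum_grad = 1

# config.merge_from_file("conf/wav2vec2ASR.yaml")  # would merge a real config file
config.merge_from_list(["n_epoch", 10])            # CLI-style override of an existing key
config.freeze()                                    # no further mutation allowed

print(config)  # same snapshot that --dump_config writes to a file
```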
diff --git a/paddlespeech/s2t/exps/wav2vec2/model.py b/paddlespeech/s2t/exps/wav2vec2/model.py
new file mode 100644
index 000000000..16feac5de
--- /dev/null
+++ b/paddlespeech/s2t/exps/wav2vec2/model.py
@@ -0,0 +1,455 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Contains wav2vec2 model."""
+import json
+import math
+import os
+import time
+from collections import defaultdict
+from collections import OrderedDict
+from contextlib import nullcontext
+
+import jsonlines
+import numpy as np
+import paddle
+from paddle import distributed as dist
+
+from paddlespeech.s2t.frontend.featurizer import TextFeaturizer
+from paddlespeech.s2t.io.dataloader import DataLoaderFactory
+from paddlespeech.s2t.models.wav2vec2.processing.speech_augmentation import TimeDomainSpecAugment
+from paddlespeech.s2t.models.wav2vec2.wav2vec2_ASR import Wav2vec2ASR
+from paddlespeech.s2t.training.optimizer import OptimizerFactory
+from paddlespeech.s2t.training.reporter import ObsScope
+from paddlespeech.s2t.training.reporter import report
+from paddlespeech.s2t.training.scheduler import LRSchedulerFactory
+from paddlespeech.s2t.training.timer import Timer
+from paddlespeech.s2t.training.trainer import Trainer
+from paddlespeech.s2t.utils import error_rate
+from paddlespeech.s2t.utils import layer_tools
+from paddlespeech.s2t.utils import mp_tools
+from paddlespeech.s2t.utils.log import Log
+from paddlespeech.s2t.utils.utility import UpdateConfig
+
+logger = Log(__name__).getlog()
+
+
+class Wav2Vec2ASRTrainer(Trainer):
+ def __init__(self, config, args):
+ super().__init__(config, args)
+ self.avg_train_loss = 0.0
+
+ def update_average(self, batch_index, loss):
+ """Update running average of the loss.
+ Arguments
+ ---------
+ batch_index : int
+ current batch index
+        loss : float
+            detached loss value for the current batch.
+ """
+ if math.isfinite(loss):
+ self.avg_train_loss -= self.avg_train_loss / (batch_index + 1)
+ self.avg_train_loss += loss / (batch_index + 1)
+
+ def train_batch(self, batch_index, batch, msg):
+ train_conf = self.config
+ start = time.time()
+
+ # forward
+ utt, wav, wavs_lens, target, target_lens = batch
+ wavs_lens_rate = wavs_lens / wav.shape[1]
+ target_lens_rate = target_lens / target.shape[1]
+ wav = wav[:, :, 0]
+ wav = self.speech_augmentation(wav, wavs_lens_rate)
+ loss = self.model(wav, wavs_lens_rate, target, target_lens_rate)
+        # normalize the loss by accum_grad for gradient accumulation
+ loss /= train_conf.accum_grad
+
+ # update self.avg_train_loss
+ self.update_average(batch_index, float(loss))
+
+ # loss backward
+ if (batch_index + 1) % train_conf.accum_grad != 0:
+ # Disable gradient synchronizations across DDP processes.
+ # Within this context, gradients will be accumulated on module
+ # variables, which will later be synchronized.
+ # When using cpu w/o DDP, model does not have `no_sync`
+ context = self.model.no_sync if (hasattr(self.model, "no_sync") and
+ self.parallel) else nullcontext
+ else:
+ # Used for single gpu training and DDP gradient synchronization
+ # processes.
+ context = nullcontext
+ with context():
+ loss.backward()
+ layer_tools.print_grads(self.model, print_func=None)
+
+        # optimizer step every accum_grad batches
+ if (batch_index + 1) % train_conf.accum_grad == 0:
+ self.optimizer.step()
+ self.optimizer.clear_grad()
+ self.lr_scheduler.step()
+ self.iteration += 1
+
+ losses_np = {'loss': self.avg_train_loss * train_conf.accum_grad}
+ iteration_time = time.time() - start
+ for k, v in losses_np.items():
+ report(k, v)
+ report("batch_size", self.config.batch_size)
+ report("accum", train_conf.accum_grad)
+ report("step_cost", iteration_time)
+
+ if (batch_index + 1) % train_conf.accum_grad == 0:
+ if dist.get_rank() == 0 and self.visualizer:
+ losses_np_v = losses_np.copy()
+ losses_np_v.update({"lr": self.lr_scheduler()})
+ for key, val in losses_np_v.items():
+ self.visualizer.add_scalar(
+ tag='train/' + key, value=val, step=self.iteration - 1)
+
+ @paddle.no_grad()
+ def valid(self):
+ self.model.eval()
+ if not self.use_streamdata:
+ logger.info(
+ f"Valid Total Examples: {len(self.valid_loader.dataset)}")
+ valid_losses = defaultdict(list)
+ num_seen_utts = 1
+ total_loss = 0.0
+ for i, batch in enumerate(self.valid_loader):
+ utt, wav, wavs_lens, target, target_lens = batch
+ wavs_lens_rate = wavs_lens / wav.shape[1]
+ target_lens_rate = target_lens / target.shape[1]
+ wav = wav[:, :, 0]
+ loss = self.model(wav, wavs_lens_rate, target, target_lens_rate)
+
+ if paddle.isfinite(loss):
+ num_utts = batch[1].shape[0]
+ num_seen_utts += num_utts
+ total_loss += float(loss) * num_utts
+ valid_losses['val_loss'].append(float(loss))
+
+ if (i + 1) % self.config.log_interval == 0:
+ valid_dump = {k: np.mean(v) for k, v in valid_losses.items()}
+ valid_dump['val_history_loss'] = total_loss / num_seen_utts
+
+ # logging
+ msg = f"Valid: Rank: {dist.get_rank()}, "
+ msg += "epoch: {}, ".format(self.epoch)
+ msg += "step: {}, ".format(self.iteration)
+ if not self.use_streamdata:
+ msg += "batch: {}/{}, ".format(i + 1,
+ len(self.valid_loader))
+ msg += ', '.join('{}: {:>.6f}'.format(k, v)
+ for k, v in valid_dump.items())
+ logger.info(msg)
+
+ logger.info('Rank {} Val info val_loss {}'.format(
+ dist.get_rank(), total_loss / num_seen_utts))
+ return total_loss, num_seen_utts
+
+ def do_train(self):
+ """The training process control by step."""
+ # !!!IMPORTANT!!!
+ # Try to export the model by script, if fails, we should refine
+ # the code to satisfy the script export requirements
+ # script_model = paddle.jit.to_static(self.model)
+ # script_model_path = str(self.checkpoint_dir / 'init')
+ # paddle.jit.save(script_model, script_model_path)
+
+ self.before_train()
+
+ if not self.use_streamdata:
+ logger.info(
+ f"Train Total Examples: {len(self.train_loader.dataset)}")
+ while self.epoch < self.config.n_epoch:
+ with Timer("Epoch-Train Time Cost: {}"):
+ self.model.train()
+ try:
+ data_start_time = time.time()
+ for batch_index, batch in enumerate(self.train_loader):
+ dataload_time = time.time() - data_start_time
+ msg = "Train:"
+ observation = OrderedDict()
+ with ObsScope(observation):
+ report("Rank", dist.get_rank())
+ report("epoch", self.epoch)
+ report('step', self.iteration)
+ report("lr", self.lr_scheduler())
+ self.train_batch(batch_index, batch, msg)
+ self.after_train_batch()
+ report('iter', batch_index + 1)
+ if not self.use_streamdata:
+ report('total', len(self.train_loader))
+ report('reader_cost', dataload_time)
+ observation['batch_cost'] = observation[
+ 'reader_cost'] + observation['step_cost']
+ observation['samples'] = observation['batch_size']
+ observation['ips,samples/s'] = observation[
+ 'batch_size'] / observation['batch_cost']
+ for k, v in observation.items():
+ msg += f" {k.split(',')[0]}: "
+ msg += f"{v:>.8f}" if isinstance(v,
+ float) else f"{v}"
+ msg += f" {k.split(',')[1]}" if len(
+ k.split(',')) == 2 else ""
+ msg += ","
+ msg = msg[:-1] # remove the last ","
+ if (batch_index + 1) % self.config.log_interval == 0:
+ logger.info(msg)
+ data_start_time = time.time()
+ except Exception as e:
+ logger.error(e)
+ raise e
+ with Timer("Eval Time Cost: {}"):
+ total_loss, num_seen_utts = self.valid()
+ if dist.get_world_size() > 1:
+ num_seen_utts = paddle.to_tensor(num_seen_utts)
+ # the default operator in all_reduce function is sum.
+ dist.all_reduce(num_seen_utts)
+ total_loss = paddle.to_tensor(total_loss)
+ dist.all_reduce(total_loss)
+ cv_loss = total_loss / num_seen_utts
+ cv_loss = float(cv_loss)
+ else:
+ cv_loss = total_loss / num_seen_utts
+
+ logger.info(
+ 'Epoch {} Val info val_loss {}'.format(self.epoch, cv_loss))
+ if self.visualizer:
+ self.visualizer.add_scalar(
+ tag='eval/cv_loss', value=cv_loss, step=self.epoch)
+ self.visualizer.add_scalar(
+ tag='eval/lr', value=self.lr_scheduler(), step=self.epoch)
+
+ self.save(tag=self.epoch, infos={'val_loss': cv_loss})
+ self.new_epoch()
+
+ def setup_dataloader(self):
+ config = self.config.clone()
+ self.use_streamdata = config.get("use_stream_data", False)
+ if self.train:
+ self.train_loader = DataLoaderFactory.get_dataloader(
+ 'train', config, self.args)
+ self.valid_loader = DataLoaderFactory.get_dataloader(
+ 'valid', config, self.args)
+ logger.info("Setup train/valid Dataloader!")
+ else:
+ decode_batch_size = config.get('decode', dict()).get(
+ 'decode_batch_size', 1)
+ self.test_loader = DataLoaderFactory.get_dataloader('test', config,
+ self.args)
+ self.align_loader = DataLoaderFactory.get_dataloader(
+ 'align', config, self.args)
+ logger.info("Setup test/align Dataloader!")
+
+ def setup_model(self):
+ config = self.config
+ model_conf = config
+
+ with UpdateConfig(model_conf):
+ if self.train:
+ model_conf.input_dim = self.train_loader.feat_dim
+ model_conf.output_dim = self.train_loader.vocab_size
+ else:
+ model_conf.input_dim = self.test_loader.feat_dim
+ model_conf.output_dim = self.test_loader.vocab_size
+
+ model = Wav2vec2ASR.from_config(model_conf)
+
+ if self.parallel:
+ model = paddle.DataParallel(model, find_unused_parameters=True)
+
+ logger.info(f"{model}")
+ layer_tools.print_params(model, logger.info)
+ self.model = model
+ logger.info("Setup model!")
+
+ # setup speech augmentation for wav2vec2
+ self.speech_augmentation = TimeDomainSpecAugment()
+
+ if not self.train:
+ return
+
+ train_config = config
+ optim_type = train_config.model_optim
+ optim_conf = train_config.model_optim_conf
+ scheduler_type = train_config.scheduler
+ scheduler_conf = train_config.scheduler_conf
+
+ scheduler_args = {
+ "learning_rate": optim_conf.lr,
+ "verbose": False,
+ "warmup_steps": scheduler_conf.warmup_steps,
+ "gamma": scheduler_conf.lr_decay,
+ "d_model": model_conf.dnn_neurons,
+ }
+ lr_scheduler = LRSchedulerFactory.from_args(scheduler_type,
+ scheduler_args)
+
+ def optimizer_args(
+ config,
+ parameters,
+ lr_scheduler=None, ):
+ train_config = config
+ optim_type = train_config.model_optim
+ optim_conf = train_config.model_optim_conf
+ scheduler_type = train_config.scheduler
+ scheduler_conf = train_config.scheduler_conf
+ return {
+ "grad_clip": train_config.global_grad_clip,
+ "learning_rate": lr_scheduler
+ if lr_scheduler else optim_conf.lr,
+ "epsilon": optim_conf.epsilon,
+ "rho": optim_conf.rho,
+ "parameters": parameters,
+ "beta1": 0.9 if optim_type == 'noam' else None,
+ "beat2": 0.98 if optim_type == 'noam' else None,
+ }
+
+        optim_args = optimizer_args(config, model.parameters(), lr_scheduler)
+        optimizer = OptimizerFactory.from_args(optim_type, optim_args)
+
+ self.optimizer = optimizer
+ self.lr_scheduler = lr_scheduler
+ logger.info("Setup optimizer/lr_scheduler!")
+
+
+class Wav2Vec2ASRTester(Wav2Vec2ASRTrainer):
+ def __init__(self, config, args):
+ super().__init__(config, args)
+ self.text_featurizer = TextFeaturizer(
+ unit_type=config.unit_type, vocab=config.vocab_filepath)
+ self.vocab_list = self.text_featurizer.vocab_list
+
+ def id2token(self, texts, texts_len):
+ """ ord() id to chr() chr """
+ trans = []
+ for text, n in zip(texts, texts_len):
+ n = n.numpy().item()
+ ids = text[:n]
+ trans.append(self.text_featurizer.defeaturize(ids.numpy().tolist()))
+ return trans
+
+ def compute_metrics(self,
+ utts,
+ audio,
+ audio_len,
+ texts,
+ texts_len,
+ fout=None):
+ decode_cfg = self.config.decode
+ errors_sum, len_refs, num_ins = 0.0, 0, 0
+ errors_func = error_rate.char_errors if decode_cfg.error_rate_type == 'cer' else error_rate.word_errors
+ error_rate_func = error_rate.cer if decode_cfg.error_rate_type == 'cer' else error_rate.wer
+
+ start_time = time.time()
+ target_transcripts = self.id2token(texts, texts_len)
+ result_transcripts, result_tokenids = self.model.decode(
+ audio,
+ text_feature=self.text_featurizer,
+ decoding_method=decode_cfg.decoding_method,
+ beam_size=decode_cfg.beam_size)
+ decode_time = time.time() - start_time
+
+ for utt, target, result, rec_tids in zip(
+ utts, target_transcripts, result_transcripts, result_tokenids):
+ errors, len_ref = errors_func(target, result)
+ errors_sum += errors
+ len_refs += len_ref
+ num_ins += 1
+ if fout:
+ fout.write({
+ "utt": utt,
+ "refs": [target],
+ "hyps": [result],
+ "hyps_tokenid": [rec_tids],
+ })
+ logger.info(f"Utt: {utt}")
+ logger.info(f"Ref: {target}")
+ logger.info(f"Hyp: {result}")
+ logger.info("One example error rate [%s] = %f" % (
+ decode_cfg.error_rate_type, error_rate_func(target, result)))
+
+ return dict(
+ errors_sum=errors_sum,
+ len_refs=len_refs,
+ num_ins=num_ins, # num examples
+ error_rate=errors_sum / len_refs,
+ error_rate_type=decode_cfg.error_rate_type,
+ num_frames=audio_len.sum().numpy().item(),
+ decode_time=decode_time)
+
+ @mp_tools.rank_zero_only
+ @paddle.no_grad()
+ def test(self):
+ logger.info(f"Test Total Examples: {len(self.test_loader.dataset)}")
+ self.model.eval()
+
+ error_rate_type = None
+ errors_sum, len_refs, num_ins = 0.0, 0, 0
+ num_frames = 0.0
+ num_time = 0.0
+ # Initialized the decoder in model
+ decode_cfg = self.config.decode
+ vocab_list = self.vocab_list
+ decode_batch_size = decode_cfg.decode_batch_size
+
+ with jsonlines.open(self.args.result_file, 'w') as fout:
+ for i, batch in enumerate(self.test_loader):
+ metrics = self.compute_metrics(*batch, fout=fout)
+ num_frames += metrics['num_frames']
+ num_time += metrics["decode_time"]
+ errors_sum += metrics['errors_sum']
+ len_refs += metrics['len_refs']
+ num_ins += metrics['num_ins']
+ error_rate_type = metrics['error_rate_type']
+ rtf = num_time / (num_frames)
+ logger.info(
+ "RTF: %f, Error rate [%s] (%d/?) = %f" %
+ (rtf, error_rate_type, num_ins, errors_sum / len_refs))
+
+ # logging
+ msg = "Test: "
+ msg += "epoch: {}, ".format(self.epoch)
+ msg += "step: {}, ".format(self.iteration)
+ msg += "Final error rate [%s] (%d/%d) = %f" % (
+ error_rate_type, num_ins, num_ins, errors_sum / len_refs)
+ logger.info(msg)
+
+ err_meta_path = os.path.splitext(self.args.result_file)[0] + '.err'
+ err_type_str = "{}".format(error_rate_type)
+ with open(err_meta_path, 'w') as f:
+ data = json.dumps({
+ "epoch":
+ self.epoch,
+ "step":
+ self.iteration,
+ "rtf":
+ rtf,
+ error_rate_type:
+ errors_sum / len_refs,
+ "dataset_hour": (num_frames) / 1000.0 / 3600.0,
+ "process_hour":
+ num_time / 1000.0 / 3600.0,
+ "num_examples":
+ num_ins,
+ "err_sum":
+ errors_sum,
+ "ref_len":
+ len_refs,
+ "decode_method":
+ self.config.decode.decoding_method,
+ })
+ f.write(data + '\n')
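`train_batch()` above combines a running average of the loss with gradient accumulation: the loss is divided by `accum_grad`, gradients accumulate across batches, and the optimizer only steps every `accum_grad` batches. A stripped-down sketch of that control flow with a toy paddle model (hypothetical sizes, no DDP/`no_sync` or LR scheduling):

```python
# Stripped-down sketch of the accumulation pattern in Wav2Vec2ASRTrainer.train_batch.
import math

import paddle

model = paddle.nn.Linear(8, 2)
opt = paddle.optimizer.SGD(learning_rate=0.1, parameters=model.parameters())
accum_grad = 4
avg_train_loss = 0.0

for batch_index in range(16):
    x = paddle.randn([4, 8])
    y = paddle.randn([4, 2])
    loss = paddle.nn.functional.mse_loss(model(x), y)
    loss = loss / accum_grad  # normalize for accumulation

    # running average of the (already normalized) loss, as in update_average()
    if math.isfinite(float(loss)):
        avg_train_loss -= avg_train_loss / (batch_index + 1)
        avg_train_loss += float(loss) / (batch_index + 1)

    loss.backward()  # gradients accumulate across calls
    if (batch_index + 1) % accum_grad == 0:
        opt.step()        # one optimizer step per accum_grad batches
        opt.clear_grad()

print("avg loss:", avg_train_loss * accum_grad)  # undo the normalization for reporting
```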
diff --git a/paddlespeech/s2t/models/u2/u2.py b/paddlespeech/s2t/models/u2/u2.py
index 48b05d20c..544c1e836 100644
--- a/paddlespeech/s2t/models/u2/u2.py
+++ b/paddlespeech/s2t/models/u2/u2.py
@@ -124,17 +124,15 @@ class U2BaseModel(ASRInterface, nn.Layer):
encoder_out, encoder_mask = self.encoder(speech, speech_lengths)
encoder_time = time.time() - start
#logger.debug(f"encoder time: {encoder_time}")
- #TODO(Hui Zhang): sum not support bool type
- #encoder_out_lens = encoder_mask.squeeze(1).sum(1) #[B, 1, T] -> [B]
- encoder_out_lens = encoder_mask.squeeze(1).cast(paddle.int64).sum(
- 1) #[B, 1, T] -> [B]
+ encoder_out_lens = encoder_mask.squeeze(1).sum(1) #[B, 1, T] -> [B]
# 2a. Attention-decoder branch
loss_att = None
if self.ctc_weight != 1.0:
start = time.time()
loss_att, acc_att = self._calc_att_loss(encoder_out, encoder_mask,
- text, text_lengths)
+ text, text_lengths,
+ self.reverse_weight)
decoder_time = time.time() - start
#logger.debug(f"decoder time: {decoder_time}")
@@ -155,12 +153,12 @@ class U2BaseModel(ASRInterface, nn.Layer):
loss = self.ctc_weight * loss_ctc + (1 - self.ctc_weight) * loss_att
return loss, loss_att, loss_ctc
- def _calc_att_loss(
- self,
- encoder_out: paddle.Tensor,
- encoder_mask: paddle.Tensor,
- ys_pad: paddle.Tensor,
- ys_pad_lens: paddle.Tensor, ) -> Tuple[paddle.Tensor, float]:
+ def _calc_att_loss(self,
+ encoder_out: paddle.Tensor,
+ encoder_mask: paddle.Tensor,
+ ys_pad: paddle.Tensor,
+ ys_pad_lens: paddle.Tensor,
+ reverse_weight: float) -> Tuple[paddle.Tensor, float]:
"""Calc attention loss.
Args:
@@ -168,6 +166,7 @@ class U2BaseModel(ASRInterface, nn.Layer):
encoder_mask (paddle.Tensor): [B, 1, Tmax]
ys_pad (paddle.Tensor): [B, Umax]
ys_pad_lens (paddle.Tensor): [B]
+ reverse_weight (float): reverse decoder weight.
Returns:
Tuple[paddle.Tensor, float]: attention_loss, accuracy rate
@@ -182,15 +181,14 @@ class U2BaseModel(ASRInterface, nn.Layer):
# 1. Forward decoder
decoder_out, r_decoder_out, _ = self.decoder(
encoder_out, encoder_mask, ys_in_pad, ys_in_lens, r_ys_in_pad,
- self.reverse_weight)
+ reverse_weight)
# 2. Compute attention loss
loss_att = self.criterion_att(decoder_out, ys_out_pad)
r_loss_att = paddle.to_tensor(0.0)
- if self.reverse_weight > 0.0:
+ if reverse_weight > 0.0:
r_loss_att = self.criterion_att(r_decoder_out, r_ys_out_pad)
- loss_att = loss_att * (1 - self.reverse_weight
- ) + r_loss_att * self.reverse_weight
+ loss_att = loss_att * (1 - reverse_weight) + r_loss_att * reverse_weight
acc_att = th_accuracy(
decoder_out.view(-1, self.vocab_size),
ys_out_pad,
@@ -291,8 +289,7 @@ class U2BaseModel(ASRInterface, nn.Layer):
# 2. Decoder forward step by step
for i in range(1, maxlen + 1):
# Stop if all batch and all beam produce eos
- # TODO(Hui Zhang): if end_flag.sum() == running_size:
- if end_flag.cast(paddle.int64).sum() == running_size:
+ if end_flag.sum() == running_size:
break
# 2.1 Forward decoder step
@@ -378,9 +375,7 @@ class U2BaseModel(ASRInterface, nn.Layer):
speech, speech_lengths, decoding_chunk_size,
num_decoding_left_chunks, simulate_streaming)
maxlen = encoder_out.shape[1]
- # (TODO Hui Zhang): bool no support reduce_sum
- # encoder_out_lens = encoder_mask.squeeze(1).sum(1)
- encoder_out_lens = encoder_mask.squeeze(1).astype(paddle.int).sum(1)
+ encoder_out_lens = encoder_mask.squeeze(1).sum(1)
ctc_probs = self.ctc.log_softmax(encoder_out) # (B, maxlen, vocab_size)
topk_prob, topk_index = ctc_probs.topk(1, axis=2) # (B, maxlen, 1)
@@ -507,16 +502,15 @@ class U2BaseModel(ASRInterface, nn.Layer):
num_decoding_left_chunks, simulate_streaming)
return hyps[0][0]
- def attention_rescoring(
- self,
- speech: paddle.Tensor,
- speech_lengths: paddle.Tensor,
- beam_size: int,
- decoding_chunk_size: int=-1,
- num_decoding_left_chunks: int=-1,
- ctc_weight: float=0.0,
- simulate_streaming: bool=False,
- reverse_weight: float=0.0, ) -> List[int]:
+ def attention_rescoring(self,
+ speech: paddle.Tensor,
+ speech_lengths: paddle.Tensor,
+ beam_size: int,
+ decoding_chunk_size: int=-1,
+ num_decoding_left_chunks: int=-1,
+ ctc_weight: float=0.0,
+ simulate_streaming: bool=False,
+ reverse_weight: float=0.0) -> List[int]:
""" Apply attention rescoring decoding, CTC prefix beam search
is applied first to get nbest, then we resoring the nbest on
attention decoder with corresponding encoder out
@@ -531,6 +525,7 @@ class U2BaseModel(ASRInterface, nn.Layer):
0: used for training, it's prohibited here
simulate_streaming (bool): whether do encoder forward in a
streaming fashion
+            reverse_weight (float): reverse decoder weight.
Returns:
List[int]: Attention rescoring result
"""
@@ -560,28 +555,22 @@ class U2BaseModel(ASRInterface, nn.Layer):
hyp_content, place=device, dtype=paddle.long)
hyp_list.append(hyp_content)
hyps_pad = pad_sequence(hyp_list, True, self.ignore_id)
- ori_hyps_pad = hyps_pad
hyps_lens = paddle.to_tensor(
[len(hyp[0]) for hyp in hyps], place=device,
dtype=paddle.long) # (beam_size,)
hyps_pad, _ = add_sos_eos(hyps_pad, self.sos, self.eos, self.ignore_id)
hyps_lens = hyps_lens + 1 # Add at begining
- encoder_out = encoder_out.repeat(beam_size, 1, 1)
- encoder_mask = paddle.ones(
- (beam_size, 1, encoder_out.shape[1]), dtype=paddle.bool)
+ logger.debug(
+ f"hyps pad: {hyps_pad} {self.sos} {self.eos} {self.ignore_id}")
- r_hyps_pad = st_reverse_pad_list(ori_hyps_pad, hyps_lens - 1, self.sos,
- self.eos)
- decoder_out, r_decoder_out, _ = self.decoder(
- encoder_out, encoder_mask, hyps_pad, hyps_lens, r_hyps_pad,
- reverse_weight) # (beam_size, max_hyps_len, vocab_size)
# ctc score in ln domain
- decoder_out = paddle.nn.functional.log_softmax(decoder_out, axis=-1)
- decoder_out = decoder_out.numpy()
+ # (beam_size, max_hyps_len, vocab_size)
+ decoder_out, r_decoder_out = self.forward_attention_decoder(
+ hyps_pad, hyps_lens, encoder_out, reverse_weight)
+ decoder_out = decoder_out.numpy()
# r_decoder_out will be 0.0, if reverse_weight is 0.0 or decoder is a
# conventional transformer decoder.
- r_decoder_out = paddle.nn.functional.log_softmax(r_decoder_out, axis=-1)
r_decoder_out = r_decoder_out.numpy()
# Only use decoder score for rescoring
@@ -594,45 +583,68 @@ class U2BaseModel(ASRInterface, nn.Layer):
score += decoder_out[i][j][w]
# last decoder output token is `eos`, for laste decoder input token.
score += decoder_out[i][len(hyp[0])][self.eos]
+
+ logger.debug(
+ f"hyp {i} len {len(hyp[0])} l2r score: {score} ctc_score: {hyp[1]} reverse_weight: {reverse_weight}"
+ )
+
if reverse_weight > 0:
r_score = 0.0
for j, w in enumerate(hyp[0]):
r_score += r_decoder_out[i][len(hyp[0]) - j - 1][w]
r_score += r_decoder_out[i][len(hyp[0])][self.eos]
+
+ logger.debug(
+ f"hyp {i} len {len(hyp[0])} r2l score: {r_score} ctc_score: {hyp[1]} reverse_weight: {reverse_weight}"
+ )
+
score = score * (1 - reverse_weight) + r_score * reverse_weight
# add ctc score (which in ln domain)
score += hyp[1] * ctc_weight
if score > best_score:
best_score = score
best_index = i
+
+ logger.debug(f"result: {hyps[best_index]}")
return hyps[best_index][0]
- #@jit.to_static
+ @jit.to_static(property=True)
def subsampling_rate(self) -> int:
""" Export interface for c++ call, return subsampling_rate of the
model
"""
return self.encoder.embed.subsampling_rate
- #@jit.to_static
+ @jit.to_static(property=True)
def right_context(self) -> int:
""" Export interface for c++ call, return right_context of the model
"""
return self.encoder.embed.right_context
- #@jit.to_static
+ @jit.to_static(property=True)
def sos_symbol(self) -> int:
""" Export interface for c++ call, return sos symbol id of the model
"""
return self.sos
- #@jit.to_static
+ @jit.to_static(property=True)
def eos_symbol(self) -> int:
""" Export interface for c++ call, return eos symbol id of the model
"""
return self.eos
- @jit.to_static
+ @jit.to_static(property=True)
+ def is_bidirectional_decoder(self) -> bool:
+ """
+ Returns:
+ paddle.Tensor: decoder output
+ """
+ if hasattr(self.decoder, 'right_decoder'):
+ return True
+ else:
+ return False
+
+ # @jit.to_static
def forward_encoder_chunk(
self,
xs: paddle.Tensor,
@@ -682,28 +694,16 @@ class U2BaseModel(ASRInterface, nn.Layer):
Args:
xs (paddle.Tensor): encoder output, (B, T, D)
Returns:
- paddle.Tensor: activation before ctc
+ paddle.Tensor: activation before ctc. (B, Tmax, odim)
"""
return self.ctc.log_softmax(xs)
# @jit.to_static
- def is_bidirectional_decoder(self) -> bool:
- """
- Returns:
- paddle.Tensor: decoder output
- """
- if hasattr(self.decoder, 'right_decoder'):
- return True
- else:
- return False
-
- # @jit.to_static
- def forward_attention_decoder(
- self,
- hyps: paddle.Tensor,
- hyps_lens: paddle.Tensor,
- encoder_out: paddle.Tensor,
- reverse_weight: float=0.0, ) -> paddle.Tensor:
+ def forward_attention_decoder(self,
+ hyps: paddle.Tensor,
+ hyps_lens: paddle.Tensor,
+ encoder_out: paddle.Tensor,
+ reverse_weight: float=0.0) -> paddle.Tensor:
""" Export interface for c++ call, forward decoder with multiple
hypothesis from ctc prefix beam search and one encoder output
Args:
@@ -768,6 +768,7 @@ class U2BaseModel(ASRInterface, nn.Layer):
num_decoding_left_chunks (int, optional):
number of left chunks for decoding. Defaults to -1.
simulate_streaming (bool, optional): simulate streaming inference. Defaults to False.
+ reverse_weight (float, optional): reverse decoder weight, used by `attention_rescoring`.
Raises:
ValueError: when not support decoding_method.
@@ -983,6 +984,49 @@ class U2InferModel(U2Model):
def __init__(self, configs: dict):
super().__init__(configs)
+ from paddlespeech.s2t.modules.fbank import KaldiFbank
+ import yaml
+ import json
+ import numpy as np
+
+ input_dim = configs['input_dim']
+ process = configs['preprocess_config']
+ with open(process, encoding="utf-8") as f:
+ conf = yaml.safe_load(f)
+            assert isinstance(conf, dict), type(conf)
+
+ for idx, process in enumerate(conf['process']):
+ assert isinstance(process, dict), type(process)
+ opts = dict(process)
+ process_type = opts.pop("type")
+
+ if process_type == 'fbank_kaldi':
+ opts.update({'n_mels': input_dim})
+ opts['dither'] = 0.0
+ self.fbank = KaldiFbank(**opts)
+ logger.info(f"{self.__class__.__name__} export: {self.fbank}")
+ if process_type == 'cmvn_json':
+ # align with paddlespeech.audio.transform.cmvn:GlobalCMVN
+ std_floor = 1.0e-20
+
+ cmvn = opts['cmvn_path']
+ if isinstance(cmvn, dict):
+ cmvn_stats = cmvn
+ else:
+ with open(cmvn) as f:
+ cmvn_stats = json.load(f)
+ count = cmvn_stats['frame_num']
+ mean = np.array(cmvn_stats['mean_stat']) / count
+ square_sums = np.array(cmvn_stats['var_stat'])
+ var = square_sums / count - mean**2
+ std = np.maximum(np.sqrt(var), std_floor)
+ istd = 1.0 / std
+ self.global_cmvn = GlobalCMVN(
+ paddle.to_tensor(mean, dtype=paddle.float),
+ paddle.to_tensor(istd, dtype=paddle.float))
+ logger.info(
+ f"{self.__class__.__name__} export: {self.global_cmvn}")
+
def forward(self,
feats,
feats_lengths,
@@ -998,9 +1042,25 @@ class U2InferModel(U2Model):
Returns:
List[List[int]]: best path result
"""
- return self.ctc_greedy_search(
- feats,
- feats_lengths,
- decoding_chunk_size=decoding_chunk_size,
- num_decoding_left_chunks=num_decoding_left_chunks,
- simulate_streaming=simulate_streaming)
+ # dummy code for dy2st
+ # return self.ctc_greedy_search(
+ # feats,
+ # feats_lengths,
+ # decoding_chunk_size=decoding_chunk_size,
+ # num_decoding_left_chunks=num_decoding_left_chunks,
+ # simulate_streaming=simulate_streaming)
+ return feats, feats_lengths
+
+ def forward_feature(self, x):
+ """feature pipeline.
+
+ Args:
+ x (paddle.Tensor): waveform (T,).
+
+ Return:
+ feat (paddle.Tensor): feature (T, D)
+ """
+ x = paddle.cast(x, paddle.float32)
+ feat = self.fbank(x)
+ feat = self.global_cmvn(feat)
+ return feat
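The export path above converts accumulated CMVN statistics (`frame_num`, `mean_stat`, `var_stat`) into a mean and an inverse standard deviation for `GlobalCMVN`. A small numpy sketch of that conversion and of how the resulting statistics normalize a feature matrix (the statistics here are synthetic; the real ones come from the cmvn JSON referenced by the preprocess config):

```python
# Sketch: turn accumulated CMVN stats into mean/istd, mirroring U2InferModel.__init__.
import numpy as np

rng = np.random.default_rng(0)
frames = rng.standard_normal((1000, 80))  # (frame_num, n_mels), synthetic fbank features

cmvn_stats = {
    "frame_num": frames.shape[0],
    "mean_stat": frames.sum(axis=0),        # per-dim sum of features
    "var_stat": (frames ** 2).sum(axis=0),  # per-dim sum of squared features
}

std_floor = 1.0e-20
count = cmvn_stats["frame_num"]
mean = np.asarray(cmvn_stats["mean_stat"]) / count
var = np.asarray(cmvn_stats["var_stat"]) / count - mean ** 2
istd = 1.0 / np.maximum(np.sqrt(var), std_floor)

normalized = (frames - mean) * istd          # what GlobalCMVN applies at runtime
print(normalized.mean(), normalized.std())   # roughly 0 and 1
```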
diff --git a/paddlespeech/s2t/models/u2_st/u2_st.py b/paddlespeech/s2t/models/u2_st/u2_st.py
index e8b61bc0d..31defbbaf 100644
--- a/paddlespeech/s2t/models/u2_st/u2_st.py
+++ b/paddlespeech/s2t/models/u2_st/u2_st.py
@@ -111,10 +111,7 @@ class U2STBaseModel(nn.Layer):
encoder_out, encoder_mask = self.encoder(speech, speech_lengths)
encoder_time = time.time() - start
#logger.debug(f"encoder time: {encoder_time}")
- #TODO(Hui Zhang): sum not support bool type
- #encoder_out_lens = encoder_mask.squeeze(1).sum(1) #[B, 1, T] -> [B]
- encoder_out_lens = encoder_mask.squeeze(1).cast(paddle.int64).sum(
- 1) #[B, 1, T] -> [B]
+ encoder_out_lens = encoder_mask.squeeze(1).sum(1) #[B, 1, T] -> [B]
# 2a. ST-decoder branch
start = time.time()
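The same simplification as in u2.py: the explicit integer cast before summing the boolean mask is dropped. A tiny sanity-check sketch of what the new line computes, assuming a paddlepaddle version (>= 2.4rc per the updated requirement) where reducing a bool mask is supported, which is what this change relies on:

```python
# Sanity-check sketch for `encoder_out_lens = encoder_mask.squeeze(1).sum(1)`.
import paddle

encoder_mask = paddle.to_tensor(
    [[[True, True, True, False, False]],
     [[True, True, True, True, True]]])            # [B, 1, T] padding mask
encoder_out_lens = encoder_mask.squeeze(1).sum(1)  # [B], number of valid frames
print(encoder_out_lens)                            # expect [3, 5]
```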
diff --git a/paddlespeech/s2t/models/wav2vec2/__init__.py b/paddlespeech/s2t/models/wav2vec2/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/paddlespeech/s2t/models/wav2vec2/modules/VanillaNN.py b/paddlespeech/s2t/models/wav2vec2/modules/VanillaNN.py
new file mode 100644
index 000000000..ae141d1b3
--- /dev/null
+++ b/paddlespeech/s2t/models/wav2vec2/modules/VanillaNN.py
@@ -0,0 +1,44 @@
+"""Vanilla Neural Network for simple tests.
+Authors
+* Elena Rastorgueva 2020
+"""
+import paddle
+
+from paddlespeech.s2t.models.wav2vec2.modules import containers
+from paddlespeech.s2t.models.wav2vec2.modules import linear
+
+
+class VanillaNN(containers.Sequential):
+ """A simple vanilla Deep Neural Network.
+ Arguments
+ ---------
+ activation : paddle class
+ A class used for constructing the activation layers.
+ dnn_blocks : int
+ The number of linear neural blocks to include.
+ dnn_neurons : int
+ The number of neurons in the linear layers.
+ Example
+ -------
+ >>> inputs = paddle.rand([10, 120, 60])
+ >>> model = VanillaNN(input_shape=inputs.shape)
+ >>> outputs = model(inputs)
+ >>> outputs.shape
+    [10, 120, 512]
+ """
+
+ def __init__(
+ self,
+ input_shape,
+ activation=paddle.nn.LeakyReLU,
+ dnn_blocks=2,
+ dnn_neurons=512, ):
+ super().__init__(input_shape=input_shape)
+
+ for block_index in range(dnn_blocks):
+ self.append(
+ linear.Linear,
+ n_neurons=dnn_neurons,
+ bias=True,
+ layer_name="linear", )
+ self.append(activation(), layer_name="act")
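A quick usage sketch matching the docstring above: the block stack infers its input size from `input_shape`, so only the shape of a dummy batch is needed (shapes are the docstring's example values; assumes the package from this patch is importable):

```python
# Usage sketch for VanillaNN: two Linear + LeakyReLU blocks whose input size
# is inferred from input_shape.
import paddle

from paddlespeech.s2t.models.wav2vec2.modules.VanillaNN import VanillaNN

inputs = paddle.rand([10, 120, 60])  # (batch, time, feature)
model = VanillaNN(input_shape=inputs.shape, dnn_blocks=2, dnn_neurons=512)
outputs = model(inputs)
print(outputs.shape)                 # [10, 120, 512]
```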
diff --git a/paddlespeech/s2t/models/wav2vec2/modules/activations.py b/paddlespeech/s2t/models/wav2vec2/modules/activations.py
new file mode 100644
index 000000000..722d8a0d6
--- /dev/null
+++ b/paddlespeech/s2t/models/wav2vec2/modules/activations.py
@@ -0,0 +1,180 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import math
+
+import paddle
+from paddle import nn
+from paddle import Tensor
+
+from paddlespeech.s2t.utils.log import Log
+logger = Log(__name__).getlog()
+
+
+class NewGELUActivation(nn.Layer):
+ """
+ Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see
+ the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
+ """
+
+ def forward(self, input: Tensor) -> Tensor:
+ return 0.5 * input * (1.0 + paddle.tanh(
+ math.sqrt(2.0 / math.pi) *
+ (input + 0.044715 * paddle.pow(input, 3.0))))
+
+
+class GELUActivation(nn.Layer):
+ """
+ Original Implementation of the GELU activation function in Google BERT repo when initially created. For
+ information: OpenAI GPT's GELU is slightly different (and gives slightly different results): 0.5 * x * (1 +
+ paddle.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * paddle.pow(x, 3)))) This is now written in C in nn.functional
+ Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
+ """
+
+    def __init__(self, use_gelu_python: bool=False):
+        super().__init__()
+        self.act = self._gelu_python if use_gelu_python else nn.functional.gelu
+
+ def _gelu_python(self, input: Tensor) -> Tensor:
+ return input * 0.5 * (1.0 + paddle.erf(input / math.sqrt(2.0)))
+
+ def forward(self, input: Tensor) -> Tensor:
+ return self.act(input)
+
+
+class FastGELUActivation(nn.Layer):
+ """
+ Applies GELU approximation that is slower than QuickGELU but more accurate. See: https://github.com/hendrycks/GELUs
+ """
+
+ def forward(self, input: Tensor) -> Tensor:
+ return 0.5 * input * (
+ 1.0 + paddle.tanh(input * 0.7978845608 *
+ (1.0 + 0.044715 * input * input)))
+
+
+class QuickGELUActivation(nn.Layer):
+ """
+ Applies GELU approximation that is fast but somewhat inaccurate. See: https://github.com/hendrycks/GELUs
+ """
+
+ def forward(self, input: Tensor) -> Tensor:
+ return input * paddle.sigmoid(1.702 * input)
+
+
+class ClippedGELUActivation(nn.Layer):
+ """
+    Clip the range of possible GeLU outputs between [min, max]. This is especially useful for quantization purposes, as
+    it allows mapping negative values in the GeLU spectrum. For more information on this trick, please refer to
+ https://arxiv.org/abs/2004.09602.
+
+ Gaussian Error Linear Unit. Original Implementation of the gelu activation function in Google Bert repo when
+ initially created.
+
+ For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 +
+ paddle.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * paddle.pow(x, 3)))). See https://arxiv.org/abs/1606.08415
+ """
+
+ def __init__(self, min: float, max: float):
+ if min > max:
+ raise ValueError(
+ f"min should be < max (got min: {min}, max: {max})")
+
+ super().__init__()
+ self.min = min
+ self.max = max
+
+ def forward(self, x: Tensor) -> Tensor:
+ return paddle.clip(gelu(x), self.min, self.max)
+
+
+class SiLUActivation(nn.Layer):
+ """
+ See Gaussian Error Linear Units (Hendrycks et al., https://arxiv.org/abs/1606.08415) where the SiLU (Sigmoid Linear
+ Unit) was originally introduced and coined, and see Sigmoid-Weighted Linear Units for Neural Network Function
+ Approximation in Reinforcement Learning (Elfwing et al., https://arxiv.org/abs/1702.03118) and Swish: a Self-Gated
+ Activation Function (Ramachandran et al., https://arxiv.org/abs/1710.05941v1) where the SiLU was experimented with
+ later.
+ """
+
+ def __init__(self):
+ super().__init__()
+ self.act = nn.functional.silu
+
+ def _silu_python(self, input: Tensor) -> Tensor:
+ return input * paddle.sigmoid(input)
+
+ def forward(self, input: Tensor) -> Tensor:
+ return self.act(input)
+
+
+class MishActivation(nn.Layer):
+ """
+ See Mish: A Self-Regularized Non-Monotonic Activation Function (Misra., https://arxiv.org/abs/1908.08681). Also
+ visit the official repository for the paper: https://github.com/digantamisra98/Mish
+ """
+
+ def __init__(self):
+ super().__init__()
+ self.act = nn.functional.mish
+
+ def _mish_python(self, input: Tensor) -> Tensor:
+ return input * paddle.tanh(nn.functional.softplus(input))
+
+ def forward(self, input: Tensor) -> Tensor:
+ return self.act(input)
+
+
+class LinearActivation(nn.Layer):
+ """
+ Applies the linear activation function, i.e. forwarding input directly to output.
+ """
+
+ def forward(self, input: Tensor) -> Tensor:
+ return input
+
+
+ACT2FN = {
+ "gelu": GELUActivation(),
+ "gelu_10": ClippedGELUActivation(-10, 10),
+ "gelu_fast": FastGELUActivation(),
+ "gelu_new": NewGELUActivation(),
+ "gelu_python": GELUActivation(use_gelu_python=True),
+ "linear": LinearActivation(),
+ "mish": MishActivation(),
+ "quick_gelu": QuickGELUActivation(),
+ "relu": nn.ReLU(),
+ "sigmoid": nn.Sigmoid(),
+ "silu": SiLUActivation(),
+ "swish": SiLUActivation(),
+ "tanh": nn.Tanh(),
+}
+
+
+def get_activation(activation_string):
+ if activation_string in ACT2FN:
+ return ACT2FN[activation_string]
+ else:
+ raise KeyError(
+ f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}"
+ )
+
+
+# For backwards compatibility with: from activations import gelu_python
+gelu_python = get_activation("gelu_python")
+gelu_new = get_activation("gelu_new")
+gelu = get_activation("gelu")
+gelu_fast = get_activation("gelu_fast")
+quick_gelu = get_activation("quick_gelu")
+silu = get_activation("silu")
+mish = get_activation("mish")
+linear_act = get_activation("linear")
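The `ACT2FN` registry mirrors the HuggingFace mapping, so a config can select an activation by string. A minimal usage sketch (assumes the module from this patch is importable):

```python
# Minimal sketch of looking up activations by name via get_activation / ACT2FN.
import paddle

from paddlespeech.s2t.models.wav2vec2.modules.activations import get_activation

act = get_activation("gelu_new")   # same object as ACT2FN["gelu_new"]
x = paddle.randn([2, 4])
print(act(x).shape)                # [2, 4]

try:
    get_activation("not_an_activation")
except KeyError as e:              # unknown names raise KeyError
    print(e)
```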
diff --git a/paddlespeech/s2t/models/wav2vec2/modules/containers.py b/paddlespeech/s2t/models/wav2vec2/modules/containers.py
new file mode 100644
index 000000000..b39733570
--- /dev/null
+++ b/paddlespeech/s2t/models/wav2vec2/modules/containers.py
@@ -0,0 +1,129 @@
+import inspect
+
+import paddle
+
+
+class Sequential(paddle.nn.LayerDict):
+ """A sequence of modules with potentially inferring shape on construction.
+ If layers are passed with names, these can be referenced with dot notation.
+ Arguments
+ ---------
+ input_shape : iterable
+ A list or tuple of ints or None, representing the expected shape of an
+ input tensor. None represents a variable-length dimension. If no
+ ``input_shape`` is passed, no shape inference will be performed.
+ *layers, **named_layers
+ The inputs are treated as a list of layers to be
+ applied in sequence. The output shape of each layer is used to
+ infer the shape of the following layer. If a tuple is returned,
+ only the shape of the first element is used to determine input
+ shape of the next layer (e.g. RNN returns output, hidden).
+ Example
+ -------
+    >>> inputs = paddle.rand([10, 40, 50])
+    >>> model = Sequential(input_shape=inputs.shape)
+    >>> model.append(Linear, n_neurons=100, layer_name="layer1")
+    >>> model.append(Linear, n_neurons=200, layer_name="layer2")
+    >>> outputs = model(inputs)
+    >>> outputs.shape
+    [10, 40, 200]
+    >>> outputs = model.layer1(inputs)
+    >>> outputs.shape
+    [10, 40, 100]
+ """
+
+ def __init__(self, *layers, input_shape=None, **named_layers):
+ super().__init__()
+
+ # Make sure either layers or input_shape is passed
+ if not layers and input_shape is None and not named_layers:
+ raise ValueError("Must pass either layers or input shape")
+
+ # Keep track of what layers need "lengths" passed
+ self.length_layers = []
+
+ # Replace None dimensions with arbitrary value
+ self.input_shape = input_shape
+ if input_shape and None in input_shape:
+ self.input_shape = list(input_shape)
+ for i, dim in enumerate(self.input_shape):
+
+ # To reduce size of dummy tensors, use 1 for batch dim
+ if i == 0 and dim is None:
+ dim = 1
+
+                # Use 256 as a nice round arbitrary value, big enough that
+                # halving this dimension a few times doesn't reach 1
+                self.input_shape[i] = dim or 256
+
+ # Append non-named layers
+ for layer in layers:
+ self.append(layer)
+
+ # Append named layers
+ for name, layer in named_layers.items():
+ self.append(layer, layer_name=name)
+
+ def append(self, layer, *args, layer_name=None, **kwargs):
+ """Add a layer to the list of layers, inferring shape if necessary.
+ Arguments
+ ---------
+        layer : A paddle.nn.Layer class or object
+ If the layer is a class, it should accept an argument called
+ ``input_shape`` which will be inferred and passed. If the layer
+ is a module object, it is added as-is.
+ layer_name : str
+ The name of the layer, for reference. If the name is in use,
+ ``_{count}`` will be appended.
+ *args, **kwargs
+ These are passed to the layer if it is constructed.
+ """
+
+ # Compute layer_name
+ if layer_name is None:
+ layer_name = str(len(self))
+ elif layer_name in self:
+ index = 0
+ while f"{layer_name}_{index}" in self:
+ index += 1
+ layer_name = f"{layer_name}_{index}"
+ # Check if it needs to be constructed with input shape
+ if self.input_shape:
+ argspec = inspect.getfullargspec(layer)
+ if "input_shape" in argspec.args + argspec.kwonlyargs:
+ input_shape = self.get_output_shape()
+ layer = layer(*args, input_shape=input_shape, **kwargs)
+
+ # Finally, append the layer.
+ try:
+ self[layer_name] = layer
+ # self.add_module(layer_name, layer)
+ except TypeError:
+ raise ValueError(
+ "Must pass `input_shape` at initialization and use "
+ "modules that take `input_shape` to infer shape when "
+ "using `append()`.")
+
+ def get_output_shape(self):
+ """Returns expected shape of the output.
+ Computed by passing dummy input constructed with the
+ ``self.input_shape`` attribute.
+ """
+ with paddle.no_grad():
+ dummy_input = paddle.zeros(self.input_shape)
+ dummy_output = self(dummy_input)
+ return dummy_output.shape
+
+ def forward(self, x):
+ """Applies layers in sequence, passing only the first element of tuples.
+ Arguments
+ ---------
+ x : paddle.Tensor
+ The input tensor to run through the network.
+ """
+ for layer in self.values():
+ x = layer(x)
+ if isinstance(x, tuple):
+ x = x[0]
+
+ return x
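A usage sketch of the shape-inferring `Sequential` container together with the `Linear` layer added below, following the docstring example (note that `paddle.rand` takes a shape list; assumes the modules from this patch are importable):

```python
# Usage sketch for the shape-inferring Sequential container with named layers.
import paddle

from paddlespeech.s2t.models.wav2vec2.modules.containers import Sequential
from paddlespeech.s2t.models.wav2vec2.modules.linear import Linear

inputs = paddle.rand([10, 40, 50])  # (batch, time, feature)
model = Sequential(input_shape=inputs.shape)
model.append(Linear, n_neurons=100, layer_name="layer1")  # input size inferred: 50
model.append(Linear, n_neurons=200, layer_name="layer2")  # input size inferred: 100
outputs = model(inputs)
print(outputs.shape)                # [10, 40, 200]
print(model.layer1(inputs).shape)   # [10, 40, 100]
```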
diff --git a/paddlespeech/s2t/models/wav2vec2/modules/linear.py b/paddlespeech/s2t/models/wav2vec2/modules/linear.py
new file mode 100644
index 000000000..488949d14
--- /dev/null
+++ b/paddlespeech/s2t/models/wav2vec2/modules/linear.py
@@ -0,0 +1,72 @@
+"""Library implementing linear transformation.
+Authors
+ * Mirco Ravanelli 2020
+ * Davide Borra 2021
+"""
+import logging
+
+import paddle
+
+from paddlespeech.s2t.modules import align
+
+logger = logging.getLogger(__name__)
+
+
+class Linear(paddle.nn.Layer):
+ """Computes a linear transformation y = wx + b.
+ Arguments
+ ---------
+ n_neurons : int
+ It is the number of output neurons (i.e, the dimensionality of the
+ output).
+ input_shape: tuple
+ It is the shape of the input tensor.
+ input_size: int
+ Size of the input tensor.
+ bias : bool
+ If True, the additive bias b is adopted.
+ combine_dims : bool
+ If True and the input is 4D, combine 3rd and 4th dimensions of input.
+ Example
+ -------
+    >>> inputs = paddle.rand([10, 50, 40])
+    >>> lin_t = Linear(input_shape=(10, 50, 40), n_neurons=100)
+    >>> output = lin_t(inputs)
+    >>> output.shape
+    [10, 50, 100]
+ """
+
+ def __init__(
+ self,
+ n_neurons,
+ input_shape=None,
+ input_size=None,
+ bias=True,
+ combine_dims=False, ):
+ super().__init__()
+ self.combine_dims = combine_dims
+
+ if input_shape is None and input_size is None:
+ raise ValueError("Expected one of input_shape or input_size")
+
+ if input_size is None:
+ input_size = input_shape[-1]
+ if len(input_shape) == 4 and self.combine_dims:
+ input_size = input_shape[2] * input_shape[3]
+
+ # Weights are initialized following paddle approach
+ self.w = align.Linear(input_size, n_neurons, bias_attr=bias)
+
+ def forward(self, x):
+ """Returns the linear transformation of input tensor.
+ Arguments
+ ---------
+ x : paddle.Tensor
+ Input to transform linearly.
+ """
+        if x.ndim == 4 and self.combine_dims:
+            x = x.reshape([x.shape[0], x.shape[1], x.shape[2] * x.shape[3]])
+
+ wx = self.w(x)
+
+ return wx
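`combine_dims` flattens the last two dimensions of a 4-D input before the projection, as described in the class docstring. A short sketch with illustrative shapes (assumes the module from this patch is importable):

```python
# Sketch of Linear with combine_dims=True: a 4-D (batch, time, a, b) input is
# flattened to (batch, time, a*b) before the projection.
import paddle

from paddlespeech.s2t.models.wav2vec2.modules.linear import Linear

x = paddle.rand([10, 50, 8, 5])  # (batch, time, 8, 5)
lin = Linear(input_shape=x.shape, n_neurons=100, combine_dims=True)
print(lin(x).shape)              # [10, 50, 100]
```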
diff --git a/paddlespeech/s2t/models/wav2vec2/modules/modeling_outputs.py b/paddlespeech/s2t/models/wav2vec2/modules/modeling_outputs.py
new file mode 100644
index 000000000..fb2a87122
--- /dev/null
+++ b/paddlespeech/s2t/models/wav2vec2/modules/modeling_outputs.py
@@ -0,0 +1,1137 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from collections import OrderedDict
+from dataclasses import dataclass
+from dataclasses import fields
+from typing import Optional
+from typing import Tuple
+
+import paddle
+
+
+class ModelOutput(OrderedDict):
+ """
+ Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like a
+ tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular
+ python dictionary.
+
+    You can't unpack a `ModelOutput` directly. Use the [`~utils.ModelOutput.to_tuple`] method to convert it to a tuple
+    first.
+    """
+
+ def __post_init__(self):
+ class_fields = fields(self)
+
+ # Safety and consistency checks
+ if not len(class_fields):
+ raise ValueError(f"{self.__class__.__name__} has no fields.")
+ if not all(field.default is None for field in class_fields[1:]):
+ raise ValueError(
+ f"{self.__class__.__name__} should not have more than one required field."
+ )
+
+ first_field = getattr(self, class_fields[0].name)
+ other_fields_are_none = all(
+ getattr(self, field.name) is None for field in class_fields[1:])
+
+ if other_fields_are_none and not paddle.is_tensor(first_field):
+ if isinstance(first_field, dict):
+ iterator = first_field.items()
+ first_field_iterator = True
+ else:
+ try:
+ iterator = iter(first_field)
+ first_field_iterator = True
+ except TypeError:
+ first_field_iterator = False
+
+ # if we provided an iterator as first field and the iterator is a (key, value) iterator
+ # set the associated fields
+ if first_field_iterator:
+ for element in iterator:
+ if (not isinstance(element, (list, tuple)) or
+ not len(element) == 2 or
+ not isinstance(element[0], str)):
+ break
+ setattr(self, element[0], element[1])
+ if element[1] is not None:
+ self[element[0]] = element[1]
+ elif first_field is not None:
+ self[class_fields[0].name] = first_field
+ else:
+ for field in class_fields:
+ v = getattr(self, field.name)
+ if v is not None:
+ self[field.name] = v
+
+ def __delitem__(self, *args, **kwargs):
+ raise Exception(
+ f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance."
+ )
+
+ def setdefault(self, *args, **kwargs):
+ raise Exception(
+ f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance."
+ )
+
+ def pop(self, *args, **kwargs):
+ raise Exception(
+ f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")
+
+ def update(self, *args, **kwargs):
+ raise Exception(
+ f"You cannot use ``update`` on a {self.__class__.__name__} instance."
+ )
+
+ def __getitem__(self, k):
+ if isinstance(k, str):
+ inner_dict = {k: v for (k, v) in self.items()}
+ return inner_dict[k]
+ else:
+ return self.to_tuple()[k]
+
+ def __setattr__(self, name, value):
+ if name in self.keys() and value is not None:
+ # Don't call self.__setitem__ to avoid recursion errors
+ super().__setitem__(name, value)
+ super().__setattr__(name, value)
+
+ def __setitem__(self, key, value):
+ # Will raise a KeyException if needed
+ super().__setitem__(key, value)
+ # Don't call self.__setattr__ to avoid recursion errors
+ super().__setattr__(key, value)
+
+ def to_tuple(self) -> Tuple:
+ """
+ Convert self to a tuple containing all the attributes/keys that are not `None`.
+ """
+ return tuple(self[k] for k in self.keys())
+
+
+@dataclass
+class BaseModelOutput(ModelOutput):
+ """
+ Base class for model's outputs, with potential hidden states and attentions.
+
+ Args:
+ last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ last_hidden_state: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class BaseModelOutputWithNoAttention(ModelOutput):
+ """
+ Base class for model's outputs, with potential hidden states.
+
+ Args:
+ last_hidden_state (`paddle.Tensor` of shape `(batch_size, num_channels, height, width)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, num_channels, height, width)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ """
+
+    last_hidden_state: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class BaseModelOutputWithPooling(ModelOutput):
+ """
+ Base class for model's outputs that also contains a pooling of the last hidden states.
+
+ Args:
+ last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ pooler_output (`paddle.Tensor` of shape `(batch_size, hidden_size)`):
+ Last layer hidden-state of the first token of the sequence (classification token) after further processing
+ through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns
+ the classification token after processing through a linear layer and a tanh activation function. The linear
+ layer weights are trained from the next sentence prediction (classification) objective during pretraining.
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ last_hidden_state: paddle.Tensor = None
+ pooler_output: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class BaseModelOutputWithPoolingAndNoAttention(ModelOutput):
+ """
+ Base class for model's outputs that also contains a pooling of the last hidden states.
+
+ Args:
+ last_hidden_state (`paddle.Tensor` of shape `(batch_size, num_channels, height, width)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ pooler_output (`paddle.Tensor` of shape `(batch_size, hidden_size)`):
+ Last layer hidden-state after a pooling operation on the spatial dimensions.
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, num_channels, height, width)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ """
+
+ last_hidden_state: paddle.Tensor = None
+ pooler_output: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class BaseModelOutputWithPast(ModelOutput):
+ """
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
+
+ Args:
+ last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+
+ If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
+ hidden_size)` is output.
+ past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(paddle.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
+ `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,
+ encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
+ `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
+ input) to speed up sequential decoding.
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
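+
+ Example (an illustrative sketch, not the library's canonical usage; the layer count and tensor shapes below are arbitrary, chosen only to show how `past_key_values` is nested):
+
+ ```python
+ import paddle
+
+ # hypothetical 2-layer decoder cache: one (key, value) pair per layer, each of shape
+ # (batch_size, num_heads, sequence_length, embed_size_per_head)
+ k = paddle.zeros([1, 2, 4, 8])
+ v = paddle.zeros([1, 2, 4, 8])
+ outputs = BaseModelOutputWithPast(last_hidden_state=paddle.zeros([1, 4, 16]), past_key_values=((k, v), (k, v)))
+ assert len(outputs.past_key_values) == 2  # one entry per decoder layer
+ ```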
+ """
+
+ last_hidden_state: paddle.Tensor = None
+ past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class BaseModelOutputWithCrossAttentions(ModelOutput):
+ """
+ Base class for model's outputs, with potential hidden states and attentions.
+
+ Args:
+ last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ cross_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
+ weighted average in the cross-attention heads.
+ """
+
+ last_hidden_state: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+ cross_attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class BaseModelOutputWithPoolingAndCrossAttentions(ModelOutput):
+ """
+ Base class for model's outputs that also contains a pooling of the last hidden states.
+
+ Args:
+ last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ pooler_output (`paddle.Tensor` of shape `(batch_size, hidden_size)`):
+ Last layer hidden-state of the first token of the sequence (classification token) after further processing
+ through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns
+ the classification token after processing through a linear layer and a tanh activation function. The linear
+ layer weights are trained from the next sentence prediction (classification) objective during pretraining.
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ cross_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
+ weighted average in the cross-attention heads.
+ past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(paddle.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and optionally if
+ `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,
+ encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
+ `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
+ input) to speed up sequential decoding.
+ """
+
+ last_hidden_state: paddle.Tensor = None
+ pooler_output: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+ cross_attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class BaseModelOutputWithPastAndCrossAttentions(ModelOutput):
+ """
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
+
+ Args:
+ last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+
+ If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
+ hidden_size)` is output.
+ past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(paddle.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and optionally if
+ `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,
+ encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
+ `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
+ input) to speed up sequential decoding.
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ cross_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
+ weighted average in the cross-attention heads.
+ """
+
+ last_hidden_state: paddle.Tensor = None
+ past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+ cross_attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class Seq2SeqModelOutput(ModelOutput):
+ """
+ Base class for model encoder's outputs that also contains pre-computed hidden states that can speed up sequential
+ decoding.
+
+ Args:
+ last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the decoder of the model.
+
+ If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
+ hidden_size)` is output.
+ past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(paddle.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+ decoder_hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs.
+ decoder_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ cross_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
+ weighted average in the cross-attention heads.
+ encoder_last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs.
+ encoder_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ """
+
+ last_hidden_state: paddle.Tensor = None
+ past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None
+ decoder_hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ decoder_attentions: Optional[Tuple[paddle.Tensor]] = None
+ cross_attentions: Optional[Tuple[paddle.Tensor]] = None
+ encoder_last_hidden_state: Optional[paddle.Tensor] = None
+ encoder_hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ encoder_attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class CausalLMOutput(ModelOutput):
+ """
+ Base class for causal language model (or autoregressive) outputs.
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss (for next-token prediction).
+ logits (`paddle.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ logits: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class CausalLMOutputWithPast(ModelOutput):
+ """
+ Base class for causal language model (or autoregressive) outputs.
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss (for next-token prediction).
+ logits (`paddle.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(paddle.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
+ `past_key_values` input) to speed up sequential decoding.
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ logits: paddle.Tensor = None
+ past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class CausalLMOutputWithCrossAttentions(ModelOutput):
+ """
+ Base class for causal language model (or autoregressive) outputs.
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss (for next-token prediction).
+ logits (`paddle.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ cross_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Cross attentions weights after the attention softmax, used to compute the weighted average in the
+ cross-attention heads.
+ past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `paddle.Tensor` tuples of length `config.n_layers`, with each tuple containing the cached key,
+ value states of the self-attention and the cross-attention layers if model is used in encoder-decoder
+ setting. Only relevant if `config.is_decoder = True`.
+
+ Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
+ `past_key_values` input) to speed up sequential decoding.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ logits: paddle.Tensor = None
+ past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+ cross_attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class SequenceClassifierOutputWithPast(ModelOutput):
+ """
+ Base class for outputs of sentence classification models.
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`paddle.Tensor` of shape `(batch_size, config.num_labels)`):
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
+ past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(paddle.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
+ `past_key_values` input) to speed up sequential decoding.
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ logits: paddle.Tensor = None
+ past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class MaskedLMOutput(ModelOutput):
+ """
+ Base class for masked language models outputs.
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Masked language modeling (MLM) loss.
+ logits (`paddle.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ logits: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class Seq2SeqLMOutput(ModelOutput):
+ """
+ Base class for sequence-to-sequence language models outputs.
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss.
+ logits (`paddle.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(paddle.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+ decoder_hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
+ decoder_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ cross_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
+ weighted average in the cross-attention heads.
+ encoder_last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
+ encoder_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ logits: paddle.Tensor = None
+ past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None
+ decoder_hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ decoder_attentions: Optional[Tuple[paddle.Tensor]] = None
+ cross_attentions: Optional[Tuple[paddle.Tensor]] = None
+ encoder_last_hidden_state: Optional[paddle.Tensor] = None
+ encoder_hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ encoder_attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class NextSentencePredictorOutput(ModelOutput):
+ """
+ Base class for outputs of models predicting if two sentences are consecutive or not.
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `next_sentence_label` is provided):
+ Next sequence prediction (classification) loss.
+ logits (`paddle.Tensor` of shape `(batch_size, 2)`):
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
+ before SoftMax).
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ logits: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class SequenceClassifierOutput(ModelOutput):
+ """
+ Base class for outputs of sentence classification models.
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`paddle.Tensor` of shape `(batch_size, config.num_labels)`):
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
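+
+ Example (a minimal sketch; the shapes and values are arbitrary, and fields that were not requested are simply left as `None`):
+
+ ```python
+ import paddle
+
+ # a hypothetical 3-way classifier output for a batch of 2 examples
+ outputs = SequenceClassifierOutput(logits=paddle.rand([2, 3]))
+ predicted_class = paddle.argmax(outputs.logits, axis=-1)  # shape (2,)
+ assert outputs.loss is None and outputs.attentions is None
+ ```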
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ logits: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class Seq2SeqSequenceClassifierOutput(ModelOutput):
+ """
+ Base class for outputs of sequence-to-sequence sentence classification models.
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `label` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`paddle.Tensor` of shape `(batch_size, config.num_labels)`):
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
+ past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(paddle.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+ decoder_hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
+ decoder_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ cross_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
+ weighted average in the cross-attention heads.
+ encoder_last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
+ encoder_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ logits: paddle.Tensor = None
+ past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None
+ decoder_hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ decoder_attentions: Optional[Tuple[paddle.Tensor]] = None
+ cross_attentions: Optional[Tuple[paddle.Tensor]] = None
+ encoder_last_hidden_state: Optional[paddle.Tensor] = None
+ encoder_hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ encoder_attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class MultipleChoiceModelOutput(ModelOutput):
+ """
+ Base class for outputs of multiple choice models.
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification loss.
+ logits (`paddle.Tensor` of shape `(batch_size, num_choices)`):
+ *num_choices* is the second dimension of the input tensors (see *input_ids* above).
+
+ Classification scores (before SoftMax).
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ logits: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class TokenClassifierOutput(ModelOutput):
+ """
+ Base class for outputs of token classification models.
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification loss.
+ logits (`paddle.Tensor` of shape `(batch_size, sequence_length, config.num_labels)`):
+ Classification scores (before SoftMax).
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ logits: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class QuestionAnsweringModelOutput(ModelOutput):
+ """
+ Base class for outputs of question answering models.
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
+ start_logits (`paddle.Tensor` of shape `(batch_size, sequence_length)`):
+ Span-start scores (before SoftMax).
+ end_logits (`paddle.Tensor` of shape `(batch_size, sequence_length)`):
+ Span-end scores (before SoftMax).
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ start_logits: paddle.Tensor = None
+ end_logits: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class Seq2SeqQuestionAnsweringModelOutput(ModelOutput):
+ """
+ Base class for outputs of sequence-to-sequence question answering models.
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
+ start_logits (`paddle.Tensor` of shape `(batch_size, sequence_length)`):
+ Span-start scores (before SoftMax).
+ end_logits (`paddle.Tensor` of shape `(batch_size, sequence_length)`):
+ Span-end scores (before SoftMax).
+ past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(paddle.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+ decoder_hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
+ decoder_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ cross_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
+ weighted average in the cross-attention heads.
+ encoder_last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
+ encoder_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ start_logits: paddle.Tensor = None
+ end_logits: paddle.Tensor = None
+ past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None
+ decoder_hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ decoder_attentions: Optional[Tuple[paddle.Tensor]] = None
+ cross_attentions: Optional[Tuple[paddle.Tensor]] = None
+ encoder_last_hidden_state: Optional[paddle.Tensor] = None
+ encoder_hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ encoder_attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class SemanticSegmenterOutput(ModelOutput):
+ """
+ Base class for outputs of semantic segmentation models.
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`paddle.Tensor` of shape `(batch_size, config.num_labels, logits_height, logits_width)`):
+ Classification scores for each pixel.
+
+ Note: the logits returned do not necessarily have the same size as the `pixel_values` passed as inputs.
+ This avoids doing two interpolations and losing some quality when a user needs to resize the logits to
+ the original image size as post-processing. You should always check your logits shape and resize as
+ needed.
+
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, patch_size, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, patch_size,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ logits: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class ImageClassifierOutput(ModelOutput):
+ """
+ Base class for outputs of image classification models.
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`paddle.Tensor` of shape `(batch_size, config.num_labels)`):
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states
+ (also called feature maps) of the model at the output of each stage.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, patch_size,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ logits: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class ImageClassifierOutputWithNoAttention(ModelOutput):
+ """
+ Base class for outputs of image classification models.
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`paddle.Tensor` of shape `(batch_size, config.num_labels)`):
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each stage) of shape `(batch_size, num_channels, height, width)`. Hidden-states (also
+ called feature maps) of the model at the output of each stage.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ logits: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class DepthEstimatorOutput(ModelOutput):
+ """
+ Base class for outputs of depth estimation models.
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ predicted_depth (`paddle.Tensor` of shape `(batch_size, height, width)`):
+ Predicted depth for each pixel.
+
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, num_channels, height, width)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, patch_size,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ predicted_depth: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class Wav2Vec2BaseModelOutput(ModelOutput):
+ """
+ Base class for models that have been trained with the Wav2Vec2 loss objective.
+
+ Args:
+ last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ extract_features (`paddle.Tensor` of shape `(batch_size, sequence_length, conv_dim[-1])`):
+ Sequence of extracted feature vectors of the last convolutional layer of the model.
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ last_hidden_state: paddle.Tensor = None
+ extract_features: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class XVectorOutput(ModelOutput):
+ """
+ Output type of [`Wav2Vec2ForXVector`].
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification loss.
+ logits (`paddle.Tensor` of shape `(batch_size, config.xvector_output_dim)`):
+ Classification hidden states before AMSoftmax.
+ embeddings (`paddle.Tensor` of shape `(batch_size, config.xvector_output_dim)`):
+ Utterance embeddings used for vector similarity-based retrieval.
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ logits: paddle.Tensor = None
+ embeddings: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
diff --git a/paddlespeech/s2t/models/wav2vec2/modules/modeling_wav2vec2.py b/paddlespeech/s2t/models/wav2vec2/modules/modeling_wav2vec2.py
new file mode 100644
index 000000000..3d5e5fa64
--- /dev/null
+++ b/paddlespeech/s2t/models/wav2vec2/modules/modeling_wav2vec2.py
@@ -0,0 +1,1193 @@
+# coding=utf-8
+# Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Paddle Wav2Vec2 model."""
+from dataclasses import dataclass
+from typing import Optional
+from typing import Tuple
+from typing import Union
+
+import numpy as np
+import paddle
+from paddle import nn
+
+from paddlespeech.s2t.models.wav2vec2.modules.activations import ACT2FN
+from paddlespeech.s2t.models.wav2vec2.modules.modeling_outputs import BaseModelOutput
+from paddlespeech.s2t.models.wav2vec2.modules.modeling_outputs import ModelOutput
+from paddlespeech.s2t.models.wav2vec2.modules.modeling_outputs import Wav2Vec2BaseModelOutput
+from paddlespeech.s2t.utils.log import Log
+logger = Log(__name__).getlog()
+
+
+@dataclass
+class Wav2Vec2ForPreTrainingOutput(ModelOutput):
+ """
+ Output type of [`Wav2Vec2ForPreTraining`], with potential hidden states and attentions.
+
+ Args:
+ loss (*optional*, returned when `sample_negative_indices` are passed, `paddle.Tensor` of shape `(1,)`):
+ Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the [official
+ paper](https://arxiv.org/pdf/2006.11477.pdf).
+ projected_states (`paddle.Tensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
+ Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked
+ projected quantized states.
+ projected_quantized_states (`paddle.Tensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
+ Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive
+ target vectors for contrastive loss.
+ codevector_perplexity (`paddle.Tensor` of shape `(1,)`):
+ The perplexity of the codevector distribution, used as a measure of codebook diversity.
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ contrastive_loss (*optional*, returned when `sample_negative_indices` are passed, `paddle.Tensor` of shape `(1,)`):
+ The contrastive loss (L_m) as stated in the [official paper](https://arxiv.org/pdf/2006.11477.pdf).
+ diversity_loss (*optional*, returned when `sample_negative_indices` are passed, `paddle.Tensor` of shape `(1,)`):
+ The diversity loss (L_d) as stated in the [official paper](https://arxiv.org/pdf/2006.11477.pdf).
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ projected_states: paddle.Tensor = None
+ projected_quantized_states: paddle.Tensor = None
+ codevector_perplexity: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+ contrastive_loss: Optional[paddle.Tensor] = None
+ diversity_loss: Optional[paddle.Tensor] = None
+
+
+def _compute_mask_indices(
+ shape: Tuple[int, int],
+ mask_prob: float,
+ mask_length: int,
+ attention_mask: Optional[paddle.Tensor]=None,
+ min_masks: int=0, ) -> np.ndarray:
+ """
+ Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
+ ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on
+ CPU as part of the preprocessing during training.
+
+ Args:
+ shape: The shape for which to compute masks. This should be of a tuple of size 2 where
+ the first element is the batch size and the second element is the length of the axis to span.
+ mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of
+ independently generated mask spans of length `mask_length` is computed by
+ `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
+ actual percentage will be smaller.
+ mask_length: size of the mask
+ min_masks: minimum number of masked spans
+ attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
+ each batch dimension.
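+
+ Example (an illustrative sketch; the shape, `mask_prob` and `mask_length` values are arbitrary):
+
+ ```python
+ # mask spans of length 10 over a (2, 100)-shaped feature axis
+ mask = _compute_mask_indices((2, 100), mask_prob=0.65, mask_length=10)
+ assert mask.shape == (2, 100) and mask.dtype == bool
+ # each True position belongs to one of the sampled spans
+ ```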
+ """
+ batch_size, sequence_length = shape
+
+ if mask_length < 1:
+ raise ValueError("`mask_length` has to be bigger than 0.")
+
+ if mask_length > sequence_length:
+ raise ValueError(
+ f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
+ f" and `sequence_length`: {sequence_length}`")
+
+ # epsilon is used for probabilistic rounding
+ epsilon = np.random.rand(1).item()
+
+ def compute_num_masked_span(input_length):
+ """Given input length, compute how many spans should be masked"""
+ num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
+ num_masked_span = max(num_masked_span, min_masks)
+
+ # make sure num masked span <= sequence_length
+ if num_masked_span * mask_length > sequence_length:
+ num_masked_span = sequence_length // mask_length
+
+ # make sure num_masked span is also <= input_length - (mask_length - 1)
+ if input_length - (mask_length - 1) < num_masked_span:
+ num_masked_span = max(input_length - (mask_length - 1), 0)
+
+ return num_masked_span
+
+ # compute number of masked spans in batch
+ input_lengths = (attention_mask.sum(-1).detach().tolist()
+ if attention_mask is not None else
+ [sequence_length for _ in range(batch_size)])
+
+ # SpecAugment mask to fill
+ spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
+ spec_aug_mask_idxs = []
+
+ max_num_masked_span = compute_num_masked_span(sequence_length)
+
+ if max_num_masked_span == 0:
+ return spec_aug_mask
+
+ for input_length in input_lengths:
+ # compute num of masked spans for this input
+ num_masked_span = compute_num_masked_span(input_length)
+
+ # get random indices to mask
+ spec_aug_mask_idx = np.random.choice(
+ np.arange(input_length - (mask_length - 1)),
+ num_masked_span,
+ replace=False)
+
+ # pick first sampled index that will serve as a dummy index to pad vector
+ # to ensure same dimension for all batches due to probabilistic rounding
+ # Picking first sample just pads those vectors twice.
+ if len(spec_aug_mask_idx) == 0:
+            # this case can only happen if `input_length` is strictly smaller than
+ # `sequence_length` in which case the last token has to be a padding
+ # token which we can use as a dummy mask id
+ dummy_mask_idx = sequence_length - 1
+ else:
+ dummy_mask_idx = spec_aug_mask_idx[0]
+
+ spec_aug_mask_idx = np.concatenate([
+ spec_aug_mask_idx,
+ np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) *
+ dummy_mask_idx
+ ])
+ spec_aug_mask_idxs.append(spec_aug_mask_idx)
+
+ spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)
+
+ # expand masked indices to masked spans
+ spec_aug_mask_idxs = np.broadcast_to(
+ spec_aug_mask_idxs[:, :, None],
+ (batch_size, max_num_masked_span, mask_length))
+ spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(
+ (batch_size, max_num_masked_span * mask_length))
+
+ # add offset to the starting indexes so that indexes now create a span
+ offsets = np.arange(mask_length)[None, None, :]
+ offsets = np.broadcast_to(offsets, (
+ batch_size, max_num_masked_span, mask_length)).reshape(
+ (batch_size, max_num_masked_span * mask_length))
+ spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
+
+ # ensure that we cannot have indices larger than sequence_length
+ if spec_aug_mask_idxs.max() > sequence_length - 1:
+ spec_aug_mask_idxs[spec_aug_mask_idxs >
+ sequence_length - 1] = sequence_length - 1
+
+ # scatter indices to mask
+ np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)
+
+ return spec_aug_mask
+
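+# Illustrative usage (editorial note, not part of the original patch): a
+# minimal sketch of how the SpecAugment mask helper above is typically
+# exercised; the shape and probabilities here are made up for the example.
+#
+# >>> mask = _compute_mask_indices(shape=(2, 100), mask_prob=0.5, mask_length=10)
+# >>> mask.shape, mask.dtype
+# ((2, 100), dtype('bool'))
+#
+# Each row marks at most ~50% of its 100 positions as masked, in contiguous
+# spans of length 10 (overlapping spans make `mask_prob` an upper bound).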
+
+def _sample_negative_indices(features_shape: Tuple,
+ num_negatives: int,
+ mask_time_indices: Optional[np.ndarray]=None):
+ """
+ Sample `num_negatives` vectors from feature vectors.
+ """
+ batch_size, sequence_length = features_shape
+
+ # generate indices of the positive vectors themselves, repeat them `num_negatives` times
+ sequence_length_range = np.arange(sequence_length)
+
+ # get `num_negatives` random vector indices from the same utterance
+ sampled_negative_indices = np.zeros(
+ shape=(batch_size, sequence_length, num_negatives), dtype=np.int32)
+
+    mask_time_indices = (mask_time_indices.astype(bool)
+                         if mask_time_indices is not None else
+                         np.ones(features_shape, dtype=bool))
+
+ for batch_idx in range(batch_size):
+ high = mask_time_indices[batch_idx].sum() - 1
+ mapped_masked_indices = sequence_length_range[mask_time_indices[
+ batch_idx]]
+
+ feature_indices = np.broadcast_to(
+ np.arange(high + 1)[:, None], (high + 1, num_negatives))
+ sampled_indices = np.random.randint(
+ 0, high, size=(high + 1, num_negatives))
+ # avoid sampling the same positive vector, but keep the distribution uniform
+ sampled_indices[sampled_indices >= feature_indices] += 1
+
+ # remap to actual indices
+ sampled_negative_indices[batch_idx][mask_time_indices[
+ batch_idx]] = mapped_masked_indices[sampled_indices]
+
+ # correct for batch size
+ sampled_negative_indices[batch_idx] += batch_idx * sequence_length
+
+ return sampled_negative_indices
+
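+# Editorial note (not part of the original patch): for every time step the
+# sampler above returns `num_negatives` indices that point at *other* masked
+# time steps of the same utterance, already offset by
+# `batch_idx * sequence_length` so they can index a flattened (batch * time)
+# feature matrix during contrastive-loss computation.
+#
+# >>> neg = _sample_negative_indices((2, 50), num_negatives=10)
+# >>> neg.shape
+# (2, 50, 10)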
+
+class Wav2Vec2NoLayerNormConvLayer(nn.Layer):
+ def __init__(self, config, layer_id=0):
+ super().__init__()
+ self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
+ self.out_conv_dim = config.conv_dim[layer_id]
+
+ self.conv = nn.Conv1D(
+ self.in_conv_dim,
+ self.out_conv_dim,
+ kernel_size=config.conv_kernel[layer_id],
+ stride=config.conv_stride[layer_id],
+ bias_attr=config.conv_bias, )
+ self.activation = ACT2FN[config.feat_extract_activation]
+
+ def forward(self, hidden_states):
+ hidden_states = self.conv(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ return hidden_states
+
+
+class Wav2Vec2LayerNormConvLayer(nn.Layer):
+ def __init__(self, config, layer_id=0):
+ super().__init__()
+ self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
+ self.out_conv_dim = config.conv_dim[layer_id]
+
+ self.conv = nn.Conv1D(
+ self.in_conv_dim,
+ self.out_conv_dim,
+ kernel_size=config.conv_kernel[layer_id],
+ stride=config.conv_stride[layer_id],
+ bias_attr=config.conv_bias, )
+ self.layer_norm = nn.LayerNorm(self.out_conv_dim)
+ self.activation = ACT2FN[config.feat_extract_activation]
+
+ def forward(self, hidden_states):
+ hidden_states = self.conv(hidden_states)
+ hidden_states = hidden_states.transpose([0, 2, 1])
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states = hidden_states.transpose([0, 2, 1])
+
+ hidden_states = self.activation(hidden_states)
+ return hidden_states
+
+
+class Wav2Vec2GroupNormConvLayer(nn.Layer):
+ def __init__(self, config, layer_id=0):
+ super().__init__()
+ self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
+ self.out_conv_dim = config.conv_dim[layer_id]
+
+ self.conv = nn.Conv1D(
+ self.in_conv_dim,
+ self.out_conv_dim,
+ kernel_size=config.conv_kernel[layer_id],
+ stride=config.conv_stride[layer_id],
+ bias_attr=config.conv_bias, )
+ self.activation = ACT2FN[config.feat_extract_activation]
+
+ self.layer_norm = nn.GroupNorm(
+ num_groups=self.out_conv_dim, num_channels=self.out_conv_dim)
+
+ def forward(self, hidden_states):
+ hidden_states = self.conv(hidden_states)
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ return hidden_states
+
+
+class Wav2Vec2PositionalConvEmbedding(nn.Layer):
+ def __init__(self, config):
+ super().__init__()
+ self.conv = nn.Conv1D(
+ config.hidden_size,
+ config.hidden_size,
+ kernel_size=config.num_conv_pos_embeddings,
+ padding=config.num_conv_pos_embeddings // 2,
+ groups=config.num_conv_pos_embedding_groups, )
+
+ self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
+
+ self.padding = Wav2Vec2SamePadLayer(config.num_conv_pos_embeddings)
+ self.activation = ACT2FN[config.feat_extract_activation]
+
+ def forward(self, hidden_states):
+ hidden_states = hidden_states.transpose([0, 2, 1])
+
+ hidden_states = self.conv(hidden_states)
+ hidden_states = self.padding(hidden_states)
+ hidden_states = self.activation(hidden_states)
+
+ hidden_states = hidden_states.transpose([0, 2, 1])
+ return hidden_states
+
+
+class Wav2Vec2SamePadLayer(nn.Layer):
+ def __init__(self, num_conv_pos_embeddings):
+ super().__init__()
+ self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0
+
+ def forward(self, hidden_states):
+ if self.num_pad_remove > 0:
+ hidden_states = hidden_states[:, :, :-self.num_pad_remove]
+ return hidden_states
+
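+# Editorial note (not part of the original patch): with an even kernel size K
+# and padding K // 2, a stride-1 Conv1D emits one extra frame, which is why
+# Wav2Vec2SamePadLayer trims the last position. A quick check of the length
+# arithmetic, assuming K = 128 and an input of T = 1000 frames:
+#
+# >>> T, K = 1000, 128
+# >>> T + 2 * (K // 2) - K + 1   # conv output length before trimming
+# 1001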
+
+class Wav2Vec2FeatureEncoder(nn.Layer):
+ """Construct the features from raw audio waveform"""
+
+ def __init__(self, config):
+ super().__init__()
+
+ if config.feat_extract_norm == "group":
+ conv_layers = [Wav2Vec2GroupNormConvLayer(config, layer_id=0)] + [
+ Wav2Vec2NoLayerNormConvLayer(config, layer_id=i + 1)
+ for i in range(config.num_feat_extract_layers - 1)
+ ]
+ elif config.feat_extract_norm == "layer":
+ conv_layers = [
+ Wav2Vec2LayerNormConvLayer(config, layer_id=i)
+ for i in range(config.num_feat_extract_layers)
+ ]
+ else:
+ raise ValueError(
+ f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
+ )
+ self.conv_layers = nn.LayerList(conv_layers)
+ self.gradient_checkpointing = False
+
+ def _freeze_parameters(self):
+ for param in self.parameters():
+ param.trainable = False
+
+ def forward(self, input_values):
+ hidden_states = input_values[:, None]
+ for conv_layer in self.conv_layers:
+ hidden_states = conv_layer(hidden_states)
+
+ return hidden_states
+
+
+class Wav2Vec2FeatureProjection(nn.Layer):
+ def __init__(self, config):
+ super().__init__()
+ self.layer_norm = nn.LayerNorm(
+ config.conv_dim[-1], epsilon=config.layer_norm_eps)
+ self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
+ self.dropout = nn.Dropout(config.feat_proj_dropout)
+
+ def forward(self, hidden_states):
+ # non-projected hidden states are needed for quantization
+ norm_hidden_states = self.layer_norm(hidden_states)
+ hidden_states = self.projection(norm_hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ return hidden_states, norm_hidden_states
+
+
+# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Wav2Vec2
+class Wav2Vec2Attention(nn.Layer):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float=0.0,
+ is_decoder: bool=False,
+ bias: bool=True, ):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.head_dim = embed_dim // num_heads
+
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads}).")
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias_attr=bias)
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias_attr=bias)
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias_attr=bias)
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias_attr=bias)
+
+ def _shape(self, tensor: paddle.Tensor, seq_len: int, bsz: int):
+ return paddle.reshape(tensor, (bsz, seq_len, self.num_heads,
+ self.head_dim)).transpose([0, 2, 1, 3])
+
+ def forward(
+ self,
+ hidden_states: paddle.Tensor,
+ key_value_states: Optional[paddle.Tensor]=None,
+ past_key_value: Optional[Tuple[paddle.Tensor]]=None,
+ attention_mask: Optional[paddle.Tensor]=None,
+ layer_head_mask: Optional[paddle.Tensor]=None,
+ output_attentions: bool=False, ) -> Tuple[paddle.Tensor, Optional[
+ paddle.Tensor], Optional[Tuple[paddle.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+
+ bsz, tgt_len, _ = hidden_states.shape
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scaling
+ # get key, value proj
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = paddle.concat([past_key_value[0], key_states], axis=2)
+ value_states = paddle.concat(
+ [past_key_value[1], value_states], axis=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(paddle.Tensor, paddle.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(paddle.Tensor, paddle.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len,
+ bsz).reshape(proj_shape)
+ key_states = key_states.reshape(proj_shape)
+ value_states = value_states.reshape(proj_shape)
+
+ src_len = key_states.shape[1]
+ attn_weights = paddle.bmm(query_states, key_states.transpose([0, 2, 1]))
+
+ if attn_weights.shape != [bsz * self.num_heads, tgt_len, src_len]:
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {attn_weights.shape}")
+
+ if attention_mask is not None:
+ if attention_mask.shape != [bsz, 1, tgt_len, src_len]:
+ raise ValueError(
+ f"Attention mask should be of size {[bsz, 1, tgt_len, src_len]}, but is {attention_mask.shape}"
+ )
+            attn_weights = attn_weights.reshape(
+                (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
+            attn_weights = attn_weights.reshape(
+                (bsz * self.num_heads, tgt_len, src_len))
+
+ attn_weights = nn.functional.softmax(attn_weights, axis=-1)
+
+ if layer_head_mask is not None:
+ if layer_head_mask.shape != [
+ self.num_heads,
+ ]:
+ raise ValueError(
+ f"Head mask for a single layer should be of size {[self.num_heads,]}, but is"
+ f" {layer_head_mask.shape}")
+ attn_weights = layer_head_mask.reshape(
+ (1, -1, 1, 1)) * attn_weights.reshape(
+ (bsz, self.num_heads, tgt_len, src_len))
+ attn_weights = attn_weights.reshape(
+ (bsz * self.num_heads, tgt_len, src_len))
+
+ if output_attentions:
+ # this operation is a bit awkward, but it's required to
+ # make sure that attn_weights keeps its gradient.
+ # In order to do so, attn_weights have to be reshaped
+ # twice and have to be reused in the following
+ attn_weights_reshaped = attn_weights.reshape(
+ (bsz, self.num_heads, tgt_len, src_len))
+ attn_weights = attn_weights_reshaped.reshape(
+ (bsz * self.num_heads, tgt_len, src_len))
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(
+ attn_weights, p=self.dropout, training=self.training)
+
+ attn_output = paddle.bmm(attn_probs, value_states)
+
+ if attn_output.shape != [bsz * self.num_heads, tgt_len, self.head_dim]:
+ raise ValueError(
+ f"`attn_output` should be of size {[bsz, self.num_heads, tgt_len, self.head_dim]}, but is"
+ f" {attn_output.shape}")
+
+ attn_output = attn_output.reshape(
+ (bsz, self.num_heads, tgt_len, self.head_dim))
+ attn_output = attn_output.transpose([0, 2, 1, 3])
+
+        # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
+        # partitioned across GPUs when using tensor-parallelism.
+ attn_output = attn_output.reshape((bsz, tgt_len, self.embed_dim))
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped, past_key_value
+
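+# Shape sketch (editorial note, not part of the original patch), assuming the
+# base configuration with embed_dim = 768 and num_heads = 12:
+#
+# >>> bsz, tgt_len, embed_dim, num_heads = 4, 100, 768, 12
+# >>> head_dim = embed_dim // num_heads
+# >>> (bsz * num_heads, tgt_len, head_dim)   # query/key/value states after _shape + reshape
+# (48, 100, 64)
+# >>> (bsz * num_heads, tgt_len, tgt_len)    # attn_weights for self-attention
+# (48, 100, 100)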
+
+class Wav2Vec2FeedForward(nn.Layer):
+ def __init__(self, config):
+ super().__init__()
+ self.intermediate_dropout = nn.Dropout(config.activation_dropout)
+
+ self.intermediate_dense = nn.Linear(config.hidden_size,
+ config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ self.output_dense = nn.Linear(config.intermediate_size,
+ config.hidden_size)
+ self.output_dropout = nn.Dropout(config.hidden_dropout)
+
+ def forward(self, hidden_states):
+ hidden_states = self.intermediate_dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ hidden_states = self.intermediate_dropout(hidden_states)
+
+ hidden_states = self.output_dense(hidden_states)
+ hidden_states = self.output_dropout(hidden_states)
+ return hidden_states
+
+
+class Wav2Vec2EncoderLayer(nn.Layer):
+ def __init__(self, config):
+ super().__init__()
+ self.attention = Wav2Vec2Attention(
+ embed_dim=config.hidden_size,
+ num_heads=config.num_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=False, )
+ self.dropout = nn.Dropout(config.hidden_dropout)
+ self.layer_norm = nn.LayerNorm(
+ config.hidden_size, epsilon=config.layer_norm_eps)
+ self.feed_forward = Wav2Vec2FeedForward(config)
+ self.final_layer_norm = nn.LayerNorm(
+ config.hidden_size, epsilon=config.layer_norm_eps)
+
+ def forward(self,
+ hidden_states,
+ attention_mask=None,
+ output_attentions=False):
+ attn_residual = hidden_states
+ hidden_states, attn_weights, _ = self.attention(
+ hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = attn_residual + hidden_states
+
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states = hidden_states + self.feed_forward(hidden_states)
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ outputs = (hidden_states, )
+
+ if output_attentions:
+ outputs += (attn_weights, )
+
+ return outputs
+
+
+class Wav2Vec2EncoderLayerStableLayerNorm(nn.Layer):
+ def __init__(self, config):
+ super().__init__()
+ self.attention = Wav2Vec2Attention(
+ embed_dim=config.hidden_size,
+ num_heads=config.num_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=False, )
+ self.dropout = nn.Dropout(config.hidden_dropout)
+ self.layer_norm = nn.LayerNorm(
+ config.hidden_size, epsilon=config.layer_norm_eps)
+ self.feed_forward = Wav2Vec2FeedForward(config)
+ self.final_layer_norm = nn.LayerNorm(
+ config.hidden_size, epsilon=config.layer_norm_eps)
+
+ def forward(self,
+ hidden_states,
+ attention_mask=None,
+ output_attentions=False):
+ attn_residual = hidden_states
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states, attn_weights, _ = self.attention(
+ hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = attn_residual + hidden_states
+ hidden_states = hidden_states + self.feed_forward(
+ self.final_layer_norm(hidden_states))
+
+ outputs = (hidden_states, )
+
+ if output_attentions:
+ outputs += (attn_weights, )
+
+ return outputs
+
+
+class Wav2Vec2Encoder(nn.Layer):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.pos_conv_embed = Wav2Vec2PositionalConvEmbedding(config)
+ self.layer_norm = nn.LayerNorm(
+ config.hidden_size, epsilon=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout)
+ self.layers = nn.LayerList([
+ Wav2Vec2EncoderLayer(config)
+ for _ in range(config.num_hidden_layers)
+ ])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ output_attentions=False,
+ output_hidden_states=False,
+ return_dict=True, ):
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ if attention_mask is not None:
+ # make sure padded tokens output 0
+            expand_attention_mask = attention_mask.unsqueeze(
+                -1).repeat_interleave(
+                    hidden_states.shape[2], axis=2)
+            hidden_states[~expand_attention_mask] = 0
+
+            # extend attention_mask
+            attention_mask = 1.0 - attention_mask[:, None, None, :].astype(
+                hidden_states.dtype)
+            attention_mask = attention_mask * np.finfo(np.float32).min
+            attention_mask = attention_mask.expand(
+                [attention_mask.shape[0], 1, attention_mask.shape[-1],
+                 attention_mask.shape[-1]])
+
+ position_embeddings = self.pos_conv_embed(hidden_states)
+ hidden_states = hidden_states + position_embeddings
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ #deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
+
+ for layer in self.layers:
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states, )
+
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ dropout_probability = np.random.uniform(0, 1)
+
+ skip_the_layer = True if self.training and (
+ dropout_probability < self.config.layerdrop) else False
+ if not skip_the_layer: # or deepspeed_zero3_is_enabled:
+ # under deepspeed zero3 all gpus must run in sync
+                # gradient checkpointing is not wired up in this port, so the
+                # layer is always called directly even when
+                # `self.gradient_checkpointing` is set
+                layer_outputs = layer(
+                    hidden_states,
+                    attention_mask=attention_mask,
+                    output_attentions=output_attentions)
+                hidden_states = layer_outputs[0]
+
+ if skip_the_layer:
+ layer_outputs = (None, None)
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1], )
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states, )
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in
+ [hidden_states, all_hidden_states, all_self_attentions]
+ if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions, )
+
+
+class Wav2Vec2EncoderStableLayerNorm(nn.Layer):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.pos_conv_embed = Wav2Vec2PositionalConvEmbedding(config)
+ self.layer_norm = nn.LayerNorm(
+ config.hidden_size, epsilon=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout)
+ self.layers = nn.LayerList([
+ Wav2Vec2EncoderLayerStableLayerNorm(config)
+ for _ in range(config.num_hidden_layers)
+ ])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ output_attentions=False,
+ output_hidden_states=False,
+ return_dict=True, ):
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ if attention_mask is not None:
+ # make sure padded tokens are not attended to
+ expand_attention_mask = attention_mask.unsqueeze(
+ -1).repeat_interleave(
+ hidden_states.shape[2], axis=2)
+ hidden_states[~expand_attention_mask] = 0
+
+ # extend attention_mask
+            attention_mask = 1.0 - attention_mask[:, None, None, :].astype(
+                hidden_states.dtype)
+            attention_mask = attention_mask * np.finfo(np.float32).min
+            attention_mask = attention_mask.expand(
+                [attention_mask.shape[0], 1, attention_mask.shape[-1],
+                 attention_mask.shape[-1]])
+
+ position_embeddings = self.pos_conv_embed(hidden_states)
+ hidden_states = hidden_states + position_embeddings
+ hidden_states = self.dropout(hidden_states)
+
+ for layer in self.layers:
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states, )
+
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ dropout_probability = np.random.uniform(0, 1)
+
+ skip_the_layer = True if self.training and (
+ dropout_probability < self.config.layerdrop) else False
+ if not skip_the_layer: # or deepspeed_zero3_is_enabled:
+ # under deepspeed zero3 all gpus must run in sync
+ # XXX: could optimize this like synced_gpus in generate_utils but not sure if it's worth the code complication
+                # gradient checkpointing is not wired up in this port, so the
+                # layer is always called directly even when
+                # `self.gradient_checkpointing` is set
+                layer_outputs = layer(
+                    hidden_states,
+                    attention_mask=attention_mask,
+                    output_attentions=output_attentions)
+                hidden_states = layer_outputs[0]
+
+ if skip_the_layer:
+ layer_outputs = (None, None)
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1], )
+
+ hidden_states = self.layer_norm(hidden_states)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states, )
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in
+ [hidden_states, all_hidden_states, all_self_attentions]
+ if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions, )
+
+
+class Wav2Vec2GumbelVectorQuantizer(nn.Layer):
+ """
+    Vector quantization using Gumbel softmax. See [Categorical Reparameterization
+    with Gumbel-Softmax](https://arxiv.org/pdf/1611.01144.pdf) for more information.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ self.num_groups = config.num_codevector_groups
+ self.num_vars = config.num_codevectors_per_group
+
+ if config.codevector_dim % self.num_groups != 0:
+ raise ValueError(
+ f"`config.codevector_dim {config.codevector_dim} must be divisible "
+ f"by `config.num_codevector_groups` {self.num_groups} for concatenation"
+ )
+
+ # storage for codebook variables (codewords)
+ self.codevectors = paddle.static.create_parameter(
+ shape=[
+ 1, self.num_groups * self.num_vars,
+ config.codevector_dim // self.num_groups
+ ],
+ dtype='float32')
+ self.weight_proj = nn.Linear(config.conv_dim[-1],
+ self.num_groups * self.num_vars)
+
+ # can be decayed for training
+ self.temperature = 2
+
+ @staticmethod
+ def _compute_perplexity(probs, mask=None):
+ if mask is not None:
+ mask_extended = mask.flatten()[:, None, None].expand(probs.shape)
+ probs = paddle.where(mask_extended, probs, paddle.zeros_like(probs))
+            marginal_probs = probs.sum(axis=0) / mask.sum()
+        else:
+            marginal_probs = probs.mean(axis=0)
+
+        perplexity = paddle.exp(-paddle.sum(
+            marginal_probs * paddle.log(marginal_probs + 1e-7), axis=-1)).sum()
+ return perplexity
+
+ def forward(self, hidden_states, mask_time_indices=None):
+ batch_size, sequence_length, hidden_size = hidden_states.shape
+
+ # project to codevector dim
+ hidden_states = self.weight_proj(hidden_states)
+ hidden_states = hidden_states.reshape(
+ (batch_size * sequence_length * self.num_groups, -1))
+
+ if self.training:
+            # sample codevector probs via gumbel-softmax in a differentiable way
+            codevector_probs = nn.functional.gumbel_softmax(
+                hidden_states.astype('float32'),
+                temperature=self.temperature,
+                hard=True).astype(hidden_states.dtype)
+
+            # compute perplexity
+            codevector_soft_dist = nn.functional.softmax(
+                hidden_states.reshape((batch_size * sequence_length,
+                                       self.num_groups, -1)).astype('float32'),
+                axis=-1)
+            perplexity = self._compute_perplexity(codevector_soft_dist,
+                                                  mask_time_indices)
+        else:
+            # take argmax in a non-differentiable way and compute the hard
+            # codevector distribution (one hot)
+            codevector_idx = hidden_states.argmax(axis=-1)
+            codevector_probs = nn.functional.one_hot(
+                codevector_idx,
+                num_classes=hidden_states.shape[-1]).astype(hidden_states.dtype)
+ codevector_probs = codevector_probs.reshape(
+ (batch_size * sequence_length, self.num_groups, -1))
+
+ perplexity = self._compute_perplexity(codevector_probs,
+ mask_time_indices)
+
+ codevector_probs = codevector_probs.reshape(
+ (batch_size * sequence_length, -1))
+ # use probs to retrieve codevectors
+ codevectors_per_group = codevector_probs.unsqueeze(
+ -1) * self.codevectors
+ codevectors = codevectors_per_group.reshape(
+ (batch_size * sequence_length, self.num_groups, self.num_vars, -1))
+ codevectors = codevectors.sum(-2).reshape(
+ (batch_size, sequence_length, -1))
+
+ return codevectors, perplexity
+
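+# Editorial note (not part of the original patch): with the usual pretraining
+# setup of num_codevector_groups = 2 and num_codevectors_per_group = 320, the
+# quantizer can express 320 ** 2 = 102400 distinct codes, and the perplexity
+# returned above is bounded by 2 * 320 = 640 (uniform usage of every entry in
+# both codebooks), which is what the diversity loss pushes towards.
+#
+# >>> num_groups, num_vars = 2, 320
+# >>> num_vars ** num_groups, num_groups * num_vars
+# (102400, 640)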
+
+class Wav2Vec2Adapter(nn.Layer):
+ def __init__(self, config):
+ super().__init__()
+
+ # feature dim might need to be down-projected
+ if config.output_hidden_size != config.hidden_size:
+ self.proj = nn.Linear(config.hidden_size, config.output_hidden_size)
+ self.proj_layer_norm = nn.LayerNorm(config.output_hidden_size)
+ else:
+ self.proj = self.proj_layer_norm = None
+
+ self.layers = nn.LayerList(
+ Wav2Vec2AdapterLayer(config)
+ for _ in range(config.num_adapter_layers))
+ self.layerdrop = config.layerdrop
+
+ def forward(self, hidden_states):
+ # down project hidden_states if necessary
+ if self.proj is not None and self.proj_layer_norm is not None:
+ hidden_states = self.proj(hidden_states)
+ hidden_states = self.proj_layer_norm(hidden_states)
+
+ hidden_states = hidden_states.transpose([0, 2, 1])
+
+ for layer in self.layers:
+ layerdrop_prob = np.random.random()
+ if not self.training or (layerdrop_prob > self.layerdrop):
+ hidden_states = layer(hidden_states)
+
+ hidden_states = hidden_states.transpose([0, 2, 1])
+ return hidden_states
+
+
+class Wav2Vec2AdapterLayer(nn.Layer):
+ def __init__(self, config):
+ super().__init__()
+ self.conv = nn.Conv1D(
+ config.output_hidden_size,
+ 2 * config.output_hidden_size,
+ config.adapter_kernel_size,
+ stride=config.adapter_stride,
+ padding=1, )
+
+ def forward(self, hidden_states):
+ hidden_states = self.conv(hidden_states)
+ hidden_states = nn.functional.glu(hidden_states, axis=1)
+
+ return hidden_states
+
+
+class Wav2Vec2Model(nn.Layer):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.feature_extractor = Wav2Vec2FeatureEncoder(config)
+ self.feature_projection = Wav2Vec2FeatureProjection(config)
+
+ # model only needs masking vector if mask prob is > 0.0
+ if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
+ # self.masked_spec_embed = nn.Parameter(paddle.Tensor(config.hidden_size).uniform_())
+ #self.masked_spec_embed = paddle.uniform([config.hidden_size])
+ self.masked_spec_embed = paddle.static.create_parameter(
+ shape=[config.hidden_size],
+ dtype='float32',
+ default_initializer=paddle.nn.initializer.Uniform(
+ low=0, high=1.0))
+ if config.do_stable_layer_norm:
+ self.encoder = Wav2Vec2EncoderStableLayerNorm(config)
+ else:
+ self.encoder = Wav2Vec2Encoder(config)
+
+ self.adapter = Wav2Vec2Adapter(config) if config.add_adapter else None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def freeze_feature_encoder(self):
+ """
+ Calling this function will disable the gradient computation for the feature encoder so that its parameter will
+ not be updated during training.
+ """
+ self.feature_extractor._freeze_parameters()
+
+ def _mask_hidden_states(
+ self,
+ hidden_states: paddle.Tensor,
+ mask_time_indices: Optional[paddle.Tensor]=None,
+ attention_mask: Optional[paddle.Tensor]=None, ):
+ """
+ Masks extracted features along time axis and/or along feature axis according to
+ [SpecAugment](https://arxiv.org/abs/1904.08779).
+ """
+ # `config.apply_spec_augment` can set masking to False
+ if not getattr(self.config, "apply_spec_augment", True):
+ return hidden_states
+
+ # generate indices & apply SpecAugment along time axis
+ batch_size, sequence_length, hidden_size = hidden_states.shape
+ if mask_time_indices is not None:
+ # apply SpecAugment along time axis with given mask_time_indices
+            hidden_states[mask_time_indices] = self.masked_spec_embed.astype(
+                hidden_states.dtype)
+ elif self.config.mask_time_prob > 0 and self.training:
+ mask_time_indices = _compute_mask_indices(
+ (batch_size, sequence_length),
+ mask_prob=self.config.mask_time_prob,
+ mask_length=self.config.mask_time_length,
+ attention_mask=attention_mask,
+ min_masks=self.config.mask_time_min_masks, )
+ mask_time_indices = paddle.to_tensor(
+ mask_time_indices, dtype=paddle.bool)
+            hidden_states[mask_time_indices] = self.masked_spec_embed.astype(
+                hidden_states.dtype)
+
+ if self.config.mask_feature_prob > 0 and self.training:
+ # generate indices & apply SpecAugment along feature axis
+ mask_feature_indices = _compute_mask_indices(
+ (batch_size, hidden_size),
+ mask_prob=self.config.mask_feature_prob,
+ mask_length=self.config.mask_feature_length,
+ min_masks=self.config.mask_feature_min_masks, )
+ mask_feature_indices = paddle.to_tensor(
+ mask_feature_indices, dtype=paddle.bool)
+            mask_feature_indices = mask_feature_indices[:, None].expand(
+                [-1, sequence_length, -1])
+ hidden_states[mask_feature_indices] = 0
+
+ return hidden_states
+
+ def forward(
+ self,
+ input_values: Optional[paddle.Tensor],
+ attention_mask: Optional[paddle.Tensor]=None,
+ mask_time_indices: Optional[paddle.Tensor]=None,
+ output_attentions: Optional[bool]=None,
+ output_hidden_states: Optional[bool]=None,
+ return_dict: Optional[bool]=None,
+ ) -> Union[Tuple, Wav2Vec2BaseModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (output_hidden_states
+ if output_hidden_states is not None else
+ self.config.output_hidden_states)
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ extract_features = self.feature_extractor(input_values)
+ extract_features = extract_features.transpose([0, 2, 1])
+
+ if attention_mask is not None:
+ # compute reduced attention_mask corresponding to feature vectors
+ attention_mask = self._get_feature_vector_attention_mask(
+ extract_features.shape[1], attention_mask, add_adapter=False)
+ hidden_states, extract_features = self.feature_projection(
+ extract_features)
+ hidden_states = self._mask_hidden_states(
+ hidden_states,
+ mask_time_indices=mask_time_indices,
+ attention_mask=attention_mask)
+
+ encoder_outputs = self.encoder(
+ hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict, )
+
+ hidden_states = encoder_outputs[0]
+
+ if self.adapter is not None:
+ hidden_states = self.adapter(hidden_states)
+
+ if not return_dict:
+ return (hidden_states, extract_features) + encoder_outputs[1:]
+
+ return Wav2Vec2BaseModelOutput(
+ last_hidden_state=hidden_states,
+ extract_features=extract_features,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions, )
+
+ def post_init(self):
+ """
+ A method executed at the end of each Transformer model initialization, to execute code that needs the model's
+ modules properly initialized (such as weight initialization).
+ """
+ # self.init_weights()
+ # self._backward_compatibility_gradient_checkpointing()
+ pass
+
+
+class Wav2Vec2ConfigPure():
+ model_type = "wav2vec2"
+
+ def __init__(self, config):
+ self.output_attentions = False
+ self.output_hidden_states = False
+ self.use_return_dict = True
+
+ self.pad_token_id = config.pad_token_id
+ self.bos_token_id = config.bos_token_id
+ self.eos_token_id = config.eos_token_id
+ self.hidden_size = config.hidden_size
+ self.feat_extract_norm = config.feat_extract_norm
+ self.feat_extract_activation = config.feat_extract_activation
+ self.conv_dim = config.conv_dim
+ self.conv_stride = config.conv_stride
+ self.conv_kernel = config.conv_kernel
+ self.conv_bias = config.conv_bias
+ self.num_conv_pos_embeddings = config.num_conv_pos_embeddings
+ self.num_conv_pos_embedding_groups = config.num_conv_pos_embedding_groups
+ self.num_feat_extract_layers = len(self.conv_dim)
+ self.num_hidden_layers = config.num_hidden_layers
+ self.intermediate_size = config.intermediate_size
+ self.hidden_act = config.hidden_act
+ self.num_attention_heads = config.num_attention_heads
+ self.hidden_dropout = config.hidden_dropout
+ self.attention_dropout = config.attention_dropout
+ self.activation_dropout = config.activation_dropout
+ self.feat_proj_dropout = config.feat_proj_dropout
+ self.final_dropout = config.final_dropout
+ self.layerdrop = config.layerdrop
+ self.layer_norm_eps = config.layer_norm_eps
+ self.initializer_range = config.initializer_range
+ self.vocab_size = config.vocab_size
+ self.do_stable_layer_norm = config.do_stable_layer_norm
+ self.use_weighted_layer_sum = config.use_weighted_layer_sum
+
+ if ((len(self.conv_stride) != self.num_feat_extract_layers) or
+ (len(self.conv_kernel) != self.num_feat_extract_layers) or
+ (len(self.conv_dim) != self.num_feat_extract_layers)):
+ raise ValueError(
+ "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
+ " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
+ f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
+ f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
+
+ # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
+ self.apply_spec_augment = config.apply_spec_augment
+ self.mask_time_prob = config.mask_time_prob
+ self.mask_time_length = config.mask_time_length
+ self.mask_time_min_masks = config.mask_time_min_masks
+ self.mask_feature_prob = config.mask_feature_prob
+ self.mask_feature_length = config.mask_feature_length
+ self.mask_feature_min_masks = config.mask_feature_min_masks
+
+ # parameters for pretraining with codevector quantized representations
+ self.num_codevectors_per_group = config.num_codevectors_per_group
+ self.num_codevector_groups = config.num_codevector_groups
+ self.contrastive_logits_temperature = config.contrastive_logits_temperature
+ self.feat_quantizer_dropout = config.feat_quantizer_dropout
+ self.num_negatives = config.num_negatives
+ self.codevector_dim = config.codevector_dim
+ self.proj_codevector_dim = config.proj_codevector_dim
+ self.diversity_loss_weight = config.diversity_loss_weight
+
+ # ctc loss
+ self.ctc_loss_reduction = config.ctc_loss_reduction
+ self.ctc_zero_infinity = config.ctc_zero_infinity
+
+ # adapter
+ self.add_adapter = config.add_adapter
+ self.adapter_kernel_size = config.adapter_kernel_size
+ self.adapter_stride = config.adapter_stride
+ self.num_adapter_layers = config.num_adapter_layers
+ self.output_hidden_size = config.output_hidden_size or config.hidden_size
+
+ @property
+ def inputs_to_logits_ratio(self):
+ return functools.reduce(operator.mul, self.conv_stride, 1)
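+
+# Editorial note (not part of the original patch): for the standard wav2vec2
+# feature-extractor strides (5, 2, 2, 2, 2, 2, 2), `inputs_to_logits_ratio`
+# evaluates to 320, i.e. one encoder frame per 320 input samples (20 ms of
+# audio at 16 kHz).
+#
+# >>> import functools, operator
+# >>> functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1)
+# 320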
diff --git a/paddlespeech/s2t/models/wav2vec2/processing/signal_processing.py b/paddlespeech/s2t/models/wav2vec2/processing/signal_processing.py
new file mode 100644
index 000000000..9998a8e5e
--- /dev/null
+++ b/paddlespeech/s2t/models/wav2vec2/processing/signal_processing.py
@@ -0,0 +1,236 @@
+"""
+Low level signal processing utilities
+Authors
+ * Peter Plantinga 2020
+ * Francois Grondin 2020
+ * William Aris 2020
+ * Samuele Cornell 2020
+ * Sarthak Yadav 2022
+"""
+import numpy as np
+import paddle
+
+
+def blackman_window(window_length, periodic=True):
+ """Blackman window function.
+ Arguments
+ ---------
+ window_length : int
+ Controlling the returned window size.
+ periodic : bool
+ Determines whether the returned window trims off the
+ last duplicate value from the symmetric window
+
+ Returns
+ -------
+ A 1-D tensor of size (window_length) containing the window
+ """
+ if window_length == 0:
+ return []
+ if window_length == 1:
+ return paddle.ones([1])
+ if periodic:
+ window_length += 1
+ window = paddle.arange(window_length) * (np.pi / (window_length - 1))
+ window = 0.08 * paddle.cos(window * 4) - 0.5 * paddle.cos(window * 2) + 0.42
+ return window[:-1] if periodic else window
+
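+# Quick sanity check (editorial note, not part of the original patch): the
+# symmetric Blackman window is zero at both endpoints and one at its centre.
+#
+# >>> w = blackman_window(5, periodic=False)
+# >>> # w is approximately [0.0, 0.34, 1.0, 0.34, 0.0]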
+
+def compute_amplitude(waveforms, lengths=None, amp_type="avg", scale="linear"):
+ """Compute amplitude of a batch of waveforms.
+ Arguments
+ ---------
+    waveforms : tensor
+ The waveforms used for computing amplitude.
+ Shape should be `[time]` or `[batch, time]` or
+ `[batch, time, channels]`.
+ lengths : tensor
+ The lengths of the waveforms excluding the padding.
+ Shape should be a single dimension, `[batch]`.
+ amp_type : str
+ Whether to compute "avg" average or "peak" amplitude.
+ Choose between ["avg", "peak"].
+ scale : str
+ Whether to compute amplitude in "dB" or "linear" scale.
+ Choose between ["linear", "dB"].
+ Returns
+ -------
+ The average amplitude of the waveforms.
+ Example
+ -------
+ >>> signal = paddle.sin(paddle.arange(16000.0)).unsqueeze(0)
+    >>> compute_amplitude(signal, signal.shape[1])
+ tensor([[0.6366]])
+ """
+ if len(waveforms.shape) == 1:
+ waveforms = waveforms.unsqueeze(0)
+
+ assert amp_type in ["avg", "peak"]
+ assert scale in ["linear", "dB"]
+
+ if amp_type == "avg":
+ if lengths is None:
+ out = paddle.mean(paddle.abs(waveforms), axis=1, keepdim=True)
+ else:
+ wav_sum = paddle.sum(paddle.abs(waveforms), axis=1, keepdim=True)
+ out = wav_sum / lengths
+ elif amp_type == "peak":
+        out = paddle.max(paddle.abs(waveforms), axis=1, keepdim=True)
+ else:
+ raise NotImplementedError
+
+ if scale == "linear":
+ return out
+ elif scale == "dB":
+ return paddle.clip(20 * paddle.log10(out), min=-80) # clamp zeros
+ else:
+ raise NotImplementedError
+
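+# Editorial note (not part of the original patch): in "dB" mode the returned
+# value is 20 * log10(amplitude), clipped at -80 dB so that silence does not
+# produce -inf. For example, a peak amplitude of 0.5 corresponds to about
+# -6 dB:
+#
+# >>> import math
+# >>> 20 * math.log10(0.5)
+# -6.020599913279624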
+
+def convolve1d(
+ waveform,
+ kernel,
+ padding=0,
+ pad_type="constant",
+ stride=1,
+ groups=1,
+ use_fft=False,
+ rotation_index=0, ):
+ """Use paddle.nn.functional to perform 1d padding and conv.
+ Arguments
+ ---------
+ waveform : tensor
+ The tensor to perform operations on.
+ kernel : tensor
+ The filter to apply during convolution.
+ padding : int or tuple
+ The padding (pad_left, pad_right) to apply.
+ If an integer is passed instead, this is passed
+ to the conv1d function and pad_type is ignored.
+ pad_type : str
+ The type of padding to use. Passed directly to
+ `paddle.nn.functional.pad`, see Paddle documentation
+ for available options.
+ stride : int
+ The number of units to move each time convolution is applied.
+ Passed to conv1d. Has no effect if `use_fft` is True.
+ groups : int
+ This option is passed to `conv1d` to split the input into groups for
+ convolution. Input channels should be divisible by the number of groups.
+ use_fft : bool
+ When `use_fft` is passed `True`, then compute the convolution in the
+ spectral domain using complex multiply. This is more efficient on CPU
+ when the size of the kernel is large (e.g. reverberation). WARNING:
+ Without padding, circular convolution occurs. This makes little
+ difference in the case of reverberation, but may make more difference
+ with different kernels.
+ rotation_index : int
+ This option only applies if `use_fft` is true. If so, the kernel is
+ rolled by this amount before convolution to shift the output location.
+ Returns
+ -------
+ The convolved waveform.
+ Example
+ -------
+ >>> from speechbrain.dataio.dataio import read_audio
+ >>> signal = read_audio('tests/samples/single-mic/example1.wav')
+ >>> signal = signal.unsqueeze(0).unsqueeze(2)
+ >>> kernel = paddle.rand([1, 10, 1])
+ >>> signal = convolve1d(signal, kernel, padding=(9, 0))
+ """
+ if len(waveform.shape) != 3:
+ raise ValueError("Convolve1D expects a 3-dimensional tensor")
+
+ # Move time dimension last, which pad and fft and conv expect.
+ waveform = waveform.transpose([0, 2, 1])
+ kernel = kernel.transpose([0, 2, 1])
+ # Padding can be a tuple (left_pad, right_pad) or an int
+ if isinstance(padding, tuple):
+ waveform = paddle.nn.functional.pad(
+ x=waveform, pad=padding, mode=pad_type, data_format='NCL')
+
+ # This approach uses FFT, which is more efficient if the kernel is large
+ if use_fft:
+ # Pad kernel to same length as signal, ensuring correct alignment
+ zero_length = waveform.shape[-1] - kernel.shape[-1]
+
+ # Handle case where signal is shorter
+ if zero_length < 0:
+ kernel = kernel[..., :zero_length]
+ zero_length = 0
+
+ # Perform rotation to ensure alignment
+ zeros = paddle.zeros(
+ [kernel.shape[0], kernel.shape[1], zero_length], dtype=kernel.dtype)
+ after_index = kernel[..., rotation_index:]
+ before_index = kernel[..., :rotation_index]
+ kernel = paddle.concat((after_index, zeros, before_index), axis=-1)
+
+ # Multiply in frequency domain to convolve in time domain
+ import paddle.fft as fft
+
+ result = fft.rfft(waveform) * fft.rfft(kernel)
+ convolved = fft.irfft(result, n=waveform.shape[-1])
+
+ # Use the implementation given by paddle, which should be efficient on GPU
+ else:
+ convolved = paddle.nn.functional.conv1d(
+ x=waveform,
+ weight=kernel,
+ stride=stride,
+ groups=groups,
+ padding=padding if not isinstance(padding, tuple) else 0, )
+
+ # Return time dimension to the second dimension.
+ return convolved.transpose([0, 2, 1])
+
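+# Editorial note (not part of the original patch): passing a (left, right)
+# padding tuple keeps the output the same length as the input. For a causal
+# FIR kernel of length K the usual choice is padding=(K - 1, 0), which is what
+# the docstring example above uses with K = 10.
+#
+# >>> T, K = 1000, 10
+# >>> (T + (K - 1)) - K + 1   # output length of the padded convolution
+# 1000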
+
+def notch_filter(notch_freq, filter_width=101, notch_width=0.05):
+ """Returns a notch filter constructed from a high-pass and low-pass filter.
+ (from https://tomroelandts.com/articles/
+ how-to-create-simple-band-pass-and-band-reject-filters)
+ Arguments
+ ---------
+ notch_freq : float
+ frequency to put notch as a fraction of the
+ sampling rate / 2. The range of possible inputs is 0 to 1.
+ filter_width : int
+ Filter width in samples. Longer filters have
+ smaller transition bands, but are more inefficient.
+ notch_width : float
+ Width of the notch, as a fraction of the sampling_rate / 2.
+ """
+
+ # Check inputs
+ assert 0 < notch_freq <= 1
+ assert filter_width % 2 != 0
+ pad = filter_width // 2
+ inputs = paddle.arange(filter_width) - pad
+
+ # Avoid frequencies that are too low
+ notch_freq += notch_width
+
+ # Define sinc function, avoiding division by zero
+ def sinc(x):
+ "Computes the sinc function."
+
+ def _sinc(x):
+ return paddle.sin(x) / x
+
+ # The zero is at the middle index
+ return paddle.concat(
+ [_sinc(x[:pad]), paddle.ones([1]), _sinc(x[pad + 1:])])
+
+ # Compute a low-pass filter with cutoff frequency notch_freq.
+ hlpf = sinc(3 * (notch_freq - notch_width) * inputs)
+ hlpf *= blackman_window(filter_width)
+ hlpf /= paddle.sum(hlpf)
+
+ # Compute a high-pass filter with cutoff frequency notch_freq.
+ hhpf = sinc(3 * (notch_freq + notch_width) * inputs)
+ hhpf *= blackman_window(filter_width)
+ hhpf /= -paddle.sum(hhpf)
+ hhpf[pad] += 1
+
+ # Adding filters creates notch filter
+    return (hlpf + hhpf).reshape([1, -1, 1])
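+
+# Editorial note (not part of the original patch): the notch filter is the sum
+# of a low-pass filter cutting off below the notch and a spectrally inverted
+# high-pass filter cutting off above it, so only the band around `notch_freq`
+# is attenuated. A hedged usage sketch, reusing `convolve1d` above with an
+# assumed signal of shape [batch, time, 1]:
+#
+# >>> kernel = notch_filter(0.25, filter_width=101, notch_width=0.05)
+# >>> kernel.shape   # [1, filter_width, 1], ready for convolve1d
+# [1, 101, 1]
+# >>> # filtered = convolve1d(signal, kernel, padding=101 // 2)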
diff --git a/paddlespeech/s2t/models/wav2vec2/processing/speech_augmentation.py b/paddlespeech/s2t/models/wav2vec2/processing/speech_augmentation.py
new file mode 100644
index 000000000..471ab7657
--- /dev/null
+++ b/paddlespeech/s2t/models/wav2vec2/processing/speech_augmentation.py
@@ -0,0 +1,716 @@
+import math
+
+import paddle
+import paddle.nn as nn
+
+from paddlespeech.s2t.models.wav2vec2.processing.signal_processing import compute_amplitude
+from paddlespeech.s2t.models.wav2vec2.processing.signal_processing import convolve1d
+from paddlespeech.s2t.models.wav2vec2.processing.signal_processing import notch_filter
+
+
+class SpeedPerturb(nn.Layer):
+ """Slightly speed up or slow down an audio signal.
+ Resample the audio signal at a rate that is similar to the original rate,
+ to achieve a slightly slower or slightly faster signal. This technique is
+ outlined in the paper: "Audio Augmentation for Speech Recognition"
+ Arguments
+ ---------
+ orig_freq : int
+ The frequency of the original signal.
+ speeds : list
+ The speeds that the signal should be changed to, as a percentage of the
+ original signal (i.e. `speeds` is divided by 100 to get a ratio).
+ perturb_prob : float
+ The chance that the batch will be speed-
+ perturbed. By default, every batch is perturbed.
+ Example
+ -------
+ >>> from speechbrain.dataio.dataio import read_audio
+ >>> signal = read_audio('tests/samples/single-mic/example1.wav')
+ >>> perturbator = SpeedPerturb(orig_freq=16000, speeds=[90])
+ >>> clean = signal.unsqueeze(0)
+ >>> perturbed = perturbator(clean)
+ >>> clean.shape
+    [1, 52173]
+    >>> perturbed.shape
+    [1, 46956]
+ """
+
+ def __init__(
+ self,
+ orig_freq,
+ speeds=[90, 100, 110],
+ perturb_prob=1.0, ):
+ super().__init__()
+ self.orig_freq = orig_freq
+ self.speeds = speeds
+ self.perturb_prob = perturb_prob
+
+ # Initialize index of perturbation
+ self.samp_index = 0
+
+ # Initialize resamplers
+ self.resamplers = []
+ for speed in self.speeds:
+ config = {
+ "orig_freq": self.orig_freq,
+ "new_freq": self.orig_freq * speed // 100,
+ }
+ self.resamplers.append(Resample(**config))
+
+ def forward(self, waveform):
+ """
+ Arguments
+ ---------
+        waveform : tensor
+            Shape should be `[batch, time]` or `[batch, time, channels]`.
+ Returns
+ -------
+ Tensor of shape `[batch, time]` or `[batch, time, channels]`.
+ """
+
+ # Don't perturb (return early) 1-`perturb_prob` portion of the batches
+ if paddle.rand([1]) > self.perturb_prob:
+
+ return waveform.clone()
+ # Perform a random perturbation
+ self.samp_index = paddle.randint(len(self.speeds), shape=(1, ))[0]
+ perturbed_waveform = self.resamplers[self.samp_index](waveform)
+
+ return perturbed_waveform
+
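+# Editorial note (not part of the original patch): speed perturbation is
+# implemented purely as resampling. Picking speed 90 resamples from 16000 Hz
+# to 16000 * 90 // 100 = 14400 Hz, which shortens the waveform to roughly 90%
+# of its original length (52173 -> 46956 samples in the docstring example
+# above).
+#
+# >>> orig_freq, speed = 16000, 90
+# >>> orig_freq * speed // 100
+# 14400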
+
+class Resample(nn.Layer):
+ """This class resamples an audio signal using sinc-based interpolation.
+
+ It is a modification of the `resample` function from torchaudio
+ (https://pytorch.org/audio/stable/tutorials/audio_resampling_tutorial.html)
+
+ Arguments
+ ---------
+ orig_freq : int
+ the sampling frequency of the input signal.
+ new_freq : int
+ the new sampling frequency after this operation is performed.
+ lowpass_filter_width : int
+ Controls the sharpness of the filter, larger numbers result in a
+ sharper filter, but they are less efficient. Values from 4 to 10 are allowed.
+ """
+
+ def __init__(
+ self,
+ orig_freq=16000,
+ new_freq=16000,
+ lowpass_filter_width=6, ):
+ super().__init__()
+ self.orig_freq = orig_freq
+ self.new_freq = new_freq
+ self.lowpass_filter_width = lowpass_filter_width
+
+ # Compute rate for striding
+ self._compute_strides()
+ assert self.orig_freq % self.conv_stride == 0
+ assert self.new_freq % self.conv_transpose_stride == 0
+
+ def _compute_strides(self):
+ """Compute the phases in polyphase filter.
+
+ (almost directly from torchaudio.compliance.kaldi)
+ """
+
+ # Compute new unit based on ratio of in/out frequencies
+ base_freq = math.gcd(self.orig_freq, self.new_freq)
+ input_samples_in_unit = self.orig_freq // base_freq
+ self.output_samples = self.new_freq // base_freq
+
+ # Store the appropriate stride based on the new units
+ self.conv_stride = input_samples_in_unit
+ self.conv_transpose_stride = self.output_samples
+
+ def forward(self, waveforms):
+ """
+ Arguments
+ ---------
+ waveforms : tensor
+ Shape should be `[batch, time]` or `[batch, time, channels]`.
+ lengths : tensor
+ Shape should be a single dimension, `[batch]`.
+
+ Returns
+ -------
+ Tensor of shape `[batch, time]` or `[batch, time, channels]`.
+ """
+
+ if not hasattr(self, "first_indices"):
+ self._indices_and_weights(waveforms)
+
+ # Don't do anything if the frequencies are the same
+ if self.orig_freq == self.new_freq:
+ return waveforms
+ unsqueezed = False
+ if len(waveforms.shape) == 2:
+ waveforms = waveforms.unsqueeze(1)
+ unsqueezed = True
+ elif len(waveforms.shape) == 3:
+ waveforms = waveforms.transpose([0, 2, 1])
+ else:
+ raise ValueError("Input must be 2 or 3 dimensions")
+
+ # Do resampling
+ resampled_waveform = self._perform_resample(waveforms)
+
+ if unsqueezed:
+ resampled_waveform = resampled_waveform.squeeze(1)
+ else:
+ resampled_waveform = resampled_waveform.transpose([0, 2, 1])
+
+ return resampled_waveform
+
+ def _perform_resample(self, waveforms):
+ """Resamples the waveform at the new frequency.
+
+ This matches Kaldi's OfflineFeatureTpl ResampleWaveform which uses a
+ LinearResample (resample a signal at linearly spaced intervals to
+ up/downsample a signal). LinearResample (LR) means that the output
+ signal is at linearly spaced intervals (i.e the output signal has a
+ frequency of `new_freq`). It uses sinc/bandlimited interpolation to
+ upsample/downsample the signal.
+
+ (almost directly from torchaudio.compliance.kaldi)
+
+ https://ccrma.stanford.edu/~jos/resample/
+ Theory_Ideal_Bandlimited_Interpolation.html
+
+ https://github.com/kaldi-asr/kaldi/blob/master/src/feat/resample.h#L56
+
+ Arguments
+ ---------
+ waveforms : tensor
+ The batch of audio signals to resample.
+
+ Returns
+ -------
+ The waveforms at the new frequency.
+ """
+
+ # Compute output size and initialize
+ batch_size, num_channels, wave_len = waveforms.shape
+ window_size = self.weights.shape[1]
+ tot_output_samp = self._output_samples(wave_len)
+ resampled_waveform = paddle.zeros(
+ (batch_size, num_channels, tot_output_samp))
+ # self.weights = self.weights.to(waveforms.device)
+
+ # Check weights are on correct device
+ # if waveforms.device != self.weights.device:
+ # self.weights = self.weights.to(waveforms.device)
+
+ # eye size: (num_channels, num_channels, 1)
+ eye = paddle.eye(num_channels).unsqueeze(2)
+
+ # Iterate over the phases in the polyphase filter
+ for i in range(self.first_indices.shape[0]):
+ wave_to_conv = waveforms
+ first_index = int(self.first_indices[i].item())
+ if first_index >= 0:
+ # trim the signal as the filter will not be applied
+ # before the first_index
+ wave_to_conv = wave_to_conv[..., first_index:]
+
+ # pad the right of the signal to allow partial convolutions
+ # meaning compute values for partial windows (e.g. end of the
+ # window is outside the signal length)
+ max_index = (tot_output_samp - 1) // self.output_samples
+ end_index = max_index * self.conv_stride + window_size
+ current_wave_len = wave_len - first_index
+ right_padding = max(0, end_index + 1 - current_wave_len)
+ left_padding = max(0, -first_index)
+ wave_to_conv = paddle.nn.functional.pad(
+ wave_to_conv, (left_padding, right_padding), data_format='NCL')
+ conv_wave = paddle.nn.functional.conv1d(
+ x=wave_to_conv,
+                weight=self.weights[i].tile([num_channels, 1, 1]),
+ stride=self.conv_stride,
+ groups=num_channels, )
+
+ # we want conv_wave[:, i] to be at
+ # output[:, i + n*conv_transpose_stride]
+ dilated_conv_wave = paddle.nn.functional.conv1d_transpose(
+ conv_wave, eye, stride=self.conv_transpose_stride)
+
+ # pad dilated_conv_wave so it reaches the output length if needed.
+ left_padding = i
+ previous_padding = left_padding + dilated_conv_wave.shape[-1]
+ right_padding = max(0, tot_output_samp - previous_padding)
+ dilated_conv_wave = paddle.nn.functional.pad(
+ dilated_conv_wave, (left_padding, right_padding),
+ data_format='NCL')
+ dilated_conv_wave = dilated_conv_wave[..., :tot_output_samp]
+
+ resampled_waveform += dilated_conv_wave
+
+ return resampled_waveform
+
+ def _output_samples(self, input_num_samp):
+ """Based on LinearResample::GetNumOutputSamples.
+
+ LinearResample (LR) means that the output signal is at
+ linearly spaced intervals (i.e the output signal has a
+ frequency of ``new_freq``). It uses sinc/bandlimited
+ interpolation to upsample/downsample the signal.
+
+ (almost directly from torchaudio.compliance.kaldi)
+
+ Arguments
+ ---------
+ input_num_samp : int
+ The number of samples in each example in the batch.
+
+ Returns
+ -------
+ Number of samples in the output waveform.
+ """
+
+ # For exact computation, we measure time in "ticks" of 1.0 / tick_freq,
+ # where tick_freq is the least common multiple of samp_in and
+ # samp_out.
+ samp_in = int(self.orig_freq)
+ samp_out = int(self.new_freq)
+
+ tick_freq = abs(samp_in * samp_out) // math.gcd(samp_in, samp_out)
+ ticks_per_input_period = tick_freq // samp_in
+
+ # work out the number of ticks in the time interval
+ # [ 0, input_num_samp/samp_in ).
+ interval_length = input_num_samp * ticks_per_input_period
+ if interval_length <= 0:
+ return 0
+ ticks_per_output_period = tick_freq // samp_out
+
+ # Get the last output-sample in the closed interval,
+ # i.e. replacing [ ) with [ ]. Note: integer division rounds down.
+ # See http://en.wikipedia.org/wiki/Interval_(mathematics) for an
+ # explanation of the notation.
+ last_output_samp = interval_length // ticks_per_output_period
+
+ # We need the last output-sample in the open interval, so if it
+ # takes us to the end of the interval exactly, subtract one.
+ if last_output_samp * ticks_per_output_period == interval_length:
+ last_output_samp -= 1
+
+ # First output-sample index is zero, so the number of output samples
+ # is the last output-sample plus one.
+ num_output_samp = last_output_samp + 1
+
+ return num_output_samp
+
+ def _indices_and_weights(self, waveforms):
+ """Based on LinearResample::SetIndexesAndWeights
+
+ Retrieves the weights for resampling as well as the indices in which
+ they are valid. LinearResample (LR) means that the output signal is at
+ linearly spaced intervals (i.e the output signal has a frequency
+ of ``new_freq``). It uses sinc/bandlimited interpolation to
+ upsample/downsample the signal.
+
+ Returns
+ -------
+ - the place where each filter should start being applied
+ - the filters to be applied to the signal for resampling
+ """
+
+ # Lowpass filter frequency depends on smaller of two frequencies
+ min_freq = min(self.orig_freq, self.new_freq)
+ lowpass_cutoff = 0.99 * 0.5 * min_freq
+
+ assert lowpass_cutoff * 2 <= min_freq
+ window_width = self.lowpass_filter_width / (2.0 * lowpass_cutoff)
+
+ assert lowpass_cutoff < min(self.orig_freq, self.new_freq) / 2
+ output_t = paddle.arange(start=0.0, end=self.output_samples)
+ output_t /= self.new_freq
+ min_t = output_t - window_width
+ max_t = output_t + window_width
+
+ min_input_index = paddle.ceil(min_t * self.orig_freq)
+ max_input_index = paddle.floor(max_t * self.orig_freq)
+ num_indices = max_input_index - min_input_index + 1
+
+ max_weight_width = num_indices.max()
+ j = paddle.arange(max_weight_width)
+ input_index = min_input_index.unsqueeze(1) + j.unsqueeze(0)
+ delta_t = (input_index / self.orig_freq) - output_t.unsqueeze(1)
+
+ weights = paddle.zeros_like(delta_t)
+
+ inside_window_indices = delta_t.abs() < (window_width)
+ # raised-cosine (Hanning) window with width `window_width`
+ weights[inside_window_indices] = 0.5 * (1 + paddle.cos(
+ 2 * math.pi * lowpass_cutoff / self.lowpass_filter_width *
+ delta_t[inside_window_indices]))
+ t_eq_zero_indices = delta_t == 0.0
+ t_not_eq_zero_indices = ~t_eq_zero_indices
+
+ # sinc filter function
+ weights[t_not_eq_zero_indices] *= paddle.sin(
+ 2 * math.pi * lowpass_cutoff * delta_t[t_not_eq_zero_indices]) / (
+ math.pi * delta_t[t_not_eq_zero_indices])
+
+ # limit of the function at t = 0
+ weights[t_eq_zero_indices] *= 2 * lowpass_cutoff
+
+ # size (output_samples, max_weight_width)
+ weights /= self.orig_freq
+
+ self.first_indices = min_input_index
+ self.weights = weights
+
+
+class DropFreq(nn.Layer):
+ """This class drops a random frequency from the signal.
+ The purpose of this class is to teach models to learn to rely on all parts
+ of the signal, not just a few frequency bands.
+ Arguments
+ ---------
+ drop_freq_low : float
+ The low end of frequencies that can be dropped,
+ as a fraction of the sampling rate / 2.
+ drop_freq_high : float
+ The high end of frequencies that can be
+ dropped, as a fraction of the sampling rate / 2.
+ drop_count_low : int
+ The low end of number of frequencies that could be dropped.
+ drop_count_high : int
+ The high end of number of frequencies that could be dropped.
+ drop_width : float
+ The width of the frequency band to drop, as
+ a fraction of the sampling_rate / 2.
+ drop_prob : float
+ The probability that the batch of signals will have a frequency
+ dropped. By default, every batch has frequencies dropped.
+ Example
+ -------
+ >>> from speechbrain.dataio.dataio import read_audio
+ >>> dropper = DropFreq()
+ >>> signal = read_audio('tests/samples/single-mic/example1.wav')
+ >>> dropped_signal = dropper(signal.unsqueeze(0))
+ """
+
+ def __init__(
+ self,
+ drop_freq_low=1e-14,
+ drop_freq_high=1,
+ drop_count_low=1,
+ drop_count_high=2,
+ drop_width=0.05,
+ drop_prob=1, ):
+ super().__init__()
+ self.drop_freq_low = drop_freq_low
+ self.drop_freq_high = drop_freq_high
+ self.drop_count_low = drop_count_low
+ self.drop_count_high = drop_count_high
+ self.drop_width = drop_width
+ self.drop_prob = drop_prob
+
+ def forward(self, waveforms):
+ """
+ Arguments
+ ---------
+ waveforms : tensor
+ Shape should be `[batch, time]` or `[batch, time, channels]`.
+ Returns
+ -------
+ Tensor of shape `[batch, time]` or `[batch, time, channels]`.
+ """
+
+ # Don't drop (return early) 1-`drop_prob` portion of the batches
+ dropped_waveform = waveforms.clone()
+ if paddle.rand([1]) > self.drop_prob:
+ return dropped_waveform
+
+ # Add channels dimension
+ if len(waveforms.shape) == 2:
+ dropped_waveform = dropped_waveform.unsqueeze(-1)
+
+ # Pick number of frequencies to drop
+ drop_count = paddle.randint(
+ low=self.drop_count_low,
+ high=self.drop_count_high + 1,
+ shape=(1, ), )
+
+ # Pick a frequency to drop
+ drop_range = self.drop_freq_high - self.drop_freq_low
+ drop_frequency = (
+ paddle.rand(drop_count) * drop_range + self.drop_freq_low)
+ # Filter parameters
+ filter_length = 101
+ pad = filter_length // 2
+
+ # Start with delta function
+ drop_filter = paddle.zeros([1, filter_length, 1])
+ drop_filter[0, pad, 0] = 1
+ # Subtract each frequency
+ for frequency in drop_frequency:
+ notch_kernel = notch_filter(
+ frequency,
+ filter_length,
+ self.drop_width, )
+ drop_filter = convolve1d(drop_filter, notch_kernel, pad)
+
+ # Apply filter
+ dropped_waveform = convolve1d(dropped_waveform, drop_filter, pad)
+
+ # Remove channels dimension if added
+ return dropped_waveform.squeeze(-1)
+
+
+class DropChunk(nn.Layer):
+ """This class drops portions of the input signal.
+    Using `DropChunk` as an augmentation strategy helps a model learn to rely
+ on all parts of the signal, since it can't expect a given part to be
+ present.
+ Arguments
+ ---------
+ drop_length_low : int
+ The low end of lengths for which to set the
+ signal to zero, in samples.
+ drop_length_high : int
+ The high end of lengths for which to set the
+ signal to zero, in samples.
+ drop_count_low : int
+ The low end of number of times that the signal
+ can be dropped to zero.
+ drop_count_high : int
+ The high end of number of times that the signal
+ can be dropped to zero.
+ drop_start : int
+ The first index for which dropping will be allowed.
+ drop_end : int
+ The last index for which dropping will be allowed.
+ drop_prob : float
+ The probability that the batch of signals will
+ have a portion dropped. By default, every batch
+ has portions dropped.
+ noise_factor : float
+ The factor relative to average amplitude of an utterance
+ to use for scaling the white noise inserted. 1 keeps
+ the average amplitude the same, while 0 inserts all 0's.
+ Example
+ -------
+ >>> from speechbrain.dataio.dataio import read_audio
+ >>> dropper = DropChunk(drop_start=100, drop_end=200, noise_factor=0.)
+ >>> signal = read_audio('tests/samples/single-mic/example1.wav')
+    >>> signal = signal.unsqueeze(0) # [batch, time]
+ >>> length = paddle.ones([1])
+ >>> dropped_signal = dropper(signal, length)
+ >>> float(dropped_signal[:, 150])
+ 0.0
+ """
+
+ def __init__(
+ self,
+ drop_length_low=100,
+ drop_length_high=1000,
+ drop_count_low=1,
+ drop_count_high=10,
+ drop_start=0,
+ drop_end=None,
+ drop_prob=1,
+ noise_factor=0.0, ):
+ super().__init__()
+ self.drop_length_low = drop_length_low
+ self.drop_length_high = drop_length_high
+ self.drop_count_low = drop_count_low
+ self.drop_count_high = drop_count_high
+ self.drop_start = drop_start
+ self.drop_end = drop_end
+ self.drop_prob = drop_prob
+ self.noise_factor = noise_factor
+
+ # Validate low < high
+ if drop_length_low > drop_length_high:
+ raise ValueError("Low limit must not be more than high limit")
+ if drop_count_low > drop_count_high:
+ raise ValueError("Low limit must not be more than high limit")
+
+ # Make sure the length doesn't exceed end - start
+ if drop_end is not None and drop_end >= 0:
+ if drop_start > drop_end:
+ raise ValueError("Low limit must not be more than high limit")
+
+ drop_range = drop_end - drop_start
+ self.drop_length_low = min(drop_length_low, drop_range)
+ self.drop_length_high = min(drop_length_high, drop_range)
+
+ def forward(self, waveforms, lengths):
+ """
+ Arguments
+ ---------
+ waveforms : tensor
+ Shape should be `[batch, time]` or `[batch, time, channels]`.
+ lengths : tensor
+ Shape should be a single dimension, `[batch]`.
+ Returns
+ -------
+ Tensor of shape `[batch, time]` or
+ `[batch, time, channels]`
+ """
+
+ # Reading input list
+ lengths = (lengths * waveforms.shape[1]).long()
+ batch_size = waveforms.shape[0]
+ dropped_waveform = waveforms.clone()
+
+ # Don't drop (return early) 1-`drop_prob` portion of the batches
+ if paddle.rand([1]) > self.drop_prob:
+ return dropped_waveform
+
+ # Store original amplitude for computing white noise amplitude
+ clean_amplitude = compute_amplitude(waveforms, lengths.unsqueeze(1))
+
+ # Pick a number of times to drop
+ drop_times = paddle.randint(
+ low=self.drop_count_low,
+ high=self.drop_count_high + 1,
+ shape=(batch_size, ), )
+
+ # Iterate batch to set mask
+ for i in range(batch_size):
+ if drop_times[i] == 0:
+ continue
+
+ # Pick lengths
+ length = paddle.randint(
+ low=self.drop_length_low,
+ high=self.drop_length_high + 1,
+ shape=(drop_times[i], ), )
+
+ # Compute range of starting locations
+ start_min = self.drop_start
+ if start_min < 0:
+ start_min += lengths[i]
+ start_max = self.drop_end
+ if start_max is None:
+ start_max = lengths[i]
+ if start_max < 0:
+ start_max += lengths[i]
+ start_max = max(0, start_max - length.max())
+
+ # Pick starting locations
+ start = paddle.randint(
+ low=start_min,
+ high=start_max + 1,
+ shape=(drop_times[i], ), )
+
+ end = start + length
+
+ # Update waveform
+ if not self.noise_factor:
+ for j in range(drop_times[i]):
+ dropped_waveform[i, start[j]:end[j]] = 0.0
+ else:
+ # Uniform distribution of -2 to +2 * avg amplitude should
+ # preserve the average for normalization
+ noise_max = 2 * clean_amplitude[i] * self.noise_factor
+ for j in range(drop_times[i]):
+ # zero-center the noise distribution
+ noise_vec = paddle.rand([length[j]])
+ noise_vec = 2 * noise_max * noise_vec - noise_max
+ dropped_waveform[i, start[j]:end[j]] = noise_vec
+
+ return dropped_waveform
+
+
+class TimeDomainSpecAugment(nn.Layer):
+ """A time-domain approximation of the SpecAugment algorithm.
+
+ This augmentation module implements three augmentations in
+ the time-domain.
+
+ 1. Drop chunks of the audio (zero amplitude or white noise)
+ 2. Drop frequency bands (with band-drop filters)
+    3. Speed perturbation (via resampling to a slightly different rate)
+
+ Arguments
+ ---------
+ perturb_prob : float from 0 to 1
+ The probability that a batch will have speed perturbation applied.
+ drop_freq_prob : float from 0 to 1
+ The probability that a batch will have frequencies dropped.
+ drop_chunk_prob : float from 0 to 1
+ The probability that a batch will have chunks dropped.
+ speeds : list of ints
+ A set of different speeds to use to perturb each batch.
+ See ``speechbrain.processing.speech_augmentation.SpeedPerturb``
+ sample_rate : int
+ Sampling rate of the input waveforms.
+ drop_freq_count_low : int
+ Lowest number of frequencies that could be dropped.
+ drop_freq_count_high : int
+ Highest number of frequencies that could be dropped.
+ drop_chunk_count_low : int
+ Lowest number of chunks that could be dropped.
+ drop_chunk_count_high : int
+ Highest number of chunks that could be dropped.
+ drop_chunk_length_low : int
+ Lowest length of chunks that could be dropped.
+ drop_chunk_length_high : int
+ Highest length of chunks that could be dropped.
+ drop_chunk_noise_factor : float
+ The noise factor used to scale the white noise inserted, relative to
+ the average amplitude of the utterance. Default 0 (no noise inserted).
+
+ Example
+ -------
+ >>> inputs = paddle.randn([10, 16000])
+ >>> feature_maker = TimeDomainSpecAugment(speeds=[80])
+    >>> feats = feature_maker(inputs, paddle.ones([10]))
+    >>> feats.shape
+    [10, 12800]
+ """
+
+ def __init__(
+ self,
+ perturb_prob=1.0,
+ drop_freq_prob=1.0,
+ drop_chunk_prob=1.0,
+ speeds=[95, 100, 105],
+ sample_rate=16000,
+ drop_freq_count_low=0,
+ drop_freq_count_high=3,
+ drop_chunk_count_low=0,
+ drop_chunk_count_high=5,
+ drop_chunk_length_low=1000,
+ drop_chunk_length_high=2000,
+ drop_chunk_noise_factor=0, ):
+ super().__init__()
+ self.speed_perturb = SpeedPerturb(
+ perturb_prob=perturb_prob, orig_freq=sample_rate, speeds=speeds)
+ self.drop_freq = DropFreq(
+ drop_prob=drop_freq_prob,
+ drop_count_low=drop_freq_count_low,
+ drop_count_high=drop_freq_count_high, )
+ self.drop_chunk = DropChunk(
+ drop_prob=drop_chunk_prob,
+ drop_count_low=drop_chunk_count_low,
+ drop_count_high=drop_chunk_count_high,
+ drop_length_low=drop_chunk_length_low,
+ drop_length_high=drop_chunk_length_high,
+ noise_factor=drop_chunk_noise_factor, )
+
+ def forward(self, waveforms, lengths):
+ """Returns the distorted waveforms.
+
+ Arguments
+ ---------
+        waveforms : tensor
+            The waveforms to distort, shape `[batch, time]` or
+            `[batch, time, channels]`.
+        lengths : tensor
+            The relative lengths of the waveforms in the batch, shape `[batch]`.
+        """
+ # Augmentation
+ with paddle.no_grad():
+ waveforms = self.speed_perturb(waveforms)
+ waveforms = self.drop_freq(waveforms)
+ waveforms = self.drop_chunk(waveforms, lengths)
+ return waveforms
diff --git a/paddlespeech/s2t/models/wav2vec2/wav2vec2_ASR.py b/paddlespeech/s2t/models/wav2vec2/wav2vec2_ASR.py
new file mode 100644
index 000000000..0d99e8708
--- /dev/null
+++ b/paddlespeech/s2t/models/wav2vec2/wav2vec2_ASR.py
@@ -0,0 +1,228 @@
+import sys
+from collections import defaultdict
+from typing import Dict
+from typing import List
+from typing import Tuple
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+
+from paddlespeech.s2t.models.wav2vec2.modules.modeling_wav2vec2 import Wav2Vec2ConfigPure
+from paddlespeech.s2t.models.wav2vec2.modules.modeling_wav2vec2 import Wav2Vec2Model
+from paddlespeech.s2t.models.wav2vec2.modules.VanillaNN import VanillaNN
+from paddlespeech.s2t.modules.ctc import CTCDecoderBase as CTC
+from paddlespeech.s2t.utils.ctc_utils import remove_duplicates_and_blank
+from paddlespeech.s2t.utils.log import Log
+from paddlespeech.s2t.utils.utility import log_add
+
+logger = Log(__name__).getlog()
+
+
+class Wav2vec2ASR(nn.Layer):
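+    """CTC-based ASR model built on a pretrained wav2vec2.0 encoder.
+
+    The wav2vec2.0 features (optionally frozen) are projected by a VanillaNN
+    encoder and trained with a CTC loss; decoding supports CTC greedy search
+    and CTC prefix beam search.
+    """
+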
+ def __init__(self, config: dict):
+ super().__init__()
+
+ wav2vec2_config = Wav2Vec2ConfigPure(config)
+ wav2vec2 = Wav2Vec2Model(wav2vec2_config)
+ model_dict = paddle.load(config.wav2vec2_params_path)
+ wav2vec2.set_state_dict(model_dict)
+ self.normalize_wav = config.normalize_wav
+ self.output_norm = config.output_norm
+ if config.freeze_wav2vec2:
+ wav2vec2.eval()
+ for parm in wav2vec2.parameters():
+ parm.trainable = False
+ self.wav2vec2 = wav2vec2
+ self.enc = VanillaNN(
+ input_shape=[None, None, wav2vec2_config.hidden_size],
+ activation=nn.LeakyReLU,
+ dnn_blocks=config.dnn_blocks,
+ dnn_neurons=config.dnn_neurons)
+ self.ctc = CTC(odim=config.output_dim,
+ enc_n_units=config.dnn_neurons,
+ blank_id=config.blank_id,
+ dropout_rate=config.ctc_dropout_rate,
+ reduction='mean')
+
+ def forward(self, wav, wavs_lens_rate, target, target_lens_rate):
+ if self.normalize_wav:
+ wav = F.layer_norm(wav, wav.shape[1:])
+ # Extract wav2vec output
+ out = self.wav2vec2(wav)[0]
+ # We normalize the output if required
+ if self.output_norm:
+ out = F.layer_norm(out, out.shape[1:])
+ feats = out
+
+ x = self.enc(feats)
+ x_lens = (wavs_lens_rate * x.shape[1]).round().astype(paddle.int64)
+ target_lens = (target_lens_rate *
+ target.shape[1]).round().astype(paddle.int64)
+
+ ctc_loss = self.ctc(x, x_lens, target, target_lens)
+ return ctc_loss
+
+ @paddle.no_grad()
+ def decode(self,
+ feats: paddle.Tensor,
+ text_feature: Dict[str, int],
+ decoding_method: str,
+ beam_size: int):
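+        """Decode the wav2vec2-CTC output of a batch of utterances.
+
+        Only 'ctc_greedy_search' and 'ctc_prefix_beam_search' are supported,
+        and prefix beam search requires batch_size == 1. Returns the decoded
+        transcripts together with their token ids.
+        """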
+ batch_size = feats.shape[0]
+
+ if decoding_method == 'ctc_prefix_beam_search' and batch_size > 1:
+ logger.error(
+ f'decoding mode {decoding_method} must be running with batch_size == 1'
+ )
+ logger.error(f"current batch_size is {batch_size}")
+ sys.exit(1)
+
+ if decoding_method == 'ctc_greedy_search':
+ hyps = self.ctc_greedy_search(feats)
+ res = [text_feature.defeaturize(hyp) for hyp in hyps]
+ res_tokenids = [hyp for hyp in hyps]
+            # ctc_prefix_beam_search and attention_rescoring only return one
+            # result in List[int], so change it to List[List[int]] for
+            # compatibility with the other batch decoding modes.
+ elif decoding_method == 'ctc_prefix_beam_search':
+ assert feats.shape[0] == 1
+ hyp = self.ctc_prefix_beam_search(feats, beam_size)
+ res = [text_feature.defeaturize(hyp)]
+ res_tokenids = [hyp]
+ else:
+ raise ValueError(
+ f"wav2vec2 not support decoding method: {decoding_method}")
+
+ return res, res_tokenids
+
+ @classmethod
+ def from_config(cls, config):
+ model = cls(config)
+ return model
+
+ def ctc_greedy_search(self, wav) -> List[List[int]]:
+ """ Apply CTC greedy search
+ Args:
+            wav (paddle.Tensor): audio samples, (batch, max_len, 1)
+ Returns:
+ List[List[int]]: best path result
+ """
+ batch_size = wav.shape[0]
+ wav = wav[:, :, 0]
+ if self.normalize_wav:
+ wav = F.layer_norm(wav, wav.shape[1:])
+ # Extract wav2vec output
+ out = self.wav2vec2(wav)[0]
+ # We normalize the output if required
+ if self.output_norm:
+ out = F.layer_norm(out, out.shape[1:])
+ feats = out
+ x = self.enc(feats)
+ x_lens = x.shape[1]
+ ctc_probs = self.ctc.log_softmax(x) # (B, maxlen, vocab_size)
+ topk_prob, topk_index = ctc_probs.topk(1, axis=2) # (B, maxlen, 1)
+ topk_index = topk_index.view(batch_size, x_lens) # (B, maxlen)
+
+ hyps = [hyp.tolist() for hyp in topk_index]
+ hyps = [remove_duplicates_and_blank(hyp) for hyp in hyps]
+ return hyps
+
+ def _ctc_prefix_beam_search(
+ self,
+ wav,
+ beam_size,
+            blank_id: int=0, ) -> List[Tuple[Tuple[int, ...], float]]:
+ """ CTC prefix beam search inner implementation
+        Args:
+            wav (paddle.Tensor): audio samples, (batch=1, max_len, 1)
+            beam_size (int): beam size for beam search
+            blank_id (int): id of the blank token, default 0
+        Returns:
+            List[Tuple[Tuple[int, ...], float]]: nbest results, each item is
+                (token id sequence, log likelihood)
+        """
+ wav = wav[:, :, 0]
+
+ if self.normalize_wav:
+ wav = F.layer_norm(wav, wav.shape[1:])
+ # Extract wav2vec output
+ out = self.wav2vec2(wav)[0]
+ # We normalize the output if required
+ if self.output_norm:
+ out = F.layer_norm(out, out.shape[1:])
+ feats = out
+
+ x = self.enc(feats)
+ maxlen = x.shape[1]
+ ctc_probs = self.ctc.log_softmax(x) # (1, maxlen, vocab_size)
+ ctc_probs = ctc_probs.squeeze(0)
+
+ # cur_hyps: (prefix, (blank_ending_score, none_blank_ending_score))
+ # blank_ending_score and none_blank_ending_score in ln domain
+ cur_hyps = [(tuple(), (0.0, -float('inf')))]
+ # 2. CTC beam search step by step
+ for t in range(0, maxlen):
+ logp = ctc_probs[t] # (vocab_size,)
+ # key: prefix, value (pb, pnb), default value(-inf, -inf)
+ next_hyps = defaultdict(lambda: (-float('inf'), -float('inf')))
+ # 2.1 First beam prune: select topk best
+ top_k_logp, top_k_index = logp.topk(beam_size) # (beam_size,)
+ for s in top_k_index:
+ s = s.item()
+ ps = logp[s].item()
+ for prefix, (pb, pnb) in cur_hyps:
+ last = prefix[-1] if len(prefix) > 0 else None
+ if s == blank_id: # blank
+ n_pb, n_pnb = next_hyps[prefix]
+ n_pb = log_add([n_pb, pb + ps, pnb + ps])
+ next_hyps[prefix] = (n_pb, n_pnb)
+ elif s == last:
+ # Update *ss -> *s;
+ n_pb, n_pnb = next_hyps[prefix]
+ n_pnb = log_add([n_pnb, pnb + ps])
+ next_hyps[prefix] = (n_pb, n_pnb)
+ # Update *s-s -> *ss, - is for blank
+ n_prefix = prefix + (s, )
+ n_pb, n_pnb = next_hyps[n_prefix]
+ n_pnb = log_add([n_pnb, pb + ps])
+ next_hyps[n_prefix] = (n_pb, n_pnb)
+ else:
+ n_prefix = prefix + (s, )
+ n_pb, n_pnb = next_hyps[n_prefix]
+ n_pnb = log_add([n_pnb, pb + ps, pnb + ps])
+ next_hyps[n_prefix] = (n_pb, n_pnb)
+
+ # 2.2 Second beam prune
+ next_hyps = sorted(
+ next_hyps.items(),
+ key=lambda x: log_add(list(x[1])),
+ reverse=True)
+ cur_hyps = next_hyps[:beam_size]
+
+ hyps = [(y[0], log_add([y[1][0], y[1][1]])) for y in cur_hyps]
+ return hyps
+
+ def ctc_prefix_beam_search(self, wav, beam_size) -> List[int]:
+ """ Apply CTC prefix beam search
+        Args:
+            wav (paddle.Tensor): audio samples, (batch=1, max_len, 1)
+            beam_size (int): beam size for beam search
+        Returns:
+            List[int]: token ids of the best hypothesis
+        """
+ hyps = self._ctc_prefix_beam_search(wav, beam_size)
+ return hyps[0][0]
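+
+
+# Usage sketch (names and values here are illustrative, not part of the API):
+#   model = Wav2vec2ASR.from_config(config)
+#   loss = model(wav, wavs_lens_rate, target, target_lens_rate)   # training
+#   res, res_tokenids = model.decode(wav, text_feature,
+#                                    decoding_method='ctc_greedy_search',
+#                                    beam_size=10)                 # inference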
diff --git a/paddlespeech/s2t/modules/attention.py b/paddlespeech/s2t/modules/attention.py
index 2d236743a..d9568dcc9 100644
--- a/paddlespeech/s2t/modules/attention.py
+++ b/paddlespeech/s2t/modules/attention.py
@@ -45,6 +45,7 @@ class MultiHeadedAttention(nn.Layer):
"""
super().__init__()
assert n_feat % n_head == 0
+ self.n_feat = n_feat
# We assume d_v always equals d_k
self.d_k = n_feat // n_head
self.h = n_head
@@ -73,9 +74,11 @@ class MultiHeadedAttention(nn.Layer):
(#batch, n_head, time2, d_k).
"""
n_batch = query.shape[0]
+
q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
+
q = q.transpose([0, 2, 1, 3]) # (batch, head, time1, d_k)
k = k.transpose([0, 2, 1, 3]) # (batch, head, time2, d_k)
v = v.transpose([0, 2, 1, 3]) # (batch, head, time2, d_k)
@@ -108,10 +111,10 @@ class MultiHeadedAttention(nn.Layer):
# When will `if mask.size(2) > 0` be False?
# 1. onnx(16/-1, -1/-1, 16/0)
# 2. jit (16/-1, -1/-1, 16/0, 16/4)
- if paddle.shape(mask)[2] > 0: # time2 > 0
+ if mask.shape[2] > 0: # time2 > 0
mask = mask.unsqueeze(1).equal(0) # (batch, 1, *, time2)
# for last chunk, time2 might be larger than scores.size(-1)
- mask = mask[:, :, :, :paddle.shape(scores)[-1]]
+ mask = mask[:, :, :, :scores.shape[-1]]
scores = scores.masked_fill(mask, -float('inf'))
attn = paddle.softmax(
scores, axis=-1).masked_fill(mask,
@@ -179,7 +182,7 @@ class MultiHeadedAttention(nn.Layer):
# >>> torch.equal(b, c) # True
# >>> d = torch.split(a, 2, dim=-1)
# >>> torch.equal(d[0], d[1]) # True
- if paddle.shape(cache)[0] > 0:
+ if cache.shape[0] > 0:
# last dim `d_k * 2` for (key, val)
key_cache, value_cache = paddle.split(cache, 2, axis=-1)
k = paddle.concat([key_cache, k], axis=2)
@@ -188,8 +191,9 @@ class MultiHeadedAttention(nn.Layer):
# non-trivial to calculate `next_cache_start` here.
new_cache = paddle.concat((k, v), axis=-1)
- scores = paddle.matmul(q,
- k.transpose([0, 1, 3, 2])) / math.sqrt(self.d_k)
+ # scores = paddle.matmul(q,
+ # k.transpose([0, 1, 3, 2])) / math.sqrt(self.d_k)
+ scores = paddle.matmul(q, k, transpose_y=True) / math.sqrt(self.d_k)
return self.forward_attention(v, scores, mask), new_cache
@@ -270,7 +274,7 @@ class RelPositionMultiHeadedAttention(MultiHeadedAttention):
and `head * d_k == size`
"""
q, k, v = self.forward_qkv(query, key, value)
- q = q.transpose([0, 2, 1, 3]) # (batch, time1, head, d_k)
+ # q = q.transpose([0, 2, 1, 3]) # (batch, time1, head, d_k)
# when export onnx model, for 1st chunk, we feed
# cache(1, head, 0, d_k * 2) (16/-1, -1/-1, 16/0 mode)
@@ -287,7 +291,7 @@ class RelPositionMultiHeadedAttention(MultiHeadedAttention):
# >>> torch.equal(b, c) # True
# >>> d = torch.split(a, 2, dim=-1)
# >>> torch.equal(d[0], d[1]) # True
- if paddle.shape(cache)[0] > 0:
+ if cache.shape[0] > 0:
# last dim `d_k * 2` for (key, val)
key_cache, value_cache = paddle.split(cache, 2, axis=-1)
k = paddle.concat([key_cache, k], axis=2)
@@ -301,19 +305,23 @@ class RelPositionMultiHeadedAttention(MultiHeadedAttention):
p = p.transpose([0, 2, 1, 3]) # (batch, head, time1, d_k)
# (batch, head, time1, d_k)
- q_with_bias_u = (q + self.pos_bias_u).transpose([0, 2, 1, 3])
+ # q_with_bias_u = (q + self.pos_bias_u).transpose([0, 2, 1, 3])
+ q_with_bias_u = q + self.pos_bias_u.unsqueeze(1)
# (batch, head, time1, d_k)
- q_with_bias_v = (q + self.pos_bias_v).transpose([0, 2, 1, 3])
+ # q_with_bias_v = (q + self.pos_bias_v).transpose([0, 2, 1, 3])
+ q_with_bias_v = q + self.pos_bias_v.unsqueeze(1)
# compute attention score
# first compute matrix a and matrix c
# as described in https://arxiv.org/abs/1901.02860 Section 3.3
# (batch, head, time1, time2)
- matrix_ac = paddle.matmul(q_with_bias_u, k.transpose([0, 1, 3, 2]))
+ # matrix_ac = paddle.matmul(q_with_bias_u, k.transpose([0, 1, 3, 2]))
+ matrix_ac = paddle.matmul(q_with_bias_u, k, transpose_y=True)
# compute matrix b and matrix d
# (batch, head, time1, time2)
- matrix_bd = paddle.matmul(q_with_bias_v, p.transpose([0, 1, 3, 2]))
+ # matrix_bd = paddle.matmul(q_with_bias_v, p.transpose([0, 1, 3, 2]))
+ matrix_bd = paddle.matmul(q_with_bias_v, p, transpose_y=True)
# Remove rel_shift since it is useless in speech recognition,
# and it requires special attention for streaming.
# matrix_bd = self.rel_shift(matrix_bd)
diff --git a/paddlespeech/s2t/modules/cmvn.py b/paddlespeech/s2t/modules/cmvn.py
index 67f71b667..6a8c1660c 100644
--- a/paddlespeech/s2t/modules/cmvn.py
+++ b/paddlespeech/s2t/modules/cmvn.py
@@ -40,6 +40,13 @@ class GlobalCMVN(nn.Layer):
self.register_buffer("mean", mean)
self.register_buffer("istd", istd)
+ def __repr__(self):
+ return ("{name}(mean={mean}, istd={istd}, norm_var={norm_var})".format(
+ name=self.__class__.__name__,
+ mean=self.mean,
+ istd=self.istd,
+ norm_var=self.norm_var))
+
def forward(self, x: paddle.Tensor):
"""
Args:
diff --git a/paddlespeech/s2t/modules/conformer_convolution.py b/paddlespeech/s2t/modules/conformer_convolution.py
index be6056546..09d903eee 100644
--- a/paddlespeech/s2t/modules/conformer_convolution.py
+++ b/paddlespeech/s2t/modules/conformer_convolution.py
@@ -127,11 +127,11 @@ class ConvolutionModule(nn.Layer):
x = x.transpose([0, 2, 1]) # [B, C, T]
# mask batch padding
- if paddle.shape(mask_pad)[2] > 0: # time > 0
+ if mask_pad.shape[2] > 0: # time > 0
x = x.masked_fill(mask_pad, 0.0)
if self.lorder > 0:
- if paddle.shape(cache)[2] == 0: # cache_t == 0
+ if cache.shape[2] == 0: # cache_t == 0
x = nn.functional.pad(
x, [self.lorder, 0], 'constant', 0.0, data_format='NCL')
else:
@@ -161,7 +161,7 @@ class ConvolutionModule(nn.Layer):
x = self.pointwise_conv2(x)
# mask batch padding
- if paddle.shape(mask_pad)[2] > 0: # time > 0
+ if mask_pad.shape[2] > 0: # time > 0
x = x.masked_fill(mask_pad, 0.0)
x = x.transpose([0, 2, 1]) # [B, T, C]
diff --git a/paddlespeech/s2t/modules/ctc.py b/paddlespeech/s2t/modules/ctc.py
index 0f50db21d..e0c01ab46 100644
--- a/paddlespeech/s2t/modules/ctc.py
+++ b/paddlespeech/s2t/modules/ctc.py
@@ -53,7 +53,7 @@ class CTCDecoderBase(nn.Layer):
enc_n_units,
blank_id=0,
dropout_rate: float=0.0,
- reduction: bool=True,
+ reduction: Union[str, bool]=True,
batch_average: bool=True,
grad_norm_type: Union[str, None]=None):
"""CTC decoder
@@ -73,7 +73,10 @@ class CTCDecoderBase(nn.Layer):
self.odim = odim
self.dropout = nn.Dropout(dropout_rate)
self.ctc_lo = Linear(enc_n_units, self.odim)
- reduction_type = "sum" if reduction else "none"
+ if isinstance(reduction, bool):
+ reduction_type = "sum" if reduction else "none"
+ else:
+ reduction_type = reduction
self.criterion = CTCLoss(
blank=self.blank_id,
reduction=reduction_type,
diff --git a/paddlespeech/s2t/modules/decoder.py b/paddlespeech/s2t/modules/decoder.py
index 3b1a7f23d..4ddf057b6 100644
--- a/paddlespeech/s2t/modules/decoder.py
+++ b/paddlespeech/s2t/modules/decoder.py
@@ -140,9 +140,7 @@ class TransformerDecoder(BatchScorerInterface, nn.Layer):
# m: (1, L, L)
m = subsequent_mask(tgt_mask.shape[-1]).unsqueeze(0)
# tgt_mask: (B, L, L)
- # TODO(Hui Zhang): not support & for tensor
- # tgt_mask = tgt_mask & m
- tgt_mask = tgt_mask.logical_and(m)
+ tgt_mask = tgt_mask & m
x, _ = self.embed(tgt)
for layer in self.decoders:
@@ -153,9 +151,7 @@ class TransformerDecoder(BatchScorerInterface, nn.Layer):
if self.use_output_layer:
x = self.output_layer(x)
- # TODO(Hui Zhang): reduce_sum not support bool type
- # olens = tgt_mask.sum(1)
- olens = tgt_mask.astype(paddle.int).sum(1)
+ olens = tgt_mask.sum(1)
return x, paddle.to_tensor(0.0), olens
def forward_one_step(
@@ -247,7 +243,7 @@ class TransformerDecoder(BatchScorerInterface, nn.Layer):
]
# batch decoding
- ys_mask = subsequent_mask(paddle.shape(ys)[-1]).unsqueeze(0) # (B,L,L)
+ ys_mask = subsequent_mask(ys.shape[-1]).unsqueeze(0) # (B,L,L)
xs_mask = make_xs_mask(xs).unsqueeze(1) # (B,1,T)
logp, states = self.forward_one_step(
xs, xs_mask, ys, ys_mask, cache=batch_state)
@@ -343,7 +339,7 @@ class BiTransformerDecoder(BatchScorerInterface, nn.Layer):
"""
l_x, _, olens = self.left_decoder(memory, memory_mask, ys_in_pad,
ys_in_lens)
- r_x = paddle.to_tensor(0.0)
+ r_x = paddle.zeros([1])
if reverse_weight > 0.0:
r_x, _, olens = self.right_decoder(memory, memory_mask, r_ys_in_pad,
ys_in_lens)
diff --git a/paddlespeech/s2t/modules/decoder_layer.py b/paddlespeech/s2t/modules/decoder_layer.py
index 37b124e84..cb7261107 100644
--- a/paddlespeech/s2t/modules/decoder_layer.py
+++ b/paddlespeech/s2t/modules/decoder_layer.py
@@ -114,10 +114,7 @@ class DecoderLayer(nn.Layer):
], f"{cache.shape} == {[tgt.shape[0], tgt.shape[1] - 1, self.size]}"
tgt_q = tgt[:, -1:, :]
residual = residual[:, -1:, :]
- # TODO(Hui Zhang): slice not support bool type
- # tgt_q_mask = tgt_mask[:, -1:, :]
- tgt_q_mask = tgt_mask.cast(paddle.int64)[:, -1:, :].cast(
- paddle.bool)
+ tgt_q_mask = tgt_mask[:, -1:, :]
if self.concat_after:
tgt_concat = paddle.cat(
diff --git a/paddlespeech/s2t/modules/embedding.py b/paddlespeech/s2t/modules/embedding.py
index 3aeebd29b..f41a7b5d4 100644
--- a/paddlespeech/s2t/modules/embedding.py
+++ b/paddlespeech/s2t/modules/embedding.py
@@ -89,7 +89,7 @@ class PositionalEncoding(nn.Layer, PositionalEncodingInterface):
self.max_len = max_len
self.xscale = paddle.to_tensor(math.sqrt(self.d_model))
self.dropout = nn.Dropout(p=dropout_rate)
- self.pe = paddle.zeros([self.max_len, self.d_model]) #[T,D]
+ self.pe = paddle.zeros([1, self.max_len, self.d_model]) #[B=1,T,D]
position = paddle.arange(
0, self.max_len, dtype=paddle.float32).unsqueeze(1) #[T, 1]
@@ -97,9 +97,8 @@ class PositionalEncoding(nn.Layer, PositionalEncodingInterface):
paddle.arange(0, self.d_model, 2, dtype=paddle.float32) *
-(math.log(10000.0) / self.d_model))
- self.pe[:, 0::2] = paddle.sin(position * div_term)
- self.pe[:, 1::2] = paddle.cos(position * div_term)
- self.pe = self.pe.unsqueeze(0) #[1, T, D]
+ self.pe[:, :, 0::2] = paddle.sin(position * div_term)
+ self.pe[:, :, 1::2] = paddle.cos(position * div_term)
def forward(self, x: paddle.Tensor,
offset: int=0) -> Tuple[paddle.Tensor, paddle.Tensor]:
@@ -111,12 +110,10 @@ class PositionalEncoding(nn.Layer, PositionalEncodingInterface):
paddle.Tensor: Encoded tensor. Its shape is (batch, time, ...)
paddle.Tensor: for compatibility to RelPositionalEncoding, (batch=1, time, ...)
"""
- T = x.shape[1]
assert offset + x.shape[
1] < self.max_len, "offset: {} + x.shape[1]: {} is larger than the max_len: {}".format(
offset, x.shape[1], self.max_len)
- #TODO(Hui Zhang): using T = paddle.shape(x)[1], __getitem__ not support Tensor
- pos_emb = self.pe[:, offset:offset + T]
+ pos_emb = self.pe[:, offset:offset + x.shape[1]]
x = x * self.xscale + pos_emb
return self.dropout(x), self.dropout(pos_emb)
@@ -165,6 +162,5 @@ class RelPositionalEncoding(PositionalEncoding):
1] < self.max_len, "offset: {} + x.shape[1]: {} is larger than the max_len: {}".format(
offset, x.shape[1], self.max_len)
x = x * self.xscale
- #TODO(Hui Zhang): using paddle.shape(x)[1], __getitem__ not support Tensor
pos_emb = self.pe[:, offset:offset + x.shape[1]]
return self.dropout(x), self.dropout(pos_emb)
diff --git a/paddlespeech/s2t/modules/encoder.py b/paddlespeech/s2t/modules/encoder.py
index 2f4ad1b29..fd7bd7b9a 100644
--- a/paddlespeech/s2t/modules/encoder.py
+++ b/paddlespeech/s2t/modules/encoder.py
@@ -164,12 +164,8 @@ class BaseEncoder(nn.Layer):
if self.global_cmvn is not None:
xs = self.global_cmvn(xs)
- #TODO(Hui Zhang): self.embed(xs, masks, offset=0), stride_slice not support bool tensor
- xs, pos_emb, masks = self.embed(xs, masks.astype(xs.dtype), offset=0)
- #TODO(Hui Zhang): remove mask.astype, stride_slice not support bool tensor
- masks = masks.astype(paddle.bool)
- #TODO(Hui Zhang): mask_pad = ~masks
- mask_pad = masks.logical_not()
+ xs, pos_emb, masks = self.embed(xs, masks, offset=0)
+ mask_pad = ~masks
chunk_masks = add_optional_chunk_mask(
xs, masks, self.use_dynamic_chunk, self.use_dynamic_left_chunk,
decoding_chunk_size, self.static_chunk_size,
@@ -215,11 +211,8 @@ class BaseEncoder(nn.Layer):
same shape as the original cnn_cache
"""
assert xs.shape[0] == 1 # batch size must be one
- # tmp_masks is just for interface compatibility
- # TODO(Hui Zhang): stride_slice not support bool tensor
- # tmp_masks = paddle.ones([1, paddle.shape(xs)[1]], dtype=paddle.bool)
- tmp_masks = paddle.ones([1, xs.shape[1]], dtype=paddle.int32)
- tmp_masks = tmp_masks.unsqueeze(1) #[B=1, C=1, T]
+ # tmp_masks is just for interface compatibility, [B=1, C=1, T]
+ tmp_masks = paddle.ones([1, 1, xs.shape[1]], dtype=paddle.bool)
if self.global_cmvn is not None:
xs = self.global_cmvn(xs)
@@ -228,9 +221,8 @@ class BaseEncoder(nn.Layer):
xs, pos_emb, _ = self.embed(xs, tmp_masks, offset=offset)
# after embed, xs=(B=1, chunk_size, hidden-dim)
- elayers = paddle.shape(att_cache)[0]
- cache_t1 = paddle.shape(att_cache)[2]
- chunk_size = paddle.shape(xs)[1]
+ elayers, _, cache_t1, _ = att_cache.shape
+ chunk_size = xs.shape[1]
attention_key_size = cache_t1 + chunk_size
# only used when using `RelPositionMultiHeadedAttention`
@@ -249,25 +241,30 @@ class BaseEncoder(nn.Layer):
for i, layer in enumerate(self.encoders):
# att_cache[i:i+1] = (1, head, cache_t1, d_k*2)
# cnn_cache[i:i+1] = (1, B=1, hidden-dim, cache_t2)
+
+ # WARNING: eliminate if-else cond op in graph
+ # tensor zeros([0,0,0,0]) support [i:i+1] slice, will return zeros([0,0,0,0]) tensor
+ # raw code as below:
+ # att_cache=att_cache[i:i+1] if elayers > 0 else att_cache,
+ # cnn_cache=cnn_cache[i:i+1] if cnn_cache.shape[0] > 0 else cnn_cache,
xs, _, new_att_cache, new_cnn_cache = layer(
xs,
att_mask,
pos_emb,
- att_cache=att_cache[i:i + 1] if elayers > 0 else att_cache,
- cnn_cache=cnn_cache[i:i + 1]
- if paddle.shape(cnn_cache)[0] > 0 else cnn_cache, )
+ att_cache=att_cache[i:i + 1],
+ cnn_cache=cnn_cache[i:i + 1], )
# new_att_cache = (1, head, attention_key_size, d_k*2)
# new_cnn_cache = (B=1, hidden-dim, cache_t2)
r_att_cache.append(new_att_cache[:, :, next_cache_start:, :])
- r_cnn_cache.append(new_cnn_cache.unsqueeze(0)) # add elayer dim
+ r_cnn_cache.append(new_cnn_cache) # add elayer dim
if self.normalize_before:
xs = self.after_norm(xs)
# r_att_cache (elayers, head, T, d_k*2)
- # r_cnn_cache (elayers, B=1, hidden-dim, cache_t2)
+ # r_cnn_cache (elayers, B=1, hidden-dim, cache_t2)
r_att_cache = paddle.concat(r_att_cache, axis=0)
- r_cnn_cache = paddle.concat(r_cnn_cache, axis=0)
+ r_cnn_cache = paddle.stack(r_cnn_cache, axis=0)
return xs, r_att_cache, r_cnn_cache
def forward_chunk_by_chunk(
@@ -397,11 +394,7 @@ class TransformerEncoder(BaseEncoder):
if self.global_cmvn is not None:
xs = self.global_cmvn(xs)
- #TODO(Hui Zhang): self.embed(xs, masks, offset=0), stride_slice not support bool tensor
- xs, pos_emb, masks = self.embed(xs, masks.astype(xs.dtype), offset=0)
- #TODO(Hui Zhang): remove mask.astype, stride_slice not support bool tensor
- masks = masks.astype(paddle.bool)
-
+ xs, pos_emb, masks = self.embed(xs, masks, offset=0)
if cache is None:
cache = [None for _ in range(len(self.encoders))]
new_cache = []
diff --git a/paddlespeech/s2t/modules/fbank.py b/paddlespeech/s2t/modules/fbank.py
new file mode 100644
index 000000000..8d76a4727
--- /dev/null
+++ b/paddlespeech/s2t/modules/fbank.py
@@ -0,0 +1,72 @@
+import paddle
+from paddle import nn
+
+from paddlespeech.audio.compliance import kaldi
+from paddlespeech.s2t.utils.log import Log
+
+logger = Log(__name__).getlog()
+
+__all__ = ['KaldiFbank']
+
+
+class KaldiFbank(nn.Layer):
+ def __init__(
+ self,
+ fs=16000,
+ n_mels=80,
+ n_shift=160, # unit:sample, 10ms
+ win_length=400, # unit:sample, 25ms
+ energy_floor=0.0,
+ dither=0.0):
+ """
+ Args:
+ fs (int): sample rate of the audio
+ n_mels (int): number of mel filter banks
+ n_shift (int): number of points in a frame shift
+            win_length (int): number of points in a frame window
+ energy_floor (float): Floor on energy in Spectrogram computation (absolute)
+ dither (float): Dithering constant. Default 0.0
+ """
+ super().__init__()
+ self.fs = fs
+ self.n_mels = n_mels
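+        # kaldi.fbank expects frame_length/frame_shift in milliseconds,
+        # so convert the sample counts to ms below.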
+ num_point_ms = fs / 1000
+ self.n_frame_length = win_length / num_point_ms
+ self.n_frame_shift = n_shift / num_point_ms
+ self.energy_floor = energy_floor
+ self.dither = dither
+
+ def __repr__(self):
+ return (
+ "{name}(fs={fs}, n_mels={n_mels}, "
+ "n_frame_shift={n_frame_shift}, n_frame_length={n_frame_length}, "
+ "dither={dither}))".format(
+ name=self.__class__.__name__,
+ fs=self.fs,
+ n_mels=self.n_mels,
+ n_frame_shift=self.n_frame_shift,
+ n_frame_length=self.n_frame_length,
+ dither=self.dither, ))
+
+ def forward(self, x: paddle.Tensor):
+ """
+ Args:
+            x (paddle.Tensor): shape (Ti,), a single-channel waveform.
+                [Time, Channel] input and batch mode are not supported.
+
+ Returns:
+ paddle.Tensor: (T, D)
+ """
+ assert x.ndim == 1
+
+ feat = kaldi.fbank(
+ x.unsqueeze(0), # append channel dim, (C, Ti)
+ n_mels=self.n_mels,
+ frame_length=self.n_frame_length,
+ frame_shift=self.n_frame_shift,
+ dither=self.dither,
+ energy_floor=self.energy_floor,
+ sr=self.fs)
+
+ assert feat.ndim == 2 # (T,D)
+ return feat
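+
+
+# Usage sketch (illustrative values):
+#   fbank = KaldiFbank(fs=16000, n_mels=80)
+#   wav = paddle.randn([32000])   # 2 s of 16 kHz mono audio, shape (Ti,)
+#   feat = fbank(wav)             # -> paddle.Tensor of shape (T, 80)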
diff --git a/paddlespeech/s2t/modules/loss.py b/paddlespeech/s2t/modules/loss.py
index 884fb70c1..afd5201aa 100644
--- a/paddlespeech/s2t/modules/loss.py
+++ b/paddlespeech/s2t/modules/loss.py
@@ -85,7 +85,7 @@ class CTCLoss(nn.Layer):
Returns:
[paddle.Tensor]: scalar. If reduction is 'none', then (N), where N = \text{batch size}.
"""
- B = paddle.shape(logits)[0]
+ B = logits.shape[0]
# warp-ctc need logits, and do softmax on logits by itself
# warp-ctc need activation with shape [T, B, V + 1]
# logits: (B, L, D) -> (L, B, D)
@@ -158,7 +158,7 @@ class LabelSmoothingLoss(nn.Layer):
Returns:
loss (paddle.Tensor) : The KL loss, scalar float value
"""
- B, T, D = paddle.shape(x)
+ B, T, D = x.shape
assert D == self.size
x = x.reshape((-1, self.size))
target = target.reshape([-1])
diff --git a/paddlespeech/s2t/modules/mask.py b/paddlespeech/s2t/modules/mask.py
index 1f66c015a..65619eb90 100644
--- a/paddlespeech/s2t/modules/mask.py
+++ b/paddlespeech/s2t/modules/mask.py
@@ -109,12 +109,7 @@ def subsequent_mask(size: int) -> paddle.Tensor:
[1, 1, 1]]
"""
ret = paddle.ones([size, size], dtype=paddle.bool)
- #TODO(Hui Zhang): tril not support bool
- #return paddle.tril(ret)
- ret = ret.astype(paddle.float)
- ret = paddle.tril(ret)
- ret = ret.astype(paddle.bool)
- return ret
+ return paddle.tril(ret)
def subsequent_chunk_mask(
diff --git a/paddlespeech/s2t/modules/subsampling.py b/paddlespeech/s2t/modules/subsampling.py
index 88451ddd7..782a437ee 100644
--- a/paddlespeech/s2t/modules/subsampling.py
+++ b/paddlespeech/s2t/modules/subsampling.py
@@ -139,8 +139,8 @@ class Conv2dSubsampling4(Conv2dSubsampling):
"""
x = x.unsqueeze(1) # (b, c=1, t, f)
x = self.conv(x)
- b, c, t, f = paddle.shape(x)
- x = self.out(x.transpose([0, 2, 1, 3]).reshape([b, t, c * f]))
+ b, c, t, f = x.shape
+ x = self.out(x.transpose([0, 2, 1, 3]).reshape([b, -1, c * f]))
x, pos_emb = self.pos_enc(x, offset)
return x, pos_emb, x_mask[:, :, :-2:2][:, :, :-2:2]
@@ -192,8 +192,8 @@ class Conv2dSubsampling6(Conv2dSubsampling):
"""
x = x.unsqueeze(1) # (b, c, t, f)
x = self.conv(x)
- b, c, t, f = paddle.shape(x)
- x = self.linear(x.transpose([0, 2, 1, 3]).reshape([b, t, c * f]))
+ b, c, t, f = x.shape
+ x = self.linear(x.transpose([0, 2, 1, 3]).reshape([b, -1, c * f]))
x, pos_emb = self.pos_enc(x, offset)
return x, pos_emb, x_mask[:, :, :-2:2][:, :, :-4:3]
@@ -245,6 +245,7 @@ class Conv2dSubsampling8(Conv2dSubsampling):
"""
x = x.unsqueeze(1) # (b, c, t, f)
x = self.conv(x)
- x = self.linear(x.transpose([0, 2, 1, 3]).reshape([b, t, c * f]))
+ b, c, t, f = x.shape
+ x = self.linear(x.transpose([0, 2, 1, 3]).reshape([b, -1, c * f]))
x, pos_emb = self.pos_enc(x, offset)
return x, pos_emb, x_mask[:, :, :-2:2][:, :, :-2:2][:, :, :-2:2]
diff --git a/paddlespeech/s2t/utils/tensor_utils.py b/paddlespeech/s2t/utils/tensor_utils.py
index 422d4f82a..3ac102f3c 100644
--- a/paddlespeech/s2t/utils/tensor_utils.py
+++ b/paddlespeech/s2t/utils/tensor_utils.py
@@ -184,13 +184,8 @@ def th_accuracy(pad_outputs: paddle.Tensor,
pad_pred = pad_outputs.view(pad_targets.shape[0], pad_targets.shape[1],
pad_outputs.shape[1]).argmax(2)
mask = pad_targets != ignore_label
- #TODO(Hui Zhang): sum not support bool type
- # numerator = paddle.sum(
- # pad_pred.masked_select(mask) == pad_targets.masked_select(mask))
- numerator = (
+
+ numerator = paddle.sum(
pad_pred.masked_select(mask) == pad_targets.masked_select(mask))
- numerator = paddle.sum(numerator.type_as(pad_targets))
- #TODO(Hui Zhang): sum not support bool type
- # denominator = paddle.sum(mask)
- denominator = paddle.sum(mask.type_as(pad_targets))
+ denominator = paddle.sum(mask)
return float(numerator) / float(denominator)
diff --git a/paddlespeech/server/conf/ws_conformer_application.yaml b/paddlespeech/server/conf/ws_conformer_application.yaml
index d72eb2379..d5357c853 100644
--- a/paddlespeech/server/conf/ws_conformer_application.yaml
+++ b/paddlespeech/server/conf/ws_conformer_application.yaml
@@ -30,7 +30,7 @@ asr_online:
decode_method:
num_decoding_left_chunks: -1
force_yes: True
- device: # cpu or gpu:id
+ device: cpu # cpu or gpu:id
continuous_decoding: True # enable continue decoding when endpoint detected
am_predictor_conf:
diff --git a/paddlespeech/server/engine/asr/online/python/asr_engine.py b/paddlespeech/server/engine/asr/online/python/asr_engine.py
index ae0260929..536ffe0a9 100644
--- a/paddlespeech/server/engine/asr/online/python/asr_engine.py
+++ b/paddlespeech/server/engine/asr/online/python/asr_engine.py
@@ -580,6 +580,7 @@ class PaddleASRConnectionHanddler:
self.update_result()
beam_size = self.ctc_decode_config.beam_size
+ reverse_weight = getattr(self.ctc_decode_config, 'reverse_weight', 0.0)
hyps = self.searcher.get_hyps()
if hyps is None or len(hyps) == 0:
logger.info("No Hyps!")
@@ -602,6 +603,7 @@ class PaddleASRConnectionHanddler:
hyps_pad = pad_sequence(
hyp_list, batch_first=True, padding_value=self.model.ignore_id)
+ ori_hyps_pad = hyps_pad
hyps_lens = paddle.to_tensor(
[len(hyp[0]) for hyp in hyps], place=self.device,
dtype=paddle.long) # (beam_size,)
@@ -609,16 +611,15 @@ class PaddleASRConnectionHanddler:
self.model.ignore_id)
hyps_lens = hyps_lens + 1 # Add at begining
- encoder_out = self.encoder_out.repeat(beam_size, 1, 1)
- encoder_mask = paddle.ones(
- (beam_size, 1, encoder_out.shape[1]), dtype=paddle.bool)
-
- decoder_out, _, _ = self.model.decoder(
- encoder_out, encoder_mask, hyps_pad,
- hyps_lens) # (beam_size, max_hyps_len, vocab_size)
# ctc score in ln domain
- decoder_out = paddle.nn.functional.log_softmax(decoder_out, axis=-1)
+ # (beam_size, max_hyps_len, vocab_size)
+ decoder_out, r_decoder_out = self.model.forward_attention_decoder(
+ hyps_pad, hyps_lens, self.encoder_out, reverse_weight)
+
decoder_out = decoder_out.numpy()
+ # r_decoder_out will be 0.0, if reverse_weight is 0.0 or decoder is a
+ # conventional transformer decoder.
+ r_decoder_out = r_decoder_out.numpy()
# Only use decoder score for rescoring
best_score = -float('inf')
@@ -631,6 +632,12 @@ class PaddleASRConnectionHanddler:
# last decoder output token is `eos`, for laste decoder input token.
score += decoder_out[i][len(hyp[0])][self.model.eos]
+ if reverse_weight > 0:
+ r_score = 0.0
+ for j, w in enumerate(hyp[0]):
+ r_score += r_decoder_out[i][len(hyp[0]) - j - 1][w]
+ r_score += r_decoder_out[i][len(hyp[0])][self.model.eos]
+ score = score * (1 - reverse_weight) + r_score * reverse_weight
# add ctc score (which in ln domain)
score += hyp[1] * self.ctc_decode_config.ctc_weight
diff --git a/paddlespeech/server/engine/text/python/text_engine.py b/paddlespeech/server/engine/text/python/text_engine.py
index 6167e7784..cc72c0543 100644
--- a/paddlespeech/server/engine/text/python/text_engine.py
+++ b/paddlespeech/server/engine/text/python/text_engine.py
@@ -107,11 +107,14 @@ class PaddleTextConnectionHandler:
assert len(tokens) == len(labels)
text = ''
+ is_fast_model = 'fast' in self.text_engine.config.model_type
for t, l in zip(tokens, labels):
text += t
if l != 0: # Non punc.
- text += self._punc_list[l]
-
+ if is_fast_model:
+ text += self._punc_list[l - 1]
+ else:
+ text += self._punc_list[l]
return text
else:
raise NotImplementedError
@@ -160,14 +163,23 @@ class TextEngine(BaseEngine):
return False
self.executor = TextServerExecutor()
- self.executor._init_from_path(
- task=config.task,
- model_type=config.model_type,
- lang=config.lang,
- cfg_path=config.cfg_path,
- ckpt_path=config.ckpt_path,
- vocab_file=config.vocab_file)
-
+ if 'fast' in config.model_type:
+ self.executor._init_from_path_new(
+ task=config.task,
+ model_type=config.model_type,
+ lang=config.lang,
+ cfg_path=config.cfg_path,
+ ckpt_path=config.ckpt_path,
+ vocab_file=config.vocab_file)
+ else:
+ self.executor._init_from_path(
+ task=config.task,
+ model_type=config.model_type,
+ lang=config.lang,
+ cfg_path=config.cfg_path,
+ ckpt_path=config.ckpt_path,
+ vocab_file=config.vocab_file)
+ logger.info("Using model: %s." % (config.model_type))
logger.info("Initialize Text server engine successfully on device: %s."
% (self.device))
return True
diff --git a/paddlespeech/server/tests/asr/online/README.md b/paddlespeech/server/tests/asr/online/README.md
index e1e4d9506..1d7fa8824 100644
--- a/paddlespeech/server/tests/asr/online/README.md
+++ b/paddlespeech/server/tests/asr/online/README.md
@@ -11,8 +11,8 @@ This document introduces a client for streaming asr service: microphone
### 1. Install
Refer [Install](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/docs/source/install.md).
- **paddlepaddle 2.2.1** 或以上版本。
-It is recommended to use **paddlepaddle 2.2.1** or above.
+ **paddlepaddle 2.4rc** 或以上版本。
+It is recommended to use **paddlepaddle 2.4rc** or above.
You can choose one way from meduim and hard to install paddlespeech.
diff --git a/paddlespeech/server/tests/asr/online/README_cn.md b/paddlespeech/server/tests/asr/online/README_cn.md
index 46dff250e..403216369 100644
--- a/paddlespeech/server/tests/asr/online/README_cn.md
+++ b/paddlespeech/server/tests/asr/online/README_cn.md
@@ -10,7 +10,7 @@
### 1. 安装
请看 [安装文档](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/docs/source/install.md).
-推荐使用 **paddlepaddle 2.2.1** 或以上版本。
+推荐使用 **paddlepaddle 2.4rc** 或以上版本。
你可以从 medium,hard 三中方式中选择一种方式安装 PaddleSpeech。
diff --git a/paddlespeech/t2s/frontend/mix_frontend.py b/paddlespeech/t2s/frontend/mix_frontend.py
index 101a1e503..19c98d53f 100644
--- a/paddlespeech/t2s/frontend/mix_frontend.py
+++ b/paddlespeech/t2s/frontend/mix_frontend.py
@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-import re
from typing import Dict
from typing import List
@@ -30,7 +29,6 @@ class MixFrontend():
self.zh_frontend = Frontend(
phone_vocab_path=phone_vocab_path, tone_vocab_path=tone_vocab_path)
self.en_frontend = English(phone_vocab_path=phone_vocab_path)
- self.SENTENCE_SPLITOR = re.compile(r'([:、,;。?!,;?!][”’]?)')
self.sp_id = self.zh_frontend.vocab_phones["sp"]
self.sp_id_tensor = paddle.to_tensor([self.sp_id])
@@ -47,188 +45,56 @@ class MixFrontend():
else:
return False
- def is_number(self, char):
- if char >= '\u0030' and char <= '\u0039':
- return True
- else:
- return False
-
def is_other(self, char):
- if not (self.is_chinese(char) or self.is_number(char) or
- self.is_alphabet(char)):
+ if not (self.is_chinese(char) or self.is_alphabet(char)):
return True
else:
return False
- def is_end(self, before_char, after_char) -> bool:
- flag = 0
- for char in (before_char, after_char):
- if self.is_alphabet(char) or char == " ":
- flag += 1
- if flag == 2:
- return True
- else:
- return False
-
- def _replace(self, text: str) -> str:
- new_text = ""
-
- # get "." indexs
- point = "."
- point_indexs = []
- index = -1
- for i in range(text.count(point)):
- index = text.find(".", index + 1, len(text))
- point_indexs.append(index)
-
- # replace "." -> "。" when English sentence ending
- if len(point_indexs) == 0:
- new_text = text
-
- elif len(point_indexs) == 1:
- point_index = point_indexs[0]
- if point_index == 0 or point_index == len(text) - 1:
- new_text = text
- else:
- if not self.is_end(text[point_index - 1], text[point_index +
- 1]):
- new_text = text
- else:
- new_text = text[:point_index] + "。" + text[point_index + 1:]
-
- elif len(point_indexs) == 2:
- first_index = point_indexs[0]
- end_index = point_indexs[1]
-
- # first
- if first_index != 0:
- if not self.is_end(text[first_index - 1], text[first_index +
- 1]):
- new_text += (text[:first_index] + ".")
- else:
- new_text += (text[:first_index] + "。")
- else:
- new_text += "."
- # last
- if end_index != len(text) - 1:
- if not self.is_end(text[end_index - 1], text[end_index + 1]):
- new_text += text[point_indexs[-2] + 1:]
- else:
- new_text += (text[point_indexs[-2] + 1:end_index] + "。" +
- text[end_index + 1:])
- else:
- new_text += "."
-
- else:
- first_index = point_indexs[0]
- end_index = point_indexs[-1]
- # first
- if first_index != 0:
- if not self.is_end(text[first_index - 1], text[first_index +
- 1]):
- new_text += (text[:first_index] + ".")
- else:
- new_text += (text[:first_index] + "。")
- else:
- new_text += "."
- # middle
- for j in range(1, len(point_indexs) - 1):
- point_index = point_indexs[j]
- if not self.is_end(text[point_index - 1], text[point_index +
- 1]):
- new_text += (
- text[point_indexs[j - 1] + 1:point_index] + ".")
- else:
- new_text += (
- text[point_indexs[j - 1] + 1:point_index] + "。")
- # last
- if end_index != len(text) - 1:
- if not self.is_end(text[end_index - 1], text[end_index + 1]):
- new_text += text[point_indexs[-2] + 1:]
- else:
- new_text += (text[point_indexs[-2] + 1:end_index] + "。" +
- text[end_index + 1:])
- else:
- new_text += "."
-
- return new_text
-
- def _split(self, text: str) -> List[str]:
- text = re.sub(r'[《》【】<=>{}()()#&@“”^_|…\\]', '', text)
- # 替换英文句子的句号 "." --> "。" 用于后续分句
- text = self._replace(text)
- text = self.SENTENCE_SPLITOR.sub(r'\1\n', text)
- text = text.strip()
- sentences = [sentence.strip() for sentence in re.split(r'\n+', text)]
- return sentences
-
- def _distinguish(self, text: str) -> List[str]:
+ def get_segment(self, text: str) -> List[str]:
# sentence --> [ch_part, en_part, ch_part, ...]
-
segments = []
types = []
-
flag = 0
temp_seg = ""
temp_lang = ""
# Determine the type of each character. type: blank, chinese, alphabet, number, unk and point.
for ch in text:
- if ch == ".":
- types.append("point")
- elif self.is_chinese(ch):
+ if self.is_chinese(ch):
types.append("zh")
elif self.is_alphabet(ch):
types.append("en")
- elif ch == " ":
- types.append("blank")
- elif self.is_number(ch):
- types.append("num")
else:
- types.append("unk")
+ types.append("other")
assert len(types) == len(text)
for i in range(len(types)):
-
# find the first char of the seg
if flag == 0:
- # 首个字符是中文,英文或者数字
- if types[i] == "zh" or types[i] == "en" or types[i] == "num":
- temp_seg += text[i]
- temp_lang = types[i]
- flag = 1
+ temp_seg += text[i]
+ temp_lang = types[i]
+ flag = 1
else:
- # 数字和小数点均与前面的字符合并,类型属于前面一个字符的类型
- if types[i] == temp_lang or types[i] == "num" or types[
- i] == "point":
- temp_seg += text[i]
-
- # 数字与后面的任意字符都拼接
- elif temp_lang == "num":
- temp_seg += text[i]
- if types[i] == "zh" or types[i] == "en":
+ if temp_lang == "other":
+ if types[i] == temp_lang:
+ temp_seg += text[i]
+ else:
+ temp_seg += text[i]
temp_lang = types[i]
- # 如果是空格则与前面字符拼接
- elif types[i] == "blank":
- temp_seg += text[i]
-
- elif types[i] == "unk":
- pass
-
else:
- segments.append((temp_seg, temp_lang))
-
- if types[i] == "zh" or types[i] == "en":
+ if types[i] == temp_lang:
+ temp_seg += text[i]
+ elif types[i] == "other":
+ temp_seg += text[i]
+ else:
+ segments.append((temp_seg, temp_lang))
temp_seg = text[i]
temp_lang = types[i]
flag = 1
- else:
- flag = 0
- temp_seg = ""
- temp_lang = ""
segments.append((temp_seg, temp_lang))
@@ -241,34 +107,30 @@ class MixFrontend():
add_sp: bool=True,
to_tensor: bool=True) -> Dict[str, List[paddle.Tensor]]:
- sentences = self._split(sentence)
+ segments = self.get_segment(sentence)
+
phones_list = []
result = {}
- for text in sentences:
- phones_seg = []
- segments = self._distinguish(text)
- for seg in segments:
- content = seg[0]
- lang = seg[1]
- if content != '':
- if lang == "en":
- input_ids = self.en_frontend.get_input_ids(
- content, merge_sentences=True, to_tensor=to_tensor)
- else:
- input_ids = self.zh_frontend.get_input_ids(
- content,
- merge_sentences=True,
- get_tone_ids=get_tone_ids,
- to_tensor=to_tensor)
- phones_seg.append(input_ids["phone_ids"][0])
- if add_sp:
- phones_seg.append(self.sp_id_tensor)
-
- if phones_seg == []:
- phones_seg.append(self.sp_id_tensor)
- phones = paddle.concat(phones_seg)
- phones_list.append(phones)
+ for seg in segments:
+ content = seg[0]
+ lang = seg[1]
+ if content != '':
+ if lang == "en":
+ input_ids = self.en_frontend.get_input_ids(
+ content, merge_sentences=False, to_tensor=to_tensor)
+ else:
+ input_ids = self.zh_frontend.get_input_ids(
+ content,
+ merge_sentences=False,
+ get_tone_ids=get_tone_ids,
+ to_tensor=to_tensor)
+ if add_sp:
+ input_ids["phone_ids"][-1] = paddle.concat(
+ [input_ids["phone_ids"][-1], self.sp_id_tensor])
+
+ for phones in input_ids["phone_ids"]:
+ phones_list.append(phones)
if merge_sentences:
merge_list = paddle.concat(phones_list)
diff --git a/paddlespeech/t2s/frontend/polyphonic.yaml b/paddlespeech/t2s/frontend/polyphonic.yaml
index 51b76f23f..6885035e7 100644
--- a/paddlespeech/t2s/frontend/polyphonic.yaml
+++ b/paddlespeech/t2s/frontend/polyphonic.yaml
@@ -46,4 +46,5 @@ polyphonic:
幸免于难: ['xing4','mian3','yu2','nan4']
恶行: ['e4','xing2']
唉: ['ai4']
-
+ 扎实: ['zha1','shi2']
+ 干将: ['gan4','jiang4']
\ No newline at end of file
diff --git a/paddlespeech/t2s/frontend/tone_sandhi.py b/paddlespeech/t2s/frontend/tone_sandhi.py
index 10a9540c3..42f7b8b2f 100644
--- a/paddlespeech/t2s/frontend/tone_sandhi.py
+++ b/paddlespeech/t2s/frontend/tone_sandhi.py
@@ -65,7 +65,7 @@ class ToneSandhi():
'男子', '女子', '分子', '原子', '量子', '莲子', '石子', '瓜子', '电子', '人人', '虎虎',
'幺幺', '干嘛', '学子', '哈哈', '数数', '袅袅', '局地', '以下', '娃哈哈', '花花草草', '留得',
'耕地', '想想', '熙熙', '攘攘', '卵子', '死死', '冉冉', '恳恳', '佼佼', '吵吵', '打打',
- '考考', '整整', '莘莘'
+ '考考', '整整', '莘莘', '落地', '算子', '家家户户'
}
self.punc = ":,;。?!“”‘’':,;.?!"
diff --git a/tests/unit/asr/reverse_pad_list.py b/tests/unit/asr/reverse_pad_list.py
new file mode 100644
index 000000000..215ed5ceb
--- /dev/null
+++ b/tests/unit/asr/reverse_pad_list.py
@@ -0,0 +1,156 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import unittest
+
+import paddle
+
+import paddlespeech.s2t # noqa: F401
+from paddlespeech.audio.utils.tensor_utils import add_sos_eos
+from paddlespeech.audio.utils.tensor_utils import pad_sequence
+
+# from paddlespeech.audio.utils.tensor_utils import reverse_pad_list
+
+
+def reverse_pad_list(ys_pad: paddle.Tensor,
+ ys_lens: paddle.Tensor,
+ pad_value: float=-1.0) -> paddle.Tensor:
+ """Reverse padding for the list of tensors.
+ Args:
+ ys_pad (tensor): The padded tensor (B, Tokenmax).
+ ys_lens (tensor): The lens of token seqs (B)
+ pad_value (int): Value for padding.
+ Returns:
+ Tensor: Padded tensor (B, Tokenmax).
+ Examples:
+ >>> x
+ tensor([[1, 2, 3, 4], [5, 6, 7, 0], [8, 9, 0, 0]])
+        >>> reverse_pad_list(x, paddle.to_tensor([4, 3, 2]), 0)
+ tensor([[4, 3, 2, 1],
+ [7, 6, 5, 0],
+ [9, 8, 0, 0]])
+ """
+ r_ys_pad = pad_sequence([(paddle.flip(y[:i], [0]))
+ for y, i in zip(ys_pad, ys_lens)], True, pad_value)
+ return r_ys_pad
+
+
+def naive_reverse_pad_list_with_sos_eos(r_hyps,
+ r_hyps_lens,
+ sos=5000,
+ eos=5000,
+ ignore_id=-1):
+ r_hyps = reverse_pad_list(r_hyps, r_hyps_lens, float(ignore_id))
+ r_hyps, _ = add_sos_eos(r_hyps, sos, eos, ignore_id)
+ return r_hyps
+
+
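+# The gather-based variant below reproduces the same result with pure tensor
+# ops (no per-utterance Python-side flips), which is what the "static" test
+# in TestU2Model exercises.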
+def reverse_pad_list_with_sos_eos(r_hyps,
+ r_hyps_lens,
+ sos=5000,
+ eos=5000,
+ ignore_id=-1):
+ # >>> r_hyps = reverse_pad_list(r_hyps, r_hyps_lens, float(self.ignore_id))
+ # >>> r_hyps, _ = add_sos_eos(r_hyps, self.sos, self.eos, self.ignore_id)
+ max_len = paddle.max(r_hyps_lens)
+ index_range = paddle.arange(0, max_len, 1)
+ seq_len_expand = r_hyps_lens.unsqueeze(1)
+ seq_mask = seq_len_expand > index_range # (beam, max_len)
+
+ index = (seq_len_expand - 1) - index_range # (beam, max_len)
+ # >>> index
+ # >>> tensor([[ 2, 1, 0],
+ # >>> [ 2, 1, 0],
+ # >>> [ 0, -1, -2]])
+ index = index * seq_mask
+
+ # >>> index
+ # >>> tensor([[2, 1, 0],
+ # >>> [2, 1, 0],
+ # >>> [0, 0, 0]])
+ def paddle_gather(x, dim, index):
+ index_shape = index.shape
+ index_flatten = index.flatten()
+ if dim < 0:
+ dim = len(x.shape) + dim
+ nd_index = []
+ for k in range(len(x.shape)):
+ if k == dim:
+ nd_index.append(index_flatten)
+ else:
+ reshape_shape = [1] * len(x.shape)
+ reshape_shape[k] = x.shape[k]
+ x_arange = paddle.arange(x.shape[k], dtype=index.dtype)
+ x_arange = x_arange.reshape(reshape_shape)
+ dim_index = paddle.expand(x_arange, index_shape).flatten()
+ nd_index.append(dim_index)
+ ind2 = paddle.transpose(paddle.stack(nd_index), [1, 0]).astype("int64")
+ paddle_out = paddle.gather_nd(x, ind2).reshape(index_shape)
+ return paddle_out
+
+ r_hyps = paddle_gather(r_hyps, 1, index)
+ # >>> r_hyps
+ # >>> tensor([[3, 2, 1],
+ # >>> [4, 8, 9],
+ # >>> [2, 2, 2]])
+ r_hyps = paddle.where(seq_mask, r_hyps, eos)
+ # >>> r_hyps
+ # >>> tensor([[3, 2, 1],
+ # >>> [4, 8, 9],
+ # >>> [2, eos, eos]])
+ B = r_hyps.shape[0]
+ _sos = paddle.ones([B, 1], dtype=r_hyps.dtype) * sos
+ # r_hyps = paddle.concat([hyps[:, 0:1], r_hyps], axis=1)
+ r_hyps = paddle.concat([_sos, r_hyps], axis=1)
+ # >>> r_hyps
+ # >>> tensor([[sos, 3, 2, 1],
+ # >>> [sos, 4, 8, 9],
+ # >>> [sos, 2, eos, eos]])
+ return r_hyps
+
+
+class TestU2Model(unittest.TestCase):
+ def setUp(self):
+ paddle.set_device('cpu')
+
+ self.sos = 5000
+ self.eos = 5000
+ self.ignore_id = -1
+ self.reverse_hyps = paddle.to_tensor([[4, 3, 2, 1, -1],
+ [5, 4, 3, 2, 1]])
+ self.reverse_hyps_sos_eos = paddle.to_tensor(
+ [[self.sos, 4, 3, 2, 1, self.eos], [self.sos, 5, 4, 3, 2, 1]])
+
+ self.hyps = paddle.to_tensor([[1, 2, 3, 4, -1], [1, 2, 3, 4, 5]])
+
+ self.hyps_lens = paddle.to_tensor([4, 5], paddle.int32)
+
+ def test_reverse_pad_list(self):
+ r_hyps = reverse_pad_list(self.hyps, self.hyps_lens)
+ self.assertSequenceEqual(r_hyps.tolist(), self.reverse_hyps.tolist())
+
+ def test_naive_reverse_pad_list_with_sos_eos(self):
+ r_hyps_sos_eos = naive_reverse_pad_list_with_sos_eos(self.hyps,
+ self.hyps_lens)
+ self.assertSequenceEqual(r_hyps_sos_eos.tolist(),
+ self.reverse_hyps_sos_eos.tolist())
+
+ def test_static_reverse_pad_list_with_sos_eos(self):
+ r_hyps_sos_eos_static = reverse_pad_list_with_sos_eos(self.hyps,
+ self.hyps_lens)
+ self.assertSequenceEqual(r_hyps_sos_eos_static.tolist(),
+ self.reverse_hyps_sos_eos.tolist())
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tests/unit/cli/test_cli.sh b/tests/unit/cli/test_cli.sh
index 15604961d..c6837c303 100755
--- a/tests/unit/cli/test_cli.sh
+++ b/tests/unit/cli/test_cli.sh
@@ -7,7 +7,7 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/cat.wav https://paddlespe
paddlespeech cls --input ./cat.wav --topk 10
# Punctuation_restoration
-paddlespeech text --input 今天的天气真不错啊你下午有空吗我想约你一起去吃饭
+paddlespeech text --input 今天的天气真不错啊你下午有空吗我想约你一起去吃饭 --model ernie_linear_p3_wudao_fast
# Speech_recognition
wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespeech.bj.bcebos.com/PaddleAudio/en.wav