diff --git a/.bashrc b/.bashrc new file mode 100644 index 000000000..8abbb3c7d --- /dev/null +++ b/.bashrc @@ -0,0 +1,15 @@ +unset GREP_OPTIONS + +# https://zhuanlan.zhihu.com/p/33050965 +alias nvs='nvidia-smi' +alias his='history' +alias jobs='jobs -l' +alias ports='netstat -tulanp' +alias wget='wget -c' + +## Colorize the grep command output for ease of use (good for log files)## +alias grep='grep --color=auto' +alias egrep='egrep --color=auto' +alias fgrep='fgrep --color=auto' + + diff --git a/.flake8 b/.flake8 index 722899439..44685f23a 100644 --- a/.flake8 +++ b/.flake8 @@ -42,6 +42,10 @@ ignore = # these ignores are from flake8-comprehensions; please fix! C400,C401,C402,C403,C404,C405,C407,C411,C413,C414,C415 + +per-file-ignores = + */__init__.py: F401 + # Specify the list of error codes you wish Flake8 to report. select = E, diff --git a/.gitignore b/.gitignore index 6fa377222..e4134a082 100644 --- a/.gitignore +++ b/.gitignore @@ -10,8 +10,13 @@ .ipynb_checkpoints *.npz *.done +*.whl tools/venv tools/kenlm tools/sox-14.4.2 tools/soxbindings +tools/montreal-forced-aligner/ +tools/Montreal-Forced-Aligner/ + +*output/ diff --git a/.notebook/Linear_test.ipynb b/.notebook/Linear_test.ipynb deleted file mode 100644 index 5c7370cf3..000000000 --- a/.notebook/Linear_test.ipynb +++ /dev/null @@ -1,605 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "academic-surname", - "metadata": {}, - "outputs": [], - "source": [ - "import paddle\n", - "from paddle import nn" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "fundamental-treasure", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/DeepSpeech-2.x/tools/venv-dev/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. 
Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n", - " and should_run_async(code)\n" - ] - } - ], - "source": [ - "L = nn.Linear(256, 2048)\n", - "L2 = nn.Linear(2048, 256)" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "consolidated-elephant", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "import torch\n" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "moderate-noise", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "float64\n", - "Tensor(shape=[2, 51, 256], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n", - " [[[-1.54171216, -2.61531472, -1.79881978, ..., -0.31395876, 0.56513089, -0.44516513],\n", - " [-0.79492962, 1.91157901, 0.66567147, ..., 0.54825783, -1.01471853, -0.84924090],\n", - " [-1.22556651, -0.36225814, 0.65063190, ..., 0.65726501, 0.05563191, 0.09009409],\n", - " ...,\n", - " [ 0.38615900, -0.77905393, 0.99732304, ..., -1.38463700, -3.32365036, -1.31089687],\n", - " [ 0.05579993, 0.06885809, -1.66662002, ..., -0.23346378, -3.29372883, 1.30561364],\n", - " [ 1.90676069, 1.95093191, -0.28849599, ..., -0.06860496, 0.95347673, 1.00475824]],\n", - "\n", - " [[-0.91453546, 0.55298805, -1.06146812, ..., -0.86378336, 1.00454640, 1.26062179],\n", - " [ 0.10223761, 0.81301165, 2.36865163, ..., 0.16821407, 0.29240361, 1.05408621],\n", - " [-1.33196676, 1.94433689, 0.01934209, ..., 0.48036841, 0.51585966, 1.22893548],\n", - " ...,\n", - " [-0.19558455, -0.47075930, 0.90796155, ..., -1.28598249, -0.24321797, 0.17734711],\n", - " [ 0.89819717, -1.39516675, 0.17138045, ..., 2.39761519, 1.76364994, -0.52177650],\n", - " [ 0.94122332, -0.18581429, 1.36099780, ..., 0.67647684, -0.04699665, 1.51205540]]])\n", - "tensor([[[-1.5417, -2.6153, -1.7988, ..., -0.3140, 0.5651, -0.4452],\n", - " [-0.7949, 1.9116, 0.6657, ..., 0.5483, -1.0147, -0.8492],\n", - " [-1.2256, -0.3623, 0.6506, ..., 0.6573, 0.0556, 0.0901],\n", - " ...,\n", - " [ 0.3862, -0.7791, 0.9973, ..., -1.3846, -3.3237, -1.3109],\n", - " [ 0.0558, 0.0689, -1.6666, ..., -0.2335, -3.2937, 1.3056],\n", - " [ 1.9068, 1.9509, -0.2885, ..., -0.0686, 0.9535, 1.0048]],\n", - "\n", - " [[-0.9145, 0.5530, -1.0615, ..., -0.8638, 1.0045, 1.2606],\n", - " [ 0.1022, 0.8130, 2.3687, ..., 0.1682, 0.2924, 1.0541],\n", - " [-1.3320, 1.9443, 0.0193, ..., 0.4804, 0.5159, 1.2289],\n", - " ...,\n", - " [-0.1956, -0.4708, 0.9080, ..., -1.2860, -0.2432, 0.1773],\n", - " [ 0.8982, -1.3952, 0.1714, ..., 2.3976, 1.7636, -0.5218],\n", - " [ 0.9412, -0.1858, 1.3610, ..., 0.6765, -0.0470, 1.5121]]])\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/DeepSpeech-2.x/tools/venv-dev/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. 
Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n", - " and should_run_async(code)\n" - ] - } - ], - "source": [ - "x = np.random.randn(2, 51, 256)\n", - "print(x.dtype)\n", - "px = paddle.to_tensor(x, dtype='float32')\n", - "tx = torch.tensor(x, dtype=torch.float32)\n", - "print(px)\n", - "print(tx)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cooked-progressive", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "mechanical-prisoner", - "metadata": {}, - "outputs": [], - "source": [ - "data = np.load('enc_0_ff_out.npz', allow_pickle=True)\n", - "t_norm_ff = data['norm_ff']\n", - "t_ff_out = data['ff_out']\n", - "t_ff_l_x = data['ff_l_x']\n", - "t_ff_l_a_x = data['ff_l_a_x']\n", - "t_ff_l_a_l_x = data['ff_l_a_l_x']\n", - "t_ps = data['ps']" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "indie-marriage", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "assured-zambia", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "True\n", - "True\n", - "True\n", - "True\n" - ] - } - ], - "source": [ - "L.set_state_dict({'weight': t_ps[0].T, 'bias': t_ps[1]})\n", - "L2.set_state_dict({'weight': t_ps[2].T, 'bias': t_ps[3]})\n", - "\n", - "ps = []\n", - "for n, p in L.named_parameters():\n", - " ps.append(p)\n", - "\n", - "for n, p in L2.state_dict().items():\n", - " ps.append(p)\n", - " \n", - "for p, tp in zip(ps, t_ps):\n", - " print(np.allclose(p.numpy(), tp.T))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "committed-jacob", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "extreme-traffic", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "optimum-milwaukee", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "viral-indian", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "True\n", - "True\n", - "True\n", - "True\n" - ] - } - ], - "source": [ - "# data = np.load('enc_0_ff_out.npz', allow_pickle=True)\n", - "# t_norm_ff = data['norm_ff']\n", - "# t_ff_out = data['ff_out']\n", - "# t_ff_l_x = data['ff_l_x']\n", - "# t_ff_l_a_x = data['ff_l_a_x']\n", - "# t_ff_l_a_l_x = data['ff_l_a_l_x']\n", - "# t_ps = data['ps']\n", - "TL = torch.nn.Linear(256, 2048)\n", - "TL2 = torch.nn.Linear(2048, 256)\n", - "TL.load_state_dict({'weight': torch.tensor(t_ps[0]), 'bias': torch.tensor(t_ps[1])})\n", - "TL2.load_state_dict({'weight': torch.tensor(t_ps[2]), 'bias': torch.tensor(t_ps[3])})\n", - "\n", - "# for n, p in TL.named_parameters():\n", - "# print(n, p)\n", - "# for n, p in TL2.named_parameters():\n", - "# print(n, p)\n", - "\n", - "ps = []\n", - "for n, p in TL.state_dict().items():\n", - " ps.append(p.data.numpy())\n", - " \n", - "for n, p in TL2.state_dict().items():\n", - " ps.append(p.data.numpy())\n", - " \n", - "for p, tp in zip(ps, t_ps):\n", - " print(np.allclose(p, tp))" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "skilled-vietnamese", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[[[ 0.67277956 0.08313607 
-0.62761104 ... -0.17480263 0.42718208\n", - " -0.5787626 ]\n", - " [ 0.91516656 0.5393416 1.7159258 ... 0.06144593 0.06486575\n", - " -0.03350811]\n", - " [ 0.438351 0.6227843 0.24096036 ... 1.0912522 -0.90929437\n", - " -1.012989 ]\n", - " ...\n", - " [ 0.68631977 0.14240924 0.10763275 ... -0.11513516 0.48065388\n", - " 0.04070369]\n", - " [-0.9525228 0.23197874 0.31264272 ... 0.5312439 0.18773697\n", - " -0.8450228 ]\n", - " [ 0.42024016 -0.04561988 0.54541194 ... -0.41933843 -0.00436018\n", - " -0.06663495]]\n", - "\n", - " [[-0.11638781 -0.33566502 -0.20887226 ... 0.17423287 -0.9195841\n", - " -0.8161046 ]\n", - " [-0.3469874 0.88269687 -0.11887559 ... -0.15566081 0.16357468\n", - " -0.20766167]\n", - " [-0.3847657 0.3984318 -0.06963477 ... -0.00360622 1.2360432\n", - " -0.26811332]\n", - " ...\n", - " [ 0.08230796 -0.46158582 0.54582864 ... 0.15747628 -0.44790155\n", - " 0.06020184]\n", - " [-0.8095085 0.43163058 -0.42837143 ... 0.8627463 0.90656304\n", - " 0.15847842]\n", - " [-1.485811 -0.18216592 -0.8882585 ... 0.32596245 0.7822631\n", - " -0.6460344 ]]]\n", - "[[[ 0.67278004 0.08313602 -0.6276114 ... -0.17480245 0.42718196\n", - " -0.5787625 ]\n", - " [ 0.91516703 0.5393413 1.7159253 ... 0.06144581 0.06486579\n", - " -0.03350812]\n", - " [ 0.43835106 0.62278455 0.24096027 ... 1.0912521 -0.9092943\n", - " -1.0129892 ]\n", - " ...\n", - " [ 0.6863195 0.14240888 0.10763284 ... -0.11513527 0.48065376\n", - " 0.04070365]\n", - " [-0.9525231 0.23197863 0.31264275 ... 0.53124386 0.18773702\n", - " -0.84502304]\n", - " [ 0.42024007 -0.04561983 0.545412 ... -0.41933888 -0.00436005\n", - " -0.066635 ]]\n", - "\n", - " [[-0.11638767 -0.33566508 -0.20887226 ... 0.17423296 -0.9195838\n", - " -0.8161046 ]\n", - " [-0.34698725 0.88269705 -0.11887549 ... -0.15566081 0.16357464\n", - " -0.20766166]\n", - " [-0.3847657 0.3984319 -0.06963488 ... -0.00360619 1.2360426\n", - " -0.26811326]\n", - " ...\n", - " [ 0.08230786 -0.4615857 0.5458287 ... 0.15747619 -0.44790167\n", - " 0.06020182]\n", - " [-0.8095083 0.4316307 -0.42837155 ... 0.862746 0.9065631\n", - " 0.15847899]\n", - " [-1.485811 -0.18216613 -0.8882584 ... 0.32596254 0.7822631\n", - " -0.6460344 ]]]\n", - "True\n", - "False\n" - ] - } - ], - "source": [ - "y = L(px)\n", - "print(y.numpy())\n", - "\n", - "ty = TL(tx)\n", - "print(ty.data.numpy())\n", - "print(np.allclose(px.numpy(), tx.detach().numpy()))\n", - "print(np.allclose(y.numpy(), ty.detach().numpy()))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "incorrect-allah", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "prostate-cameroon", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "governmental-surge", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[[ 0.04476918 0.554463 -0.3027508 ... -0.49600336 0.3751858\n", - " 0.8254095 ]\n", - " [ 0.95594174 -0.29528382 -1.2899452 ... 0.43718258 0.05584608\n", - " -0.06974669]]\n", - "[[ 0.04476918 0.5544631 -0.3027507 ... -0.49600336 0.37518573\n", - " 0.8254096 ]\n", - " [ 0.95594174 -0.29528376 -1.2899454 ... 
0.4371827 0.05584623\n", - " -0.0697467 ]]\n", - "True\n", - "False\n", - "True\n" - ] - } - ], - "source": [ - "x = np.random.randn(2, 256)\n", - "px = paddle.to_tensor(x, dtype='float32')\n", - "tx = torch.tensor(x, dtype=torch.float32)\n", - "y = L(px)\n", - "print(y.numpy())\n", - "ty = TL(tx)\n", - "print(ty.data.numpy())\n", - "print(np.allclose(px.numpy(), tx.detach().numpy()))\n", - "print(np.allclose(y.numpy(), ty.detach().numpy()))\n", - "print(np.allclose(y.numpy(), ty.detach().numpy(), atol=1e-5))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "confidential-jacket", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "improved-civilization", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "5e7e7c9fde8350084abf1898cf52651cfc84b17a\n" - ] - } - ], - "source": [ - "print(paddle.version.commit)" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "d1e2d3b4", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "['__builtins__',\n", - " '__cached__',\n", - " '__doc__',\n", - " '__file__',\n", - " '__loader__',\n", - " '__name__',\n", - " '__package__',\n", - " '__spec__',\n", - " 'commit',\n", - " 'full_version',\n", - " 'istaged',\n", - " 'major',\n", - " 'minor',\n", - " 'mkl',\n", - " 'patch',\n", - " 'rc',\n", - " 'show',\n", - " 'with_mkl']" - ] - }, - "execution_count": 11, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "dir(paddle.version)" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "c880c719", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "2.1.0\n" - ] - } - ], - "source": [ - "print(paddle.version.full_version)" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "f26977bf", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "commit: 5e7e7c9fde8350084abf1898cf52651cfc84b17a\n", - "None\n" - ] - } - ], - "source": [ - "print(paddle.version.show())" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "id": "04ad47f6", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "1.6.0\n" - ] - } - ], - "source": [ - "print(torch.__version__)" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "e1e03830", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "['__builtins__',\n", - " '__cached__',\n", - " '__doc__',\n", - " '__file__',\n", - " '__loader__',\n", - " '__name__',\n", - " '__package__',\n", - " '__spec__',\n", - " '__version__',\n", - " 'cuda',\n", - " 'debug',\n", - " 'git_version',\n", - " 'hip']" - ] - }, - "execution_count": 15, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "dir(torch.version)" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "id": "4ad0389b", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'b31f58de6fa8bbda5353b3c77d9be4914399724d'" - ] - }, - "execution_count": 19, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "torch.version.git_version" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "id": "7870ea10", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'10.2'" - ] - }, - "execution_count": 21, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "torch.version.cuda" - ] - 
}, - { - "cell_type": "code", - "execution_count": null, - "id": "db8ee5a7", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6321ec2a", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.0" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/.notebook/WarmupLR.ipynb b/.notebook/WarmupLR.ipynb deleted file mode 100644 index 21abf9cbe..000000000 --- a/.notebook/WarmupLR.ipynb +++ /dev/null @@ -1,339 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "d6a0e098", - "metadata": {}, - "outputs": [], - "source": [ - "from typing import Union\n", - "\n", - "import torch\n", - "from torch.optim.lr_scheduler import _LRScheduler\n", - "\n", - "from typeguard import check_argument_types\n", - "\n", - "\n", - "class WarmupLR(_LRScheduler):\n", - " \"\"\"The WarmupLR scheduler\n", - " This scheduler is almost same as NoamLR Scheduler except for following\n", - " difference:\n", - " NoamLR:\n", - " lr = optimizer.lr * model_size ** -0.5\n", - " * min(step ** -0.5, step * warmup_step ** -1.5)\n", - " WarmupLR:\n", - " lr = optimizer.lr * warmup_step ** 0.5\n", - " * min(step ** -0.5, step * warmup_step ** -1.5)\n", - " Note that the maximum lr equals to optimizer.lr in this scheduler.\n", - " \"\"\"\n", - "\n", - " def __init__(\n", - " self,\n", - " optimizer: torch.optim.Optimizer,\n", - " warmup_steps: Union[int, float] = 25000,\n", - " last_epoch: int = -1,\n", - " ):\n", - " assert check_argument_types()\n", - " self.warmup_steps = warmup_steps\n", - "\n", - " # __init__() must be invoked before setting field\n", - " # because step() is also invoked in __init__()\n", - " super().__init__(optimizer, last_epoch)\n", - "\n", - " def __repr__(self):\n", - " return f\"{self.__class__.__name__}(warmup_steps={self.warmup_steps})\"\n", - "\n", - " def get_lr(self):\n", - " step_num = self.last_epoch + 1\n", - " return [\n", - " lr\n", - " * self.warmup_steps ** 0.5\n", - " * min(step_num ** -0.5, step_num * self.warmup_steps ** -1.5)\n", - " for lr in self.base_lrs\n", - " ]\n", - "\n", - " def set_step(self, step: int):\n", - " self.last_epoch = step" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "0d496677", - "metadata": {}, - "outputs": [], - "source": [ - "import torch.optim as optim\n", - "model = torch.nn.Linear(10, 200)\n", - "optimizer = optim.Adam(model.parameters())\n", - "scheduler = WarmupLR(optimizer, warmup_steps=25000)" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "e3e3f3dc", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "0 0.0 -1\n" - ] - } - ], - "source": [ - "infos = {}\n", - "start_epoch = infos.get('epoch', -1) + 1\n", - "cv_loss = infos.get('cv_loss', 0.0)\n", - "step = infos.get('step', -1)\n", - "print(start_epoch, cv_loss, step)" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "dc3d550c", - "metadata": {}, - "outputs": [], - "source": [ - "scheduler.set_step(step)" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "e527634e", - "metadata": {}, - "outputs": [], - "source": [ - 
"lrs=[]\n", - "for i in range(100000):\n", - " scheduler.step()\n", - " lrs.append(scheduler.get_lr())" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "id": "f1452db9", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Collecting matplotlib\n", - " Downloading matplotlib-3.4.1-cp38-cp38-manylinux1_x86_64.whl (10.3 MB)\n", - "\u001b[K |████████████████████████████████| 10.3 MB 575 kB/s eta 0:00:01\n", - "\u001b[?25hCollecting kiwisolver>=1.0.1\n", - " Downloading kiwisolver-1.3.1-cp38-cp38-manylinux1_x86_64.whl (1.2 MB)\n", - "\u001b[K |████████████████████████████████| 1.2 MB 465 kB/s eta 0:00:01\n", - "\u001b[?25hRequirement already satisfied: pillow>=6.2.0 in /workspace/wenet/venv/lib/python3.8/site-packages (from matplotlib) (8.1.2)\n", - "Requirement already satisfied: numpy>=1.16 in /workspace/wenet/venv/lib/python3.8/site-packages (from matplotlib) (1.20.1)\n", - "Requirement already satisfied: python-dateutil>=2.7 in /workspace/wenet/venv/lib/python3.8/site-packages (from matplotlib) (2.8.1)\n", - "Collecting cycler>=0.10\n", - " Downloading cycler-0.10.0-py2.py3-none-any.whl (6.5 kB)\n", - "Requirement already satisfied: pyparsing>=2.2.1 in /workspace/wenet/venv/lib/python3.8/site-packages (from matplotlib) (2.4.7)\n", - "Requirement already satisfied: six in /workspace/wenet/venv/lib/python3.8/site-packages (from cycler>=0.10->matplotlib) (1.15.0)\n", - "Installing collected packages: kiwisolver, cycler, matplotlib\n", - "Successfully installed cycler-0.10.0 kiwisolver-1.3.1 matplotlib-3.4.1\n" - ] - } - ], - "source": [ - "!pip install matplotlib\n", - "import matplotlib.pyplot as plt\n", - "\n", - "%matplotlib inline" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "0f36d04f", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[]" - ] - }, - "execution_count": 15, - "metadata": {}, - "output_type": "execute_result" - }, - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAYoAAAD4CAYAAADy46FuAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/Z1A+gAAAACXBIWXMAAAsTAAALEwEAmpwYAAAqc0lEQVR4nO3deXxV1b338c8vCUkYkkAghJAEAhLQIJMEHHFCBa2KVkG0T7Wt1qet9ra1w9Xn3ufe1ld7b21tvVq1alut+mhJQK3Yqjig1SpCDgIyBiLTSZhCAglTyLSeP86GxjTDQZKc6ft+vXh5zjrrrLM2O+bL3mvv3zHnHCIiIu2JC/UEREQkvCkoRESkQwoKERHpkIJCREQ6pKAQEZEOJYR6Al1h0KBBLi8vL9TTEBGJKMuXL9/rnMvorF9UBEVeXh4+ny/U0xARiShmti2Yfjr1JCIiHVJQiIhIhxQUIiLSIQWFiIh0SEEhIiIdCioozGymmZWaWZmZ3d3G60lmVuS9vtTM8lq8do/XXmpmM1q0P2lme8xsTaux0s3sTTPb5P13wElsn4iInKROg8LM4oFHgMuBAuBGMyto1e1WYJ9zbhTwAHCf994CYC4wFpgJPOqNB/BHr621u4G3nXP5wNvecxERCZFgjiimAmXOuc3OuXpgHjCrVZ9ZwNPe4wXAdDMzr32ec+6oc24LUOaNh3PuPaC6jc9rOdbTwDXBb450p82VB3m3dE+opyEiPSyYoMgG/C2el3ttbfZxzjUCNcDAIN/bWqZzbqf3eBeQ2VYnM7vdzHxm5qusrAxiM+Rk3fS7pXzlqRLeWrc71FMRkR4U1ovZLvCtSm1+s5Jz7gnnXKFzrjAjo9M70OUkle05wK7aOgC+V7SSTysPhnhGItJTggmKCiC3xfMcr63NPmaWAKQBVUG+t7XdZpbljZUF6FxHGCj2lZMQZ7xy53kkJsRx+zM+DtQ1hHpaItIDggmKEiDfzEaYWSKBxemFrfosBG7xHl8PLPaOBhYCc72rokYA+cCyTj6v5Vi3AC8HMUfpRg1Nzbz4cTnTTxvMuJw0Hr7pDLZWHeZ7RatobtZX6YpEu06DwltzuBNYBKwHip1za83sXjO72uv2B2CgmZUBd+FdqeScWwsUA+uA14E7nHNNAGb2J2AJMMbMys3sVm+snwOXmtkm4BLvuYTQ4g172HuwnjmFgYPDs08ZyL9/4TTeWr+bB9/eFOLZiUh3s8A//CNbYWGhU/XY7nPb0yV8Ul7Dh3dfTEJ84N8Wzjl+uOATFiwv58G5E5k1sbNrFEQk3JjZcudcYWf9wnoxW0JvT20d75RWct3knOMhAWBm/Oza0zlzRDo/nP8JJVvbutJZRKKBgkI6tODjcpqa3fHTTi0lJcTz+Jcnk5Pem68/42PL3kMhmKGIdDcFhbTLOcd8XzlT89IZMahvm33690nkqa9MIc6Mrz61jOpD9T08SxHpbgoKaVfJ1n1s2XuIOVP++WiipeED+/K7myezo6aOrz/j40h9Uw/NUER6goJC2lXs89MvKYErxg3ptO/k4ek8eMNEVmzfxzefW059Y3MPzFBEeoKCQtp0oK6Bv36yk6smZNEnMbivVr98XBY/u3Yc75ZW8oP5usdCJFoE9xtAYs5fP9nJkYamNhexO3Lj1GHsP9zAfa9vIK13L+6dNZZAfUgRiVQKCmlTkc9P/uB+TMztf8Lv/eaFp7D/cD2Pv7eZ/n168f3LxnT9BEWkxygo5J9s2n2AFdv38+9fOO1zHw3cffmp1Bxp4DeLy0iMj+Pb0/O7eJYi0lMUFPJPin1+EuKMayZ9/rutAzfkjaO+sZlfvbkRM7jzYoWFSCRSUMhnBAoAVnDJaZkM6pd0UmPFxxm/nD0BgPvf2AgoLEQikYJCPuPt9XuoOlTPnCk5XTJe67AwM+64aFSXjC0iPUNBIZ8x3+cnMzWJ8/O77sugjoWFA365qJT6xma+e0m+roYSiRAKCjlud20d75Tu4RsXnPKZAoBdIT7OuH/2BBLijAff3kTNkQb+48oC4uIUFiLhTkEhxy1YXk6z44TvnQhWfJxx33XjSUnuxZMfbOFAXSP3XTeuy0NJRLqWgkKAYwUA/UwdkU5eOwUAu0JcnPF/rzyNtN69eOCtjRyoa+A3N00iKSG+2z5TRE6O/iknACzbUs3WqsPc0E1HEy2ZGd+5JJ//vKqAN9bt5qtPlVCr798WCVsKCgGg2FfuFQDM6rHP/Oq5I/j1nAks21LN9b/9kIr9R3rss0UkeAoK4UBdA6+u3slVE4bSO7FnTwF98Ywcnv7aVHbur+PaRz5gTUVNj36+iHROQSH8xSsAeEMn3zvRXc4dNYgF3zyHXvFxzHl8Ce9s2BOSeYhI2xQUQlGJn9GZ/ZiQkxayOYwZksJL3zqHkRl9ufXpEp5ZshXnVKZcJBwoKGLcxt0HWOnfz5zC3JDfADc4NZmi28/mojGD+Y+X13LPi6s52qhvyxMJNQVFjCsu8dMr3rj2JAoAdqW+SQk8cXMhd1x0CvNK/Nz0u6Xsqa0L9bREYpqCIobVNzbz0opAAcCBJ1kAsCvFxxk/nHEqj9x0But21HLVw39npX9/qKclErMUFDFs8YbdgQKAPXDvxOfxhfFZvPDNc0iICyxyF5f4Qz0lkZikoIhhxb5yhqQmc/7orisA2NUKhqbyyrfPo3D4AH70wid8v3gVh+sbQz0tkZiioIhRu2rqeLd0D9dNziY+zAvzpfdN5Nlbz+Rfpufz4opyZj38AWV7DoR6WiIxQ0ERo174OFAAcPbk8Dzt1Fp8nHHXpaN55mtTqT5Uz1W/+YCXVpSHeloiMUFBEYOccxT7/JzZzQUAu8O0/Axe/c40xuWk8b2iVfxowSoOHdWpKJHupKCIQUu3VLOt6nDI7sQ+WZmpyTx/25ncedEo5i8v54qH3mf5tn2hnpZI1FJQxKBin5+UpAQuP73nCgB2tYT4OH4wYwxFt59NY5Nj9mMf8us3N9LQ1BzqqYlEnaCCwsxmmlmpmZWZ2d1tvJ5kZkXe60vNLK/Fa/d47aVmNqOzMc1supl9bGYrzezvZqYvWO5CtccKAE7s+QKA3WHqiHRe++40rpmUzUNvb2L2Y0vYsvdQqKclElU6DQoziwceAS4HCoAbzaygVbdbgX3OuVHAA8B93nsLgLnAWGAm8KiZxXcy5m+BLznnJgLPA/9+Ulson/GXVTupa2juke+d6Cmpyb349ZyJPHzTJLbsPcQVD77PUx9soblZtaJEukIwRxRTgTLn3GbnXD0wD5jVqs8s4Gnv8QJgugUKB80C5jnnjjrntgBl3ngdjemAVO9xGrDj822atKXI52dMZgrjQ1gAsLtcOX4or393GmeOTOcnr6xjzuNL+LTyYKinJRLxggmKbKDlLbHlXlubfZxzjUANMLCD93Y05m3Aq2ZWDnwZ+HlbkzKz283MZ2a+ysrKIDZDSncdYJV/P3OmhL4AYHfJSuvNU1+Zwq9mT2DTnoNc/uD7/PbdT2nU2oXI5xaOi9nfA65wzuUATwG/bquTc+
4J51yhc64wIyN87ywOJ8W+8CoA2F3MjOsm5/DmXedz0ZgM7nt9A9c++iHrd9aGemoiESmYoKgAWp7QzvHa2uxjZgkEThlVdfDeNtvNLAOY4Jxb6rUXAecEtSXSoWMFAC8tyCS9b2Kop9MjBqck89j/mswjN53Bjv1HuPI3f+e/Xl2v+y5ETlAwQVEC5JvZCDNLJLA4vbBVn4XALd7j64HFLvCtMwuBud5VUSOAfGBZB2PuA9LMbLQ31qXA+s+/eXLM2+t3U32ontlRtIgdDDPjC+OzeOuuC5hTmMMT721m+q/+xmurd+qLkUSClNBZB+dco5ndCSwC4oEnnXNrzexewOecWwj8AXjWzMqAagK/+PH6FQPrgEbgDudcE0BbY3rtXwdeMLNmAsHxtS7d4hhV5PMHCgDmx+ZpugF9E/nvL45ndmEu//bSGr753MdcMDqDn1w9NuLuThfpaRYN/6oqLCx0Pp8v1NMIWztrjnDuzxfzrQtH8YMZY0I9nZBrbGrm2Y+28as3NlLf1Mw3LjiFb1wwkj6Jnf67SSSqmNly51xhZ/3CcTFbutgLy70CgIU5oZ5KWEiIj+Or547g7e9fwMyxQ3jo7U1cfP/fePHjct17IdIGBUWUa252FPvKOWtkOsMH6hRLS5mpyTx04yQWfONsMlOTuKt4Fdc8+gG+rdWhnppIWFFQRLmlW6rZXh25BQB7QmFeOi9961weuGECe2qPcv1jS7jj+Y/xVx8O9dREwoJOyka5+T4/KcmRXQCwJ8TFGddOymHG2CE8/rfNPP7ep7y5djdfOmsYd1w0ikFh9J3iIj1NRxRRrLaugVfX7OTqCUNJ7hX5BQB7Qp/EBL536Wje+cGFXDspm6c/3MoFv3iHX7+5kQN1DaGenkhIKCii2CurdgQKAOq00wnLSuvNfdeP543vXcAFYzJ46O1NnP+Ld/j9+5upa2gK9fREepSCIooVl/g5dUgK47KjrwBgTxk1uB+PfmkyC+88l9Oz0/jpX9dz0f3v8vzS7dQ3qn6UxAYFRZTasKuWVeU1zCmM3gKAPWl8Tn+evfVMnr/tTDJTk/k/L63mwl++w7NLtnK0UUcYEt0UFFGquKScXvHGNVFeALCnnTNqEC996xye+dpUsvr35v++vJYLfvEuf/xgi05JSdRSUEShQAHAci4rGBIzBQB7kplx/ugMFnzjbJ677UyGpffhx6+sY5q3hnG4XkUHJbro8tgo9Nb63ew73KA7sbuZmXHuqEGcO2oQH22u4qG3N/HTv67n4XfKuPms4dx8Tp4uq5WooKCIQkUlfrLSkpkWowUAQ+GskQM5a+RAlm+r5rG/beahxWU8/t5mrp+cw9enjVThQYloCooos2P/Ed7bVMmdF40iPk6L2D1t8vB0fndzOmV7DvL79zcz31fO88u2M3PsEG4/fySThg0I9RRFTpiCIsq8sLwc52D2ZN07EUqjBvfj59eN565LR/PHD7fy/z7axmtrdjE1L51bzsnjsrGZ9IrXEqFEBpUZjyLNzY4L73+XnAG9ef7rZ4V6OtLCwaONzFu2naeXbMVffYQhqcl8+ezhzJ2Sy0CtY0iIqMx4DPpoSxXbqw8zJ8a+xS4S9EtK4LZpI3n3Bxfxu5sLGTW4H79cVMrZP1/M94tXsbq8JtRTFGmXTj1Fkfm+clKSE5h5+pBQT0XaER9nXFqQyaUFmZTtOcDTH27jhY/LeeHjcs4Y1p8vnz2cy0/PUm0uCSs69RQlao40MPVnbzG7MIefXjMu1NORE1Bb18ACXznPLNnK1qrDpPXuxbWTsrlx6jDGDEkJ9fQkigV76klHFFHilVU7ONrYzA2Fw0I9FTlBqcm9+Np5I/jKOXl8tLmK55dt57ml2/jjh1s5Y1h/bpw6jCvHD6V3oo4yJDR0RBElrn7479Q3NvPad6aptlMUqDp4lBc/ruBPJdvZXHmIlOQErpmYzQ1Tchk7NFX7WLqEjihiyPqdtXxSXsN/XlWgXyBRYmC/JL5+/khumzaCZVuq+dOy7RT5/Dz70TbGZKZw3eRsZk3MJjM1OdRTlRigoIgCxT4/ifFxXDNRBQCjjZlx5siBnDlyID8+XM8rn+zkxY/L+a9XN/Dz1zZwXn4G152RzWUFQ3RqSrqNgiLCHW1s4s8rKrh0bCYDVAAwqvXvk8iXzxrOl88azqeVB3np4wpeWlHBd+atpF9SAl8Yl8UXz8hmSl46cborX7qQgiLCvbVuD/sON+jeiRhzSkY/fjBjDHddOpqPtlTx4scV/OWTHRT5AnW+rhyfxZXjhzI+J02nI+WkaTE7wt385DLKdh/g/X+9WLWdYtzh+kbeWLubv3yyg79trKShyTEsvQ9XTcjiqglDGZOZotCQz9BidgzYsf8I72+q5NsqAChAn8QErpmUzTWTsqk53MCitbt45ZMdPPa3zTzyzqeMGtyPq8YP5coJWZyS0S/U05UIoqCIYAuOFQDUaSdpJa1PL+ZMyWXOlFz2HjzKa2t28ZdVO/iftzfywFsbOXVICpeNHcKMsZkUZOlyW+mYTj1FqOZmxwX3v8Ow9D48d5sKAEpwdtXU8erqnby+dhe+rdU0O8hN782MgiHMOH0IZwwboKPTGKJTT1Huo81V+KuP8IPLxoR6KhJBhqQl87XzRvC180aw9+BR3lq3m0Vrd/HMkm38/u9bGNQviUsLMpl5+hDOHjmQxATVDRUFRcQq9vlJTU5gxlgVAJTPZ1C/JOZOHcbcqcM4UNfAO6WVLFq7i5dXVvCnZdtJSUrg/NEZXHTqYC4ck6GvdY1hQQWFmc0EHgTigd87537e6vUk4BlgMlAF3OCc2+q9dg9wK9AE/ItzblFHY1rgZOlPgdnee37rnHvo5DYzutQcaeC1NbuYU5irKqPSJVKSe3H1hKFcPWEodQ1NfFC2lzfW7uad0j38dfVOzGBCTn+mnzqYi04drDIiMabToDCzeOAR4FKgHCgxs4XOuXUtut0K7HPOjTKzucB9wA1mVgDMBcYCQ4G3zGy09572xvwKkAuc6pxrNrPBXbGh0WThsQKAU7SILV0vuVc800/LZPppmTQ3O9btrOXt9XtYXLqHX725kV+9uZEhqclcdGoGF5+aybmjBtInUScnolkwe3cqUOac2wxgZvOAWUDLoJgF/Nh7vAB42DsymAXMc84dBbaYWZk3Hh2M+U3gJudcM4Bzbs/n37zoVFzi57SsVMYOTQ31VCTKxcUZp2encXp2Gt+5JJ89B+p4t7SSdzbsYeHKHfxpmZ/EhDim5qUzLX8Q54/O4NQhul8j2gQTFNmAv8XzcuDM9vo45xrNrAYY6LV/1Oq9xwoStTfmKQSORq4FKgmcrtrUelJmdjtwO8CwYbFTWnvdjlpWV9TwYxUAlBAYnJLMnMJc5hTmUt/YTMnWahZv2MP7myr579c28N+vbSAjJYlpowYxbfQgzhuVQUaK1jYiXTgeLyYBdc65QjP7IvAkMK11J+fcE8ATELg8tmenGDrHCgDOUgFACbHEhDjOHTWIc0cNAgKX3r6/qZL3N+3l3Y2VvLiiA
oDTslI5P38Q0/IzKMwboHW1CBRMUFQQWDM4Jsdra6tPuZklAGkEFrU7em977eXAi97jl4CngphjTDja2MSfV1ZwmQoAShgakpbM7MJcZhfmHl/beG9TJe9v3MuTH2zh8fc2k5QQR2HeAM4eOZCzRg5kfE5/XYIbAYIJihIg38xGEPhlPhe4qVWfhcAtwBLgemCxc86Z2ULgeTP7NYHF7HxgGWAdjPln4CJgC3ABsPFzb12UeXPdbvarAKBEgJZrG9+6cBSHjjaybEs172/ay5LNVdz/RuB/69694gPBccpAzh45kHHZaSTEKzjCTadB4a053AksInAp65POubVmdi/gc84tBP4APOstVlcT+MWP16+YwCJ1I3CHc64JoK0xvY/8OfCcmX0POAjc1nWbG9mKSvxk9+99/FBfJFL0TUrgIu/SWoB9h+pZuqWKJZ9WsWRzFb94vRSAfkkJTDkeHIMoGJqqO8XDgEp4RIiK/Uc4777FfPvifO66dHTnbxCJIHsPHuWjzf8Ijs2VhwBISUrgjOEDmJI3gMK8dCbm9tcaRxdSCY8os8BXDsDsyTkhnolI1xvUL4krxw/lyvFDAdhdW8dHm6tYtqUa39Z9x09V9YoPnNKakpdO4fBAeKRrva7bKSgiQHOzY/5yP+eeMojc9D6hno5It8tMTWbWxOzjV/ftP1zP8m37KNm6D9/Wav74wVaeeG8zAKdk9A0Ehxcewwf20aXjXUxBEQGWbK6ifN8RfjhDBQAlNvXvk3j8bnGAuoYmVlfUULI1cMTx6uqdzCsJ3JqV3jeRibn9mZTbn0nDBjA+N43U5F6hnH7EU1BEABUAFPms5F7xTMlLZ0peOhA46t605yC+bdWs3L6fFf79LN4QKOpgBqMy+gXCY9gAJub2Z3RmP11ddQIUFGGu5nCgAODcKSoAKNKeuDhjzJAUxgxJ4UtnDgcCxTM/Kd/Piu37Wenfz1vrdzN/eWCtr09iPONz0piYGwiOCblpDElN1imrdigowtzCVRXUNzbr3gmRE5TWuxfT8jOYlp8BgHOObVWHWenfz4rt+1jp38/v399MY3Pgys9B/RI5PTuN8d79H+NyFB7HKCjCXJHPT0FWKqdnp4V6KiIRzczIG9SXvEF9uWZSYJG8rqGJtTtqWVNRw+qKGlaX1/Dexkq87GBQvyTGZacyLqc/47LTGJ+TRmZqcgi3IjQUFGFs7Y4a1lTU8pOrx4Z6KiJRKblXPJOHD2Dy8AHH247UN7FuZyA0VlfUsrpiP39rER4ZKUmM8446CrwqzjkDekf1kYeCIozN95WTmBDHrIlDQz0VkZjROzGeycPTmTw8/Xjb4fpG1u+s5ZPywJHHmooa3i3dczw8UpISOC0rldOyUjgtK5WCoamMzkyJmnVFBUWYqmto4qUVFcwYO4T+fXRDkUgo9UlM+KfwOFLfROnuA6zbUcv6nbWs21nLguXlHKpvAiDO4JSMfseD41iQDE6JvFNXCoow9ea63dQcaWBOoe7EFglHvRPjmZjbn4m5/Y+3NTc7tlcfZv3Of4TH8m37WLhqx/E+g/olcVpWCmMyUxg9JIXRmSnkD+5H36Tw/XUcvjOLccU+rwDgKSoAKBIp4uL+sWB++bis4+37D9ezfueB4+Gxfmctz360jaONzcf75Kb3ZkxmCvmZXohkpnDK4L4kJYT+9JWCIgyV7zvM38v28p3p+cSpcqZIxOvfJzFQEfeUgcfbmpod/urDlO4+wMZdByjdfYBNuw/ybmnl8Ut24+OMvIF9GO0Fx5ghKYzO7EfewL49esOggiIMLfBuCrpeBQBFolZ8i6OPllUX6hub2Vp1iI0tAmTDrgMsWrvr+OJ5YnwcIwb1ZdTgftx9+andXgNOQRFmmpsd833lnDdqEDkDVABQJNYkJsQdP4Jg/D/a6xqaKNtzMBAguw9Stucga3fU9Mg3BCoowsyHn1ZRsf8I/3r5qaGeioiEkeRe8ce/NbCnqSpWmCn2+Unr3YvLCjJDPRUREUBBEVZqDjfw+tpdXDNxaNTcqCMikU9BEUZePlYAcIoKAIpI+FBQhJGiEj9jh6YydqgKAIpI+FBQhIk1FTWs3VHLDTqaEJEwo6AIE/N9/kABwAnZoZ6KiMhnKCjCQF1DE39euYOZY4eQ1kff7Ssi4UVBEQbeOF4AUKedRCT8KCjCQHGJn5wBvTmnRR0YEZFwoaAIMX/1YT74dC+zJ+eqAKCIhCUFRYgdLwCo750QkTCloAih5mbHguWBAoDZ/XuHejoiIm1SUITQB5/upWL/ES1ii0hYU1CEULGvnP59enHZWBUAFJHwpaAIkf2H61m0dhfXTMwOi686FBFpT1BBYWYzzazUzMrM7O42Xk8ysyLv9aVmltfitXu89lIzm3ECYz5kZgc/53aFvZdX7ggUANRpJxEJc50GhZnFA48AlwMFwI1mVtCq263APufcKOAB4D7vvQXAXGAsMBN41MziOxvTzAqBASe5bWGtqMTP6dmpFAxNDfVUREQ6FMwRxVSgzDm32TlXD8wDZrXqMwt42nu8AJhuZua1z3POHXXObQHKvPHaHdMLkV8CPzq5TQtfaypqWLezlht0NCEiESCYoMgG/C2el3ttbfZxzjUCNcDADt7b0Zh3Agudczs7mpSZ3W5mPjPzVVZWBrEZ4aPYKwB4tQoAikgECKvFbDMbCswGftNZX+fcE865QudcYUZGRvdProvUNTTx5xUVXH66CgCKSGQIJigqgJbnSHK8tjb7mFkCkAZUdfDe9tonAaOAMjPbCvQxs7IgtyUiLFq7i9q6Ri1ii0jECCYoSoB8MxthZokEFqcXtuqzELjFe3w9sNg557z2ud5VUSOAfGBZe2M65/7qnBvinMtzzuUBh70F8qhR7POTm96bs0eqAKCIRIaEzjo45xrN7E5gERAPPOmcW2tm9wI+59xC4A/As96//qsJ/OLH61cMrAMagTucc00AbY3Z9ZsXXvzVh/mgrIq7Lh2tAoAiEjE6DQoA59yrwKut2v6jxeM6AmsLbb33Z8DPghmzjT79gplfpJi/vBwzuG6yCgCKSOQIq8XsaNbU7Fjg8zMtP0MFAEUkoigoesgHZXvZUVPHHJUTF5EIo6DoIcU+P/379OLSAhUAFJHIoqDoAfsO1fPG2t0qACgiEUlB0QNeXllBfZMKAIpIZFJQdDPnHEW+csZlp6kAoIhEJAVFN1tTUcv6nbXMmaKjCRGJTAqKblbs85OUEMfVE4aGeioiIp+LgqIb1TU08eeVXgHA3ioAKCKRSUHRjRat3cWBukaddhKRiKag6EZFJYECgGeNUAFAEYlcCopu4q8+zIefVjFncq4KAIpIRFNQdJP5Pr8KAIpIVFBQdIOmZseC5eWcn5/BUBUAFJEIp6DoBn8/XgBQi9giEvkUFN2g2OdnQJ9eXFIwONRTERE5aQqKLrbvUD1vrt3NNZNUAFBEooOCoou9tCJQAPAG3TshIlFCQdGFnHMU+/yMz0nj1CEqACgi0UFB0YVWV9SwYdcBLWKLSFRRUHShYwUAr1IBQBGJ
IgqKLlLX0MTLK3dwxbgsFQAUkaiioOgir6/xCgDqtJOIRBkFRRcpKvEzLL0PZ45ID/VURES6lIKiC2yvOsySzVXMKcxRAUARiToKii4wf7mfOBUAFJEopaA4SccLAI7OICtNBQBFJPooKE7S+5sq2akCgCISxRQUJ2m+r5z0volcclpmqKciItItFBQnofpQPW+s28U1E7NJTNBfpYhEp6B+u5nZTDMrNbMyM7u7jdeTzKzIe32pmeW1eO0er73UzGZ0NqaZPee1rzGzJ80sbO9ee2lFBQ1NTgUARSSqdRoUZhYPPAJcDhQAN5pZQatutwL7nHOjgAeA+7z3FgBzgbHATOBRM4vvZMzngFOBcUBv4LaT2sJu4pxjvs/PhJw0xgxJCfV0RES6TTBHFFOBMufcZudcPTAPmNWqzyzgae/xAmC6mZnXPs85d9Q5twUo88Zrd0zn3KvOAywDwvKa00/KvQKAOpoQkSgXTFBkA/4Wz8u9tjb7OOcagRpgYAfv7XRM75TTl4HX25qUmd1uZj4z81VWVgaxGV2r2OcnuZcKAIpI9AvnFdhHgfecc++39aJz7gnnXKFzrjAjI6NHJ3akvomFK3dwxelZpCaH7RKKiEiXSAiiTwXQ8vxKjtfWVp9yM0sA0oCqTt7b7phm9p9ABvC/g5hfj3t97U4OHG3UaScRiQnBHFGUAPlmNsLMEgksTi9s1WchcIv3+HpgsbfGsBCY610VNQLIJ7Du0O6YZnYbMAO40TnXfHKb1z2KSvwMH6gCgCISGzo9onDONZrZncAiIB540jm31szuBXzOuYXAH4BnzawMqCbwix+vXzGwDmgE7nDONQG0Nab3kY8B24AlgfVwXnTO3dtlW3yStlUd4qPN1fxwxhi8+YmIRLVgTj3hnHsVeLVV23+0eFwHzG7nvT8DfhbMmF57UHMKlfm+8kABwDPC8mIsEZEuF86L2WHnWAHAC0ZnMCQtOdTTERHpEQqKE/Depkp21aoAoIjEFgXFCZjv85PeN5HpKgAoIjFEQRGkqoNHeXPdbq6dpAKAIhJb9BsvSMcKAOq0k4jEGgVFEJxzFPv8TMjtrwKAIhJzFBRBWFVew8bdB7lBRxMiEoMUFEH4RwHArFBPRUSkxykoOnGkvolXVu7ginFZpKgAoIjEIAVFJ15bEygAqNNOIhKrFBSdKCrxkzewD1NVAFBEYpSCogNb9x5i6ZZqZhfmqgCgiMQsBUUH5i/3qwCgiMQ8BUU7jhUAvHDMYBUAFJGYpqBox3sbK9lde5Q5hTqaEJHYpqBoR1GJn4F9E7n4VBUAFJHYpqBoQ9XBo7y1XgUARURAQdGml1ZU0NjsmDNF906IiCgoWnHOUVTiZ2Juf0ZnqgCgiIiCopWV/v1s2nOQG3Q0ISICKCj+SbGvnN694rlyvAoAioiAguIzDtc38soqFQAUEWlJQdHCa6t3cfBoo047iYi0oKBoocjnZ8SgvkzJGxDqqYiIhA0FhWfL3kMs21LN7MIcFQAUEWlBQeGZ71MBQBGRtigogMamZl74uJyLxgwmM1UFAEVEWlJQAO9tChQAnK1vsRMR+ScKCgIFAAf1S2T6aYNDPRURkbAT80Gx9+BR3l6/h2snZdMrPub/OkRE/knM/2Z86eNAAUDdOyEi0raggsLMZppZqZmVmdndbbyeZGZF3utLzSyvxWv3eO2lZjajszHNbIQ3Rpk3ZuJJbmO7nHMU+/ycMaw/owarAKCISFs6DQoziwceAS4HCoAbzaygVbdbgX3OuVHAA8B93nsLgLnAWGAm8KiZxXcy5n3AA95Y+7yxu8UKrwDgHC1ii4i0K5gjiqlAmXNus3OuHpgHzGrVZxbwtPd4ATDdAnetzQLmOeeOOue2AGXeeG2O6b3nYm8MvDGv+dxb14n5Pn+gAOCEod31ESIiES+YoMgG/C2el3ttbfZxzjUCNcDADt7bXvtAYL83RnufBYCZ3W5mPjPzVVZWBrEZ/2xYel++cm4e/ZISPtf7RURiQcT+hnTOPQE8AVBYWOg+zxjfvPCULp2TiEg0CuaIogJoeRI/x2trs4+ZJQBpQFUH722vvQro743R3meJiEgPCiYoSoB872qkRAKL0wtb9VkI3OI9vh5Y7JxzXvtc76qoEUA+sKy9Mb33vOONgTfmy59/80RE5GR1eurJOddoZncCi4B44Enn3FozuxfwOecWAn8AnjWzMqCawC9+vH7FwDqgEbjDOdcE0NaY3kf+KzDPzH4KrPDGFhGRELHAP+IjW2FhofP5fKGehohIRDGz5c65ws76xfyd2SIi0jEFhYiIdEhBISIiHVJQiIhIh6JiMdvMKoFtn/Ptg4C9XTidSKBtjg3a5uh3sts73DmX0VmnqAiKk2FmvmBW/aOJtjk2aJujX09tr049iYhIhxQUIiLSIQWFV1gwxmibY4O2Ofr1yPbG/BqFiIh0TEcUIiLSIQWFiIh0KKaDwsxmmlmpmZWZ2d2hns+JMLNcM3vHzNaZ2Voz+47Xnm5mb5rZJu+/A7x2M7OHvG39xMzOaDHWLV7/TWZ2S4v2yWa22nvPQ95X1Yac973rK8zsL97zEWa21JtnkVe6Hq+8fZHXvtTM8lqMcY/XXmpmM1q0h93PhJn1N7MFZrbBzNab2dnRvp/N7Hvez/UaM/uTmSVH2342syfNbI+ZrWnR1u37tb3P6JBzLib/EChv/ikwEkgEVgEFoZ7XCcw/CzjDe5wCbAQKgF8Ad3vtdwP3eY+vAF4DDDgLWOq1pwObvf8O8B4P8F5b5vU1772Xh3q7vXndBTwP/MV7XgzM9R4/BnzTe/wt4DHv8VygyHtc4O3vJGCE93MQH64/EwS+O/4273Ei0D+a9zOBrz/eAvRusX+/Em37GTgfOANY06Kt2/dre5/R4VxD/T9BCH8YzwYWtXh+D3BPqOd1EtvzMnApUApkeW1ZQKn3+HHgxhb9S73XbwQeb9H+uNeWBWxo0f6ZfiHczhzgbeBi4C/e/wR7gYTW+5XA952c7T1O8PpZ6319rF84/kwQ+LbILXgXnrTef9G4nwkEhd/75Zfg7ecZ0bifgTw+GxTdvl/b+4yO/sTyqadjP4zHlHttEcc71J4ELAUynXM7vZd2AZne4/a2t6P28jbaQ+1/gB8Bzd7zgcB+51yj97zlPI9vm/d6jdf/RP8uQmkEUAk85Z1u+72Z9SWK97NzrgK4H9gO7CSw35YT3fv5mJ7Yr+19RrtiOSiigpn1A14Avuucq235mgv8kyFqrn82syuBPc655aGeSw9KIHB64rfOuUnAIQKnC46Lwv08AJhFICSHAn2BmSGdVAj0xH4N9jNiOSgqgNwWz3O8tohhZr0IhMRzzrkXvebdZpblvZ4F7PHa29vejtpz2mgPpXOBq81sKzCPwOmnB4H+Znbsa31bzvP4tnmvpwFVnPjfRSiVA+XOuaXe8wUEgiOa9/MlwBbnXKVzrgF4kcC+j+b9fExP7Nf2PqNdsRwUJUC+dyVFIoFFsIUhnlPQvCsY/gCsd879usVLC4FjVz7cQmDt4lj7zd7
VE2cBNd7h5yLgMjMb4P1L7jIC5293ArVmdpb3WTe3GCsknHP3OOdynHN5BPbXYufcl4B3gOu9bq23+djfxfVef+e1z/WulhkB5BNY+Au7nwnn3C7Ab2ZjvKbpBL6DPmr3M4FTTmeZWR9vTse2OWr3cws9sV/b+4z2hXLRKtR/CFxJsJHAFRD/Fur5nODczyNwyPgJsNL7cwWBc7NvA5uAt4B0r78Bj3jbuhoobDHW14Ay789XW7QXAmu89zxMqwXVEG//hfzjqqeRBH4BlAHzgSSvPdl7Xua9PrLF+//N265SWlzlE44/E8BEwOft6z8TuLolqvcz8BNggzevZwlcuRRV+xn4E4E1mAYCR4639sR+be8zOvqjEh4iItKhWD71JCIiQVBQiIhIhxQUIiLSIQWFiIh0SEEhIiIdUlCIiEiHFBQiItKh/w/uhegfvR+Q7QAAAABJRU5ErkJggg==\n", - "text/plain": [ - "
" - ] - }, - "metadata": { - "needs_background": "light" - }, - "output_type": "display_data" - } - ], - "source": [ - "xs = list(range(100000))\n", - "plt.plot(xs, lrs)" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "id": "4f4e282c", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/wenet/venv/lib/python3.8/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n", - " and should_run_async(code)\n" - ] - } - ], - "source": [ - "from typing import Union\n", - "\n", - "from paddle.optimizer.lr import LRScheduler\n", - "from typeguard import check_argument_types\n", - "\n", - "class WarmupLR(LRScheduler):\n", - " \"\"\"The WarmupLR scheduler\n", - " This scheduler is almost same as NoamLR Scheduler except for following\n", - " difference:\n", - " NoamLR:\n", - " lr = optimizer.lr * model_size ** -0.5\n", - " * min(step ** -0.5, step * warmup_step ** -1.5)\n", - " WarmupLR:\n", - " lr = optimizer.lr * warmup_step ** 0.5\n", - " * min(step ** -0.5, step * warmup_step ** -1.5)\n", - " Note that the maximum lr equals to optimizer.lr in this scheduler.\n", - " \"\"\"\n", - "\n", - " def __init__(self,\n", - " warmup_steps: Union[int, float]=25000,\n", - " learning_rate=1.0,\n", - " last_epoch=-1,\n", - " verbose=False):\n", - " assert check_argument_types()\n", - " self.warmup_steps = warmup_steps\n", - " super().__init__(learning_rate, last_epoch, verbose)\n", - "\n", - " def __repr__(self):\n", - " return f\"{self.__class__.__name__}(warmup_steps={self.warmup_steps})\"\n", - "\n", - " def get_lr(self):\n", - " step_num = self.last_epoch + 1\n", - " return self.base_lr * self.warmup_steps**0.5 * min(\n", - " step_num**-0.5, step_num * self.warmup_steps**-1.5)\n", - "\n", - " def set_step(self, step: int):\n", - " self.step(step)" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "id": "8c40b202", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "-1\n" - ] - } - ], - "source": [ - "sc = WarmupLR(warmup_steps=25000, learning_rate=0.001)\n", - "print(step)\n", - "#sc.set_step(step)\n", - "sc.set_step(0)" - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "id": "ecbc7e37", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[]" - ] - }, - "execution_count": 23, - "metadata": {}, - "output_type": "execute_result" - }, - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAYoAAAD4CAYAAADy46FuAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/Z1A+gAAAACXBIWXMAAAsTAAALEwEAmpwYAAAqaUlEQVR4nO3de3xU9Z3/8dcnCUm4JIGEEAIBEiCAQW4SEG94F7QqagGhu9Varb9a3W51267+tr/dtrvdVevW1VardrVaa4WAN7QqKqJ4QchwvwYiAZMQICQQ7uT2/f0xB4xpLoMkmcnM+/l48GDmO99z5ns4Yd4553vOZ8w5h4iISHOigj0AEREJbQoKERFpkYJCRERapKAQEZEWKShERKRFMcEeQFvo3bu3y8zMDPYwREQ6lRUrVux1zqW21i8sgiIzMxOfzxfsYYiIdCpmtiOQfjr1JCIiLVJQiIhIixQUIiLSIgWFiIi0SEEhIiItCigozGyqmRWYWaGZ3dvE63FmNtd7fZmZZTZ47T6vvcDMpjRof8bM9pjZ+kbrSjazd81sq/d3r9PYPhEROU2tBoWZRQOPAVcCOcBsM8tp1O1WYJ9zbijwMPCAt2wOMAsYCUwFHvfWB/Cs19bYvcAi51w2sMh7LiIiQRLIEcVEoNA5t805Vw3MAaY16jMNeM57PB+41MzMa5/jnDvunCsCCr314ZxbAlQ28X4N1/UccF3gmyPtaVv5IT4o2BPsYYhIBwskKPoDxQ2el3htTfZxztUCVUBKgMs2luacK/Me7wLSmupkZrebmc/MfOXl5QFshpyuWU99xnf+mM+iTbuDPRQR6UAhPZnt/N+q1OQ3KznnnnLO5TrnclNTW70DXU7T1t0H2XPwOAA/mrOaz8sPBXlEItJRAgmKUmBAg+cZXluTfcwsBkgCKgJctrHdZpburSsd0LmOEJDnKyYmynj9rvPpEhPF7X/ycfBYTbCHJSIdIJCgyAeyzSzLzGLxT04vaNRnAXCz93g68L53NLAAmOVdFZUFZAPLW3m/huu6GXgtgDFKO6qpq+fllaVcdkYaozKSeOxbZ7G94gj35K2hvl5fpSsS7loNCm/O4S5gIbAJyHPObTCzX5rZtV63p4EUMysE7sG7Usk5twHIAzYCbwN3OufqAMzsRWApMNzMSszsVm9d9wOXm9lW4DLvuQTRok17qDhczcwJGQCcMySFf7nqDN7duJtH398a5NGJSHsz/y/+nVtubq5T9dj2c+uz+azfWcUn/3wJMdH+3y2cc/x43lpeWlnCI7PGMm1sa9coiEioMbMVzrnc1vqF9GS2BN/uA8dYXLCHb56VcTIkAMyM/7zhTCZmJfOTeWvJ397Ulc4iEg4UFNKil1aWUO9gZu6Av3ktLiaap749noxeXbn9Tz627z0chBGKSHtTUEiznHPM85UwMSuZzN7dm+zTs1ssf7xlAmbGLc/ms+9wdQePUkTam4JCmrW8qJKivYe5sYmjiYYGpXTnDzeNp3T/Ub73Jx9Hq+s6aIQi0hEUFNKsPF8JPeJiuHJU31b7jh+UzP/cOJYVX+zjBy+soKauvgNGKCIdQUEhTTp4rIY315VxzZh+dIsN7KvVrxqVzn9eP4rFBeX8eJ7usRAJF4F9AkjEeWNtGUdr6rhxQsunnRqbPXEg+45U8+DbBSR17cIvrh2Jvz6kiHRWCgpp0tz8Yoal9WBMRtIpL3vHhUPYf6SGp5Zso2fXLtxzxfB2GKGIdBQFhfyNLbsPsrp4Pz/7xhlf62jAzLjvyhFUHanh0fcLiY2J4q5LstthpCLSERQU8jfy8ovpEm1cP+7r323tvyFvFDV19Tz0zhbMjDsvHtqGoxSRjqKgkK+orq3nlVX+AoApPeJOa13RUcavZ4zBAb9eWACgsBDphBQU8hXvb97tLwDYyr0TgYqOMh6aMQbwh4UZ/OAihYVIZ6KgkK/I85XQNzGeycPa7sugToSFc44H3y6gptbxw0uH6mookU5CQSEn7ao6xgcFe7jjoiFER7Xth3h0lPHfM8cSEx3Fw+9toepoDT/7xhlEtfH7iEjbU1DISScKAM4Y3zannRqLjjIe/OZoEuJjeOaTIg4cq+H+G0Z9pSqtiIQeBYUAJwoAFnN2CwUA20JUlPGvV+eQ1LUL//PeVg4dq+WR2WOJi4lut/cUkdOjX+UEgGVFlWyvOHLKd2J/HWbGjy4bxr9encPbG3bx3WfzOaDv3xYJWQoKASDPV0xCXAxXnpneYe/53fOz+O8ZY1i2rZIZv1/Kzv1HO+y9RSRwCgrhwIkCgGP70TW2Y08BfXN8Bs/eMpGd+49y3WOfsL60qkPfX0Rap6AQ3lhTxrGa+ja7d+JUnZ/dm3l3nENMlHHjk0tZXLAnKOMQkaYpKIS5vmKGpyV8rQKAbWVE30ReufM8BqV057bnfDy/dDvOqUy5SChQUES4gl0HWVO8n5kTBgT9Bri0xHjyvn8OFw5L5f+9toH/+8o6qmv1BUgiwaagiHB5vtMvANiWesTF8IebcvnBRUN4cXkxs//wGXsOHgv2sEQimoIigp0oAHh5ThrJ3WODPZyToqOMn04dwe++NY6NOw9w7W8/YW3J/mAPSyRiKSgi2KJNu6k8XM2MIE1it+bq0f2Yf8c5REcZ059YyjxfcbCHJBKRFBQRLM9X7C8AmN12BQDb2sh+SSy46zxyB/XiJ/PX8uN5azhaXRfsYYlEFAVFhNpVdYwPt5QzfXxGmxcAbGspPeJ4/taz+eElQ3lpZQnTHvuYwj0Hgz0skYihoIhQJwsA5mYEeygBiY4y7rliOM/dMpGKQ9Vc+7tPeGVVSbCHJRIRFBQRqL7ekecrZtLgZAaltF8BwPYweVgqf/3hBZzZL4m7567hp/PXcPh4bbCHJRLWFBQRaPn2SnZ0UAHA9tA3KZ6/fO9sfnDREOatKOGqRz9i5Rf7gj0skbCloIhAefn+AoBTR3ZcAcC2FhMdxU+njmDO9yZRW+eY8cRSHn53C7V1ukFPpK0FFBRmNtXMCsys0MzubeL1ODOb672+zMwyG7x2n9deYGZTWlunmV1qZivNbLWZfWxm+oLlNnTgWA1vri/j2iAUAGwPZw9O4a0fXcC0Mf14ZNFWpj+xlKK9h4M9LJGw0mpQmFk08BhwJZADzDaznEbdbgX2OeeGAg8DD3jL5gCzgJHAVOBxM4tuZZ2/B/7OOTcW+Avws9PaQvmK19fsDGoBwPaQGN+F39w4lt/OHse28kNc9chHPPtJEfX1qhUl0hYCOaKYCBQ657Y556qBOcC0Rn2mAc95j+cDl5q/cNA0YI5z7rhzrggo9NbX0jodkOg9TgJ2fr1Nk6bk5Rczom8Co4NYALC9XDOmHwvvnszErGR+/vpGZj65lG3lh4I9LJFOL5Cg6A80vCW2xGtrso9zrhaoAlJaWLaldd4GvGlmJcC3gfubGpSZ3W5mPjPzlZeXB7AZsnnXAdaUVDEzN/gFANtLelJXnr1lAg/NGMOW3QeZ+shHPPHh55q7EDkNoTiZfTdwlXMuA/gj8JumOjnnnn
LO5TrnclNTQ/fO4lCSl19Cl2jjuhApANhezIzp4zN4754LuXh4Kve/tZkbfv8pm3cdCPbQRDqlQIKiFGh4QjvDa2uyj5nF4D9lVNHCsk22m1kqMMY5t8xrnwucG9CWSIv8BQBLuCKnb0gVAGxPfRLjeeLvx/O7b42jdN9RvvHox/znm5t034XIKQokKPKBbDPLMrNY/JPTCxr1WQDc7D2eDrzv/N86swCY5V0VlQVkA8tbWOc+IMnMhnnruhzY9PU3T054b9Nu9h2p6TR3YrcVM+Pq0f14754LmTE+g6eWbOOy33zIW+vK9MVIIgGKaa2Dc67WzO4CFgLRwDPOuQ1m9kvA55xbADwNPG9mhUAl/g9+vH55wEagFrjTOVcH0NQ6vfbvAS+ZWT3+4Phum25xhMrzFZOeFM8FIVwAsD316h7L/d8czYzcAfzs1fXc8cJKLhyWyi+uHUlm7851d7pIR7Nw+K0qNzfX+Xy+YA8jZJVVHeW8+9/nzouH8k9XDA/2cIKutq6ePy3dwW/e3UJ1XT3fv3AI379wMN1iW/29SSSsmNkK51xua/1CcTJb2thLK7wCgOPD596J0xETHcV3z89i0T9dyJSRfXl00VYueehDXl5ZonsvRJqgoAhz/gKAJZwzOIWBKd2CPZyQkpYYz29nj2Pe98+hT2Ic9+St4brHP8G3vTLYQxMJKQqKMLesqJIvKjtvAcCOMCEzmVd/cB6/mTmG3QeOMf2Jpdz5l5UUVx4J9tBEQoJOyoa5PF8xCfExTD2zb7CHEtKioowbzspg6pl9efLDbTy55HPe3bCbv580iDsvHkJKj7hgD1EkaHREEcaqjtbw5roypo3tR3yXzl8AsCN0i43h7suHsfjHF3H9uP48+2kRkx9czMPvbuHgsZpgD08kKBQUYez1NTs5XhteBQA7SnpSVx6YPpp37p7M5GGpPLJoKxf++gOe/riIYzX6zm6JLAqKMJbn8xcAHNU//AoAdpShfRL4/d+P57U7zyMnPZF/f2Mjlzz0AS8u/4LqWtWPksigoAhTm8oOsDbMCwB2pDEDevLn287mhdvOJjUxnvteXsfFD33Anz/bwfFaHWFIeFNQhKk8XzGx0VFcH+YFADvaeUN78+oPzuXZWybQJzGOn726ngsf/IDnPt2uU1ISthQUYeh4bR2vrirl8pFp9IqQAoAdycy4aHgfXr7jXP5869kMSO7Kvy3YwOQHF/P0x0UcrVZgSHjR5bFh6L2Ne9h3pEaT2O3MzDg/uzfnDU1h6bYKHl20lX9/YyO/e38rN52TyU3nDNJltRIWFBRhKM9XTL+keM4f2jvYQ4kIZsa5Q3pz7pDe5G+v5MkPP+eRRVt5csnnzBg/gNsuyGJQigoPSueloAgzO/cfZcnWcv7h4qFER2kSu6NNyExmQmYyhXsO8tSSbczNL+aFZTu48sx0bp88mDEDegZ7iCKnTEERZl5aUYJzMEOnnYJqaJ8EHpw+hh9fMZw/frqdP3+2g7+uK2NiVjLfOTeTK3LSiInWFKF0DiozHkbq6x0XPrSYAb268ZfvTQr2cKSBQ8drmbP8C579dDsl+47SLymev5s0iNkTB0bMNw5K6FGZ8Qj0WVEFxZVHVQAwBPWIi+G2Cwbz4U8u5g835ZKV2p1fLyxg0n8t4sfz1rC+tCrYQxRplk49hZG8fH8BwCkjVQAwVEVHGZfnpHF5Thpbdx/kuaXbeXllKfNXlDB+UC9uOmcQU0b2VW0uCSkKijBRdbSGt9bvYmbuAH3IdBLZaQn8x3Wj+MmUEcxfUcLzS7fzj3NW07NbF24Yl8HsiQPITksI9jBFFBThYoEKAHZaSV27cOv5WdxybiZLt1Xw4vIveP6z7TzzSRG5g3oxe+JArhqVTtdY/QIgwaHJ7DBxzW8/prbe8eYPz1dtpzBQceg4L68s5cXlX7Bt72ES4mO4YVx/Zk4YwMh+KvIobSPQyWwdUYSBjTsPsK60in+7JkchESZSesTxvcmDue2CLJYXVfLi8i94Mb+Y55buYETfBL55VgbTxvajT2J8sIcqEUBBEQZOFAC8bqwKAIYbM+PswSmcPTiFnx+p5vW1Zby0ooRfvbmJ/3prE5OHpXLDWRlckZOmuSlpNwqKTu54bR2vrlYBwEjQs1ss3540iG9PGsTn5Yd4eWUJr6ws5YcvriIhLoZvjE7nhrMyyB3UiyjdlS9tSEHRyb27cTf7j9RwoyaxI8qQ1B78ZMoI/uny4XxWVMFLK0pZsGYnc/L9db6uHtOPq0enM6p/kk5HymnTZHYnd9Mzy/l8zyGW/PRi1XaKcEeqa3lnw25eX7OTJVvLqalzDErpxjWj+3H1mHSGpyUoNOQrNJkdAUr3H+WjreX8wyXZCgmhW2wM143rz3Xj+lN1pIaFG3bx+tqdPP5BIb9bXEh2nx5cPbof14xJZ3Bqj2APVzoRBUUndrIA4PiMYA9FQkxSty7MnDCAmRMGsPfQcd5aV8bra8t4+L0tPPzeFkb0TWDKyL5MGdmXM9J1pCEt06mnTqq+3jH514sZlNKNF25TAUAJTFnVUd5ct4uF63eRv6MS52BgcjemjExjysi+nDVQE+GRRKeewtxn2yoo2XeUn0wZHuyhSCeSntSVW8/P4tbzsyg/eJz3Nu1m4YZdPPvpdv7wURGpCXFcnpPG1JF9mTQ4hdgY1Q0VBUWnNddXTKIKAMppSE2IY/bEgcyeOJADx2pYvHkPCzfs4tVVpfxl2RckxMcweVgql47ow0XD+6gcegQLKCjMbCrwCBAN/K9z7v5Gr8cBfwLGAxXAjc657d5r9wG3AnXAD51zC1tap/lPlv4HMMNb5vfOuUdPbzPDS9URfwHAWRNUAFDaRmJ8F6aN7c+0sf05VlPHx1v38s7GXSwuKOeva8swg3EDenLJiD5cMiJN8xoRptWgMLNo4DHgcqAEyDezBc65jQ263Qrsc84NNbNZwAPAjWaWA8wCRgL9gPfMbJi3THPr/A4wABjhnKs3sz5tsaHhZMGaUqpVAFDaSXyXaC7LSeOynDTq6x3rd1bx/uY9vL95Dw+9s4WH3tlCelI8F4/owyXD+3De0N4qWBjmAjmimAgUOue2AZjZHGAa0DAopgE/9x7PB37nHRlMA+Y4544DRWZW6K2PFtZ5B/At51w9gHNuz9ffvPA011dMTnoiZ/ZXcThpX1FRxuiMnozO6MmPLhvGngPH+KCgnEWbd/Oad4oqLiaKiVnJTM5O5YJhvXW/RhgKJCj6A8UNnpcAZzfXxzlXa2ZVQIrX/lmjZU8UJGpunUPwH41cD5TjP121tfGgzOx24HaAgQMHBrAZ4WHDzirWlx7g59fkBHsoEoH6JMafvOz2eG0dy4sqWby5nI+2lvOrNzfBm/65jwuyezM5O5XzhvYmNSEu2MOW0xSKk9lxwDHnXK6Z3QA8A1zQuJNz7ingKfBfHtuxQwyeeb4SfwHAcSoAKMEVFxPNBdmpXJCdCvgvvf1o614+2rqXxZv38PLKUgBy0hO5YJg/OHIzexEXo9NUnU0gQVGKf87ghAyvr
ak+JWYWAyThn9Ruadnm2kuAl73HrwB/DGCMEeFYTR2vrCrlipFp9OymK1AktKQndWVm7gBm5g6gvt6xYecBlmwtZ8mWcp75uIgnP9xGfJcocgclc86QFCYNTmF0RhJdonUJbqgLJCjygWwzy8L/YT4L+FajPguAm4GlwHTgfeecM7MFwF/M7Df4J7OzgeWAtbDOV4GLgSLgQmDL1966MPPuxt1UHa3hxgmaxJbQFhVljMpIYlRGEndePJTDx2tZVlTBki17+WxbBb9eWABAt9hoJmQmM2lwCucMSeHMfonEKDhCTqtB4c053AUsxH8p6zPOuQ1m9kvA55xbADwNPO9NVlfi/+DH65eHf5K6FrjTOVcH0NQ6vbe8H3jBzO4GDgG3td3mdm55vmL69+zKeUN6B3soIqeke1wMl4xI45IRaYD/G/yWFVXy2bYKln5ewQNvbwYgIS6GCVnJnOMFxxnpiapjFgJUwqOTKNl3hAseXMwPL8nm7suHtb6ASCdSfvC4PzS2VfDZ5xVs23sYgIT4GMYP6sWEzGRyB/VizICeuneoDamER5h5aYV/CmdGrgoASvhJTYjjmjH9uGZMPwB2VR3js20VLN9eSX5RJR8U+E9VdYk2RvVP8geHFx76wq72pyOKTuBEAcDMlO78+bbGVyaLhL99h6tZsWMf+Tsq8W3fx9qS/dTU+T+7hvbpwYTMXuQOSmZCZjIDkrvqPo4A6YgijCz1CgD+dOqIYA9FJCh6dY89ebc4+K8AXFtSRf72SnzbK3ljbRkvLvffmpXSPZaxA3oybmBPxg3sxeiMJBLiuwRz+J2egqITmJtfTFLXLlzh/ScRiXTxXaKZmJXMxKxkwH/UvWXPQfK372P1F/tZXbyPRZv9RR3MILtPDy88ejF2QE+GpSVokvwUKChCXNWRGt7esIvZKgAo0qyoKGNE30RG9E3k25MGAf7/O2tK9rPKC453Nu4mz1cCQPfYaEZlJJ0MjtEZSfRNjNcpq2YoKELca14BwBkqAChySpK6dWHysFQmD/PfOe6cY0fFEVYV+486VhXv5w9LtlFb75/r6N0jjlH9ExnVP4lRGT0Z1T+JtMQ4hQcKipCX5ytmZD8VABQ5XWZGZu/uZPbuzvXj/FcPHqupY8POA6wvrWJtSRXrS6v4cEs5XnbQu0ccozOSOLN/EqP7+28gTEuMD+JWBIeCIoSdKAD4i2tHBnsoImEpvks04wf1YvygXifbjlTXsqnsAOtKqlhb6g+PDwr2nAyP1IQ4Rvf3h0dOv0Ry0hPJ6BXeV1opKEJYXn4xsTFRTBvbL9hDEYkY3WJjGD8omfGDkk+2HamuZePOA6wrrWJdSRXrSqtY3CA8EuJjOKNvIjn9EjkjPYEz0hMZlpYQNvOKCooQdaymjldX72TKyL4qACgSZN1iY/w3+GV+NTwKdh1kU9lBNpZVsansIHm+Yo5U1wEQHWUM7t3dCw//n5z0xE5Zdl1BEaLeOVEAUJPYIiGpW2wM4wb2YtzAL09b1dc7vqg8wsayA2wqO8DGnQfIL6rktdU7T/bp3SOOM9ITGJ6WwLC+/r+z03rQLTZ0P45Dd2QRbp5XAPDcISnBHoqIBCgq6ssJ86tGpZ9s33e4mk27/MGxqewgm8oO8KeiHVTX1p/sMyC5qz880hIY3tf/9+DU7iHx/R0KihBUsu8IHxfu5R8vzSZKNwWJdHq9usdy7pDenNug8nNdvWNHxWG27D7Ilt2HKNh9kC27DvJBQfnJS3ajo4zMlG4ng+PEn8yUbh1ajl1BEYLmr/DfFDR9vAoAioSr6ChjcGoPBqf2YOqZX7ZX19ZTtPfwyeDYsvsgG3ce4K31uzhRmi82Ooqs3t0ZmtaDe6eOYEByt3Ydq4IixNTXO+b5Sjh/aG8yerXvzheR0BMbE8Xwvv7TT4z5sv1odR2Few55RyAHKdxziHUlVcTGtP+RhYIixHz6eQWl+49y75UqACgiX+rqlR0ZldHxN9/qOwdDzFyfvwDg5SoAKCIhQkERQvYfqWbhhl1cP65/2NyoIyKdn4IihLy2eqdXAFCT2CISOhQUISTPV8yZ/RMZ2U8FAEUkdCgoQsT60io27DzATN2JLSIhRkERIvJ8XgHAMf2DPRQRka9QUISAYzV1vLqqlKkj+5LUTd/tKyKhRUERAhZu2MWBY7XcOEGnnUQk9CgoQsA8XwkZvbpyzmAVABSR0KOgCLLiSn8BwBnjB6gAoIiEJAVFkM1fUYIZTNe9EyISohQUQVRX75i/wl8AsH/PrsEejohIkxQUQfTp53sp3X9Uk9giEtIUFEE0N7+Ynt1UAFBEQpuCIkj2H6nmnQ27uW5s/5D4qkMRkeYEFBRmNtXMCsys0MzubeL1ODOb672+zMwyG7x2n9deYGZTTmGdj5rZoa+5XSHv1VWlVNfVq2SHiIS8VoPCzKKBx4ArgRxgtpnlNOp2K7DPOTcUeBh4wFs2B5gFjASmAo+bWXRr6zSzXKDXaW5bSMvzlTCqfxI5/RKDPRQRkRYFckQxESh0zm1zzlUDc4BpjfpMA57zHs8HLjUz89rnOOeOO+eKgEJvfc2u0wuRXwM/Pb1NC13rS6vYWHaAmbokVkQ6gUCCoj9Q3OB5idfWZB/nXC1QBaS0sGxL67wLWOCcK2tpUGZ2u5n5zMxXXl4ewGaEjjxfMXExUVw7VgUARST0hdRktpn1A2YAv22tr3PuKedcrnMuNzU1tf0H10ZOFgA8sy9JXVUAUERCXyBBUQo0nHHN8Nqa7GNmMUASUNHCss21jwOGAoVmth3oZmaFAW5Lp3CyAKAmsUWkkwgkKPKBbDPLMrNY/JPTCxr1WQDc7D2eDrzvnHNe+yzvqqgsIBtY3tw6nXN/dc71dc5lOucygSPeBHnYyPMVMyC5K5NUAFBEOomY1jo452rN7C5gIRANPOOc22BmvwR8zrkFwNPA895v/5X4P/jx+uUBG4Fa4E7nXB1AU+ts+80LLcWVR/iksIJ7Lh+mAoAi0mm0GhQAzrk3gTcbtf1rg8fH8M8tNLXsr4BfBbLOJvr0CGR8ncU8rwDgN8fraicR6TxCajI7nNXVO+b7irkgO1UFAEWkU1FQdJBPCveys+qYJrFFpNNRUHSQub5ienXrwmU5fYI9FBGRU6Kg6AD7Dlfz7obdXDdOBQBFpPNRUHSAV1erAKCIdF4KinbmnGNufjGjM5I4I10FAEWk81FQtLP1pQfYvOsgM3Q0ISKdlIKinZ0sADimX7CHIiLytSgo2tGxmjpeXV3KlSoAKCKdmIKiHb29fhcHj9Uyc4JOO4lI56WgaEcnCwBmqQCgiHReCop28kXFET79vIKZ4weoAKCIdGoKinYyf0WxCgCKSFhQULSDunrHvBUlTM5OpZ8KAIpIJ6egaAcfF+6lrOoYN2oSW0TCgIKiHeTl+wsAXnqGCgCKSOenoGhjlYereWfjLq4fl6ECgCISFhQUbezVVaXU1DlmTtAktoiEBwVFG3LOkecrZkxGEiP6qgCgiIQHBUUbWldapQKAIhJ2FBRt6GQBwLEqACgi
4UNB0UaO1dTx2uqdXDUqncR4FQAUkfChoGgjJwsA6rSTiIQZBUUbmZtfzMDkbpydlRzsoYiItCkFRRvYUXGYpdsqmJmboQKAIhJ2FBRtYP6KEqJUAFBEwpSC4jTV1Tvmryhh8rBU0pNUAFBEwo+C4jR9tLWcsqpjmsQWkbCloDhNeb5ikrvHctkZacEeiohIu1BQnIbKw9W8u3E314/rT2yM/ilFJDwF9OlmZlPNrMDMCs3s3iZejzOzud7ry8wss8Fr93ntBWY2pbV1mtkLXvt6M3vGzEL27rVXThQA1GknEQljrQaFmUUDjwFXAjnAbDPLadTtVmCfc24o8DDwgLdsDjALGAlMBR43s+hW1vkCMAIYBXQFbjutLWwnzjnm+YoZM6Anw/smBHs4IiLtJpAjiolAoXNum3OuGpgDTGvUZxrwnPd4PnCpmZnXPsc5d9w5VwQUeutrdp3OuTedB1gOhOQ1p2tL/AUAZ+aG5PBERNpMIEHRHyhu8LzEa2uyj3OuFqgCUlpYttV1eqecvg283dSgzOx2M/OZma+8vDyAzWhbeb5i4rtEcc0YFQAUkfAWyjOwjwNLnHMfNfWic+4p51yucy43NTW1Qwd2tLqOBat3ctWZKgAoIuEvJoA+pUDD2doMr62pPiVmFgMkARWtLNvsOs3s34BU4P8EML4O9/aGMg4er2XmBE1ii0j4C+SIIh/INrMsM4vFPzm9oFGfBcDN3uPpwPveHMMCYJZ3VVQWkI1/3qHZdZrZbcAUYLZzrv70Nq99zM0vZlCKCgCKSGRo9YjCOVdrZncBC4Fo4Bnn3AYz+yXgc84tAJ4GnjezQqAS/wc/Xr88YCNQC9zpnKsDaGqd3ls+AewAlvrnw3nZOffLNtvi07Sj4jCfbavkJ1OG441PRCSsBXLqCefcm8Cbjdr+tcHjY8CMZpb9FfCrQNbptQc0pmCZ5/MKAJ6lq51EJDKE8mR2yDlRAPDCYan0TYoP9nBERDqEguIULNlazq4DKgAoIpFFQXEK8vKLSekey6UqACgiEURBEaCKQ8d5b5MKAIpI5NEnXoBOFgDUvRMiEmEUFAFwzpHnK2bsgJ4MS1MBQBGJLAqKAKwpqWLL7kOaxBaRiKSgCMCXBQDTgz0UEZEOp6BoxdHqOl5fvZOrRqWToAKAIhKBFBSteGu9vwDgjTrtJCIRSkHRirn5xWSmdGOiCgCKSIRSULRg+97DLCuqZEbuABUAFJGIpaBowbwVxSoAKCIRT0HRjNq6euavKOGi4X1UAFBEIpqCohkfbd3L7gPHmZmrowkRiWwKimbM9QoAXjJCBQBFJLIpKJqgAoAiIl/Sp2ATXllVSm2940YVABQRUVA05pxjbn4x4wb2JFsFAEVEFBSNrS7ez9Y9KgAoInKCgqKRPF8JXbtEc/VoFQAUEQEFxVccqa7l9TUqACgi0pCCooG31u3i0PFaTWKLiDSgoGhgrq+YrN7dmZDZK9hDEREJGQoKT9HewywvqmRGboYKAIqINKCg8MzzqQCgiEhTFBT4CwC+tLKEi4f3IS1RBQBFRBpSUABLtpaz+8BxZujeCRGRv6GgwF8AsHePWC49o0+whyIiEnIiPij2HjrOok17uH5cf7pER/w/h4jI34j4T8ZXVqoAoIhISwIKCjObamYFZlZoZvc28Xqcmc31Xl9mZpkNXrvPay8wsymtrdPMsrx1FHrrjD3NbWyWc448XzFnDezJ0D4qACgi0pRWg8LMooHHgCuBHGC2meU06nYrsM85NxR4GHjAWzYHmAWMBKYCj5tZdCvrfAB42FvXPm/d7WKVCgCKiLQqkCOKiUChc26bc64amANMa9RnGvCc93g+cKn571qbBsxxzh13zhUBhd76mlynt8wl3jrw1nnd1966VszzFfsLAI7p115vISLS6QUSFP2B4gbPS7y2Jvs452qBKiClhWWba08B9nvraO69ADCz283MZ2a+8vLyADbjbw1M7s53zsukR1zM11peRCQSdNpPSOfcU8BTALm5ue7rrOOOi4a06ZhERMJRIEcUpUDDk/gZXluTfcwsBkgCKlpYtrn2CqCnt47m3ktERDpQIEGRD2R7VyPF4p+cXtCozwLgZu/xdOB955zz2md5V0VlAdnA8ubW6S2z2FsH3jpf+/qbJyIip6vVU0/OuVozuwtYCEQDzzjnNpjZLwGfc24B8DTwvJkVApX4P/jx+uUBG4Fa4E7nXB1AU+v03vKfgTlm9h/AKm/dIiISJOb/Jb5zy83NdT6fL9jDEBHpVMxshXMut7V+EX9ntoiItExBISIiLVJQiIhIixQUIiLSorCYzDazcmDH11y8N7C3DYfTGWibI4O2Ofyd7vYOcs6lttYpLILidJiZL5BZ/3CibY4M2ubw11Hbq1NPIiLSIgWFiIi0SEHhFRaMMNrmyKBtDn8dsr0RP0chIiIt0xGFiIi0SEEhIiItiuigMLOpZlZgZoVmdm+wx3MqzGyAmS02s41mtsHM/tFrTzazd81sq/d3L6/dzOxRb1vXmtlZDdZ1s9d/q5nd3KB9vJmt85Z51Puq2qDzvnd9lZm94T3PMrNl3jjneqXr8crbz/Xal5lZZoN13Oe1F5jZlAbtIfczYWY9zWy+mW02s01mdk6472czu9v7uV5vZi+aWXy47Wcze8bM9pjZ+gZt7b5fm3uPFjnnIvIP/vLmnwODgVhgDZAT7HGdwvjTgbO8xwnAFiAHeBC412u/F3jAe3wV8BZgwCRgmdeeDGzz/u7lPe7lvbbc62veslcGe7u9cd0D/AV4w3ueB8zyHj8B3OE9/gHwhPd4FjDXe5zj7e84IMv7OYgO1Z8J/N8df5v3OBboGc77Gf/XHxcBXRvs3++E234GJgNnAesbtLX7fm3uPVoca7D/EwTxh/EcYGGD5/cB9wV7XKexPa8BlwMFQLrXlg4UeI+fBGY36F/gvT4beLJB+5NeWzqwuUH7V/oFcTszgEXAJcAb3n+CvUBM4/2K//tOzvEex3j9rPG+PtEvFH8m8H9bZBHehSeN91847mf8QVHsffjFePt5SjjuZyCTrwZFu+/X5t6jpT+RfOrpxA/jCSVeW6fjHWqPA5YBac65Mu+lXUCa97i57W2pvaSJ9mD7H+CnQL33PAXY75yr9Z43HOfJbfNer/L6n+q/RTBlAeXAH73Tbf9rZt0J4/3snCsFHgK+AMrw77cVhPd+PqEj9mtz79GsSA6KsGBmPYCXgB855w40fM35f2UIm+ufzexqYI9zbkWwx9KBYvCfnvi9c24ccBj/6YKTwnA/9wKm4Q/JfkB3YGpQBxUEHbFfA32PSA6KUmBAg+cZXlunYWZd8IfEC865l73m3WaW7r2eDuzx2pvb3pbaM5poD6bzgGvNbDswB//pp0eAnmZ24mt9G47z5LZ5rycBFZz6v0UwlQAlzrll3vP5+IMjnPfzZUCRc67cOVcDvIx/34fzfj6hI/Zrc+/RrEgOinwg27uSIhb/JNiCII8pYN4VDE8Dm5xzv2nw0gLgxJUPN+OfuzjRfpN39cQkoMo7/Fw
IXGFmvbzf5K7Af/62DDhgZpO897qpwbqCwjl3n3MuwzmXiX9/ve+c+ztgMTDd69Z4m0/8W0z3+juvfZZ3tUwWkI1/4i/kfiacc7uAYjMb7jVdiv876MN2P+M/5TTJzLp5YzqxzWG7nxvoiP3a3Hs0L5iTVsH+g/9Kgi34r4D4l2CP5xTHfj7+Q8a1wGrvz1X4z80uArYC7wHJXn8DHvO2dR2Q22Bd3wUKvT+3NGjPBdZ7y/yORhOqQd7+i/jyqqfB+D8ACoF5QJzXHu89L/ReH9xg+X/xtquABlf5hOLPBDAW8Hn7+lX8V7eE9X4GfgFs9sb1PP4rl8JqPwMv4p+DqcF/5HhrR+zX5t6jpT8q4SEiIi2K5FNPIiISAAWFiIi0SEEhIiItUlCIiEiLFBQiItIiBYWIiLRIQSEiIi36/zob5nVzA95IAAAAAElFTkSuQmCC\n", - "text/plain": [ - "
" - ] - }, - "metadata": { - "needs_background": "light" - }, - "output_type": "display_data" - } - ], - "source": [ - "lrs=[]\n", - "for i in range(100000):\n", - " sc.step()\n", - " lrs.append(sc.get_lr())\n", - "xs = list(range(100000))\n", - "plt.plot(xs, lrs)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e613fe16", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f0fd9f40", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.0" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/.notebook/audio_feature.ipynb b/.notebook/audio_feature.ipynb deleted file mode 100644 index 04b4a3924..000000000 --- a/.notebook/audio_feature.ipynb +++ /dev/null @@ -1,1207 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 94, - "id": "matched-camera", - "metadata": {}, - "outputs": [], - "source": [ - "from nnAudio import Spectrogram\n", - "from scipy.io import wavfile\n", - "import torch\n", - "import soundfile as sf\n", - "import numpy as np" - ] - }, - { - "cell_type": "code", - "execution_count": 95, - "id": "quarterly-solution", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[43 75 69 ... 7 6 3]\n", - "[43 75 69 ... 7 6 3]\n", - "[43 75 69 ... 7 6 3]\n" - ] - } - ], - "source": [ - "import scipy.io.wavfile as wav\n", - "\n", - "rate,sig = wav.read('./BAC009S0764W0124.wav')\n", - "sr, song = wavfile.read('./BAC009S0764W0124.wav') # Loading your audio\n", - "sample, sr = sf.read('./BAC009S0764W0124.wav', dtype='int16')\n", - "print(sig)\n", - "print(song)\n", - "print(sample)" - ] - }, - { - "cell_type": "code", - "execution_count": 96, - "id": "middle-salem", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "16000\n", - "[43 75 69 ... 7 6 3]\n", - "(83792,)\n", - "int16\n", - "sampling rate = 16000. 
Please make sure the sampling rate is correct in order toget a valid freq range\n", - "STFT kernels created, time used = 0.2733 seconds\n", - "tensor([[[[-4.0940e+03, 1.2600e+04],\n", - " [ 8.5108e+03, -5.4930e+03],\n", - " [-3.3631e+03, -1.7904e+03],\n", - " ...,\n", - " [ 8.2279e+03, -9.3340e+03],\n", - " [-3.1990e+03, 2.0969e+03],\n", - " [-1.2669e+03, 4.4488e+03]],\n", - "\n", - " [[ 3.4886e+03, -9.9620e+03],\n", - " [-4.5364e+03, 4.1907e+02],\n", - " [ 2.5074e+03, 7.1339e+03],\n", - " ...,\n", - " [-5.4819e+03, 3.9258e+01],\n", - " [ 4.7221e+03, 6.5887e+01],\n", - " [ 9.6492e+02, -3.4386e+03]],\n", - "\n", - " [[-3.4947e+03, 9.2981e+03],\n", - " [-7.5164e+03, 8.1856e+02],\n", - " [-5.3766e+03, -9.0889e+03],\n", - " ...,\n", - " [ 1.4317e+03, 5.7447e+03],\n", - " [-3.1178e+03, 3.0740e+03],\n", - " [-3.4351e+03, 5.6900e+02]],\n", - "\n", - " ...,\n", - "\n", - " [[ 6.7112e+01, -4.5737e+00],\n", - " [-9.6295e+00, 3.5554e+01],\n", - " [ 1.8527e+00, -1.0491e+01],\n", - " ...,\n", - " [-1.1157e+01, 3.4423e+00],\n", - " [ 3.1193e+00, -4.4388e+00],\n", - " [-8.8242e+00, 8.0324e+00]],\n", - "\n", - " [[-6.5080e+01, 2.9543e+00],\n", - " [ 3.9992e+01, -1.3836e+01],\n", - " [-9.2803e+00, 1.0318e+01],\n", - " ...,\n", - " [ 4.2928e+00, 9.2397e+00],\n", - " [ 3.6642e+00, 9.4680e+00],\n", - " [ 4.8932e+00, -2.5199e+01]],\n", - "\n", - " [[ 4.7264e+01, -1.0721e+00],\n", - " [-6.0516e+00, -1.4589e+01],\n", - " [ 1.3127e+01, 1.4995e+00],\n", - " ...,\n", - " [ 1.7333e+01, -1.4380e+01],\n", - " [-3.6046e+00, -6.1019e+00],\n", - " [ 1.3321e+01, 2.3184e+01]]]])\n" - ] - } - ], - "source": [ - "sr, song = wavfile.read('./BAC009S0764W0124.wav') # Loading your audio\n", - "print(sr)\n", - "print(song)\n", - "print(song.shape)\n", - "print(song.dtype)\n", - "x = song\n", - "x = torch.tensor(x).float() # casting the array into a PyTorch Tensor\n", - "\n", - "spec_layer = Spectrogram.STFT(n_fft=2048, freq_bins=None, hop_length=512,\n", - " window='hann', freq_scale='linear', center=True, pad_mode='reflect',\n", - " fmin=50,fmax=8000, sr=sr) # Initializing the model\n", - "\n", - "spec = spec_layer(x) # Feed-forward your waveform to get the spectrogram\n", - "print(spec)" - ] - }, - { - "cell_type": "code", - "execution_count": 97, - "id": "finished-sterling", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "16000\n", - "[43 75 69 ... 7 6 3]\n", - "(83792,)\n", - "int16\n", - "True\n", - "sampling rate = 16000. 
Please make sure the sampling rate is correct in order toget a valid freq range\n", - "STFT kernels created, time used = 0.2001 seconds\n", - "torch.Size([1, 1025, 164, 2])\n", - "tensor([[[[-4.0940e+03, 1.2600e+04],\n", - " [ 8.5108e+03, -5.4930e+03],\n", - " [-3.3631e+03, -1.7904e+03],\n", - " ...,\n", - " [ 8.2279e+03, -9.3340e+03],\n", - " [-3.1990e+03, 2.0969e+03],\n", - " [-1.2669e+03, 4.4488e+03]],\n", - "\n", - " [[ 3.4886e+03, -9.9620e+03],\n", - " [-4.5364e+03, 4.1907e+02],\n", - " [ 2.5074e+03, 7.1339e+03],\n", - " ...,\n", - " [-5.4819e+03, 3.9258e+01],\n", - " [ 4.7221e+03, 6.5887e+01],\n", - " [ 9.6492e+02, -3.4386e+03]],\n", - "\n", - " [[-3.4947e+03, 9.2981e+03],\n", - " [-7.5164e+03, 8.1856e+02],\n", - " [-5.3766e+03, -9.0889e+03],\n", - " ...,\n", - " [ 1.4317e+03, 5.7447e+03],\n", - " [-3.1178e+03, 3.0740e+03],\n", - " [-3.4351e+03, 5.6900e+02]],\n", - "\n", - " ...,\n", - "\n", - " [[ 6.7112e+01, -4.5737e+00],\n", - " [-9.6295e+00, 3.5554e+01],\n", - " [ 1.8527e+00, -1.0491e+01],\n", - " ...,\n", - " [-1.1157e+01, 3.4423e+00],\n", - " [ 3.1193e+00, -4.4388e+00],\n", - " [-8.8242e+00, 8.0324e+00]],\n", - "\n", - " [[-6.5080e+01, 2.9543e+00],\n", - " [ 3.9992e+01, -1.3836e+01],\n", - " [-9.2803e+00, 1.0318e+01],\n", - " ...,\n", - " [ 4.2928e+00, 9.2397e+00],\n", - " [ 3.6642e+00, 9.4680e+00],\n", - " [ 4.8932e+00, -2.5199e+01]],\n", - "\n", - " [[ 4.7264e+01, -1.0721e+00],\n", - " [-6.0516e+00, -1.4589e+01],\n", - " [ 1.3127e+01, 1.4995e+00],\n", - " ...,\n", - " [ 1.7333e+01, -1.4380e+01],\n", - " [-3.6046e+00, -6.1019e+00],\n", - " [ 1.3321e+01, 2.3184e+01]]]])\n", - "True\n" - ] - } - ], - "source": [ - "wav, sr = sf.read('./BAC009S0764W0124.wav', dtype='int16')\n", - "print(sr)\n", - "print(wav)\n", - "print(wav.shape)\n", - "print(wav.dtype)\n", - "print(np.allclose(wav, song))\n", - "\n", - "x = wav\n", - "x = torch.tensor(x).float() # casting the array into a PyTorch Tensor\n", - "\n", - "spec_layer = Spectrogram.STFT(n_fft=2048, freq_bins=None, hop_length=512,\n", - " window='hann', freq_scale='linear', center=True, pad_mode='reflect',\n", - " fmin=50,fmax=8000, sr=sr) # Initializing the model\n", - "\n", - "wav_spec = spec_layer(x) # Feed-forward your waveform to get the spectrogram\n", - "print(wav_spec.shape)\n", - "print(wav_spec)\n", - "print(np.allclose(wav_spec, spec))" - ] - }, - { - "cell_type": "code", - "execution_count": 98, - "id": "running-technology", - "metadata": {}, - "outputs": [], - "source": [ - "import decimal\n", - "\n", - "import numpy\n", - "import math\n", - "import logging\n", - "def round_half_up(number):\n", - " return int(decimal.Decimal(number).quantize(decimal.Decimal('1'), rounding=decimal.ROUND_HALF_UP))\n", - "\n", - "\n", - "def rolling_window(a, window, step=1):\n", - " # http://ellisvalentiner.com/post/2017-03-21-np-strides-trick\n", - " shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)\n", - " strides = a.strides + (a.strides[-1],)\n", - " return numpy.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)[::step]\n", - "\n", - "\n", - "def framesig(sig, frame_len, frame_step, dither=1.0, preemph=0.97, remove_dc_offset=True, wintype='hamming', stride_trick=True):\n", - " \"\"\"Frame a signal into overlapping frames.\n", - "\n", - " :param sig: the audio signal to frame.\n", - " :param frame_len: length of each frame measured in samples.\n", - " :param frame_step: number of samples after the start of the previous frame that the next frame should begin.\n", - " :param winfunc: the analysis window to apply to 
each frame. By default no window is applied.\n", - " :param stride_trick: use stride trick to compute the rolling window and window multiplication faster\n", - " :returns: an array of frames. Size is NUMFRAMES by frame_len.\n", - " \"\"\"\n", - " slen = len(sig)\n", - " frame_len = int(round_half_up(frame_len))\n", - " frame_step = int(round_half_up(frame_step))\n", - " if slen <= frame_len:\n", - " numframes = 1\n", - " else:\n", - " numframes = 1 + (( slen - frame_len) // frame_step)\n", - "\n", - " # check kaldi/src/feat/feature-window.h\n", - " padsignal = sig[:(numframes-1)*frame_step+frame_len]\n", - " if wintype is 'povey':\n", - " win = numpy.empty(frame_len)\n", - " for i in range(frame_len):\n", - " win[i] = (0.5-0.5*numpy.cos(2*numpy.pi/(frame_len-1)*i))**0.85 \n", - " else: # the hamming window\n", - " win = numpy.hamming(frame_len)\n", - " \n", - " if stride_trick:\n", - " frames = rolling_window(padsignal, window=frame_len, step=frame_step)\n", - " else:\n", - " indices = numpy.tile(numpy.arange(0, frame_len), (numframes, 1)) + numpy.tile(\n", - " numpy.arange(0, numframes * frame_step, frame_step), (frame_len, 1)).T\n", - " indices = numpy.array(indices, dtype=numpy.int32)\n", - " frames = padsignal[indices]\n", - " win = numpy.tile(win, (numframes, 1))\n", - " \n", - " frames = frames.astype(numpy.float32)\n", - " raw_frames = numpy.zeros(frames.shape)\n", - " for frm in range(frames.shape[0]):\n", - " raw_frames[frm,:] = frames[frm,:]\n", - " frames[frm,:] = do_dither(frames[frm,:], dither) # dither\n", - " frames[frm,:] = do_remove_dc_offset(frames[frm,:]) # remove dc offset\n", - " # raw_frames[frm,:] = frames[frm,:]\n", - " frames[frm,:] = do_preemphasis(frames[frm,:], preemph) # preemphasize\n", - "\n", - " return frames * win, raw_frames\n", - "\n", - "\n", - "def magspec(frames, NFFT):\n", - " \"\"\"Compute the magnitude spectrum of each frame in frames. If frames is an NxD matrix, output will be Nx(NFFT/2+1).\n", - "\n", - " :param frames: the array of frames. Each row is a frame.\n", - " :param NFFT: the FFT length to use. If NFFT > frame_len, the frames are zero-padded.\n", - " :returns: If frames is an NxD matrix, output will be Nx(NFFT/2+1). Each row will be the magnitude spectrum of the corresponding frame.\n", - " \"\"\"\n", - " if numpy.shape(frames)[1] > NFFT:\n", - " logging.warn(\n", - " 'frame length (%d) is greater than FFT size (%d), frame will be truncated. Increase NFFT to avoid.',\n", - " numpy.shape(frames)[1], NFFT)\n", - " complex_spec = numpy.fft.rfft(frames, NFFT)\n", - " return numpy.absolute(complex_spec)\n", - "\n", - "\n", - "def powspec(frames, NFFT):\n", - " \"\"\"Compute the power spectrum of each frame in frames. If frames is an NxD matrix, output will be Nx(NFFT/2+1).\n", - "\n", - " :param frames: the array of frames. Each row is a frame.\n", - " :param NFFT: the FFT length to use. If NFFT > frame_len, the frames are zero-padded.\n", - " :returns: If frames is an NxD matrix, output will be Nx(NFFT/2+1). 
Each row will be the power spectrum of the corresponding frame.\n", - " \"\"\"\n", - " return numpy.square(magspec(frames, NFFT))\n", - "\n", - "\n", - "def do_dither(signal, dither_value=1.0):\n", - " signal += numpy.random.normal(size=signal.shape) * dither_value\n", - " return signal\n", - " \n", - "def do_remove_dc_offset(signal):\n", - " signal -= numpy.mean(signal)\n", - " return signal\n", - "\n", - "def do_preemphasis(signal, coeff=0.97):\n", - " \"\"\"perform preemphasis on the input signal.\n", - "\n", - " :param signal: The signal to filter.\n", - " :param coeff: The preemphasis coefficient. 0 is no filter, default is 0.95.\n", - " :returns: the filtered signal.\n", - " \"\"\"\n", - " return numpy.append((1-coeff)*signal[0], signal[1:] - coeff * signal[:-1])" - ] - }, - { - "cell_type": "code", - "execution_count": 99, - "id": "ignored-retreat", - "metadata": {}, - "outputs": [], - "source": [ - "def fbank(signal,samplerate=16000,winlen=0.025,winstep=0.01,\n", - " nfilt=40,nfft=512,lowfreq=0,highfreq=None,dither=1.0,remove_dc_offset=True, preemph=0.97, \n", - " wintype='hamming'):\n", - " highfreq= highfreq or samplerate/2\n", - " frames, raw_frames = framesig(signal, winlen*samplerate, winstep*samplerate, dither, preemph, remove_dc_offset, wintype)\n", - " spec = magspec(frames, nfft) # nearly the same until this part\n", - " rspec = magspec(raw_frames, nfft)\n", - " return spec, rspec\n", - "\n", - "\n", - "\n", - "def frames(signal,samplerate=16000,winlen=0.025,winstep=0.01,\n", - " nfilt=40,nfft=512,lowfreq=0,highfreq=None,dither=1.0,remove_dc_offset=True, preemph=0.97, \n", - " wintype='hamming'):\n", - " highfreq= highfreq or samplerate/2\n", - " frames, raw_frames = framesig(signal, winlen*samplerate, winstep*samplerate, dither, preemph, remove_dc_offset, wintype)\n", - " return raw_frames" - ] - }, - { - "cell_type": "code", - "execution_count": 100, - "id": "federal-teacher", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "import torch\n", - "import torch.nn as nn\n", - "from torch.nn.functional import conv1d, conv2d, fold\n", - "import scipy # used only in CFP\n", - "\n", - "import numpy as np\n", - "from time import time\n", - "\n", - "def pad_center(data, size, axis=-1, **kwargs):\n", - "\n", - " kwargs.setdefault('mode', 'constant')\n", - "\n", - " n = data.shape[axis]\n", - "\n", - " lpad = int((size - n) // 2)\n", - "\n", - " lengths = [(0, 0)] * data.ndim\n", - " lengths[axis] = (lpad, int(size - n - lpad))\n", - "\n", - " if lpad < 0:\n", - " raise ParameterError(('Target size ({:d}) must be '\n", - " 'at least input size ({:d})').format(size, n))\n", - "\n", - " return np.pad(data, lengths, **kwargs)\n", - "\n", - "\n", - "\n", - "sz_float = 4 # size of a float\n", - "epsilon = 10e-8 # fudge factor for normalization\n", - "\n", - "def create_fourier_kernels(n_fft, win_length=None, freq_bins=None, fmin=50,fmax=6000, sr=44100,\n", - " freq_scale='linear', window='hann', verbose=True):\n", - "\n", - " if freq_bins==None: freq_bins = n_fft//2+1\n", - " if win_length==None: win_length = n_fft\n", - "\n", - " s = np.arange(0, n_fft, 1.)\n", - " wsin = np.empty((freq_bins,1,n_fft))\n", - " wcos = np.empty((freq_bins,1,n_fft))\n", - " start_freq = fmin\n", - " end_freq = fmax\n", - " bins2freq = []\n", - " binslist = []\n", - "\n", - " # num_cycles = start_freq*d/44000.\n", - " # scaling_ind = np.log(end_freq/start_freq)/k\n", - "\n", - " # Choosing window shape\n", - "\n", - " #window_mask = get_window(window, int(win_length), fftbins=True)\n", - " 
window_mask = np.hamming(int(win_length))\n", - " window_mask = pad_center(window_mask, n_fft)\n", - "\n", - " if freq_scale == 'linear':\n", - " if verbose==True:\n", - " print(f\"sampling rate = {sr}. Please make sure the sampling rate is correct in order to\"\n", - " f\"get a valid freq range\")\n", - " \n", - " start_bin = start_freq*n_fft/sr\n", - " scaling_ind = (end_freq-start_freq)*(n_fft/sr)/freq_bins\n", - "\n", - " for k in range(freq_bins): # Only half of the bins contain useful info\n", - " # print(\"linear freq = {}\".format((k*scaling_ind+start_bin)*sr/n_fft))\n", - " bins2freq.append((k*scaling_ind+start_bin)*sr/n_fft)\n", - " binslist.append((k*scaling_ind+start_bin))\n", - " wsin[k,0,:] = np.sin(2*np.pi*(k*scaling_ind+start_bin)*s/n_fft)\n", - " wcos[k,0,:] = np.cos(2*np.pi*(k*scaling_ind+start_bin)*s/n_fft)\n", - "\n", - " elif freq_scale == 'log':\n", - " if verbose==True:\n", - " print(f\"sampling rate = {sr}. Please make sure the sampling rate is correct in order to\"\n", - " f\"get a valid freq range\")\n", - " start_bin = start_freq*n_fft/sr\n", - " scaling_ind = np.log(end_freq/start_freq)/freq_bins\n", - "\n", - " for k in range(freq_bins): # Only half of the bins contain useful info\n", - " # print(\"log freq = {}\".format(np.exp(k*scaling_ind)*start_bin*sr/n_fft))\n", - " bins2freq.append(np.exp(k*scaling_ind)*start_bin*sr/n_fft)\n", - " binslist.append((np.exp(k*scaling_ind)*start_bin))\n", - " wsin[k,0,:] = np.sin(2*np.pi*(np.exp(k*scaling_ind)*start_bin)*s/n_fft)\n", - " wcos[k,0,:] = np.cos(2*np.pi*(np.exp(k*scaling_ind)*start_bin)*s/n_fft)\n", - "\n", - " elif freq_scale == 'no':\n", - " for k in range(freq_bins): # Only half of the bins contain useful info\n", - " bins2freq.append(k*sr/n_fft)\n", - " binslist.append(k)\n", - " wsin[k,0,:] = np.sin(2*np.pi*k*s/n_fft)\n", - " wcos[k,0,:] = np.cos(2*np.pi*k*s/n_fft)\n", - " else:\n", - " print(\"Please select the correct frequency scale, 'linear' or 'log'\")\n", - " return wsin.astype(np.float32),wcos.astype(np.float32), bins2freq, binslist, window_mask.astype(np.float32)\n", - "\n", - "\n", - "\n", - "def broadcast_dim(x):\n", - " \"\"\"\n", - " Auto broadcast input so that it can fits into a Conv1d\n", - " \"\"\"\n", - "\n", - " if x.dim() == 2:\n", - " x = x[:, None, :]\n", - " elif x.dim() == 1:\n", - " # If nn.DataParallel is used, this broadcast doesn't work\n", - " x = x[None, None, :]\n", - " elif x.dim() == 3:\n", - " pass\n", - " else:\n", - " raise ValueError(\"Only support input with shape = (batch, len) or shape = (len)\")\n", - " return x\n", - "\n", - "\n", - "\n", - "### --------------------------- Spectrogram Classes ---------------------------###\n", - "class STFT(torch.nn.Module):\n", - "\n", - " def __init__(self, n_fft=2048, win_length=None, freq_bins=None, hop_length=None, window='hann',\n", - " freq_scale='no', center=True, pad_mode='reflect', iSTFT=False,\n", - " fmin=50, fmax=6000, sr=22050, trainable=False,\n", - " output_format=\"Complex\", verbose=True):\n", - "\n", - " super().__init__()\n", - "\n", - " # Trying to make the default setting same as librosa\n", - " if win_length==None: win_length = n_fft\n", - " if hop_length==None: hop_length = int(win_length // 4)\n", - "\n", - " self.output_format = output_format\n", - " self.trainable = trainable\n", - " self.stride = hop_length\n", - " self.center = center\n", - " self.pad_mode = pad_mode\n", - " self.n_fft = n_fft\n", - " self.freq_bins = freq_bins\n", - " self.trainable = trainable\n", - " self.pad_amount = self.n_fft // 
2\n", - " self.window = window\n", - " self.win_length = win_length\n", - " self.iSTFT = iSTFT\n", - " self.trainable = trainable\n", - " start = time()\n", - "\n", - "\n", - "\n", - " # Create filter windows for stft\n", - " kernel_sin, kernel_cos, self.bins2freq, self.bin_list, window_mask = create_fourier_kernels(n_fft,\n", - " win_length=win_length,\n", - " freq_bins=freq_bins,\n", - " window=window,\n", - " freq_scale=freq_scale,\n", - " fmin=fmin,\n", - " fmax=fmax,\n", - " sr=sr,\n", - " verbose=verbose)\n", - "\n", - "\n", - " kernel_sin = torch.tensor(kernel_sin, dtype=torch.float)\n", - " kernel_cos = torch.tensor(kernel_cos, dtype=torch.float)\n", - " \n", - " # In this way, the inverse kernel and the forward kernel do not share the same memory...\n", - " kernel_sin_inv = torch.cat((kernel_sin, -kernel_sin[1:-1].flip(0)), 0)\n", - " kernel_cos_inv = torch.cat((kernel_cos, kernel_cos[1:-1].flip(0)), 0)\n", - " \n", - " if iSTFT:\n", - " self.register_buffer('kernel_sin_inv', kernel_sin_inv.unsqueeze(-1))\n", - " self.register_buffer('kernel_cos_inv', kernel_cos_inv.unsqueeze(-1))\n", - "\n", - " # Applying window functions to the Fourier kernels\n", - " if window:\n", - " window_mask = torch.tensor(window_mask)\n", - " wsin = kernel_sin * window_mask\n", - " wcos = kernel_cos * window_mask\n", - " else:\n", - " wsin = kernel_sin\n", - " wcos = kernel_cos\n", - " \n", - " if self.trainable==False:\n", - " self.register_buffer('wsin', wsin)\n", - " self.register_buffer('wcos', wcos) \n", - " \n", - " if self.trainable==True:\n", - " wsin = torch.nn.Parameter(wsin, requires_grad=self.trainable)\n", - " wcos = torch.nn.Parameter(wcos, requires_grad=self.trainable) \n", - " self.register_parameter('wsin', wsin)\n", - " self.register_parameter('wcos', wcos) \n", - " \n", - " # Prepare the shape of window mask so that it can be used later in inverse\n", - " # self.register_buffer('window_mask', window_mask.unsqueeze(0).unsqueeze(-1))\n", - " \n", - " if verbose==True:\n", - " print(\"STFT kernels created, time used = {:.4f} seconds\".format(time()-start))\n", - " else:\n", - " pass\n", - "\n", - " def forward(self, x, output_format=None):\n", - " \"\"\"\n", - " Convert a batch of waveforms to spectrograms.\n", - " \n", - " Parameters\n", - " ----------\n", - " x : torch tensor\n", - " Input signal should be in either of the following shapes.\\n\n", - " 1. ``(len_audio)``\\n\n", - " 2. ``(num_audio, len_audio)``\\n\n", - " 3. ``(num_audio, 1, len_audio)``\n", - " It will be automatically broadcast to the right shape\n", - " \n", - " output_format : str\n", - " Control the type of spectrogram to be return. Can be either ``Magnitude`` or ``Complex`` or ``Phase``.\n", - " Default value is ``Complex``. 
\n", - " \n", - " \"\"\"\n", - " output_format = output_format or self.output_format\n", - " self.num_samples = x.shape[-1]\n", - " \n", - " x = broadcast_dim(x)\n", - " if self.center:\n", - " if self.pad_mode == 'constant':\n", - " padding = nn.ConstantPad1d(self.pad_amount, 0)\n", - "\n", - " elif self.pad_mode == 'reflect':\n", - " if self.num_samples < self.pad_amount:\n", - " raise AssertionError(\"Signal length shorter than reflect padding length (n_fft // 2).\")\n", - " padding = nn.ReflectionPad1d(self.pad_amount)\n", - "\n", - " x = padding(x)\n", - " spec_imag = conv1d(x, self.wsin, stride=self.stride)\n", - " spec_real = conv1d(x, self.wcos, stride=self.stride) # Doing STFT by using conv1d\n", - "\n", - " # remove redundant parts\n", - " spec_real = spec_real[:, :self.freq_bins, :]\n", - " spec_imag = spec_imag[:, :self.freq_bins, :]\n", - "\n", - " if output_format=='Magnitude':\n", - " spec = spec_real.pow(2) + spec_imag.pow(2)\n", - " if self.trainable==True:\n", - " return torch.sqrt(spec+1e-8) # prevent Nan gradient when sqrt(0) due to output=0\n", - " else:\n", - " return torch.sqrt(spec)\n", - "\n", - " elif output_format=='Complex':\n", - " return torch.stack((spec_real,-spec_imag), -1) # Remember the minus sign for imaginary part\n", - "\n", - " elif output_format=='Phase':\n", - " return torch.atan2(-spec_imag+0.0,spec_real) # +0.0 removes -0.0 elements, which leads to error in calculating phase\n", - "\n", - " def inverse(self, X, onesided=True, length=None, refresh_win=True):\n", - " \"\"\"\n", - " This function is same as the :func:`~nnAudio.Spectrogram.iSTFT` class, \n", - " which is to convert spectrograms back to waveforms. \n", - " It only works for the complex value spectrograms. If you have the magnitude spectrograms,\n", - " please use :func:`~nnAudio.Spectrogram.Griffin_Lim`. \n", - " \n", - " Parameters\n", - " ----------\n", - " onesided : bool\n", - " If your spectrograms only have ``n_fft//2+1`` frequency bins, please use ``onesided=True``,\n", - " else use ``onesided=False``\n", - "\n", - " length : int\n", - " To make sure the inverse STFT has the same output length of the original waveform, please\n", - " set `length` as your intended waveform length. By default, ``length=None``,\n", - " which will remove ``n_fft//2`` samples from the start and the end of the output.\n", - " \n", - " refresh_win : bool\n", - " Recalculating the window sum square. If you have an input with fixed number of timesteps,\n", - " you can increase the speed by setting ``refresh_win=False``. 
Else please keep ``refresh_win=True``\n", - " \n", - " \n", - " \"\"\"\n", - " if (hasattr(self, 'kernel_sin_inv') != True) or (hasattr(self, 'kernel_cos_inv') != True):\n", - " raise NameError(\"Please activate the iSTFT module by setting `iSTFT=True` if you want to use `inverse`\") \n", - " \n", - " assert X.dim()==4 , \"Inverse iSTFT only works for complex number,\" \\\n", - " \"make sure our tensor is in the shape of (batch, freq_bins, timesteps, 2).\"\\\n", - " \"\\nIf you have a magnitude spectrogram, please consider using Griffin-Lim.\"\n", - " if onesided:\n", - " X = extend_fbins(X) # extend freq\n", - "\n", - " \n", - " X_real, X_imag = X[:, :, :, 0], X[:, :, :, 1]\n", - "\n", - " # broadcast dimensions to support 2D convolution\n", - " X_real_bc = X_real.unsqueeze(1)\n", - " X_imag_bc = X_imag.unsqueeze(1)\n", - " a1 = conv2d(X_real_bc, self.kernel_cos_inv, stride=(1,1))\n", - " b2 = conv2d(X_imag_bc, self.kernel_sin_inv, stride=(1,1))\n", - " \n", - " # compute real and imag part. signal lies in the real part\n", - " real = a1 - b2\n", - " real = real.squeeze(-2)*self.window_mask\n", - "\n", - " # Normalize the amplitude with n_fft\n", - " real /= (self.n_fft)\n", - "\n", - " # Overlap and Add algorithm to connect all the frames\n", - " real = overlap_add(real, self.stride)\n", - " \n", - " # Prepare the window sumsqure for division\n", - " # Only need to create this window once to save time\n", - " # Unless the input spectrograms have different time steps\n", - " if hasattr(self, 'w_sum')==False or refresh_win==True:\n", - " self.w_sum = torch_window_sumsquare(self.window_mask.flatten(), X.shape[2], self.stride, self.n_fft).flatten()\n", - " self.nonzero_indices = (self.w_sum>1e-10) \n", - " else:\n", - " pass\n", - " real[:, self.nonzero_indices] = real[:,self.nonzero_indices].div(self.w_sum[self.nonzero_indices])\n", - " # Remove padding\n", - " if length is None: \n", - " if self.center:\n", - " real = real[:, self.pad_amount:-self.pad_amount]\n", - "\n", - " else:\n", - " if self.center:\n", - " real = real[:, self.pad_amount:self.pad_amount + length] \n", - " else:\n", - " real = real[:, :length] \n", - " \n", - " return real\n", - " \n", - " def extra_repr(self) -> str:\n", - " return 'n_fft={}, Fourier Kernel size={}, iSTFT={}, trainable={}'.format(\n", - " self.n_fft, (*self.wsin.shape,), self.iSTFT, self.trainable\n", - " ) " - ] - }, - { - "cell_type": "code", - "execution_count": 128, - "id": "unusual-baker", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "16000\n", - "(83792,)\n", - "sampling rate = 16000. Please make sure the sampling rate is correct in order toget a valid freq range\n", - "STFT kernels created, time used = 0.0153 seconds\n", - "torch.Size([521, 257])\n", - "(522, 257)\n", - "[[5.84560000e+04 2.55260664e+04 9.83611035e+03 ... 7.80710554e+00\n", - " 2.32206573e+01 1.90274487e+01]\n", - " [1.35420000e+04 3.47535000e+04 1.51204707e+04 ... 1.69094101e+02\n", - " 1.80534729e+02 1.84179596e+02]\n", - " [3.47560000e+04 2.83094609e+04 8.20204883e+03 ... 1.02080307e+02\n", - " 1.21321175e+02 1.08345497e+02]\n", - " ...\n", - " [9.36700000e+03 2.86213008e+04 1.41182402e+04 ... 1.19344498e+02\n", - " 1.25670158e+02 1.20691467e+02]\n", - " [2.87510000e+04 2.04348242e+04 8.76390625e+03 ... 9.74485092e+01\n", - " 9.01831894e+01 9.84055099e+01]\n", - " [4.45240000e+04 8.93593262e+03 4.39246826e+03 ... 6.16300154e+00\n", - " 8.94473553e+00 9.61348629e+00]]\n", - "[[5.89760000e+04 2.51006729e+04 8.59603890e+03 ... 
2.02818313e+01\n", - " 2.40645984e+01 2.20000000e+01]\n", - " [2.92660000e+04 2.72981079e+04 4.77242582e+03 ... 6.69265842e+01\n", - " 1.18775735e+02 1.62000000e+02]\n", - " [1.96300000e+04 2.81174834e+04 5.28803149e+03 ... 2.85011387e+01\n", - " 9.57810428e+01 1.42000000e+02]\n", - " ...\n", - " [1.67720000e+04 2.13227930e+04 4.06079895e+02 ... 2.60119790e+01\n", - " 7.84053656e+01 9.00000000e+01]\n", - " [3.86930000e+04 1.35982074e+04 6.77068420e+03 ... 6.10707909e+01\n", - " 5.13101944e+01 3.50000000e+01]\n", - " [3.10200000e+04 1.59203961e+04 4.30198496e+03 ... 5.36851600e+01\n", - " 6.36197377e+01 4.40000000e+01]]\n" - ] - } - ], - "source": [ - "wav, sr = sf.read('./BAC009S0764W0124.wav', dtype='int16')\n", - "print(sr)\n", - "print(wav.shape)\n", - "\n", - "x = wav\n", - "x = torch.tensor(x).float() # casting the array into a PyTorch Tensor\n", - "\n", - "spec_layer = STFT(n_fft=512, win_length=400, hop_length=160,\n", - " window='', freq_scale='linear', center=False, pad_mode='constant',\n", - " fmin=0, fmax=8000, sr=sr, output_format='Magnitude')\n", - "wav_spec = spec_layer(x) # Feed-forward your waveform to get the spectrogram\n", - "wav_spec = wav_spec[0].T\n", - "print(wav_spec.shape)\n", - "\n", - "\n", - "spec, rspec = fbank(wav, samplerate=16000,winlen=0.025,winstep=0.01,\n", - " nfilt=40, nfft=512,lowfreq=0,highfreq=None,\n", - " dither=0.0,remove_dc_offset=False, preemph=1.0, \n", - " wintype='hamming')\n", - "print(spec.shape)\n", - "\n", - "print(wav_spec.numpy())\n", - "print(rspec)\n", - "# print(spec)\n", - "\n", - "# spec, rspec = fbank(wav, samplerate=16000,winlen=0.032,winstep=0.01,\n", - "# nfilt=40, nfft=512,lowfreq=0,highfreq=None,\n", - "# dither=0.0,remove_dc_offset=False, preemph=1.0, \n", - "# wintype='hamming')\n", - "# print(rspec)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "white-istanbul", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 129, - "id": "modern-rescue", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[0. 0.11697778 0.41317591 0.75 0.96984631 0.96984631\n", - " 0.75 0.41317591 0.11697778 0. ]\n" - ] - }, - { - "data": { - "text/plain": [ - "array([0. , 0.0954915, 0.3454915, 0.6545085, 0.9045085, 1. ,\n", - " 0.9045085, 0.6545085, 0.3454915, 0.0954915])" - ] - }, - "execution_count": 129, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "print(np.hanning(10))\n", - "from scipy.signal import get_window\n", - "get_window('hann', 10, fftbins=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "professional-journalism", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 153, - "id": "involved-motion", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "(522, 400)\n", - "[[ 43. 75. 69. ... 46. 46. 45.]\n", - " [ 210. 215. 216. ... -86. -89. -91.]\n", - " [ 128. 128. 128. ... -154. -151. -151.]\n", - " ...\n", - " [ -60. -61. -61. ... 112. 109. 110.]\n", - " [ 20. 22. 24. ... 91. 87. 87.]\n", - " [ 111. 107. 108. ... -6. -4. 
-8.]]\n", - "torch.Size([1, 1, 83792])\n", - "torch.Size([400, 1, 512])\n", - "torch.Size([1, 400, 521])\n", - "conv frame tensor([[ 43., 75., 69., ..., 46., 46., 45.],\n", - " [ 210., 215., 216., ..., -86., -89., -91.],\n", - " [ 128., 128., 128., ..., -154., -151., -151.],\n", - " ...,\n", - " [-143., -141., -142., ..., 96., 101., 101.],\n", - " [ -60., -61., -61., ..., 112., 109., 110.],\n", - " [ 20., 22., 24., ..., 91., 87., 87.]])\n", - "xx [[5.8976000e+04 2.5100676e+04 8.5960371e+03 ... 2.0281837e+01\n", - " 2.4064583e+01 2.2000000e+01]\n", - " [2.9266000e+04 2.7298107e+04 4.7724253e+03 ... 6.6926659e+01\n", - " 1.1877571e+02 1.6200000e+02]\n", - " [1.9630000e+04 2.8117480e+04 5.2880322e+03 ... 2.8501144e+01\n", - " 9.5781029e+01 1.4200000e+02]\n", - " ...\n", - " [2.1113000e+04 2.3099363e+04 7.1594033e+03 ... 3.1945959e+01\n", - " 9.1511757e+01 1.1500000e+02]\n", - " [1.6772000e+04 2.1322793e+04 4.0607855e+02 ... 2.6011946e+01\n", - " 7.8405365e+01 9.0000000e+01]\n", - " [3.8693000e+04 1.3598203e+04 6.7706826e+03 ... 6.1070789e+01\n", - " 5.1310158e+01 3.5000000e+01]]\n", - "torch.Size([521, 257])\n", - "yy [[5.89760000e+04 2.51006729e+04 8.59603890e+03 ... 2.02818313e+01\n", - " 2.40645984e+01 2.20000000e+01]\n", - " [2.92660000e+04 2.72981079e+04 4.77242582e+03 ... 6.69265842e+01\n", - " 1.18775735e+02 1.62000000e+02]\n", - " [1.96300000e+04 2.81174834e+04 5.28803149e+03 ... 2.85011387e+01\n", - " 9.57810428e+01 1.42000000e+02]\n", - " ...\n", - " [2.11130000e+04 2.30993602e+04 7.15940084e+03 ... 3.19459779e+01\n", - " 9.15117270e+01 1.15000000e+02]\n", - " [1.67720000e+04 2.13227930e+04 4.06079895e+02 ... 2.60119790e+01\n", - " 7.84053656e+01 9.00000000e+01]\n", - " [3.86930000e+04 1.35982074e+04 6.77068420e+03 ... 6.10707909e+01\n", - " 5.13101944e+01 3.50000000e+01]]\n", - "yy (522, 257)\n", - "[[5.8976000e+04 2.5100676e+04 8.5960371e+03 ... 2.0281837e+01\n", - " 2.4064583e+01 2.2000000e+01]\n", - " [2.9266000e+04 2.7298107e+04 4.7724253e+03 ... 6.6926659e+01\n", - " 1.1877571e+02 1.6200000e+02]\n", - " [1.9630000e+04 2.8117480e+04 5.2880322e+03 ... 2.8501144e+01\n", - " 9.5781029e+01 1.4200000e+02]\n", - " ...\n", - " [2.1113000e+04 2.3099363e+04 7.1594033e+03 ... 3.1945959e+01\n", - " 9.1511757e+01 1.1500000e+02]\n", - " [1.6772000e+04 2.1322793e+04 4.0607855e+02 ... 2.6011946e+01\n", - " 7.8405365e+01 9.0000000e+01]\n", - " [3.8693000e+04 1.3598203e+04 6.7706826e+03 ... 6.1070789e+01\n", - " 5.1310158e+01 3.5000000e+01]]\n", - "[[5.89760000e+04 2.51006729e+04 8.59603890e+03 ... 2.02818313e+01\n", - " 2.40645984e+01 2.20000000e+01]\n", - " [2.92660000e+04 2.72981079e+04 4.77242582e+03 ... 6.69265842e+01\n", - " 1.18775735e+02 1.62000000e+02]\n", - " [1.96300000e+04 2.81174834e+04 5.28803149e+03 ... 2.85011387e+01\n", - " 9.57810428e+01 1.42000000e+02]\n", - " ...\n", - " [2.11130000e+04 2.30993602e+04 7.15940084e+03 ... 3.19459779e+01\n", - " 9.15117270e+01 1.15000000e+02]\n", - " [1.67720000e+04 2.13227930e+04 4.06079895e+02 ... 2.60119790e+01\n", - " 7.84053656e+01 9.00000000e+01]\n", - " [3.86930000e+04 1.35982074e+04 6.77068420e+03 ... 
6.10707909e+01\n", - " 5.13101944e+01 3.50000000e+01]]\n", - "False\n" - ] - } - ], - "source": [ - "f = frames(wav, samplerate=16000,winlen=0.025,winstep=0.01,\n", - " nfilt=40, nfft=512,lowfreq=0,highfreq=None,\n", - " dither=0.0,remove_dc_offset=False, preemph=1.0, \n", - " wintype='hamming')\n", - "print(f.shape)\n", - "print(f)\n", - "\n", - "n_fft=512\n", - "freq_bins = n_fft//2+1\n", - "s = np.arange(0, n_fft, 1.)\n", - "wsin = np.empty((freq_bins,1,n_fft))\n", - "wcos = np.empty((freq_bins,1,n_fft))\n", - "for k in range(freq_bins): # Only half of the bins contain useful info\n", - " wsin[k,0,:] = np.sin(2*np.pi*k*s/n_fft)\n", - " wcos[k,0,:] = np.cos(2*np.pi*k*s/n_fft)\n", - "\n", - "\n", - "wsin = np.empty((n_fft,1,n_fft))\n", - "wcos = np.empty((n_fft,1,n_fft))\n", - "for k in range(n_fft): # Only half of the bins contain useful info\n", - " wsin[k,0,:] = np.eye(n_fft, n_fft)[k]\n", - " wcos[k,0,:] = np.eye(n_fft, n_fft)[k]\n", - " \n", - " \n", - "wsin = np.empty((400,1,n_fft))\n", - "wcos = np.empty((400,1,n_fft))\n", - "for k in range(400): # Only half of the bins contain useful info\n", - " wsin[k,0,:] = np.eye(400, n_fft)[k]\n", - " wcos[k,0,:] = np.eye(400, n_fft)[k]\n", - " \n", - "\n", - " \n", - "x = torch.tensor(wav).float() # casting the array into a PyTorch Tensor\n", - "x = x[None, None, :]\n", - "print(x.size())\n", - "kernel_sin = torch.tensor(wsin, dtype=torch.float)\n", - "kernel_cos = torch.tensor(wcos, dtype=torch.float)\n", - "print(kernel_sin.size())\n", - "\n", - "from torch.nn.functional import conv1d, conv2d, fold\n", - "spec_imag = conv1d(x, kernel_sin, stride=160)\n", - "spec_real = conv1d(x, kernel_cos, stride=160) # Doing STFT by using conv1d\n", - "\n", - "print(spec_imag.size())\n", - "print(\"conv frame\", spec_imag[0].T)\n", - "# print(spec_imag[0].T[:, :400])\n", - "\n", - "# remove redundant parts\n", - "# spec_real = spec_real[:, :freq_bins, :]\n", - "# spec_imag = spec_imag[:, :freq_bins, :]\n", - "# spec = spec_real.pow(2) + spec_imag.pow(2)\n", - "# spec = torch.sqrt(spec)\n", - "# print(spec)\n", - "\n", - "\n", - "\n", - "s = np.arange(0, 512, 1.)\n", - "# s = s[::-1]\n", - "wsin = np.empty((freq_bins, 400))\n", - "wcos = np.empty((freq_bins, 400))\n", - "for k in range(freq_bins): # Only half of the bins contain useful info\n", - " wsin[k,:] = np.sin(2*np.pi*k*s/n_fft)[:400]\n", - " wcos[k,:] = np.cos(2*np.pi*k*s/n_fft)[:400]\n", - "\n", - "spec_real = torch.mm(spec_imag[0].T, torch.tensor(wcos, dtype=torch.float).T)\n", - "spec_imag = torch.mm(spec_imag[0].T, torch.tensor(wsin, dtype=torch.float).T)\n", - "\n", - "\n", - "# remove redundant parts\n", - "spec = spec_real.pow(2) + spec_imag.pow(2)\n", - "spec = torch.sqrt(spec)\n", - "\n", - "print('xx', spec.numpy())\n", - "print(spec.size())\n", - "print('yy', rspec[:521, :])\n", - "print('yy', rspec.shape)\n", - "\n", - "\n", - "x = spec.numpy()\n", - "y = rspec[:-1, :]\n", - "print(x)\n", - "print(y)\n", - "print(np.allclose(x, y))" - ] - }, - { - "cell_type": "code", - "execution_count": 160, - "id": "mathematical-traffic", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "torch.Size([257, 1, 400])\n", - "tensor([[[5.8976e+04, 2.9266e+04, 1.9630e+04, ..., 1.6772e+04,\n", - " 3.8693e+04, 3.1020e+04],\n", - " [2.5101e+04, 2.7298e+04, 2.8117e+04, ..., 2.1323e+04,\n", - " 1.3598e+04, 1.5920e+04],\n", - " [8.5960e+03, 4.7724e+03, 5.2880e+03, ..., 4.0608e+02,\n", - " 6.7707e+03, 4.3020e+03],\n", - " ...,\n", - " [2.0282e+01, 6.6927e+01, 
2.8501e+01, ..., 2.6012e+01,\n", - " 6.1071e+01, 5.3685e+01],\n", - " [2.4065e+01, 1.1878e+02, 9.5781e+01, ..., 7.8405e+01,\n", - " 5.1310e+01, 6.3620e+01],\n", - " [2.2000e+01, 1.6200e+02, 1.4200e+02, ..., 9.0000e+01,\n", - " 3.5000e+01, 4.4000e+01]]])\n", - "[[5.8976000e+04 2.5100672e+04 8.5960391e+03 ... 2.0281828e+01\n", - " 2.4064537e+01 2.2000000e+01]\n", - " [2.9266000e+04 2.7298107e+04 4.7724243e+03 ... 6.6926659e+01\n", - " 1.1877571e+02 1.6200000e+02]\n", - " [1.9630000e+04 2.8117475e+04 5.2880312e+03 ... 2.8501148e+01\n", - " 9.5781006e+01 1.4200000e+02]\n", - " ...\n", - " [1.6772000e+04 2.1322793e+04 4.0607657e+02 ... 2.6011934e+01\n", - " 7.8405350e+01 9.0000000e+01]\n", - " [3.8693000e+04 1.3598203e+04 6.7706841e+03 ... 6.1070808e+01\n", - " 5.1310150e+01 3.5000000e+01]\n", - " [3.1020000e+04 1.5920403e+04 4.3019902e+03 ... 5.3685162e+01\n", - " 6.3619797e+01 4.4000000e+01]]\n", - "[[5.89760000e+04 2.51006729e+04 8.59603890e+03 ... 2.02818313e+01\n", - " 2.40645984e+01 2.20000000e+01]\n", - " [2.92660000e+04 2.72981079e+04 4.77242582e+03 ... 6.69265842e+01\n", - " 1.18775735e+02 1.62000000e+02]\n", - " [1.96300000e+04 2.81174834e+04 5.28803149e+03 ... 2.85011387e+01\n", - " 9.57810428e+01 1.42000000e+02]\n", - " ...\n", - " [1.67720000e+04 2.13227930e+04 4.06079895e+02 ... 2.60119790e+01\n", - " 7.84053656e+01 9.00000000e+01]\n", - " [3.86930000e+04 1.35982074e+04 6.77068420e+03 ... 6.10707909e+01\n", - " 5.13101944e+01 3.50000000e+01]\n", - " [3.10200000e+04 1.59203961e+04 4.30198496e+03 ... 5.36851600e+01\n", - " 6.36197377e+01 4.40000000e+01]]\n", - "False\n" - ] - } - ], - "source": [ - "f = frames(wav, samplerate=16000,winlen=0.025,winstep=0.01,\n", - " nfilt=40, nfft=512,lowfreq=0,highfreq=None,\n", - " dither=0.0,remove_dc_offset=False, preemph=1.0, \n", - " wintype='hamming')\n", - "\n", - "n_fft=512\n", - "freq_bins = n_fft//2+1\n", - "s = np.arange(0, n_fft, 1.)\n", - "wsin = np.empty((freq_bins,1,400))\n", - "wcos = np.empty((freq_bins,1,400)) #[Cout, Cin, kernel_size]\n", - "for k in range(freq_bins): # Only half of the bins contain useful info\n", - " wsin[k,0,:] = np.sin(2*np.pi*k*s/n_fft)[:400]\n", - " wcos[k,0,:] = np.cos(2*np.pi*k*s/n_fft)[:400]\n", - "\n", - " \n", - "x = torch.tensor(wav).float() # casting the array into a PyTorch Tensor\n", - "x = x[None, None, :] #[B, C, T]\n", - "\n", - "kernel_sin = torch.tensor(wsin, dtype=torch.float)\n", - "kernel_cos = torch.tensor(wcos, dtype=torch.float)\n", - "print(kernel_sin.size())\n", - "\n", - "from torch.nn.functional import conv1d, conv2d, fold\n", - "spec_imag = conv1d(x, kernel_sin, stride=160) #[1, Cout, T]\n", - "spec_real = conv1d(x, kernel_cos, stride=160) # Doing STFT by using conv1d\n", - "\n", - "# remove redundant parts\n", - "spec = spec_real.pow(2) + spec_imag.pow(2)\n", - "spec = torch.sqrt(spec)\n", - "print(spec)\n", - "\n", - "x = spec[0].T.numpy()\n", - "y = rspec[:, :]\n", - "print(x)\n", - "print(y)\n", - "print(np.allclose(x, y))" - ] - }, - { - "cell_type": "code", - "execution_count": 162, - "id": "olive-nicaragua", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages/ipykernel_launcher.py:1: RuntimeWarning: divide by zero encountered in true_divide\n", - " \"\"\"Entry point for launching an IPython kernel.\n" - ] - }, - { - "data": { - "text/plain": [ - "27241" - ] - }, - "execution_count": 162, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - 
"np.argmax(np.abs(x -y) / np.abs(y))" - ] - }, - { - "cell_type": "code", - "execution_count": 165, - "id": "ultimate-assault", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "0.0" - ] - }, - "execution_count": 165, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "y[np.unravel_index(27241, y.shape)]" - ] - }, - { - "cell_type": "code", - "execution_count": 166, - "id": "institutional-stock", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "4.2412265e-10" - ] - }, - "execution_count": 166, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "x[np.unravel_index(27241, y.shape)]" - ] - }, - { - "cell_type": "code", - "execution_count": 167, - "id": "integrated-courage", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "False" - ] - }, - "execution_count": 167, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "np.allclose(y, x)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "different-operation", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.0" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/.notebook/compute_cmvn_loader_test.ipynb b/.notebook/compute_cmvn_loader_test.ipynb deleted file mode 100644 index 2b0a8b75f..000000000 --- a/.notebook/compute_cmvn_loader_test.ipynb +++ /dev/null @@ -1,793 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "purple-consequence", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "/home/ssd5/zhanghui/DeepSpeech2.x\n" - ] - }, - { - "data": { - "text/plain": [ - "'/home/ssd5/zhanghui/DeepSpeech2.x'" - ] - }, - "execution_count": 1, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "%cd ..\n", - "%pwd" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "defensive-mason", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "patient-convention", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Namespace(delta_delta=False, feat_dim=80, manifest_path='examples/aishell/s1/data/manifest.train.raw', num_samples=-1, num_workers=16, output_path='data/librispeech/mean_std.npz', sample_rate=16000, specgram_type='fbank', stride_ms=10.0, window_ms=25.0)\n" - ] - } - ], - "source": [ - "import argparse\n", - "import functools\n", - "\n", - "from deepspeech.frontend.augmentor.augmentation import AugmentationPipeline\n", - "from deepspeech.frontend.featurizer.audio_featurizer import AudioFeaturizer\n", - "from deepspeech.frontend.normalizer import FeatureNormalizer\n", - "from deepspeech.utils.utility import add_arguments\n", - "from deepspeech.utils.utility import print_arguments\n", - "\n", - "parser = argparse.ArgumentParser(description=__doc__)\n", - "add_arg = functools.partial(add_arguments, argparser=parser)\n", - "# yapf: disable\n", - "add_arg('num_samples', int, -1, \"# of samples to for statistics.\")\n", - "add_arg('specgram_type', str,\n", - " 'fbank',\n", - " \"Audio feature 
type. Options: linear, mfcc, fbank.\",\n", - " choices=['linear', 'mfcc', 'fbank'])\n", - "add_arg('feat_dim', int, 80, \"Audio feature dim.\")\n", - "add_arg('delta_delta', bool,\n", - " False,\n", - " \"Audio feature with delta delta.\")\n", - "add_arg('stride_ms', float, 10.0, \"stride length in ms.\")\n", - "add_arg('window_ms', float, 25.0, \"stride length in ms.\")\n", - "add_arg('sample_rate', int, 16000, \"target sample rate.\")\n", - "add_arg('manifest_path', str,\n", - " 'examples/aishell/s1/data/manifest.train.raw',\n", - " \"Filepath of manifest to compute normalizer's mean and stddev.\")\n", - "add_arg('num_workers',\n", - " default=16,\n", - " type=int,\n", - " help='num of subprocess workers for processing')\n", - "add_arg('output_path', str,\n", - " 'data/librispeech/mean_std.npz',\n", - " \"Filepath of write mean and stddev to (.npz).\")\n", - "# yapf: disable\n", - "args = parser.parse_args([])\n", - "print(args)" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "enormous-currency", - "metadata": {}, - "outputs": [], - "source": [ - "import random\n", - "\n", - "import numpy as np\n", - "import paddle\n", - "from paddle.io import DataLoader\n", - "from paddle.io import Dataset\n", - "\n", - "from deepspeech.frontend.audio import AudioSegment\n", - "from deepspeech.frontend.utility import load_cmvn\n", - "from deepspeech.frontend.utility import read_manifest\n", - "\n", - "class CollateFunc(object):\n", - " ''' Collate function for AudioDataset\n", - " '''\n", - " def __init__(self):\n", - " pass\n", - " \n", - " def __call__(self, batch):\n", - " mean_stat = None\n", - " var_stat = None\n", - " number = 0\n", - " for feat in batch:\n", - " sums = np.sum(feat, axis=1)\n", - " if mean_stat is None:\n", - " mean_stat = sums\n", - " else:\n", - " mean_stat += sums\n", - "\n", - " square_sums = np.sum(np.square(feat), axis=1)\n", - " if var_stat is None:\n", - " var_stat = square_sums\n", - " else:\n", - " var_stat += square_sums\n", - "\n", - " number += feat.shape[1]\n", - " #return paddle.to_tensor(number), paddle.to_tensor(mean_stat), paddle.to_tensor(var_stat)\n", - " return number, mean_stat, var_stat\n", - "\n", - "\n", - "class AudioDataset(Dataset):\n", - " def __init__(self, manifest_path, feature_func, num_samples=-1, rng=None):\n", - " self.feature_func = feature_func\n", - " self._rng = rng\n", - " manifest = read_manifest(manifest_path)\n", - " if num_samples == -1:\n", - " sampled_manifest = manifest\n", - " else:\n", - " sampled_manifest = self._rng.sample(manifest, num_samples)\n", - " self.items = sampled_manifest\n", - "\n", - " def __len__(self):\n", - " return len(self.items)\n", - "\n", - " def __getitem__(self, idx):\n", - " key = self.items[idx]['feat']\n", - " audioseg = AudioSegment.from_file(key)\n", - " feat = self.feature_func(audioseg) #(D, T)\n", - " return feat" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "armed-semester", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "process 1000 wavs,450739 frames\n", - "process 2000 wavs,887447 frames\n", - "process 3000 wavs,1354148 frames\n", - "process 4000 wavs,1816494 frames\n", - "process 5000 wavs,2359211 frames\n", - "process 6000 wavs,2828455 frames\n", - "process 7000 wavs,3276186 frames\n", - "process 8000 wavs,3692234 frames\n", - "process 9000 wavs,4139360 frames\n", - "process 10000 wavs,4591528 frames\n", - "process 11000 wavs,5020114 frames\n", - "process 12000 wavs,5459523 frames\n", - "process 
13000 wavs,5899534 frames\n", - "process 14000 wavs,6323242 frames\n", - "process 15000 wavs,6736597 frames\n", - "process 16000 wavs,7207686 frames\n", - "process 17000 wavs,7637800 frames\n", - "process 18000 wavs,8093004 frames\n", - "process 19000 wavs,8529518 frames\n", - "process 20000 wavs,8906022 frames\n", - "process 21000 wavs,9352652 frames\n", - "process 22000 wavs,9807495 frames\n", - "process 23000 wavs,10247938 frames\n", - "process 24000 wavs,10700011 frames\n", - "process 25000 wavs,11126134 frames\n", - "process 26000 wavs,11558061 frames\n", - "process 27000 wavs,12010359 frames\n", - "process 28000 wavs,12470938 frames\n", - "process 29000 wavs,12916013 frames\n", - "process 30000 wavs,13345816 frames\n", - "process 31000 wavs,13752365 frames\n", - "process 32000 wavs,14174801 frames\n", - "process 33000 wavs,14642170 frames\n", - "process 34000 wavs,15053557 frames\n", - "process 35000 wavs,15531890 frames\n", - "process 36000 wavs,16022711 frames\n", - "process 37000 wavs,16437688 frames\n", - "process 38000 wavs,16859517 frames\n", - "process 39000 wavs,17307676 frames\n", - "process 40000 wavs,17796629 frames\n", - "process 41000 wavs,18264151 frames\n", - "process 42000 wavs,18711898 frames\n", - "process 43000 wavs,19159890 frames\n", - "process 44000 wavs,19576435 frames\n", - "process 45000 wavs,19992793 frames\n", - "process 46000 wavs,20464449 frames\n", - "process 47000 wavs,20886021 frames\n", - "process 48000 wavs,21317318 frames\n", - "process 49000 wavs,21738034 frames\n", - "process 50000 wavs,22171890 frames\n", - "process 51000 wavs,22622238 frames\n", - "process 52000 wavs,23100734 frames\n", - "process 53000 wavs,23526901 frames\n", - "process 54000 wavs,23969746 frames\n", - "process 55000 wavs,24418691 frames\n", - "process 56000 wavs,24862546 frames\n", - "process 57000 wavs,25336448 frames\n", - "process 58000 wavs,25778435 frames\n", - "process 59000 wavs,26216199 frames\n", - "process 60000 wavs,26694692 frames\n", - "process 61000 wavs,27148978 frames\n", - "process 62000 wavs,27617088 frames\n", - "process 63000 wavs,28064946 frames\n", - "process 64000 wavs,28519843 frames\n", - "process 65000 wavs,28989722 frames\n", - "process 66000 wavs,29470156 frames\n", - "process 67000 wavs,29952931 frames\n", - "process 68000 wavs,30360555 frames\n", - "process 69000 wavs,30797929 frames\n", - "process 70000 wavs,31218227 frames\n", - "process 71000 wavs,31663934 frames\n", - "process 72000 wavs,32107468 frames\n", - "process 73000 wavs,32541943 frames\n", - "process 74000 wavs,33010702 frames\n", - "process 75000 wavs,33448082 frames\n", - "process 76000 wavs,33886812 frames\n", - "process 77000 wavs,34338108 frames\n", - "process 78000 wavs,34761495 frames\n", - "process 79000 wavs,35199730 frames\n", - "process 80000 wavs,35669630 frames\n", - "process 81000 wavs,36122402 frames\n", - "process 82000 wavs,36604561 frames\n", - "process 83000 wavs,37085552 frames\n", - "process 84000 wavs,37517500 frames\n", - "process 85000 wavs,37987196 frames\n", - "process 86000 wavs,38415721 frames\n", - "process 87000 wavs,38889467 frames\n", - "process 88000 wavs,39337809 frames\n", - "process 89000 wavs,39792342 frames\n", - "process 90000 wavs,40287946 frames\n", - "process 91000 wavs,40719461 frames\n", - "process 92000 wavs,41178919 frames\n", - "process 93000 wavs,41659635 frames\n", - "process 94000 wavs,42132985 frames\n", - "process 95000 wavs,42584564 frames\n", - "process 96000 wavs,43018598 frames\n", - "process 97000 wavs,43480662 frames\n", - 
"process 98000 wavs,43973670 frames\n", - "process 99000 wavs,44448190 frames\n", - "process 100000 wavs,44935034 frames\n", - "process 101000 wavs,45379812 frames\n", - "process 102000 wavs,45821207 frames\n", - "process 103000 wavs,46258420 frames\n", - "process 104000 wavs,46743733 frames\n", - "process 105000 wavs,47206922 frames\n", - "process 106000 wavs,47683041 frames\n", - "process 107000 wavs,48122809 frames\n", - "process 108000 wavs,48594623 frames\n", - "process 109000 wavs,49086358 frames\n", - "process 110000 wavs,49525568 frames\n", - "process 111000 wavs,49985820 frames\n", - "process 112000 wavs,50428262 frames\n", - "process 113000 wavs,50897957 frames\n", - "process 114000 wavs,51344589 frames\n", - "process 115000 wavs,51774621 frames\n", - "process 116000 wavs,52243372 frames\n", - "process 117000 wavs,52726025 frames\n", - "process 118000 wavs,53170026 frames\n", - "process 119000 wavs,53614141 frames\n", - "process 120000 wavs,54071271 frames\n" - ] - } - ], - "source": [ - "\n", - "augmentation_pipeline = AugmentationPipeline('{}')\n", - "audio_featurizer = AudioFeaturizer(\n", - " specgram_type=args.specgram_type,\n", - " feat_dim=args.feat_dim,\n", - " delta_delta=args.delta_delta,\n", - " stride_ms=args.stride_ms,\n", - " window_ms=args.window_ms,\n", - " n_fft=None,\n", - " max_freq=None,\n", - " target_sample_rate=args.sample_rate,\n", - " use_dB_normalization=True,\n", - " target_dB=-20)\n", - "\n", - "def augment_and_featurize(audio_segment):\n", - " augmentation_pipeline.transform_audio(audio_segment)\n", - " return audio_featurizer.featurize(audio_segment)\n", - "\n", - "\n", - "collate_func = CollateFunc()\n", - "\n", - "dataset = AudioDataset(\n", - " args.manifest_path,\n", - " augment_and_featurize, \n", - " args.num_samples)\n", - "\n", - "batch_size = 20\n", - "data_loader = DataLoader(\n", - " dataset,\n", - " batch_size=batch_size,\n", - " shuffle=False,\n", - " num_workers=args.num_workers,\n", - " collate_fn=collate_func)\n", - "\n", - "with paddle.no_grad():\n", - " all_mean_stat = None\n", - " all_var_stat = None\n", - " all_number = 0\n", - " wav_number = 0\n", - " for i, batch in enumerate(data_loader()):\n", - " #for batch in data_loader():\n", - " number, mean_stat, var_stat = batch\n", - " if i == 0:\n", - " all_mean_stat = mean_stat\n", - " all_var_stat = var_stat\n", - " else:\n", - " all_mean_stat += mean_stat\n", - " all_var_stat += var_stat\n", - " all_number += number\n", - " wav_number += batch_size\n", - "\n", - " if wav_number % 1000 == 0:\n", - " print('process {} wavs,{} frames'.format(wav_number,\n", - " all_number))\n", - "\n", - "cmvn_info = {\n", - " 'mean_stat': list(all_mean_stat.tolist()),\n", - " 'var_stat': list(all_var_stat.tolist()),\n", - " 'frame_num': all_number\n", - "}" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "danish-executive", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{'mean_stat': [-813852467.7953382, -769025957.9140725, -809499593.411409, -774700574.014532, -750961217.5896736, -760564397.2864963, -805662399.3771614, -843490965.4231446, -850242081.9416809, -857678651.504435, -879067453.9826999, -908602072.3856701, -936850957.7187386, -957242686.489041, -968425442.0916103, -972687545.5953809, -980383731.7683417, -991533337.6343704, -1001966818.1164789, -1010334169.7486078, -1016855066.9099333, -1022176245.7021623, -1025700476.4788507, -1030678878.3195274, -1037075963.124199, -1042705719.0195516, -1047422212.6492896, 
-1049003537.271861, -1050314833.7453628, -1050772191.0204058, -1050010034.9948177, -1050436065.1336465, -1053327181.7978873, -1058710548.2036785, -1065950852.4966162, -1071709705.0060445, -1077682778.259181, -1083371045.272074, -1089708906.2657735, -1096312217.7865202, -1101089858.8364556, -1104965332.4332569, -1107791702.5223634, -1109431075.2374773, -1110066333.0280604, -1110382732.0722318, -1110480306.3793216, -1110203297.7110727, -1109972534.3583376, -1109378081.8792782, -1108212059.413654, -1107235713.2041805, -1106973581.9280007, -1107352339.7860134, -1108730029.862537, -1110425202.83704, -1113220669.4552443, -1115887535.4870913, -1118105356.3628063, -1120001376.8503075, -1121135822.320366, -1122265971.8751016, -1123990217.401155, -1125786729.6230593, -1127784957.2745507, -1129180108.9033566, -1132000461.6688302, -1134675829.8190608, -1137652487.5164194, -1141755948.0463965, -1145340901.5468378, -1148637682.593287, -1151755522.470022, -1154981643.2268832, -1157417488.840151, -1161240429.0989249, -1165411128.671642, -1170521097.1034513, -1176307165.5109766, -1183456865.0039694, -1190535938.6591117, -1197946309.0472982, -1203596565.037139, -1207563038.1241052, -1209707561.5829782, -1211407066.2452552, -1211884576.9201162, -1212778872.005509, -1214041413.8080075, -1215367953.1745043, -1216850831.482193, -1217678325.5351057, -1218854289.54188, -1219325064.8610544, -1219080344.7580786, -1218541313.657531, -1217889833.2067819, -1216552930.1654336, -1216423777.4113154, -1216575252.225508, -1217075384.9826024, -1217391577.901724, -1217838974.57273, -1218131805.6054134, -1218294889.7465532, -1218566666.1755593, -1218790537.5519717, -1218748668.9956846, -1218603191.4941735, -1218004566.4348054, -1217312410.127734, -1217207493.9522285, -1217284002.3834674, -1217644312.51745, -1218039821.6444128, -1218721811.6269798, -1219121088.9265897, -1219014460.8090584, -1218530127.6776083, -1217952335.451711, -1217316073.8666434, -1217035380.1151958, -1216636431.2964456, -1216257015.2945514, -1215658496.1208403, -1215097272.0976632, -1214669859.2064147, -1214593853.4809475, -1214599475.7838447, -1214575440.823035, -1214158828.8008435, -1213482920.2673717, -1212476577.5897374, -1211251374.2198513, -1210284855.590475, -1209302456.065669, -1209106252.6625297, -1209373211.5146718, -1209689421.7984035, -1210021342.495856, -1210650609.3592312, -1211428521.3900626, -1212616111.4257205, -1213820075.2948189, -1215320588.7144456, -1217175082.2739282, -1219703351.4585004, -1222007827.120464, -1224637375.5900724, -1228367798.912171, -1234853879.862459, -1247222219.867692, -1268562808.1616178, -1302034822.9569275, -1347823631.0776038, -1402753916.9445229, -1458826717.3262982, -1505843092.0970414, -1534278782.249077, -1543955545.8994718, -1600409154.893352], 'var_stat': [12665413908.91729, 11145088801.244318, 12567119446.035736, 11758392758.06822, 11200687982.736668, 11551903443.711124, 12880777868.435602, 14084854368.236998, 14394011058.866192, 14678818621.277662, 15346278722.626339, 16268053979.757076, 17191705347.854794, 17877540386.548733, 18251857849.077663, 18392628178.710472, 18645534548.4045, 19018598212.22902, 19366711357.782673, 19655730286.72857, 19890681996.786858, 20094163350.461906, 20227774955.225887, 20423525628.66887, 20669928826.76939, 20882313568.247944, 21062392676.270527, 21126648821.879055, 21185210734.751118, 21209014745.520447, 21182293842.91236, 21197433134.875977, 21302147790.662144, 21504666657.651955, 21781818550.89697, 21996170165.145462, 22217169779.096275, 22431161762.176693, 
22672708668.38104, 22922683961.072956, 23101137011.201683, 23249680793.556847, 23358894817.24979, 23422895267.919228, 23449479198.303394, 23464433357.671055, 23469197140.124596, 23459013479.866177, 23447935341.542686, 23422585038.052387, 23375601301.949135, 23338397991.497776, 23329682884.21905, 23348002892.39853, 23406274659.89975, 23478242518.92228, 23592891371.876236, 23703885161.772205, 23797158601.65954, 23875230355.66992, 23918333664.3946, 23968582109.371258, 24040547318.081936, 24112364295.110058, 24189973697.612144, 24242165205.640236, 24364255205.82311, 24472408850.760197, 24590211203.05312, 24763026764.005527, 24909192634.69144, 25043438176.23281, 25167141466.500504, 25297108031.48665, 25395377064.0999, 25550930772.86505, 25721404827.10336, 25931101211.156487, 26168988710.098465, 26465528802.762875, 26760033029.443783, 27075408488.605213, 27316626931.655052, 27487275073.52796, 27579518448.2332, 27652308513.875782, 27673412508.45838, 27711509210.702576, 27767312240.641487, 27827464683.295334, 27894794590.957966, 27935988489.16511, 27992337099.891083, 28019655483.58796, 28014286886.252903, 27996189233.857716, 27973078840.875465, 27920045013.68706, 27917103211.22359, 27927566165.64652, 27953525818.61368, 27973386070.140022, 27999317832.502476, 28019494120.641834, 28033010746.452637, 28051086123.896503, 28066195174.191753, 28068570977.318798, 28064890246.85437, 28042424375.860577, 28015849655.869568, 28014812222.566605, 28021039053.959835, 28039270607.169422, 28058271295.10199, 28088976520.10178, 28107824988.74732, 28105633030.784756, 28087681357.818607, 28065484299.963837, 28039555887.004284, 28028214431.52875, 28011714871.929447, 27995603790.480755, 27970125897.561134, 27946436130.511288, 27929044772.5522, 27926612443.390316, 27926256324.387302, 27924771848.71099, 27905526922.390133, 27876268519.168198, 27832532606.552593, 27779497699.976765, 27737034351.907337, 27692129825.179924, 27684252911.371475, 27698882622.878677, 27712387157.27985, 27726474638.933037, 27752647691.051613, 27786197932.382797, 27836378752.662235, 27887415700.334576, 27949784230.702114, 28028117657.84245, 28136313097.200474, 28234098926.207996, 28345845477.25874, 28507222800.146496, 28793832339.90449, 29350765483.070816, 30328262350.231213, 31894930713.76519, 34093669067.422382, 36801959396.22739, 39638995447.49344, 42088579425.44825, 43616108982.85117, 44152063315.31461, 47464832889.5967], 'frame_num': 54129649}\n" - ] - } - ], - "source": [ - "print(cmvn_info)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "accurate-terminal", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "dominant-abuse", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - " \n", - "process 1000 wavs,450240 frames\n", - " \n", - "process 2000 wavs,886411 frames\n", - " \n", - "process 3000 wavs,1352580 frames\n", - " \n", - "process 4000 wavs,1814397 frames\n", - " \n", - "process 5000 wavs,2356587 frames\n", - " \n", - "process 6000 wavs,2825310 frames\n", - " \n", - "process 7000 wavs,3272506 frames\n", - " \n", - "process 8000 wavs,3688045 frames\n", - " \n", - "process 9000 wavs,4134669 frames\n", - " \n", - "process 10000 wavs,4586357 frames\n", - " \n", - "process 11000 wavs,5014429 frames\n", - " \n", - "process 12000 wavs,5453334 frames\n", - " \n", - "process 13000 wavs,5892888 frames\n", - " \n", - "process 14000 wavs,6316059 frames\n", - " \n", - "process 15000 wavs,6728870 frames\n", - " \n", 
- "process 16000 wavs,7199442 frames\n", - " \n", - "process 17000 wavs,7629055 frames\n", - " \n", - "process 18000 wavs,8083729 frames\n", - " \n", - "process 19000 wavs,8519732 frames\n", - " \n", - "process 20000 wavs,8895694 frames\n", - " \n", - "process 21000 wavs,9341778 frames\n", - " \n", - "process 22000 wavs,9796126 frames\n", - " \n", - "process 23000 wavs,10236057 frames\n", - " \n", - "process 24000 wavs,10687461 frames\n", - " \n", - "process 25000 wavs,11113082 frames\n", - " \n", - "process 26000 wavs,11544482 frames\n", - " \n", - "process 27000 wavs,11996273 frames\n", - " \n", - "process 28000 wavs,12456350 frames\n", - " \n", - "process 29000 wavs,12900895 frames\n", - " \n", - "process 30000 wavs,13330353 frames\n", - " \n", - "process 31000 wavs,13736568 frames\n", - " \n", - "process 32000 wavs,14158472 frames\n", - " \n", - "process 33000 wavs,14625316 frames\n", - " \n", - "process 34000 wavs,15036206 frames\n", - " \n", - "process 35000 wavs,15514001 frames\n", - " \n", - "process 36000 wavs,16004323 frames\n", - " \n", - "process 37000 wavs,16418799 frames\n", - " \n", - "process 38000 wavs,16840100 frames\n", - " \n", - "process 39000 wavs,17287752 frames\n", - " \n", - "process 40000 wavs,17776206 frames\n", - " \n", - "process 41000 wavs,18243209 frames\n", - " \n", - "process 42000 wavs,18690449 frames\n", - " \n", - "process 43000 wavs,19137940 frames\n", - " \n", - "process 44000 wavs,19553966 frames\n", - " \n", - "process 45000 wavs,19969813 frames\n", - " \n", - "process 46000 wavs,20440963 frames\n", - " \n", - "process 47000 wavs,20862022 frames\n", - " \n", - "process 48000 wavs,21292801 frames\n", - " \n", - "process 49000 wavs,21713004 frames\n", - " \n", - "process 50000 wavs,22146346 frames\n", - " \n", - "process 51000 wavs,22596172 frames\n", - " \n", - "process 52000 wavs,23074160 frames\n", - " \n", - "process 53000 wavs,23499823 frames\n", - " \n", - "process 54000 wavs,23942151 frames\n", - " \n", - "process 55000 wavs,24390566 frames\n", - " \n", - "process 56000 wavs,24833905 frames\n", - " \n", - "process 57000 wavs,25307270 frames\n", - " \n", - "process 58000 wavs,25748720 frames\n", - " \n", - "process 59000 wavs,26185964 frames\n", - " \n", - "process 60000 wavs,26663953 frames\n", - " \n", - "process 61000 wavs,27117720 frames\n", - " \n", - "process 62000 wavs,27585349 frames\n", - " \n", - "process 63000 wavs,28032693 frames\n", - " \n", - "process 64000 wavs,28487074 frames\n", - " \n", - "process 65000 wavs,28956462 frames\n", - " \n", - "process 66000 wavs,29436358 frames\n", - " \n", - "process 67000 wavs,29918569 frames\n", - " \n", - "process 68000 wavs,30325682 frames\n", - " \n", - "process 69000 wavs,30762528 frames\n", - " \n", - "process 70000 wavs,31182319 frames\n", - " \n", - "process 71000 wavs,31627526 frames\n", - " \n", - "process 72000 wavs,32070556 frames\n", - " \n", - "process 73000 wavs,32504534 frames\n", - " \n", - "process 74000 wavs,32972775 frames\n", - " \n", - "process 75000 wavs,33409637 frames\n", - " \n", - "process 76000 wavs,33847861 frames\n", - " \n", - "process 77000 wavs,34298647 frames\n", - " \n", - "process 78000 wavs,34721536 frames\n", - " \n", - "process 79000 wavs,35159236 frames\n", - " \n", - "process 80000 wavs,35628628 frames\n", - " \n", - "process 81000 wavs,36080909 frames\n", - " \n", - "process 82000 wavs,36562496 frames\n", - " \n", - "process 83000 wavs,37042976 frames\n", - " \n", - "process 84000 wavs,37474403 frames\n" - ] - }, - { - "name": "stdout", - "output_type": 
"stream", - "text": [ - " \n", - "process 85000 wavs,37943596 frames\n", - " \n", - "process 86000 wavs,38371620 frames\n", - " \n", - "process 87000 wavs,38844874 frames\n", - " \n", - "process 88000 wavs,39292686 frames\n", - " \n", - "process 89000 wavs,39746715 frames\n", - " \n", - "process 90000 wavs,40241800 frames\n", - " \n", - "process 91000 wavs,40672817 frames\n", - " \n", - "process 92000 wavs,41131773 frames\n", - " \n", - "process 93000 wavs,41612001 frames\n", - " \n", - "process 94000 wavs,42084822 frames\n", - " \n", - "process 95000 wavs,42535878 frames\n", - " \n", - "process 96000 wavs,42969365 frames\n", - " \n", - "process 97000 wavs,43430890 frames\n", - " \n", - "process 98000 wavs,43923378 frames\n", - " \n", - "process 99000 wavs,44397370 frames\n", - " \n", - "process 100000 wavs,44883695 frames\n", - " \n", - "process 101000 wavs,45327968 frames\n", - " \n", - "process 102000 wavs,45768860 frames\n", - " \n", - "process 103000 wavs,46205602 frames\n", - " \n", - "process 104000 wavs,46690407 frames\n", - " \n", - "process 105000 wavs,47153089 frames\n", - " \n", - "process 106000 wavs,47628699 frames\n", - " \n", - "process 107000 wavs,48067945 frames\n", - " \n", - "process 108000 wavs,48539256 frames\n", - " \n", - "process 109000 wavs,49030485 frames\n", - " \n", - "process 110000 wavs,49469189 frames\n", - " \n", - "process 111000 wavs,49928968 frames\n", - " \n", - "process 112000 wavs,50370921 frames\n", - " \n", - "process 113000 wavs,50840090 frames\n", - " \n", - "process 114000 wavs,51286249 frames\n", - " \n", - "process 115000 wavs,51715786 frames\n", - " \n", - "process 116000 wavs,52184017 frames\n", - " \n", - "process 117000 wavs,52666156 frames\n", - " \n", - "process 118000 wavs,53109645 frames\n", - " \n", - "process 119000 wavs,53553253 frames\n", - " \n", - "process 120000 wavs,54009877 frames\n", - "{'mean_stat': [700612678.1184504, 704246512.9321843, 720430663.1822729, 754033269.0474415, 798737761.616614, 829467218.4204571, 851246702.9426627, 862261185.2661449, 859339943.6923889, 846303730.8696194, 832995109.605447, 823196536.6029147, 832626008.2569772, 845571326.1936859, 848801373.0562981, 846503549.328017, 836774344.5500796, 823481091.0445303, 820728368.2518216, 804571348.4957463, 795306095.0083207, 811729024.2415155, 805734803.5703195, 813076782.1959459, 806620199.406499, 809655573.8886961, 804371708.9347517, 809272248.6085774, 810322689.7490631, 814294131.1973915, 816262716.0476038, 816213124.2411841, 817158473.4380915, 821414211.5629157, 827408091.5728914, 834353896.0519086, 840094990.3467333, 842613218.6554606, 842070761.1727513, 834970952.5260613, 837020570.8200948, 829592602.7833654, 830116543.8893851, 829482316.3881509, 833397219.4597517, 839251633.3120549, 845475010.4718693, 852378426.7183967, 859563981.8633184, 866063840.5523493, 867790921.9978689, 868215100.5962687, 869683066.032885, 872467375.6674014, 873097681.1780069, 873025823.0543871, 869897292.7201596, 866386426.3869117, 863166726.7256871, 854653071.2244718, 842402803.9000899, 830838253.4144138, 830143002.3536818, 831492285.0310817, 833304371.8781006, 838896092.8621838, 843866088.9578133, 847316792.1429776, 851038022.3643295, 855931698.0149751, 859320543.9795249, 863031001.3470656, 868325062.1832993, 873626971.0115026, 878726636.924209, 884861725.972504, 886920281.5192285, 883056006.5094173, 863719240.7255149, 773378975.9476194], 'var_stat': [9237018652.657722, 9417257721.82426, 10105084297.159702, 11071318522.587782, 12422783727.426847, 13400306419.784964, 
14148498843.406874, 14576436982.89939, 14529009036.494726, 14105645932.596651, 13682988821.478252, 13413013425.088106, 13764134927.293928, 14233704806.737064, 14361631309.367067, 14281358385.45644, 13939662689.213865, 13496884231.929493, 13382566162.783987, 12871350930.6626, 12576198160.876635, 13051463889.56708, 12859205935.513906, 13053861416.098743, 12830323588.550724, 12886405923.897238, 12708529922.84171, 12847306110.231739, 12880398489.53404, 13002566299.565536, 13066708060.463543, 13064231286.858614, 13088983337.353497, 13221393824.891022, 13412425607.755072, 13631485149.777075, 13807797519.156103, 13877277485.033077, 13848613909.96762, 13609176326.2529, 13649815250.130072, 13397698404.696907, 13388964704.359968, 13354326914.968012, 13469861474.898457, 13652539440.283333, 13846837321.329163, 14062143714.601675, 14292571198.61228, 14504626563.299246, 14563864749.132776, 14579720287.991764, 14626700787.353922, 14716185568.128899, 14728532777.28015, 14719101187.113443, 14607945896.239174, 14478517828.531614, 14355110561.681187, 14057430280.249746, 13634284490.879377, 13248236002.494394, 13217602306.335958, 13257856701.946049, 13323688441.072674, 13515395318.023148, 13685827169.67645, 13811622609.426846, 13947347160.615082, 14115883822.884943, 14231204526.433033, 14356066668.651815, 14533604268.238445, 14708971788.69237, 14875667326.732443, 15079098318.79331, 15144888989.667963, 15002658970.504765, 14349232841.34513, 11544480117.013124], 'frame_num': 54068199}\n" - ] - } - ], - "source": [ - "import random\n", - "\n", - "import numpy as np\n", - "import paddle\n", - "from paddle.io import DataLoader\n", - "from paddle.io import Dataset\n", - "\n", - "from deepspeech.frontend.audio import AudioSegment\n", - "from deepspeech.frontend.utility import load_cmvn\n", - "from deepspeech.frontend.utility import read_manifest\n", - "\n", - "# https://github.com/PaddlePaddle/Paddle/pull/31481\n", - "class CollateFunc(object):\n", - " ''' Collate function for AudioDataset\n", - " '''\n", - " def __init__(self, feature_func):\n", - " self.feature_func = feature_func\n", - " \n", - " def __call__(self, batch):\n", - " mean_stat = None\n", - " var_stat = None\n", - " number = 0\n", - " for item in batch:\n", - " audioseg = AudioSegment.from_file(item['feat'])\n", - " feat = self.feature_func(audioseg) #(D, T)\n", - "\n", - " sums = np.sum(feat, axis=1)\n", - " if mean_stat is None:\n", - " mean_stat = sums\n", - " else:\n", - " mean_stat += sums\n", - "\n", - " square_sums = np.sum(np.square(feat), axis=1)\n", - " if var_stat is None:\n", - " var_stat = square_sums\n", - " else:\n", - " var_stat += square_sums\n", - "\n", - " number += feat.shape[1]\n", - " return number, mean_stat, var_stat\n", - "\n", - "\n", - "class AudioDataset(Dataset):\n", - " def __init__(self, manifest_path, num_samples=-1, rng=None, random_seed=0):\n", - " self._rng = rng if rng else np.random.RandomState(random_seed)\n", - " manifest = read_manifest(manifest_path)\n", - " if num_samples == -1:\n", - " sampled_manifest = manifest\n", - " else:\n", - " sampled_manifest = self._rng.choice(manifest, num_samples, replace=False)\n", - " self.items = sampled_manifest\n", - "\n", - " def __len__(self):\n", - " return len(self.items)\n", - "\n", - " def __getitem__(self, idx):\n", - " return self.items[idx]\n", - " \n", - " \n", - "augmentation_pipeline = AugmentationPipeline('{}')\n", - "audio_featurizer = AudioFeaturizer(\n", - " specgram_type=args.specgram_type,\n", - " feat_dim=args.feat_dim,\n", - " 
delta_delta=args.delta_delta,\n", - " stride_ms=args.stride_ms,\n", - " window_ms=args.window_ms,\n", - " n_fft=None,\n", - " max_freq=None,\n", - " target_sample_rate=args.sample_rate,\n", - " use_dB_normalization=True,\n", - " target_dB=-20)\n", - "\n", - "def augment_and_featurize(audio_segment):\n", - " augmentation_pipeline.transform_audio(audio_segment)\n", - " return audio_featurizer.featurize(audio_segment)\n", - "\n", - "\n", - "collate_func = CollateFunc(augment_and_featurize)\n", - "\n", - "dataset = AudioDataset(\n", - " args.manifest_path,\n", - " args.num_samples)\n", - "\n", - "batch_size = 20\n", - "data_loader = DataLoader(\n", - " dataset,\n", - " batch_size=batch_size,\n", - " shuffle=False,\n", - " num_workers=args.num_workers,\n", - " collate_fn=collate_func)\n", - "\n", - "with paddle.no_grad():\n", - " all_mean_stat = None\n", - " all_var_stat = None\n", - " all_number = 0\n", - " wav_number = 0\n", - " for i, batch in enumerate(data_loader):\n", - " number, mean_stat, var_stat = batch\n", - " if i == 0:\n", - " all_mean_stat = mean_stat\n", - " all_var_stat = var_stat\n", - " else:\n", - " all_mean_stat += mean_stat\n", - " all_var_stat += var_stat\n", - " all_number += number\n", - " wav_number += batch_size\n", - "\n", - " if wav_number % 1000 == 0:\n", - " print('process {} wavs,{} frames'.format(wav_number,\n", - " all_number))\n", - "\n", - "cmvn_info = {\n", - " 'mean_stat': list(all_mean_stat.tolist()),\n", - " 'var_stat': list(all_var_stat.tolist()),\n", - " 'frame_num': all_number\n", - "}\n", - "print(cmvn_info)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "unlike-search", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.0" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/.notebook/dataloader.ipynb b/.notebook/dataloader.ipynb deleted file mode 100644 index 3de8f64a9..000000000 --- a/.notebook/dataloader.ipynb +++ /dev/null @@ -1,389 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "emerging-meter", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages/paddle/fluid/layers/utils.py:26: DeprecationWarning: `np.int` is a deprecated alias for the builtin `int`. To silence this warning, use `int` by itself. Doing this will not modify any behavior and is safe. When replacing `np.int`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. If you wish to review your current use, check the release note link for additional information.\n", - "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", - " def convert_to_list(value, n, name, dtype=np.int):\n", - "/workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages/scipy/fftpack/__init__.py:103: DeprecationWarning: The module numpy.dual is deprecated. 
Instead of using dual, use the functions directly from numpy or scipy.\n", - " from numpy.dual import register_func\n", - "/workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages/scipy/special/orthogonal.py:81: DeprecationWarning: `np.int` is a deprecated alias for the builtin `int`. To silence this warning, use `int` by itself. Doing this will not modify any behavior and is safe. When replacing `np.int`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. If you wish to review your current use, check the release note link for additional information.\n", - "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", - " from numpy import (exp, inf, pi, sqrt, floor, sin, cos, around, int,\n", - "/workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages/numba/core/types/__init__.py:108: DeprecationWarning: `np.long` is a deprecated alias for `np.compat.long`. To silence this warning, use `np.compat.long` by itself. In the likely event your code does not need to work on Python 2 you can use the builtin `int` for which `np.compat.long` is itself an alias. Doing this will not modify any behaviour and is safe. When replacing `np.long`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. If you wish to review your current use, check the release note link for additional information.\n", - "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", - " long_ = _make_signed(np.long)\n", - "/workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages/numba/core/types/__init__.py:109: DeprecationWarning: `np.long` is a deprecated alias for `np.compat.long`. To silence this warning, use `np.compat.long` by itself. In the likely event your code does not need to work on Python 2 you can use the builtin `int` for which `np.compat.long` is itself an alias. Doing this will not modify any behaviour and is safe. When replacing `np.long`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. 
If you wish to review your current use, check the release note link for additional information.\n", - "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", - " ulong = _make_unsigned(np.long)\n" - ] - } - ], - "source": [ - "import math\n", - "import random\n", - "import tarfile\n", - "import logging\n", - "import numpy as np\n", - "from collections import namedtuple\n", - "from functools import partial\n", - "\n", - "import paddle\n", - "from paddle.io import Dataset\n", - "from paddle.io import DataLoader\n", - "from paddle.io import BatchSampler\n", - "from paddle.io import DistributedBatchSampler\n", - "from paddle import distributed as dist\n", - "\n", - "from data_utils.utility import read_manifest\n", - "from data_utils.augmentor.augmentation import AugmentationPipeline\n", - "from data_utils.featurizer.speech_featurizer import SpeechFeaturizer\n", - "from data_utils.speech import SpeechSegment\n", - "from data_utils.normalizer import FeatureNormalizer\n", - "\n", - "\n", - "from data_utils.dataset import (\n", - " DeepSpeech2Dataset,\n", - " DeepSpeech2DistributedBatchSampler,\n", - " DeepSpeech2BatchSampler,\n", - " SpeechCollator,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "id": "excessive-american", - "metadata": {}, - "outputs": [], - "source": [ - "def create_dataloader(manifest_path,\t\n", - " vocab_filepath,\t\n", - " mean_std_filepath,\t\n", - " augmentation_config='{}',\t\n", - " max_duration=float('inf'),\t\n", - " min_duration=0.0,\t\n", - " stride_ms=10.0,\t\n", - " window_ms=20.0,\t\n", - " max_freq=None,\t\n", - " specgram_type='linear',\t\n", - " use_dB_normalization=True,\t\n", - " random_seed=0,\t\n", - " keep_transcription_text=False,\t\n", - " is_training=False,\t\n", - " batch_size=1,\t\n", - " num_workers=0,\t\n", - " sortagrad=False,\t\n", - " shuffle_method=None,\t\n", - " dist=False):\t\n", - "\n", - " dataset = DeepSpeech2Dataset(\t\n", - " manifest_path,\t\n", - " vocab_filepath,\t\n", - " mean_std_filepath,\t\n", - " augmentation_config=augmentation_config,\t\n", - " max_duration=max_duration,\t\n", - " min_duration=min_duration,\t\n", - " stride_ms=stride_ms,\t\n", - " window_ms=window_ms,\t\n", - " max_freq=max_freq,\t\n", - " specgram_type=specgram_type,\t\n", - " use_dB_normalization=use_dB_normalization,\t\n", - " random_seed=random_seed,\t\n", - " keep_transcription_text=keep_transcription_text)\t\n", - "\n", - " if dist:\t\n", - " batch_sampler = DeepSpeech2DistributedBatchSampler(\t\n", - " dataset,\t\n", - " batch_size,\t\n", - " num_replicas=None,\t\n", - " rank=None,\t\n", - " shuffle=is_training,\t\n", - " drop_last=is_training,\t\n", - " sortagrad=is_training,\t\n", - " shuffle_method=shuffle_method)\t\n", - " else:\t\n", - " batch_sampler = DeepSpeech2BatchSampler(\t\n", - " dataset,\t\n", - " shuffle=is_training,\t\n", - " batch_size=batch_size,\t\n", - " drop_last=is_training,\t\n", - " sortagrad=is_training,\t\n", - " shuffle_method=shuffle_method)\t\n", - "\n", - " def padding_batch(batch, padding_to=-1, flatten=False, is_training=True):\t\n", - " \"\"\"\t\n", - " Padding audio features with zeros to make them have the same shape (or\t\n", - " a user-defined shape) within one bach.\t\n", - "\n", - " If ``padding_to`` is -1, the maximun shape in the batch will be used\t\n", - " as the target shape for padding. 
Otherwise, `padding_to` will be the\t\n", - " target shape (only refers to the second axis).\t\n", - "\n", - " If `flatten` is True, features will be flatten to 1darray.\t\n", - " \"\"\"\t\n", - " new_batch = []\t\n", - " # get target shape\t\n", - " max_length = max([audio.shape[1] for audio, text in batch])\t\n", - " if padding_to != -1:\t\n", - " if padding_to < max_length:\t\n", - " raise ValueError(\"If padding_to is not -1, it should be larger \"\t\n", - " \"than any instance's shape in the batch\")\t\n", - " max_length = padding_to\t\n", - " max_text_length = max([len(text) for audio, text in batch])\t\n", - " # padding\t\n", - " padded_audios = []\t\n", - " audio_lens = []\t\n", - " texts, text_lens = [], []\t\n", - " for audio, text in batch:\t\n", - " padded_audio = np.zeros([audio.shape[0], max_length])\t\n", - " padded_audio[:, :audio.shape[1]] = audio\t\n", - " if flatten:\t\n", - " padded_audio = padded_audio.flatten()\t\n", - " padded_audios.append(padded_audio)\t\n", - " audio_lens.append(audio.shape[1])\t\n", - "\n", - " padded_text = np.zeros([max_text_length])\n", - " if is_training:\n", - " padded_text[:len(text)] = text\t# ids\n", - " else:\n", - " padded_text[:len(text)] = [ord(t) for t in text] # string\n", - " \n", - " texts.append(padded_text)\t\n", - " text_lens.append(len(text))\t\n", - "\n", - " padded_audios = np.array(padded_audios).astype('float32')\t\n", - " audio_lens = np.array(audio_lens).astype('int64')\t\n", - " texts = np.array(texts).astype('int32')\t\n", - " text_lens = np.array(text_lens).astype('int64')\t\n", - " return padded_audios, texts, audio_lens, text_lens\t\n", - "\n", - " loader = DataLoader(\t\n", - " dataset,\t\n", - " batch_sampler=batch_sampler,\t\n", - " collate_fn=partial(padding_batch, is_training=is_training),\t\n", - " num_workers=num_workers)\t\n", - " return loader" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "id": "naval-brave", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{'num_samples': 5, 'beam_size': 500, 'num_proc_bsearch': 8, 'num_conv_layers': 2, 'num_rnn_layers': 3, 'rnn_layer_size': 2048, 'alpha': 2.5, 'beta': 0.3, 'cutoff_prob': 1.0, 'cutoff_top_n': 40, 'use_gru': False, 'use_gpu': True, 'share_rnn_weights': True, 'infer_manifest': 'examples/aishell/data/manifest.dev', 'mean_std_path': 'examples/aishell/data/mean_std.npz', 'vocab_path': 'examples/aishell/data/vocab.txt', 'lang_model_path': 'models/lm/common_crawl_00.prune01111.trie.klm', 'model_path': 'examples/aishell/checkpoints/step_final', 'decoding_method': 'ctc_beam_search', 'error_rate_type': 'wer', 'specgram_type': 'linear'}\n" - ] - } - ], - "source": [ - "import sys\n", - "import argparse\n", - "import functools\n", - "from utils.utility import add_arguments, print_arguments\n", - "parser = argparse.ArgumentParser(description=__doc__)\n", - "add_arg = functools.partial(add_arguments, argparser=parser)\n", - "# yapf: disable\n", - "add_arg('num_samples', int, 5, \"# of samples to infer.\")\n", - "add_arg('beam_size', int, 500, \"Beam search width.\")\n", - "add_arg('num_proc_bsearch', int, 8, \"# of CPUs for beam search.\")\n", - "add_arg('num_conv_layers', int, 2, \"# of convolution layers.\")\n", - "add_arg('num_rnn_layers', int, 3, \"# of recurrent layers.\")\n", - "add_arg('rnn_layer_size', int, 2048, \"# of recurrent cells per layer.\")\n", - "add_arg('alpha', float, 2.5, \"Coef of LM for beam search.\")\n", - "add_arg('beta', float, 0.3, \"Coef of WC for beam search.\")\n", - 
"add_arg('cutoff_prob', float, 1.0, \"Cutoff probability for pruning.\")\n", - "add_arg('cutoff_top_n', int, 40, \"Cutoff number for pruning.\")\n", - "add_arg('use_gru', bool, False, \"Use GRUs instead of simple RNNs.\")\n", - "add_arg('use_gpu', bool, True, \"Use GPU or not.\")\n", - "add_arg('share_rnn_weights',bool, True, \"Share input-hidden weights across \"\n", - " \"bi-directional RNNs. Not for GRU.\")\n", - "add_arg('infer_manifest', str,\n", - " 'examples/aishell/data/manifest.dev',\n", - " \"Filepath of manifest to infer.\")\n", - "add_arg('mean_std_path', str,\n", - " 'examples/aishell/data/mean_std.npz',\n", - " \"Filepath of normalizer's mean & std.\")\n", - "add_arg('vocab_path', str,\n", - " 'examples/aishell/data/vocab.txt',\n", - " \"Filepath of vocabulary.\")\n", - "add_arg('lang_model_path', str,\n", - " 'models/lm/common_crawl_00.prune01111.trie.klm',\n", - " \"Filepath for language model.\")\n", - "add_arg('model_path', str,\n", - " 'examples/aishell/checkpoints/step_final',\n", - " \"If None, the training starts from scratch, \"\n", - " \"otherwise, it resumes from the pre-trained model.\")\n", - "add_arg('decoding_method', str,\n", - " 'ctc_beam_search',\n", - " \"Decoding method. Options: ctc_beam_search, ctc_greedy\",\n", - " choices = ['ctc_beam_search', 'ctc_greedy'])\n", - "add_arg('error_rate_type', str,\n", - " 'wer',\n", - " \"Error rate type for evaluation.\",\n", - " choices=['wer', 'cer'])\n", - "add_arg('specgram_type', str,\n", - " 'linear',\n", - " \"Audio feature type. Options: linear, mfcc.\",\n", - " choices=['linear', 'mfcc'])\n", - "# yapf: disable\n", - "args = parser.parse_args([])\n", - "print(vars(args))" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "id": "bearing-physics", - "metadata": {}, - "outputs": [], - "source": [ - "batch_reader = create_dataloader(\n", - " manifest_path=args.infer_manifest,\n", - " vocab_filepath=args.vocab_path,\n", - " mean_std_filepath=args.mean_std_path,\n", - " augmentation_config='{}',\n", - " #max_duration=float('inf'),\n", - " max_duration=27.0,\n", - " min_duration=0.0,\n", - " stride_ms=10.0,\n", - " window_ms=20.0,\n", - " max_freq=None,\n", - " specgram_type=args.specgram_type,\n", - " use_dB_normalization=True,\n", - " random_seed=0,\n", - " keep_transcription_text=True,\n", - " is_training=False,\n", - " batch_size=args.num_samples,\n", - " sortagrad=True,\n", - " shuffle_method=None,\n", - " dist=False)" - ] - }, - { - "cell_type": "code", - "execution_count": 30, - "id": "classified-melissa", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "test Tensor(shape=[5, 6], dtype=int32, place=CUDAPinnedPlace, stop_gradient=True,\n", - " [[22823, 26102, 20195, 37324, 0 , 0 ],\n", - " [22238, 26469, 23601, 22909, 0 , 0 ],\n", - " [20108, 26376, 22235, 26085, 0 , 0 ],\n", - " [36824, 35201, 20445, 25345, 32654, 24863],\n", - " [29042, 27748, 21463, 23456, 0 , 0 ]])\n", - "test raw 大时代里\n", - "test raw 煲汤受宠\n", - "audio len Tensor(shape=[5], dtype=int64, place=CUDAPinnedPlace, stop_gradient=True,\n", - " [163, 167, 180, 186, 186])\n", - "test len Tensor(shape=[5], dtype=int64, place=CUDAPlace(0), stop_gradient=True,\n", - " [4, 4, 4, 6, 4])\n", - "audio Tensor(shape=[5, 161, 186], dtype=float32, place=CUDAPinnedPlace, stop_gradient=True,\n", - " [[[ 1.11669052, 0.79015088, 0.93658292, ..., 0. , 0. , 0. ],\n", - " [ 0.83549136, 0.72643483, 0.83578080, ..., 0. , 0. , 0. ],\n", - " [-0.89155018, -0.18894747, -0.53357804, ..., 0. , 0. , 0. 
],\n", - " ...,\n", - " [ 0.33386710, -0.81240511, 0.12869737, ..., 0. , 0. , 0. ],\n", - " [-0.17537928, 0.58380985, 0.70696265, ..., 0. , 0. , 0. ],\n", - " [-0.84175998, 1.22041416, 0.07929770, ..., 0. , 0. , 0. ]],\n", - "\n", - " [[-0.35964420, 0.77392709, 0.71409988, ..., 0. , 0. , 0. ],\n", - " [-0.15990183, 0.42962283, 0.06222462, ..., 0. , 0. , 0. ],\n", - " [-0.31166190, -0.74864638, -0.52836996, ..., 0. , 0. , 0. ],\n", - " ...,\n", - " [-0.27546275, 0.32889456, 0.12410031, ..., 0. , 0. , 0. ],\n", - " [ 0.16264282, 0.49418071, -0.15960945, ..., 0. , 0. , 0. ],\n", - " [ 0.12476666, 0.00516864, 1.16021466, ..., 0. , 0. , 0. ]],\n", - "\n", - " [[ 0.90202141, 1.48541915, 0.92062062, ..., 0. , 0. , 0. ],\n", - " [ 0.82661545, 1.37171340, 0.86746097, ..., 0. , 0. , 0. ],\n", - " [-0.62287915, -0.48645937, 0.35041964, ..., 0. , 0. , 0. ],\n", - " ...,\n", - " [ 0.07376949, 0.07138316, 0.76355994, ..., 0. , 0. , 0. ],\n", - " [-0.32306790, 0.43247896, 1.27311838, ..., 0. , 0. , 0. ],\n", - " [-0.97667056, 0.60747612, 0.79181534, ..., 0. , 0. , 0. ]],\n", - "\n", - " [[ 0.72022128, 0.95428467, 0.92766261, ..., 0.29105374, -0.45564806, -0.62151009],\n", - " [ 0.42083180, 0.49279949, 0.82724041, ..., -0.17333922, -1.45363355, -0.61673522],\n", - " [-0.76116520, -0.84750438, -0.09512503, ..., -1.01497340, -1.42781055, -0.80859023],\n", - " ...,\n", - " [-0.23009977, 1.06155431, 1.09065628, ..., 0.25581080, 0.53794998, -1.22650719],\n", - " [-1.37693381, 0.30778193, 0.17152318, ..., 0.51650339, 0.25580606, 0.83097816],\n", - " [-1.62180591, 1.30567718, 1.09928656, ..., -0.77590007, 1.27712476, 0.53189957]],\n", - "\n", - " [[ 1.03205252, -0.51535392, 0.21077573, ..., 0.76618457, 1.27425683, 1.52250278],\n", - " [ 0.82059991, 0.43990925, 0.13090958, ..., 0.86662549, 1.01687658, 1.48495352],\n", - " [-0.75489789, -0.01997089, -0.65174174, ..., 0.09061214, -0.55211234, -0.01614586],\n", - " ...,\n", - " [ 0.50985396, 1.84555030, 0.79185146, ..., 1.13666189, 1.19898069, 1.98158395],\n", - " [ 1.98721015, 2.52385354, 1.11714780, ..., 0.19416514, 1.11329341, 0.64460152],\n", - " [ 2.69512844, 1.90993905, 0.50245082, ..., -0.50902629, 0.03333465, -1.24584770]]])\n" - ] - } - ], - "source": [ - "for idx, (audio, audio_len, text, text_len) in enumerate(batch_reader()):\n", - " print('test', text)\n", - " print(\"test raw\", ''.join( chr(i) for i in text[0][:int(text_len[0])] ))\n", - " print(\"test raw\", ''.join( chr(i) for i in text[-1][:int(text_len[-1])] ))\n", - " print('audio len', audio_len)\n", - " print('test len', text_len)\n", - " print('audio', audio)\n", - " break" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "unexpected-skating", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "minus-modern", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.0" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} \ No newline at end of file diff --git a/.notebook/dataloader_with_tokens_tokenids.ipynb b/.notebook/dataloader_with_tokens_tokenids.ipynb deleted file mode 100644 index 7d93dd009..000000000 --- 
a/.notebook/dataloader_with_tokens_tokenids.ipynb +++ /dev/null @@ -1,1204 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "medieval-monday", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "/workspace/DeepSpeech-2.x\n" - ] - }, - { - "data": { - "text/plain": [ - "'/workspace/DeepSpeech-2.x'" - ] - }, - "execution_count": 1, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "%cd ..\n", - "%pwd" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "emerging-meter", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages/paddle/fluid/layers/utils.py:26: DeprecationWarning: `np.int` is a deprecated alias for the builtin `int`. To silence this warning, use `int` by itself. Doing this will not modify any behavior and is safe. When replacing `np.int`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. If you wish to review your current use, check the release note link for additional information.\n", - "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", - " def convert_to_list(value, n, name, dtype=np.int):\n" - ] - } - ], - "source": [ - "import math\n", - "import random\n", - "import tarfile\n", - "import logging\n", - "import numpy as np\n", - "from collections import namedtuple\n", - "from functools import partial\n", - "\n", - "import paddle\n", - "from paddle.io import Dataset\n", - "from paddle.io import DataLoader\n", - "from paddle.io import BatchSampler\n", - "from paddle.io import DistributedBatchSampler\n", - "from paddle import distributed as dist\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "excessive-american", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "naval-brave", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. 
Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n", - " and should_run_async(code)\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:93] register user softmax to paddle, remove this when fixed!\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:97] register user log_softmax to paddle, remove this when fixed!\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:101] register user sigmoid to paddle, remove this when fixed!\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:105] register user log_sigmoid to paddle, remove this when fixed!\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:109] register user relu to paddle, remove this when fixed!\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:119] override cat of paddle if exists or register, remove this when fixed!\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:133] override item of paddle.Tensor if exists or register, remove this when fixed!\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:144] override long of paddle.Tensor if exists or register, remove this when fixed!\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:164] override new_full of paddle.Tensor if exists or register, remove this when fixed!\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:179] override eq of paddle.Tensor if exists or register, remove this when fixed!\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:185] override eq of paddle if exists or register, remove this when fixed!\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:195] override contiguous of paddle.Tensor if exists or register, remove this when fixed!\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:212] override size of paddle.Tensor (`to_static` do not process `size` property, maybe some `paddle` api dependent on it), remove this when fixed!\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:223] register user view to paddle.Tensor, remove this when fixed!\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:233] register user view_as to paddle.Tensor, remove this when fixed!\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:259] register user masked_fill to paddle.Tensor, remove this when fixed!\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:277] register user masked_fill_ to paddle.Tensor, remove this when fixed!\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:288] register user fill_ to paddle.Tensor, remove this when fixed!\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:298] register user repeat to paddle.Tensor, remove this when fixed!\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:303] register user softmax to paddle.Tensor, remove this when fixed!\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:308] register user sigmoid to paddle.Tensor, remove this when fixed!\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:312] register user relu to paddle.Tensor, remove this when fixed!\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:322] register user type_as to paddle.Tensor, remove this when fixed!\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:337] register user to to paddle.Tensor, remove this when fixed!\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:346] register user float to paddle.Tensor, remove this when fixed!\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:356] register user tolist to paddle.Tensor, remove this when fixed!\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:371] register user glu to paddle.nn.functional, remove this when fixed!\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:422] override ctc_loss 
of paddle.nn.functional if exists, remove this when fixed!\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:428] register user Module to paddle.nn, remove this when fixed!\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:434] register user ModuleList to paddle.nn, remove this when fixed!\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:450] register user GLU to paddle.nn, remove this when fixed!\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:483] register user ConstantPad2d to paddle.nn, remove this when fixed!\n", - "[WARNING 2021/04/16 06:32:09 __init__.py:489] register user export to paddle.jit, remove this when fixed!\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{'num_samples': 5, 'beam_size': 500, 'num_proc_bsearch': 8, 'num_conv_layers': 2, 'num_rnn_layers': 3, 'rnn_layer_size': 2048, 'alpha': 2.5, 'beta': 0.3, 'cutoff_prob': 1.0, 'cutoff_top_n': 40, 'use_gru': False, 'use_gpu': True, 'share_rnn_weights': True, 'unit_type': 'char', 'spm_model_prefix': 'examples/tiny/s1/data/spm_bpe', 'infer_manifest': 'examples/tiny/s1/data/manifest.tiny', 'mean_std_path': 'examples/tiny/s1/data/mean_std.npz', 'vocab_path': 'examples/tiny/s1/data/vocab.txt', 'lang_model_path': 'models/lm/common_crawl_00.prune01111.trie.klm', 'model_path': 'examples/tiny/s1/checkpoints/step_final', 'decoding_method': 'ctc_beam_search', 'error_rate_type': 'wer', 'specgram_type': 'fbank', 'feat_dim': 80, 'delta_delta': False}\n" - ] - } - ], - "source": [ - "import sys\n", - "import argparse\n", - "import functools\n", - "from deepspeech.utils.utility import add_arguments, print_arguments\n", - "parser = argparse.ArgumentParser(description=__doc__)\n", - "add_arg = functools.partial(add_arguments, argparser=parser)\n", - "# yapf: disable\n", - "add_arg('num_samples', int, 5, \"# of samples to infer.\")\n", - "add_arg('beam_size', int, 500, \"Beam search width.\")\n", - "add_arg('num_proc_bsearch', int, 8, \"# of CPUs for beam search.\")\n", - "add_arg('num_conv_layers', int, 2, \"# of convolution layers.\")\n", - "add_arg('num_rnn_layers', int, 3, \"# of recurrent layers.\")\n", - "add_arg('rnn_layer_size', int, 2048, \"# of recurrent cells per layer.\")\n", - "add_arg('alpha', float, 2.5, \"Coef of LM for beam search.\")\n", - "add_arg('beta', float, 0.3, \"Coef of WC for beam search.\")\n", - "add_arg('cutoff_prob', float, 1.0, \"Cutoff probability for pruning.\")\n", - "add_arg('cutoff_top_n', int, 40, \"Cutoff number for pruning.\")\n", - "add_arg('use_gru', bool, False, \"Use GRUs instead of simple RNNs.\")\n", - "add_arg('use_gpu', bool, True, \"Use GPU or not.\")\n", - "add_arg('share_rnn_weights',bool, True, \"Share input-hidden weights across \"\n", - " \"bi-directional RNNs. 
Not for GRU.\")\n", - "add_arg('unit_type', str,\n", - " 'char',\n", - " \"Options: char, word, spm.\",\n", - " choices=['char', 'word', 'spm'])\n", - "add_arg('spm_model_prefix', str,\n", - " 'examples/tiny/s1/data/spm_bpe',\n", - " \"spm model prefix.\",)\n", - "add_arg('infer_manifest', str,\n", - " 'examples/tiny/s1/data/manifest.tiny',\n", - " \"Filepath of manifest to infer.\")\n", - "add_arg('mean_std_path', str,\n", - " 'examples/tiny/s1/data/mean_std.npz',\n", - " \"Filepath of normalizer's mean & std.\")\n", - "add_arg('vocab_path', str,\n", - " 'examples/tiny/s1/data/vocab.txt',\n", - " \"Filepath of vocabulary.\")\n", - "add_arg('lang_model_path', str,\n", - " 'models/lm/common_crawl_00.prune01111.trie.klm',\n", - " \"Filepath for language model.\")\n", - "add_arg('model_path', str,\n", - " 'examples/tiny/s1/checkpoints/step_final',\n", - " \"If None, the training starts from scratch, \"\n", - " \"otherwise, it resumes from the pre-trained model.\")\n", - "add_arg('decoding_method', str,\n", - " 'ctc_beam_search',\n", - " \"Decoding method. Options: ctc_beam_search, ctc_greedy\",\n", - " choices = ['ctc_beam_search', 'ctc_greedy'])\n", - "add_arg('error_rate_type', str,\n", - " 'wer',\n", - " \"Error rate type for evaluation.\",\n", - " choices=['wer', 'cer'])\n", - "add_arg('specgram_type', str,\n", - " 'fbank',\n", - " \"Audio feature type. Options: linear, mfcc.\",\n", - " choices=['linear', 'mfcc'])\n", - "add_arg('feat_dim', int, 80, \"mfcc or fbank feat dim.\")\n", - "add_arg('delta_delta', bool, False, \"delta delta\")\n", - "# yapf: disable\n", - "args = parser.parse_args([])\n", - "print(vars(args))" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "wired-principal", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{'num_samples': 5, 'beam_size': 500, 'num_proc_bsearch': 8, 'num_conv_layers': 2, 'num_rnn_layers': 3, 'rnn_layer_size': 2048, 'alpha': 2.5, 'beta': 0.3, 'cutoff_prob': 1.0, 'cutoff_top_n': 40, 'use_gru': False, 'use_gpu': True, 'share_rnn_weights': True, 'unit_type': 'char', 'spm_model_prefix': 'examples/aishell/s1/data/spm_bpe', 'infer_manifest': 'examples/aishell/s1/data/manifest.test', 'mean_std_path': '', 'vocab_path': 'examples/aishell/s1/data/vocab.txt', 'lang_model_path': 'models/lm/common_crawl_00.prune01111.trie.klm', 'model_path': 'examples/aishell/s1/checkpoints/step_final', 'decoding_method': 'ctc_beam_search', 'error_rate_type': 'wer', 'specgram_type': 'fbank', 'feat_dim': 80, 'delta_delta': False}\n" - ] - } - ], - "source": [ - "import sys\n", - "import argparse\n", - "import functools\n", - "from deepspeech.utils.utility import add_arguments, print_arguments\n", - "parser = argparse.ArgumentParser(description=__doc__)\n", - "add_arg = functools.partial(add_arguments, argparser=parser)\n", - "# yapf: disable\n", - "add_arg('num_samples', int, 5, \"# of samples to infer.\")\n", - "add_arg('beam_size', int, 500, \"Beam search width.\")\n", - "add_arg('num_proc_bsearch', int, 8, \"# of CPUs for beam search.\")\n", - "add_arg('num_conv_layers', int, 2, \"# of convolution layers.\")\n", - "add_arg('num_rnn_layers', int, 3, \"# of recurrent layers.\")\n", - "add_arg('rnn_layer_size', int, 2048, \"# of recurrent cells per layer.\")\n", - "add_arg('alpha', float, 2.5, \"Coef of LM for beam search.\")\n", - "add_arg('beta', float, 0.3, \"Coef of WC for beam search.\")\n", - "add_arg('cutoff_prob', float, 1.0, \"Cutoff probability for pruning.\")\n", - "add_arg('cutoff_top_n', int, 
40, \"Cutoff number for pruning.\")\n", - "add_arg('use_gru', bool, False, \"Use GRUs instead of simple RNNs.\")\n", - "add_arg('use_gpu', bool, True, \"Use GPU or not.\")\n", - "add_arg('share_rnn_weights',bool, True, \"Share input-hidden weights across \"\n", - " \"bi-directional RNNs. Not for GRU.\")\n", - "add_arg('unit_type', str,\n", - " 'char',\n", - " \"Options: char, word, spm.\",\n", - " choices=['char', 'word', 'spm'])\n", - "add_arg('spm_model_prefix', str,\n", - " 'examples/aishell/s1/data/spm_bpe',\n", - " \"spm model prefix.\",)\n", - "add_arg('infer_manifest', str,\n", - " 'examples/aishell/s1/data/manifest.test',\n", - " \"Filepath of manifest to infer.\")\n", - "add_arg('mean_std_path', str,\n", - " '',\n", - " \"examples/aishell/s1/data/mean_std.npz, Filepath of normalizer's mean & std.\")\n", - "add_arg('vocab_path', str,\n", - " 'examples/aishell/s1/data/vocab.txt',\n", - " \"Filepath of vocabulary.\")\n", - "add_arg('lang_model_path', str,\n", - " 'models/lm/common_crawl_00.prune01111.trie.klm',\n", - " \"Filepath for language model.\")\n", - "add_arg('model_path', str,\n", - " 'examples/aishell/s1/checkpoints/step_final',\n", - " \"If None, the training starts from scratch, \"\n", - " \"otherwise, it resumes from the pre-trained model.\")\n", - "add_arg('decoding_method', str,\n", - " 'ctc_beam_search',\n", - " \"Decoding method. Options: ctc_beam_search, ctc_greedy\",\n", - " choices = ['ctc_beam_search', 'ctc_greedy'])\n", - "add_arg('error_rate_type', str,\n", - " 'wer',\n", - " \"Error rate type for evaluation.\",\n", - " choices=['wer', 'cer'])\n", - "add_arg('specgram_type', str,\n", - " 'fbank',\n", - " \"Audio feature type. Options: linear, mfcc.\",\n", - " choices=['linear', 'mfcc', 'fbank'])\n", - "add_arg('feat_dim', int, 80, \"mfcc or fbank feat dim.\")\n", - "add_arg('delta_delta', bool, False, \"delta delta\")\n", - "# yapf: disable\n", - "args = parser.parse_args([])\n", - "print(vars(args))" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "bearing-physics", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages/scipy/fftpack/__init__.py:103: DeprecationWarning: The module numpy.dual is deprecated. Instead of using dual, use the functions directly from numpy or scipy.\n", - " from numpy.dual import register_func\n", - "/workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages/scipy/special/orthogonal.py:81: DeprecationWarning: `np.int` is a deprecated alias for the builtin `int`. To silence this warning, use `int` by itself. Doing this will not modify any behavior and is safe. When replacing `np.int`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. If you wish to review your current use, check the release note link for additional information.\n", - "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", - " from numpy import (exp, inf, pi, sqrt, floor, sin, cos, around, int,\n", - "/workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages/numba/core/types/__init__.py:108: DeprecationWarning: `np.long` is a deprecated alias for `np.compat.long`. To silence this warning, use `np.compat.long` by itself. In the likely event your code does not need to work on Python 2 you can use the builtin `int` for which `np.compat.long` is itself an alias. Doing this will not modify any behaviour and is safe. 
When replacing `np.long`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. If you wish to review your current use, check the release note link for additional information.\n", - "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", - " long_ = _make_signed(np.long)\n", - "/workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages/numba/core/types/__init__.py:109: DeprecationWarning: `np.long` is a deprecated alias for `np.compat.long`. To silence this warning, use `np.compat.long` by itself. In the likely event your code does not need to work on Python 2 you can use the builtin `int` for which `np.compat.long` is itself an alias. Doing this will not modify any behaviour and is safe. When replacing `np.long`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. If you wish to review your current use, check the release note link for additional information.\n", - "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", - " ulong = _make_unsigned(np.long)\n" - ] - } - ], - "source": [ - "from deepspeech.frontend.utility import read_manifest\n", - "from deepspeech.frontend.augmentor.augmentation import AugmentationPipeline\n", - "from deepspeech.frontend.featurizer.speech_featurizer import SpeechFeaturizer\n", - "from deepspeech.frontend.speech import SpeechSegment\n", - "from deepspeech.frontend.normalizer import FeatureNormalizer\n", - "\n", - "\n", - "from deepspeech.io.collator import SpeechCollator\n", - "from deepspeech.io.dataset import ManifestDataset\n", - "from deepspeech.io.sampler import (\n", - " SortagradDistributedBatchSampler,\n", - " SortagradBatchSampler,\n", - ")\n", - "from deepspeech.io import create_dataloader\n", - "batch_reader = create_dataloader(\n", - " manifest_path=args.infer_manifest,\n", - " unit_type=args.unit_type,\n", - " vocab_filepath=args.vocab_path,\n", - " mean_std_filepath=args.mean_std_path,\n", - " spm_model_prefix=args.spm_model_prefix,\n", - " augmentation_config='{}',\n", - " max_input_len=27.0,\n", - " min_input_len=0.0,\n", - " max_output_len=float('inf'),\n", - " min_output_len=0.0,\n", - " max_output_input_ratio=float('inf'),\n", - " min_output_input_ratio=0.0,\n", - " stride_ms=10.0,\n", - " window_ms=20.0,\n", - " max_freq=None,\n", - " specgram_type=args.specgram_type,\n", - " feat_dim=args.feat_dim,\n", - " delta_delta=args.delta_delta,\n", - " use_dB_normalization=True,\n", - " random_seed=0,\n", - " keep_transcription_text=True,\n", - " is_training=False,\n", - " batch_size=args.num_samples,\n", - " num_workers=0,\n", - " sortagrad=True,\n", - " shuffle_method=None,\n", - " dist=False)" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "classified-melissa", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n", - " and should_run_async(code)\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "fbank\n", - "[232 387 331 ... 249 249 262] int16\n", - "fbank\n", - "[-138 -219 -192 ... 
338 324 351] int16\n", - "fbank\n", - "[ 694 1175 1022 ... 553 514 627] int16\n", - "fbank\n", - "[-39 -79 -53 ... 139 172 99] int16\n", - "fbank\n", - "[-277 -480 -425 ... 758 767 739] int16\n", - "fbank\n", - "[ 399 693 609 ... 1291 1270 1291] int16\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages/paddle/fluid/dataloader/dataloader_iter.py:354: DeprecationWarning: `np.object` is a deprecated alias for the builtin `object`. To silence this warning, use `object` by itself. Doing this will not modify any behavior and is safe. \n", - "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", - " if arr.dtype == np.object:\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "fbank\n", - "[ -750 -1254 -1107 ... 2276 1889 2067] int16\n", - "fbank\n", - "[ -127 -199 -149 ... -5243 -5065 -5398] int16\n", - "fbank\n", - "[ 465 783 677 ... 980 903 1008] int16\n", - "fbank\n", - "[ 90 160 157 ... -2 -16 -21] int16\n", - "fbank\n", - "[ 213 345 295 ... 2483 2246 2501] int16\n", - "fbank\n", - "[ -86 -159 -131 ... 270 258 290] int16\n", - "fbank\n", - "[-1023 -1714 -1505 ... 1532 1596 1575] int16\n", - "fbank\n", - "[-366 -602 -527 ... 374 370 379] int16\n", - "fbank\n", - "[ 761 1275 1127 ... 369 413 295] int16\n", - "fbank\n", - "[382 621 550 ... 161 161 174] int16\n", - "fbank\n", - "[ -28 -91 -120 ... 28 34 11] int16\n", - "fbank\n", - "[ -5 -5 -5 ... 268 294 341] int16\n", - "fbank\n", - "[240 417 684 ... 267 262 219] int16\n", - "fbank\n", - "[131 206 194 ... 383 320 343] int16\n", - "test: Tensor(shape=[5, 7], dtype=int32, place=CUDAPinnedPlace, stop_gradient=True,\n", - " [[31069, 21487, 29233, 30340, 20320, -1 , -1 ],\n", - " [20540, 24471, 19968, 25552, 30340, 26159, -1 ],\n", - " [36825, 20010, 31243, 24230, 26159, 32654, 30340],\n", - " [20108, 21040, 20108, -1 , -1 , -1 , -1 ],\n", - " [21435, 34892, 25919, 21270, -1 , -1 , -1 ]])\n", - "fbank\n", - "[1155 1890 1577 ... 1092 989 1130] int16\n", - "fbank\n", - "[296 358 296 ... 140 140 168] int16\n", - "fbank\n", - "[-50 -91 -63 ... 104 104 86] int16\n", - "fbank\n", - "[-37 -66 -50 ... -31 -45 -52] int16\n", - "fbank\n", - "[-401 -652 -547 ... -339 -307 -344] int16\n", - "fbank\n", - "[-21 -47 -51 ... 94 81 107] int16\n", - "fbank\n", - "[ 533 887 755 ... 3074 2853 3254] int16\n", - "fbank\n", - "[ 44 71 66 ... -628 -733 -601] int16\n", - "fbank\n", - "[ 50 86 79 ... 129 116 138] int16\n", - "fbank\n", - "[ 92 146 126 ... -208 -193 -179] int16\n", - "test raw: 祝可爱的你\n", - "test raw: 去行政化\n", - "audio len: Tensor(shape=[5], dtype=int64, place=CUDAPinnedPlace, stop_gradient=True,\n", - " [184, 194, 196, 204, 207])\n", - "test len: Tensor(shape=[5], dtype=int64, place=CUDAPlace(0), stop_gradient=True,\n", - " [5, 6, 7, 3, 4])\n", - "audio: Tensor(shape=[5, 207, 80], dtype=float32, place=CUDAPinnedPlace, stop_gradient=True,\n", - " [[[12.25633812, 12.61639309, 10.36936474, ..., 13.02949619, 11.51365757, 10.59789085],\n", - " [13.32148266, 13.41071606, 11.43800735, ..., 13.69783783, 12.83939362, 11.51259613],\n", - " [12.62640572, 12.53621101, 10.97212505, ..., 13.33757591, 12.32293034, 10.75493717],\n", - " ...,\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. 
]],\n", - "\n", - " [[10.99619484, 11.35202599, 9.56922054 , ..., 9.94971657 , 9.88354111 , 9.55315971 ],\n", - " [10.44461155, 9.81688595 , 5.62538481 , ..., 10.60468388, 10.94417381, 9.42646980 ],\n", - " [10.23835754, 10.23407459, 7.99464273 , ..., 10.68097591, 9.91640091 , 10.04131031],\n", - " ...,\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ]],\n", - "\n", - " [[14.10299397, 14.50298119, 12.87738323, ..., 12.62796497, 12.69949627, 11.43171215],\n", - " [13.85035992, 13.15289116, 10.66541386, ..., 13.34364223, 13.46972179, 11.02160740],\n", - " [13.19866467, 13.23537827, 11.65760899, ..., 12.72559357, 12.42716217, 11.74562359],\n", - " ...,\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ]],\n", - "\n", - " [[12.85668373, 12.82431412, 11.68144703, ..., 14.10119247, 15.12791920, 13.68221378],\n", - " [13.19507027, 13.40244961, 11.43618393, ..., 13.32919979, 13.68267441, 12.73429012],\n", - " [13.02173328, 12.92082500, 11.44303989, ..., 12.77793121, 13.10915661, 11.77327728],\n", - " ...,\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ]],\n", - "\n", - " [[12.90771198, 13.40234852, 13.01435471, ..., 13.80359459, 14.08088684, 13.17883396],\n", - " [14.06678009, 14.06943512, 12.52837276, ..., 13.66423225, 13.66300583, 13.60142994],\n", - " [12.58743191, 12.94520760, 11.75190544, ..., 14.28828907, 14.08229160, 13.02433395],\n", - " ...,\n", - " [16.20896912, 16.42283821, 14.94358730, ..., 12.91146755, 12.66766262, 11.76361752],\n", - " [13.49324894, 14.14653301, 13.16490936, ..., 13.23435783, 13.45378494, 12.60386276],\n", - " [15.56288910, 15.92445087, 14.90794277, ..., 13.43840790, 13.41075516, 12.55605984]]])\n" - ] - } - ], - "source": [ - "for idx, (audio, audio_len, text, text_len) in enumerate(batch_reader()):\n", - " print('test:', text)\n", - " print(\"test raw:\", ''.join( chr(i) for i in text[0][:int(text_len[0])] ))\n", - " print(\"test raw:\", ''.join( chr(i) for i in text[-1][:int(text_len[-1])] ))\n", - " print('audio len:', audio_len)\n", - " print('test len:', text_len)\n", - " print('audio:', audio)\n", - " break" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "unexpected-skating", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "minus-modern", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "fbank\n", - "[232 387 331 ... 249 249 262] int16\n", - "fbank\n", - "[-138 -219 -192 ... 338 324 351] int16\n", - "fbank\n", - "[ 694 1175 1022 ... 553 514 627] int16\n", - "fbank\n", - "[-39 -79 -53 ... 139 172 99] int16\n", - "fbank\n", - "[-277 -480 -425 ... 758 767 739] int16\n", - "fbank\n", - "test: Tensor(shape=[5, 7], dtype=int32, place=CUDAPinnedPlace, stop_gradient=True,\n", - " [[2695, 505, 2332, 2553, 169, -1 , -1 ],\n", - " [ 230, 1237, 2 , 1556, 2553, 1694, -1 ],\n", - " [3703, 28 , 2739, 1172, 1694, 2966, 2553],\n", - " [ 70 , 355, 70 , -1 , -1 , -1 , -1 ],\n", - " [ 477, 3363, 1621, 412, -1 , -1 , -1 ]])\n", - "[ 399 693 609 ... 
1291 1270 1291] int16\n", - "test raw: ઇǹज৹©\n", - "test raw: ǝണٕƜ\n", - "test len: Tensor(shape=[5], dtype=int64, place=CUDAPlace(0), stop_gradient=True,\n", - " [5, 6, 7, 3, 4])\n", - "audio: Tensor(shape=[5, 207, 80], dtype=float32, place=CUDAPinnedPlace, stop_gradient=True,\n", - " [[[12.25794601, 12.61855793, 10.37306023, ..., 13.12571049, 11.53678799, 10.32210350],\n", - " [13.32333183, 13.41336918, 11.44248962, ..., 13.65861225, 12.79308128, 11.31168747],\n", - " [12.62584686, 12.53506088, 10.96861362, ..., 13.32526493, 12.41560936, 10.71458912],\n", - " ...,\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ]],\n", - "\n", - " [[11.00003052, 11.35529137, 9.56384087 , ..., 10.06063652, 10.16322994, 9.43149185 ],\n", - " [10.44556236, 9.81155300 , 5.49400425 , ..., 10.84116268, 11.02734756, 9.42253590 ],\n", - " [10.23620510, 10.23321152, 7.99466419 , ..., 10.93381882, 10.28395081, 10.00841141],\n", - " ...,\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ]],\n", - "\n", - " [[14.10379314, 14.50375748, 12.87825108, ..., 12.68065739, 12.62359715, 11.53773308],\n", - " [13.84964657, 13.15079498, 10.67198086, ..., 13.24875164, 13.45796680, 10.97363472],\n", - " [13.19808197, 13.23482990, 11.65900230, ..., 12.70375061, 12.41395664, 11.88668156],\n", - " ...,\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ]],\n", - "\n", - " [[12.85676289, 12.82410812, 11.67961884, ..., 14.12018299, 15.14850044, 13.80065727],\n", - " [13.19532776, 13.40243340, 11.43492508, ..., 13.29144669, 13.70278549, 12.67841339],\n", - " [13.02196407, 12.92111111, 11.43998623, ..., 12.71165752, 13.16518497, 11.92028046],\n", - " ...,\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. 
]],\n", - "\n", - " [[12.90661621, 13.40162563, 13.01394463, ..., 13.84056377, 14.11240959, 13.21227264],\n", - " [14.06642914, 14.06922340, 12.52955723, ..., 13.55829811, 13.60157204, 13.50268650],\n", - " [12.58881378, 12.94780254, 11.75758171, ..., 14.29055786, 14.12165928, 13.02695847],\n", - " ...,\n", - " [16.20891571, 16.42290306, 14.94398117, ..., 12.86083794, 12.63515949, 11.67581463],\n", - " [13.49345875, 14.14656067, 13.16498375, ..., 13.28024578, 13.40956783, 12.70357513],\n", - " [15.56265163, 15.92387581, 14.90643024, ..., 13.45694065, 13.44703197, 12.81099033]]])\n", - "audio len: Tensor(shape=[5], dtype=int64, place=CUDAPinnedPlace, stop_gradient=True,\n", - " [184, 194, 196, 204, 207])\n" - ] - } - ], - "source": [ - "keep_transcription_text=False\n", - "batch_reader = create_dataloader(\n", - " manifest_path=args.infer_manifest,\n", - " unit_type=args.unit_type,\n", - " vocab_filepath=args.vocab_path,\n", - " mean_std_filepath=args.mean_std_path,\n", - " spm_model_prefix=args.spm_model_prefix,\n", - " augmentation_config='{}',\n", - " max_input_len=27.0,\n", - " min_input_len=0.0,\n", - " max_output_len=float('inf'),\n", - " min_output_len=0.0,\n", - " max_output_input_ratio=float('inf'),\n", - " min_output_input_ratio=0.0,\n", - " stride_ms=10.0,\n", - " window_ms=20.0,\n", - " max_freq=None,\n", - " specgram_type=args.specgram_type,\n", - " feat_dim=args.feat_dim,\n", - " delta_delta=args.delta_delta,\n", - " use_dB_normalization=True,\n", - " random_seed=0,\n", - " keep_transcription_text=keep_transcription_text,\n", - " is_training=False,\n", - " batch_size=args.num_samples,\n", - " num_workers=0,\n", - " sortagrad=True,\n", - " shuffle_method=None,\n", - " dist=False)\n", - "for idx, (audio, audio_len, text, text_len) in enumerate(batch_reader()):\n", - " print('test:', text)\n", - " print(\"test raw:\", ''.join( chr(i) for i in text[0][:int(text_len[0])] ))\n", - " print(\"test raw:\", ''.join( chr(i) for i in text[-1][:int(text_len[-1])] ))\n", - " print('test len:', text_len)\n", - " print('audio:', audio)\n", - " print('audio len:', audio_len)\n", - " break" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "competitive-mounting", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "knowing-military", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{'num_samples': 1, 'specgram_type': 'fbank', 'feat_dim': 80, 'delta_delta': False, 'stride_ms': 10.0, 'window_ms': 25.0, 'sample_rate': 16000, 'manifest_path': 'examples/aishell/s1/data/manifest.train', 'output_path': 'examples/aishell/s1/data/mean_std.npz'}\n" - ] - } - ], - "source": [ - "import sys\n", - "import argparse\n", - "import functools\n", - "from deepspeech.utils.utility import add_arguments, print_arguments\n", - "parser = argparse.ArgumentParser(description=__doc__)\n", - "add_arg = functools.partial(add_arguments, argparser=parser)\n", - "\n", - "add_arg('num_samples', int, 1, \"# of samples to for statistics.\")\n", - "add_arg('specgram_type', str, 'fbank',\n", - " \"Audio feature type. 
Options: linear, mfcc, fbank.\",\n", - " choices=['linear', 'mfcc', 'fbank'])\n", - "add_arg('feat_dim', int, 80, \"Audio feature dim.\")\n", - "add_arg('delta_delta', bool, False,\"Audio feature with delta delta.\")\n", - "add_arg('stride_ms', float, 10.0, \"stride length in ms.\")\n", - "add_arg('window_ms', float, 25.0, \"stride length in ms.\")\n", - "add_arg('sample_rate', int, 16000, \"target sample rate.\")\n", - "add_arg('manifest_path', str,\n", - " 'examples/aishell/s1/data/manifest.train',\n", - " \"Filepath of manifest to compute normalizer's mean and stddev.\")\n", - "add_arg('output_path', str,\n", - " 'examples/aishell/s1/data/mean_std.npz',\n", - " \"Filepath of write mean and stddev to (.npz).\")\n", - "args = parser.parse_args([])\n", - "print(vars(args))\n" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "unnecessary-province", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "from deepspeech.frontend.augmentor.augmentation import AugmentationPipeline\n", - "from deepspeech.frontend.featurizer.audio_featurizer import AudioFeaturizer\n", - "from deepspeech.frontend.normalizer import FeatureNormalizer\n", - "from deepspeech.frontend.audio import AudioSegment\n", - "from deepspeech.frontend.utility import load_cmvn\n", - "from deepspeech.frontend.utility import read_manifest\n", - "\n", - "\n", - "\n", - "def mean(args):\n", - " augmentation_pipeline = AugmentationPipeline('{}')\n", - " audio_featurizer = AudioFeaturizer(\n", - " specgram_type=args.specgram_type,\n", - " feat_dim=args.feat_dim,\n", - " delta_delta=args.delta_delta,\n", - " stride_ms=args.stride_ms,\n", - " window_ms=args.window_ms,\n", - " n_fft=None,\n", - " max_freq=None,\n", - " target_sample_rate=args.sample_rate,\n", - " use_dB_normalization=True,\n", - " target_dB=-20,\n", - " dither=0.0)\n", - "\n", - " def augment_and_featurize(audio_segment):\n", - " augmentation_pipeline.transform_audio(audio_segment)\n", - " return audio_featurizer.featurize(audio_segment)\n", - "\n", - " normalizer = FeatureNormalizer(\n", - " mean_std_filepath=None,\n", - " manifest_path=args.manifest_path,\n", - " featurize_func=augment_and_featurize,\n", - " num_samples=args.num_samples)\n", - " normalizer.write_to_file(args.output_path)\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "id": "interested-camping", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[0.00164795 0.00274658 0.00234985 ... 0.00177002 0.00177002 0.00186157]\n", - "[54. 90. 77. ... 58. 58. 61.]\n", - "29746\n", - "fbank\n", - "[54 90 77 ... 58 58 61] int16\n", - "(184, 80) float64\n", - "[[10.61737914 10.07708936 5.32487528 ... 10.2481839 8.89699394\n", - " 7.80671114]\n", - " [11.0440077 10.3180721 6.30866128 ... 11.23730926 10.35838868\n", - " 8.83860079]\n", - " [10.26930555 9.99636567 7.3296638 ... 10.45131595 9.69295303\n", - " 7.96168491]\n", - " ...\n", - " [10.14497345 9.88674207 6.73801138 ... 10.21580627 9.00343472\n", - " 8.75616521]\n", - " [ 9.97745961 9.67949736 7.90660425 ... 10.22436653 9.59456493\n", - " 7.69287184]\n", - " [ 6.47357374 7.76335491 7.75765843 ... 9.96522077 9.6226365\n", - " 8.16007108]]\n", - "(184, 80) float64\n", - "[[10.61737914 10.07708936 5.32487528 ... 10.2481839 8.89699394\n", - " 7.80671114]\n", - " [11.0440077 10.3180721 6.30866128 ... 11.23730926 10.35838868\n", - " 8.83860079]\n", - " [10.26930555 9.99636567 7.3296638 ... 
10.45131595 9.69295303\n", - " 7.96168491]\n", - " ...\n", - " [10.14497345 9.88674207 6.73801138 ... 10.21580627 9.00343472\n", - " 8.75616521]\n", - " [ 9.97745961 9.67949736 7.90660425 ... 10.22436653 9.59456493\n", - " 7.69287184]\n", - " [ 6.47357374 7.76335491 7.75765843 ... 9.96522077 9.6226365\n", - " 8.16007108]]\n" - ] - } - ], - "source": [ - "wav='/workspace/DeepSpeech-2.x/examples/aishell/s1/../../..//examples/dataset/aishell/data_aishell/wav/test/S0916/BAC009S0916W0426.wav'\n", - "test='祝可爱的你'\n", - "audio_featurizer = AudioFeaturizer(\n", - " specgram_type=args.specgram_type,\n", - " feat_dim=args.feat_dim,\n", - " delta_delta=args.delta_delta,\n", - " stride_ms=args.stride_ms,\n", - " window_ms=args.window_ms,\n", - " n_fft=None,\n", - " max_freq=None,\n", - " target_sample_rate=args.sample_rate,\n", - " use_dB_normalization=False,\n", - " target_dB=-20,\n", - " dither=0.0)\n", - "samples = AudioSegment.from_file(wav)\n", - "print(samples._samples)\n", - "print(samples._samples * 2**15)\n", - "print(len(samples._samples))\n", - "feat = audio_featurizer.featurize(samples, False, False)\n", - "feat = feat.T\n", - "print(feat.shape, feat.dtype)\n", - "print(feat)\n", - "\n", - "from python_speech_features import logfbank\n", - "max_freq = args.sample_rate / 2\n", - "fbank_feat = logfbank(\n", - " signal=samples.to('int16'),\n", - " samplerate=args.sample_rate,\n", - " winlen=0.001 * args.window_ms,\n", - " winstep=0.001 * args.stride_ms,\n", - " nfilt=args.feat_dim,\n", - " nfft=512,\n", - " lowfreq=20,\n", - " highfreq=max_freq,\n", - " preemph=0.97,\n", - " dither=0.0,\n", - " wintype='povey')\n", - "print(fbank_feat.shape, fbank_feat.dtype)\n", - "print(fbank_feat)" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "id": "numeric-analyst", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "(184, 160)\n", - "[ 8.59522397 8.43148278 8.36414052 8.45487173 8.31761643 8.04843683\n", - " 8.01683696 7.6574614 7.95521932 8.22945157 10.20138275 9.0447775\n", - " 9.14763398 9.18184349 9.03801065 9.04852307 8.67706728 8.71894271\n", - " 9.54553655 9.19535135 8.76413076 8.47828946 8.52586143 8.49469288\n", - " 8.72461247 8.28562879 8.11581393 7.99922156 7.91023364 8.04142296\n", - " 7.89762773 7.76257636 8.32043745 8.01592886 8.34109665 8.90115454\n", - " 8.48246945 7.98658664 8.05745122 8.11384088 8.18864479 8.8091827\n", - " 11.8067711 13.25258218 14.44311795 13.90515283 14.00120623 13.99801252\n", - " 13.81595394 13.6379904 13.3574897 13.14933334 12.96518543 13.02601156\n", - " 12.70246737 12.54410834 12.15615068 11.86574681 11.67497882 10.79645481\n", - " 10.48150035 10.03758575 10.05637027 9.92891308 10.06923218 12.43382431\n", - " 12.71428321 14.33135052 13.94470959 14.29188291 14.11483993 14.03496606\n", - " 13.78167331 13.66701466 14.40308625 14.73934137 15.09569382 14.89565815\n", - " 15.10519995 14.94383582 15.03275563 15.42194679 15.29219967 15.41602274\n", - " 15.39242545 15.76836177 16.259222 16.47777231 17.03366795 17.46165793\n", - " 17.52596217 17.78844031 17.99878075 18.11446843 17.95761578 17.99900337\n", - " 17.86282737 17.7290163 17.47686504 17.43425516 17.07750485 16.64395242\n", - " 15.68217043 14.90058399 14.45645737 14.0405463 14.89549542 16.00405781\n", - " 16.27301689 16.37572895 16.31219037 16.31765447 16.44819716 16.36281089\n", - " 16.24932823 15.79302555 14.76361963 13.95761882 13.48917053 13.45543501\n", - " 13.00091327 13.13854248 13.74596395 13.86340629 14.00656109 13.77432101\n", - " 
13.64267001 13.35742634 13.23042234 12.97916104 12.80694468 12.70005006\n", - " 13.2802483 13.22644525 13.14579624 13.02536594 13.36511022 11.37167205\n", - " 12.11598045 12.47619798 12.83885973 11.63880287 11.42083924 11.08747705\n", - " 11.04093403 11.11263149 10.74353319 10.58734669 10.46180738 10.34157335\n", - " 9.63131146 9.70582692 9.29059204 8.94583657 8.66065094 8.46799095\n", - " 8.25064103 8.30239167 8.19463371 8.12104567 8.02731234 8.06412715\n", - " 7.84889951 7.73090283 7.74119562 7.85444657 7.80717312 7.7129933\n", - " 7.84087442 7.77907788 7.60660865 7.55051479 7.458385 7.496416\n", - " 7.69519793 7.49086759 7.32199493 8.01617458 7.58525375 7.06661122\n", - " 6.94653756 7.19874283 7.28515661 7.17574078]\n", - "(184,)\n", - "(184,)\n", - "[1.48370471 1.52174523 1.46984238 1.67010478 1.88757689 1.68825992\n", - " 1.74270259 1.55497318 1.29200818 1.68446481 1.88133219 1.97138928\n", - " 2.15910096 2.3149476 1.9820247 2.07694378 1.93498835 2.01493974\n", - " 2.39156824 2.02396518 1.69586449 1.63808752 1.64020228 1.43573473\n", - " 1.93092656 1.37466294 1.34704929 1.59600739 1.03960441 1.45276496\n", - " 1.59360131 1.57466343 1.89491479 1.79333746 1.32701974 1.49441767\n", - " 1.51466756 1.63497989 1.42858074 1.51135396 1.61077201 1.81066387\n", - " 1.83367783 2.3507094 2.87885378 3.26231227 2.1313117 1.98557548\n", - " 1.99105426 2.26150533 2.34298751 2.44621608 2.39201042 2.41226503\n", - " 2.5142992 3.03777565 2.81592295 2.75117863 2.78324175 2.68819666\n", - " 2.8945782 2.84464168 2.680973 2.78397395 2.47996808 1.71829563\n", - " 1.60636949 1.65992483 1.38122631 1.74831825 2.16006884 1.68076185\n", - " 1.69329487 1.44929837 1.63763312 1.80101076 2.01166253 2.03254244\n", - " 1.9583913 2.04542255 2.00859694 2.16600883 2.16095629 1.97541122\n", - " 2.13807632 2.06386436 2.2154187 2.84205688 2.54862449 2.64321545\n", - " 2.6805773 2.52300146 2.53209001 2.54682059 2.4521937 2.43155532\n", - " 2.42571275 2.23421289 2.23164529 2.23597192 2.14215121 2.10406703\n", - " 2.07962874 1.88506161 1.80092372 1.61156092 1.77426835 1.98765563\n", - " 2.0356793 1.87964187 1.779513 1.87187681 1.76463632 1.70978684\n", - " 1.76471778 1.75604749 1.62792552 1.73929352 1.6887024 1.8677704\n", - " 2.17342368 2.08166072 2.14567453 2.15936953 2.18351006 2.41010388\n", - " 2.26101752 2.25468001 2.23739715 2.15395133 2.04547813 1.92038843\n", - " 1.85491264 1.91905927 2.16709365 1.99924152 2.1850471 2.55461622\n", - " 2.72476673 1.69682926 1.73249614 2.06992695 2.1210591 1.66854454\n", - " 1.63907505 1.32203822 1.38992558 1.2436937 1.17932877 1.02963653\n", - " 1.26085036 1.16997132 1.09339504 1.14188689 1.18675772 1.31859788\n", - " 1.21746591 1.3872131 1.26095274 1.34885761 1.46633543 1.64506975\n", - " 1.36013821 1.45574721 1.43766588 1.65119054 1.57163772 1.55082968\n", - " 1.29413316 1.38351736 1.64234673 1.57186432 1.45381083 1.71204761\n", - " 1.51828607 1.30639985 1.32928395 1.49004237 1.6057589 1.81815735\n", - " 1.67784678 1.72180861 1.60703743 1.64850255]\n" - ] - } - ], - "source": [ - "a = np.hstack([feat, feat])\n", - "print(a.shape)\n", - "m = np.mean(a, axis=1)\n", - "print(m)\n", - "print(m.shape)\n", - "std = np.std(a, axis=1)\n", - "print(std.shape)\n", - "print(std)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "nonprofit-potato", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 18, - "id": "hispanic-ethics", - "metadata": {}, - "outputs": [], - "source": [ - "import torch\n", - "import torchaudio\n", - 
"import torchaudio.compliance.kaldi as kaldi\n", - "import torchaudio.sox_effects as sox_effects\n", - "from torch.nn.utils.rnn import pad_sequence\n", - "torchaudio.set_audio_backend(\"sox\")" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "id": "changing-calvin", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "torch.Size([1, 29746])\n", - "tensor([[54., 90., 77., ..., 58., 58., 61.]])\n", - "(184, 80)\n", - "[[10.617376 10.077089 5.3248763 ... 10.248186 8.896992 7.8067265]\n", - " [11.044004 10.318072 6.3086634 ... 11.237308 10.358393 8.838616 ]\n", - " [10.269302 9.9963665 7.3296647 ... 10.451319 9.692951 7.9617033]\n", - " ...\n", - " [10.14497 9.886743 6.738012 ... 10.215809 9.0034275 8.756177 ]\n", - " [ 9.977456 9.679498 7.9066052 ... 10.224365 9.594568 7.6928873]\n", - " [ 6.4735703 7.7633557 7.7576594 ... 9.965221 9.622637 8.160085 ]]\n", - "-----------\n", - "[0.00164795 0.00274658 0.00234985 ... 0.00177002 0.00177002 0.00186157]\n", - "(184, 80)\n", - "[[-10.177039 -10.717326 -15.46954 ... -10.546229 -11.897424 -12.987689]\n", - " [ -9.750411 -10.476343 -14.485752 ... -9.557108 -10.436023 -11.955799]\n", - " [-10.525113 -10.798049 -13.46475 ... -10.343097 -11.101464 -12.832712]\n", - " ...\n", - " [-10.649446 -10.907673 -14.056403 ... -10.578607 -11.790988 -12.038239]\n", - " [-10.816959 -11.114918 -12.88781 ... -10.570049 -11.199847 -13.101528]\n", - " [-14.320845 -13.03106 -13.036756 ... -10.829194 -11.171779 -12.634331]]\n", - "**************\n", - "[0.00164795 0.00274658 0.00234985 ... 0.00177002 0.00177002 0.00186157]\n", - "[54. 90. 77. ... 58. 58. 61.] float32\n", - "(184, 80)\n", - "[[10.617376 10.077089 5.3248763 ... 10.248186 8.896992 7.8067265]\n", - " [11.044004 10.318072 6.3086634 ... 11.237308 10.358393 8.838616 ]\n", - " [10.269302 9.9963665 7.3296647 ... 10.451319 9.692951 7.9617033]\n", - " ...\n", - " [10.14497 9.886743 6.738012 ... 10.215809 9.0034275 8.756177 ]\n", - " [ 9.977456 9.679498 7.9066052 ... 10.224365 9.594568 7.6928873]\n", - " [ 6.4735703 7.7633557 7.7576594 ... 9.965221 9.622637 8.160085 ]]\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages/ipykernel_launcher.py:1: UserWarning: torchaudio.backend.sox_backend.load_wav has been deprecated and will be removed from 0.9.0 release. 
Please use \"torchaudio.load\".\n", - " \"\"\"Entry point for launching an IPython kernel.\n" - ] - } - ], - "source": [ - "waveform, sample_rate = torchaudio.load_wav(wav)\n", - "print(waveform.shape)\n", - "print(waveform)\n", - "mat = kaldi.fbank(\n", - " waveform,\n", - " num_mel_bins=80,\n", - " frame_length=25,\n", - " frame_shift=10,\n", - " dither=0,\n", - " energy_floor=0.0,\n", - " sample_frequency=sample_rate\n", - " )\n", - "mat = mat.detach().numpy()\n", - "print(mat.shape)\n", - "print(mat)\n", - "\n", - "print('-----------')\n", - "print(samples._samples)\n", - "aud = torch.tensor(samples._samples).view(1, -1)\n", - "mat = kaldi.fbank(\n", - " aud,\n", - " num_mel_bins=80,\n", - " frame_length=25,\n", - " frame_shift=10,\n", - " dither=0,\n", - " energy_floor=0.0,\n", - " sample_frequency=sample_rate\n", - " )\n", - "mat = mat.detach().numpy()\n", - "print(mat.shape)\n", - "print(mat)\n", - "\n", - "print('**************')\n", - "print(samples._samples)\n", - "tmp = samples.to('int16').astype('float32')\n", - "print(tmp, tmp.dtype)\n", - "aud = torch.tensor(tmp).view(1, -1)\n", - "mat = kaldi.fbank(\n", - " aud,\n", - " num_mel_bins=80,\n", - " frame_length=25,\n", - " frame_shift=10,\n", - " dither=0,\n", - " energy_floor=0.0,\n", - " sample_frequency=sample_rate\n", - " )\n", - "mat = mat.detach().numpy()\n", - "print(mat.shape)\n", - "print(mat)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "buried-dependence", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "silver-printing", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 20, - "id": "outer-space", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "(29746,)\n", - "[54 90 77 ... 58 58 61]\n", - "(184, 80)\n", - "[[10.61737914 10.07708936 5.32487528 ... 10.2481839 8.89699394\n", - " 7.80671114]\n", - " [11.0440077 10.3180721 6.30866128 ... 11.23730926 10.35838868\n", - " 8.83860079]\n", - " [10.26930555 9.99636567 7.3296638 ... 10.45131595 9.69295303\n", - " 7.96168491]\n", - " ...\n", - " [10.14497345 9.88674207 6.73801138 ... 10.21580627 9.00343472\n", - " 8.75616521]\n", - " [ 9.97745961 9.67949736 7.90660425 ... 10.22436653 9.59456493\n", - " 7.69287184]\n", - " [ 6.47357374 7.76335491 7.75765843 ... 9.96522077 9.6226365\n", - " 8.16007108]]\n", - "(184, 13)\n", - "[[ 14.73775998 -13.30393391 5.85974818 ... -3.42359739 2.82785335\n", - " 8.86862748]\n", - " [ 15.31274834 -13.33671651 4.06537223 ... 8.15970347 2.15934846\n", - " 6.78353115]\n", - " [ 13.82218765 -13.39296404 6.8304843 ... 2.55332563 8.86724453\n", - " -0.05919222]\n", - " ...\n", - " [ 13.5837844 -13.42104892 11.21222354 ... 4.81477718 1.66627505\n", - " 5.59045842]\n", - " [ 13.75757034 -13.92626662 13.06074011 ... -0.46694046 5.56214833\n", - " 12.0785146 ]\n", - " [ 11.92813809 -15.9169855 8.78372271 ... 
-1.42014277 -3.25768086\n", - " 0.88337965]]\n" - ] - } - ], - "source": [ - "from python_speech_features import mfcc\n", - "from python_speech_features import delta\n", - "from python_speech_features import logfbank\n", - "import scipy.io.wavfile as iowav\n", - "\n", - "(rate,sig) = iowav.read(wav)\n", - "print(sig.shape)\n", - "print(sig)\n", - "\n", - "# note that generally nfilt=40 is used for speech recognition\n", - "fbank_feat = logfbank(sig,nfilt=80,lowfreq=20,dither=0,wintype='povey')\n", - "print(fbank_feat.shape)\n", - "print(fbank_feat)\n", - "\n", - "# the computed fbank coefficents of english.wav with dimension [110,23]\n", - "# [ 12.2865\t12.6906\t13.1765\t15.714\t16.064\t15.7553\t16.5746\t16.9205\t16.6472\t16.1302\t16.4576\t16.7326\t16.8864\t17.7215\t18.88\t19.1377\t19.1495\t18.6683\t18.3886\t20.3506\t20.2772\t18.8248\t18.1899\n", - "# 11.9198\t13.146\t14.7215\t15.8642\t17.4288\t16.394\t16.8238\t16.1095\t16.4297\t16.6331\t16.3163\t16.5093\t17.4981\t18.3429\t19.6555\t19.6263\t19.8435\t19.0534\t19.001\t20.0287\t19.7707\t19.5852\t19.1112\n", - "# ...\n", - "# ...\n", - "# the same with that using kaldi commands: compute-fbank-feats --dither=0.0\n", - "\n", - "mfcc_feat = mfcc(sig,dither=0,useEnergy=True,wintype='povey')\n", - "print(mfcc_feat.shape)\n", - "print(mfcc_feat)\n", - "\n", - "# the computed mfcc coefficents of english.wav with dimension [110,13]\n", - "# [ 17.1337\t-23.3651\t-7.41751\t-7.73686\t-21.3682\t-8.93884\t-3.70843\t4.68346\t-16.0676\t12.782\t-7.24054\t8.25089\t10.7292\n", - "# 17.1692\t-23.3028\t-5.61872\t-4.0075\t-23.287\t-20.6101\t-5.51584\t-6.15273\t-14.4333\t8.13052\t-0.0345329\t2.06274\t-0.564298\n", - "# ...\n", - "# ...\n", - "# the same with that using kaldi commands: compute-mfcc-feats --dither=0.0" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "id": "sporting-school", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "(184, 80)\n", - "[[-10.17703627 -10.71732606 -15.46954014 ... -10.54623152 -11.89742148\n", - " -12.98770428]\n", - " [ -9.75040771 -10.47634331 -14.48575413 ... -9.55710616 -10.43602673\n", - " -11.95581463]\n", - " [-10.52510987 -10.79804975 -13.46475161 ... -10.34309947 -11.10146239\n", - " -12.83273051]\n", - " ...\n", - " [-10.64944197 -10.90767335 -14.05640404 ... -10.57860915 -11.7909807\n", - " -12.03825021]\n", - " [-10.8169558 -11.11491806 -12.88781116 ... -10.57004889 -11.19985048\n", - " -13.10154358]\n", - " [-14.32084168 -13.03106051 -13.03675699 ... -10.82919465 -11.17177892\n", - " -12.63434434]]\n", - "(184, 13)\n", - "[[ -6.05665544 -13.30393391 5.85974818 ... -3.42359739 2.82785335\n", - " 8.86862748]\n", - " [ -5.48166707 -13.33671651 4.06537223 ... 8.15970347 2.15934846\n", - " 6.78353115]\n", - " [ -6.97222776 -13.39296404 6.8304843 ... 2.55332563 8.86724453\n", - " -0.05919222]\n", - " ...\n", - " [ -7.21063102 -13.42104892 11.21222354 ... 4.81477718 1.66627505\n", - " 5.59045842]\n", - " [ -7.03684508 -13.92626662 13.06074011 ... -0.46694046 5.56214833\n", - " 12.0785146 ]\n", - " [ -8.86627732 -15.9169855 8.78372271 ... 
-1.42014277 -3.25768086\n", - " 0.88337965]]\n" - ] - } - ], - "source": [ - "fbank_feat = logfbank(samples._samples,nfilt=80,lowfreq=20,dither=0,wintype='povey')\n", - "print(fbank_feat.shape)\n", - "print(fbank_feat)\n", - "\n", - "mfcc_feat = mfcc(samples._samples,dither=0,useEnergy=True,wintype='povey')\n", - "print(mfcc_feat.shape)\n", - "print(mfcc_feat)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "restricted-license", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "specialized-threat", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.0" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/.notebook/hack_api_test.ipynb b/.notebook/hack_api_test.ipynb deleted file mode 100644 index f653084e6..000000000 --- a/.notebook/hack_api_test.ipynb +++ /dev/null @@ -1,290 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "breeding-haven", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "/home/ssd5/zhanghui/DeepSpeech2.x\n" - ] - }, - { - "data": { - "text/plain": [ - "'/home/ssd5/zhanghui/DeepSpeech2.x'" - ] - }, - "execution_count": 1, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "%cd ..\n", - "%pwd" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "appropriate-theta", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "LICENSE deepspeech examples\t\t requirements.txt tools\r\n", - "README.md docs\t libsndfile-1.0.28\t setup.sh\t utils\r\n", - "README_cn.md env.sh\t libsndfile-1.0.28.tar.gz tests\r\n" - ] - } - ], - "source": [ - "!ls" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "entire-bloom", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/home/ssd5/zhanghui/DeepSpeech2.x/tools/venv/lib/python3.7/site-packages/paddle/fluid/layers/utils.py:26: DeprecationWarning: `np.int` is a deprecated alias for the builtin `int`. To silence this warning, use `int` by itself. Doing this will not modify any behavior and is safe. When replacing `np.int`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. 
If you wish to review your current use, check the release note link for additional information.\n", - "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", - " def convert_to_list(value, n, name, dtype=np.int):\n", - "WARNING:root:override cat of paddle.Tensor if exists or register, remove this when fixed!\n", - "WARNING:root:register user masked_fill to paddle.Tensor, remove this when fixed!\n", - "WARNING:root:register user masked_fill_ to paddle.Tensor, remove this when fixed!\n", - "WARNING:root:register user repeat to paddle.Tensor, remove this when fixed!\n", - "WARNING:root:register user glu to paddle.nn.functional, remove this when fixed!\n", - "WARNING:root:register user GLU to paddle.nn, remove this when fixed!\n", - "WARNING:root:register user ConstantPad2d to paddle.nn, remove this when fixed!\n", - "WARNING:root:override ctc_loss of paddle.nn.functional if exists, remove this when fixed!\n" - ] - } - ], - "source": [ - "from deepspeech.modules import loss" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "governmental-aircraft", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/home/ssd5/zhanghui/DeepSpeech2.x/tools/venv/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n", - " and should_run_async(code)\n" - ] - } - ], - "source": [ - "import paddle" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "proprietary-disaster", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - " paddle.VarBase>" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "paddle.Tensor.repeat" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "first-diagram", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "paddle.Tensor.size" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "intelligent-david", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "paddle.Tensor.cat" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "bronze-tenant", - "metadata": {}, - "outputs": [], - "source": [ - "a = paddle.to_tensor([12,32, 10, 12, 123,32 ,4])" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "balanced-bearing", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "7" - ] - }, - "execution_count": 13, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "a.size" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "id": "extreme-republic", - "metadata": {}, - "outputs": [], - "source": [ - "def size(xs: paddle.Tensor, *args: int) -> paddle.Tensor:\n", - " nargs = len(args)\n", - " assert (nargs <= 1)\n", - " s = paddle.shape(xs)\n", - " if nargs == 1:\n", - " return s[args[0]]\n", - " else:\n", - " return s\n", - "\n", - "# logger.warn(\n", - "# \"override size of paddle.Tensor if exists or register, remove this when fixed!\"\n", - "# )\n", - 
"paddle.Tensor.size = size" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "id": "gross-addiction", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Tensor(shape=[1], dtype=int32, place=CPUPlace, stop_gradient=True,\n", - " [7])" - ] - }, - "execution_count": 21, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "a.size(0)\n", - "a.size()" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "id": "adverse-dining", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Tensor(shape=[1], dtype=int32, place=CPUPlace, stop_gradient=True,\n", - " [7])" - ] - }, - "execution_count": 22, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "a.size()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "popular-potato", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.0" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/.notebook/jit_infer.ipynb b/.notebook/jit_infer.ipynb deleted file mode 100644 index ba50d8743..000000000 --- a/.notebook/jit_infer.ipynb +++ /dev/null @@ -1,672 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "/home/ssd5/zhanghui/DeepSpeech2.x\n" - ] - }, - { - "data": { - "text/plain": [ - "'/home/ssd5/zhanghui/DeepSpeech2.x'" - ] - }, - "execution_count": 1, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "%cd ..\n", - "%pwd" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2021-03-26 02:55:23,873 - WARNING - register user softmax to paddle, remove this when fixed!\n", - "2021-03-26 02:55:23,875 - WARNING - register user sigmoid to paddle, remove this when fixed!\n", - "2021-03-26 02:55:23,875 - WARNING - register user relu to paddle, remove this when fixed!\n", - "2021-03-26 02:55:23,876 - WARNING - override cat of paddle if exists or register, remove this when fixed!\n", - "2021-03-26 02:55:23,876 - WARNING - override eq of paddle.Tensor if exists or register, remove this when fixed!\n", - "2021-03-26 02:55:23,877 - WARNING - override contiguous of paddle.Tensor if exists or register, remove this when fixed!\n", - "2021-03-26 02:55:23,877 - WARNING - override size of paddle.Tensor (`to_static` do not process `size` property, maybe some `paddle` api dependent on it), remove this when fixed!\n", - "2021-03-26 02:55:23,878 - WARNING - register user view to paddle.Tensor, remove this when fixed!\n", - "2021-03-26 02:55:23,878 - WARNING - register user view_as to paddle.Tensor, remove this when fixed!\n", - "2021-03-26 02:55:23,879 - WARNING - register user masked_fill to paddle.Tensor, remove this when fixed!\n", - "2021-03-26 02:55:23,880 - WARNING - register user masked_fill_ to paddle.Tensor, remove this when fixed!\n", - "2021-03-26 02:55:23,880 - WARNING - register user fill_ to paddle.Tensor, remove this when fixed!\n", - "2021-03-26 02:55:23,881 - WARNING - register user repeat to paddle.Tensor, remove this 
when fixed!\n", - "2021-03-26 02:55:23,881 - WARNING - register user softmax to paddle.Tensor, remove this when fixed!\n", - "2021-03-26 02:55:23,882 - WARNING - register user sigmoid to paddle.Tensor, remove this when fixed!\n", - "2021-03-26 02:55:23,882 - WARNING - register user relu to paddle.Tensor, remove this when fixed!\n", - "2021-03-26 02:55:23,883 - WARNING - register user glu to paddle.nn.functional, remove this when fixed!\n", - "2021-03-26 02:55:23,883 - WARNING - override ctc_loss of paddle.nn.functional if exists, remove this when fixed!\n", - "2021-03-26 02:55:23,884 - WARNING - register user GLU to paddle.nn, remove this when fixed!\n", - "2021-03-26 02:55:23,884 - WARNING - register user ConstantPad2d to paddle.nn, remove this when fixed!\n", - "/home/ssd5/zhanghui/DeepSpeech2.x/tools/venv-dev/lib/python3.7/site-packages/scipy/fftpack/__init__.py:103: DeprecationWarning: The module numpy.dual is deprecated. Instead of using dual, use the functions directly from numpy or scipy.\n", - " from numpy.dual import register_func\n", - "/home/ssd5/zhanghui/DeepSpeech2.x/tools/venv-dev/lib/python3.7/site-packages/scipy/special/orthogonal.py:81: DeprecationWarning: `np.int` is a deprecated alias for the builtin `int`. To silence this warning, use `int` by itself. Doing this will not modify any behavior and is safe. When replacing `np.int`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. If you wish to review your current use, check the release note link for additional information.\n", - "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", - " from numpy import (exp, inf, pi, sqrt, floor, sin, cos, around, int,\n" - ] - } - ], - "source": [ - "import os\n", - "import time\n", - "import argparse\n", - "import functools\n", - "import paddle\n", - "import numpy as np\n", - "\n", - "from deepspeech.utils.socket_server import warm_up_test\n", - "from deepspeech.utils.socket_server import AsrTCPServer\n", - "from deepspeech.utils.socket_server import AsrRequestHandler\n", - "\n", - "from deepspeech.training.cli import default_argument_parser\n", - "from deepspeech.exps.deepspeech2.config import get_cfg_defaults\n", - "\n", - "from deepspeech.frontend.utility import read_manifest\n", - "from deepspeech.utils.utility import add_arguments, print_arguments\n", - "\n", - "from deepspeech.models.deepspeech2 import DeepSpeech2Model\n", - "from deepspeech.models.deepspeech2 import DeepSpeech2InferModel\n", - "from deepspeech.io.dataset import ManifestDataset\n", - "\n", - "\n", - "\n", - "from deepspeech.frontend.utility import read_manifest" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "0.0.0\n", - "e7f28d6c0db54eb9c9a810612300b526687e56a6\n", - "OFF\n", - "OFF\n", - "commit: e7f28d6c0db54eb9c9a810612300b526687e56a6\n", - "None\n", - "0\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/home/ssd5/zhanghui/DeepSpeech2.x/tools/venv-dev/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. 
Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n", - " and should_run_async(code)\n" - ] - }, - { - "data": { - "text/plain": [ - "['__builtins__',\n", - " '__cached__',\n", - " '__doc__',\n", - " '__file__',\n", - " '__loader__',\n", - " '__name__',\n", - " '__package__',\n", - " '__spec__',\n", - " 'commit',\n", - " 'full_version',\n", - " 'istaged',\n", - " 'major',\n", - " 'minor',\n", - " 'mkl',\n", - " 'patch',\n", - " 'rc',\n", - " 'show',\n", - " 'with_mkl']" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "print(paddle.__version__)\n", - "print(paddle.version.commit)\n", - "print(paddle.version.with_mkl)\n", - "print(paddle.version.mkl())\n", - "print(paddle.version.show())\n", - "print(paddle.version.patch)\n", - "dir(paddle.version)" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "data:\n", - " augmentation_config: conf/augmentation.config\n", - " batch_size: 64\n", - " dev_manifest: data/manifest.dev\n", - " keep_transcription_text: False\n", - " max_duration: 27.0\n", - " max_freq: None\n", - " mean_std_filepath: examples/aishell/data/mean_std.npz\n", - " min_duration: 0.0\n", - " n_fft: None\n", - " num_workers: 0\n", - " random_seed: 0\n", - " shuffle_method: batch_shuffle\n", - " sortagrad: True\n", - " specgram_type: linear\n", - " stride_ms: 10.0\n", - " target_dB: -20\n", - " target_sample_rate: 16000\n", - " test_manifest: examples/aishell/data/manifest.test\n", - " train_manifest: data/manifest.train\n", - " use_dB_normalization: True\n", - " vocab_filepath: examples/aishell/data/vocab.txt\n", - " window_ms: 20.0\n", - "decoding:\n", - " alpha: 2.6\n", - " batch_size: 128\n", - " beam_size: 300\n", - " beta: 5.0\n", - " cutoff_prob: 0.99\n", - " cutoff_top_n: 40\n", - " decoding_method: ctc_beam_search\n", - " error_rate_type: cer\n", - " lang_model_path: data/lm/zh_giga.no_cna_cmn.prune01244.klm\n", - " num_proc_bsearch: 10\n", - "model:\n", - " num_conv_layers: 2\n", - " num_rnn_layers: 3\n", - " rnn_layer_size: 1024\n", - " share_rnn_weights: False\n", - " use_gru: True\n", - "training:\n", - " global_grad_clip: 5.0\n", - " lr: 0.0005\n", - " lr_decay: 0.83\n", - " n_epoch: 30\n", - " weight_decay: 1e-06\n", - "----------- Configuration Arguments -----------\n", - "checkpoint_path: examples/aishell/ckpt-loss2e-3-0.83-5/checkpoints/step-11725\n", - "config: examples/aishell/conf/deepspeech2.yaml\n", - "device: gpu\n", - "dump_config: None\n", - "export_path: None\n", - "host_ip: localhost\n", - "host_port: 8086\n", - "model_dir: None\n", - "model_file: examples/aishell/jit.model.pdmodel\n", - "nprocs: 1\n", - "opts: ['data.test_manifest', 'examples/aishell/data/manifest.test', 'data.mean_std_filepath', 'examples/aishell/data/mean_std.npz', 'data.vocab_filepath', 'examples/aishell/data/vocab.txt']\n", - "output: None\n", - "params_file: examples/aishell/jit.model.pdiparams\n", - "speech_save_dir: demo_cache\n", - "use_gpu: False\n", - "warmup_manifest: examples/aishell/data/manifest.test\n", - "------------------------------------------------\n" - ] - } - ], - "source": [ - "parser = default_argument_parser()\n", - "add_arg = functools.partial(add_arguments, argparser=parser)\n", - "add_arg('host_ip', str,\n", - " 'localhost',\n", - " \"Server's IP address.\")\n", - "add_arg('host_port', 
int, 8086, \"Server's IP port.\")\n", - "add_arg('speech_save_dir', str,\n", - " 'demo_cache',\n", - " \"Directory to save demo audios.\")\n", - "add_arg('warmup_manifest', \n", - " str, \n", - " \"examples/aishell/data/manifest.test\", \n", - " \"Filepath of manifest to warm up.\")\n", - "add_arg(\n", - " \"--model_file\",\n", - " type=str,\n", - " default=\"examples/aishell/jit.model.pdmodel\",\n", - " help=\"Model filename, Specify this when your model is a combined model.\"\n", - ")\n", - "add_arg(\n", - " \"--params_file\",\n", - " type=str,\n", - " default=\"examples/aishell/jit.model.pdiparams\",\n", - " help=\n", - " \"Parameter filename, Specify this when your model is a combined model.\"\n", - ")\n", - "add_arg(\n", - " \"--model_dir\",\n", - " type=str,\n", - " default=None,\n", - " help=\n", - " \"Model dir, If you load a non-combined model, specify the directory of the model.\"\n", - ")\n", - "add_arg(\"--use_gpu\",type=bool,default=False, help=\"Whether use gpu.\")\n", - "\n", - "\n", - "args = parser.parse_args(\n", - " \"--checkpoint_path examples/aishell/ckpt-loss2e-3-0.83-5/checkpoints/step-11725 --config examples/aishell/conf/deepspeech2.yaml --opts data.test_manifest examples/aishell/data/manifest.test data.mean_std_filepath examples/aishell/data/mean_std.npz data.vocab_filepath examples/aishell/data/vocab.txt\".split()\n", - ")\n", - "\n", - "\n", - "config = get_cfg_defaults()\n", - "if args.config:\n", - " config.merge_from_file(args.config)\n", - "if args.opts:\n", - " config.merge_from_list(args.opts)\n", - "config.freeze()\n", - "print(config)\n", - "\n", - "args.warmup_manifest = config.data.test_manifest\n", - "\n", - "print_arguments(args)" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "dataset = ManifestDataset(\n", - " config.data.test_manifest,\n", - " config.data.unit_type,\n", - " config.data.vocab_filepath,\n", - " config.data.mean_std_filepath,\n", - " augmentation_config=\"{}\",\n", - " max_duration=config.data.max_duration,\n", - " min_duration=config.data.min_duration,\n", - " stride_ms=config.data.stride_ms,\n", - " window_ms=config.data.window_ms,\n", - " n_fft=config.data.n_fft,\n", - " max_freq=config.data.max_freq,\n", - " target_sample_rate=config.data.target_sample_rate,\n", - " specgram_type=config.data.specgram_type,\n", - " feat_dim=config.data.feat_dim,\n", - " delta_delta=config.data.delat_delta,\n", - " use_dB_normalization=config.data.use_dB_normalization,\n", - " target_dB=config.data.target_dB,\n", - " random_seed=config.data.random_seed,\n", - " keep_transcription_text=True)" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2021-03-26 02:55:57,930 - INFO - [checkpoint] Rank 0: loaded model from examples/aishell/ckpt-loss2e-3-0.83-5/checkpoints/step-11725.pdparams\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "layer summary:\n", - "encoder.conv.conv_in.conv.weight|[32, 1, 41, 11]|14432\n", - "encoder.conv.conv_in.bn.weight|[32]|32\n", - "encoder.conv.conv_in.bn.bias|[32]|32\n", - "encoder.conv.conv_in.bn._mean|[32]|32\n", - "encoder.conv.conv_in.bn._variance|[32]|32\n", - "encoder.conv.conv_stack.0.conv.weight|[32, 32, 21, 11]|236544\n", - "encoder.conv.conv_stack.0.bn.weight|[32]|32\n", - "encoder.conv.conv_stack.0.bn.bias|[32]|32\n", - "encoder.conv.conv_stack.0.bn._mean|[32]|32\n", - 
"encoder.conv.conv_stack.0.bn._variance|[32]|32\n", - "encoder.rnn.rnn_stacks.0.fw_fc.weight|[1312, 3072]|4030464\n", - "encoder.rnn.rnn_stacks.0.fw_bn.weight|[3072]|3072\n", - "encoder.rnn.rnn_stacks.0.fw_bn.bias|[3072]|3072\n", - "encoder.rnn.rnn_stacks.0.fw_bn._mean|[3072]|3072\n", - "encoder.rnn.rnn_stacks.0.fw_bn._variance|[3072]|3072\n", - "encoder.rnn.rnn_stacks.0.bw_fc.weight|[1312, 3072]|4030464\n", - "encoder.rnn.rnn_stacks.0.bw_bn.weight|[3072]|3072\n", - "encoder.rnn.rnn_stacks.0.bw_bn.bias|[3072]|3072\n", - "encoder.rnn.rnn_stacks.0.bw_bn._mean|[3072]|3072\n", - "encoder.rnn.rnn_stacks.0.bw_bn._variance|[3072]|3072\n", - "encoder.rnn.rnn_stacks.0.fw_cell.weight_hh|[3072, 1024]|3145728\n", - "encoder.rnn.rnn_stacks.0.fw_cell.bias_hh|[3072]|3072\n", - "encoder.rnn.rnn_stacks.0.bw_cell.weight_hh|[3072, 1024]|3145728\n", - "encoder.rnn.rnn_stacks.0.bw_cell.bias_hh|[3072]|3072\n", - "encoder.rnn.rnn_stacks.0.fw_rnn.cell.weight_hh|[3072, 1024]|3145728\n", - "encoder.rnn.rnn_stacks.0.fw_rnn.cell.bias_hh|[3072]|3072\n", - "encoder.rnn.rnn_stacks.0.bw_rnn.cell.weight_hh|[3072, 1024]|3145728\n", - "encoder.rnn.rnn_stacks.0.bw_rnn.cell.bias_hh|[3072]|3072\n", - "encoder.rnn.rnn_stacks.1.fw_fc.weight|[2048, 3072]|6291456\n", - "encoder.rnn.rnn_stacks.1.fw_bn.weight|[3072]|3072\n", - "encoder.rnn.rnn_stacks.1.fw_bn.bias|[3072]|3072\n", - "encoder.rnn.rnn_stacks.1.fw_bn._mean|[3072]|3072\n", - "encoder.rnn.rnn_stacks.1.fw_bn._variance|[3072]|3072\n", - "encoder.rnn.rnn_stacks.1.bw_fc.weight|[2048, 3072]|6291456\n", - "encoder.rnn.rnn_stacks.1.bw_bn.weight|[3072]|3072\n", - "encoder.rnn.rnn_stacks.1.bw_bn.bias|[3072]|3072\n", - "encoder.rnn.rnn_stacks.1.bw_bn._mean|[3072]|3072\n", - "encoder.rnn.rnn_stacks.1.bw_bn._variance|[3072]|3072\n", - "encoder.rnn.rnn_stacks.1.fw_cell.weight_hh|[3072, 1024]|3145728\n", - "encoder.rnn.rnn_stacks.1.fw_cell.bias_hh|[3072]|3072\n", - "encoder.rnn.rnn_stacks.1.bw_cell.weight_hh|[3072, 1024]|3145728\n", - "encoder.rnn.rnn_stacks.1.bw_cell.bias_hh|[3072]|3072\n", - "encoder.rnn.rnn_stacks.1.fw_rnn.cell.weight_hh|[3072, 1024]|3145728\n", - "encoder.rnn.rnn_stacks.1.fw_rnn.cell.bias_hh|[3072]|3072\n", - "encoder.rnn.rnn_stacks.1.bw_rnn.cell.weight_hh|[3072, 1024]|3145728\n", - "encoder.rnn.rnn_stacks.1.bw_rnn.cell.bias_hh|[3072]|3072\n", - "encoder.rnn.rnn_stacks.2.fw_fc.weight|[2048, 3072]|6291456\n", - "encoder.rnn.rnn_stacks.2.fw_bn.weight|[3072]|3072\n", - "encoder.rnn.rnn_stacks.2.fw_bn.bias|[3072]|3072\n", - "encoder.rnn.rnn_stacks.2.fw_bn._mean|[3072]|3072\n", - "encoder.rnn.rnn_stacks.2.fw_bn._variance|[3072]|3072\n", - "encoder.rnn.rnn_stacks.2.bw_fc.weight|[2048, 3072]|6291456\n", - "encoder.rnn.rnn_stacks.2.bw_bn.weight|[3072]|3072\n", - "encoder.rnn.rnn_stacks.2.bw_bn.bias|[3072]|3072\n", - "encoder.rnn.rnn_stacks.2.bw_bn._mean|[3072]|3072\n", - "encoder.rnn.rnn_stacks.2.bw_bn._variance|[3072]|3072\n", - "encoder.rnn.rnn_stacks.2.fw_cell.weight_hh|[3072, 1024]|3145728\n", - "encoder.rnn.rnn_stacks.2.fw_cell.bias_hh|[3072]|3072\n", - "encoder.rnn.rnn_stacks.2.bw_cell.weight_hh|[3072, 1024]|3145728\n", - "encoder.rnn.rnn_stacks.2.bw_cell.bias_hh|[3072]|3072\n", - "encoder.rnn.rnn_stacks.2.fw_rnn.cell.weight_hh|[3072, 1024]|3145728\n", - "encoder.rnn.rnn_stacks.2.fw_rnn.cell.bias_hh|[3072]|3072\n", - "encoder.rnn.rnn_stacks.2.bw_rnn.cell.weight_hh|[3072, 1024]|3145728\n", - "encoder.rnn.rnn_stacks.2.bw_rnn.cell.bias_hh|[3072]|3072\n", - "decoder.ctc_lo.weight|[2048, 4300]|8806400\n", - "decoder.ctc_lo.bias|[4300]|4300\n", - "layer has 66 parameters, 
80148012 elements.\n" - ] - } - ], - "source": [ - "model = DeepSpeech2InferModel.from_pretrained(dataset, config,\n", - " args.checkpoint_path)\n", - "model.eval()" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "examples/aishell/jit.model.pdmodel\n", - "examples/aishell/jit.model.pdiparams\n", - "0\n", - "False\n" - ] - } - ], - "source": [ - "\n", - "from paddle.inference import Config\n", - "from paddle.inference import PrecisionType\n", - "from paddle.inference import create_predictor\n", - "\n", - "args.use_gpu=False\n", - "paddle.set_device('cpu')\n", - "\n", - "def init_predictor(args):\n", - " if args.model_dir is not None:\n", - " config = Config(args.model_dir)\n", - " else:\n", - " config = Config(args.model_file, args.params_file)\n", - "\n", - " if args.use_gpu:\n", - " config.enable_use_gpu(memory_pool_init_size_mb=1000, device_id=0)\n", - "# config.enable_tensorrt_engine(precision_mode=PrecisionType.Float32,\n", - "# use_calib_mode=True) # 开启TensorRT预测,精度为fp32,开启int8离线量化\n", - " else:\n", - " # If not specific mkldnn, you can set the blas thread.\n", - " # The thread num should not be greater than the number of cores in the CPU.\n", - " config.set_cpu_math_library_num_threads(1)\n", - " config.enable_mkldnn()\n", - " \n", - " config.enable_memory_optim()\n", - " config.switch_ir_optim(True)\n", - " \n", - " print(config.model_dir())\n", - " print(config.prog_file())\n", - " print(config.params_file())\n", - " print(config.gpu_device_id())\n", - " print(args.use_gpu)\n", - " predictor = create_predictor(config)\n", - " return predictor\n", - "\n", - "def run(predictor, audio, audio_len):\n", - " # copy img data to input tensor\n", - " input_names = predictor.get_input_names()\n", - " for i, name in enumerate(input_names):\n", - " print(\"input:\", i, name)\n", - " \n", - " audio_tensor = predictor.get_input_handle('audio')\n", - " audio_tensor.reshape(audio.shape)\n", - " audio_tensor.copy_from_cpu(audio.copy())\n", - " \n", - " audiolen_tensor = predictor.get_input_handle('audio_len')\n", - " audiolen_tensor.reshape(audio_len.shape)\n", - " audiolen_tensor.copy_from_cpu(audio_len.copy())\n", - "\n", - " output_names = predictor.get_output_names()\n", - " for i, name in enumerate(output_names):\n", - " print(\"output:\", i, name)\n", - "\n", - " # do the inference\n", - " predictor.run()\n", - "\n", - " results = []\n", - " # get out data from output tensor\n", - " output_names = predictor.get_output_names()\n", - " for i, name in enumerate(output_names):\n", - " output_tensor = predictor.get_output_handle(name)\n", - " output_data = output_tensor.copy_to_cpu()\n", - " results.append(output_data)\n", - "\n", - " return results\n", - "\n", - "\n", - "predictor = init_predictor(args)\n", - "\n", - "def file_to_transcript(filename):\n", - " print(filename)\n", - " feature = dataset.process_utterance(filename, \"\")\n", - " audio = np.array([feature[0]]).astype('float32') #[1, D, T]\n", - " audio_len = feature[0].shape[1]\n", - " audio_len = np.array([audio_len]).astype('int64') # [1]\n", - " \n", - " \n", - " i_probs = run(predictor, audio, audio_len)\n", - " print('jit:', i_probs[0], type(i_probs[0]))\n", - " \n", - " audio = paddle.to_tensor(audio)\n", - " audio_len = paddle.to_tensor(audio_len)\n", - " print(audio.shape)\n", - " print(audio_len.shape)\n", - " \n", - " #eouts, eouts_len = model.encoder(audio, audio_len)\n", - " #probs = 
model.decoder.softmax(eouts)\n", - " probs = model.forward(audio, audio_len)\n", - " print('paddle:', probs.numpy())\n", - " \n", - " flag = np.allclose(i_probs[0], probs.numpy())\n", - " print(flag)\n", - " \n", - " return probs\n", - "\n", - "# result_transcript = model.decode(\n", - "# audio,\n", - "# audio_len,\n", - "# vocab_list=dataset.vocab_list,\n", - "# decoding_method=config.decoding.decoding_method,\n", - "# lang_model_path=config.decoding.lang_model_path,\n", - "# beam_alpha=config.decoding.alpha,\n", - "# beam_beta=config.decoding.beta,\n", - "# beam_size=config.decoding.beam_size,\n", - "# cutoff_prob=config.decoding.cutoff_prob,\n", - "# cutoff_top_n=config.decoding.cutoff_top_n,\n", - "# num_processes=config.decoding.num_proc_bsearch)\n", - "# return result_transcript[0]" - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Warm-up Test Case %d: %s 0 /home/ssd5/zhanghui/DeepSpeech2.x/examples/aishell/../dataset/aishell/data_aishell/wav/test/S0764/BAC009S0764W0124.wav\n", - "/home/ssd5/zhanghui/DeepSpeech2.x/examples/aishell/../dataset/aishell/data_aishell/wav/test/S0764/BAC009S0764W0124.wav\n", - "input: 0 audio\n", - "input: 1 audio_len\n", - "output: 0 tmp_75\n", - "jit: [[[8.91786298e-12 4.45648032e-12 3.67572750e-09 ... 8.91767563e-12\n", - " 8.91573707e-12 4.64317296e-08]\n", - " [1.55950222e-15 2.62794089e-14 4.50423509e-12 ... 1.55944271e-15\n", - " 1.55891342e-15 9.99992609e-01]\n", - " [1.24638127e-17 7.61802427e-16 2.93265812e-14 ... 1.24633371e-17\n", - " 1.24587264e-17 1.00000000e+00]\n", - " ...\n", - " [4.37488240e-15 2.43676260e-12 1.98770514e-12 ... 4.37479896e-15\n", - " 4.37354747e-15 1.00000000e+00]\n", - " [3.89334696e-13 1.66754856e-11 1.42900388e-11 ... 3.89329492e-13\n", - " 3.89252270e-13 1.00000000e+00]\n", - " [1.00349985e-10 2.56293708e-10 2.91177582e-10 ... 1.00347876e-10\n", - " 1.00334095e-10 9.99998808e-01]]] \n", - "[1, 161, 522]\n", - "[1]\n", - "paddle: [[[8.91789680e-12 4.45649724e-12 3.67574149e-09 ... 8.91770945e-12\n", - " 8.91577090e-12 4.64319072e-08]\n", - " [1.55950222e-15 2.62794089e-14 4.50423509e-12 ... 1.55944271e-15\n", - " 1.55891342e-15 9.99992609e-01]\n", - " [1.24638599e-17 7.61805339e-16 2.93267472e-14 ... 1.24633842e-17\n", - " 1.24587735e-17 1.00000000e+00]\n", - " ...\n", - " [4.37488240e-15 2.43676737e-12 1.98770514e-12 ... 4.37479896e-15\n", - " 4.37354747e-15 1.00000000e+00]\n", - " [3.89336187e-13 1.66755481e-11 1.42900925e-11 ... 3.89330983e-13\n", - " 3.89253761e-13 1.00000000e+00]\n", - " [1.00349985e-10 2.56293708e-10 2.91177582e-10 ... 1.00347876e-10\n", - " 1.00334095e-10 9.99998808e-01]]]\n", - "False\n" - ] - } - ], - "source": [ - "manifest = read_manifest(args.warmup_manifest)\n", - "\n", - "for idx, sample in enumerate(manifest[:1]):\n", - " print(\"Warm-up Test Case %d: %s\", idx, sample['audio_filepath'])\n", - " start_time = time.time()\n", - " transcript = file_to_transcript(sample['audio_filepath'])\n", - " finish_time = time.time()\n", - "# print(\"Response Time: %f, Transcript: %s\" %\n", - "# (finish_time - start_time, transcript))\n", - " break" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "(1, 161, 522) (1,)\n", - "input: 0 audio\n", - "input: 1 audio_len\n", - "output: 0 tmp_75\n", - "jit: [[[8.91789680e-12 4.45649724e-12 3.67574149e-09 ... 
8.91770945e-12\n", - " 8.91577090e-12 4.64319072e-08]\n", - " [1.55950222e-15 2.62794089e-14 4.50423509e-12 ... 1.55944271e-15\n", - " 1.55891342e-15 9.99992609e-01]\n", - " [1.24638599e-17 7.61805339e-16 2.93267472e-14 ... 1.24633842e-17\n", - " 1.24587735e-17 1.00000000e+00]\n", - " ...\n", - " [4.37488240e-15 2.43676737e-12 1.98770514e-12 ... 4.37479896e-15\n", - " 4.37354747e-15 1.00000000e+00]\n", - " [3.89336187e-13 1.66755481e-11 1.42900925e-11 ... 3.89330983e-13\n", - " 3.89253761e-13 1.00000000e+00]\n", - " [1.00349985e-10 2.56293708e-10 2.91177582e-10 ... 1.00347876e-10\n", - " 1.00334095e-10 9.99998808e-01]]]\n" - ] - } - ], - "source": [ - "def test(filename):\n", - " feature = dataset.process_utterance(filename, \"\")\n", - " audio = np.array([feature[0]]).astype('float32') #[1, D, T]\n", - " audio_len = feature[0].shape[1]\n", - " audio_len = np.array([audio_len]).astype('int64') # [1]\n", - " \n", - " print(audio.shape, audio_len.shape)\n", - "\n", - " i_probs = run(predictor, audio, audio_len)\n", - " print('jit:', i_probs[0])\n", - " return i_probs\n", - " \n", - "probs = test(sample['audio_filepath'])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.0" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} \ No newline at end of file diff --git a/.notebook/layer_norm_test.ipynb b/.notebook/layer_norm_test.ipynb deleted file mode 100644 index eac3566ff..000000000 --- a/.notebook/layer_norm_test.ipynb +++ /dev/null @@ -1,229 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 32, - "id": "academic-surname", - "metadata": {}, - "outputs": [], - "source": [ - "import paddle\n", - "from paddle import nn" - ] - }, - { - "cell_type": "code", - "execution_count": 33, - "id": "fundamental-treasure", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Parameter containing:\n", - "Tensor(shape=[256], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n", - " [1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])\n", - "Parameter containing:\n", - "Tensor(shape=[256], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n", - " [0., 0., 
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])\n" - ] - } - ], - "source": [ - "L = nn.LayerNorm(256, epsilon=1e-12)\n", - "for p in L.parameters():\n", - " print(p)" - ] - }, - { - "cell_type": "code", - "execution_count": 34, - "id": "consolidated-elephant", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n" - ] - }, - { - "cell_type": "code", - "execution_count": 46, - "id": "moderate-noise", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "float64\n" - ] - } - ], - "source": [ - "x = np.random.randn(2, 51, 256)\n", - "print(x.dtype)" - ] - }, - { - "cell_type": "code", - "execution_count": 47, - "id": "cooked-progressive", - "metadata": {}, - "outputs": [], - "source": [ - "y = L(paddle.to_tensor(x, dtype='float32'))" - ] - }, - { - "cell_type": "code", - "execution_count": 48, - "id": "optimum-milwaukee", - "metadata": {}, - "outputs": [], - "source": [ - "import torch" - ] - }, - { - "cell_type": "code", - "execution_count": 49, - "id": "viral-indian", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Parameter containing:\n", - "tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n", - " 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n", - " 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n", - " 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n", - " 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n", - " 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n", - " 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n", - " 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n", - " 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n", - " 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n", - " 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n", - " 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n", - " 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n", - " 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n", - " 1., 1., 1., 1.], requires_grad=True)\n", - "Parameter containing:\n", - "tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 
0.,\n", - " 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n", - " requires_grad=True)\n" - ] - } - ], - "source": [ - "TL = torch.nn.LayerNorm(256, eps=1e-12)\n", - "for p in TL.parameters():\n", - " print(p)" - ] - }, - { - "cell_type": "code", - "execution_count": 50, - "id": "skilled-vietnamese", - "metadata": {}, - "outputs": [], - "source": [ - "ty = TL(torch.tensor(x, dtype=torch.float32))" - ] - }, - { - "cell_type": "code", - "execution_count": 51, - "id": "incorrect-allah", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "False" - ] - }, - "execution_count": 51, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "np.allclose(y.numpy(), ty.detach().numpy())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "prostate-cameroon", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 52, - "id": "governmental-surge", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "True" - ] - }, - "execution_count": 52, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "x = np.random.randn(2, 256)\n", - "y = L(paddle.to_tensor(x, dtype='float32'))\n", - "ty = TL(torch.tensor(x, dtype=torch.float32))\n", - "np.allclose(y.numpy(), ty.detach().numpy())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "confidential-jacket", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.0" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/.notebook/mask_and_masked_fill_test.ipynb b/.notebook/mask_and_masked_fill_test.ipynb deleted file mode 100644 index 265ec536b..000000000 --- a/.notebook/mask_and_masked_fill_test.ipynb +++ /dev/null @@ -1,449 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "primary-organic", - "metadata": {}, - "outputs": [], - "source": [ - "import torch" - ] - }, - { - "cell_type": "code", - "execution_count": 38, - "id": "stopped-semester", - "metadata": {}, - "outputs": [], - "source": [ - "def mask_finished_scores(score: torch.Tensor,\n", - " flag: torch.Tensor) -> torch.Tensor:\n", - " \"\"\"\n", - " If a sequence is finished, we only allow one alive branch. 
This function\n", - " aims to give one branch a zero score and the rest -inf score.\n", - " Args:\n", - " score (torch.Tensor): A real value array with shape\n", - " (batch_size * beam_size, beam_size).\n", - " flag (torch.Tensor): A bool array with shape\n", - " (batch_size * beam_size, 1).\n", - " Returns:\n", - " torch.Tensor: (batch_size * beam_size, beam_size).\n", - " \"\"\"\n", - " beam_size = score.size(-1)\n", - " zero_mask = torch.zeros_like(flag, dtype=torch.bool)\n", - " if beam_size > 1:\n", - " unfinished = torch.cat((zero_mask, flag.repeat([1, beam_size - 1])),\n", - " dim=1)\n", - " finished = torch.cat((flag, zero_mask.repeat([1, beam_size - 1])),\n", - " dim=1)\n", - " else:\n", - " unfinished = zero_mask\n", - " finished = flag\n", - " print(unfinished)\n", - " print(finished)\n", - " score.masked_fill_(unfinished, -float('inf'))\n", - " score.masked_fill_(finished, 0)\n", - " return score" - ] - }, - { - "cell_type": "code", - "execution_count": 58, - "id": "agreed-portuguese", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "tensor([[ True],\n", - " [False]])\n", - "tensor([[-0.8841, 0.7381, -0.9986],\n", - " [ 0.2675, -0.7971, 0.3798]])\n", - "tensor([[ True, True],\n", - " [False, False]])\n" - ] - } - ], - "source": [ - "score = torch.randn((2, 3))\n", - "flag = torch.ones((2, 1), dtype=torch.bool)\n", - "flag[1] = False\n", - "print(flag)\n", - "print(score)\n", - "print(flag.repeat([1, 2]))" - ] - }, - { - "cell_type": "code", - "execution_count": 59, - "id": "clean-aspect", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "tensor([[False, True, True],\n", - " [False, False, False]])\n", - "tensor([[ True, False, False],\n", - " [False, False, False]])\n", - "tensor([[ 0.0000, -inf, -inf],\n", - " [ 0.2675, -0.7971, 0.3798]])\n", - "tensor([[ 0.0000, -inf, -inf],\n", - " [ 0.2675, -0.7971, 0.3798]])\n" - ] - } - ], - "source": [ - "r = mask_finished_scores(score, flag)\n", - "print(r)\n", - "print(score)" - ] - }, - { - "cell_type": "code", - "execution_count": 55, - "id": "thrown-airline", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Tensor(shape=[2, 1], dtype=bool, place=CUDAPlace(0), stop_gradient=True,\n", - " [[True ],\n", - " [False]])\n", - "Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n", - " [[ 2.05994511, 1.87704289, 0.01988174],\n", - " [-0.40165186, 0.77547729, -0.64469045]])\n", - "Tensor(shape=[2, 2], dtype=bool, place=CUDAPlace(0), stop_gradient=True,\n", - " [[True , True ],\n", - " [False, False]])\n" - ] - } - ], - "source": [ - "import paddle\n", - "\n", - "score = paddle.randn((2, 3))\n", - "flag = paddle.ones((2, 1), dtype='bool')\n", - "flag[1] = False\n", - "print(flag)\n", - "print(score)\n", - "print(flag.tile([1, 2]))" - ] - }, - { - "cell_type": "code", - "execution_count": 56, - "id": "internal-patent", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Tensor(shape=[2, 3], dtype=bool, place=CUDAPlace(0), stop_gradient=True,\n", - " [[False, True , True ],\n", - " [False, False, False]])\n", - "Tensor(shape=[2, 3], dtype=bool, place=CUDAPlace(0), stop_gradient=True,\n", - " [[True , False, False],\n", - " [False, False, False]])\n", - "x Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n", - " [[ 2.05994511, 1.87704289, 0.01988174],\n", - " [-0.40165186, 0.77547729, 
-0.64469045]])\n", - "2 Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n", - " [[ 2.05994511, 1.87704289, 0.01988174],\n", - " [-0.40165186, 0.77547729, -0.64469045]])\n", - "3 Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n", - " [[ 2.05994511, -inf. , -inf. ],\n", - " [-0.40165186, 0.77547729, -0.64469045]])\n", - "x Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n", - " [[ 2.05994511, -inf. , -inf. ],\n", - " [-0.40165186, 0.77547729, -0.64469045]])\n", - "2 Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n", - " [[ 2.05994511, -inf. , -inf. ],\n", - " [-0.40165186, 0.77547729, -0.64469045]])\n", - "3 Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n", - " [[ 0. , -inf. , -inf. ],\n", - " [-0.40165186, 0.77547729, -0.64469045]])\n", - "Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n", - " [[ 0. , -inf. , -inf. ],\n", - " [-0.40165186, 0.77547729, -0.64469045]])\n" - ] - } - ], - "source": [ - "paddle.bool = 'bool'\n", - "\n", - "def masked_fill(xs:paddle.Tensor, mask:paddle.Tensor, value:float):\n", - " print(xs)\n", - " trues = paddle.ones_like(xs) * value\n", - " assert xs.shape == mask.shape\n", - " xs = paddle.where(mask, trues, xs)\n", - " return xs\n", - "\n", - "def masked_fill_(xs:paddle.Tensor, mask:paddle.Tensor, value:float):\n", - " print('x', xs)\n", - " trues = paddle.ones_like(xs) * value\n", - " assert xs.shape == mask.shape\n", - " ret = paddle.where(mask, trues, xs)\n", - " print('2', xs)\n", - " paddle.assign(ret, output=xs)\n", - " print('3', xs)\n", - "\n", - "paddle.Tensor.masked_fill = masked_fill\n", - "paddle.Tensor.masked_fill_ = masked_fill_\n", - "\n", - "def mask_finished_scores_pd(score: paddle.Tensor,\n", - " flag: paddle.Tensor) -> paddle.Tensor:\n", - " \"\"\"\n", - " If a sequence is finished, we only allow one alive branch. 
This function\n", - " aims to give one branch a zero score and the rest -inf score.\n", - " Args:\n", - " score (torch.Tensor): A real value array with shape\n", - " (batch_size * beam_size, beam_size).\n", - " flag (torch.Tensor): A bool array with shape\n", - " (batch_size * beam_size, 1).\n", - " Returns:\n", - " torch.Tensor: (batch_size * beam_size, beam_size).\n", - " \"\"\"\n", - " beam_size = score.shape[-1]\n", - " zero_mask = paddle.zeros_like(flag, dtype=paddle.bool)\n", - " if beam_size > 1:\n", - " unfinished = paddle.concat((zero_mask, flag.tile([1, beam_size - 1])),\n", - " axis=1)\n", - " finished = paddle.concat((flag, zero_mask.tile([1, beam_size - 1])),\n", - " axis=1)\n", - " else:\n", - " unfinished = zero_mask\n", - " finished = flag\n", - " print(unfinished)\n", - " print(finished)\n", - " \n", - " #score.masked_fill_(unfinished, -float('inf'))\n", - " #score.masked_fill_(finished, 0)\n", - "# infs = paddle.ones_like(score) * -float('inf')\n", - "# score = paddle.where(unfinished, infs, score)\n", - "# score = paddle.where(finished, paddle.zeros_like(score), score)\n", - "\n", - "# score = score.masked_fill(unfinished, -float('inf'))\n", - "# score = score.masked_fill(finished, 0)\n", - " score.masked_fill_(unfinished, -float('inf'))\n", - " score.masked_fill_(finished, 0)\n", - " return score\n", - "\n", - "r = mask_finished_scores_pd(score, flag)\n", - "print(r)" - ] - }, - { - "cell_type": "code", - "execution_count": 57, - "id": "vocal-prime", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 57, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "score.value" - ] - }, - { - "cell_type": "code", - "execution_count": 71, - "id": "bacterial-adolescent", - "metadata": {}, - "outputs": [], - "source": [ - "from typing import Union, Any" - ] - }, - { - "cell_type": "code", - "execution_count": 72, - "id": "absent-fiber", - "metadata": {}, - "outputs": [], - "source": [ - "def repeat(xs : paddle.Tensor, *size: Any):\n", - " print(size)\n", - " return paddle.tile(xs, size)\n", - "paddle.Tensor.repeat = repeat" - ] - }, - { - "cell_type": "code", - "execution_count": 73, - "id": "material-harbor", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "(1, 2)\n", - "Tensor(shape=[2, 2], dtype=bool, place=CUDAPlace(0), stop_gradient=True,\n", - " [[True , True ],\n", - " [False, False]])\n" - ] - } - ], - "source": [ - "flag = paddle.ones((2, 1), dtype='bool')\n", - "flag[1] = False\n", - "print(flag.repeat(1, 2))" - ] - }, - { - "cell_type": "code", - "execution_count": 84, - "id": "acute-brighton", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "(Tensor(shape=[1], dtype=int64, place=CUDAPlace(0), stop_gradient=True,\n", - " [1]), 2)\n", - "Tensor(shape=[2, 2], dtype=bool, place=CUDAPlace(0), stop_gradient=True,\n", - " [[True , True ],\n", - " [False, False]])\n" - ] - } - ], - "source": [ - "flag = paddle.ones((2, 1), dtype='bool')\n", - "flag[1] = False\n", - "print(flag.repeat(paddle.to_tensor(1), 2))" - ] - }, - { - "cell_type": "code", - "execution_count": 85, - "id": "european-rugby", - "metadata": {}, - "outputs": [], - "source": [ - "def size(xs, *args: int):\n", - " nargs = len(args)\n", - " s = paddle.shape(xs)\n", - " assert(nargs <= 1)\n", - " if nargs == 1:\n", - " return s[args[0]]\n", - " else:\n", - " return s\n", - "paddle.Tensor.size = size" - ] - }, - { - "cell_type": "code", - 
"execution_count": 86, - "id": "moral-special", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Tensor(shape=[2], dtype=int32, place=CPUPlace, stop_gradient=True,\n", - " [2, 1])" - ] - }, - "execution_count": 86, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "flag.size()" - ] - }, - { - "cell_type": "code", - "execution_count": 87, - "id": "ahead-coach", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Tensor(shape=[1], dtype=int32, place=CPUPlace, stop_gradient=True,\n", - " [1])" - ] - }, - "execution_count": 87, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "flag.size(1)" - ] - }, - { - "cell_type": "code", - "execution_count": 88, - "id": "incomplete-fitness", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Tensor(shape=[1], dtype=int32, place=CPUPlace, stop_gradient=True,\n", - " [2])" - ] - }, - "execution_count": 88, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "flag.size(0)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "upset-connectivity", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.0" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/.notebook/position_embeding_check.ipynb b/.notebook/position_embeding_check.ipynb deleted file mode 100644 index d4b9098d9..000000000 --- a/.notebook/position_embeding_check.ipynb +++ /dev/null @@ -1,231 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 2, - "id": "designing-borough", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n", - " and should_run_async(code)\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[[ 0.0000000e+00 0.0000000e+00 0.0000000e+00 ... 0.0000000e+00\n", - " 0.0000000e+00 0.0000000e+00]\n", - " [ 8.4147096e-01 8.0196178e-01 7.6172036e-01 ... 1.2409373e-04\n", - " 1.1547816e-04 1.0746076e-04]\n", - " [ 9.0929741e-01 9.5814437e-01 9.8704624e-01 ... 2.4818745e-04\n", - " 2.3095631e-04 2.1492151e-04]\n", - " ...\n", - " [ 3.7960774e-01 7.4510968e-01 7.3418564e-01 ... 1.2036801e-02\n", - " 1.1201146e-02 1.0423505e-02]\n", - " [-5.7338190e-01 -8.9752287e-02 -4.1488394e-02 ... 1.2160885e-02\n", - " 1.1316618e-02 1.0530960e-02]\n", - " [-9.9920684e-01 -8.5234123e-01 -7.8794664e-01 ... 
1.2284970e-02\n", - " 1.1432089e-02 1.0638415e-02]]\n", - "True\n", - "True\n" - ] - } - ], - "source": [ - "import torch\n", - "import math\n", - "import numpy as np\n", - "\n", - "max_len=100\n", - "d_model=256\n", - "\n", - "pe = torch.zeros(max_len, d_model)\n", - "position = torch.arange(0, max_len,\n", - " dtype=torch.float32).unsqueeze(1)\n", - "toruch_position = position\n", - "div_term = torch.exp(\n", - " torch.arange(0, d_model, 2, dtype=torch.float32) *\n", - " -(math.log(10000.0) / d_model))\n", - "tourch_div_term = div_term.cpu().detach().numpy()\n", - "\n", - "\n", - "\n", - "torhc_sin = torch.sin(position * div_term)\n", - "torhc_cos = torch.cos(position * div_term)\n", - "print(torhc_sin.cpu().detach().numpy())\n", - "np_sin = np.sin((position * div_term).cpu().detach().numpy())\n", - "np_cos = np.cos((position * div_term).cpu().detach().numpy())\n", - "print(np.allclose(np_sin, torhc_sin.cpu().detach().numpy()))\n", - "print(np.allclose(np_cos, torhc_cos.cpu().detach().numpy()))\n", - "pe[:, 0::2] = torhc_sin\n", - "pe[:, 1::2] = torhc_cos\n", - "tourch_pe = pe.cpu().detach().numpy()" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "swiss-referral", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "True\n", - "True\n", - "False\n", - "False\n", - "False\n", - "False\n", - "[[ 1. 1. 1. ... 1. 1.\n", - " 1. ]\n", - " [ 0.5403023 0.59737533 0.6479059 ... 1. 1.\n", - " 1. ]\n", - " [-0.41614684 -0.28628543 -0.1604359 ... 0.99999994 1.\n", - " 1. ]\n", - " ...\n", - " [-0.92514753 -0.66694194 -0.67894876 ... 0.9999276 0.99993724\n", - " 0.9999457 ]\n", - " [-0.81928825 -0.9959641 -0.999139 ... 0.99992603 0.999936\n", - " 0.99994457]\n", - " [ 0.03982088 -0.52298605 -0.6157435 ... 0.99992454 0.9999347\n", - " 0.99994344]]\n", - "----\n", - "[[ 1. 1. 1. ... 1. 1.\n", - " 1. ]\n", - " [ 0.54030234 0.59737533 0.6479059 ... 1. 1.\n", - " 1. ]\n", - " [-0.41614684 -0.28628543 -0.1604359 ... 1. 1.\n", - " 1. ]\n", - " ...\n", - " [-0.92514753 -0.66694194 -0.67894876 ... 0.9999276 0.9999373\n", - " 0.9999457 ]\n", - " [-0.81928825 -0.9959641 -0.999139 ... 0.99992603 0.999936\n", - " 0.99994457]\n", - " [ 0.03982088 -0.5229861 -0.6157435 ... 0.99992454 0.9999347\n", - " 0.99994344]]\n", - ")))))))\n", - "[[ 0.0000000e+00 0.0000000e+00 0.0000000e+00 ... 0.0000000e+00\n", - " 0.0000000e+00 0.0000000e+00]\n", - " [ 8.4147096e-01 8.0196178e-01 7.6172036e-01 ... 1.2409373e-04\n", - " 1.1547816e-04 1.0746076e-04]\n", - " [ 9.0929741e-01 9.5814437e-01 9.8704624e-01 ... 2.4818745e-04\n", - " 2.3095631e-04 2.1492151e-04]\n", - " ...\n", - " [ 3.7960774e-01 7.4510968e-01 7.3418564e-01 ... 1.2036801e-02\n", - " 1.1201146e-02 1.0423505e-02]\n", - " [-5.7338190e-01 -8.9752287e-02 -4.1488394e-02 ... 1.2160885e-02\n", - " 1.1316618e-02 1.0530960e-02]\n", - " [-9.9920684e-01 -8.5234123e-01 -7.8794664e-01 ... 1.2284970e-02\n", - " 1.1432089e-02 1.0638415e-02]]\n", - "----\n", - "[[ 0.0000000e+00 0.0000000e+00 0.0000000e+00 ... 0.0000000e+00\n", - " 0.0000000e+00 0.0000000e+00]\n", - " [ 8.4147096e-01 8.0196178e-01 7.6172036e-01 ... 1.2409373e-04\n", - " 1.1547816e-04 1.0746076e-04]\n", - " [ 9.0929741e-01 9.5814437e-01 9.8704624e-01 ... 2.4818745e-04\n", - " 2.3095631e-04 2.1492151e-04]\n", - " ...\n", - " [ 3.7960774e-01 7.4510968e-01 7.3418564e-01 ... 1.2036801e-02\n", - " 1.1201146e-02 1.0423505e-02]\n", - " [-5.7338190e-01 -8.9752287e-02 -4.1488394e-02 ... 
1.2160885e-02\n", - " 1.1316618e-02 1.0530960e-02]\n", - " [-9.9920684e-01 -8.5234123e-01 -7.8794664e-01 ... 1.2284970e-02\n", - " 1.1432089e-02 1.0638415e-02]]\n" - ] - } - ], - "source": [ - "import paddle\n", - "paddle.set_device('cpu')\n", - "ppe = paddle.zeros((max_len, d_model), dtype='float32')\n", - "position = paddle.arange(0, max_len,\n", - " dtype='float32').unsqueeze(1)\n", - "print(np.allclose(position.numpy(), toruch_position))\n", - "div_term = paddle.exp(\n", - " paddle.arange(0, d_model, 2, dtype='float32') *\n", - " -(math.log(10000.0) / d_model))\n", - "print(np.allclose(div_term.numpy(), tourch_div_term))\n", - "\n", - "\n", - "\n", - "p_sin = paddle.sin(position * div_term)\n", - "p_cos = paddle.cos(position * div_term)\n", - "print(np.allclose(np_sin, p_sin.numpy(), rtol=1.e-6, atol=0))\n", - "print(np.allclose(np_cos, p_cos.numpy(), rtol=1.e-6, atol=0))\n", - "ppe[:, 0::2] = p_sin\n", - "ppe[:, 1::2] = p_cos\n", - "print(np.allclose(p_sin.numpy(), torhc_sin.cpu().detach().numpy()))\n", - "print(np.allclose(p_cos.numpy(), torhc_cos.cpu().detach().numpy()))\n", - "print(p_cos.numpy())\n", - "print(\"----\")\n", - "print(torhc_cos.cpu().detach().numpy())\n", - "print(\")))))))\")\n", - "print(p_sin.numpy())\n", - "print(\"----\")\n", - "print(torhc_sin.cpu().detach().numpy())" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "integrated-boards", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "False\n" - ] - } - ], - "source": [ - "print(np.allclose(ppe.numpy(), pe.numpy()))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "flying-reserve", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "revised-divide", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.0" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/.notebook/python_test.ipynb b/.notebook/python_test.ipynb deleted file mode 100644 index 819d4c48f..000000000 --- a/.notebook/python_test.ipynb +++ /dev/null @@ -1,1680 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "choice-lender", - "metadata": {}, - "outputs": [], - "source": [ - "eng=\"one minute a voice said and the time buzzer sounded\"\n", - "chn=\"可控是病毒武器最基本的要求\"" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "ruled-kuwait", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "o\n", - "n\n", - "e\n", - " \n", - "m\n", - "i\n", - "n\n", - "u\n", - "t\n", - "e\n", - " \n", - "a\n", - " \n", - "v\n", - "o\n", - "i\n", - "c\n", - "e\n", - " \n", - "s\n", - "a\n", - "i\n", - "d\n", - " \n", - "a\n", - "n\n", - "d\n", - " \n", - "t\n", - "h\n", - "e\n", - " \n", - "t\n", - "i\n", - "m\n", - "e\n", - " \n", - "b\n", - "u\n", - "z\n", - "z\n", - "e\n", - "r\n", - " \n", - "s\n", - "o\n", - "u\n", - "n\n", - "d\n", - "e\n", - "d\n" - ] - } - ], - "source": [ - "for char in eng:\n", - " print(char)" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "passive-petite", - "metadata": {}, - "outputs": [ - { - "name": 
"stdout", - "output_type": "stream", - "text": [ - "可\n", - "控\n", - "是\n", - "病\n", - "毒\n", - "武\n", - "器\n", - "最\n", - "基\n", - "本\n", - "的\n", - "要\n", - "求\n" - ] - } - ], - "source": [ - "for char in chn:\n", - " print(char)" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "olympic-realtor", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "one\n", - "minute\n", - "a\n", - "voice\n", - "said\n", - "and\n", - "the\n", - "time\n", - "buzzer\n", - "sounded\n" - ] - } - ], - "source": [ - "for word in eng.split():\n", - " print(word)" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "induced-enhancement", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "可控是病毒武器最基本的要求\n" - ] - } - ], - "source": [ - "for word in chn.split():\n", - " print(word)" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "lovely-bottle", - "metadata": {}, - "outputs": [ - { - "ename": "ModuleNotFoundError", - "evalue": "No module named 'StringIO'", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0;32mimport\u001b[0m \u001b[0mStringIO\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", - "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'StringIO'" - ] - } - ], - "source": [ - "import StringIO" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "interested-cardiff", - "metadata": {}, - "outputs": [], - "source": [ - "from io import StringIO" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "portable-ivory", - "metadata": {}, - "outputs": [], - "source": [ - "inputs = StringIO()" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "id": "compatible-destination", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "64" - ] - }, - "execution_count": 18, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "inputs.write(\"nor is mister quilter's manner less interesting than his matter\" + '\\n')" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "id": "federal-margin", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "nor is mister quilter's manner less interesting than his matternor is mister quilter's manner less interesting than his matter\n", - "\n" - ] - } - ], - "source": [ - "print(inputs.getvalue())" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "id": "consecutive-entity", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "64" - ] - }, - "execution_count": 20, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "inputs.write(\"nor is mister quilter's manner less interesting than his matter\" + '\\n')" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "id": "desirable-anxiety", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "nor is mister quilter's manner less interesting than his matternor is mister quilter's manner less interesting than his matter\n", - "nor is mister quilter's manner less interesting than his matter\n", - "\n" - ] - } - ], - "source": [ - "print(inputs.getvalue())" - ] - }, - { - 
"cell_type": "code", - "execution_count": 23, - "id": "employed-schedule", - "metadata": {}, - "outputs": [], - "source": [ - "import tempfile" - ] - }, - { - "cell_type": "code", - "execution_count": 25, - "id": "unlikely-honduras", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "['__class__', '__del__', '__delattr__', '__dict__', '__dir__', '__doc__', '__enter__', '__eq__', '__exit__', '__format__', '__ge__', '__getattribute__', '__getstate__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__iter__', '__le__', '__lt__', '__ne__', '__new__', '__next__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '_checkClosed', '_checkReadable', '_checkSeekable', '_checkWritable', '_dealloc_warn', '_finalizing', 'close', 'closed', 'detach', 'fileno', 'flush', 'isatty', 'mode', 'name', 'peek', 'raw', 'read', 'read1', 'readable', 'readinto', 'readinto1', 'readline', 'readlines', 'seek', 'seekable', 'tell', 'truncate', 'writable', 'write', 'writelines']\n", - "57\n" - ] - } - ], - "source": [ - "with tempfile.TemporaryFile() as fp:\n", - " print(dir(fp))\n", - " print(fp.name)" - ] - }, - { - "cell_type": "code", - "execution_count": 27, - "id": "needed-trail", - "metadata": {}, - "outputs": [], - "source": [ - "a = tempfile.mkstemp(suffix=None, prefix='test', dir=None, text=False)" - ] - }, - { - "cell_type": "code", - "execution_count": 28, - "id": "hazardous-choir", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "['__add__', '__class__', '__contains__', '__delattr__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__getitem__', '__getnewargs__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__iter__', '__le__', '__len__', '__lt__', '__mul__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__rmul__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', 'count', 'index']\n" - ] - } - ], - "source": [ - "print(dir(a))" - ] - }, - { - "cell_type": "code", - "execution_count": 29, - "id": "front-sauce", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "(57, '/tmp/test27smzbzc')\n" - ] - } - ], - "source": [ - "print(a)" - ] - }, - { - "cell_type": "code", - "execution_count": 31, - "id": "shared-wages", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n" - ] - } - ], - "source": [ - "print(a.index)" - ] - }, - { - "cell_type": "code", - "execution_count": 34, - "id": "charged-carnival", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "['__class__', '__delattr__', '__dict__', '__dir__', '__doc__', '__enter__', '__eq__', '__exit__', '__format__', '__ge__', '__getattr__', '__getattribute__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__iter__', '__le__', '__lt__', '__module__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '__weakref__', '_closer', 'close', 'delete', 'file', 'name']\n", - "/tmp/tmpfjn7mygy\n" - ] - } - ], - "source": [ - "fp= tempfile.NamedTemporaryFile(mode='w', delete=False)\n", - "print(dir(fp))\n", - "print(fp.name)\n", - "fp.close()" - ] - }, - { - "cell_type": "code", - "execution_count": 36, - "id": "religious-terror", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - 
"/tmp/tmpfjn7mygy\n" - ] - } - ], - "source": [ - "import os\n", - "os.path.exists(fp.name)\n", - "print(fp.name)" - ] - }, - { - "cell_type": "code", - "execution_count": 37, - "id": "communist-gospel", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 37, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "fp.write" - ] - }, - { - "cell_type": "code", - "execution_count": 39, - "id": "simplified-clarity", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'example'" - ] - }, - "execution_count": 39, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "s='/home/ubuntu/python/example.py'\n", - "os.path.splitext(os.path.basename(s))[0]" - ] - }, - { - "cell_type": "code", - "execution_count": 40, - "id": "popular-genius", - "metadata": {}, - "outputs": [], - "source": [ - "from collections import Counter" - ] - }, - { - "cell_type": "code", - "execution_count": 43, - "id": "studied-burner", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "dict_items([('hello', 1), ('world', 1)])\n" - ] - } - ], - "source": [ - "counter = Counter()\n", - "counter.update([\"hello\"])\n", - "counter.update([\"world\"])\n", - "print(counter.items())" - ] - }, - { - "cell_type": "code", - "execution_count": 44, - "id": "mineral-ceremony", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "dict_items([('h', 1), ('e', 1), ('l', 3), ('o', 2), ('w', 1), ('r', 1), ('d', 1)])\n" - ] - } - ], - "source": [ - "counter = Counter()\n", - "counter.update(\"hello\")\n", - "counter.update(\"world\")\n", - "print(counter.items())" - ] - }, - { - "cell_type": "code", - "execution_count": 45, - "id": "nonprofit-freedom", - "metadata": {}, - "outputs": [], - "source": [ - "counter.update(list(\"hello\"))" - ] - }, - { - "cell_type": "code", - "execution_count": 46, - "id": "extended-methodology", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "dict_items([('h', 2), ('e', 2), ('l', 5), ('o', 3), ('w', 1), ('r', 1), ('d', 1)])\n" - ] - } - ], - "source": [ - "print(counter.items())" - ] - }, - { - "cell_type": "code", - "execution_count": 47, - "id": "grand-benjamin", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "['h', 'e', 'l', 'l', 'o']" - ] - }, - "execution_count": 47, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "list(\"hello\")" - ] - }, - { - "cell_type": "code", - "execution_count": 53, - "id": "marine-fundamentals", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{}\n" - ] - } - ], - "source": [ - "from io import StringIO\n", - "a = StringIO(initial_value='{}', newline='')\n", - "print(a.read())" - ] - }, - { - "cell_type": "code", - "execution_count": 56, - "id": "suitable-charlotte", - "metadata": {}, - "outputs": [ - { - "ename": "TypeError", - "evalue": "expected str, bytes or os.PathLike object, not _io.StringIO", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0;32mwith\u001b[0m 
\u001b[0mio\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ma\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mTypeError\u001b[0m: expected str, bytes or os.PathLike object, not _io.StringIO" - ] - } - ], - "source": [ - "with io.open(a) as f:\n", - " print(f.read())" - ] - }, - { - "cell_type": "code", - "execution_count": 57, - "id": "institutional-configuration", - "metadata": {}, - "outputs": [], - "source": [ - "io.open?" - ] - }, - { - "cell_type": "code", - "execution_count": 58, - "id": "pregnant-modem", - "metadata": {}, - "outputs": [], - "source": [ - "def get_default_args(fn):\n", - " if fn is None:\n", - " return {}\n", - "\n", - " signature = inspect.signature(fn)\n", - " return {\n", - " k: v.default\n", - " for k, v in signature.parameters.items()\n", - " if v.default is not inspect.Parameter.empty\n", - " }" - ] - }, - { - "cell_type": "code", - "execution_count": 59, - "id": "first-release", - "metadata": {}, - "outputs": [ - { - "ename": "NameError", - "evalue": "name 'inspect' is not defined", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mget_default_args\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mio\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mopen\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", - "\u001b[0;32m\u001b[0m in \u001b[0;36mget_default_args\u001b[0;34m(fn)\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0;34m{\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 5\u001b[0;31m \u001b[0msignature\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0minspect\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msignature\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 6\u001b[0m return {\n\u001b[1;32m 7\u001b[0m \u001b[0mk\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mv\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdefault\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mNameError\u001b[0m: name 'inspect' is not defined" - ] - } - ], - "source": [ - "get_default_args(io.open)" - ] - }, - { - "cell_type": "code", - "execution_count": 35, - "id": "convertible-roulette", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Requirement already satisfied: sox in /workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages (1.4.1)\n", - "Requirement already satisfied: numpy>=1.9.0 in /workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages (from sox) (1.20.1)\n", - "Requirement already satisfied: librosa in /workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages (0.8.0)\n", - "Requirement already satisfied: scikit-learn!=0.19.0,>=0.14.0 in /workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages (from librosa) (0.24.1)\n", - "Requirement already satisfied: numba>=0.43.0 in 
/workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages (from librosa) (0.52.0)\n", - "Requirement already satisfied: pooch>=1.0 in /workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages (from librosa) (1.3.0)\n", - "Requirement already satisfied: scipy>=1.0.0 in /workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages (from librosa) (1.2.1)\n", - "Requirement already satisfied: numpy>=1.15.0 in /workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages (from librosa) (1.20.1)\n", - "Requirement already satisfied: decorator>=3.0.0 in /workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages (from librosa) (4.4.2)\n", - "Requirement already satisfied: resampy>=0.2.2 in /workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages (from librosa) (0.2.2)\n", - "Requirement already satisfied: audioread>=2.0.0 in /workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages (from librosa) (2.1.9)\n", - "Requirement already satisfied: soundfile>=0.9.0 in /workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages (from librosa) (0.9.0.post1)\n", - "Requirement already satisfied: joblib>=0.14 in /workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages (from librosa) (1.0.1)\n", - "Requirement already satisfied: setuptools in /workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages (from numba>=0.43.0->librosa) (51.0.0)\n", - "Requirement already satisfied: llvmlite<0.36,>=0.35.0 in /workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages (from numba>=0.43.0->librosa) (0.35.0)\n", - "Requirement already satisfied: appdirs in /workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages (from pooch>=1.0->librosa) (1.4.4)\n", - "Requirement already satisfied: packaging in /workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages (from pooch>=1.0->librosa) (20.9)\n", - "Requirement already satisfied: requests in /workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages (from pooch>=1.0->librosa) (2.25.1)\n", - "Requirement already satisfied: six>=1.3 in /workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages (from resampy>=0.2.2->librosa) (1.15.0)\n", - "Requirement already satisfied: threadpoolctl>=2.0.0 in /workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages (from scikit-learn!=0.19.0,>=0.14.0->librosa) (2.1.0)\n", - "Requirement already satisfied: cffi>=0.6 in /workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages (from soundfile>=0.9.0->librosa) (1.14.4)\n", - "Requirement already satisfied: pycparser in /workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages (from cffi>=0.6->soundfile>=0.9.0->librosa) (2.20)\n", - "Requirement already satisfied: pyparsing>=2.0.2 in /workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages (from packaging->pooch>=1.0->librosa) (2.4.7)\n", - "Requirement already satisfied: idna<3,>=2.5 in /workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages (from requests->pooch>=1.0->librosa) (2.10)\n", - "Requirement already satisfied: certifi>=2017.4.17 in /workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages (from requests->pooch>=1.0->librosa) (2020.12.5)\n", - "Requirement already satisfied: chardet<5,>=3.0.2 in /workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages (from requests->pooch>=1.0->librosa) (4.0.0)\n", - "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages (from requests->pooch>=1.0->librosa) (1.26.3)\n" - ] - } - ], - 
"source": [ - "!pip install sox\n", - "!pip install librosa" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "cutting-fleece", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n" - ] - } - ], - "source": [ - "import numpy as np\n", - "import sox\n", - "tfm = sox.Transformer()\n", - "sample_rate = 44100\n", - "y = np.sin(2 * np.pi * 440.0 * np.arange(sample_rate * 1.0) / sample_rate)\n", - "print(y.dtype.type)" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "historical-diving", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[ 0. 0.06264832 0.12505052 ... -0.18696144 -0.12505052\n", - " -0.06264832]\n" - ] - } - ], - "source": [ - "output_array = tfm.build_array(input_array=y, sample_rate_in=sample_rate)\n", - "print(output_array)" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "similar-spice", - "metadata": {}, - "outputs": [], - "source": [ - "tfm.build_array?" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "grand-influence", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "['8svx', 'aif', 'aifc', 'aiff', 'aiffc', 'al', 'amb', 'amr-nb', 'amr-wb', 'anb', 'au', 'avr', 'awb', 'caf', 'cdda', 'cdr', 'cvs', 'cvsd', 'cvu', 'dat', 'dvms', 'f32', 'f4', 'f64', 'f8', 'fap', 'flac', 'fssd', 'gsm', 'gsrt', 'hcom', 'htk', 'ima', 'ircam', 'la', 'lpc', 'lpc10', 'lu', 'mat', 'mat4', 'mat5', 'maud', 'nist', 'ogg', 'paf', 'prc', 'pvf', 'raw', 's1', 's16', 's2', 's24', 's3', 's32', 's4', 's8', 'sb', 'sd2', 'sds', 'sf', 'sl', 'sln', 'smp', 'snd', 'sndfile', 'sndr', 'sndt', 'sou', 'sox', 'sph', 'sw', 'txw', 'u1', 'u16', 'u2', 'u24', 'u3', 'u32', 'u4', 'u8', 'ub', 'ul', 'uw', 'vms', 'voc', 'vorbis', 'vox', 'w64', 'wav', 'wavpcm', 'wv', 'wve', 'xa', 'xi']\n" - ] - } - ], - "source": [ - "print(sox.core._get_valid_formats())" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "wireless-hypothetical", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "float64\n", - "(59471,)\n", - "16000\n", - "(54065,)\n", - "1.0999907518727459\n" - ] - } - ], - "source": [ - "import soundfile as sf\n", - "wav='/workspace/DeepSpeech-2.x/examples/aishell/s1/../../..//examples/dataset/aishell/data_aishell/wav/dev/S0724/BAC009S0724W0190.wav'\n", - "samples, sr = sf.read(wav)\n", - "print(samples.dtype)\n", - "print(samples.shape)\n", - "print(sr)\n", - "tfm = sox.Transformer()\n", - "tfm.speed(1.1)\n", - "output_array = tfm.build_array(input_array=samples, sample_rate_in=sr)\n", - "output_array.dtype\n", - "print(output_array.shape)\n", - "print(len(samples)/len(output_array))" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "id": "designed-fluid", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "execution_count": 17, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import IPython.display as ipd\n", - "ipd.Audio(wav) # load a local WAV file" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "id": "cultural-friendship", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "execution_count": 18, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "tfm = sox.Transformer()\n", - "tfm.speed(1.0)\n", - 
"output_array = tfm.build_array(input_array=samples, sample_rate_in=sr)\n", - "ipd.Audio(output_array, rate=sr) # load a NumPy array" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "id": "fossil-lotus", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "execution_count": 19, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "tfm = sox.Transformer()\n", - "tfm.speed(1.1)\n", - "output_array = tfm.build_array(input_array=samples, sample_rate_in=sr)\n", - "ipd.Audio(output_array, rate=sr) # load a NumPy array" - ] - }, - { - "cell_type": "code", - "execution_count": 31, - "id": "constitutional-poker", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "execution_count": 31, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "tfm = sox.Transformer()\n", - "tfm.speed(0.9)\n", - "output_array = tfm.build_array(input_array=samples, sample_rate_in=sr)\n", - "ipd.Audio(output_array, rate=sr) # load a NumPy array" - ] - }, - { - "cell_type": "code", - "execution_count": 32, - "id": "threaded-strap", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "66078\n" - ] - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYIAAAEKCAYAAAAfGVI8AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/Z1A+gAAAACXBIWXMAAAsTAAALEwEAmpwYAAA8K0lEQVR4nO2dd3hUZfbHvycdQoAEQpEWmlQVJICoKApqABdcF8u6KlbUXX+77rqrIFZs7Lr2si5WXHXtrigI0myoSFB67xDpoYQEUs/vj7kTJpM7M/fO7XPP53nmye33ZObe97zveU8hZoYgCILgX5KcFkAQBEFwFlEEgiAIPkcUgSAIgs8RRSAIguBzRBEIgiD4HFEEgiAIPscURUBEBUS0log2ENF4lf1/IaJVRLSMiOYSUYeQfWOJaL3yGWuGPIIgCIJ2yGgcARElA1gH4DwAOwAsAvBbZl4Vcsw5ABYycxkR3QJgCDNfRkQ5AAoB5ANgAIsB9GPmA4aEEgRBEDRjxohgAIANzLyJmSsAvANgdOgBzDyfmcuU1R8AtFWWLwAwm5mLlcZ/NoACE2QSBEEQNJJiwjXaANgesr4DwMAox18P4PMo57aJdcPmzZtzXl6ePikFQRB8zuLFi/cxc274djMUgWaI6EoEzEBnx3HuOADjAKB9+/YoLCw0WTpBEITEhoi2qm03wzRUBKBdyHpbZVu4AMMATAQwipnL9ZwLAMw8hZnzmTk/N7eeQhMEQRDixAxFsAhAVyLqSERpAC4HMC30ACLqC+DfCCiBPSG7ZgE4n4iyiSgbwPnKNkEQBMEmDJuGmLmKiG5FoAFPBvAqM68kokkACpl5GoDHADQC8D4RAcA2Zh7FzMVE9CACygQAJjFzsVGZBEEQBO0Ydh91gvz8fJY5AkEQBH0Q0WJmzg/fLpHFgiAIPkcUgSAIgs8RRSAIguBzRBEIgiD4HFEEgif42/tLsXirpKASBCuwNbJYEOLl/cU7kJ6ahH4dsp0WRRASDhkRCIKQcKwoOoSZK3Y5LYZnEEUgCELCMeGj5bj5zcVOi+EZRBEInsGDsY+CQzDkYdGDKALBM8irLWhFOg36EEUgCELCIYpAH6IIBEFIOEQP6EMUgeAZ3l64zWkRBI/gxWSaTiKKQBAEweeIIhAEIaGorK7Bml0lTovhKUQRCIKQUBworXBaBM8hikAQhISiRqYHdCOKQPAUFVU1OPHuz50WQ3AxNTJRrBtTFAERFRDRWiLaQETjVfafRUQ/EVEVEY0J21dNREuUz7TwcwUhlKMV1aioqnFaDMHFiCLQj+Hso0SUDOB5AOcB2AFgERFNY+ZVIYdtA3ANgL+qXOIoM/cxKofgE8hpAQS3UyP9BN2YMSIYAGADM29i5goA7wAYHXoAM29h5mUA5CcSdDFj+U7kjZ/utBiCh5ARgX7MUARtAGwPWd+hbNNKBhEVEtEPRHSRCfIkNHnjp2P/kXKnxbCNtWFugEkyIhBiMPKZb5wWwXO4YbK4AzPnA7gCwFNE1FntICIapyiMwr1799orocvYU+IfRRDetyMSTSBEp7SiunZ5n486TUYwQxEUAWgXst5W2aYJZi5S/m4C8CWAvhGOm8LM+cycn5ubG7+0HmD+mj1Ysv1gve3HKgMPeLWf/ONkmC8YYM9hUQRaMEMRLALQlYg6ElEagMsBaPL+IaJsIkpXlpsDOAPAquhnJT7Xvr4If3jrp3rb/zFzLQB/KYLw/3Tu6t2OyCF4E5kv0IZhRcDMVQBuBTALwGoA7zHzSiKaRESjAICI+hPRDgCXAPg3Ea1UTu8BoJCIlgKYD2BymLeRr8kbP73OROn+0kDv5kCZfyMn//TOEgCSVEzQhjwm2jCleD0zzwAwI2zbvSHLixAwGYWf9x2Ak8yQIdFQM4UHH+oHPl2FId1a2CuQQ0R6kcurapCRmmyvMB6kpoZRw4yUZDdMB9qPjAi04c+nw8UcVHr7as9vsZJDZfO+UjtFcpRIJQeHPfGVzZJ4k7/PWoPu98x0WgzH+GHTfqdF8ASiCFzG8qJDqts37DmCbzfss1ka54nUodtx4Ki9gniUVb8cRpWP5pQ+WVLXT0UUgTZEEXiEV77d7LQIjuCfJsxanGgQDx+rxLIdB2295+qddeNOkpOkidOCfEseoVri5gUDXD7lB9vv+cQX6
zDquQW23zcUn06N6Ea+Jo/gp+F9KDLXZ4yFm4odu3eVCzovBb1bOS2CJxBF4BFq/KoIxDhkiIpq5xrjJAeiwMOfl7Rk8SzTgigCl1J0sO5kaLWG9rDkWCXKKqosksghRA94FicUgRAfoghcRiRTiJYRwbAnvnLEFmwloge8i+gB72BKQJlgPdOX74x5zO7D5ThyLMFGBIJnkRGBd5ARgcsw+u5UJ9jsqqSS8C4rIsTEWMl7i7bHPkiohygCB2FmVJk8mZdoCelED3iTWSt3YeFm+z2WDpRV1lkXZwNtiCJwkBe+3IguE80txJ5oikDwJou3HqhdPmPyPAclEbQgisBBPltW3+4f+gLFQ6LpgQT7d3xDqIUz3ANOcB8yWewgq3cerrftqTnrHZBEEMzjdy//gAUb3JHjp0hyUmlCRgSCq5E5Am9xoLTCNUoAAB79fA0A4L8/bkNpuXjURUIUgUuoqq7Bawu0J5Yrr6qus/7yN5vMFsn1/PX9pfhmvb/rVwPA7sPHsOvQMafFAAAcrayOfZADTPhoOeau2eO0GK5FFIFL2LK/FA98qr0427pdR+qsPzR9tdkiuZ4PFu/AB4t3OC2G4/z6+QU4+7H5TosRFSkx6m5EEbgE0hlA8Kvnvq1dTuSc6+L+F5tjVTUor6rvhjxzxS7bZYn0GF8/tdBeQQRdmKIIiKiAiNYS0QYiGq+y/ywi+omIqohoTNi+sUS0XvmMNUMerzH6uW9hJI7s2tcWmSaL23htwZaYx0xfthMb9hyJeZzfePyLtbbfkww9yYJTGFYERJQM4HkAwwH0BPBbIuoZdtg2ANcAeDvs3BwA9wEYCGAAgPuIKNuoTF5j6Y5DukcEwPGo20oHM0y6gT+8/RMeneE/01iQSHmoJMODoBUzRgQDAGxg5k3MXAHgHQCjQw9g5i3MvAxAeIt1AYDZzFzMzAcAzAZQYIJMniPaOxvpRT+s5BXya60C4Pj35tdGr7i0AgePVsY+UBCiYIYiaAMgNMHHDmWb1ecmFNESdHW6a4b6DpZcPMf/e/9ogsv+/T0qlDmBaC6RTjwaTipkv9bsMAPPTBYT0TgiKiSiwr17ve8yOPbVH+usx/MCMRiLthiLRE4U9h4pd1oES7lhaiEOllUAABZuLsbhY4FRQFKSuxSgk9IcijAyyhs/3WZJvIcZiqAIQLuQ9bbKNlPPZeYpzJzPzPm5ublxCeomvlpnXJkxu6McoJN8suQXAMDS7QedFcRi5qzejeUh2TyDvX09De/KXw4ldKMo44H4MUMRLALQlYg6ElEagMsBTNN47iwA5xNRtjJJfL6yzXfENyIQL40gLusYW8LRiuPBWkG3Wj05/x/6rO6E+okTP8cvCZQH6JVvowdV+t2MGg3DioCZqwDcikADvhrAe8y8kogmEdEoACCi/kS0A8AlAP5NRCuVc4sBPIiAMlkEYJKyzXfE6zXk10nScOL5/rwGA9hbUn58BdE7EOH7UlPqvu4V1TXYsr/UPAEd5sWv/BddbxamzBEw8wxmPpGZOzPzw8q2e5l5mrK8iJnbMnMmMzdj5l4h577KzF2Uz2tmyONF4mnGGJKLx2/0f3gOAODCZwMBhXqem68Vc+ScVbstm1j9YlXkCGIr3ZwPllVICnYDeGayONFRy0QaC2ZgW3H9Hp0f4wqqaxj7E3zCOJQ9JeXIGz8dO+PIMXTDG4VY8Ysy32By23n3/1ZE3Ldwk3WD/QofPvNmIorAJcQTgs9g3Pnh8nrbS3xat7jfQ3NwJIEzTKqN/lbF0YEArIk9eXZu9BTqKckWmu9kMGAIUQQeJpJZyM+eREcSUAlGGy2GNq16TCMXv/CdAYnUeXz2uqj7Uy1UBFpqdYsZNTKiCDxMpBffz7bSRExSN/zpbwAAN7+5uN6+X0JMQ+EeQJo8ymycY09Jsq65qapOvN/dTkQReJhIj77VL8XmfaU49/EvLb2HoI1nQswxySE+tC99vUlTL9lORj+/wLJra/lXE7GTYBaiCDxMJM+PvUfK6/icm83S7Qexaa873Q79Oj8C1B0JPjxjta8mz2tcpvS8higCBzArsGXwP9SLkVz8wnf44zs/m3IPNYI9q2U7Dlp2j3g5/8mvbb/n/iPl2HnI+cCscM8ZTSbCBGk/tfwbEnwZGVEEDvB+ofVVtfYctq50YVCPXfHSQsvu4SUum/IDBj06z2kx6plH/NRJ1jIiCDUN7Tl8DP+cZX+9BrciisABNu61vohKekqyZdcOvnNHyqtw+FhlbQI0vxJMBuc04SPNkjhcafcfKa/NbBqNPSXHcOGz3+i+vlXoVXqzVu7Cc/M3WCOMBxFF4AQ2jFDTUqz5af/15UY8FtKTuuj5BRj5tHsaBD9jxgCg30Nz8Pjs2D3ldbuOYEXRYcx3SUF4LebW0EN8NFjShCgCB7DDVmmFz/aew8fwxOy12BVidtq0txTbDzhvHw+lyvYoU3fYnoMN3UOfrTJ0nc0aHAGCHkrXvu6OMqnSsBtDFIEDvPjVRsvvkZps/k874JG5qPSAv/Zpj841fI1v1+/DnCh5c9xI0Ab+2bKdms+pZq430f39pv0xz7M0SjgONM0RuP/RdQxRBAlKsh/yMkdg3xFjNvtjldW48pWFuOENbWk/gpHcTuf6DwaU6/GXf+P7rXFNdLvt8dLSyIcqC1EKdRFFkKDYnZX5UFklikvdMWlqlEgpHf73cxG2F5fV2ZY3fjoOlrljsjyoAPQElu+LM9bAbQ2plhFBoVLNb8n2g1KbIAxRBIIpjHnxO5zzzy+dFsMUfh0hD89t7y7B8Ke/qW1EIjWi63eXOJLmI9i21dYs0EC8YrqtGdXSrr9bGCiPftHzC7B2t/Wee15CFEGCYnehlvV7jkSsGZtIHCmvqv0/Rz+nnjLhvCe/xmfLfjHlfnrNTXoVULw943g71Fa5TuuVp9rHiRnVEEVgM1p8tM3AZSZcz3Kssn6qji9W7cayHQexN4pZZXtxme3eSzXMuhv2eEcu8SqQZIs6KHrzCM1zidurW0hxWgC/cfZj6mkh3M6bP2x1WgRHmLbkF1zav12dbXd8sCzmef/8Yh3++UUgLfO8289Gp9xGlsgXCrN+k42aHtDiaBCvaSj82oFyq8aVQ2m5vtxaRh0KEg1TRgREVEBEa4loAxGNV9mfTkTvKvsXElGesj2PiI4S0RLl86IZ8riZeCpKxYOeouZaWLBhX8xj3ORSmDd+OraaUI+3vEpfA7NbJbXHLpt+83gaZ7XEhdecnhf7PBMmW//435/RccIMw9cBgN++9IMp1/ErhhUBESUDeB7AcAA9AfyWiHqGHXY9gAPM3AXAkwD+HrJvIzP3UT43G5VHCGD2CFzL9aqqGTsOlMU+0CYueMp4AroXv9qky3wy8JH6MQzlNpmInpu3Xr+tXOWEBqmx05METexNG6YC0G4qenjG6trlaUvNmUcRjGPGiGAAgA3MvImZKwC8A2B02DGjAUxVlj8AMJTsns0UbGFFUXylE63gWKXxBrjo4FHsKTHWo4+3ULxeO/yc1XtQVqEvv1BQyYUX
tYl5niLbwbJArimtrsMzV+zSdR/BHsxQBG0AbA9Z36FsUz2GmasAHALQTNnXkYh+JqKviGiwCfIIMG+y+Ok565E3frpmO266RTmOnOSLlbt1N7ChBL+6eWt266oTce7jX+m+l16dEwyGGxXiAaXlpw5VbuWVNVEnztX4Zv3e2uUPF1ufjTfIT9sO2HYvL+H0W7sTQHtm7gvgLwDeJqLGagcS0TgiKiSiwr1796odIoRg1oDryTlKHVqNDcy/vrQ+fYaVbNtf37R137SVmLE8/p5sMLfUda8X4sOftDd6m/fpn+PQa7sPVrNT844KsmT7wXoeUKHmshpm3VXxrnrlx9rl299fqutcI1z1sqROV8MMRVAEINStoq2yTfUYIkoB0ATAfmYuZ+b9AMDMiwFsBHCi2k2YeQoz5zNzfm5urgli28/8tfa5rH38cxEmf77GtOuVa3R7/XFLsWn3NAO9LpwPfLpSdbtZIyyrA830KoKgPITjpqjwpIgXPb8A05fXzV8UepcaZs/UyRaLtDpmKIJFALoSUUciSgNwOYBpYcdMAzBWWR4DYB4zMxHlKpPNIKJOALoC2GSCTK5ES1ZHMzFzGOxESL4ZMRcHXJD+Yc2uktrl+6apKxqz0Psz7VGikEvKq2o7DkT1A79Ckw3OWbUbEz5aXrs+6NF5ltYjNpMjcdRo8AOGFYFi878VwCwAqwG8x8wriWgSEY1SDnsFQDMi2oCACSjoYnoWgGVEtASBSeSbmdldXUoPk2JiZjAn+nvvLtpm+BoPTzeWkjmIkY7k32fWHZlZ2Xs2cu3QrKNDH/8KRREmkN9auDXuHEWRsHO0LNTHlIAyZp4BYEbYtntDlo8BuETlvA8BfGiGDF7g5+0Hbb2fmcnQnJhkqzAh5fUeHXl3AHsUXg0zki2K/TaiCJbtOATguPkkkreTFeaVb9fvwzndWph+3Vh8sHgHxvRra/t93YZEFtvIpzb7Ta/epc2V81hlNVKSCClRahjoUSo7DpRh6ndbUF0D3Pur8JASe9HTZkWbMDXa9oUGmllpZRv13LeGrxEcSB4N+T5CzXRWqLDDDuWpWhMh06zfcNpryDfocRs0C2Zttv3THp2LP79X13PDSGTuut0leOXbzXh1wea4zg9i57TeoaOV6H7PTBRZVG0tNNDsCgujYM2YE3ng04A57faQZ+KRkEAwo7y1sH66khnLtRfTMROZOw4gisAmzAjJj4d/zFqLT5YURU1NfLCsEou3FGNEWO3hrSqulFogIlMK45jxkgZNJU/MXocNeyJnvgyaQdbuLlHdb2Z50cKt3vBlX150qHY5NysdAOpVM4uHiR+vqLet1IGOEmB+KhavIorAJpxSBKt3Hsaf3lmC/g/PiXrcL4eOYVXYMDleiaurzUkkZsYrGjRzPzN3PSZ8tCyiDf0HDeUZ7cSuLLWxCH5fF/RqBSDgITTXosydRmstx4O4kwYQRWATTqU//3KtvuC7UL/7eJVXZnpKxHTDI5/5RrOZzAzV2TgjtXZ50ZYDmLtavQ7xLW/9FPU6T89db4I0x4kVLOaW2g6/ejYw53CssrreiNFsXv7WmCkxHtxWctMpRBHYhFpyLzdSUV1TayapjtNr595PVkQ066z85TCKy7TlpTHjK5uzeje+XX88c2pVnF418UT5RiPWpG60iWs7CY4Sd6mMGBMBtyhcpxFFYBNujbwM9xXfsq+sdlu8jeb6PUdUba/BiWu7g9OufOV4WgG1uYvwOsR2EKuhj/e7F/Tx1sJtEWtU+wlRBDYRy0bvFGdMnldnfcQz39TWHq4wkD5ZLYIz2LZpNZO99p35poLMtPoe004M1oKms7zx01UnsZ3yoonEzJXHcy01zrDO6/z8J/Un2gO0FdOJxHCLTV5eQBSBDdz3SX0vCSdYExJXMH/tnohFV4K9UbMnLINzDlU1NZpGBduLzXflvOGNRfW2GVF48ZKcRLXRudtVajhMX+YuRRDK4WPWpWlYF2dReSdSoCQSoghsYOr37ijzOPW7LVinuEde+9oifLY0emNz1GQ7ddA8du7jX+EFh7KUqtUoqM2waiPJSVSraIN/3164Fa8qE6bSrGmnvKpad/ptoS6iCHzEf3/cjoKnvq5tkGOl/73nf8ZHMmUVVTh0tBJ546fjlAe+qN3+3LwNMc8deXJrw/ePRkVVDUqOVWKPSnlJqzl8rKr2d2jeKA3b9pfhro9XYJLiQtmmaYbtMrmFw8f0TeA+aILbad746Yav4WVEEVhMvNWprKKGgVIDRVb00vPeWfjzu0sA1E1lfUxDLeDcRumWyJQ3fjqKSysw/sNlOOn+L9CqSQNL7hOLexST4dpdR/C/JXUztw/u6s1U62Zw8v1f6HpvzPbo8iOSa8hiXvgyds/XbuzuAc9TCUA6s0tzW2UI59QHZ9cun9G5me15oIDjMR53fby83r5KB+Yt3ERVDSNNmQA+Ul6FBRv2ITcrHae2z6537Jqd6tHgejlUVokmDVNjH5iAiCKwmNW7zHlIzWTYE8aLuhulb/tsrN55GN1bZalGd57ywBe2+Xh/t9FdUcX//XEbSn2eNz9oNnt9wWbcr+Q+atIgFUvvO7/esfs11kuOxZ0fLsOLV/Uz5VpeQ0xDVuMuy5BrWLPzMIY//Q1mrdxdp3e+YMM+5I2fbmugzzQHRgPRmPDRcvy07aDTYjjK1uJSfL1ub60SUMNsZbm86BAu+/f3pl7TK4giEBzhi1WBVA83v7kYxaUVtUFdoYnO/IzfRwQFT32Dq1/9sc628FCBXvfNwnuLtqNBarIp9yw6eBQLNxdj0KOBTLHrd5dETdaYSIhpyGJ2OeCR4kUG/2M+crPSffPixcIrGUrtpKyiGoVbivHsvA14/NJTAAB3fLjM9PvsPHQMX6zchXH/WQwAmP3ns9C1ZRYAYPqyX3Bqh2y0dsjBwCrIi4EY+fn5XFhY6LQYmjj7H/Ox1YEUBoKQyJzYslHcwWfxsO6h4UhOInS+K1CIccvkkZbfc29JeW3678rqGqQmJ+FIeRUapcfffyeixcycH77dFNMQERUQ0Voi2kBE41X2pxPRu8r+hUSUF7JvgrJ9LRFdYIY8bkKUgCCYz+7D9o4c+z74Be6ftrLOttLyKqwwYMqsqWG8vXAbqmsYh8oq8dScdTikFBZiZvR/eA7W7y5BRVUNuk78HPPW7Ebv+2ZhT4n5VgbDioCIkgE8D2A4gJ4AfktE4fUJrwdwgJm7AHgSwN+Vc3sCuBxALwAFAF5QrqeL8qpqw+kQmFk1TL2sogoFT31de/2t+0s1ZYZk5noyScpbIRx5JOLD7qyhpeXV+M8PxzMElFdVo9d9s3ChkqZ7za7DWKc02sFU7rHaiU37juCuj5dj9c7DmPr9Fjw1Zz3+/fVG/LTtAK55LZAKZe+R8tpMtde9HrCC3PTGYny4eAfOffzLOterqKrBzBU7sWmv+kgp2ryTGXMEAwBsYOZNAEBE7wAYDSB0un80gPuV5Q8APEcBn8HRAN5h5nIAm4log3K9mFP3P24uxvbiMgzslIORz3wb0M4
[... base64-encoded "image/png" output payload omitted (matplotlib waveform figure produced by the librosa.display.waveplot cell below) ...]\n",
- "text/plain": [
- "
" - ] - }, - "metadata": { - "needs_background": "light" - }, - "output_type": "display_data" - } - ], - "source": [ - "librosa.display.waveplot(samples_out, sr=sr)\n", - "print(len(samples_out))" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "id": "infectious-welcome", - "metadata": {}, - "outputs": [], - "source": [ - "import librosa\n", - "x, sr = librosa.load(wav, sr=16000)" - ] - }, - { - "cell_type": "code", - "execution_count": 46, - "id": "musical-anatomy", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "float32\n", - "float64\n" - ] - } - ], - "source": [ - "print(x.dtype)\n", - "print(samples.dtype)" - ] - }, - { - "cell_type": "code", - "execution_count": 30, - "id": "lucky-paraguay", - "metadata": {}, - "outputs": [], - "source": [ - "sf.read?" - ] - }, - { - "cell_type": "code", - "execution_count": 31, - "id": "annual-christmas", - "metadata": {}, - "outputs": [], - "source": [ - "librosa.load?" - ] - }, - { - "cell_type": "code", - "execution_count": 47, - "id": "infectious-seeker", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "True" - ] - }, - "execution_count": 47, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "np.allclose(x, samples)" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "pregnant-conditioning", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "import random" - ] - }, - { - "cell_type": "code", - "execution_count": 56, - "id": "logical-happiness", - "metadata": {}, - "outputs": [], - "source": [ - "np.random.uniform?" - ] - }, - { - "cell_type": "code", - "execution_count": 58, - "id": "rocky-plastic", - "metadata": {}, - "outputs": [], - "source": [ - "random.uniform?" - ] - }, - { - "cell_type": "code", - "execution_count": 60, - "id": "focused-compensation", - "metadata": {}, - "outputs": [], - "source": [ - "np.random.RandomState?" - ] - }, - { - "cell_type": "code", - "execution_count": 66, - "id": "centered-repository", - "metadata": {}, - "outputs": [], - "source": [ - "random.sample?" 
- ] - }, - { - "cell_type": "code", - "execution_count": 95, - "id": "inner-invite", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "array(['3', '5'], dtype=' 1.0, speed up the audio;\n", - " speed_rate = 1.0, unchanged;\n", - " speed_rate < 1.0, slow down the audio;\n", - " speed_rate <= 0.0, not allowed, raise ValueError.\n", - " :type speed_rate: float\n", - " :raises ValueError: If speed_rate <= 0.0.\n", - " \"\"\"\n", - " if speed_rate <= 0:\n", - " raise ValueError(\"speed_rate should be greater than zero.\")\n", - " old_length = samples.shape[0]\n", - " new_length = int(old_length / speed_rate)\n", - " old_indices = np.arange(old_length)\n", - " new_indices = np.linspace(start=0, stop=old_length, num=new_length)\n", - " samples = np.interp(new_indices, old_indices, samples)\n", - " return samples" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "tracked-purse", - "metadata": {}, - "outputs": [], - "source": [ - "samples, sr = sf.read(wav)\n", - "samples_out = change_speed(samples, 1.0)" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "id": "steady-mileage", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "execution_count": 14, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "ipd.Audio(samples, rate=sr) # load a NumPy array" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "regulated-google", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "execution_count": 13, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "ipd.Audio(samples_out, rate=sr) # load a NumPy array" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "homeless-forge", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "execution_count": 15, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "samples_out = change_speed(samples, 1.1)\n", - "ipd.Audio(samples_out, rate=sr) # load a NumPy array" - ] - }, - { - "cell_type": "code", - "execution_count": 29, - "id": "exciting-blocking", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - " \n", - " " - ], - "text/plain": [ - "" - ] - }, - "execution_count": 29, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "samples_out = change_speed(samples, 0.9)\n", - "ipd.Audio(samples_out, rate=sr) # load a NumPy array" - ] - }, - { - "cell_type": "code", - "execution_count": 30, - "id": "through-botswana", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "66078\n" - ] - } - ], - "source": [ - "print(len(samples_out))" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "id": "cellular-violence", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Collecting matplotlib\n", - " Downloading matplotlib-3.4.1-cp37-cp37m-manylinux1_x86_64.whl (10.3 MB)\n", - "\u001b[K |████████████████████████████████| 10.3 MB 691 kB/s eta 0:00:01\n", - "\u001b[?25hRequirement already satisfied: pillow>=6.2.0 in /workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages (from matplotlib) (8.1.0)\n", - "Requirement already satisfied: numpy>=1.16 in /workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages (from matplotlib) 
(1.20.1)\n", - "Requirement already satisfied: python-dateutil>=2.7 in /workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages (from matplotlib) (2.8.1)\n", - "Collecting kiwisolver>=1.0.1\n", - " Downloading kiwisolver-1.3.1-cp37-cp37m-manylinux1_x86_64.whl (1.1 MB)\n", - "\u001b[K |████████████████████████████████| 1.1 MB 45.9 MB/s eta 0:00:01\n", - "\u001b[?25hRequirement already satisfied: pyparsing>=2.2.1 in /workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages (from matplotlib) (2.4.7)\n", - "Collecting cycler>=0.10\n", - " Downloading cycler-0.10.0-py2.py3-none-any.whl (6.5 kB)\n", - "Requirement already satisfied: six in /workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages (from cycler>=0.10->matplotlib) (1.15.0)\n", - "Installing collected packages: kiwisolver, cycler, matplotlib\n", - "Successfully installed cycler-0.10.0 kiwisolver-1.3.1 matplotlib-3.4.1\n" - ] - } - ], - "source": [ - "!pip install matplotlib\n", - "%matplotlib inline\n", - "import matplotlib.pyplot as plt\n", - "import librosa.display" - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "id": "undefined-parade", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 23, - "metadata": {}, - "output_type": "execute_result" - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYIAAAEKCAYAAAAfGVI8AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/Z1A+gAAAACXBIWXMAAAsTAAALEwEAmpwYAAA8K0lEQVR4nO2dd3hUZfbHvycdQoAEQpEWmlQVJICoKApqABdcF8u6KlbUXX+77rqrIFZs7Lr2si5WXHXtrigI0myoSFB67xDpoYQEUs/vj7kTJpM7M/fO7XPP53nmye33ZObe97zveU8hZoYgCILgX5KcFkAQBEFwFlEEgiAIPkcUgSAIgs8RRSAIguBzRBEIgiD4HFEEgiAIPscURUBEBUS0log2ENF4lf1/IaJVRLSMiOYSUYeQfWOJaL3yGWuGPIIgCIJ2yGgcARElA1gH4DwAOwAsAvBbZl4Vcsw5ABYycxkR3QJgCDNfRkQ5AAoB5ANgAIsB9GPmA4aEEgRBEDRjxohgAIANzLyJmSsAvANgdOgBzDyfmcuU1R8AtFWWLwAwm5mLlcZ/NoACE2QSBEEQNJJiwjXaANgesr4DwMAox18P4PMo57aJdcPmzZtzXl6ePikFQRB8zuLFi/cxc274djMUgWaI6EoEzEBnx3HuOADjAKB9+/YoLCw0WTpBEITEhoi2qm03wzRUBKBdyHpbZVu4AMMATAQwipnL9ZwLAMw8hZnzmTk/N7eeQhMEQRDixAxFsAhAVyLqSERpAC4HMC30ACLqC+DfCCiBPSG7ZgE4n4iyiSgbwPnKNkEQBMEmDJuGmLmKiG5FoAFPBvAqM68kokkACpl5GoDHADQC8D4RAcA2Zh7FzMVE9CACygQAJjFzsVGZBEEQBO0Ydh91gvz8fJY5AkEQBH0Q0WJmzg/fLpHFgiAIPkcUgSAIgs8RRSAIguBzRBEIgiD4HFEEgif42/tLsXirpKASBCuwNbJYEOLl/cU7kJ6ahH4dsp0WRRASDhkRCIKQcKwoOoSZK3Y5LYZnEEUgCELCMeGj5bj5zcVOi+EZRBEInsGDsY+CQzDkYdGDKALBM8irLWhFOg36EEUgCELCIYpAH6IIBEFIOEQP6EMUgeAZ3l64zWkRBI/gxWSaTiKKQBAEweeIIhAEIaGorK7Bml0lTovhKUQRCIKQUBworXBaBM8hikAQhISiRqYHdCOKQPAUFVU1OPHuz50WQ3AxNTJRrBtTFAERFRDRWiLaQETjVfafRUQ/EVEVEY0J21dNREuUz7TwcwUhlKMV1aioqnFaDMHFiCLQj+Hso0SUDOB5AOcB2AFgERFNY+ZVIYdtA3ANgL+qXOIoM/cxKofgE8hpAQS3UyP9BN2YMSIYAGADM29i5goA7wAYHXoAM29h5mUA5CcSdDFj+U7kjZ/utBiCh5ARgX7MUARtAGwPWd+hbNNKBhEVEtEPRHSRCfIkNHnjp2P/kXKnxbCNtWFugEkyIhBiMPKZb5wWwXO4YbK4AzPnA7gCwFNE1FntICIapyiMwr1799orocvYU+IfRRDetyMSTSBEp7SiunZ5n486TUYwQxEUAWgXst5W2aYJZi5S/m4C8CWAvhGOm8LM+cycn5ubG7+0HmD+mj1Ysv1gve3HKgMPeLWf/ONkmC8YYM9hUQRaMEMRLALQlYg6ElEagMsBaPL+IaJsIkpXlpsDOAPAquhnJT7Xvr4If3jrp3rb/zFzLQB/KYLw/3Tu6t2OyCF4E5kv0IZhRcDMVQBuBTALwGoA7zHzSiKaRESjAICI+hPRDgCXAPg3Ea1UTu8BoJCIlgKYD2BymLeRr8kbP73OROn+0kDv5kCZfyMn//TOEgCSVEzQhjwm2jCleD0zzwAwI2zbvSHLixAwGYWf9x2Ak8yQIdFQM4UHH+oHPl2FId1a2CuQQ0R6kcurapCRmmyvMB6kpoZRw4yUZDdMB9qPjAi04c+nw8UcVHr7as9vsZJDZfO+UjtFcpRIJQeHPfGVzZJ4k7/PWoPu98x0WgzH+GHTfqdF8ASiCFzG8qJDqts37DmCbzfss1ka54nUodtx4Ki9gniUVb8cRpWP5pQ+WVLXT0UUgTZEEXiEV77d7LQIjuCfJsxanGgQDx+rxLIdB2295+qddeNOkpOkidOCfEseoVri5gUDXD7lB9vv+cQX6zDquQW23zcUn06N6Ea+Jo/gp+F9KDLX
[... remainder of the base64-encoded "image/png" output payload omitted (matplotlib waveform figure produced by the librosa.display.waveplot cell below) ...]\n",
- "text/plain": [
- "
" - ] - }, - "metadata": { - "needs_background": "light" - }, - "output_type": "display_data" - } - ], - "source": [ - "librosa.display.waveplot(samples_out, sr=sr)" - ] - }, - { - "cell_type": "code", - "execution_count": 33, - "id": "special-delicious", - "metadata": {}, - "outputs": [], - "source": [ - "import getpass" - ] - }, - { - "cell_type": "code", - "execution_count": 34, - "id": "seasonal-consensus", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "['GetPassWarning',\n", - " '__all__',\n", - " '__builtins__',\n", - " '__cached__',\n", - " '__doc__',\n", - " '__file__',\n", - " '__loader__',\n", - " '__name__',\n", - " '__package__',\n", - " '__spec__',\n", - " '_raw_input',\n", - " 'contextlib',\n", - " 'fallback_getpass',\n", - " 'getpass',\n", - " 'getuser',\n", - " 'io',\n", - " 'os',\n", - " 'sys',\n", - " 'termios',\n", - " 'unix_getpass',\n", - " 'warnings',\n", - " 'win_getpass']" - ] - }, - "execution_count": 34, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "dir(getpass)" - ] - }, - { - "cell_type": "code", - "execution_count": 35, - "id": "dress-distinction", - "metadata": {}, - "outputs": [], - "source": [ - "getpass?" - ] - }, - { - "cell_type": "code", - "execution_count": 38, - "id": "rental-anthony", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Worker:" - ] - } - ], - "source": [ - "import multiprocessing\n", - "import cProfile\n", - "import time\n", - "\n", - "def worker(num):\n", - " time.sleep(3)\n", - " print('Worker:', num)\n", - "\n", - "def profile_worker(num):\n", - " cProfile.runctx('worker(num)', globals(), locals(), 'profile-%d.out' %num)\n", - "\n", - "\n", - "\n", - "for i in range(5):\n", - " p = multiprocessing.Process(target=profile_worker, args=(i,))\n", - " p.start()" - ] - }, - { - "cell_type": "code", - "execution_count": 41, - "id": "separated-restriction", - "metadata": {}, - "outputs": [], - "source": [ - "!ls" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "painted-variable", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "(2, 2)\n", - "[ 1 20]\n" - ] - } - ], - "source": [ - "import numpy as np\n", - "l = [(1, 20), (2, 30)]\n", - "scores = np.array(l)\n", - "print(scores.shape)\n", - "print(scores[0])" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "satellite-insider", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[0 1]\n" - ] - } - ], - "source": [ - "sort_idx = np.argsort(scores[:, -1])\n", - "print(sort_idx)" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "developed-thirty", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[[ 1 20]\n", - " [ 2 30]]\n" - ] - } - ], - "source": [ - "sorted_val_scores = scores[sort_idx][::1]\n", - "print(sorted_val_scores)" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "official-bench", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[[ 1 20]\n", - " [ 2 30]]\n" - ] - } - ], - "source": [ - "sorted_val_scores = scores[sort_idx]\n", - "print(sorted_val_scores)" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "ranking-camera", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - 
"b'\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x14\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x1e\\x00\\x00\\x00\\x00\\x00\\x00\\x00'\n", - "[ 1 20 2 30]\n", - "[[ 1 20]\n", - " [ 2 30]]\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages/ipykernel_launcher.py:1: DeprecationWarning: tostring() is deprecated. Use tobytes() instead.\n", - " \"\"\"Entry point for launching an IPython kernel.\n", - "/workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages/ipykernel_launcher.py:3: DeprecationWarning: The binary mode of fromstring is deprecated, as it behaves surprisingly on unicode inputs. Use frombuffer instead\n", - " This is separate from the ipykernel package so we can avoid doing imports until\n" - ] - } - ], - "source": [ - "a = scores.tostring()\n", - "print(a)\n", - "b = np.fromstring(a, scores.dtype)\n", - "print(b)\n", - "print(scores)" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "breeding-proxy", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "numpy.int16" - ] - }, - "execution_count": 11, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "np.int16" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "coordinate-hungary", - "metadata": {}, - "outputs": [], - "source": [ - "dtype = np.dtype('int16')" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "id": "specified-jackson", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "int16\n", - "16\n" - ] - } - ], - "source": [ - "print(dtype)\n", - "dtype is np.int16\n", - "print(np.iinfo(dtype).bits)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "activated-insight", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.0" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/.notebook/train_test.ipynb b/.notebook/train_test.ipynb deleted file mode 100644 index 67212e50a..000000000 --- a/.notebook/train_test.ipynb +++ /dev/null @@ -1,1887 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "cloudy-glass", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "os.environ['CUDA_VISISBLE_DEVICES'] = '0'" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "grand-stephen", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages/paddle/fluid/layers/utils.py:26: DeprecationWarning: `np.int` is a deprecated alias for the builtin `int`. To silence this warning, use `int` by itself. Doing this will not modify any behavior and is safe. When replacing `np.int`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. 
If you wish to review your current use, check the release note link for additional information.\n", - "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", - " def convert_to_list(value, n, name, dtype=np.int):\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "2.0.0\n" - ] - } - ], - "source": [ - "import paddle\n", - "print(paddle.__version__)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "isolated-prize", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "romance-samuel", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{'num_samples': 5, 'beam_size': 500, 'num_proc_bsearch': 8, 'num_conv_layers': 2, 'num_rnn_layers': 3, 'rnn_layer_size': 2048, 'alpha': 2.5, 'beta': 0.3, 'cutoff_prob': 1.0, 'cutoff_top_n': 40, 'use_gru': False, 'use_gpu': True, 'share_rnn_weights': True, 'infer_manifest': 'examples/aishell/data/manifest.dev', 'mean_std_path': 'examples/aishell/data/mean_std.npz', 'vocab_path': 'examples/aishell/data/vocab.txt', 'lang_model_path': 'models/lm/common_crawl_00.prune01111.trie.klm', 'model_path': 'examples/aishell/checkpoints/step_final', 'decoding_method': 'ctc_beam_search', 'error_rate_type': 'wer', 'specgram_type': 'linear'}\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n", - " and should_run_async(code)\n" - ] - } - ], - "source": [ - "import sys\n", - "import argparse\n", - "import functools\n", - "from utils.utility import add_arguments, print_arguments\n", - "parser = argparse.ArgumentParser(description=__doc__)\n", - "add_arg = functools.partial(add_arguments, argparser=parser)\n", - "# yapf: disable\n", - "add_arg('num_samples', int, 5, \"# of samples to infer.\")\n", - "add_arg('beam_size', int, 500, \"Beam search width.\")\n", - "add_arg('num_proc_bsearch', int, 8, \"# of CPUs for beam search.\")\n", - "add_arg('num_conv_layers', int, 2, \"# of convolution layers.\")\n", - "add_arg('num_rnn_layers', int, 3, \"# of recurrent layers.\")\n", - "add_arg('rnn_layer_size', int, 2048, \"# of recurrent cells per layer.\")\n", - "add_arg('alpha', float, 2.5, \"Coef of LM for beam search.\")\n", - "add_arg('beta', float, 0.3, \"Coef of WC for beam search.\")\n", - "add_arg('cutoff_prob', float, 1.0, \"Cutoff probability for pruning.\")\n", - "add_arg('cutoff_top_n', int, 40, \"Cutoff number for pruning.\")\n", - "add_arg('use_gru', bool, False, \"Use GRUs instead of simple RNNs.\")\n", - "add_arg('use_gpu', bool, True, \"Use GPU or not.\")\n", - "add_arg('share_rnn_weights',bool, True, \"Share input-hidden weights across \"\n", - " \"bi-directional RNNs. 
Not for GRU.\")\n", - "add_arg('infer_manifest', str,\n", - " 'examples/aishell/data/manifest.dev',\n", - " \"Filepath of manifest to infer.\")\n", - "add_arg('mean_std_path', str,\n", - " 'examples/aishell/data/mean_std.npz',\n", - " \"Filepath of normalizer's mean & std.\")\n", - "add_arg('vocab_path', str,\n", - " 'examples/aishell/data/vocab.txt',\n", - " \"Filepath of vocabulary.\")\n", - "add_arg('lang_model_path', str,\n", - " 'models/lm/common_crawl_00.prune01111.trie.klm',\n", - " \"Filepath for language model.\")\n", - "add_arg('model_path', str,\n", - " 'examples/aishell/checkpoints/step_final',\n", - " \"If None, the training starts from scratch, \"\n", - " \"otherwise, it resumes from the pre-trained model.\")\n", - "add_arg('decoding_method', str,\n", - " 'ctc_beam_search',\n", - " \"Decoding method. Options: ctc_beam_search, ctc_greedy\",\n", - " choices = ['ctc_beam_search', 'ctc_greedy'])\n", - "add_arg('error_rate_type', str,\n", - " 'wer',\n", - " \"Error rate type for evaluation.\",\n", - " choices=['wer', 'cer'])\n", - "add_arg('specgram_type', str,\n", - " 'linear',\n", - " \"Audio feature type. Options: linear, mfcc.\",\n", - " choices=['linear', 'mfcc'])\n", - "# yapf: disable\n", - "args = parser.parse_args([])\n", - "print(vars(args))" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "timely-bikini", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages/scipy/fftpack/__init__.py:103: DeprecationWarning: The module numpy.dual is deprecated. Instead of using dual, use the functions directly from numpy or scipy.\n", - " from numpy.dual import register_func\n", - "/workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages/scipy/special/orthogonal.py:81: DeprecationWarning: `np.int` is a deprecated alias for the builtin `int`. To silence this warning, use `int` by itself. Doing this will not modify any behavior and is safe. When replacing `np.int`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. If you wish to review your current use, check the release note link for additional information.\n", - "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", - " from numpy import (exp, inf, pi, sqrt, floor, sin, cos, around, int,\n", - "/workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages/numba/core/types/__init__.py:108: DeprecationWarning: `np.long` is a deprecated alias for `np.compat.long`. To silence this warning, use `np.compat.long` by itself. In the likely event your code does not need to work on Python 2 you can use the builtin `int` for which `np.compat.long` is itself an alias. Doing this will not modify any behaviour and is safe. When replacing `np.long`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. If you wish to review your current use, check the release note link for additional information.\n", - "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", - " long_ = _make_signed(np.long)\n", - "/workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages/numba/core/types/__init__.py:109: DeprecationWarning: `np.long` is a deprecated alias for `np.compat.long`. To silence this warning, use `np.compat.long` by itself. 
In the likely event your code does not need to work on Python 2 you can use the builtin `int` for which `np.compat.long` is itself an alias. Doing this will not modify any behaviour and is safe. When replacing `np.long`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. If you wish to review your current use, check the release note link for additional information.\n", - "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", - " ulong = _make_unsigned(np.long)\n" - ] - } - ], - "source": [ - "from data_utils.dataset import create_dataloader\n", - "batch_reader = create_dataloader(\n", - " manifest_path=args.infer_manifest,\n", - " vocab_filepath=args.vocab_path,\n", - " mean_std_filepath=args.mean_std_path,\n", - " augmentation_config='{}',\n", - " #max_duration=float('inf'),\n", - " max_duration=27.0,\n", - " min_duration=0.0,\n", - " stride_ms=10.0,\n", - " window_ms=20.0,\n", - " max_freq=None,\n", - " specgram_type=args.specgram_type,\n", - " use_dB_normalization=True,\n", - " random_seed=0,\n", - " keep_transcription_text=False,\n", - " is_training=False,\n", - " batch_size=args.num_samples,\n", - " sortagrad=True,\n", - " shuffle_method=None,\n", - " dist=False)" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "organized-warrior", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n", - " and should_run_async(code)\n", - "/workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages/paddle/fluid/dataloader/dataloader_iter.py:354: DeprecationWarning: `np.object` is a deprecated alias for the builtin `object`. To silence this warning, use `object` by itself. Doing this will not modify any behavior and is safe. \n", - "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", - " if arr.dtype == np.object:\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "test Tensor(shape=[5, 6], dtype=int32, place=CUDAPinnedPlace, stop_gradient=True,\n", - " [[14 , 34 , 322 , 233 , 0 , 0 ],\n", - " [238 , 38 , 122 , 164 , 0 , 0 ],\n", - " [8 , 52 , 49 , 42 , 0 , 0 ],\n", - " [109 , 47 , 146 , 193 , 210 , 479 ],\n", - " [3330, 1751, 208 , 1923, 0 , 0 ]])\n", - "test raw 大时代里的的\n", - "test raw 煲汤受宠的的\n", - "audio len Tensor(shape=[5], dtype=int64, place=CUDAPinnedPlace, stop_gradient=True,\n", - " [163, 167, 180, 186, 186])\n", - "test len Tensor(shape=[5], dtype=int64, place=CUDAPinnedPlace, stop_gradient=True,\n", - " [4, 4, 4, 6, 4])\n", - "audio Tensor(shape=[5, 161, 186], dtype=float32, place=CUDAPinnedPlace, stop_gradient=True,\n", - " [[[ 1.11669052, 0.79015088, 0.93658292, ..., 0. , 0. , 0. ],\n", - " [ 0.83549136, 0.72643483, 0.83578080, ..., 0. , 0. , 0. ],\n", - " [-0.89155018, -0.18894747, -0.53357804, ..., 0. , 0. , 0. ],\n", - " ...,\n", - " [ 0.33386710, -0.81240511, 0.12869737, ..., 0. , 0. , 0. ],\n", - " [-0.17537928, 0.58380985, 0.70696265, ..., 0. , 0. , 0. ],\n", - " [-0.84175998, 1.22041416, 0.07929770, ..., 0. , 0. , 0. 
]],\n", - "\n", - " [[-0.35964420, 0.77392709, 0.71409988, ..., 0. , 0. , 0. ],\n", - " [-0.15990183, 0.42962283, 0.06222462, ..., 0. , 0. , 0. ],\n", - " [-0.31166190, -0.74864638, -0.52836996, ..., 0. , 0. , 0. ],\n", - " ...,\n", - " [-0.27546275, 0.32889456, 0.12410031, ..., 0. , 0. , 0. ],\n", - " [ 0.16264282, 0.49418071, -0.15960945, ..., 0. , 0. , 0. ],\n", - " [ 0.12476666, 0.00516864, 1.16021466, ..., 0. , 0. , 0. ]],\n", - "\n", - " [[ 0.90202141, 1.48541915, 0.92062062, ..., 0. , 0. , 0. ],\n", - " [ 0.82661545, 1.37171340, 0.86746097, ..., 0. , 0. , 0. ],\n", - " [-0.62287915, -0.48645937, 0.35041964, ..., 0. , 0. , 0. ],\n", - " ...,\n", - " [ 0.07376949, 0.07138316, 0.76355994, ..., 0. , 0. , 0. ],\n", - " [-0.32306790, 0.43247896, 1.27311838, ..., 0. , 0. , 0. ],\n", - " [-0.97667056, 0.60747612, 0.79181534, ..., 0. , 0. , 0. ]],\n", - "\n", - " [[ 0.72022128, 0.95428467, 0.92766261, ..., 0.29105374, -0.45564806, -0.62151009],\n", - " [ 0.42083180, 0.49279949, 0.82724041, ..., -0.17333922, -1.45363355, -0.61673522],\n", - " [-0.76116520, -0.84750438, -0.09512503, ..., -1.01497340, -1.42781055, -0.80859023],\n", - " ...,\n", - " [-0.23009977, 1.06155431, 1.09065628, ..., 0.25581080, 0.53794998, -1.22650719],\n", - " [-1.37693381, 0.30778193, 0.17152318, ..., 0.51650339, 0.25580606, 0.83097816],\n", - " [-1.62180591, 1.30567718, 1.09928656, ..., -0.77590007, 1.27712476, 0.53189957]],\n", - "\n", - " [[ 1.03205252, -0.51535392, 0.21077573, ..., 0.76618457, 1.27425683, 1.52250278],\n", - " [ 0.82059991, 0.43990925, 0.13090958, ..., 0.86662549, 1.01687658, 1.48495352],\n", - " [-0.75489789, -0.01997089, -0.65174174, ..., 0.09061214, -0.55211234, -0.01614586],\n", - " ...,\n", - " [ 0.50985396, 1.84555030, 0.79185146, ..., 1.13666189, 1.19898069, 1.98158395],\n", - " [ 1.98721015, 2.52385354, 1.11714780, ..., 0.19416514, 1.11329341, 0.64460152],\n", - " [ 2.69512844, 1.90993905, 0.50245082, ..., -0.50902629, 0.03333465, -1.24584770]]])\n" - ] - } - ], - "source": [ - " for idx, (audio, audio_len, text, text_len) in enumerate(batch_reader()):\n", - " print('test', text)\n", - " print(\"test raw\", ''.join(batch_reader.dataset.vocab_list[i] for i in text[0]))\n", - " print(\"test raw\", ''.join(batch_reader.dataset.vocab_list[i] for i in text[-1]))\n", - " print('audio len', audio_len)\n", - " print('test len', text_len)\n", - " print('audio', audio)\n", - " break" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "confidential-radius", - "metadata": {}, - "outputs": [], - "source": [ - "# reader = batch_reader()\n", - "# audio, test , audio_len, text_len = reader.next()\n", - "# print('test', text)\n", - "# print('t len', text_len) #[B, T]\n", - "# print('audio len', audio_len)\n", - "# print(audio)" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "future-vermont", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "煲汤受宠\n" - ] - } - ], - "source": [ - "print(u'\\u7172\\u6c64\\u53d7\\u5ba0')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dental-sweden", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "sunrise-contact", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "hispanic-asthma", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "hearing-leadership", - "metadata": 
{}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "skilled-friday", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "copyrighted-measure", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 21, - "id": "employed-lightweight", - "metadata": {}, - "outputs": [], - "source": [ - "from model_utils.network import DeepSpeech2, DeepSpeech2Loss\n", - "\n", - "from data_utils.dataset import create_dataloader\n", - "batch_reader = create_dataloader(\n", - " manifest_path=args.infer_manifest,\n", - " vocab_filepath=args.vocab_path,\n", - " mean_std_filepath=args.mean_std_path,\n", - " augmentation_config='{}',\n", - " #max_duration=float('inf'),\n", - " max_duration=27.0,\n", - " min_duration=0.0,\n", - " stride_ms=10.0,\n", - " window_ms=20.0,\n", - " max_freq=None,\n", - " specgram_type=args.specgram_type,\n", - " use_dB_normalization=True,\n", - " random_seed=0,\n", - " keep_transcription_text=False,\n", - " is_training=False,\n", - " batch_size=args.num_samples,\n", - " sortagrad=True,\n", - " shuffle_method=None,\n", - " dist=False)\n", - "\n", - "\n", - "import paddle\n", - "from paddle import nn\n", - "from paddle.nn import functional as F\n", - "from paddle.nn import initializer as I\n", - "\n", - "import math\n", - "\n", - "def brelu(x, t_min=0.0, t_max=24.0, name=None):\n", - " t_min = paddle.to_tensor(t_min)\n", - " t_max = paddle.to_tensor(t_max)\n", - " return x.maximum(t_min).minimum(t_max)\n", - "\n", - "def sequence_mask(x_len, max_len=None, dtype='float32'):\n", - " max_len = max_len or x_len.max()\n", - " x_len = paddle.unsqueeze(x_len, -1)\n", - " row_vector = paddle.arange(max_len)\n", - " mask = row_vector > x_len # maybe a bug\n", - " mask = paddle.cast(mask, dtype)\n", - " print(f'seq mask: {mask}')\n", - " return mask\n", - "\n", - "\n", - "class ConvBn(nn.Layer):\n", - " def __init__(self, num_channels_in, num_channels_out, kernel_size, stride,\n", - " padding, act):\n", - "\n", - " super().__init__()\n", - " self.kernel_size = kernel_size\n", - " self.stride = stride\n", - " self.padding = padding\n", - "\n", - " self.conv = nn.Conv2D(\n", - " num_channels_in,\n", - " num_channels_out,\n", - " kernel_size=kernel_size,\n", - " stride=stride,\n", - " padding=padding,\n", - " weight_attr=None,\n", - " bias_attr=None,\n", - " data_format='NCHW')\n", - "\n", - " self.bn = nn.BatchNorm2D(\n", - " num_channels_out,\n", - " weight_attr=None,\n", - " bias_attr=None,\n", - " data_format='NCHW')\n", - " self.act = F.relu if act == 'relu' else brelu\n", - "\n", - " def forward(self, x, x_len):\n", - " \"\"\"\n", - " x(Tensor): audio, shape [B, C, D, T]\n", - " \"\"\"\n", - " x = self.conv(x)\n", - " x = self.bn(x)\n", - " x = self.act(x)\n", - "\n", - " x_len = (x_len - self.kernel_size[1] + 2 * self.padding[1]\n", - " ) // self.stride[1] + 1\n", - "\n", - " # reset padding part to 0\n", - " masks = sequence_mask(x_len) #[B, T]\n", - " masks = masks.unsqueeze(1).unsqueeze(1) # [B, 1, 1, T]\n", - " x = x.multiply(masks)\n", - "\n", - " return x, x_len\n", - "\n", - "\n", - "class ConvStack(nn.Layer):\n", - " def __init__(self, feat_size, num_stacks):\n", - " super().__init__()\n", - " self.feat_size = feat_size # D\n", - " self.num_stacks = num_stacks\n", - "\n", - " self.conv_in = ConvBn(\n", - " num_channels_in=1,\n", - " num_channels_out=32,\n", - " kernel_size=(41, 11), #[D, T]\n", - " stride=(2, 3),\n", - " 
padding=(20, 5),\n", - " act='brelu')\n", - "\n", - " out_channel = 32\n", - " self.conv_stack = nn.Sequential([\n", - " ConvBn(\n", - " num_channels_in=32,\n", - " num_channels_out=out_channel,\n", - " kernel_size=(21, 11),\n", - " stride=(2, 1),\n", - " padding=(10, 5),\n", - " act='brelu') for i in range(num_stacks - 1)\n", - " ])\n", - "\n", - " # conv output feat_dim\n", - " output_height = (feat_size - 1) // 2 + 1\n", - " for i in range(self.num_stacks - 1):\n", - " output_height = (output_height - 1) // 2 + 1\n", - " self.output_height = out_channel * output_height\n", - "\n", - " def forward(self, x, x_len):\n", - " \"\"\"\n", - " x: shape [B, C, D, T]\n", - " x_len : shape [B]\n", - " \"\"\"\n", - " print(f\"conv in: {x_len}\")\n", - " x, x_len = self.conv_in(x, x_len)\n", - " for i, conv in enumerate(self.conv_stack):\n", - " print(f\"conv in: {x_len}\")\n", - " x, x_len = conv(x, x_len)\n", - " print(f\"conv out: {x_len}\")\n", - " return x, x_len\n", - " \n", - " \n", - "\n", - "class RNNCell(nn.RNNCellBase):\n", - " r\"\"\"\n", - " Elman RNN (SimpleRNN) cell. Given the inputs and previous states, it \n", - " computes the outputs and updates states.\n", - " The formula used is as follows:\n", - " .. math::\n", - " h_{t} & = act(x_{t} + b_{ih} + W_{hh}h_{t-1} + b_{hh})\n", - " y_{t} & = h_{t}\n", - " \n", - " where :math:`act` is for :attr:`activation`.\n", - " \"\"\"\n", - "\n", - " def __init__(self,\n", - " hidden_size,\n", - " activation=\"tanh\",\n", - " weight_ih_attr=None,\n", - " weight_hh_attr=None,\n", - " bias_ih_attr=None,\n", - " bias_hh_attr=None,\n", - " name=None):\n", - " super().__init__()\n", - " std = 1.0 / math.sqrt(hidden_size)\n", - " self.weight_hh = self.create_parameter(\n", - " (hidden_size, hidden_size),\n", - " weight_hh_attr,\n", - " default_initializer=I.Uniform(-std, std))\n", - " # self.bias_ih = self.create_parameter(\n", - " # (hidden_size, ),\n", - " # bias_ih_attr,\n", - " # is_bias=True,\n", - " # default_initializer=I.Uniform(-std, std))\n", - " self.bias_ih = None\n", - " self.bias_hh = self.create_parameter(\n", - " (hidden_size, ),\n", - " bias_hh_attr,\n", - " is_bias=True,\n", - " default_initializer=I.Uniform(-std, std))\n", - "\n", - " self.hidden_size = hidden_size\n", - " if activation not in [\"tanh\", \"relu\", \"brelu\"]:\n", - " raise ValueError(\n", - " \"activation for SimpleRNNCell should be tanh or relu, \"\n", - " \"but get {}\".format(activation))\n", - " self.activation = activation\n", - " self._activation_fn = paddle.tanh \\\n", - " if activation == \"tanh\" \\\n", - " else F.relu\n", - " if activation == 'brelu':\n", - " self._activation_fn = brelu\n", - "\n", - " def forward(self, inputs, states=None):\n", - " if states is None:\n", - " states = self.get_initial_states(inputs, self.state_shape)\n", - " pre_h = states\n", - " i2h = inputs\n", - " if self.bias_ih is not None:\n", - " i2h += self.bias_ih\n", - " h2h = paddle.matmul(pre_h, self.weight_hh, transpose_y=True)\n", - " if self.bias_hh is not None:\n", - " h2h += self.bias_hh\n", - " h = self._activation_fn(i2h + h2h)\n", - " return h, h\n", - "\n", - " @property\n", - " def state_shape(self):\n", - " return (self.hidden_size, )\n", - "\n", - "\n", - "class GRUCellShare(nn.RNNCellBase):\n", - " r\"\"\"\n", - " Gated Recurrent Unit (GRU) RNN cell. Given the inputs and previous states, \n", - " it computes the outputs and updates states.\n", - " The formula for GRU used is as follows:\n", - " .. 
math::\n", - " r_{t} & = \\sigma(W_{ir}x_{t} + b_{ir} + W_{hr}h_{t-1} + b_{hr})\n", - " z_{t} & = \\sigma(W_{iz}x_{t} + b_{iz} + W_{hz}h_{t-1} + b_{hz})\n", - " \\widetilde{h}_{t} & = \\tanh(W_{ic}x_{t} + b_{ic} + r_{t} * (W_{hc}h_{t-1} + b_{hc}))\n", - " h_{t} & = z_{t} * h_{t-1} + (1 - z_{t}) * \\widetilde{h}_{t}\n", - " y_{t} & = h_{t}\n", - " \n", - " where :math:`\\sigma` is the sigmoid fucntion, and * is the elemetwise \n", - " multiplication operator.\n", - " \"\"\"\n", - "\n", - " def __init__(self,\n", - " input_size,\n", - " hidden_size,\n", - " weight_ih_attr=None,\n", - " weight_hh_attr=None,\n", - " bias_ih_attr=None,\n", - " bias_hh_attr=None,\n", - " name=None):\n", - " super().__init__()\n", - " std = 1.0 / math.sqrt(hidden_size)\n", - " self.weight_hh = self.create_parameter(\n", - " (3 * hidden_size, hidden_size),\n", - " weight_hh_attr,\n", - " default_initializer=I.Uniform(-std, std))\n", - " # self.bias_ih = self.create_parameter(\n", - " # (3 * hidden_size, ),\n", - " # bias_ih_attr,\n", - " # is_bias=True,\n", - " # default_initializer=I.Uniform(-std, std))\n", - " self.bias_ih = None\n", - " self.bias_hh = self.create_parameter(\n", - " (3 * hidden_size, ),\n", - " bias_hh_attr,\n", - " is_bias=True,\n", - " default_initializer=I.Uniform(-std, std))\n", - "\n", - " self.hidden_size = hidden_size\n", - " self.input_size = input_size\n", - " self._gate_activation = F.sigmoid\n", - " #self._activation = paddle.tanh\n", - " self._activation = F.relu\n", - "\n", - " def forward(self, inputs, states=None):\n", - " if states is None:\n", - " states = self.get_initial_states(inputs, self.state_shape)\n", - "\n", - " pre_hidden = states\n", - " x_gates = inputs\n", - " if self.bias_ih is not None:\n", - " x_gates = x_gates + self.bias_ih\n", - " h_gates = paddle.matmul(pre_hidden, self.weight_hh, transpose_y=True)\n", - " if self.bias_hh is not None:\n", - " h_gates = h_gates + self.bias_hh\n", - "\n", - " x_r, x_z, x_c = paddle.split(x_gates, num_or_sections=3, axis=1)\n", - " h_r, h_z, h_c = paddle.split(h_gates, num_or_sections=3, axis=1)\n", - "\n", - " r = self._gate_activation(x_r + h_r)\n", - " z = self._gate_activation(x_z + h_z)\n", - " c = self._activation(x_c + r * h_c) # apply reset gate after mm\n", - " h = (pre_hidden - c) * z + c\n", - " # https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/fluid/layers/dynamic_gru_cn.html#dynamic-gru\n", - " #h = (1-z) * pre_hidden + z * c\n", - "\n", - " return h, h\n", - "\n", - " @property\n", - " def state_shape(self):\n", - " r\"\"\"\n", - " The `state_shape` of GRUCell is a shape `[hidden_size]` (-1 for batch\n", - " size would be automatically inserted into shape). 
The shape corresponds\n", - " to the shape of :math:`h_{t-1}`.\n", - " \"\"\"\n", - " return (self.hidden_size, )\n", - "\n", - "\n", - "class BiRNNWithBN(nn.Layer):\n", - " \"\"\"Bidirectonal simple rnn layer with sequence-wise batch normalization.\n", - " The batch normalization is only performed on input-state weights.\n", - "\n", - " :param name: Name of the layer parameters.\n", - " :type name: string\n", - " :param size: Dimension of RNN cells.\n", - " :type size: int\n", - " :param share_weights: Whether to share input-hidden weights between\n", - " forward and backward directional RNNs.\n", - " :type share_weights: bool\n", - " :return: Bidirectional simple rnn layer.\n", - " :rtype: Variable\n", - " \"\"\"\n", - "\n", - " def __init__(self, i_size, h_size, share_weights):\n", - " super().__init__()\n", - " self.share_weights = share_weights\n", - " if self.share_weights:\n", - " #input-hidden weights shared between bi-directional rnn.\n", - " self.fw_fc = nn.Linear(i_size, h_size, bias_attr=False)\n", - " # batch norm is only performed on input-state projection\n", - " self.fw_bn = nn.BatchNorm1D(\n", - " h_size, bias_attr=None, data_format='NLC')\n", - " self.bw_fc = self.fw_fc\n", - " self.bw_bn = self.fw_bn\n", - " else:\n", - " self.fw_fc = nn.Linear(i_size, h_size, bias_attr=False)\n", - " self.fw_bn = nn.BatchNorm1D(\n", - " h_size, bias_attr=None, data_format='NLC')\n", - " self.bw_fc = nn.Linear(i_size, h_size, bias_attr=False)\n", - " self.bw_bn = nn.BatchNorm1D(\n", - " h_size, bias_attr=None, data_format='NLC')\n", - "\n", - " self.fw_cell = RNNCell(hidden_size=h_size, activation='brelu')\n", - " self.bw_cell = RNNCell(hidden_size=h_size, activation='brelu')\n", - " self.fw_rnn = nn.RNN(\n", - " self.fw_cell, is_reverse=False, time_major=False) #[B, T, D]\n", - " self.bw_rnn = nn.RNN(\n", - " self.fw_cell, is_reverse=True, time_major=False) #[B, T, D]\n", - "\n", - " def forward(self, x, x_len):\n", - " # x, shape [B, T, D]\n", - " fw_x = self.fw_bn(self.fw_fc(x))\n", - " bw_x = self.bw_bn(self.bw_fc(x))\n", - " fw_x, _ = self.fw_rnn(inputs=fw_x, sequence_length=x_len)\n", - " bw_x, _ = self.bw_rnn(inputs=bw_x, sequence_length=x_len)\n", - " x = paddle.concat([fw_x, bw_x], axis=-1)\n", - " return x, x_len\n", - "\n", - "\n", - "class BiGRUWithBN(nn.Layer):\n", - " \"\"\"Bidirectonal gru layer with sequence-wise batch normalization.\n", - " The batch normalization is only performed on input-state weights.\n", - "\n", - " :param name: Name of the layer.\n", - " :type name: string\n", - " :param input: Input layer.\n", - " :type input: Variable\n", - " :param size: Dimension of GRU cells.\n", - " :type size: int\n", - " :param act: Activation type.\n", - " :type act: string\n", - " :return: Bidirectional GRU layer.\n", - " :rtype: Variable\n", - " \"\"\"\n", - "\n", - " def __init__(self, i_size, h_size, act):\n", - " super().__init__()\n", - " hidden_size = h_size * 3\n", - " self.fw_fc = nn.Linear(i_size, hidden_size, bias_attr=False)\n", - " self.fw_bn = nn.BatchNorm1D(\n", - " hidden_size, bias_attr=None, data_format='NLC')\n", - " self.bw_fc = nn.Linear(i_size, hidden_size, bias_attr=False)\n", - " self.bw_bn = nn.BatchNorm1D(\n", - " hidden_size, bias_attr=None, data_format='NLC')\n", - "\n", - " self.fw_cell = GRUCellShare(input_size=hidden_size, hidden_size=h_size)\n", - " self.bw_cell = GRUCellShare(input_size=hidden_size, hidden_size=h_size)\n", - " self.fw_rnn = nn.RNN(\n", - " self.fw_cell, is_reverse=False, time_major=False) #[B, T, D]\n", - " self.bw_rnn = 
nn.RNN(\n", - " self.fw_cell, is_reverse=True, time_major=False) #[B, T, D]\n", - "\n", - " def forward(self, x, x_len):\n", - " # x, shape [B, T, D]\n", - " fw_x = self.fw_bn(self.fw_fc(x))\n", - " bw_x = self.bw_bn(self.bw_fc(x))\n", - " fw_x, _ = self.fw_rnn(inputs=fw_x, sequence_length=x_len)\n", - " bw_x, _ = self.bw_rnn(inputs=bw_x, sequence_length=x_len)\n", - " x = paddle.concat([fw_x, bw_x], axis=-1)\n", - " return x, x_len\n", - "\n", - "\n", - "class RNNStack(nn.Layer):\n", - " \"\"\"RNN group with stacked bidirectional simple RNN or GRU layers.\n", - "\n", - " :param input: Input layer.\n", - " :type input: Variable\n", - " :param size: Dimension of RNN cells in each layer.\n", - " :type size: int\n", - " :param num_stacks: Number of stacked rnn layers.\n", - " :type num_stacks: int\n", - " :param use_gru: Use gru if set True. Use simple rnn if set False.\n", - " :type use_gru: bool\n", - " :param share_rnn_weights: Whether to share input-hidden weights between\n", - " forward and backward directional RNNs.\n", - " It is only available when use_gru=False.\n", - " :type share_weights: bool\n", - " :return: Output layer of the RNN group.\n", - " :rtype: Variable\n", - " \"\"\"\n", - "\n", - " def __init__(self, i_size, h_size, num_stacks, use_gru, share_rnn_weights):\n", - " super().__init__()\n", - " self.rnn_stacks = nn.LayerList()\n", - " for i in range(num_stacks):\n", - " if use_gru:\n", - " #default:GRU using tanh\n", - " self.rnn_stacks.append(\n", - " BiGRUWithBN(i_size=i_size, h_size=h_size, act=\"relu\"))\n", - " else:\n", - " self.rnn_stacks.append(\n", - " BiRNNWithBN(\n", - " i_size=i_size,\n", - " h_size=h_size,\n", - " share_weights=share_rnn_weights))\n", - " i_size = h_size * 2\n", - "\n", - " def forward(self, x, x_len):\n", - " \"\"\"\n", - " x: shape [B, T, D]\n", - " x_len: shpae [B]\n", - " \"\"\"\n", - " for i, rnn in enumerate(self.rnn_stacks):\n", - " x, x_len = rnn(x, x_len)\n", - " masks = sequence_mask(x_len) #[B, T]\n", - " masks = masks.unsqueeze(-1) # [B, T, 1]\n", - " x = x.multiply(masks)\n", - " return x, x_len\n", - "\n", - " \n", - "class DeepSpeech2Test(DeepSpeech2):\n", - " def __init__(self,\n", - " feat_size,\n", - " dict_size,\n", - " num_conv_layers=2,\n", - " num_rnn_layers=3,\n", - " rnn_size=256,\n", - " use_gru=False,\n", - " share_rnn_weights=True):\n", - " super().__init__(feat_size,\n", - " dict_size,\n", - " num_conv_layers=2,\n", - " num_rnn_layers=3,\n", - " rnn_size=256,\n", - " use_gru=False,\n", - " share_rnn_weights=True)\n", - " self.feat_size = feat_size # 161 for linear\n", - " self.dict_size = dict_size\n", - "\n", - " self.conv = ConvStack(feat_size, num_conv_layers)\n", - " \n", - "# self.fc = nn.Linear(1312, dict_size + 1)\n", - "\n", - " i_size = self.conv.output_height # H after conv stack\n", - " self.rnn = RNNStack(\n", - " i_size=i_size,\n", - " h_size=rnn_size,\n", - " num_stacks=num_rnn_layers,\n", - " use_gru=use_gru,\n", - " share_rnn_weights=share_rnn_weights)\n", - " \n", - " self.fc = nn.Linear(rnn_size * 2, dict_size + 1)\n", - " \n", - " def infer(self, audio, audio_len):\n", - " # [B, D, T] -> [B, C=1, D, T]\n", - " audio = audio.unsqueeze(1)\n", - "\n", - " # convolution group\n", - " x, audio_len = self.conv(audio, audio_len)\n", - " print('conv out', x.shape)\n", - "\n", - " # convert data from convolution feature map to sequence of vectors\n", - " B, C, D, T = paddle.shape(x)\n", - " x = x.transpose([0, 3, 1, 2]) #[B, T, C, D]\n", - " x = x.reshape([B, T, C * D]) #[B, T, C*D]\n", - " print('rnn 
input', x.shape)\n", - "\n", - " # remove padding part\n", - " x, audio_len = self.rnn(x, audio_len) #[B, T, D]\n", - " print('rnn output', x.shape)\n", - "\n", - " logits = self.fc(x) #[B, T, V + 1]\n", - "\n", - " #ctcdecoder need probs, not log_probs\n", - " probs = F.softmax(logits)\n", - "\n", - " return logits, probs, audio_len\n", - "\n", - " def forward(self, audio, audio_len, text, text_len):\n", - " \"\"\"\n", - " audio: shape [B, D, T]\n", - " text: shape [B, T]\n", - " audio_len: shape [B]\n", - " text_len: shape [B]\n", - " \"\"\"\n", - " return self.infer(audio, audio_len)\n", - " \n", - "\n", - "feat_dim=161\n", - "\n", - "model = DeepSpeech2Test(\n", - " feat_size=feat_dim,\n", - " dict_size=batch_reader.dataset.vocab_size,\n", - " num_conv_layers=args.num_conv_layers,\n", - " num_rnn_layers=args.num_rnn_layers,\n", - " rnn_size=1024,\n", - " use_gru=args.use_gru,\n", - " share_rnn_weights=args.share_rnn_weights,\n", - " )\n", - "dp_model = model\n", - "#dp_model = paddle.DataParallel(model)\n", - "\n", - "loss_fn = DeepSpeech2Loss(batch_reader.dataset.vocab_size)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "divided-incentive", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 22, - "id": "discrete-conjunction", - "metadata": {}, - "outputs": [], - "source": [ - "audio, audio_len, text, text_len = None, None, None, None\n", - "\n", - "for idx, inputs in enumerate(batch_reader):\n", - " audio, audio_len, text, text_len = inputs\n", - "# print(idx)\n", - "# print('a', audio.shape, audio.place)\n", - "# print('t', text)\n", - "# print('al', audio_len)\n", - "# print('tl', text_len)\n", - " break" - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "id": "protected-announcement", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "conv in: Tensor(shape=[5], dtype=int64, place=CUDAPinnedPlace, stop_gradient=True,\n", - " [163, 167, 180, 186, 186])\n", - "seq mask: Tensor(shape=[5, 62], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n", - " [[1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0.],\n", - " [1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0.],\n", - " [1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0.],\n", - " [1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],\n", - " [1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]])\n", - "conv in: Tensor(shape=[5], dtype=int64, place=CUDAPlace(0), stop_gradient=True,\n", - " [55, 56, 60, 62, 62])\n", - "seq mask: 
Tensor(shape=[5, 62], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n", - " [[1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0.],\n", - " [1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0.],\n", - " [1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0.],\n", - " [1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],\n", - " [1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]])\n", - "conv out: Tensor(shape=[5], dtype=int64, place=CUDAPlace(0), stop_gradient=True,\n", - " [55, 56, 60, 62, 62])\n", - "conv out [5, 32, 41, 62]\n", - "rnn input [5, 62, 1312]\n", - "seq mask: Tensor(shape=[5, 62], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n", - " [[1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0.],\n", - " [1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0.],\n", - " [1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0.],\n", - " [1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],\n", - " [1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]])\n", - "seq mask: Tensor(shape=[5, 62], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n", - " [[1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0.],\n", - " [1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0.],\n", - " 
[1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0.],\n", - " [1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],\n", - " [1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]])\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages/paddle/fluid/layers/utils.py:77: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\n", - " return (isinstance(seq, collections.Sequence) and\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "seq mask: Tensor(shape=[5, 62], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n", - " [[1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0.],\n", - " [1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0.],\n", - " [1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 0.],\n", - " [1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],\n", - " [1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]])\n", - "rnn output [5, 62, 2048]\n", - "logits len Tensor(shape=[5], dtype=int64, place=CUDAPlace(0), stop_gradient=True,\n", - " [55, 56, 60, 62, 62])\n", - "loss Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n", - " [2316.82153320])\n" - ] - } - ], - "source": [ - "outputs = dp_model(audio, audio_len, text, text_len)\n", - "logits, _, logits_len = outputs\n", - "print('logits len', logits_len)\n", - "loss = loss_fn.forward(logits, text, logits_len, text_len)\n", - "print('loss', loss)" - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "id": "universal-myrtle", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "param grad: conv.conv_in.conv.weight: shape: [32, 1, 41, 11] stop_grad: False grad: None\n", - "param grad: conv.conv_in.conv.bias: shape: [32] stop_grad: False grad: None\n", - "param grad: conv.conv_in.bn.weight: shape: [32] stop_grad: 
False grad: None\n", - "param grad: conv.conv_in.bn.bias: shape: [32] stop_grad: False grad: None\n", - "param grad: conv.conv_in.bn._mean: shape: [32] stop_grad: True grad: None\n", - "param grad: conv.conv_in.bn._variance: shape: [32] stop_grad: True grad: None\n", - "param grad: conv.conv_stack.0.conv.weight: shape: [32, 32, 21, 11] stop_grad: False grad: None\n", - "param grad: conv.conv_stack.0.conv.bias: shape: [32] stop_grad: False grad: None\n", - "param grad: conv.conv_stack.0.bn.weight: shape: [32] stop_grad: False grad: None\n", - "param grad: conv.conv_stack.0.bn.bias: shape: [32] stop_grad: False grad: None\n", - "param grad: conv.conv_stack.0.bn._mean: shape: [32] stop_grad: True grad: None\n", - "param grad: conv.conv_stack.0.bn._variance: shape: [32] stop_grad: True grad: None\n", - "param grad: rnn.rnn_stacks.0.fw_fc.weight: shape: [1312, 1024] stop_grad: False grad: None\n", - "param grad: rnn.rnn_stacks.0.fw_bn.weight: shape: [1024] stop_grad: False grad: None\n", - "param grad: rnn.rnn_stacks.0.fw_bn.bias: shape: [1024] stop_grad: False grad: None\n", - "param grad: rnn.rnn_stacks.0.fw_bn._mean: shape: [1024] stop_grad: True grad: None\n", - "param grad: rnn.rnn_stacks.0.fw_bn._variance: shape: [1024] stop_grad: True grad: None\n", - "param grad: rnn.rnn_stacks.0.fw_cell.weight_hh: shape: [1024, 1024] stop_grad: False grad: None\n", - "param grad: rnn.rnn_stacks.0.fw_cell.bias_hh: shape: [1024] stop_grad: False grad: None\n", - "param grad: rnn.rnn_stacks.0.bw_cell.weight_hh: shape: [1024, 1024] stop_grad: False grad: None\n", - "param grad: rnn.rnn_stacks.0.bw_cell.bias_hh: shape: [1024] stop_grad: False grad: None\n", - "param grad: rnn.rnn_stacks.1.fw_fc.weight: shape: [2048, 1024] stop_grad: False grad: None\n", - "param grad: rnn.rnn_stacks.1.fw_bn.weight: shape: [1024] stop_grad: False grad: None\n", - "param grad: rnn.rnn_stacks.1.fw_bn.bias: shape: [1024] stop_grad: False grad: None\n", - "param grad: rnn.rnn_stacks.1.fw_bn._mean: shape: [1024] stop_grad: True grad: None\n", - "param grad: rnn.rnn_stacks.1.fw_bn._variance: shape: [1024] stop_grad: True grad: None\n", - "param grad: rnn.rnn_stacks.1.fw_cell.weight_hh: shape: [1024, 1024] stop_grad: False grad: None\n", - "param grad: rnn.rnn_stacks.1.fw_cell.bias_hh: shape: [1024] stop_grad: False grad: None\n", - "param grad: rnn.rnn_stacks.1.bw_cell.weight_hh: shape: [1024, 1024] stop_grad: False grad: None\n", - "param grad: rnn.rnn_stacks.1.bw_cell.bias_hh: shape: [1024] stop_grad: False grad: None\n", - "param grad: rnn.rnn_stacks.2.fw_fc.weight: shape: [2048, 1024] stop_grad: False grad: None\n", - "param grad: rnn.rnn_stacks.2.fw_bn.weight: shape: [1024] stop_grad: False grad: None\n", - "param grad: rnn.rnn_stacks.2.fw_bn.bias: shape: [1024] stop_grad: False grad: None\n", - "param grad: rnn.rnn_stacks.2.fw_bn._mean: shape: [1024] stop_grad: True grad: None\n", - "param grad: rnn.rnn_stacks.2.fw_bn._variance: shape: [1024] stop_grad: True grad: None\n", - "param grad: rnn.rnn_stacks.2.fw_cell.weight_hh: shape: [1024, 1024] stop_grad: False grad: None\n", - "param grad: rnn.rnn_stacks.2.fw_cell.bias_hh: shape: [1024] stop_grad: False grad: None\n", - "param grad: rnn.rnn_stacks.2.bw_cell.weight_hh: shape: [1024, 1024] stop_grad: False grad: None\n", - "param grad: rnn.rnn_stacks.2.bw_cell.bias_hh: shape: [1024] stop_grad: False grad: None\n", - "param grad: fc.weight: shape: [2048, 4299] stop_grad: False grad: None\n", - "param grad: fc.bias: shape: [4299] stop_grad: False grad: None\n" - ] - } - ], - 
"source": [ - "for n, p in dp_model.named_parameters():\n", - " print(\n", - " f\"param grad: {n}: shape: {p.shape} stop_grad: {p.stop_gradient} grad: {p.grad}\")" - ] - }, - { - "cell_type": "code", - "execution_count": 25, - "id": "referenced-double", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "param grad: conv.conv_in.conv.weight: shape: [32, 1, 41, 11] stop_grad: False grad: [[[[ 2.1243238 1.696022 3.770659 ... 5.234652 5.4865217\n", - " 4.757795 ]\n", - " [ 2.651376 2.3109848 4.428488 ... 5.353201 8.703288\n", - " 5.1787405 ]\n", - " [ 2.7511077 1.8823049 2.1875212 ... 3.4821286 6.386543\n", - " 3.5026932 ]\n", - " ...\n", - " [ 1.9173846 1.8623551 0.5601456 ... 2.8375719 3.8496673\n", - " 2.359191 ]\n", - " [ 2.3827765 2.497965 1.5914664 ... 2.220721 3.4617734\n", - " 4.829253 ]\n", - " [ 1.6855702 1.5040786 1.8793598 ... 4.0773935 3.176893\n", - " 3.7477999 ]]]\n", - "\n", - "\n", - " [[[ 1.8451455 2.0091445 1.5225713 ... 1.524528 0.17764974\n", - " 1.0245132 ]\n", - " [ 1.9388857 1.3873467 2.044691 ... 0.92544 -0.9746763\n", - " -0.41603735]\n", - " [ 2.6814485 2.6096234 1.6802506 ... 1.902397 1.6837387\n", - " -0.96788657]\n", - " ...\n", - " [ 4.3675485 1.9822174 1.1695029 ... 1.4672399 3.2029557\n", - " 2.6364415 ]\n", - " [ 3.2536 1.1792442 -0.5618002 ... 2.101127 1.904225\n", - " 3.3839993 ]\n", - " [ 1.9118482 1.0651072 0.5409893 ... 2.6783593 1.6871439\n", - " 4.1078367 ]]]\n", - "\n", - "\n", - " [[[-4.412424 -1.7111907 -1.7722387 ... -4.3383503 -6.2393785\n", - " -6.139402 ]\n", - " [-2.260428 -1.0250616 -2.0550888 ... -5.353946 -4.29947\n", - " -6.158736 ]\n", - " [-1.4927872 0.7552787 -0.0702923 ... -4.485656 -4.0794134\n", - " -5.416684 ]\n", - " ...\n", - " [ 2.9100134 4.156195 4.357041 ... -3.569804 -1.8634341\n", - " -0.8772557 ]\n", - " [ 1.6895763 3.4314504 4.1192107 ... -1.380024 -2.3234155\n", - " -3.6650617 ]\n", - " [ 2.4190075 1.007498 3.1173465 ... -0.96318084 -3.6175003\n", - " -2.5240796 ]]]\n", - "\n", - "\n", - " ...\n", - "\n", - "\n", - " [[[-0.6865506 -0.60106415 -1.5555015 ... 2.0853553 1.900961\n", - " 2.101063 ]\n", - " [-0.31686288 -1.4362946 -1.4929098 ... 0.15085456 1.4540495\n", - " 1.4128599 ]\n", - " [-0.57852304 -0.8204216 -2.3264258 ... 1.4970423 0.54599845\n", - " 1.6222539 ]\n", - " ...\n", - " [ 0.32624918 0.96004546 -0.7476514 ... 2.2786083 2.1000178\n", - " 2.7494807 ]\n", - " [-1.6967826 -0.78979015 -1.8424999 ... 1.0620685 2.0544293\n", - " 2.2483966 ]\n", - " [ 0.8192332 2.601636 -2.6636481 ... 0.26625186 1.7610842\n", - " 1.7467536 ]]]\n", - "\n", - "\n", - " [[[ 0.9140297 0.42424175 1.4352363 ... -2.3022954 -3.001058\n", - " -2.6987422 ]\n", - " [ 0.4491998 -0.10698095 1.5089144 ... -3.2831016 -3.6055021\n", - " -3.6595795 ]\n", - " [ 2.6818252 -1.5750014 -0.34812498 ... -4.4137015 -4.250422\n", - " -3.481941 ]\n", - " ...\n", - " [ 1.4232106 2.9689102 3.9547806 ... -0.481165 0.28190404\n", - " -1.2167063 ]\n", - " [ 2.2297084 4.8198485 4.2857304 ... 0.57483846 1.4093391\n", - " 0.0715822 ]\n", - " [ 1.679745 4.768068 5.416195 ... 0.17254728 0.4623217\n", - " 1.4772662 ]]]\n", - "\n", - "\n", - " [[[-2.0860114 -2.9508173 -1.4945896 ... -4.067145 -2.5652342\n", - " -3.5771027 ]\n", - " [-2.697845 -1.9273603 -2.3885014 ... -2.196533 -2.8573706\n", - " -2.0113711 ]\n", - " [-2.413383 -2.7204053 -1.0502659 ... -3.001385 -3.36447\n", - " -4.3225455 ]\n", - " ...\n", - " [ 1.2754489 0.9560999 1.5239805 ... 
-0.0105865 -1.00876\n", - " 2.6247358 ]\n", - " [ 1.1965859 1.0378222 1.1025598 ... -0.5394704 0.49838027\n", - " -0.9618193 ]\n", - " [ 1.1361816 1.3232857 0.687318 ... -0.23925456 -0.43679112\n", - " -0.79297894]]]]\n", - "param grad: conv.conv_in.conv.bias: shape: [32] stop_grad: False grad: [ 5.9604645e-07 -3.9339066e-06 -1.0728836e-06 -1.6689301e-06\n", - " 1.1920929e-06 -2.5033951e-06 -2.3841858e-07 4.7683716e-07\n", - " 4.2915344e-06 -1.9073486e-06 -1.9073486e-06 3.0994415e-06\n", - " -2.6822090e-06 3.3378601e-06 -4.2915344e-06 5.2452087e-06\n", - " 3.8146973e-06 2.3841858e-07 7.1525574e-07 -3.6954880e-06\n", - " 2.0563602e-06 -2.6226044e-06 3.0994415e-06 -3.5762787e-07\n", - " -4.7683716e-06 1.2218952e-06 3.3378601e-06 -2.5629997e-06\n", - " 2.3841858e-07 -1.7881393e-06 4.7683716e-07 -2.7418137e-06]\n", - "param grad: conv.conv_in.bn.weight: shape: [32] stop_grad: False grad: [ 2.363316 3.286464 1.9607866 -1.6367784 -1.6325372 -1.7729434\n", - " -0.9261875 2.0950415 0.1155543 -0.8857083 0.70079553 0.33920464\n", - " 2.6953902 -0.64524114 0.8845749 -1.2271115 0.6578167 -2.939814\n", - " 5.5728893 -1.0917969 0.01470797 1.395206 4.8009634 -0.744532\n", - " 0.944651 -1.092311 1.4877632 -3.042566 0.51686054 -5.4768667\n", - " -5.628145 -1.0894046 ]\n", - "param grad: conv.conv_in.bn.bias: shape: [32] stop_grad: False grad: [ 1.5193373 1.8838218 3.7722278 0.28052303 0.5386534 -0.44620085\n", - " -1.6977876 3.115642 0.03312349 -2.9121587 3.8925257 0.2288351\n", - " -2.273387 -1.3597974 4.3708124 -0.23374033 0.116272 -0.7064927\n", - " 6.5267463 -1.5318865 1.0288429 0.7928574 -0.24655592 -2.1116853\n", - " 2.922772 -3.3462617 1.7016437 -3.5471547 0.29777628 -3.2820854\n", - " -4.116946 -0.9909375 ]\n", - "param grad: conv.conv_in.bn._mean: shape: [32] stop_grad: True grad: None\n", - "param grad: conv.conv_in.bn._variance: shape: [32] stop_grad: True grad: None\n", - "param grad: conv.conv_stack.0.conv.weight: shape: [32, 32, 21, 11] stop_grad: False grad: [[[[ 6.20494843e-01 5.95983505e-01 -1.48909020e+00 ... -6.86620831e-01\n", - " 6.71104014e-01 -1.95339048e+00]\n", - " [-3.91837955e-03 1.27062631e+00 -1.63248098e+00 ... 1.07290137e+00\n", - " -9.42245364e-01 -3.34277248e+00]\n", - " [ 2.41821265e+00 2.36212373e-01 -1.84433365e+00 ... 1.23182368e+00\n", - " 1.36039746e+00 -2.94621849e+00]\n", - " ...\n", - " [ 1.55153418e+00 7.25861669e-01 2.08785534e+00 ... -6.40172660e-01\n", - " -3.23889256e-02 -2.30832791e+00]\n", - " [ 3.69824195e+00 1.27163112e-01 4.09263194e-01 ... -8.60729575e-01\n", - " -3.51897454e+00 -2.10093403e+00]\n", - " [-4.94779050e-01 -3.74262631e-01 -1.19801068e+00 ... -2.05930543e+00\n", - " -7.38576293e-01 -9.44581270e-01]]\n", - "\n", - " [[-2.04341412e+00 -3.70606273e-01 -1.40429378e+00 ... -1.71711946e+00\n", - " -4.09437418e-01 -1.74107194e+00]\n", - " [-8.72247815e-01 -1.06301677e+00 -9.19306517e-01 ... -2.98976970e+00\n", - " -3.03250861e+00 -2.37099743e+00]\n", - " [-5.00457406e-01 -1.11882675e+00 -5.91526508e-01 ... 4.23921436e-01\n", - " -2.08650708e+00 -1.82109618e+00]\n", - " ...\n", - " [ 2.07773042e+00 1.40735030e-01 -2.60543615e-01 ... -1.55956164e-01\n", - " -1.31862307e+00 -2.07174897e+00]\n", - " [ 7.95007765e-01 1.14988625e-01 -1.43308258e+00 ... 8.29253554e-01\n", - " -9.57888126e-01 -3.82121086e-01]\n", - " [ 8.34397674e-02 1.38636863e+00 -1.21593380e+00 ... -2.65783578e-01\n", - " 1.78124309e-02 -3.40287232e+00]]\n", - "\n", - " [[ 6.27344131e-01 5.71699142e-02 -3.58010936e+00 ... 
-1.09794116e+00\n", - " -1.73590891e-02 -1.80156028e+00]]]]\n", - "param grad: conv.conv_stack.0.conv.bias: shape: [32] stop_grad: False grad: [-1.4305115e-06 0.0000000e+00 -4.0531158e-06 -1.6689301e-06\n", - " 2.3841858e-07 -7.1525574e-07 1.1920929e-06 1.5497208e-06\n", - " -2.3841858e-07 1.6689301e-06 9.5367432e-07 9.5367432e-07\n", - " -2.6226044e-06 1.1920929e-06 1.3113022e-06 1.9669533e-06\n", - " -4.7683716e-07 1.1920929e-06 -1.6689301e-06 -1.5497208e-06\n", - " -2.2649765e-06 4.7683716e-07 2.3841858e-06 -3.5762787e-06\n", - " 2.3841858e-07 2.1457672e-06 -3.5762787e-07 8.3446503e-07\n", - " -3.5762787e-07 -7.1525574e-07 2.6524067e-06 -1.1920929e-06]\n", - "param grad: conv.conv_stack.0.bn.weight: shape: [32] stop_grad: False grad: [-3.7669735 1.5226867 1.759756 4.501629 -2.2077336 0.18411277\n", - " 1.3558264 -1.0269645 3.9628277 3.9300344 -2.80754 1.8462183\n", - " -0.03385968 2.1284049 0.46124816 -4.364863 0.78491163 0.25565645\n", - " -5.3538237 3.2606194 0.79100513 -1.4652673 2.769378 1.2283417\n", - " -4.7466464 -1.3404545 -6.9374166 0.710248 2.0944448 0.4334769\n", - " -0.24313992 0.31392363]\n", - "param grad: conv.conv_stack.0.bn.bias: shape: [32] stop_grad: False grad: [-0.6251638 2.833331 0.6993131 3.7106915 -2.262496 0.7390424\n", - " 0.5360477 -2.803875 2.1646228 2.117193 -1.9988279 1.5135905\n", - " -2.0181084 2.6450465 0.06302822 -3.0530102 1.4788482 0.5941844\n", - " -3.1690063 1.8753575 -0.0737313 -2.7806277 -0.04483938 0.16129279\n", - " -1.2960215 -0.38020235 -0.55218065 0.10754502 2.065371 -1.4703183\n", - " -0.40964937 -1.4454535 ]\n", - "param grad: conv.conv_stack.0.bn._mean: shape: [32] stop_grad: True grad: None\n", - "param grad: conv.conv_stack.0.bn._variance: shape: [32] stop_grad: True grad: None\n", - "param grad: rnn.rnn_stacks.0.fw_fc.weight: shape: [1312, 1024] stop_grad: False grad: [[-0.46178514 0.1095643 0.06441769 ... 0.42020613 -0.34181893\n", - " -0.0658682 ]\n", - " [-0.03619978 0.21653323 0.01727325 ... 0.05731536 -0.37822944\n", - " -0.05464617]\n", - " [-0.32397318 0.04158126 -0.08091418 ... 0.0928297 -0.06518176\n", - " -0.40110156]\n", - " ...\n", - " [-0.2702023 0.05126935 0.11825457 ... 0.0069707 -0.36951366\n", - " 0.37071258]\n", - " [-0.11326203 0.19305304 -0.133317 ... -0.13030824 -0.09068564\n", - " 0.32735693]\n", - " [-0.04543798 0.09902512 -0.10745425 ... -0.06685166 -0.3055201\n", - " 0.0752247 ]]\n", - "param grad: rnn.rnn_stacks.0.fw_bn.weight: shape: [1024] stop_grad: False grad: [-0.07338604 0.64991236 0.5465856 ... 0.507725 0.14061031\n", - " 0.3020359 ]\n", - "param grad: rnn.rnn_stacks.0.fw_bn.bias: shape: [1024] stop_grad: False grad: [-0.41395143 -0.28493872 0.36796764 ... 0.2387953 0.06732331\n", - " 0.16263628]\n", - "param grad: rnn.rnn_stacks.0.fw_bn._mean: shape: [1024] stop_grad: True grad: None\n", - "param grad: rnn.rnn_stacks.0.fw_bn._variance: shape: [1024] stop_grad: True grad: None\n", - "param grad: rnn.rnn_stacks.0.fw_cell.weight_hh: shape: [1024, 1024] stop_grad: False grad: [[-0.09370177 -0.12264141 -0.08237482 ... -0.50241685 -0.149155\n", - " -0.25661892]\n", - " [-0.37426725 0.44987115 0.10685667 ... -0.65946174 -0.4499248\n", - " -0.17545304]\n", - " [-0.03753807 0.33422717 0.12750985 ... 0.05405155 -0.17648363\n", - " 0.05315325]\n", - " ...\n", - " [ 0.15721183 0.03064088 -0.00751081 ... 0.27183983 0.3881693\n", - " -0.01544908]\n", - " [ 0.26047793 0.16917065 0.00915196 ... 0.18076143 -0.05080506\n", - " 0.14791614]\n", - " [ 0.19052255 0.03642382 -0.14313167 ... 
0.2611448 0.20763844\n", - " 0.26846847]]\n", - "param grad: rnn.rnn_stacks.0.fw_cell.bias_hh: shape: [1024] stop_grad: False grad: [-0.4139514 -0.28493875 0.36796758 ... 0.23879525 0.06732336\n", - " 0.16263627]\n", - "param grad: rnn.rnn_stacks.0.bw_cell.weight_hh: shape: [1024, 1024] stop_grad: False grad: None\n", - "param grad: rnn.rnn_stacks.0.bw_cell.bias_hh: shape: [1024] stop_grad: False grad: None\n", - "param grad: rnn.rnn_stacks.1.fw_fc.weight: shape: [2048, 1024] stop_grad: False grad: [[ 0.04214853 -0.1710323 0.17557406 ... 0.11926915 0.21577051\n", - " -0.30598596]\n", - " [-0.02370887 -0.03498494 -0.05991999 ... -0.06049232 -0.14527473\n", - " -0.5335691 ]\n", - " [-0.21417995 -0.10263194 -0.05903128 ... -0.26958284 0.05936668\n", - " 0.25522667]\n", - " ...\n", - " [ 0.31594425 -0.29487017 0.15871571 ... 0.3504135 -0.1418606\n", - " -0.07482046]\n", - " [ 0.22316164 0.7682122 -0.22191924 ... -0.00535548 -0.6497105\n", - " -0.2011079 ]\n", - " [-0.05800886 0.13750821 0.02450509 ... 0.245736 0.07425706\n", - " -0.17761081]]\n", - "param grad: rnn.rnn_stacks.1.fw_bn.weight: shape: [1024] stop_grad: False grad: [-0.45080703 0.19005743 0.077441 ... -0.24504453 0.19666554\n", - " -0.10503208]\n", - "param grad: rnn.rnn_stacks.1.fw_bn.bias: shape: [1024] stop_grad: False grad: [-0.55867654 0.04237206 0.03389215 ... -0.35602498 0.25528812\n", - " 0.11344345]\n", - "param grad: rnn.rnn_stacks.1.fw_bn._mean: shape: [1024] stop_grad: True grad: None\n", - "param grad: rnn.rnn_stacks.1.fw_bn._variance: shape: [1024] stop_grad: True grad: None\n", - "param grad: rnn.rnn_stacks.1.fw_cell.weight_hh: shape: [1024, 1024] stop_grad: False grad: [[-0.48457903 0.04466334 -0.19785863 ... -0.0254025 -0.10338341\n", - " -0.29202533]\n", - " [-0.15261276 0.00412052 0.22198747 ... 0.22460426 -0.03752084\n", - " 0.05170784]\n", - " [-0.09337254 0.02530848 0.1263681 ... -0.02056236 0.33342454\n", - " -0.08760723]\n", - " ...\n", - " [-0.28645608 -0.19169135 -0.1361257 ... -0.00444204 -0.06552711\n", - " -0.14726155]\n", - " [ 0.21883707 0.2049045 0.23723911 ... 0.4626113 -0.14110637\n", - " 0.02569831]\n", - " [ 0.37554163 -0.19249167 0.14591683 ... 0.25602737 0.40088275\n", - " 0.41056633]]\n", - "param grad: rnn.rnn_stacks.1.fw_cell.bias_hh: shape: [1024] stop_grad: False grad: [-0.55867654 0.04237211 0.0338921 ... -0.35602498 0.2552881\n", - " 0.11344352]\n", - "param grad: rnn.rnn_stacks.1.bw_cell.weight_hh: shape: [1024, 1024] stop_grad: False grad: None\n", - "param grad: rnn.rnn_stacks.1.bw_cell.bias_hh: shape: [1024] stop_grad: False grad: None\n", - "param grad: rnn.rnn_stacks.2.fw_fc.weight: shape: [2048, 1024] stop_grad: False grad: [[-0.28007814 -0.09206 -0.01297755 ... -0.2557205 -0.2693453\n", - " 0.05862035]\n", - " [-0.34194735 -0.01383794 -0.06490533 ... -0.11063005 0.16226721\n", - " -0.3197178 ]\n", - " [-0.3646778 0.15443833 0.02241019 ... -0.15093157 -0.09886418\n", - " -0.44295847]\n", - " ...\n", - " [-0.01041886 -0.57636976 -0.03988511 ... -0.2260822 0.49646813\n", - " -0.15528557]\n", - " [-0.19385241 -0.56451964 -0.05551083 ... -0.5638106 0.43611372\n", - " -0.61484563]\n", - " [ 0.1051331 -0.4762463 0.11194798 ... -0.26766616 -0.30734932\n", - " 0.17856634]]\n", - "param grad: rnn.rnn_stacks.2.fw_bn.weight: shape: [1024] stop_grad: False grad: [-0.02791309 -0.992517 0.63012564 ... -1.1830902 1.4646478\n", - " 1.6333911 ]\n", - "param grad: rnn.rnn_stacks.2.fw_bn.bias: shape: [1024] stop_grad: False grad: [-0.10834587 -1.7079136 0.81259465 ... 
-1.4478713 1.455745\n", - " 2.069446 ]\n", - "param grad: rnn.rnn_stacks.2.fw_bn._mean: shape: [1024] stop_grad: True grad: None\n", - "param grad: rnn.rnn_stacks.2.fw_bn._variance: shape: [1024] stop_grad: True grad: None\n", - "param grad: rnn.rnn_stacks.2.fw_cell.weight_hh: shape: [1024, 1024] stop_grad: False grad: [[-0.14363798 -0.06933184 0.02901152 ... -0.19233373 -0.03206367\n", - " -0.00845779]\n", - " [-0.44314507 -0.8921327 -1.031872 ... -0.558997 -0.53070104\n", - " -0.855925 ]\n", - " [ 0.15673254 0.28793585 0.13351494 ... 0.38433537 0.5040767\n", - " 0.11303265]\n", - " ...\n", - " [-0.22923109 -0.62508404 -0.6195032 ... -0.6876448 -0.41718128\n", - " -0.74844164]\n", - " [ 0.18024652 0.45618314 0.81391454 ... 0.5780604 0.87566674\n", - " 0.71526295]\n", - " [ 0.3763076 0.54033077 0.9940485 ... 1.087821 0.72288674\n", - " 1.2852117 ]]\n", - "param grad: rnn.rnn_stacks.2.fw_cell.bias_hh: shape: [1024] stop_grad: False grad: [-0.10834593 -1.7079139 0.8125948 ... -1.4478711 1.4557447\n", - " 2.0694466 ]\n", - "param grad: rnn.rnn_stacks.2.bw_cell.weight_hh: shape: [1024, 1024] stop_grad: False grad: None\n", - "param grad: rnn.rnn_stacks.2.bw_cell.bias_hh: shape: [1024] stop_grad: False grad: None\n", - "param grad: fc.weight: shape: [2048, 4299] stop_grad: False grad: [[ 1.4382483e-02 2.0160766e-02 1.2322801e-02 ... 1.0075266e-02\n", - " 7.4421698e-03 -2.3925617e+01]\n", - " [ 3.7887424e-02 5.7105277e-02 2.8803380e-02 ... 2.4820438e-02\n", - " 1.8560058e-02 -5.0687141e+01]\n", - " [ 4.5566272e-02 5.4415584e-02 3.2858539e-02 ... 3.2725763e-02\n", - " 2.1536341e-02 -6.1036335e+01]\n", - " ...\n", - " [ 2.8015019e-02 3.5967816e-02 2.3228688e-02 ... 2.1284629e-02\n", - " 1.3860047e-02 -5.2543671e+01]\n", - " [ 2.8445240e-02 4.2448867e-02 2.7125146e-02 ... 2.2253662e-02\n", - " 1.7470375e-02 -4.3619675e+01]\n", - " [ 4.7438074e-02 5.8287360e-02 3.4546286e-02 ... 3.0827176e-02\n", - " 2.2168703e-02 -6.7901680e+01]]\n", - "param grad: fc.bias: shape: [4299] stop_grad: False grad: [ 8.8967547e-02 1.0697905e-01 6.5251388e-02 ... 
6.1503030e-02\n", - " 4.3404289e-02 -1.3512518e+02]\n" - ] - } - ], - "source": [ - "loss.backward(retain_graph=False)\n", - "for n, p in dp_model.named_parameters():\n", - " print(\n", - " f\"param grad: {n}: shape: {p.shape} stop_grad: {p.stop_gradient} grad: {p.grad}\")" - ] - }, - { - "cell_type": "code", - "execution_count": 26, - "id": "selected-crazy", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[1.]\n" - ] - } - ], - "source": [ - "print(loss.grad)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "bottom-engineer", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "stuffed-yeast", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.0" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} \ No newline at end of file diff --git a/.notebook/u2_confermer_model_wenet.ipynb b/.notebook/u2_confermer_model_wenet.ipynb deleted file mode 100644 index 4f2c9632f..000000000 --- a/.notebook/u2_confermer_model_wenet.ipynb +++ /dev/null @@ -1,4608 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "choice-grade", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "/workspace/DeepSpeech-2.x\n" - ] - }, - { - "data": { - "text/plain": [ - "'/workspace/DeepSpeech-2.x'" - ] - }, - "execution_count": 1, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "%cd ..\n", - "%pwd" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "broke-broad", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages/paddle/fluid/layers/utils.py:26: DeprecationWarning: `np.int` is a deprecated alias for the builtin `int`. To silence this warning, use `int` by itself. Doing this will not modify any behavior and is safe. When replacing `np.int`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. 
If you wish to review your current use, check the release note link for additional information.\n", - "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", - " def convert_to_list(value, n, name, dtype=np.int):\n", - "register user softmax to paddle, remove this when fixed!\n", - "register user log_softmax to paddle, remove this when fixed!\n", - "register user sigmoid to paddle, remove this when fixed!\n", - "register user log_sigmoid to paddle, remove this when fixed!\n", - "register user relu to paddle, remove this when fixed!\n", - "override cat of paddle if exists or register, remove this when fixed!\n", - "override item of paddle.Tensor if exists or register, remove this when fixed!\n", - "override long of paddle.Tensor if exists or register, remove this when fixed!\n", - "override new_full of paddle.Tensor if exists or register, remove this when fixed!\n", - "override eq of paddle.Tensor if exists or register, remove this when fixed!\n", - "override eq of paddle if exists or register, remove this when fixed!\n", - "override contiguous of paddle.Tensor if exists or register, remove this when fixed!\n", - "override size of paddle.Tensor (`to_static` do not process `size` property, maybe some `paddle` api dependent on it), remove this when fixed!\n", - "register user view to paddle.Tensor, remove this when fixed!\n", - "register user view_as to paddle.Tensor, remove this when fixed!\n", - "register user masked_fill to paddle.Tensor, remove this when fixed!\n", - "register user masked_fill_ to paddle.Tensor, remove this when fixed!\n", - "register user fill_ to paddle.Tensor, remove this when fixed!\n", - "register user repeat to paddle.Tensor, remove this when fixed!\n", - "register user softmax to paddle.Tensor, remove this when fixed!\n", - "register user sigmoid to paddle.Tensor, remove this when fixed!\n", - "register user relu to paddle.Tensor, remove this when fixed!\n", - "register user type_as to paddle.Tensor, remove this when fixed!\n", - "register user to to paddle.Tensor, remove this when fixed!\n", - "register user float to paddle.Tensor, remove this when fixed!\n", - "register user tolist to paddle.Tensor, remove this when fixed!\n", - "register user glu to paddle.nn.functional, remove this when fixed!\n", - "override ctc_loss of paddle.nn.functional if exists, remove this when fixed!\n", - "register user Module to paddle.nn, remove this when fixed!\n", - "register user ModuleList to paddle.nn, remove this when fixed!\n", - "register user GLU to paddle.nn, remove this when fixed!\n", - "register user ConstantPad2d to paddle.nn, remove this when fixed!\n", - "register user export to paddle.jit, remove this when fixed!\n" - ] - } - ], - "source": [ - "import numpy as np\n", - "import paddle\n", - "from yacs.config import CfgNode as CN\n", - "\n", - "from deepspeech.models.u2 import U2Model\n", - "from deepspeech.utils.layer_tools import print_params\n", - "from deepspeech.utils.layer_tools import summary" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "permanent-summary", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. 
Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n", - " and should_run_async(code)\n", - "[INFO 2021/04/20 03:32:21 u2.py:834] U2 Encoder type: conformer\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "encoder.embed.conv.0.weight | [256, 1, 3, 3] | 2304 | True\n", - "encoder.embed.conv.0.bias | [256] | 256 | True\n", - "encoder.embed.conv.2.weight | [256, 256, 3, 3] | 589824 | True\n", - "encoder.embed.conv.2.bias | [256] | 256 | True\n", - "encoder.embed.out.0.weight | [4864, 256] | 1245184 | True\n", - "encoder.embed.out.0.bias | [256] | 256 | True\n", - "encoder.after_norm.weight | [256] | 256 | True\n", - "encoder.after_norm.bias | [256] | 256 | True\n", - "encoder.encoders.0.self_attn.pos_bias_u | [4, 64] | 256 | True\n", - "encoder.encoders.0.self_attn.pos_bias_v | [4, 64] | 256 | True\n", - "encoder.encoders.0.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.0.self_attn.linear_q.bias | [256] | 256 | True\n", - "encoder.encoders.0.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.0.self_attn.linear_k.bias | [256] | 256 | True\n", - "encoder.encoders.0.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.0.self_attn.linear_v.bias | [256] | 256 | True\n", - "encoder.encoders.0.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.0.self_attn.linear_out.bias | [256] | 256 | True\n", - "encoder.encoders.0.self_attn.linear_pos.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.0.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.0.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.0.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.0.feed_forward.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.0.feed_forward_macaron.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.0.feed_forward_macaron.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.0.feed_forward_macaron.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.0.feed_forward_macaron.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.0.conv_module.pointwise_conv1.weight | [512, 256, 1] | 131072 | True\n", - "encoder.encoders.0.conv_module.pointwise_conv1.bias | [512] | 512 | True\n", - "encoder.encoders.0.conv_module.depthwise_conv.weight | [256, 1, 15] | 3840 | True\n", - "encoder.encoders.0.conv_module.depthwise_conv.bias | [256] | 256 | True\n", - "encoder.encoders.0.conv_module.norm.weight | [256] | 256 | True\n", - "encoder.encoders.0.conv_module.norm.bias | [256] | 256 | True\n", - "encoder.encoders.0.conv_module.norm._mean | [256] | 256 | False\n", - "encoder.encoders.0.conv_module.norm._variance | [256] | 256 | False\n", - "encoder.encoders.0.conv_module.pointwise_conv2.weight | [256, 256, 1] | 65536 | True\n", - "encoder.encoders.0.conv_module.pointwise_conv2.bias | [256] | 256 | True\n", - "encoder.encoders.0.norm_ff.weight | [256] | 256 | True\n", - "encoder.encoders.0.norm_ff.bias | [256] | 256 | True\n", - "encoder.encoders.0.norm_mha.weight | [256] | 256 | True\n", - "encoder.encoders.0.norm_mha.bias | [256] | 256 | True\n", - "encoder.encoders.0.norm_ff_macaron.weight | [256] | 256 | True\n", - "encoder.encoders.0.norm_ff_macaron.bias | [256] | 256 | True\n", - "encoder.encoders.0.norm_conv.weight | [256] | 256 | True\n", - "encoder.encoders.0.norm_conv.bias 
| [256] | 256 | True\n", - "encoder.encoders.0.norm_final.weight | [256] | 256 | True\n", - "encoder.encoders.0.norm_final.bias | [256] | 256 | True\n", - "encoder.encoders.0.concat_linear.weight | [512, 256] | 131072 | True\n", - "encoder.encoders.0.concat_linear.bias | [256] | 256 | True\n", - "encoder.encoders.1.self_attn.pos_bias_u | [4, 64] | 256 | True\n", - "encoder.encoders.1.self_attn.pos_bias_v | [4, 64] | 256 | True\n", - "encoder.encoders.1.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.1.self_attn.linear_q.bias | [256] | 256 | True\n", - "encoder.encoders.1.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.1.self_attn.linear_k.bias | [256] | 256 | True\n", - "encoder.encoders.1.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.1.self_attn.linear_v.bias | [256] | 256 | True\n", - "encoder.encoders.1.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.1.self_attn.linear_out.bias | [256] | 256 | True\n", - "encoder.encoders.1.self_attn.linear_pos.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.1.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.1.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.1.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.1.feed_forward.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.1.feed_forward_macaron.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.1.feed_forward_macaron.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.1.feed_forward_macaron.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.1.feed_forward_macaron.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.1.conv_module.pointwise_conv1.weight | [512, 256, 1] | 131072 | True\n", - "encoder.encoders.1.conv_module.pointwise_conv1.bias | [512] | 512 | True\n", - "encoder.encoders.1.conv_module.depthwise_conv.weight | [256, 1, 15] | 3840 | True\n", - "encoder.encoders.1.conv_module.depthwise_conv.bias | [256] | 256 | True\n", - "encoder.encoders.1.conv_module.norm.weight | [256] | 256 | True\n", - "encoder.encoders.1.conv_module.norm.bias | [256] | 256 | True\n", - "encoder.encoders.1.conv_module.norm._mean | [256] | 256 | False\n", - "encoder.encoders.1.conv_module.norm._variance | [256] | 256 | False\n", - "encoder.encoders.1.conv_module.pointwise_conv2.weight | [256, 256, 1] | 65536 | True\n", - "encoder.encoders.1.conv_module.pointwise_conv2.bias | [256] | 256 | True\n", - "encoder.encoders.1.norm_ff.weight | [256] | 256 | True\n", - "encoder.encoders.1.norm_ff.bias | [256] | 256 | True\n", - "encoder.encoders.1.norm_mha.weight | [256] | 256 | True\n", - "encoder.encoders.1.norm_mha.bias | [256] | 256 | True\n", - "encoder.encoders.1.norm_ff_macaron.weight | [256] | 256 | True\n", - "encoder.encoders.1.norm_ff_macaron.bias | [256] | 256 | True\n", - "encoder.encoders.1.norm_conv.weight | [256] | 256 | True\n", - "encoder.encoders.1.norm_conv.bias | [256] | 256 | True\n", - "encoder.encoders.1.norm_final.weight | [256] | 256 | True\n", - "encoder.encoders.1.norm_final.bias | [256] | 256 | True\n", - "encoder.encoders.1.concat_linear.weight | [512, 256] | 131072 | True\n", - "encoder.encoders.1.concat_linear.bias | [256] | 256 | True\n", - "encoder.encoders.2.self_attn.pos_bias_u | [4, 64] | 256 | True\n", - "encoder.encoders.2.self_attn.pos_bias_v | [4, 64] | 256 | True\n", - "encoder.encoders.2.self_attn.linear_q.weight | [256, 256] | 65536 | 
True\n", - "encoder.encoders.2.self_attn.linear_q.bias | [256] | 256 | True\n", - "encoder.encoders.2.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.2.self_attn.linear_k.bias | [256] | 256 | True\n", - "encoder.encoders.2.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.2.self_attn.linear_v.bias | [256] | 256 | True\n", - "encoder.encoders.2.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.2.self_attn.linear_out.bias | [256] | 256 | True\n", - "encoder.encoders.2.self_attn.linear_pos.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.2.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.2.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.2.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.2.feed_forward.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.2.feed_forward_macaron.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.2.feed_forward_macaron.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.2.feed_forward_macaron.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.2.feed_forward_macaron.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.2.conv_module.pointwise_conv1.weight | [512, 256, 1] | 131072 | True\n", - "encoder.encoders.2.conv_module.pointwise_conv1.bias | [512] | 512 | True\n", - "encoder.encoders.2.conv_module.depthwise_conv.weight | [256, 1, 15] | 3840 | True\n", - "encoder.encoders.2.conv_module.depthwise_conv.bias | [256] | 256 | True\n", - "encoder.encoders.2.conv_module.norm.weight | [256] | 256 | True\n", - "encoder.encoders.2.conv_module.norm.bias | [256] | 256 | True\n", - "encoder.encoders.2.conv_module.norm._mean | [256] | 256 | False\n", - "encoder.encoders.2.conv_module.norm._variance | [256] | 256 | False\n", - "encoder.encoders.2.conv_module.pointwise_conv2.weight | [256, 256, 1] | 65536 | True\n", - "encoder.encoders.2.conv_module.pointwise_conv2.bias | [256] | 256 | True\n", - "encoder.encoders.2.norm_ff.weight | [256] | 256 | True\n", - "encoder.encoders.2.norm_ff.bias | [256] | 256 | True\n", - "encoder.encoders.2.norm_mha.weight | [256] | 256 | True\n", - "encoder.encoders.2.norm_mha.bias | [256] | 256 | True\n", - "encoder.encoders.2.norm_ff_macaron.weight | [256] | 256 | True\n", - "encoder.encoders.2.norm_ff_macaron.bias | [256] | 256 | True\n", - "encoder.encoders.2.norm_conv.weight | [256] | 256 | True\n", - "encoder.encoders.2.norm_conv.bias | [256] | 256 | True\n", - "encoder.encoders.2.norm_final.weight | [256] | 256 | True\n", - "encoder.encoders.2.norm_final.bias | [256] | 256 | True\n", - "encoder.encoders.2.concat_linear.weight | [512, 256] | 131072 | True\n", - "encoder.encoders.2.concat_linear.bias | [256] | 256 | True\n", - "encoder.encoders.3.self_attn.pos_bias_u | [4, 64] | 256 | True\n", - "encoder.encoders.3.self_attn.pos_bias_v | [4, 64] | 256 | True\n", - "encoder.encoders.3.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.3.self_attn.linear_q.bias | [256] | 256 | True\n", - "encoder.encoders.3.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.3.self_attn.linear_k.bias | [256] | 256 | True\n", - "encoder.encoders.3.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.3.self_attn.linear_v.bias | [256] | 256 | True\n", - "encoder.encoders.3.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.3.self_attn.linear_out.bias | 
[256] | 256 | True\n", - "encoder.encoders.3.self_attn.linear_pos.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.3.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.3.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.3.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.3.feed_forward.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.3.feed_forward_macaron.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.3.feed_forward_macaron.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.3.feed_forward_macaron.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.3.feed_forward_macaron.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.3.conv_module.pointwise_conv1.weight | [512, 256, 1] | 131072 | True\n", - "encoder.encoders.3.conv_module.pointwise_conv1.bias | [512] | 512 | True\n", - "encoder.encoders.3.conv_module.depthwise_conv.weight | [256, 1, 15] | 3840 | True\n", - "encoder.encoders.3.conv_module.depthwise_conv.bias | [256] | 256 | True\n", - "encoder.encoders.3.conv_module.norm.weight | [256] | 256 | True\n", - "encoder.encoders.3.conv_module.norm.bias | [256] | 256 | True\n", - "encoder.encoders.3.conv_module.norm._mean | [256] | 256 | False\n", - "encoder.encoders.3.conv_module.norm._variance | [256] | 256 | False\n", - "encoder.encoders.3.conv_module.pointwise_conv2.weight | [256, 256, 1] | 65536 | True\n", - "encoder.encoders.3.conv_module.pointwise_conv2.bias | [256] | 256 | True\n", - "encoder.encoders.3.norm_ff.weight | [256] | 256 | True\n", - "encoder.encoders.3.norm_ff.bias | [256] | 256 | True\n", - "encoder.encoders.3.norm_mha.weight | [256] | 256 | True\n", - "encoder.encoders.3.norm_mha.bias | [256] | 256 | True\n", - "encoder.encoders.3.norm_ff_macaron.weight | [256] | 256 | True\n", - "encoder.encoders.3.norm_ff_macaron.bias | [256] | 256 | True\n", - "encoder.encoders.3.norm_conv.weight | [256] | 256 | True\n", - "encoder.encoders.3.norm_conv.bias | [256] | 256 | True\n", - "encoder.encoders.3.norm_final.weight | [256] | 256 | True\n", - "encoder.encoders.3.norm_final.bias | [256] | 256 | True\n", - "encoder.encoders.3.concat_linear.weight | [512, 256] | 131072 | True\n", - "encoder.encoders.3.concat_linear.bias | [256] | 256 | True\n", - "encoder.encoders.4.self_attn.pos_bias_u | [4, 64] | 256 | True\n", - "encoder.encoders.4.self_attn.pos_bias_v | [4, 64] | 256 | True\n", - "encoder.encoders.4.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.4.self_attn.linear_q.bias | [256] | 256 | True\n", - "encoder.encoders.4.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.4.self_attn.linear_k.bias | [256] | 256 | True\n", - "encoder.encoders.4.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.4.self_attn.linear_v.bias | [256] | 256 | True\n", - "encoder.encoders.4.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.4.self_attn.linear_out.bias | [256] | 256 | True\n", - "encoder.encoders.4.self_attn.linear_pos.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.4.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.4.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.4.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.4.feed_forward.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.4.feed_forward_macaron.w_1.weight | [256, 2048] | 524288 | True\n", - 
"encoder.encoders.4.feed_forward_macaron.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.4.feed_forward_macaron.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.4.feed_forward_macaron.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.4.conv_module.pointwise_conv1.weight | [512, 256, 1] | 131072 | True\n", - "encoder.encoders.4.conv_module.pointwise_conv1.bias | [512] | 512 | True\n", - "encoder.encoders.4.conv_module.depthwise_conv.weight | [256, 1, 15] | 3840 | True\n", - "encoder.encoders.4.conv_module.depthwise_conv.bias | [256] | 256 | True\n", - "encoder.encoders.4.conv_module.norm.weight | [256] | 256 | True\n", - "encoder.encoders.4.conv_module.norm.bias | [256] | 256 | True\n", - "encoder.encoders.4.conv_module.norm._mean | [256] | 256 | False\n", - "encoder.encoders.4.conv_module.norm._variance | [256] | 256 | False\n", - "encoder.encoders.4.conv_module.pointwise_conv2.weight | [256, 256, 1] | 65536 | True\n", - "encoder.encoders.4.conv_module.pointwise_conv2.bias | [256] | 256 | True\n", - "encoder.encoders.4.norm_ff.weight | [256] | 256 | True\n", - "encoder.encoders.4.norm_ff.bias | [256] | 256 | True\n", - "encoder.encoders.4.norm_mha.weight | [256] | 256 | True\n", - "encoder.encoders.4.norm_mha.bias | [256] | 256 | True\n", - "encoder.encoders.4.norm_ff_macaron.weight | [256] | 256 | True\n", - "encoder.encoders.4.norm_ff_macaron.bias | [256] | 256 | True\n", - "encoder.encoders.4.norm_conv.weight | [256] | 256 | True\n", - "encoder.encoders.4.norm_conv.bias | [256] | 256 | True\n", - "encoder.encoders.4.norm_final.weight | [256] | 256 | True\n", - "encoder.encoders.4.norm_final.bias | [256] | 256 | True\n", - "encoder.encoders.4.concat_linear.weight | [512, 256] | 131072 | True\n", - "encoder.encoders.4.concat_linear.bias | [256] | 256 | True\n", - "encoder.encoders.5.self_attn.pos_bias_u | [4, 64] | 256 | True\n", - "encoder.encoders.5.self_attn.pos_bias_v | [4, 64] | 256 | True\n", - "encoder.encoders.5.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.5.self_attn.linear_q.bias | [256] | 256 | True\n", - "encoder.encoders.5.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.5.self_attn.linear_k.bias | [256] | 256 | True\n", - "encoder.encoders.5.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.5.self_attn.linear_v.bias | [256] | 256 | True\n", - "encoder.encoders.5.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.5.self_attn.linear_out.bias | [256] | 256 | True\n", - "encoder.encoders.5.self_attn.linear_pos.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.5.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.5.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.5.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.5.feed_forward.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.5.feed_forward_macaron.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.5.feed_forward_macaron.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.5.feed_forward_macaron.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.5.feed_forward_macaron.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.5.conv_module.pointwise_conv1.weight | [512, 256, 1] | 131072 | True\n", - "encoder.encoders.5.conv_module.pointwise_conv1.bias | [512] | 512 | True\n", - "encoder.encoders.5.conv_module.depthwise_conv.weight | [256, 1, 15] | 3840 | True\n", - 
"encoder.encoders.5.conv_module.depthwise_conv.bias | [256] | 256 | True\n", - "encoder.encoders.5.conv_module.norm.weight | [256] | 256 | True\n", - "encoder.encoders.5.conv_module.norm.bias | [256] | 256 | True\n", - "encoder.encoders.5.conv_module.norm._mean | [256] | 256 | False\n", - "encoder.encoders.5.conv_module.norm._variance | [256] | 256 | False\n", - "encoder.encoders.5.conv_module.pointwise_conv2.weight | [256, 256, 1] | 65536 | True\n", - "encoder.encoders.5.conv_module.pointwise_conv2.bias | [256] | 256 | True\n", - "encoder.encoders.5.norm_ff.weight | [256] | 256 | True\n", - "encoder.encoders.5.norm_ff.bias | [256] | 256 | True\n", - "encoder.encoders.5.norm_mha.weight | [256] | 256 | True\n", - "encoder.encoders.5.norm_mha.bias | [256] | 256 | True\n", - "encoder.encoders.5.norm_ff_macaron.weight | [256] | 256 | True\n", - "encoder.encoders.5.norm_ff_macaron.bias | [256] | 256 | True\n", - "encoder.encoders.5.norm_conv.weight | [256] | 256 | True\n", - "encoder.encoders.5.norm_conv.bias | [256] | 256 | True\n", - "encoder.encoders.5.norm_final.weight | [256] | 256 | True\n", - "encoder.encoders.5.norm_final.bias | [256] | 256 | True\n", - "encoder.encoders.5.concat_linear.weight | [512, 256] | 131072 | True\n", - "encoder.encoders.5.concat_linear.bias | [256] | 256 | True\n", - "encoder.encoders.6.self_attn.pos_bias_u | [4, 64] | 256 | True\n", - "encoder.encoders.6.self_attn.pos_bias_v | [4, 64] | 256 | True\n", - "encoder.encoders.6.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.6.self_attn.linear_q.bias | [256] | 256 | True\n", - "encoder.encoders.6.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.6.self_attn.linear_k.bias | [256] | 256 | True\n", - "encoder.encoders.6.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.6.self_attn.linear_v.bias | [256] | 256 | True\n", - "encoder.encoders.6.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.6.self_attn.linear_out.bias | [256] | 256 | True\n", - "encoder.encoders.6.self_attn.linear_pos.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.6.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.6.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.6.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.6.feed_forward.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.6.feed_forward_macaron.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.6.feed_forward_macaron.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.6.feed_forward_macaron.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.6.feed_forward_macaron.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.6.conv_module.pointwise_conv1.weight | [512, 256, 1] | 131072 | True\n", - "encoder.encoders.6.conv_module.pointwise_conv1.bias | [512] | 512 | True\n", - "encoder.encoders.6.conv_module.depthwise_conv.weight | [256, 1, 15] | 3840 | True\n", - "encoder.encoders.6.conv_module.depthwise_conv.bias | [256] | 256 | True\n", - "encoder.encoders.6.conv_module.norm.weight | [256] | 256 | True\n", - "encoder.encoders.6.conv_module.norm.bias | [256] | 256 | True\n", - "encoder.encoders.6.conv_module.norm._mean | [256] | 256 | False\n", - "encoder.encoders.6.conv_module.norm._variance | [256] | 256 | False\n", - "encoder.encoders.6.conv_module.pointwise_conv2.weight | [256, 256, 1] | 65536 | True\n", - "encoder.encoders.6.conv_module.pointwise_conv2.bias | 
[256] | 256 | True\n", - "encoder.encoders.6.norm_ff.weight | [256] | 256 | True\n", - "encoder.encoders.6.norm_ff.bias | [256] | 256 | True\n", - "encoder.encoders.6.norm_mha.weight | [256] | 256 | True\n", - "encoder.encoders.6.norm_mha.bias | [256] | 256 | True\n", - "encoder.encoders.6.norm_ff_macaron.weight | [256] | 256 | True\n", - "encoder.encoders.6.norm_ff_macaron.bias | [256] | 256 | True\n", - "encoder.encoders.6.norm_conv.weight | [256] | 256 | True\n", - "encoder.encoders.6.norm_conv.bias | [256] | 256 | True\n", - "encoder.encoders.6.norm_final.weight | [256] | 256 | True\n", - "encoder.encoders.6.norm_final.bias | [256] | 256 | True\n", - "encoder.encoders.6.concat_linear.weight | [512, 256] | 131072 | True\n", - "encoder.encoders.6.concat_linear.bias | [256] | 256 | True\n", - "encoder.encoders.7.self_attn.pos_bias_u | [4, 64] | 256 | True\n", - "encoder.encoders.7.self_attn.pos_bias_v | [4, 64] | 256 | True\n", - "encoder.encoders.7.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.7.self_attn.linear_q.bias | [256] | 256 | True\n", - "encoder.encoders.7.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.7.self_attn.linear_k.bias | [256] | 256 | True\n", - "encoder.encoders.7.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.7.self_attn.linear_v.bias | [256] | 256 | True\n", - "encoder.encoders.7.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.7.self_attn.linear_out.bias | [256] | 256 | True\n", - "encoder.encoders.7.self_attn.linear_pos.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.7.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.7.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.7.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.7.feed_forward.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.7.feed_forward_macaron.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.7.feed_forward_macaron.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.7.feed_forward_macaron.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.7.feed_forward_macaron.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.7.conv_module.pointwise_conv1.weight | [512, 256, 1] | 131072 | True\n", - "encoder.encoders.7.conv_module.pointwise_conv1.bias | [512] | 512 | True\n", - "encoder.encoders.7.conv_module.depthwise_conv.weight | [256, 1, 15] | 3840 | True\n", - "encoder.encoders.7.conv_module.depthwise_conv.bias | [256] | 256 | True\n", - "encoder.encoders.7.conv_module.norm.weight | [256] | 256 | True\n", - "encoder.encoders.7.conv_module.norm.bias | [256] | 256 | True\n", - "encoder.encoders.7.conv_module.norm._mean | [256] | 256 | False\n", - "encoder.encoders.7.conv_module.norm._variance | [256] | 256 | False\n", - "encoder.encoders.7.conv_module.pointwise_conv2.weight | [256, 256, 1] | 65536 | True\n", - "encoder.encoders.7.conv_module.pointwise_conv2.bias | [256] | 256 | True\n", - "encoder.encoders.7.norm_ff.weight | [256] | 256 | True\n", - "encoder.encoders.7.norm_ff.bias | [256] | 256 | True\n", - "encoder.encoders.7.norm_mha.weight | [256] | 256 | True\n", - "encoder.encoders.7.norm_mha.bias | [256] | 256 | True\n", - "encoder.encoders.7.norm_ff_macaron.weight | [256] | 256 | True\n", - "encoder.encoders.7.norm_ff_macaron.bias | [256] | 256 | True\n", - "encoder.encoders.7.norm_conv.weight | [256] | 256 | True\n", - "encoder.encoders.7.norm_conv.bias | 
[256] | 256 | True\n", - "encoder.encoders.7.norm_final.weight | [256] | 256 | True\n", - "encoder.encoders.7.norm_final.bias | [256] | 256 | True\n", - "encoder.encoders.7.concat_linear.weight | [512, 256] | 131072 | True\n", - "encoder.encoders.7.concat_linear.bias | [256] | 256 | True\n", - "encoder.encoders.8.self_attn.pos_bias_u | [4, 64] | 256 | True\n", - "encoder.encoders.8.self_attn.pos_bias_v | [4, 64] | 256 | True\n", - "encoder.encoders.8.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.8.self_attn.linear_q.bias | [256] | 256 | True\n", - "encoder.encoders.8.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.8.self_attn.linear_k.bias | [256] | 256 | True\n", - "encoder.encoders.8.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.8.self_attn.linear_v.bias | [256] | 256 | True\n", - "encoder.encoders.8.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.8.self_attn.linear_out.bias | [256] | 256 | True\n", - "encoder.encoders.8.self_attn.linear_pos.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.8.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.8.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.8.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.8.feed_forward.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.8.feed_forward_macaron.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.8.feed_forward_macaron.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.8.feed_forward_macaron.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.8.feed_forward_macaron.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.8.conv_module.pointwise_conv1.weight | [512, 256, 1] | 131072 | True\n", - "encoder.encoders.8.conv_module.pointwise_conv1.bias | [512] | 512 | True\n", - "encoder.encoders.8.conv_module.depthwise_conv.weight | [256, 1, 15] | 3840 | True\n", - "encoder.encoders.8.conv_module.depthwise_conv.bias | [256] | 256 | True\n", - "encoder.encoders.8.conv_module.norm.weight | [256] | 256 | True\n", - "encoder.encoders.8.conv_module.norm.bias | [256] | 256 | True\n", - "encoder.encoders.8.conv_module.norm._mean | [256] | 256 | False\n", - "encoder.encoders.8.conv_module.norm._variance | [256] | 256 | False\n", - "encoder.encoders.8.conv_module.pointwise_conv2.weight | [256, 256, 1] | 65536 | True\n", - "encoder.encoders.8.conv_module.pointwise_conv2.bias | [256] | 256 | True\n", - "encoder.encoders.8.norm_ff.weight | [256] | 256 | True\n", - "encoder.encoders.8.norm_ff.bias | [256] | 256 | True\n", - "encoder.encoders.8.norm_mha.weight | [256] | 256 | True\n", - "encoder.encoders.8.norm_mha.bias | [256] | 256 | True\n", - "encoder.encoders.8.norm_ff_macaron.weight | [256] | 256 | True\n", - "encoder.encoders.8.norm_ff_macaron.bias | [256] | 256 | True\n", - "encoder.encoders.8.norm_conv.weight | [256] | 256 | True\n", - "encoder.encoders.8.norm_conv.bias | [256] | 256 | True\n", - "encoder.encoders.8.norm_final.weight | [256] | 256 | True\n", - "encoder.encoders.8.norm_final.bias | [256] | 256 | True\n", - "encoder.encoders.8.concat_linear.weight | [512, 256] | 131072 | True\n", - "encoder.encoders.8.concat_linear.bias | [256] | 256 | True\n", - "encoder.encoders.9.self_attn.pos_bias_u | [4, 64] | 256 | True\n", - "encoder.encoders.9.self_attn.pos_bias_v | [4, 64] | 256 | True\n", - "encoder.encoders.9.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", 
- "encoder.encoders.9.self_attn.linear_q.bias | [256] | 256 | True\n", - "encoder.encoders.9.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.9.self_attn.linear_k.bias | [256] | 256 | True\n", - "encoder.encoders.9.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.9.self_attn.linear_v.bias | [256] | 256 | True\n", - "encoder.encoders.9.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.9.self_attn.linear_out.bias | [256] | 256 | True\n", - "encoder.encoders.9.self_attn.linear_pos.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.9.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.9.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.9.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.9.feed_forward.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.9.feed_forward_macaron.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.9.feed_forward_macaron.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.9.feed_forward_macaron.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.9.feed_forward_macaron.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.9.conv_module.pointwise_conv1.weight | [512, 256, 1] | 131072 | True\n", - "encoder.encoders.9.conv_module.pointwise_conv1.bias | [512] | 512 | True\n", - "encoder.encoders.9.conv_module.depthwise_conv.weight | [256, 1, 15] | 3840 | True\n", - "encoder.encoders.9.conv_module.depthwise_conv.bias | [256] | 256 | True\n", - "encoder.encoders.9.conv_module.norm.weight | [256] | 256 | True\n", - "encoder.encoders.9.conv_module.norm.bias | [256] | 256 | True\n", - "encoder.encoders.9.conv_module.norm._mean | [256] | 256 | False\n", - "encoder.encoders.9.conv_module.norm._variance | [256] | 256 | False\n", - "encoder.encoders.9.conv_module.pointwise_conv2.weight | [256, 256, 1] | 65536 | True\n", - "encoder.encoders.9.conv_module.pointwise_conv2.bias | [256] | 256 | True\n", - "encoder.encoders.9.norm_ff.weight | [256] | 256 | True\n", - "encoder.encoders.9.norm_ff.bias | [256] | 256 | True\n", - "encoder.encoders.9.norm_mha.weight | [256] | 256 | True\n", - "encoder.encoders.9.norm_mha.bias | [256] | 256 | True\n", - "encoder.encoders.9.norm_ff_macaron.weight | [256] | 256 | True\n", - "encoder.encoders.9.norm_ff_macaron.bias | [256] | 256 | True\n", - "encoder.encoders.9.norm_conv.weight | [256] | 256 | True\n", - "encoder.encoders.9.norm_conv.bias | [256] | 256 | True\n", - "encoder.encoders.9.norm_final.weight | [256] | 256 | True\n", - "encoder.encoders.9.norm_final.bias | [256] | 256 | True\n", - "encoder.encoders.9.concat_linear.weight | [512, 256] | 131072 | True\n", - "encoder.encoders.9.concat_linear.bias | [256] | 256 | True\n", - "encoder.encoders.10.self_attn.pos_bias_u | [4, 64] | 256 | True\n", - "encoder.encoders.10.self_attn.pos_bias_v | [4, 64] | 256 | True\n", - "encoder.encoders.10.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.10.self_attn.linear_q.bias | [256] | 256 | True\n", - "encoder.encoders.10.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.10.self_attn.linear_k.bias | [256] | 256 | True\n", - "encoder.encoders.10.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.10.self_attn.linear_v.bias | [256] | 256 | True\n", - "encoder.encoders.10.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.10.self_attn.linear_out.bias 
| [256] | 256 | True\n", - "encoder.encoders.10.self_attn.linear_pos.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.10.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.10.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.10.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.10.feed_forward.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.10.feed_forward_macaron.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.10.feed_forward_macaron.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.10.feed_forward_macaron.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.10.feed_forward_macaron.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.10.conv_module.pointwise_conv1.weight | [512, 256, 1] | 131072 | True\n", - "encoder.encoders.10.conv_module.pointwise_conv1.bias | [512] | 512 | True\n", - "encoder.encoders.10.conv_module.depthwise_conv.weight | [256, 1, 15] | 3840 | True\n", - "encoder.encoders.10.conv_module.depthwise_conv.bias | [256] | 256 | True\n", - "encoder.encoders.10.conv_module.norm.weight | [256] | 256 | True\n", - "encoder.encoders.10.conv_module.norm.bias | [256] | 256 | True\n", - "encoder.encoders.10.conv_module.norm._mean | [256] | 256 | False\n", - "encoder.encoders.10.conv_module.norm._variance | [256] | 256 | False\n", - "encoder.encoders.10.conv_module.pointwise_conv2.weight | [256, 256, 1] | 65536 | True\n", - "encoder.encoders.10.conv_module.pointwise_conv2.bias | [256] | 256 | True\n", - "encoder.encoders.10.norm_ff.weight | [256] | 256 | True\n", - "encoder.encoders.10.norm_ff.bias | [256] | 256 | True\n", - "encoder.encoders.10.norm_mha.weight | [256] | 256 | True\n", - "encoder.encoders.10.norm_mha.bias | [256] | 256 | True\n", - "encoder.encoders.10.norm_ff_macaron.weight | [256] | 256 | True\n", - "encoder.encoders.10.norm_ff_macaron.bias | [256] | 256 | True\n", - "encoder.encoders.10.norm_conv.weight | [256] | 256 | True\n", - "encoder.encoders.10.norm_conv.bias | [256] | 256 | True\n", - "encoder.encoders.10.norm_final.weight | [256] | 256 | True\n", - "encoder.encoders.10.norm_final.bias | [256] | 256 | True\n", - "encoder.encoders.10.concat_linear.weight | [512, 256] | 131072 | True\n", - "encoder.encoders.10.concat_linear.bias | [256] | 256 | True\n", - "encoder.encoders.11.self_attn.pos_bias_u | [4, 64] | 256 | True\n", - "encoder.encoders.11.self_attn.pos_bias_v | [4, 64] | 256 | True\n", - "encoder.encoders.11.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.11.self_attn.linear_q.bias | [256] | 256 | True\n", - "encoder.encoders.11.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.11.self_attn.linear_k.bias | [256] | 256 | True\n", - "encoder.encoders.11.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.11.self_attn.linear_v.bias | [256] | 256 | True\n", - "encoder.encoders.11.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.11.self_attn.linear_out.bias | [256] | 256 | True\n", - "encoder.encoders.11.self_attn.linear_pos.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.11.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.11.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.11.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.11.feed_forward.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.11.feed_forward_macaron.w_1.weight | 
[256, 2048] | 524288 | True\n", - "encoder.encoders.11.feed_forward_macaron.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.11.feed_forward_macaron.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.11.feed_forward_macaron.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.11.conv_module.pointwise_conv1.weight | [512, 256, 1] | 131072 | True\n", - "encoder.encoders.11.conv_module.pointwise_conv1.bias | [512] | 512 | True\n", - "encoder.encoders.11.conv_module.depthwise_conv.weight | [256, 1, 15] | 3840 | True\n", - "encoder.encoders.11.conv_module.depthwise_conv.bias | [256] | 256 | True\n", - "encoder.encoders.11.conv_module.norm.weight | [256] | 256 | True\n", - "encoder.encoders.11.conv_module.norm.bias | [256] | 256 | True\n", - "encoder.encoders.11.conv_module.norm._mean | [256] | 256 | False\n", - "encoder.encoders.11.conv_module.norm._variance | [256] | 256 | False\n", - "encoder.encoders.11.conv_module.pointwise_conv2.weight | [256, 256, 1] | 65536 | True\n", - "encoder.encoders.11.conv_module.pointwise_conv2.bias | [256] | 256 | True\n", - "encoder.encoders.11.norm_ff.weight | [256] | 256 | True\n", - "encoder.encoders.11.norm_ff.bias | [256] | 256 | True\n", - "encoder.encoders.11.norm_mha.weight | [256] | 256 | True\n", - "encoder.encoders.11.norm_mha.bias | [256] | 256 | True\n", - "encoder.encoders.11.norm_ff_macaron.weight | [256] | 256 | True\n", - "encoder.encoders.11.norm_ff_macaron.bias | [256] | 256 | True\n", - "encoder.encoders.11.norm_conv.weight | [256] | 256 | True\n", - "encoder.encoders.11.norm_conv.bias | [256] | 256 | True\n", - "encoder.encoders.11.norm_final.weight | [256] | 256 | True\n", - "encoder.encoders.11.norm_final.bias | [256] | 256 | True\n", - "encoder.encoders.11.concat_linear.weight | [512, 256] | 131072 | True\n", - "encoder.encoders.11.concat_linear.bias | [256] | 256 | True\n", - "decoder.embed.0.weight | [4233, 256] | 1083648 | True\n", - "decoder.after_norm.weight | [256] | 256 | True\n", - "decoder.after_norm.bias | [256] | 256 | True\n", - "decoder.output_layer.weight | [256, 4233] | 1083648 | True\n", - "decoder.output_layer.bias | [4233] | 4233 | True\n", - "decoder.decoders.0.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.0.self_attn.linear_q.bias | [256] | 256 | True\n", - "decoder.decoders.0.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.0.self_attn.linear_k.bias | [256] | 256 | True\n", - "decoder.decoders.0.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.0.self_attn.linear_v.bias | [256] | 256 | True\n", - "decoder.decoders.0.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.0.self_attn.linear_out.bias | [256] | 256 | True\n", - "decoder.decoders.0.src_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.0.src_attn.linear_q.bias | [256] | 256 | True\n", - "decoder.decoders.0.src_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.0.src_attn.linear_k.bias | [256] | 256 | True\n", - "decoder.decoders.0.src_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.0.src_attn.linear_v.bias | [256] | 256 | True\n", - "decoder.decoders.0.src_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.0.src_attn.linear_out.bias | [256] | 256 | True\n", - "decoder.decoders.0.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "decoder.decoders.0.feed_forward.w_1.bias | [2048] | 2048 | True\n", - 
"decoder.decoders.0.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "decoder.decoders.0.feed_forward.w_2.bias | [256] | 256 | True\n", - "decoder.decoders.0.norm1.weight | [256] | 256 | True\n", - "decoder.decoders.0.norm1.bias | [256] | 256 | True\n", - "decoder.decoders.0.norm2.weight | [256] | 256 | True\n", - "decoder.decoders.0.norm2.bias | [256] | 256 | True\n", - "decoder.decoders.0.norm3.weight | [256] | 256 | True\n", - "decoder.decoders.0.norm3.bias | [256] | 256 | True\n", - "decoder.decoders.0.concat_linear1.weight | [512, 256] | 131072 | True\n", - "decoder.decoders.0.concat_linear1.bias | [256] | 256 | True\n", - "decoder.decoders.0.concat_linear2.weight | [512, 256] | 131072 | True\n", - "decoder.decoders.0.concat_linear2.bias | [256] | 256 | True\n", - "decoder.decoders.1.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.1.self_attn.linear_q.bias | [256] | 256 | True\n", - "decoder.decoders.1.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.1.self_attn.linear_k.bias | [256] | 256 | True\n", - "decoder.decoders.1.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.1.self_attn.linear_v.bias | [256] | 256 | True\n", - "decoder.decoders.1.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.1.self_attn.linear_out.bias | [256] | 256 | True\n", - "decoder.decoders.1.src_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.1.src_attn.linear_q.bias | [256] | 256 | True\n", - "decoder.decoders.1.src_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.1.src_attn.linear_k.bias | [256] | 256 | True\n", - "decoder.decoders.1.src_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.1.src_attn.linear_v.bias | [256] | 256 | True\n", - "decoder.decoders.1.src_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.1.src_attn.linear_out.bias | [256] | 256 | True\n", - "decoder.decoders.1.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "decoder.decoders.1.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "decoder.decoders.1.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "decoder.decoders.1.feed_forward.w_2.bias | [256] | 256 | True\n", - "decoder.decoders.1.norm1.weight | [256] | 256 | True\n", - "decoder.decoders.1.norm1.bias | [256] | 256 | True\n", - "decoder.decoders.1.norm2.weight | [256] | 256 | True\n", - "decoder.decoders.1.norm2.bias | [256] | 256 | True\n", - "decoder.decoders.1.norm3.weight | [256] | 256 | True\n", - "decoder.decoders.1.norm3.bias | [256] | 256 | True\n", - "decoder.decoders.1.concat_linear1.weight | [512, 256] | 131072 | True\n", - "decoder.decoders.1.concat_linear1.bias | [256] | 256 | True\n", - "decoder.decoders.1.concat_linear2.weight | [512, 256] | 131072 | True\n", - "decoder.decoders.1.concat_linear2.bias | [256] | 256 | True\n", - "decoder.decoders.2.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.2.self_attn.linear_q.bias | [256] | 256 | True\n", - "decoder.decoders.2.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.2.self_attn.linear_k.bias | [256] | 256 | True\n", - "decoder.decoders.2.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.2.self_attn.linear_v.bias | [256] | 256 | True\n", - "decoder.decoders.2.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.2.self_attn.linear_out.bias | [256] | 256 | True\n", - 
"decoder.decoders.2.src_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.2.src_attn.linear_q.bias | [256] | 256 | True\n", - "decoder.decoders.2.src_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.2.src_attn.linear_k.bias | [256] | 256 | True\n", - "decoder.decoders.2.src_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.2.src_attn.linear_v.bias | [256] | 256 | True\n", - "decoder.decoders.2.src_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.2.src_attn.linear_out.bias | [256] | 256 | True\n", - "decoder.decoders.2.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "decoder.decoders.2.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "decoder.decoders.2.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "decoder.decoders.2.feed_forward.w_2.bias | [256] | 256 | True\n", - "decoder.decoders.2.norm1.weight | [256] | 256 | True\n", - "decoder.decoders.2.norm1.bias | [256] | 256 | True\n", - "decoder.decoders.2.norm2.weight | [256] | 256 | True\n", - "decoder.decoders.2.norm2.bias | [256] | 256 | True\n", - "decoder.decoders.2.norm3.weight | [256] | 256 | True\n", - "decoder.decoders.2.norm3.bias | [256] | 256 | True\n", - "decoder.decoders.2.concat_linear1.weight | [512, 256] | 131072 | True\n", - "decoder.decoders.2.concat_linear1.bias | [256] | 256 | True\n", - "decoder.decoders.2.concat_linear2.weight | [512, 256] | 131072 | True\n", - "decoder.decoders.2.concat_linear2.bias | [256] | 256 | True\n", - "decoder.decoders.3.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.3.self_attn.linear_q.bias | [256] | 256 | True\n", - "decoder.decoders.3.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.3.self_attn.linear_k.bias | [256] | 256 | True\n", - "decoder.decoders.3.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.3.self_attn.linear_v.bias | [256] | 256 | True\n", - "decoder.decoders.3.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.3.self_attn.linear_out.bias | [256] | 256 | True\n", - "decoder.decoders.3.src_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.3.src_attn.linear_q.bias | [256] | 256 | True\n", - "decoder.decoders.3.src_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.3.src_attn.linear_k.bias | [256] | 256 | True\n", - "decoder.decoders.3.src_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.3.src_attn.linear_v.bias | [256] | 256 | True\n", - "decoder.decoders.3.src_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.3.src_attn.linear_out.bias | [256] | 256 | True\n", - "decoder.decoders.3.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "decoder.decoders.3.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "decoder.decoders.3.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "decoder.decoders.3.feed_forward.w_2.bias | [256] | 256 | True\n", - "decoder.decoders.3.norm1.weight | [256] | 256 | True\n", - "decoder.decoders.3.norm1.bias | [256] | 256 | True\n", - "decoder.decoders.3.norm2.weight | [256] | 256 | True\n", - "decoder.decoders.3.norm2.bias | [256] | 256 | True\n", - "decoder.decoders.3.norm3.weight | [256] | 256 | True\n", - "decoder.decoders.3.norm3.bias | [256] | 256 | True\n", - "decoder.decoders.3.concat_linear1.weight | [512, 256] | 131072 | True\n", - "decoder.decoders.3.concat_linear1.bias | [256] | 256 | True\n", - 
"decoder.decoders.3.concat_linear2.weight | [512, 256] | 131072 | True\n", - "decoder.decoders.3.concat_linear2.bias | [256] | 256 | True\n", - "decoder.decoders.4.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.4.self_attn.linear_q.bias | [256] | 256 | True\n", - "decoder.decoders.4.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.4.self_attn.linear_k.bias | [256] | 256 | True\n", - "decoder.decoders.4.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.4.self_attn.linear_v.bias | [256] | 256 | True\n", - "decoder.decoders.4.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.4.self_attn.linear_out.bias | [256] | 256 | True\n", - "decoder.decoders.4.src_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.4.src_attn.linear_q.bias | [256] | 256 | True\n", - "decoder.decoders.4.src_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.4.src_attn.linear_k.bias | [256] | 256 | True\n", - "decoder.decoders.4.src_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.4.src_attn.linear_v.bias | [256] | 256 | True\n", - "decoder.decoders.4.src_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.4.src_attn.linear_out.bias | [256] | 256 | True\n", - "decoder.decoders.4.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "decoder.decoders.4.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "decoder.decoders.4.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "decoder.decoders.4.feed_forward.w_2.bias | [256] | 256 | True\n", - "decoder.decoders.4.norm1.weight | [256] | 256 | True\n", - "decoder.decoders.4.norm1.bias | [256] | 256 | True\n", - "decoder.decoders.4.norm2.weight | [256] | 256 | True\n", - "decoder.decoders.4.norm2.bias | [256] | 256 | True\n", - "decoder.decoders.4.norm3.weight | [256] | 256 | True\n", - "decoder.decoders.4.norm3.bias | [256] | 256 | True\n", - "decoder.decoders.4.concat_linear1.weight | [512, 256] | 131072 | True\n", - "decoder.decoders.4.concat_linear1.bias | [256] | 256 | True\n", - "decoder.decoders.4.concat_linear2.weight | [512, 256] | 131072 | True\n", - "decoder.decoders.4.concat_linear2.bias | [256] | 256 | True\n", - "decoder.decoders.5.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.5.self_attn.linear_q.bias | [256] | 256 | True\n", - "decoder.decoders.5.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.5.self_attn.linear_k.bias | [256] | 256 | True\n", - "decoder.decoders.5.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.5.self_attn.linear_v.bias | [256] | 256 | True\n", - "decoder.decoders.5.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.5.self_attn.linear_out.bias | [256] | 256 | True\n", - "decoder.decoders.5.src_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.5.src_attn.linear_q.bias | [256] | 256 | True\n", - "decoder.decoders.5.src_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.5.src_attn.linear_k.bias | [256] | 256 | True\n", - "decoder.decoders.5.src_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.5.src_attn.linear_v.bias | [256] | 256 | True\n", - "decoder.decoders.5.src_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.5.src_attn.linear_out.bias | [256] | 256 | True\n", - "decoder.decoders.5.feed_forward.w_1.weight | [256, 
2048] | 524288 | True\n", - "decoder.decoders.5.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "decoder.decoders.5.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "decoder.decoders.5.feed_forward.w_2.bias | [256] | 256 | True\n", - "decoder.decoders.5.norm1.weight | [256] | 256 | True\n", - "decoder.decoders.5.norm1.bias | [256] | 256 | True\n", - "decoder.decoders.5.norm2.weight | [256] | 256 | True\n", - "decoder.decoders.5.norm2.bias | [256] | 256 | True\n", - "decoder.decoders.5.norm3.weight | [256] | 256 | True\n", - "decoder.decoders.5.norm3.bias | [256] | 256 | True\n", - "decoder.decoders.5.concat_linear1.weight | [512, 256] | 131072 | True\n", - "decoder.decoders.5.concat_linear1.bias | [256] | 256 | True\n", - "decoder.decoders.5.concat_linear2.weight | [512, 256] | 131072 | True\n", - "decoder.decoders.5.concat_linear2.bias | [256] | 256 | True\n", - "ctc.ctc_lo.weight | [256, 4233] | 1083648 | True\n", - "ctc.ctc_lo.bias | [4233] | 4233 | True\n", - "Total parameters: 687.0, 49355282.0 elements.\n" - ] - } - ], - "source": [ - "conf_str='examples/aishell/s1/conf/conformer.yaml'\n", - "cfg = CN().load_cfg(open(conf_str))\n", - "cfg.model.input_dim = 80\n", - "cfg.model.output_dim = 4233\n", - "cfg.model.cmvn_file = \"/workspace/wenet/examples/aishell/s0/raw_wav/train/global_cmvn\"\n", - "cfg.model.cmvn_file_type = 'json'\n", - "cfg.freeze()\n", - "\n", - "model = U2Model(cfg.model)\n", - "print_params(model)\n" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "sapphire-agent", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "encoder.global_cmvn.mean | [80] | 80\n", - "encoder.global_cmvn.istd | [80] | 80\n", - "encoder.embed.conv.0.weight | [256, 1, 3, 3] | 2304\n", - "encoder.embed.conv.0.bias | [256] | 256\n", - "encoder.embed.conv.2.weight | [256, 256, 3, 3] | 589824\n", - "encoder.embed.conv.2.bias | [256] | 256\n", - "encoder.embed.out.0.weight | [4864, 256] | 1245184\n", - "encoder.embed.out.0.bias | [256] | 256\n", - "encoder.after_norm.weight | [256] | 256\n", - "encoder.after_norm.bias | [256] | 256\n", - "encoder.encoders.0.self_attn.pos_bias_u | [4, 64] | 256\n", - "encoder.encoders.0.self_attn.pos_bias_v | [4, 64] | 256\n", - "encoder.encoders.0.self_attn.linear_q.weight | [256, 256] | 65536\n", - "encoder.encoders.0.self_attn.linear_q.bias | [256] | 256\n", - "encoder.encoders.0.self_attn.linear_k.weight | [256, 256] | 65536\n", - "encoder.encoders.0.self_attn.linear_k.bias | [256] | 256\n", - "encoder.encoders.0.self_attn.linear_v.weight | [256, 256] | 65536\n", - "encoder.encoders.0.self_attn.linear_v.bias | [256] | 256\n", - "encoder.encoders.0.self_attn.linear_out.weight | [256, 256] | 65536\n", - "encoder.encoders.0.self_attn.linear_out.bias | [256] | 256\n", - "encoder.encoders.0.self_attn.linear_pos.weight | [256, 256] | 65536\n", - "encoder.encoders.0.feed_forward.w_1.weight | [256, 2048] | 524288\n", - "encoder.encoders.0.feed_forward.w_1.bias | [2048] | 2048\n", - "encoder.encoders.0.feed_forward.w_2.weight | [2048, 256] | 524288\n", - "encoder.encoders.0.feed_forward.w_2.bias | [256] | 256\n", - "encoder.encoders.0.feed_forward_macaron.w_1.weight | [256, 2048] | 524288\n", - "encoder.encoders.0.feed_forward_macaron.w_1.bias | [2048] | 2048\n", - "encoder.encoders.0.feed_forward_macaron.w_2.weight | [2048, 256] | 524288\n", - "encoder.encoders.0.feed_forward_macaron.w_2.bias | [256] | 256\n", - "encoder.encoders.0.conv_module.pointwise_conv1.weight | [512, 256, 1] | 
131072\n", - "encoder.encoders.0.conv_module.pointwise_conv1.bias | [512] | 512\n", - "encoder.encoders.0.conv_module.depthwise_conv.weight | [256, 1, 15] | 3840\n", - "encoder.encoders.0.conv_module.depthwise_conv.bias | [256] | 256\n", - "encoder.encoders.0.conv_module.norm.weight | [256] | 256\n", - "encoder.encoders.0.conv_module.norm.bias | [256] | 256\n", - "encoder.encoders.0.conv_module.norm._mean | [256] | 256\n", - "encoder.encoders.0.conv_module.norm._variance | [256] | 256\n", - "encoder.encoders.0.conv_module.pointwise_conv2.weight | [256, 256, 1] | 65536\n", - "encoder.encoders.0.conv_module.pointwise_conv2.bias | [256] | 256\n", - "encoder.encoders.0.norm_ff.weight | [256] | 256\n", - "encoder.encoders.0.norm_ff.bias | [256] | 256\n", - "encoder.encoders.0.norm_mha.weight | [256] | 256\n", - "encoder.encoders.0.norm_mha.bias | [256] | 256\n", - "encoder.encoders.0.norm_ff_macaron.weight | [256] | 256\n", - "encoder.encoders.0.norm_ff_macaron.bias | [256] | 256\n", - "encoder.encoders.0.norm_conv.weight | [256] | 256\n", - "encoder.encoders.0.norm_conv.bias | [256] | 256\n", - "encoder.encoders.0.norm_final.weight | [256] | 256\n", - "encoder.encoders.0.norm_final.bias | [256] | 256\n", - "encoder.encoders.0.concat_linear.weight | [512, 256] | 131072\n", - "encoder.encoders.0.concat_linear.bias | [256] | 256\n", - "encoder.encoders.1.self_attn.pos_bias_u | [4, 64] | 256\n", - "encoder.encoders.1.self_attn.pos_bias_v | [4, 64] | 256\n", - "encoder.encoders.1.self_attn.linear_q.weight | [256, 256] | 65536\n", - "encoder.encoders.1.self_attn.linear_q.bias | [256] | 256\n", - "encoder.encoders.1.self_attn.linear_k.weight | [256, 256] | 65536\n", - "encoder.encoders.1.self_attn.linear_k.bias | [256] | 256\n", - "encoder.encoders.1.self_attn.linear_v.weight | [256, 256] | 65536\n", - "encoder.encoders.1.self_attn.linear_v.bias | [256] | 256\n", - "encoder.encoders.1.self_attn.linear_out.weight | [256, 256] | 65536\n", - "encoder.encoders.1.self_attn.linear_out.bias | [256] | 256\n", - "encoder.encoders.1.self_attn.linear_pos.weight | [256, 256] | 65536\n", - "encoder.encoders.1.feed_forward.w_1.weight | [256, 2048] | 524288\n", - "encoder.encoders.1.feed_forward.w_1.bias | [2048] | 2048\n", - "encoder.encoders.1.feed_forward.w_2.weight | [2048, 256] | 524288\n", - "encoder.encoders.1.feed_forward.w_2.bias | [256] | 256\n", - "encoder.encoders.1.feed_forward_macaron.w_1.weight | [256, 2048] | 524288\n", - "encoder.encoders.1.feed_forward_macaron.w_1.bias | [2048] | 2048\n", - "encoder.encoders.1.feed_forward_macaron.w_2.weight | [2048, 256] | 524288\n", - "encoder.encoders.1.feed_forward_macaron.w_2.bias | [256] | 256\n", - "encoder.encoders.1.conv_module.pointwise_conv1.weight | [512, 256, 1] | 131072\n", - "encoder.encoders.1.conv_module.pointwise_conv1.bias | [512] | 512\n", - "encoder.encoders.1.conv_module.depthwise_conv.weight | [256, 1, 15] | 3840\n", - "encoder.encoders.1.conv_module.depthwise_conv.bias | [256] | 256\n", - "encoder.encoders.1.conv_module.norm.weight | [256] | 256\n", - "encoder.encoders.1.conv_module.norm.bias | [256] | 256\n", - "encoder.encoders.1.conv_module.norm._mean | [256] | 256\n", - "encoder.encoders.1.conv_module.norm._variance | [256] | 256\n", - "encoder.encoders.1.conv_module.pointwise_conv2.weight | [256, 256, 1] | 65536\n", - "encoder.encoders.1.conv_module.pointwise_conv2.bias | [256] | 256\n", - "encoder.encoders.1.norm_ff.weight | [256] | 256\n", - "encoder.encoders.1.norm_ff.bias | [256] | 256\n", - "encoder.encoders.1.norm_mha.weight | 
[256] | 256\n", - "encoder.encoders.1.norm_mha.bias | [256] | 256\n", - "encoder.encoders.1.norm_ff_macaron.weight | [256] | 256\n", - "encoder.encoders.1.norm_ff_macaron.bias | [256] | 256\n", - "encoder.encoders.1.norm_conv.weight | [256] | 256\n", - "encoder.encoders.1.norm_conv.bias | [256] | 256\n", - "encoder.encoders.1.norm_final.weight | [256] | 256\n", - "encoder.encoders.1.norm_final.bias | [256] | 256\n", - "encoder.encoders.1.concat_linear.weight | [512, 256] | 131072\n", - "encoder.encoders.1.concat_linear.bias | [256] | 256\n", - "encoder.encoders.2.self_attn.pos_bias_u | [4, 64] | 256\n", - "encoder.encoders.2.self_attn.pos_bias_v | [4, 64] | 256\n", - "encoder.encoders.2.self_attn.linear_q.weight | [256, 256] | 65536\n", - "encoder.encoders.2.self_attn.linear_q.bias | [256] | 256\n", - "encoder.encoders.2.self_attn.linear_k.weight | [256, 256] | 65536\n", - "encoder.encoders.2.self_attn.linear_k.bias | [256] | 256\n", - "encoder.encoders.2.self_attn.linear_v.weight | [256, 256] | 65536\n", - "encoder.encoders.2.self_attn.linear_v.bias | [256] | 256\n", - "encoder.encoders.2.self_attn.linear_out.weight | [256, 256] | 65536\n", - "encoder.encoders.2.self_attn.linear_out.bias | [256] | 256\n", - "encoder.encoders.2.self_attn.linear_pos.weight | [256, 256] | 65536\n", - "encoder.encoders.2.feed_forward.w_1.weight | [256, 2048] | 524288\n", - "encoder.encoders.2.feed_forward.w_1.bias | [2048] | 2048\n", - "encoder.encoders.2.feed_forward.w_2.weight | [2048, 256] | 524288\n", - "encoder.encoders.2.feed_forward.w_2.bias | [256] | 256\n", - "encoder.encoders.2.feed_forward_macaron.w_1.weight | [256, 2048] | 524288\n", - "encoder.encoders.2.feed_forward_macaron.w_1.bias | [2048] | 2048\n", - "encoder.encoders.2.feed_forward_macaron.w_2.weight | [2048, 256] | 524288\n", - "encoder.encoders.2.feed_forward_macaron.w_2.bias | [256] | 256\n", - "encoder.encoders.2.conv_module.pointwise_conv1.weight | [512, 256, 1] | 131072\n", - "encoder.encoders.2.conv_module.pointwise_conv1.bias | [512] | 512\n", - "encoder.encoders.2.conv_module.depthwise_conv.weight | [256, 1, 15] | 3840\n", - "encoder.encoders.2.conv_module.depthwise_conv.bias | [256] | 256\n", - "encoder.encoders.2.conv_module.norm.weight | [256] | 256\n", - "encoder.encoders.2.conv_module.norm.bias | [256] | 256\n", - "encoder.encoders.2.conv_module.norm._mean | [256] | 256\n", - "encoder.encoders.2.conv_module.norm._variance | [256] | 256\n", - "encoder.encoders.2.conv_module.pointwise_conv2.weight | [256, 256, 1] | 65536\n", - "encoder.encoders.2.conv_module.pointwise_conv2.bias | [256] | 256\n", - "encoder.encoders.2.norm_ff.weight | [256] | 256\n", - "encoder.encoders.2.norm_ff.bias | [256] | 256\n", - "encoder.encoders.2.norm_mha.weight | [256] | 256\n", - "encoder.encoders.2.norm_mha.bias | [256] | 256\n", - "encoder.encoders.2.norm_ff_macaron.weight | [256] | 256\n", - "encoder.encoders.2.norm_ff_macaron.bias | [256] | 256\n", - "encoder.encoders.2.norm_conv.weight | [256] | 256\n", - "encoder.encoders.2.norm_conv.bias | [256] | 256\n", - "encoder.encoders.2.norm_final.weight | [256] | 256\n", - "encoder.encoders.2.norm_final.bias | [256] | 256\n", - "encoder.encoders.2.concat_linear.weight | [512, 256] | 131072\n", - "encoder.encoders.2.concat_linear.bias | [256] | 256\n", - "encoder.encoders.3.self_attn.pos_bias_u | [4, 64] | 256\n", - "encoder.encoders.3.self_attn.pos_bias_v | [4, 64] | 256\n", - "encoder.encoders.3.self_attn.linear_q.weight | [256, 256] | 65536\n", - "encoder.encoders.3.self_attn.linear_q.bias | [256] | 
256\n", - "encoder.encoders.3.self_attn.linear_k.weight | [256, 256] | 65536\n", - "encoder.encoders.3.self_attn.linear_k.bias | [256] | 256\n", - "encoder.encoders.3.self_attn.linear_v.weight | [256, 256] | 65536\n", - "encoder.encoders.3.self_attn.linear_v.bias | [256] | 256\n", - "encoder.encoders.3.self_attn.linear_out.weight | [256, 256] | 65536\n", - "encoder.encoders.3.self_attn.linear_out.bias | [256] | 256\n", - "encoder.encoders.3.self_attn.linear_pos.weight | [256, 256] | 65536\n", - "encoder.encoders.3.feed_forward.w_1.weight | [256, 2048] | 524288\n", - "encoder.encoders.3.feed_forward.w_1.bias | [2048] | 2048\n", - "encoder.encoders.3.feed_forward.w_2.weight | [2048, 256] | 524288\n", - "encoder.encoders.3.feed_forward.w_2.bias | [256] | 256\n", - "encoder.encoders.3.feed_forward_macaron.w_1.weight | [256, 2048] | 524288\n", - "encoder.encoders.3.feed_forward_macaron.w_1.bias | [2048] | 2048\n", - "encoder.encoders.3.feed_forward_macaron.w_2.weight | [2048, 256] | 524288\n", - "encoder.encoders.3.feed_forward_macaron.w_2.bias | [256] | 256\n", - "encoder.encoders.3.conv_module.pointwise_conv1.weight | [512, 256, 1] | 131072\n", - "encoder.encoders.3.conv_module.pointwise_conv1.bias | [512] | 512\n", - "encoder.encoders.3.conv_module.depthwise_conv.weight | [256, 1, 15] | 3840\n", - "encoder.encoders.3.conv_module.depthwise_conv.bias | [256] | 256\n", - "encoder.encoders.3.conv_module.norm.weight | [256] | 256\n", - "encoder.encoders.3.conv_module.norm.bias | [256] | 256\n", - "encoder.encoders.3.conv_module.norm._mean | [256] | 256\n", - "encoder.encoders.3.conv_module.norm._variance | [256] | 256\n", - "encoder.encoders.3.conv_module.pointwise_conv2.weight | [256, 256, 1] | 65536\n", - "encoder.encoders.3.conv_module.pointwise_conv2.bias | [256] | 256\n", - "encoder.encoders.3.norm_ff.weight | [256] | 256\n", - "encoder.encoders.3.norm_ff.bias | [256] | 256\n", - "encoder.encoders.3.norm_mha.weight | [256] | 256\n", - "encoder.encoders.3.norm_mha.bias | [256] | 256\n", - "encoder.encoders.3.norm_ff_macaron.weight | [256] | 256\n", - "encoder.encoders.3.norm_ff_macaron.bias | [256] | 256\n", - "encoder.encoders.3.norm_conv.weight | [256] | 256\n", - "encoder.encoders.3.norm_conv.bias | [256] | 256\n", - "encoder.encoders.3.norm_final.weight | [256] | 256\n", - "encoder.encoders.3.norm_final.bias | [256] | 256\n", - "encoder.encoders.3.concat_linear.weight | [512, 256] | 131072\n", - "encoder.encoders.3.concat_linear.bias | [256] | 256\n", - "encoder.encoders.4.self_attn.pos_bias_u | [4, 64] | 256\n", - "encoder.encoders.4.self_attn.pos_bias_v | [4, 64] | 256\n", - "encoder.encoders.4.self_attn.linear_q.weight | [256, 256] | 65536\n", - "encoder.encoders.4.self_attn.linear_q.bias | [256] | 256\n", - "encoder.encoders.4.self_attn.linear_k.weight | [256, 256] | 65536\n", - "encoder.encoders.4.self_attn.linear_k.bias | [256] | 256\n", - "encoder.encoders.4.self_attn.linear_v.weight | [256, 256] | 65536\n", - "encoder.encoders.4.self_attn.linear_v.bias | [256] | 256\n", - "encoder.encoders.4.self_attn.linear_out.weight | [256, 256] | 65536\n", - "encoder.encoders.4.self_attn.linear_out.bias | [256] | 256\n", - "encoder.encoders.4.self_attn.linear_pos.weight | [256, 256] | 65536\n", - "encoder.encoders.4.feed_forward.w_1.weight | [256, 2048] | 524288\n", - "encoder.encoders.4.feed_forward.w_1.bias | [2048] | 2048\n", - "encoder.encoders.4.feed_forward.w_2.weight | [2048, 256] | 524288\n", - "encoder.encoders.4.feed_forward.w_2.bias | [256] | 256\n", - 
"encoder.encoders.4.feed_forward_macaron.w_1.weight | [256, 2048] | 524288\n", - "encoder.encoders.4.feed_forward_macaron.w_1.bias | [2048] | 2048\n", - "encoder.encoders.4.feed_forward_macaron.w_2.weight | [2048, 256] | 524288\n", - "encoder.encoders.4.feed_forward_macaron.w_2.bias | [256] | 256\n", - "encoder.encoders.4.conv_module.pointwise_conv1.weight | [512, 256, 1] | 131072\n", - "encoder.encoders.4.conv_module.pointwise_conv1.bias | [512] | 512\n", - "encoder.encoders.4.conv_module.depthwise_conv.weight | [256, 1, 15] | 3840\n", - "encoder.encoders.4.conv_module.depthwise_conv.bias | [256] | 256\n", - "encoder.encoders.4.conv_module.norm.weight | [256] | 256\n", - "encoder.encoders.4.conv_module.norm.bias | [256] | 256\n", - "encoder.encoders.4.conv_module.norm._mean | [256] | 256\n", - "encoder.encoders.4.conv_module.norm._variance | [256] | 256\n", - "encoder.encoders.4.conv_module.pointwise_conv2.weight | [256, 256, 1] | 65536\n", - "encoder.encoders.4.conv_module.pointwise_conv2.bias | [256] | 256\n", - "encoder.encoders.4.norm_ff.weight | [256] | 256\n", - "encoder.encoders.4.norm_ff.bias | [256] | 256\n", - "encoder.encoders.4.norm_mha.weight | [256] | 256\n", - "encoder.encoders.4.norm_mha.bias | [256] | 256\n", - "encoder.encoders.4.norm_ff_macaron.weight | [256] | 256\n", - "encoder.encoders.4.norm_ff_macaron.bias | [256] | 256\n", - "encoder.encoders.4.norm_conv.weight | [256] | 256\n", - "encoder.encoders.4.norm_conv.bias | [256] | 256\n", - "encoder.encoders.4.norm_final.weight | [256] | 256\n", - "encoder.encoders.4.norm_final.bias | [256] | 256\n", - "encoder.encoders.4.concat_linear.weight | [512, 256] | 131072\n", - "encoder.encoders.4.concat_linear.bias | [256] | 256\n", - "encoder.encoders.5.self_attn.pos_bias_u | [4, 64] | 256\n", - "encoder.encoders.5.self_attn.pos_bias_v | [4, 64] | 256\n", - "encoder.encoders.5.self_attn.linear_q.weight | [256, 256] | 65536\n", - "encoder.encoders.5.self_attn.linear_q.bias | [256] | 256\n", - "encoder.encoders.5.self_attn.linear_k.weight | [256, 256] | 65536\n", - "encoder.encoders.5.self_attn.linear_k.bias | [256] | 256\n", - "encoder.encoders.5.self_attn.linear_v.weight | [256, 256] | 65536\n", - "encoder.encoders.5.self_attn.linear_v.bias | [256] | 256\n", - "encoder.encoders.5.self_attn.linear_out.weight | [256, 256] | 65536\n", - "encoder.encoders.5.self_attn.linear_out.bias | [256] | 256\n", - "encoder.encoders.5.self_attn.linear_pos.weight | [256, 256] | 65536\n", - "encoder.encoders.5.feed_forward.w_1.weight | [256, 2048] | 524288\n", - "encoder.encoders.5.feed_forward.w_1.bias | [2048] | 2048\n", - "encoder.encoders.5.feed_forward.w_2.weight | [2048, 256] | 524288\n", - "encoder.encoders.5.feed_forward.w_2.bias | [256] | 256\n", - "encoder.encoders.5.feed_forward_macaron.w_1.weight | [256, 2048] | 524288\n", - "encoder.encoders.5.feed_forward_macaron.w_1.bias | [2048] | 2048\n", - "encoder.encoders.5.feed_forward_macaron.w_2.weight | [2048, 256] | 524288\n", - "encoder.encoders.5.feed_forward_macaron.w_2.bias | [256] | 256\n", - "encoder.encoders.5.conv_module.pointwise_conv1.weight | [512, 256, 1] | 131072\n", - "encoder.encoders.5.conv_module.pointwise_conv1.bias | [512] | 512\n", - "encoder.encoders.5.conv_module.depthwise_conv.weight | [256, 1, 15] | 3840\n", - "encoder.encoders.5.conv_module.depthwise_conv.bias | [256] | 256\n", - "encoder.encoders.5.conv_module.norm.weight | [256] | 256\n", - "encoder.encoders.5.conv_module.norm.bias | [256] | 256\n", - "encoder.encoders.5.conv_module.norm._mean | [256] | 
256\n", - "encoder.encoders.5.conv_module.norm._variance | [256] | 256\n", - "encoder.encoders.5.conv_module.pointwise_conv2.weight | [256, 256, 1] | 65536\n", - "encoder.encoders.5.conv_module.pointwise_conv2.bias | [256] | 256\n", - "encoder.encoders.5.norm_ff.weight | [256] | 256\n", - "encoder.encoders.5.norm_ff.bias | [256] | 256\n", - "encoder.encoders.5.norm_mha.weight | [256] | 256\n", - "encoder.encoders.5.norm_mha.bias | [256] | 256\n", - "encoder.encoders.5.norm_ff_macaron.weight | [256] | 256\n", - "encoder.encoders.5.norm_ff_macaron.bias | [256] | 256\n", - "encoder.encoders.5.norm_conv.weight | [256] | 256\n", - "encoder.encoders.5.norm_conv.bias | [256] | 256\n", - "encoder.encoders.5.norm_final.weight | [256] | 256\n", - "encoder.encoders.5.norm_final.bias | [256] | 256\n", - "encoder.encoders.5.concat_linear.weight | [512, 256] | 131072\n", - "encoder.encoders.5.concat_linear.bias | [256] | 256\n", - "encoder.encoders.6.self_attn.pos_bias_u | [4, 64] | 256\n", - "encoder.encoders.6.self_attn.pos_bias_v | [4, 64] | 256\n", - "encoder.encoders.6.self_attn.linear_q.weight | [256, 256] | 65536\n", - "encoder.encoders.6.self_attn.linear_q.bias | [256] | 256\n", - "encoder.encoders.6.self_attn.linear_k.weight | [256, 256] | 65536\n", - "encoder.encoders.6.self_attn.linear_k.bias | [256] | 256\n", - "encoder.encoders.6.self_attn.linear_v.weight | [256, 256] | 65536\n", - "encoder.encoders.6.self_attn.linear_v.bias | [256] | 256\n", - "encoder.encoders.6.self_attn.linear_out.weight | [256, 256] | 65536\n", - "encoder.encoders.6.self_attn.linear_out.bias | [256] | 256\n", - "encoder.encoders.6.self_attn.linear_pos.weight | [256, 256] | 65536\n", - "encoder.encoders.6.feed_forward.w_1.weight | [256, 2048] | 524288\n", - "encoder.encoders.6.feed_forward.w_1.bias | [2048] | 2048\n", - "encoder.encoders.6.feed_forward.w_2.weight | [2048, 256] | 524288\n", - "encoder.encoders.6.feed_forward.w_2.bias | [256] | 256\n", - "encoder.encoders.6.feed_forward_macaron.w_1.weight | [256, 2048] | 524288\n", - "encoder.encoders.6.feed_forward_macaron.w_1.bias | [2048] | 2048\n", - "encoder.encoders.6.feed_forward_macaron.w_2.weight | [2048, 256] | 524288\n", - "encoder.encoders.6.feed_forward_macaron.w_2.bias | [256] | 256\n", - "encoder.encoders.6.conv_module.pointwise_conv1.weight | [512, 256, 1] | 131072\n", - "encoder.encoders.6.conv_module.pointwise_conv1.bias | [512] | 512\n", - "encoder.encoders.6.conv_module.depthwise_conv.weight | [256, 1, 15] | 3840\n", - "encoder.encoders.6.conv_module.depthwise_conv.bias | [256] | 256\n", - "encoder.encoders.6.conv_module.norm.weight | [256] | 256\n", - "encoder.encoders.6.conv_module.norm.bias | [256] | 256\n", - "encoder.encoders.6.conv_module.norm._mean | [256] | 256\n", - "encoder.encoders.6.conv_module.norm._variance | [256] | 256\n", - "encoder.encoders.6.conv_module.pointwise_conv2.weight | [256, 256, 1] | 65536\n", - "encoder.encoders.6.conv_module.pointwise_conv2.bias | [256] | 256\n", - "encoder.encoders.6.norm_ff.weight | [256] | 256\n", - "encoder.encoders.6.norm_ff.bias | [256] | 256\n", - "encoder.encoders.6.norm_mha.weight | [256] | 256\n", - "encoder.encoders.6.norm_mha.bias | [256] | 256\n", - "encoder.encoders.6.norm_ff_macaron.weight | [256] | 256\n", - "encoder.encoders.6.norm_ff_macaron.bias | [256] | 256\n", - "encoder.encoders.6.norm_conv.weight | [256] | 256\n", - "encoder.encoders.6.norm_conv.bias | [256] | 256\n", - "encoder.encoders.6.norm_final.weight | [256] | 256\n", - "encoder.encoders.6.norm_final.bias | [256] | 256\n", - 
"encoder.encoders.6.concat_linear.weight | [512, 256] | 131072\n", - "encoder.encoders.6.concat_linear.bias | [256] | 256\n", - "encoder.encoders.7.self_attn.pos_bias_u | [4, 64] | 256\n", - "encoder.encoders.7.self_attn.pos_bias_v | [4, 64] | 256\n", - "encoder.encoders.7.self_attn.linear_q.weight | [256, 256] | 65536\n", - "encoder.encoders.7.self_attn.linear_q.bias | [256] | 256\n", - "encoder.encoders.7.self_attn.linear_k.weight | [256, 256] | 65536\n", - "encoder.encoders.7.self_attn.linear_k.bias | [256] | 256\n", - "encoder.encoders.7.self_attn.linear_v.weight | [256, 256] | 65536\n", - "encoder.encoders.7.self_attn.linear_v.bias | [256] | 256\n", - "encoder.encoders.7.self_attn.linear_out.weight | [256, 256] | 65536\n", - "encoder.encoders.7.self_attn.linear_out.bias | [256] | 256\n", - "encoder.encoders.7.self_attn.linear_pos.weight | [256, 256] | 65536\n", - "encoder.encoders.7.feed_forward.w_1.weight | [256, 2048] | 524288\n", - "encoder.encoders.7.feed_forward.w_1.bias | [2048] | 2048\n", - "encoder.encoders.7.feed_forward.w_2.weight | [2048, 256] | 524288\n", - "encoder.encoders.7.feed_forward.w_2.bias | [256] | 256\n", - "encoder.encoders.7.feed_forward_macaron.w_1.weight | [256, 2048] | 524288\n", - "encoder.encoders.7.feed_forward_macaron.w_1.bias | [2048] | 2048\n", - "encoder.encoders.7.feed_forward_macaron.w_2.weight | [2048, 256] | 524288\n", - "encoder.encoders.7.feed_forward_macaron.w_2.bias | [256] | 256\n", - "encoder.encoders.7.conv_module.pointwise_conv1.weight | [512, 256, 1] | 131072\n", - "encoder.encoders.7.conv_module.pointwise_conv1.bias | [512] | 512\n", - "encoder.encoders.7.conv_module.depthwise_conv.weight | [256, 1, 15] | 3840\n", - "encoder.encoders.7.conv_module.depthwise_conv.bias | [256] | 256\n", - "encoder.encoders.7.conv_module.norm.weight | [256] | 256\n", - "encoder.encoders.7.conv_module.norm.bias | [256] | 256\n", - "encoder.encoders.7.conv_module.norm._mean | [256] | 256\n", - "encoder.encoders.7.conv_module.norm._variance | [256] | 256\n", - "encoder.encoders.7.conv_module.pointwise_conv2.weight | [256, 256, 1] | 65536\n", - "encoder.encoders.7.conv_module.pointwise_conv2.bias | [256] | 256\n", - "encoder.encoders.7.norm_ff.weight | [256] | 256\n", - "encoder.encoders.7.norm_ff.bias | [256] | 256\n", - "encoder.encoders.7.norm_mha.weight | [256] | 256\n", - "encoder.encoders.7.norm_mha.bias | [256] | 256\n", - "encoder.encoders.7.norm_ff_macaron.weight | [256] | 256\n", - "encoder.encoders.7.norm_ff_macaron.bias | [256] | 256\n", - "encoder.encoders.7.norm_conv.weight | [256] | 256\n", - "encoder.encoders.7.norm_conv.bias | [256] | 256\n", - "encoder.encoders.7.norm_final.weight | [256] | 256\n", - "encoder.encoders.7.norm_final.bias | [256] | 256\n", - "encoder.encoders.7.concat_linear.weight | [512, 256] | 131072\n", - "encoder.encoders.7.concat_linear.bias | [256] | 256\n", - "encoder.encoders.8.self_attn.pos_bias_u | [4, 64] | 256\n", - "encoder.encoders.8.self_attn.pos_bias_v | [4, 64] | 256\n", - "encoder.encoders.8.self_attn.linear_q.weight | [256, 256] | 65536\n", - "encoder.encoders.8.self_attn.linear_q.bias | [256] | 256\n", - "encoder.encoders.8.self_attn.linear_k.weight | [256, 256] | 65536\n", - "encoder.encoders.8.self_attn.linear_k.bias | [256] | 256\n", - "encoder.encoders.8.self_attn.linear_v.weight | [256, 256] | 65536\n", - "encoder.encoders.8.self_attn.linear_v.bias | [256] | 256\n", - "encoder.encoders.8.self_attn.linear_out.weight | [256, 256] | 65536\n", - "encoder.encoders.8.self_attn.linear_out.bias | [256] | 
256\n", - "encoder.encoders.8.self_attn.linear_pos.weight | [256, 256] | 65536\n", - "encoder.encoders.8.feed_forward.w_1.weight | [256, 2048] | 524288\n", - "encoder.encoders.8.feed_forward.w_1.bias | [2048] | 2048\n", - "encoder.encoders.8.feed_forward.w_2.weight | [2048, 256] | 524288\n", - "encoder.encoders.8.feed_forward.w_2.bias | [256] | 256\n", - "encoder.encoders.8.feed_forward_macaron.w_1.weight | [256, 2048] | 524288\n", - "encoder.encoders.8.feed_forward_macaron.w_1.bias | [2048] | 2048\n", - "encoder.encoders.8.feed_forward_macaron.w_2.weight | [2048, 256] | 524288\n", - "encoder.encoders.8.feed_forward_macaron.w_2.bias | [256] | 256\n", - "encoder.encoders.8.conv_module.pointwise_conv1.weight | [512, 256, 1] | 131072\n", - "encoder.encoders.8.conv_module.pointwise_conv1.bias | [512] | 512\n", - "encoder.encoders.8.conv_module.depthwise_conv.weight | [256, 1, 15] | 3840\n", - "encoder.encoders.8.conv_module.depthwise_conv.bias | [256] | 256\n", - "encoder.encoders.8.conv_module.norm.weight | [256] | 256\n", - "encoder.encoders.8.conv_module.norm.bias | [256] | 256\n", - "encoder.encoders.8.conv_module.norm._mean | [256] | 256\n", - "encoder.encoders.8.conv_module.norm._variance | [256] | 256\n", - "encoder.encoders.8.conv_module.pointwise_conv2.weight | [256, 256, 1] | 65536\n", - "encoder.encoders.8.conv_module.pointwise_conv2.bias | [256] | 256\n", - "encoder.encoders.8.norm_ff.weight | [256] | 256\n", - "encoder.encoders.8.norm_ff.bias | [256] | 256\n", - "encoder.encoders.8.norm_mha.weight | [256] | 256\n", - "encoder.encoders.8.norm_mha.bias | [256] | 256\n", - "encoder.encoders.8.norm_ff_macaron.weight | [256] | 256\n", - "encoder.encoders.8.norm_ff_macaron.bias | [256] | 256\n", - "encoder.encoders.8.norm_conv.weight | [256] | 256\n", - "encoder.encoders.8.norm_conv.bias | [256] | 256\n", - "encoder.encoders.8.norm_final.weight | [256] | 256\n", - "encoder.encoders.8.norm_final.bias | [256] | 256\n", - "encoder.encoders.8.concat_linear.weight | [512, 256] | 131072\n", - "encoder.encoders.8.concat_linear.bias | [256] | 256\n", - "encoder.encoders.9.self_attn.pos_bias_u | [4, 64] | 256\n", - "encoder.encoders.9.self_attn.pos_bias_v | [4, 64] | 256\n", - "encoder.encoders.9.self_attn.linear_q.weight | [256, 256] | 65536\n", - "encoder.encoders.9.self_attn.linear_q.bias | [256] | 256\n", - "encoder.encoders.9.self_attn.linear_k.weight | [256, 256] | 65536\n", - "encoder.encoders.9.self_attn.linear_k.bias | [256] | 256\n", - "encoder.encoders.9.self_attn.linear_v.weight | [256, 256] | 65536\n", - "encoder.encoders.9.self_attn.linear_v.bias | [256] | 256\n", - "encoder.encoders.9.self_attn.linear_out.weight | [256, 256] | 65536\n", - "encoder.encoders.9.self_attn.linear_out.bias | [256] | 256\n", - "encoder.encoders.9.self_attn.linear_pos.weight | [256, 256] | 65536\n", - "encoder.encoders.9.feed_forward.w_1.weight | [256, 2048] | 524288\n", - "encoder.encoders.9.feed_forward.w_1.bias | [2048] | 2048\n", - "encoder.encoders.9.feed_forward.w_2.weight | [2048, 256] | 524288\n", - "encoder.encoders.9.feed_forward.w_2.bias | [256] | 256\n", - "encoder.encoders.9.feed_forward_macaron.w_1.weight | [256, 2048] | 524288\n", - "encoder.encoders.9.feed_forward_macaron.w_1.bias | [2048] | 2048\n", - "encoder.encoders.9.feed_forward_macaron.w_2.weight | [2048, 256] | 524288\n", - "encoder.encoders.9.feed_forward_macaron.w_2.bias | [256] | 256\n", - "encoder.encoders.9.conv_module.pointwise_conv1.weight | [512, 256, 1] | 131072\n", - "encoder.encoders.9.conv_module.pointwise_conv1.bias | 
[512] | 512\n", - "encoder.encoders.9.conv_module.depthwise_conv.weight | [256, 1, 15] | 3840\n", - "encoder.encoders.9.conv_module.depthwise_conv.bias | [256] | 256\n", - "encoder.encoders.9.conv_module.norm.weight | [256] | 256\n", - "encoder.encoders.9.conv_module.norm.bias | [256] | 256\n", - "encoder.encoders.9.conv_module.norm._mean | [256] | 256\n", - "encoder.encoders.9.conv_module.norm._variance | [256] | 256\n", - "encoder.encoders.9.conv_module.pointwise_conv2.weight | [256, 256, 1] | 65536\n", - "encoder.encoders.9.conv_module.pointwise_conv2.bias | [256] | 256\n", - "encoder.encoders.9.norm_ff.weight | [256] | 256\n", - "encoder.encoders.9.norm_ff.bias | [256] | 256\n", - "encoder.encoders.9.norm_mha.weight | [256] | 256\n", - "encoder.encoders.9.norm_mha.bias | [256] | 256\n", - "encoder.encoders.9.norm_ff_macaron.weight | [256] | 256\n", - "encoder.encoders.9.norm_ff_macaron.bias | [256] | 256\n", - "encoder.encoders.9.norm_conv.weight | [256] | 256\n", - "encoder.encoders.9.norm_conv.bias | [256] | 256\n", - "encoder.encoders.9.norm_final.weight | [256] | 256\n", - "encoder.encoders.9.norm_final.bias | [256] | 256\n", - "encoder.encoders.9.concat_linear.weight | [512, 256] | 131072\n", - "encoder.encoders.9.concat_linear.bias | [256] | 256\n", - "encoder.encoders.10.self_attn.pos_bias_u | [4, 64] | 256\n", - "encoder.encoders.10.self_attn.pos_bias_v | [4, 64] | 256\n", - "encoder.encoders.10.self_attn.linear_q.weight | [256, 256] | 65536\n", - "encoder.encoders.10.self_attn.linear_q.bias | [256] | 256\n", - "encoder.encoders.10.self_attn.linear_k.weight | [256, 256] | 65536\n", - "encoder.encoders.10.self_attn.linear_k.bias | [256] | 256\n", - "encoder.encoders.10.self_attn.linear_v.weight | [256, 256] | 65536\n", - "encoder.encoders.10.self_attn.linear_v.bias | [256] | 256\n", - "encoder.encoders.10.self_attn.linear_out.weight | [256, 256] | 65536\n", - "encoder.encoders.10.self_attn.linear_out.bias | [256] | 256\n", - "encoder.encoders.10.self_attn.linear_pos.weight | [256, 256] | 65536\n", - "encoder.encoders.10.feed_forward.w_1.weight | [256, 2048] | 524288\n", - "encoder.encoders.10.feed_forward.w_1.bias | [2048] | 2048\n", - "encoder.encoders.10.feed_forward.w_2.weight | [2048, 256] | 524288\n", - "encoder.encoders.10.feed_forward.w_2.bias | [256] | 256\n", - "encoder.encoders.10.feed_forward_macaron.w_1.weight | [256, 2048] | 524288\n", - "encoder.encoders.10.feed_forward_macaron.w_1.bias | [2048] | 2048\n", - "encoder.encoders.10.feed_forward_macaron.w_2.weight | [2048, 256] | 524288\n", - "encoder.encoders.10.feed_forward_macaron.w_2.bias | [256] | 256\n", - "encoder.encoders.10.conv_module.pointwise_conv1.weight | [512, 256, 1] | 131072\n", - "encoder.encoders.10.conv_module.pointwise_conv1.bias | [512] | 512\n", - "encoder.encoders.10.conv_module.depthwise_conv.weight | [256, 1, 15] | 3840\n", - "encoder.encoders.10.conv_module.depthwise_conv.bias | [256] | 256\n", - "encoder.encoders.10.conv_module.norm.weight | [256] | 256\n", - "encoder.encoders.10.conv_module.norm.bias | [256] | 256\n", - "encoder.encoders.10.conv_module.norm._mean | [256] | 256\n", - "encoder.encoders.10.conv_module.norm._variance | [256] | 256\n", - "encoder.encoders.10.conv_module.pointwise_conv2.weight | [256, 256, 1] | 65536\n", - "encoder.encoders.10.conv_module.pointwise_conv2.bias | [256] | 256\n", - "encoder.encoders.10.norm_ff.weight | [256] | 256\n", - "encoder.encoders.10.norm_ff.bias | [256] | 256\n", - "encoder.encoders.10.norm_mha.weight | [256] | 256\n", - 
"encoder.encoders.10.norm_mha.bias | [256] | 256\n", - "encoder.encoders.10.norm_ff_macaron.weight | [256] | 256\n", - "encoder.encoders.10.norm_ff_macaron.bias | [256] | 256\n", - "encoder.encoders.10.norm_conv.weight | [256] | 256\n", - "encoder.encoders.10.norm_conv.bias | [256] | 256\n", - "encoder.encoders.10.norm_final.weight | [256] | 256\n", - "encoder.encoders.10.norm_final.bias | [256] | 256\n", - "encoder.encoders.10.concat_linear.weight | [512, 256] | 131072\n", - "encoder.encoders.10.concat_linear.bias | [256] | 256\n", - "encoder.encoders.11.self_attn.pos_bias_u | [4, 64] | 256\n", - "encoder.encoders.11.self_attn.pos_bias_v | [4, 64] | 256\n", - "encoder.encoders.11.self_attn.linear_q.weight | [256, 256] | 65536\n", - "encoder.encoders.11.self_attn.linear_q.bias | [256] | 256\n", - "encoder.encoders.11.self_attn.linear_k.weight | [256, 256] | 65536\n", - "encoder.encoders.11.self_attn.linear_k.bias | [256] | 256\n", - "encoder.encoders.11.self_attn.linear_v.weight | [256, 256] | 65536\n", - "encoder.encoders.11.self_attn.linear_v.bias | [256] | 256\n", - "encoder.encoders.11.self_attn.linear_out.weight | [256, 256] | 65536\n", - "encoder.encoders.11.self_attn.linear_out.bias | [256] | 256\n", - "encoder.encoders.11.self_attn.linear_pos.weight | [256, 256] | 65536\n", - "encoder.encoders.11.feed_forward.w_1.weight | [256, 2048] | 524288\n", - "encoder.encoders.11.feed_forward.w_1.bias | [2048] | 2048\n", - "encoder.encoders.11.feed_forward.w_2.weight | [2048, 256] | 524288\n", - "encoder.encoders.11.feed_forward.w_2.bias | [256] | 256\n", - "encoder.encoders.11.feed_forward_macaron.w_1.weight | [256, 2048] | 524288\n", - "encoder.encoders.11.feed_forward_macaron.w_1.bias | [2048] | 2048\n", - "encoder.encoders.11.feed_forward_macaron.w_2.weight | [2048, 256] | 524288\n", - "encoder.encoders.11.feed_forward_macaron.w_2.bias | [256] | 256\n", - "encoder.encoders.11.conv_module.pointwise_conv1.weight | [512, 256, 1] | 131072\n", - "encoder.encoders.11.conv_module.pointwise_conv1.bias | [512] | 512\n", - "encoder.encoders.11.conv_module.depthwise_conv.weight | [256, 1, 15] | 3840\n", - "encoder.encoders.11.conv_module.depthwise_conv.bias | [256] | 256\n", - "encoder.encoders.11.conv_module.norm.weight | [256] | 256\n", - "encoder.encoders.11.conv_module.norm.bias | [256] | 256\n", - "encoder.encoders.11.conv_module.norm._mean | [256] | 256\n", - "encoder.encoders.11.conv_module.norm._variance | [256] | 256\n", - "encoder.encoders.11.conv_module.pointwise_conv2.weight | [256, 256, 1] | 65536\n", - "encoder.encoders.11.conv_module.pointwise_conv2.bias | [256] | 256\n", - "encoder.encoders.11.norm_ff.weight | [256] | 256\n", - "encoder.encoders.11.norm_ff.bias | [256] | 256\n", - "encoder.encoders.11.norm_mha.weight | [256] | 256\n", - "encoder.encoders.11.norm_mha.bias | [256] | 256\n", - "encoder.encoders.11.norm_ff_macaron.weight | [256] | 256\n", - "encoder.encoders.11.norm_ff_macaron.bias | [256] | 256\n", - "encoder.encoders.11.norm_conv.weight | [256] | 256\n", - "encoder.encoders.11.norm_conv.bias | [256] | 256\n", - "encoder.encoders.11.norm_final.weight | [256] | 256\n", - "encoder.encoders.11.norm_final.bias | [256] | 256\n", - "encoder.encoders.11.concat_linear.weight | [512, 256] | 131072\n", - "encoder.encoders.11.concat_linear.bias | [256] | 256\n", - "decoder.embed.0.weight | [4233, 256] | 1083648\n", - "decoder.after_norm.weight | [256] | 256\n", - "decoder.after_norm.bias | [256] | 256\n", - "decoder.output_layer.weight | [256, 4233] | 1083648\n", - 
"decoder.output_layer.bias | [4233] | 4233\n", - "decoder.decoders.0.self_attn.linear_q.weight | [256, 256] | 65536\n", - "decoder.decoders.0.self_attn.linear_q.bias | [256] | 256\n", - "decoder.decoders.0.self_attn.linear_k.weight | [256, 256] | 65536\n", - "decoder.decoders.0.self_attn.linear_k.bias | [256] | 256\n", - "decoder.decoders.0.self_attn.linear_v.weight | [256, 256] | 65536\n", - "decoder.decoders.0.self_attn.linear_v.bias | [256] | 256\n", - "decoder.decoders.0.self_attn.linear_out.weight | [256, 256] | 65536\n", - "decoder.decoders.0.self_attn.linear_out.bias | [256] | 256\n", - "decoder.decoders.0.src_attn.linear_q.weight | [256, 256] | 65536\n", - "decoder.decoders.0.src_attn.linear_q.bias | [256] | 256\n", - "decoder.decoders.0.src_attn.linear_k.weight | [256, 256] | 65536\n", - "decoder.decoders.0.src_attn.linear_k.bias | [256] | 256\n", - "decoder.decoders.0.src_attn.linear_v.weight | [256, 256] | 65536\n", - "decoder.decoders.0.src_attn.linear_v.bias | [256] | 256\n", - "decoder.decoders.0.src_attn.linear_out.weight | [256, 256] | 65536\n", - "decoder.decoders.0.src_attn.linear_out.bias | [256] | 256\n", - "decoder.decoders.0.feed_forward.w_1.weight | [256, 2048] | 524288\n", - "decoder.decoders.0.feed_forward.w_1.bias | [2048] | 2048\n", - "decoder.decoders.0.feed_forward.w_2.weight | [2048, 256] | 524288\n", - "decoder.decoders.0.feed_forward.w_2.bias | [256] | 256\n", - "decoder.decoders.0.norm1.weight | [256] | 256\n", - "decoder.decoders.0.norm1.bias | [256] | 256\n", - "decoder.decoders.0.norm2.weight | [256] | 256\n", - "decoder.decoders.0.norm2.bias | [256] | 256\n", - "decoder.decoders.0.norm3.weight | [256] | 256\n", - "decoder.decoders.0.norm3.bias | [256] | 256\n", - "decoder.decoders.0.concat_linear1.weight | [512, 256] | 131072\n", - "decoder.decoders.0.concat_linear1.bias | [256] | 256\n", - "decoder.decoders.0.concat_linear2.weight | [512, 256] | 131072\n", - "decoder.decoders.0.concat_linear2.bias | [256] | 256\n", - "decoder.decoders.1.self_attn.linear_q.weight | [256, 256] | 65536\n", - "decoder.decoders.1.self_attn.linear_q.bias | [256] | 256\n", - "decoder.decoders.1.self_attn.linear_k.weight | [256, 256] | 65536\n", - "decoder.decoders.1.self_attn.linear_k.bias | [256] | 256\n", - "decoder.decoders.1.self_attn.linear_v.weight | [256, 256] | 65536\n", - "decoder.decoders.1.self_attn.linear_v.bias | [256] | 256\n", - "decoder.decoders.1.self_attn.linear_out.weight | [256, 256] | 65536\n", - "decoder.decoders.1.self_attn.linear_out.bias | [256] | 256\n", - "decoder.decoders.1.src_attn.linear_q.weight | [256, 256] | 65536\n", - "decoder.decoders.1.src_attn.linear_q.bias | [256] | 256\n", - "decoder.decoders.1.src_attn.linear_k.weight | [256, 256] | 65536\n", - "decoder.decoders.1.src_attn.linear_k.bias | [256] | 256\n", - "decoder.decoders.1.src_attn.linear_v.weight | [256, 256] | 65536\n", - "decoder.decoders.1.src_attn.linear_v.bias | [256] | 256\n", - "decoder.decoders.1.src_attn.linear_out.weight | [256, 256] | 65536\n", - "decoder.decoders.1.src_attn.linear_out.bias | [256] | 256\n", - "decoder.decoders.1.feed_forward.w_1.weight | [256, 2048] | 524288\n", - "decoder.decoders.1.feed_forward.w_1.bias | [2048] | 2048\n", - "decoder.decoders.1.feed_forward.w_2.weight | [2048, 256] | 524288\n", - "decoder.decoders.1.feed_forward.w_2.bias | [256] | 256\n", - "decoder.decoders.1.norm1.weight | [256] | 256\n", - "decoder.decoders.1.norm1.bias | [256] | 256\n", - "decoder.decoders.1.norm2.weight | [256] | 256\n", - "decoder.decoders.1.norm2.bias | [256] | 
256\n", - "decoder.decoders.1.norm3.weight | [256] | 256\n", - "decoder.decoders.1.norm3.bias | [256] | 256\n", - "decoder.decoders.1.concat_linear1.weight | [512, 256] | 131072\n", - "decoder.decoders.1.concat_linear1.bias | [256] | 256\n", - "decoder.decoders.1.concat_linear2.weight | [512, 256] | 131072\n", - "decoder.decoders.1.concat_linear2.bias | [256] | 256\n", - "decoder.decoders.2.self_attn.linear_q.weight | [256, 256] | 65536\n", - "decoder.decoders.2.self_attn.linear_q.bias | [256] | 256\n", - "decoder.decoders.2.self_attn.linear_k.weight | [256, 256] | 65536\n", - "decoder.decoders.2.self_attn.linear_k.bias | [256] | 256\n", - "decoder.decoders.2.self_attn.linear_v.weight | [256, 256] | 65536\n", - "decoder.decoders.2.self_attn.linear_v.bias | [256] | 256\n", - "decoder.decoders.2.self_attn.linear_out.weight | [256, 256] | 65536\n", - "decoder.decoders.2.self_attn.linear_out.bias | [256] | 256\n", - "decoder.decoders.2.src_attn.linear_q.weight | [256, 256] | 65536\n", - "decoder.decoders.2.src_attn.linear_q.bias | [256] | 256\n", - "decoder.decoders.2.src_attn.linear_k.weight | [256, 256] | 65536\n", - "decoder.decoders.2.src_attn.linear_k.bias | [256] | 256\n", - "decoder.decoders.2.src_attn.linear_v.weight | [256, 256] | 65536\n", - "decoder.decoders.2.src_attn.linear_v.bias | [256] | 256\n", - "decoder.decoders.2.src_attn.linear_out.weight | [256, 256] | 65536\n", - "decoder.decoders.2.src_attn.linear_out.bias | [256] | 256\n", - "decoder.decoders.2.feed_forward.w_1.weight | [256, 2048] | 524288\n", - "decoder.decoders.2.feed_forward.w_1.bias | [2048] | 2048\n", - "decoder.decoders.2.feed_forward.w_2.weight | [2048, 256] | 524288\n", - "decoder.decoders.2.feed_forward.w_2.bias | [256] | 256\n", - "decoder.decoders.2.norm1.weight | [256] | 256\n", - "decoder.decoders.2.norm1.bias | [256] | 256\n", - "decoder.decoders.2.norm2.weight | [256] | 256\n", - "decoder.decoders.2.norm2.bias | [256] | 256\n", - "decoder.decoders.2.norm3.weight | [256] | 256\n", - "decoder.decoders.2.norm3.bias | [256] | 256\n", - "decoder.decoders.2.concat_linear1.weight | [512, 256] | 131072\n", - "decoder.decoders.2.concat_linear1.bias | [256] | 256\n", - "decoder.decoders.2.concat_linear2.weight | [512, 256] | 131072\n", - "decoder.decoders.2.concat_linear2.bias | [256] | 256\n", - "decoder.decoders.3.self_attn.linear_q.weight | [256, 256] | 65536\n", - "decoder.decoders.3.self_attn.linear_q.bias | [256] | 256\n", - "decoder.decoders.3.self_attn.linear_k.weight | [256, 256] | 65536\n", - "decoder.decoders.3.self_attn.linear_k.bias | [256] | 256\n", - "decoder.decoders.3.self_attn.linear_v.weight | [256, 256] | 65536\n", - "decoder.decoders.3.self_attn.linear_v.bias | [256] | 256\n", - "decoder.decoders.3.self_attn.linear_out.weight | [256, 256] | 65536\n", - "decoder.decoders.3.self_attn.linear_out.bias | [256] | 256\n", - "decoder.decoders.3.src_attn.linear_q.weight | [256, 256] | 65536\n", - "decoder.decoders.3.src_attn.linear_q.bias | [256] | 256\n", - "decoder.decoders.3.src_attn.linear_k.weight | [256, 256] | 65536\n", - "decoder.decoders.3.src_attn.linear_k.bias | [256] | 256\n", - "decoder.decoders.3.src_attn.linear_v.weight | [256, 256] | 65536\n", - "decoder.decoders.3.src_attn.linear_v.bias | [256] | 256\n", - "decoder.decoders.3.src_attn.linear_out.weight | [256, 256] | 65536\n", - "decoder.decoders.3.src_attn.linear_out.bias | [256] | 256\n", - "decoder.decoders.3.feed_forward.w_1.weight | [256, 2048] | 524288\n", - "decoder.decoders.3.feed_forward.w_1.bias | [2048] | 2048\n", - 
"decoder.decoders.3.feed_forward.w_2.weight | [2048, 256] | 524288\n", - "decoder.decoders.3.feed_forward.w_2.bias | [256] | 256\n", - "decoder.decoders.3.norm1.weight | [256] | 256\n", - "decoder.decoders.3.norm1.bias | [256] | 256\n", - "decoder.decoders.3.norm2.weight | [256] | 256\n", - "decoder.decoders.3.norm2.bias | [256] | 256\n", - "decoder.decoders.3.norm3.weight | [256] | 256\n", - "decoder.decoders.3.norm3.bias | [256] | 256\n", - "decoder.decoders.3.concat_linear1.weight | [512, 256] | 131072\n", - "decoder.decoders.3.concat_linear1.bias | [256] | 256\n", - "decoder.decoders.3.concat_linear2.weight | [512, 256] | 131072\n", - "decoder.decoders.3.concat_linear2.bias | [256] | 256\n", - "decoder.decoders.4.self_attn.linear_q.weight | [256, 256] | 65536\n", - "decoder.decoders.4.self_attn.linear_q.bias | [256] | 256\n", - "decoder.decoders.4.self_attn.linear_k.weight | [256, 256] | 65536\n", - "decoder.decoders.4.self_attn.linear_k.bias | [256] | 256\n", - "decoder.decoders.4.self_attn.linear_v.weight | [256, 256] | 65536\n", - "decoder.decoders.4.self_attn.linear_v.bias | [256] | 256\n", - "decoder.decoders.4.self_attn.linear_out.weight | [256, 256] | 65536\n", - "decoder.decoders.4.self_attn.linear_out.bias | [256] | 256\n", - "decoder.decoders.4.src_attn.linear_q.weight | [256, 256] | 65536\n", - "decoder.decoders.4.src_attn.linear_q.bias | [256] | 256\n", - "decoder.decoders.4.src_attn.linear_k.weight | [256, 256] | 65536\n", - "decoder.decoders.4.src_attn.linear_k.bias | [256] | 256\n", - "decoder.decoders.4.src_attn.linear_v.weight | [256, 256] | 65536\n", - "decoder.decoders.4.src_attn.linear_v.bias | [256] | 256\n", - "decoder.decoders.4.src_attn.linear_out.weight | [256, 256] | 65536\n", - "decoder.decoders.4.src_attn.linear_out.bias | [256] | 256\n", - "decoder.decoders.4.feed_forward.w_1.weight | [256, 2048] | 524288\n", - "decoder.decoders.4.feed_forward.w_1.bias | [2048] | 2048\n", - "decoder.decoders.4.feed_forward.w_2.weight | [2048, 256] | 524288\n", - "decoder.decoders.4.feed_forward.w_2.bias | [256] | 256\n", - "decoder.decoders.4.norm1.weight | [256] | 256\n", - "decoder.decoders.4.norm1.bias | [256] | 256\n", - "decoder.decoders.4.norm2.weight | [256] | 256\n", - "decoder.decoders.4.norm2.bias | [256] | 256\n", - "decoder.decoders.4.norm3.weight | [256] | 256\n", - "decoder.decoders.4.norm3.bias | [256] | 256\n", - "decoder.decoders.4.concat_linear1.weight | [512, 256] | 131072\n", - "decoder.decoders.4.concat_linear1.bias | [256] | 256\n", - "decoder.decoders.4.concat_linear2.weight | [512, 256] | 131072\n", - "decoder.decoders.4.concat_linear2.bias | [256] | 256\n", - "decoder.decoders.5.self_attn.linear_q.weight | [256, 256] | 65536\n", - "decoder.decoders.5.self_attn.linear_q.bias | [256] | 256\n", - "decoder.decoders.5.self_attn.linear_k.weight | [256, 256] | 65536\n", - "decoder.decoders.5.self_attn.linear_k.bias | [256] | 256\n", - "decoder.decoders.5.self_attn.linear_v.weight | [256, 256] | 65536\n", - "decoder.decoders.5.self_attn.linear_v.bias | [256] | 256\n", - "decoder.decoders.5.self_attn.linear_out.weight | [256, 256] | 65536\n", - "decoder.decoders.5.self_attn.linear_out.bias | [256] | 256\n", - "decoder.decoders.5.src_attn.linear_q.weight | [256, 256] | 65536\n", - "decoder.decoders.5.src_attn.linear_q.bias | [256] | 256\n", - "decoder.decoders.5.src_attn.linear_k.weight | [256, 256] | 65536\n", - "decoder.decoders.5.src_attn.linear_k.bias | [256] | 256\n", - "decoder.decoders.5.src_attn.linear_v.weight | [256, 256] | 65536\n", - 
"decoder.decoders.5.src_attn.linear_v.bias | [256] | 256\n", - "decoder.decoders.5.src_attn.linear_out.weight | [256, 256] | 65536\n", - "decoder.decoders.5.src_attn.linear_out.bias | [256] | 256\n", - "decoder.decoders.5.feed_forward.w_1.weight | [256, 2048] | 524288\n", - "decoder.decoders.5.feed_forward.w_1.bias | [2048] | 2048\n", - "decoder.decoders.5.feed_forward.w_2.weight | [2048, 256] | 524288\n", - "decoder.decoders.5.feed_forward.w_2.bias | [256] | 256\n", - "decoder.decoders.5.norm1.weight | [256] | 256\n", - "decoder.decoders.5.norm1.bias | [256] | 256\n", - "decoder.decoders.5.norm2.weight | [256] | 256\n", - "decoder.decoders.5.norm2.bias | [256] | 256\n", - "decoder.decoders.5.norm3.weight | [256] | 256\n", - "decoder.decoders.5.norm3.bias | [256] | 256\n", - "decoder.decoders.5.concat_linear1.weight | [512, 256] | 131072\n", - "decoder.decoders.5.concat_linear1.bias | [256] | 256\n", - "decoder.decoders.5.concat_linear2.weight | [512, 256] | 131072\n", - "decoder.decoders.5.concat_linear2.bias | [256] | 256\n", - "ctc.ctc_lo.weight | [256, 4233] | 1083648\n", - "ctc.ctc_lo.bias | [4233] | 4233\n", - "Total parameters: 689, 49355442 elements.\n" - ] - } - ], - "source": [ - "summary(model)" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "ruled-invitation", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "U2Model(\n", - " (encoder): ConformerEncoder(\n", - " (global_cmvn): GlobalCMVN()\n", - " (embed): Conv2dSubsampling4(\n", - " (pos_enc): RelPositionalEncoding(\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " )\n", - " (conv): Sequential(\n", - " (0): Conv2D(1, 256, kernel_size=[3, 3], stride=[2, 2], data_format=NCHW)\n", - " (1): ReLU()\n", - " (2): Conv2D(256, 256, kernel_size=[3, 3], stride=[2, 2], data_format=NCHW)\n", - " (3): ReLU()\n", - " )\n", - " (out): Sequential(\n", - " (0): Linear(in_features=4864, out_features=256, dtype=float32)\n", - " )\n", - " )\n", - " (after_norm): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (encoders): LayerList(\n", - " (0): ConformerEncoderLayer(\n", - " (self_attn): RelPositionMultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_k): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_v): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_out): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (dropout): Dropout(p=0.0, axis=None, mode=upscale_in_train)\n", - " (linear_pos): Linear(in_features=256, out_features=256, dtype=float32)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, dtype=float32)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (w_2): Linear(in_features=2048, out_features=256, dtype=float32)\n", - " )\n", - " (feed_forward_macaron): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, dtype=float32)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (w_2): Linear(in_features=2048, out_features=256, dtype=float32)\n", - " )\n", - " (conv_module): ConvolutionModule(\n", - " (pointwise_conv1): Conv1D(256, 512, kernel_size=[1], data_format=NCL)\n", - " (depthwise_conv): Conv1D(256, 256, kernel_size=[15], padding=7, groups=256, data_format=NCL)\n", - " (norm): BatchNorm1D(num_features=256, 
momentum=0.9, epsilon=1e-05)\n", - " (pointwise_conv2): Conv1D(256, 256, kernel_size=[1], data_format=NCL)\n", - " (activation): Swish()\n", - " )\n", - " (norm_ff): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_mha): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_ff_macaron): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_conv): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_final): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (concat_linear): Linear(in_features=512, out_features=256, dtype=float32)\n", - " )\n", - " (1): ConformerEncoderLayer(\n", - " (self_attn): RelPositionMultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_k): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_v): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_out): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (dropout): Dropout(p=0.0, axis=None, mode=upscale_in_train)\n", - " (linear_pos): Linear(in_features=256, out_features=256, dtype=float32)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, dtype=float32)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (w_2): Linear(in_features=2048, out_features=256, dtype=float32)\n", - " )\n", - " (feed_forward_macaron): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, dtype=float32)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (w_2): Linear(in_features=2048, out_features=256, dtype=float32)\n", - " )\n", - " (conv_module): ConvolutionModule(\n", - " (pointwise_conv1): Conv1D(256, 512, kernel_size=[1], data_format=NCL)\n", - " (depthwise_conv): Conv1D(256, 256, kernel_size=[15], padding=7, groups=256, data_format=NCL)\n", - " (norm): BatchNorm1D(num_features=256, momentum=0.9, epsilon=1e-05)\n", - " (pointwise_conv2): Conv1D(256, 256, kernel_size=[1], data_format=NCL)\n", - " (activation): Swish()\n", - " )\n", - " (norm_ff): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_mha): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_ff_macaron): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_conv): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_final): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (concat_linear): Linear(in_features=512, out_features=256, dtype=float32)\n", - " )\n", - " (2): ConformerEncoderLayer(\n", - " (self_attn): RelPositionMultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_k): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_v): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_out): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (dropout): Dropout(p=0.0, axis=None, mode=upscale_in_train)\n", - " (linear_pos): Linear(in_features=256, out_features=256, dtype=float32)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, dtype=float32)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (w_2): 
Linear(in_features=2048, out_features=256, dtype=float32)\n", - " )\n", - " (feed_forward_macaron): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, dtype=float32)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (w_2): Linear(in_features=2048, out_features=256, dtype=float32)\n", - " )\n", - " (conv_module): ConvolutionModule(\n", - " (pointwise_conv1): Conv1D(256, 512, kernel_size=[1], data_format=NCL)\n", - " (depthwise_conv): Conv1D(256, 256, kernel_size=[15], padding=7, groups=256, data_format=NCL)\n", - " (norm): BatchNorm1D(num_features=256, momentum=0.9, epsilon=1e-05)\n", - " (pointwise_conv2): Conv1D(256, 256, kernel_size=[1], data_format=NCL)\n", - " (activation): Swish()\n", - " )\n", - " (norm_ff): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_mha): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_ff_macaron): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_conv): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_final): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (concat_linear): Linear(in_features=512, out_features=256, dtype=float32)\n", - " )\n", - " (3): ConformerEncoderLayer(\n", - " (self_attn): RelPositionMultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_k): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_v): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_out): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (dropout): Dropout(p=0.0, axis=None, mode=upscale_in_train)\n", - " (linear_pos): Linear(in_features=256, out_features=256, dtype=float32)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, dtype=float32)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (w_2): Linear(in_features=2048, out_features=256, dtype=float32)\n", - " )\n", - " (feed_forward_macaron): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, dtype=float32)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (w_2): Linear(in_features=2048, out_features=256, dtype=float32)\n", - " )\n", - " (conv_module): ConvolutionModule(\n", - " (pointwise_conv1): Conv1D(256, 512, kernel_size=[1], data_format=NCL)\n", - " (depthwise_conv): Conv1D(256, 256, kernel_size=[15], padding=7, groups=256, data_format=NCL)\n", - " (norm): BatchNorm1D(num_features=256, momentum=0.9, epsilon=1e-05)\n", - " (pointwise_conv2): Conv1D(256, 256, kernel_size=[1], data_format=NCL)\n", - " (activation): Swish()\n", - " )\n", - " (norm_ff): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_mha): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_ff_macaron): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_conv): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_final): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (concat_linear): Linear(in_features=512, out_features=256, dtype=float32)\n", - " )\n", - " (4): ConformerEncoderLayer(\n", - " (self_attn): RelPositionMultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, 
out_features=256, dtype=float32)\n", - " (linear_k): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_v): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_out): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (dropout): Dropout(p=0.0, axis=None, mode=upscale_in_train)\n", - " (linear_pos): Linear(in_features=256, out_features=256, dtype=float32)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, dtype=float32)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (w_2): Linear(in_features=2048, out_features=256, dtype=float32)\n", - " )\n", - " (feed_forward_macaron): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, dtype=float32)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (w_2): Linear(in_features=2048, out_features=256, dtype=float32)\n", - " )\n", - " (conv_module): ConvolutionModule(\n", - " (pointwise_conv1): Conv1D(256, 512, kernel_size=[1], data_format=NCL)\n", - " (depthwise_conv): Conv1D(256, 256, kernel_size=[15], padding=7, groups=256, data_format=NCL)\n", - " (norm): BatchNorm1D(num_features=256, momentum=0.9, epsilon=1e-05)\n", - " (pointwise_conv2): Conv1D(256, 256, kernel_size=[1], data_format=NCL)\n", - " (activation): Swish()\n", - " )\n", - " (norm_ff): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_mha): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_ff_macaron): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_conv): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_final): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (concat_linear): Linear(in_features=512, out_features=256, dtype=float32)\n", - " )\n", - " (5): ConformerEncoderLayer(\n", - " (self_attn): RelPositionMultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_k): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_v): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_out): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (dropout): Dropout(p=0.0, axis=None, mode=upscale_in_train)\n", - " (linear_pos): Linear(in_features=256, out_features=256, dtype=float32)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, dtype=float32)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (w_2): Linear(in_features=2048, out_features=256, dtype=float32)\n", - " )\n", - " (feed_forward_macaron): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, dtype=float32)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (w_2): Linear(in_features=2048, out_features=256, dtype=float32)\n", - " )\n", - " (conv_module): ConvolutionModule(\n", - " (pointwise_conv1): Conv1D(256, 512, kernel_size=[1], data_format=NCL)\n", - " (depthwise_conv): Conv1D(256, 256, kernel_size=[15], padding=7, groups=256, data_format=NCL)\n", - " (norm): BatchNorm1D(num_features=256, momentum=0.9, epsilon=1e-05)\n", - " (pointwise_conv2): Conv1D(256, 256, kernel_size=[1], data_format=NCL)\n", - " (activation): Swish()\n", - " )\n", 
- " (norm_ff): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_mha): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_ff_macaron): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_conv): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_final): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (concat_linear): Linear(in_features=512, out_features=256, dtype=float32)\n", - " )\n", - " (6): ConformerEncoderLayer(\n", - " (self_attn): RelPositionMultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_k): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_v): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_out): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (dropout): Dropout(p=0.0, axis=None, mode=upscale_in_train)\n", - " (linear_pos): Linear(in_features=256, out_features=256, dtype=float32)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, dtype=float32)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (w_2): Linear(in_features=2048, out_features=256, dtype=float32)\n", - " )\n", - " (feed_forward_macaron): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, dtype=float32)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (w_2): Linear(in_features=2048, out_features=256, dtype=float32)\n", - " )\n", - " (conv_module): ConvolutionModule(\n", - " (pointwise_conv1): Conv1D(256, 512, kernel_size=[1], data_format=NCL)\n", - " (depthwise_conv): Conv1D(256, 256, kernel_size=[15], padding=7, groups=256, data_format=NCL)\n", - " (norm): BatchNorm1D(num_features=256, momentum=0.9, epsilon=1e-05)\n", - " (pointwise_conv2): Conv1D(256, 256, kernel_size=[1], data_format=NCL)\n", - " (activation): Swish()\n", - " )\n", - " (norm_ff): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_mha): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_ff_macaron): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_conv): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_final): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (concat_linear): Linear(in_features=512, out_features=256, dtype=float32)\n", - " )\n", - " (7): ConformerEncoderLayer(\n", - " (self_attn): RelPositionMultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_k): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_v): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_out): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (dropout): Dropout(p=0.0, axis=None, mode=upscale_in_train)\n", - " (linear_pos): Linear(in_features=256, out_features=256, dtype=float32)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, dtype=float32)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (w_2): Linear(in_features=2048, out_features=256, dtype=float32)\n", - " )\n", - " (feed_forward_macaron): PositionwiseFeedForward(\n", - " (w_1): 
Linear(in_features=256, out_features=2048, dtype=float32)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (w_2): Linear(in_features=2048, out_features=256, dtype=float32)\n", - " )\n", - " (conv_module): ConvolutionModule(\n", - " (pointwise_conv1): Conv1D(256, 512, kernel_size=[1], data_format=NCL)\n", - " (depthwise_conv): Conv1D(256, 256, kernel_size=[15], padding=7, groups=256, data_format=NCL)\n", - " (norm): BatchNorm1D(num_features=256, momentum=0.9, epsilon=1e-05)\n", - " (pointwise_conv2): Conv1D(256, 256, kernel_size=[1], data_format=NCL)\n", - " (activation): Swish()\n", - " )\n", - " (norm_ff): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_mha): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_ff_macaron): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_conv): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_final): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (concat_linear): Linear(in_features=512, out_features=256, dtype=float32)\n", - " )\n", - " (8): ConformerEncoderLayer(\n", - " (self_attn): RelPositionMultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_k): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_v): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_out): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (dropout): Dropout(p=0.0, axis=None, mode=upscale_in_train)\n", - " (linear_pos): Linear(in_features=256, out_features=256, dtype=float32)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, dtype=float32)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (w_2): Linear(in_features=2048, out_features=256, dtype=float32)\n", - " )\n", - " (feed_forward_macaron): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, dtype=float32)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (w_2): Linear(in_features=2048, out_features=256, dtype=float32)\n", - " )\n", - " (conv_module): ConvolutionModule(\n", - " (pointwise_conv1): Conv1D(256, 512, kernel_size=[1], data_format=NCL)\n", - " (depthwise_conv): Conv1D(256, 256, kernel_size=[15], padding=7, groups=256, data_format=NCL)\n", - " (norm): BatchNorm1D(num_features=256, momentum=0.9, epsilon=1e-05)\n", - " (pointwise_conv2): Conv1D(256, 256, kernel_size=[1], data_format=NCL)\n", - " (activation): Swish()\n", - " )\n", - " (norm_ff): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_mha): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_ff_macaron): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_conv): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_final): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (concat_linear): Linear(in_features=512, out_features=256, dtype=float32)\n", - " )\n", - " (9): ConformerEncoderLayer(\n", - " (self_attn): RelPositionMultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_k): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_v): Linear(in_features=256, 
out_features=256, dtype=float32)\n", - " (linear_out): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (dropout): Dropout(p=0.0, axis=None, mode=upscale_in_train)\n", - " (linear_pos): Linear(in_features=256, out_features=256, dtype=float32)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, dtype=float32)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (w_2): Linear(in_features=2048, out_features=256, dtype=float32)\n", - " )\n", - " (feed_forward_macaron): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, dtype=float32)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (w_2): Linear(in_features=2048, out_features=256, dtype=float32)\n", - " )\n", - " (conv_module): ConvolutionModule(\n", - " (pointwise_conv1): Conv1D(256, 512, kernel_size=[1], data_format=NCL)\n", - " (depthwise_conv): Conv1D(256, 256, kernel_size=[15], padding=7, groups=256, data_format=NCL)\n", - " (norm): BatchNorm1D(num_features=256, momentum=0.9, epsilon=1e-05)\n", - " (pointwise_conv2): Conv1D(256, 256, kernel_size=[1], data_format=NCL)\n", - " (activation): Swish()\n", - " )\n", - " (norm_ff): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_mha): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_ff_macaron): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_conv): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_final): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (concat_linear): Linear(in_features=512, out_features=256, dtype=float32)\n", - " )\n", - " (10): ConformerEncoderLayer(\n", - " (self_attn): RelPositionMultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_k): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_v): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_out): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (dropout): Dropout(p=0.0, axis=None, mode=upscale_in_train)\n", - " (linear_pos): Linear(in_features=256, out_features=256, dtype=float32)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, dtype=float32)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (w_2): Linear(in_features=2048, out_features=256, dtype=float32)\n", - " )\n", - " (feed_forward_macaron): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, dtype=float32)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (w_2): Linear(in_features=2048, out_features=256, dtype=float32)\n", - " )\n", - " (conv_module): ConvolutionModule(\n", - " (pointwise_conv1): Conv1D(256, 512, kernel_size=[1], data_format=NCL)\n", - " (depthwise_conv): Conv1D(256, 256, kernel_size=[15], padding=7, groups=256, data_format=NCL)\n", - " (norm): BatchNorm1D(num_features=256, momentum=0.9, epsilon=1e-05)\n", - " (pointwise_conv2): Conv1D(256, 256, kernel_size=[1], data_format=NCL)\n", - " (activation): Swish()\n", - " )\n", - " (norm_ff): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_mha): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " 
(norm_ff_macaron): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_conv): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_final): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (concat_linear): Linear(in_features=512, out_features=256, dtype=float32)\n", - " )\n", - " (11): ConformerEncoderLayer(\n", - " (self_attn): RelPositionMultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_k): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_v): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_out): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (dropout): Dropout(p=0.0, axis=None, mode=upscale_in_train)\n", - " (linear_pos): Linear(in_features=256, out_features=256, dtype=float32)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, dtype=float32)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (w_2): Linear(in_features=2048, out_features=256, dtype=float32)\n", - " )\n", - " (feed_forward_macaron): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, dtype=float32)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (w_2): Linear(in_features=2048, out_features=256, dtype=float32)\n", - " )\n", - " (conv_module): ConvolutionModule(\n", - " (pointwise_conv1): Conv1D(256, 512, kernel_size=[1], data_format=NCL)\n", - " (depthwise_conv): Conv1D(256, 256, kernel_size=[15], padding=7, groups=256, data_format=NCL)\n", - " (norm): BatchNorm1D(num_features=256, momentum=0.9, epsilon=1e-05)\n", - " (pointwise_conv2): Conv1D(256, 256, kernel_size=[1], data_format=NCL)\n", - " (activation): Swish()\n", - " )\n", - " (norm_ff): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_mha): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_ff_macaron): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_conv): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm_final): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (concat_linear): Linear(in_features=512, out_features=256, dtype=float32)\n", - " )\n", - " )\n", - " )\n", - " (decoder): TransformerDecoder(\n", - " (embed): Sequential(\n", - " (0): Embedding(4233, 256, sparse=False)\n", - " (1): PositionalEncoding(\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " )\n", - " )\n", - " (after_norm): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (output_layer): Linear(in_features=256, out_features=4233, dtype=float32)\n", - " (decoders): LayerList(\n", - " (0): DecoderLayer(\n", - " (self_attn): MultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_k): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_v): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_out): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (dropout): Dropout(p=0.0, axis=None, mode=upscale_in_train)\n", - " )\n", - " (src_attn): MultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_k): Linear(in_features=256, out_features=256, 
dtype=float32)\n", - " (linear_v): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_out): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (dropout): Dropout(p=0.0, axis=None, mode=upscale_in_train)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, dtype=float32)\n", - " (activation): ReLU()\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (w_2): Linear(in_features=2048, out_features=256, dtype=float32)\n", - " )\n", - " (norm1): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm2): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm3): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (concat_linear1): Linear(in_features=512, out_features=256, dtype=float32)\n", - " (concat_linear2): Linear(in_features=512, out_features=256, dtype=float32)\n", - " )\n", - " (1): DecoderLayer(\n", - " (self_attn): MultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_k): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_v): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_out): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (dropout): Dropout(p=0.0, axis=None, mode=upscale_in_train)\n", - " )\n", - " (src_attn): MultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_k): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_v): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_out): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (dropout): Dropout(p=0.0, axis=None, mode=upscale_in_train)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, dtype=float32)\n", - " (activation): ReLU()\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (w_2): Linear(in_features=2048, out_features=256, dtype=float32)\n", - " )\n", - " (norm1): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm2): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm3): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (concat_linear1): Linear(in_features=512, out_features=256, dtype=float32)\n", - " (concat_linear2): Linear(in_features=512, out_features=256, dtype=float32)\n", - " )\n", - " (2): DecoderLayer(\n", - " (self_attn): MultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_k): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_v): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_out): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (dropout): Dropout(p=0.0, axis=None, mode=upscale_in_train)\n", - " )\n", - " (src_attn): MultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_k): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_v): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_out): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (dropout): Dropout(p=0.0, axis=None, mode=upscale_in_train)\n", - " )\n", - " (feed_forward): 
PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, dtype=float32)\n", - " (activation): ReLU()\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (w_2): Linear(in_features=2048, out_features=256, dtype=float32)\n", - " )\n", - " (norm1): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm2): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm3): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (concat_linear1): Linear(in_features=512, out_features=256, dtype=float32)\n", - " (concat_linear2): Linear(in_features=512, out_features=256, dtype=float32)\n", - " )\n", - " (3): DecoderLayer(\n", - " (self_attn): MultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_k): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_v): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_out): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (dropout): Dropout(p=0.0, axis=None, mode=upscale_in_train)\n", - " )\n", - " (src_attn): MultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_k): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_v): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_out): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (dropout): Dropout(p=0.0, axis=None, mode=upscale_in_train)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, dtype=float32)\n", - " (activation): ReLU()\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (w_2): Linear(in_features=2048, out_features=256, dtype=float32)\n", - " )\n", - " (norm1): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm2): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm3): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (concat_linear1): Linear(in_features=512, out_features=256, dtype=float32)\n", - " (concat_linear2): Linear(in_features=512, out_features=256, dtype=float32)\n", - " )\n", - " (4): DecoderLayer(\n", - " (self_attn): MultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_k): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_v): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_out): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (dropout): Dropout(p=0.0, axis=None, mode=upscale_in_train)\n", - " )\n", - " (src_attn): MultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_k): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_v): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_out): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (dropout): Dropout(p=0.0, axis=None, mode=upscale_in_train)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, dtype=float32)\n", - " (activation): ReLU()\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (w_2): Linear(in_features=2048, out_features=256, dtype=float32)\n", - " )\n", - " 
(norm1): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm2): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm3): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (concat_linear1): Linear(in_features=512, out_features=256, dtype=float32)\n", - " (concat_linear2): Linear(in_features=512, out_features=256, dtype=float32)\n", - " )\n", - " (5): DecoderLayer(\n", - " (self_attn): MultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_k): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_v): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_out): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (dropout): Dropout(p=0.0, axis=None, mode=upscale_in_train)\n", - " )\n", - " (src_attn): MultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_k): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_v): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (linear_out): Linear(in_features=256, out_features=256, dtype=float32)\n", - " (dropout): Dropout(p=0.0, axis=None, mode=upscale_in_train)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, dtype=float32)\n", - " (activation): ReLU()\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (w_2): Linear(in_features=2048, out_features=256, dtype=float32)\n", - " )\n", - " (norm1): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm2): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (norm3): LayerNorm(normalized_shape=[256], epsilon=1e-12)\n", - " (dropout): Dropout(p=0.1, axis=None, mode=upscale_in_train)\n", - " (concat_linear1): Linear(in_features=512, out_features=256, dtype=float32)\n", - " (concat_linear2): Linear(in_features=512, out_features=256, dtype=float32)\n", - " )\n", - " )\n", - " )\n", - " (ctc): CTCDecoder(\n", - " (ctc_lo): Linear(in_features=256, out_features=4233, dtype=float32)\n", - " (criterion): CTCLoss(\n", - " (loss): CTCLoss()\n", - " )\n", - " )\n", - " (criterion_att): LabelSmoothingLoss(\n", - " (criterion): KLDivLoss()\n", - " )\n", - ")\n" - ] - } - ], - "source": [ - "print(model)" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "fossil-means", - "metadata": {}, - "outputs": [], - "source": [ - "# load feat" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "fleet-despite", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "compute_cmvn_loader_test.ipynb encoder.npz\r\n", - "dataloader.ipynb hack_api_test.ipynb\r\n", - "dataloader_with_tokens_tokenids.ipynb jit_infer.ipynb\r\n", - "data.npz layer_norm_test.ipynb\r\n", - "decoder.npz Linear_test.ipynb\r\n", - "enc_0_ff_out.npz mask_and_masked_fill_test.ipynb\r\n", - "enc_0_norm_ff.npz model.npz\r\n", - "enc_0.npz position_embeding_check.ipynb\r\n", - "enc_0_selattn_out.npz python_test.ipynb\r\n", - "enc_2.npz train_test.ipynb\r\n", - "enc_all.npz u2_model.ipynb\r\n", - "enc_embed.npz\r\n" - ] - } - ], - "source": [ - "%ls .notebook" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "abroad-oracle", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "['BAC009S0739W0246' 'BAC009S0727W0424' 
'BAC009S0753W0412'\n", - " 'BAC009S0756W0206' 'BAC009S0740W0414' 'BAC009S0728W0426'\n", - " 'BAC009S0739W0214' 'BAC009S0753W0423' 'BAC009S0734W0201'\n", - " 'BAC009S0740W0427' 'BAC009S0730W0423' 'BAC009S0728W0367'\n", - " 'BAC009S0730W0418' 'BAC009S0727W0157' 'BAC009S0749W0409'\n", - " 'BAC009S0727W0418']\n", - "(16, 207, 80)\n", - "[[[ 8.994624 9.538309 9.191589 ... 10.507416 9.563305 8.256403 ]\n", - " [ 9.798841 10.405224 9.26511 ... 10.251211 9.543982 8.873768 ]\n", - " [10.6890745 10.395469 8.053548 ... 9.906749 10.064903 8.050915 ]\n", - " ...\n", - " [ 9.217986 9.65069 8.505259 ... 9.687183 8.742463 7.9865475]\n", - " [10.129122 9.935194 9.37982 ... 9.563894 9.825992 8.979543 ]\n", - " [ 9.095531 7.1338377 9.468001 ... 9.472748 9.021235 7.447914 ]]\n", - "\n", - " [[11.430976 10.671858 6.0841026 ... 9.382682 8.729745 7.5315614]\n", - " [ 9.731717 7.8104815 7.5714607 ... 10.043035 9.243595 7.3540792]\n", - " [10.65017 10.600604 8.467784 ... 9.281448 9.186885 8.070343 ]\n", - " ...\n", - " [ 9.096987 9.2637 8.075275 ... 8.431845 8.370505 8.002926 ]\n", - " [10.461651 10.147784 6.7693496 ... 9.779426 9.577453 8.080652 ]\n", - " [ 7.794432 5.621059 7.9750648 ... 9.997245 9.849678 8.031287 ]]\n", - "\n", - " [[ 7.3455667 7.896357 7.5795946 ... 11.631024 10.451254 9.123633 ]\n", - " [ 8.628678 8.4630575 7.499242 ... 12.415986 10.975749 8.9425745]\n", - " [ 9.831394 10.2812805 8.97241 ... 12.1386795 10.40175 9.005517 ]\n", - " ...\n", - " [ 7.089641 7.405548 6.8142557 ... 9.325196 9.273162 8.353427 ]\n", - " [ 0. 0. 0. ... 0. 0. 0. ]\n", - " [ 0. 0. 0. ... 0. 0. 0. ]]\n", - "\n", - " ...\n", - "\n", - " [[10.933237 10.464394 7.7202725 ... 10.348816 9.302338 7.1553144]\n", - " [10.449866 9.907033 9.029272 ... 9.952465 9.414051 7.559279 ]\n", - " [10.487655 9.81259 9.895244 ... 9.58662 9.341254 7.7849016]\n", - " ...\n", - " [ 0. 0. 0. ... 0. 0. 0. ]\n", - " [ 0. 0. 0. ... 0. 0. 0. ]\n", - " [ 0. 0. 0. ... 0. 0. 0. ]]\n", - "\n", - " [[ 9.944384 9.585867 8.220328 ... 11.588647 11.045029 8.817075 ]\n", - " [ 7.678356 8.322397 7.533047 ... 11.055085 10.535685 9.27465 ]\n", - " [ 8.626197 9.675917 9.841045 ... 11.378827 10.922112 8.991444 ]\n", - " ...\n", - " [ 0. 0. 0. ... 0. 0. 0. ]\n", - " [ 0. 0. 0. ... 0. 0. 0. ]\n", - " [ 0. 0. 0. ... 0. 0. 0. ]]\n", - "\n", - " [[ 8.107938 7.759043 6.710301 ... 12.650573 11.466156 11.061517 ]\n", - " [11.380332 11.222007 8.658889 ... 12.810616 12.222216 11.689288 ]\n", - " [10.677676 9.920579 8.046089 ... 13.572894 12.5624075 11.155033 ]\n", - " ...\n", - " [ 0. 0. 0. ... 0. 0. 0. ]\n", - " [ 0. 0. 0. ... 0. 0. 0. ]\n", - " [ 0. 0. 0. ... 0. 0. 0. 
]]]\n", - "[207 207 205 205 203 203 198 197 195 188 186 186 185 180 166 163]\n", - "[[2995 3116 1209 565 -1 -1]\n", - " [ 236 1176 331 66 3925 4077]\n", - " [2693 524 234 1145 366 -1]\n", - " [3875 4211 3062 700 -1 -1]\n", - " [ 272 987 1134 494 2959 -1]\n", - " [1936 3715 120 2553 2695 2710]\n", - " [ 25 1149 3930 -1 -1 -1]\n", - " [1753 1778 1237 482 3925 110]\n", - " [3703 2 565 3827 -1 -1]\n", - " [1150 2734 10 2478 3490 -1]\n", - " [ 426 811 95 489 144 -1]\n", - " [2313 2006 489 975 -1 -1]\n", - " [3702 3414 205 1488 2966 1347]\n", - " [ 70 1741 702 1666 -1 -1]\n", - " [ 703 1778 1030 849 -1 -1]\n", - " [ 814 1674 115 3827 -1 -1]]\n", - "[4 6 5 4 5 6 3 6 4 5 5 4 6 4 4 4]\n" - ] - } - ], - "source": [ - "data = np.load('.notebook/data.npz', allow_pickle=True)\n", - "keys=data['keys']\n", - "feat=data['feat']\n", - "feat_len=data['feat_len']\n", - "text=data['text']\n", - "text_len=data['text_len']\n", - "print(keys)\n", - "print(feat.shape)\n", - "print(feat)\n", - "print(feat_len)\n", - "print(text)\n", - "print(text_len)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "false-instrument", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "arctic-proxy", - "metadata": {}, - "outputs": [], - "source": [ - "# ['BAC009S0739W0246', 'BAC009S0727W0424', 'BAC009S0753W0412', 'BAC009S0756W0206', 'BAC009S0740W0414', 'BAC009S0728W0426', 'BAC009S0739W0214', 'BAC009S0753W0423', 'BAC009S0734W0201', 'BAC009S0740W0427', 'BAC009S0730W0423', 'BAC009S0728W0367', 'BAC009S0730W0418', 'BAC009S0727W0157', 'BAC009S0749W0409', 'BAC009S0727W0418']\n", - "# torch.Size([16, 207, 80])\n", - "# tensor([[[ 8.9946, 9.5383, 9.1916, ..., 10.5074, 9.5633, 8.2564],\n", - "# [ 9.7988, 10.4052, 9.2651, ..., 10.2512, 9.5440, 8.8738],\n", - "# [10.6891, 10.3955, 8.0535, ..., 9.9067, 10.0649, 8.0509],\n", - "# ...,\n", - "# [ 9.2180, 9.6507, 8.5053, ..., 9.6872, 8.7425, 7.9865],\n", - "# [10.1291, 9.9352, 9.3798, ..., 9.5639, 9.8260, 8.9795],\n", - "# [ 9.0955, 7.1338, 9.4680, ..., 9.4727, 9.0212, 7.4479]],\n", - "\n", - "# [[11.4310, 10.6719, 6.0841, ..., 9.3827, 8.7297, 7.5316],\n", - "# [ 9.7317, 7.8105, 7.5715, ..., 10.0430, 9.2436, 7.3541],\n", - "# [10.6502, 10.6006, 8.4678, ..., 9.2814, 9.1869, 8.0703],\n", - "# ...,\n", - "# [ 9.0970, 9.2637, 8.0753, ..., 8.4318, 8.3705, 8.0029],\n", - "# [10.4617, 10.1478, 6.7693, ..., 9.7794, 9.5775, 8.0807],\n", - "# [ 7.7944, 5.6211, 7.9751, ..., 9.9972, 9.8497, 8.0313]],\n", - "\n", - "# [[ 7.3456, 7.8964, 7.5796, ..., 11.6310, 10.4513, 9.1236],\n", - "# [ 8.6287, 8.4631, 7.4992, ..., 12.4160, 10.9757, 8.9426],\n", - "# [ 9.8314, 10.2813, 8.9724, ..., 12.1387, 10.4017, 9.0055],\n", - "# ...,\n", - "# [ 7.0896, 7.4055, 6.8143, ..., 9.3252, 9.2732, 8.3534],\n", - "# [ 0.0000, 0.0000, 0.0000, ..., 0.0000, 0.0000, 0.0000],\n", - "# [ 0.0000, 0.0000, 0.0000, ..., 0.0000, 0.0000, 0.0000]],\n", - "\n", - "# ...,\n", - "\n", - "# [[10.9332, 10.4644, 7.7203, ..., 10.3488, 9.3023, 7.1553],\n", - "# [10.4499, 9.9070, 9.0293, ..., 9.9525, 9.4141, 7.5593],\n", - "# [10.4877, 9.8126, 9.8952, ..., 9.5866, 9.3413, 7.7849],\n", - "# ...,\n", - "# [ 0.0000, 0.0000, 0.0000, ..., 0.0000, 0.0000, 0.0000],\n", - "# [ 0.0000, 0.0000, 0.0000, ..., 0.0000, 0.0000, 0.0000],\n", - "# [ 0.0000, 0.0000, 0.0000, ..., 0.0000, 0.0000, 0.0000]],\n", - "\n", - "# [[ 9.9444, 9.5859, 8.2203, ..., 11.5886, 11.0450, 8.8171],\n", - "# [ 7.6784, 8.3224, 7.5330, ..., 11.0551, 10.5357, 9.2746],\n", - "# [ 8.6262, 9.6759, 9.8410, ..., 
11.3788, 10.9221, 8.9914],\n", - "# ...,\n", - "# [ 0.0000, 0.0000, 0.0000, ..., 0.0000, 0.0000, 0.0000],\n", - "# [ 0.0000, 0.0000, 0.0000, ..., 0.0000, 0.0000, 0.0000],\n", - "# [ 0.0000, 0.0000, 0.0000, ..., 0.0000, 0.0000, 0.0000]],\n", - "\n", - "# [[ 8.1079, 7.7590, 6.7103, ..., 12.6506, 11.4662, 11.0615],\n", - "# [11.3803, 11.2220, 8.6589, ..., 12.8106, 12.2222, 11.6893],\n", - "# [10.6777, 9.9206, 8.0461, ..., 13.5729, 12.5624, 11.1550],\n", - "# ...,\n", - "# [ 0.0000, 0.0000, 0.0000, ..., 0.0000, 0.0000, 0.0000],\n", - "# [ 0.0000, 0.0000, 0.0000, ..., 0.0000, 0.0000, 0.0000],\n", - "# [ 0.0000, 0.0000, 0.0000, ..., 0.0000, 0.0000, 0.0000]]])\n", - "# tensor([207, 207, 205, 205, 203, 203, 198, 197, 195, 188, 186, 186, 185, 180,\n", - "# 166, 163], dtype=torch.int32)\n", - "# tensor([[2995, 3116, 1209, 565, -1, -1],\n", - "# [ 236, 1176, 331, 66, 3925, 4077],\n", - "# [2693, 524, 234, 1145, 366, -1],\n", - "# [3875, 4211, 3062, 700, -1, -1],\n", - "# [ 272, 987, 1134, 494, 2959, -1],\n", - "# [1936, 3715, 120, 2553, 2695, 2710],\n", - "# [ 25, 1149, 3930, -1, -1, -1],\n", - "# [1753, 1778, 1237, 482, 3925, 110],\n", - "# [3703, 2, 565, 3827, -1, -1],\n", - "# [1150, 2734, 10, 2478, 3490, -1],\n", - "# [ 426, 811, 95, 489, 144, -1],\n", - "# [2313, 2006, 489, 975, -1, -1],\n", - "# [3702, 3414, 205, 1488, 2966, 1347],\n", - "# [ 70, 1741, 702, 1666, -1, -1],\n", - "# [ 703, 1778, 1030, 849, -1, -1],\n", - "# [ 814, 1674, 115, 3827, -1, -1]], dtype=torch.int32)\n", - "# tensor([4, 6, 5, 4, 5, 6, 3, 6, 4, 5, 5, 4, 6, 4, 4, 4], dtype=torch.int32)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "seasonal-switch", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "defined-brooks", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "compute_cmvn_loader_test.ipynb\t encoder.npz\r\n", - "dataloader.ipynb\t\t hack_api_test.ipynb\r\n", - "dataloader_with_tokens_tokenids.ipynb jit_infer.ipynb\r\n", - "data.npz\t\t\t layer_norm_test.ipynb\r\n", - "decoder.npz\t\t\t Linear_test.ipynb\r\n", - "enc_0_ff_out.npz\t\t mask_and_masked_fill_test.ipynb\r\n", - "enc_0_norm_ff.npz\t\t model.npz\r\n", - "enc_0.npz\t\t\t position_embeding_check.ipynb\r\n", - "enc_0_selattn_out.npz\t\t python_test.ipynb\r\n", - "enc_2.npz\t\t\t train_test.ipynb\r\n", - "enc_all.npz\t\t\t u2_model.ipynb\r\n", - "enc_embed.npz\r\n" - ] - } - ], - "source": [ - "# load model param\n", - "!ls .notebook\n", - "data = np.load('.notebook/model.npz', allow_pickle=True)\n", - "state_dict = data['state'].item()\n", - "\n", - "for key, _ in model.state_dict().items():\n", - " if key not in state_dict:\n", - " print(f\"{key} not find.\")\n", - "\n", - "model.set_state_dict(state_dict)\n", - "\n", - "now_state_dict = model.state_dict()\n", - "for key, value in now_state_dict.items():\n", - " if not np.allclose(value.numpy(), state_dict[key]):\n", - " print(key)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "exempt-viewer", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "confident-piano", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages/paddle/fluid/framework.py:687: DeprecationWarning: `np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. 
Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.bool_` here.\n", - "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", - " elif dtype == np.bool:\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n", - " [142.48880005]) Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n", - " [41.84146118]) Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n", - " [377.33258057])\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/DeepSpeech-2.x/tools/venv/lib/python3.7/site-packages/paddle/fluid/dygraph/math_op_patch.py:238: UserWarning: The dtype of left and right variables are not the same, left dtype is VarType.FP32, but right dtype is VarType.INT32, the right dtype will convert to VarType.FP32\n", - " format(lhs_dtype, rhs_dtype, lhs_dtype))\n" - ] - } - ], - "source": [ - "# compute loss\n", - "import paddle\n", - "feat=paddle.to_tensor(feat)\n", - "feat_len=paddle.to_tensor(feat_len, dtype='int64')\n", - "text=paddle.to_tensor(text, dtype='int64')\n", - "text_len=paddle.to_tensor(text_len, dtype='int64')\n", - "\n", - "model.eval()\n", - "total_loss, attention_loss, ctc_loss = model(feat, feat_len,\n", - " text, text_len)\n", - "print(total_loss, attention_loss, ctc_loss )" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "better-senator", - "metadata": {}, - "outputs": [], - "source": [ - "# tensor(142.4888, device='cuda:0', grad_fn=) \n", - "# tensor(41.8415, device='cuda:0', grad_fn=) \n", - "# tensor(377.3326, device='cuda:0', grad_fn=)\n", - "# 142.4888 41.84146 377.33258" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "related-banking", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "olympic-problem", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[16, 51, 256]\n", - "[16, 1, 51]\n", - "Tensor(shape=[51, 256], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n", - " [[-0.70194179, 0.56254166, 0.68803459, ..., 1.12373221, 0.78039235, 1.13693869],\n", - " [-0.77877808, 0.39126658, 0.71887815, ..., 1.25188220, 0.88616788, 1.31734526],\n", - " [-0.95908946, 0.63460249, 0.87671334, ..., 0.98183727, 0.74401081, 1.29032660],\n", - " ...,\n", - " [-1.07322502, 0.67236906, 0.92303109, ..., 0.90754563, 0.81767166, 1.32396567],\n", - " [-1.16541159, 0.68199694, 0.69394493, ..., 1.22383487, 0.80282891, 1.45065081],\n", - " [-1.27320945, 0.71458030, 0.75819558, ..., 0.94154912, 0.87748396, 1.26230514]])\n" - ] - } - ], - "source": [ - "# ecnoder\n", - "encoder_out, encoder_mask = model.encoder(feat, feat_len)\n", - "print(encoder_out.shape)\n", - "print(encoder_mask.shape)\n", - "print(encoder_out[0])" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "id": "shaped-alaska", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "deepspeech examples README_cn.md\tsetup.sh tools\r\n", - "docs\t LICENSE README.md\t\ttests\t utils\r\n", - "env.sh\t log requirements.txt\tthird_party\r\n" - ] - } - ], - "source": [ - "!ls\n", - "data = np.load('.notebook/encoder.npz', allow_pickle=True)\n", - "torch_mask = data['mask']\n", - "torch_encoder_out = data['out']" - ] - }, - 
{ - "cell_type": "code", - "execution_count": 15, - "id": "federal-rover", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "None\n" - ] - } - ], - "source": [ - "print(np.testing.assert_equal(torch_mask, encoder_mask.numpy()))" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "id": "regulated-interstate", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "False\n", - "[[-0.7019424 0.56254166 0.6880345 ... 1.1237322 0.78039217\n", - " 1.1369387 ]\n", - " [-0.778778 0.39126638 0.7188779 ... 1.2518823 0.8861681\n", - " 1.3173454 ]\n", - " [-0.9590891 0.6346026 0.87671363 ... 0.9818373 0.74401116\n", - " 1.2903274 ]\n", - " ...\n", - " [-1.0732253 0.6723689 0.9230311 ... 0.9075457 0.8176713\n", - " 1.3239657 ]\n", - " [-1.165412 0.6819976 0.69394535 ... 1.2238353 0.80282927\n", - " 1.4506509 ]\n", - " [-1.2732087 0.71458083 0.7581961 ... 0.9415482 0.877484\n", - " 1.2623053 ]]\n", - "----\n", - "[[-0.7019418 0.56254166 0.6880346 ... 1.1237322 0.78039235\n", - " 1.1369387 ]\n", - " [-0.7787781 0.39126658 0.71887815 ... 1.2518822 0.8861679\n", - " 1.3173453 ]\n", - " [-0.95908946 0.6346025 0.87671334 ... 0.9818373 0.7440108\n", - " 1.2903266 ]\n", - " ...\n", - " [-1.073225 0.67236906 0.9230311 ... 0.9075456 0.81767166\n", - " 1.3239657 ]\n", - " [-1.1654116 0.68199694 0.69394493 ... 1.2238349 0.8028289\n", - " 1.4506508 ]\n", - " [-1.2732095 0.7145803 0.7581956 ... 0.9415491 0.87748396\n", - " 1.2623051 ]]\n", - "True\n", - "False\n" - ] - } - ], - "source": [ - "print(np.allclose(torch_encoder_out, encoder_out.numpy()))\n", - "print(torch_encoder_out[0])\n", - "print(\"----\")\n", - "print(encoder_out.numpy()[0])\n", - "print(np.allclose(torch_encoder_out, encoder_out.numpy(), atol=1e-5, rtol=1e-6))\n", - "print(np.allclose(torch_encoder_out, encoder_out.numpy(), atol=1e-6, rtol=1e-6))" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "id": "proof-scheduling", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n", - " [377.33258057])\n", - "[1.]\n", - "[[ 3.16902876e+00 -1.51763987e-02 4.91095744e-02 ... -2.47971853e-03\n", - " -5.93360700e-03 -7.26609165e-03]\n", - " [-1.74184477e+00 7.75874173e-03 -4.49434854e-02 ... 9.92412097e-04\n", - " 2.46337592e-03 2.31892057e-03]\n", - " [-2.33343339e+00 1.30475955e-02 -2.66557075e-02 ... 2.27532350e-03\n", - " 5.76924905e-03 7.48788286e-03]\n", - " ...\n", - " [-4.30358458e+00 2.46054661e-02 -9.00950655e-02 ... 4.43156436e-03\n", - " 1.16122244e-02 1.44715561e-02]\n", - " [-3.36921120e+00 1.73153952e-02 -6.36872873e-02 ... 3.28363618e-03\n", - " 8.58010259e-03 1.07794888e-02]\n", - " [-6.62045336e+00 3.49955931e-02 -1.23962618e-01 ... 6.36671018e-03\n", - " 1.60814095e-02 2.03891303e-02]]\n", - "[-4.3777819e+00 2.3245810e-02 -9.3339294e-02 ... 
4.2569344e-03\n", - " 1.0919910e-02 1.3787797e-02]\n" - ] - } - ], - "source": [ - "from paddle.nn import functional as F\n", - "def ctc_loss(logits,\n", - " labels,\n", - " input_lengths,\n", - " label_lengths,\n", - " blank=0,\n", - " reduction='mean',\n", - " norm_by_times=False):\n", - " loss_out = paddle.fluid.layers.warpctc(logits, labels, blank, norm_by_times,\n", - " input_lengths, label_lengths)\n", - " loss_out = paddle.fluid.layers.squeeze(loss_out, [-1])\n", - " assert reduction in ['mean', 'sum', 'none']\n", - " if reduction == 'mean':\n", - " loss_out = paddle.mean(loss_out / label_lengths)\n", - " elif reduction == 'sum':\n", - " loss_out = paddle.sum(loss_out)\n", - " return loss_out\n", - "\n", - "F.ctc_loss = ctc_loss\n", - "\n", - "torch_mask_t = paddle.to_tensor(torch_mask, dtype='int64')\n", - "encoder_out_lens = torch_mask_t.squeeze(1).sum(1)\n", - "loss_ctc = model.ctc(paddle.to_tensor(torch_encoder_out), encoder_out_lens, text, text_len)\n", - "print(loss_ctc)\n", - "loss_ctc.backward()\n", - "print(loss_ctc.grad)\n", - "print(model.ctc.ctc_lo.weight.grad)\n", - "print(model.ctc.ctc_lo.bias.grad)\n", - "\n", - "\n", - "# tensor(377.3326, device='cuda:0', grad_fn=)\n", - "# None\n", - "# [[ 3.16902351e+00 -1.51765049e-02 4.91097234e-02 ... -2.47973716e-03\n", - "# -5.93366381e-03 -7.26613170e-03]\n", - "# [-1.74185038e+00 7.75875803e-03 -4.49435972e-02 ... 9.92415240e-04\n", - "# 2.46338220e-03 2.31891591e-03]\n", - "# [-2.33343077e+00 1.30476682e-02 -2.66557615e-02 ... 2.27533933e-03\n", - "# 5.76929189e-03 7.48792710e-03]\n", - "# ...\n", - "# [-4.30356789e+00 2.46056803e-02 -9.00955945e-02 ... 4.43160534e-03\n", - "# 1.16123557e-02 1.44716976e-02]\n", - "# [-3.36919212e+00 1.73155665e-02 -6.36875406e-02 ... 3.28367390e-03\n", - "# 8.58021621e-03 1.07796099e-02]\n", - "# [-6.62039661e+00 3.49958315e-02 -1.23963736e-01 ... 6.36674836e-03\n", - "# 1.60815325e-02 2.03892551e-02]]\n", - "# [-4.3777566e+00 2.3245990e-02 -9.3339972e-02 ... 4.2569702e-03\n", - "# 1.0920014e-02 1.3787906e-02]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "enclosed-consolidation", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 18, - "id": "synthetic-hungarian", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n", - " [41.84146118]) 0.0\n" - ] - } - ], - "source": [ - "loss_att, acc_att = model._calc_att_loss(paddle.to_tensor(torch_encoder_out), paddle.to_tensor(torch_mask),\n", - " text, text_len)\n", - "print(loss_att, acc_att)\n", - "#tensor(41.8416, device='cuda:0', grad_fn=) 0.0" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "id": "indian-sweden", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 202, - "id": "marine-cuisine", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[[-3.7638968e-01 -8.2272053e-01 7.4276292e-01 ... 3.4200522e-01\n", - " 1.5034772e-02 4.0337229e-01]\n", - " [-8.7386459e-01 -3.1389427e-01 4.1987866e-01 ... 3.7723729e-01\n", - " -1.4352810e-01 -1.0023664e+00]\n", - " [-4.3505096e-01 3.4504786e-02 -2.8710306e-01 ... 7.7274129e-02\n", - " -1.1672243e+00 -2.6848501e-01]\n", - " ...\n", - " [ 4.2471480e-01 5.8885634e-01 2.0203922e-02 ... 3.7405500e-01\n", - " 4.5470044e-02 -3.7139410e-01]\n", - " [-3.7978446e-01 -8.1084180e-01 7.5725085e-01 ... 
2.6038891e-01\n", - " -7.9347193e-04 4.2537671e-01]\n", - " [-3.8279903e-01 -8.1206715e-01 7.4943429e-01 ... 2.6173013e-01\n", - " -1.0499060e-03 4.2678756e-01]]\n" - ] - } - ], - "source": [ - "data = np.load(\".notebook/decoder.npz\", allow_pickle=True)\n", - "torch_decoder_out = data['decoder_out']\n", - "print(torch_decoder_out[0])" - ] - }, - { - "cell_type": "code", - "execution_count": 180, - "id": "several-result", - "metadata": {}, - "outputs": [], - "source": [ - "def add_sos_eos(ys_pad: paddle.Tensor, sos: int, eos: int,\n", - " ignore_id: int):\n", - " \"\"\"Add and labels.\n", - " Args:\n", - " ys_pad (paddle.Tensor): batch of padded target sequences (B, Lmax)\n", - " sos (int): index of \n", - " eos (int): index of \n", - " ignore_id (int): index of padding\n", - " Returns:\n", - " ys_in (paddle.Tensor) : (B, Lmax + 1)\n", - " ys_out (paddle.Tensor) : (B, Lmax + 1)\n", - " Examples:\n", - " >>> sos_id = 10\n", - " >>> eos_id = 11\n", - " >>> ignore_id = -1\n", - " >>> ys_pad\n", - " tensor([[ 1, 2, 3, 4, 5],\n", - " [ 4, 5, 6, -1, -1],\n", - " [ 7, 8, 9, -1, -1]], dtype=paddle.int32)\n", - " >>> ys_in,ys_out=add_sos_eos(ys_pad, sos_id , eos_id, ignore_id)\n", - " >>> ys_in\n", - " tensor([[10, 1, 2, 3, 4, 5],\n", - " [10, 4, 5, 6, 11, 11],\n", - " [10, 7, 8, 9, 11, 11]])\n", - " >>> ys_out\n", - " tensor([[ 1, 2, 3, 4, 5, 11],\n", - " [ 4, 5, 6, 11, -1, -1],\n", - " [ 7, 8, 9, 11, -1, -1]])\n", - " \"\"\"\n", - " # TODO(Hui Zhang): using comment code, \n", - " #_sos = paddle.to_tensor(\n", - " # [sos], dtype=paddle.long, stop_gradient=True, place=ys_pad.place)\n", - " #_eos = paddle.to_tensor(\n", - " # [eos], dtype=paddle.long, stop_gradient=True, place=ys_pad.place)\n", - " #ys = [y[y != ignore_id] for y in ys_pad] # parse padded ys\n", - " #ys_in = [paddle.cat([_sos, y], dim=0) for y in ys]\n", - " #ys_out = [paddle.cat([y, _eos], dim=0) for y in ys]\n", - " #return pad_sequence(ys_in, padding_value=eos), pad_sequence(ys_out, padding_value=ignore_id)\n", - " B = ys_pad.size(0)\n", - " _sos = paddle.ones([B, 1], dtype=ys_pad.dtype) * sos\n", - " _eos = paddle.ones([B, 1], dtype=ys_pad.dtype) * eos\n", - " ys_in = paddle.cat([_sos, ys_pad], dim=1)\n", - " mask_pad = (ys_in == ignore_id)\n", - " ys_in = ys_in.masked_fill(mask_pad, eos)\n", - " \n", - "\n", - " ys_out = paddle.cat([ys_pad, _eos], dim=1)\n", - " ys_out = ys_out.masked_fill(mask_pad, eos)\n", - " mask_eos = (ys_out == ignore_id)\n", - " ys_out = ys_out.masked_fill(mask_eos, eos)\n", - " ys_out = ys_out.masked_fill(mask_pad, ignore_id)\n", - " return ys_in, ys_out" - ] - }, - { - "cell_type": "code", - "execution_count": 181, - "id": "possible-bulgaria", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Tensor(shape=[16, 7], dtype=int64, place=CUDAPlace(0), stop_gradient=True,\n", - " [[4232, 2995, 3116, 1209, 565 , 4232, 4232],\n", - " [4232, 236 , 1176, 331 , 66 , 3925, 4077],\n", - " [4232, 2693, 524 , 234 , 1145, 366 , 4232],\n", - " [4232, 3875, 4211, 3062, 700 , 4232, 4232],\n", - " [4232, 272 , 987 , 1134, 494 , 2959, 4232],\n", - " [4232, 1936, 3715, 120 , 2553, 2695, 2710],\n", - " [4232, 25 , 1149, 3930, 4232, 4232, 4232],\n", - " [4232, 1753, 1778, 1237, 482 , 3925, 110 ],\n", - " [4232, 3703, 2 , 565 , 3827, 4232, 4232],\n", - " [4232, 1150, 2734, 10 , 2478, 3490, 4232],\n", - " [4232, 426 , 811 , 95 , 489 , 144 , 4232],\n", - " [4232, 2313, 2006, 489 , 975 , 4232, 4232],\n", - " [4232, 3702, 3414, 205 , 1488, 2966, 1347],\n", - " [4232, 70 , 1741, 702 
, 1666, 4232, 4232],\n", - " [4232, 703 , 1778, 1030, 849 , 4232, 4232],\n", - " [4232, 814 , 1674, 115 , 3827, 4232, 4232]])\n", - "Tensor(shape=[16, 7], dtype=int64, place=CUDAPlace(0), stop_gradient=True,\n", - " [[2995, 3116, 1209, 565, 4232, -1 , -1 ],\n", - " [ 236, 1176, 331, 66 , 3925, 4077, 4232],\n", - " [2693, 524, 234, 1145, 366, 4232, -1 ],\n", - " [3875, 4211, 3062, 700, 4232, -1 , -1 ],\n", - " [ 272, 987, 1134, 494, 2959, 4232, -1 ],\n", - " [1936, 3715, 120, 2553, 2695, 2710, 4232],\n", - " [ 25 , 1149, 3930, 4232, -1 , -1 , -1 ],\n", - " [1753, 1778, 1237, 482, 3925, 110, 4232],\n", - " [3703, 2 , 565, 3827, 4232, -1 , -1 ],\n", - " [1150, 2734, 10 , 2478, 3490, 4232, -1 ],\n", - " [ 426, 811, 95 , 489, 144, 4232, -1 ],\n", - " [2313, 2006, 489, 975, 4232, -1 , -1 ],\n", - " [3702, 3414, 205, 1488, 2966, 1347, 4232],\n", - " [ 70 , 1741, 702, 1666, 4232, -1 , -1 ],\n", - " [ 703, 1778, 1030, 849, 4232, -1 , -1 ],\n", - " [ 814, 1674, 115, 3827, 4232, -1 , -1 ]])\n" - ] - } - ], - "source": [ - "ys_pad = text\n", - "ys_pad_lens = text_len\n", - "ys_in_pad, ys_out_pad = add_sos_eos(ys_pad, model.sos, model.eos,\n", - " model.ignore_id)\n", - "ys_in_lens = ys_pad_lens + 1\n", - "print(ys_in_pad)\n", - "print(ys_out_pad)" - ] - }, - { - "cell_type": "code", - "execution_count": 285, - "id": "north-walter", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "False\n", - "True\n", - "False\n", - "[[-3.76389682e-01 -8.22720408e-01 7.42762923e-01 ... 3.42005253e-01\n", - " 1.50350705e-02 4.03372347e-01]\n", - " [-8.73864174e-01 -3.13894272e-01 4.19878662e-01 ... 3.77237231e-01\n", - " -1.43528014e-01 -1.00236630e+00]\n", - " [-4.35050905e-01 3.45046446e-02 -2.87102997e-01 ... 7.72742853e-02\n", - " -1.16722476e+00 -2.68485069e-01]\n", - " ...\n", - " [ 4.24714804e-01 5.88856399e-01 2.02039629e-02 ... 3.74054879e-01\n", - " 4.54700664e-02 -3.71394157e-01]\n", - " [-3.79784584e-01 -8.10841978e-01 7.57250786e-01 ... 2.60389000e-01\n", - " -7.93404877e-04 4.25376773e-01]\n", - " [-3.82798851e-01 -8.12067091e-01 7.49434292e-01 ... 2.61730075e-01\n", - " -1.04988366e-03 4.26787734e-01]]\n", - "---\n", - "[[-3.7638968e-01 -8.2272053e-01 7.4276292e-01 ... 3.4200522e-01\n", - " 1.5034772e-02 4.0337229e-01]\n", - " [-8.7386459e-01 -3.1389427e-01 4.1987866e-01 ... 3.7723729e-01\n", - " -1.4352810e-01 -1.0023664e+00]\n", - " [-4.3505096e-01 3.4504786e-02 -2.8710306e-01 ... 7.7274129e-02\n", - " -1.1672243e+00 -2.6848501e-01]\n", - " ...\n", - " [ 4.2471480e-01 5.8885634e-01 2.0203922e-02 ... 3.7405500e-01\n", - " 4.5470044e-02 -3.7139410e-01]\n", - " [-3.7978446e-01 -8.1084180e-01 7.5725085e-01 ... 2.6038891e-01\n", - " -7.9347193e-04 4.2537671e-01]\n", - " [-3.8279903e-01 -8.1206715e-01 7.4943429e-01 ... 
2.6173013e-01\n", - " -1.0499060e-03 4.2678756e-01]]\n" - ] - } - ], - "source": [ - "decoder_out, _ = model.decoder(encoder_out, encoder_mask, ys_in_pad,\n", - " ys_in_lens)\n", - "\n", - "print(np.allclose(decoder_out.numpy(), torch_decoder_out))\n", - "print(np.allclose(decoder_out.numpy(), torch_decoder_out, atol=1e-6))\n", - "print(np.allclose(decoder_out.numpy(), torch_decoder_out, atol=1e-7))\n", - "print(decoder_out.numpy()[0])\n", - "print('---')\n", - "print(torch_decoder_out[0])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "armed-cowboy", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "fifty-earth", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "proud-commonwealth", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 183, - "id": "assisted-fortune", - "metadata": {}, - "outputs": [], - "source": [ - "from paddle import nn\n", - "import paddle\n", - "from paddle.nn import functional as F\n", - "\n", - "class LabelSmoothingLoss(nn.Layer):\n", - "\n", - " def __init__(self,\n", - " size: int,\n", - " padding_idx: int,\n", - " smoothing: float,\n", - " normalize_length: bool=False):\n", - " super().__init__()\n", - " self.size = size\n", - " self.padding_idx = padding_idx\n", - " self.smoothing = smoothing\n", - " self.confidence = 1.0 - smoothing\n", - " self.normalize_length = normalize_length\n", - " self.criterion = nn.KLDivLoss(reduction=\"none\")\n", - "\n", - " def forward(self, x: paddle.Tensor, target: paddle.Tensor) -> paddle.Tensor:\n", - " \"\"\"Compute loss between x and target.\n", - " The model outputs and data labels tensors are flatten to\n", - " (batch*seqlen, class) shape and a mask is applied to the\n", - " padding part which should not be calculated for loss.\n", - " \n", - " Args:\n", - " x (paddle.Tensor): prediction (batch, seqlen, class)\n", - " target (paddle.Tensor):\n", - " target signal masked with self.padding_id (batch, seqlen)\n", - " Returns:\n", - " loss (paddle.Tensor) : The KL loss, scalar float value\n", - " \"\"\"\n", - " B, T, D = paddle.shape(x)\n", - " assert D == self.size\n", - " x = x.reshape((-1, self.size))\n", - " target = target.reshape([-1])\n", - "\n", - " # use zeros_like instead of torch.no_grad() for true_dist,\n", - " # since no_grad() can not be exported by JIT\n", - " true_dist = paddle.full_like(x, self.smoothing / (self.size - 1))\n", - " ignore = target == self.padding_idx # (B,)\n", - " print(self.smoothing / (self.size - 1))\n", - " print(true_dist)\n", - "\n", - " #target = target * (1 - ignore) # avoid -1 index\n", - " target = target.masked_fill(ignore, 0) # avoid -1 index\n", - " \n", - " \n", - " #true_dist += F.one_hot(target, self.size) * self.confidence\n", - " target_mask = F.one_hot(target, self.size)\n", - " true_dist *= (1 - target_mask)\n", - " true_dist += target_mask * self.confidence\n", - " \n", - "\n", - " kl = self.criterion(F.log_softmax(x, axis=1), true_dist)\n", - " \n", - " #TODO(Hui Zhang): sum not support bool type\n", - " #total = len(target) - int(ignore.sum())\n", - " total = len(target) - int(ignore.type_as(target).sum())\n", - " denom = total if self.normalize_length else B\n", - "\n", - " #numer = (kl * (1 - ignore)).sum()\n", - " numer = kl.masked_fill(ignore.unsqueeze(1), 0).sum()\n", - " return numer / denom\n" - ] - }, - { - "cell_type": "code", - "execution_count": 184, - 
"id": "weighted-delight", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "2.3629489603024576e-05\n", - "Tensor(shape=[112, 4233], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n", - " [[0.00002363, 0.00002363, 0.00002363, ..., 0.00002363, 0.00002363, 0.00002363],\n", - " [0.00002363, 0.00002363, 0.00002363, ..., 0.00002363, 0.00002363, 0.00002363],\n", - " [0.00002363, 0.00002363, 0.00002363, ..., 0.00002363, 0.00002363, 0.00002363],\n", - " ...,\n", - " [0.00002363, 0.00002363, 0.00002363, ..., 0.00002363, 0.00002363, 0.00002363],\n", - " [0.00002363, 0.00002363, 0.00002363, ..., 0.00002363, 0.00002363, 0.00002363],\n", - " [0.00002363, 0.00002363, 0.00002363, ..., 0.00002363, 0.00002363, 0.00002363]])\n", - "Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n", - " [41.84146118])\n", - "VarType.INT64\n" - ] - } - ], - "source": [ - "criteron = LabelSmoothingLoss(4233, -1, 0.1, False)\n", - "loss_att = criteron(paddle.to_tensor(torch_decoder_out), ys_out_pad.astype('int64'))\n", - "print(loss_att)\n", - "print(ys_out_pad.dtype)\n", - "# tensor(41.8416, device='cuda:0', grad_fn=)" - ] - }, - { - "cell_type": "code", - "execution_count": 286, - "id": "dress-shelter", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n", - " [41.84146118])\n", - "Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n", - " [41.84146118])\n", - "4233\n", - "-1\n", - "0.1\n", - "False\n" - ] - } - ], - "source": [ - "decoder_out, _ = model.decoder(encoder_out, encoder_mask, ys_in_pad,\n", - " ys_in_lens)\n", - "\n", - "loss_att = model.criterion_att(paddle.to_tensor(torch_decoder_out), ys_out_pad)\n", - "print(loss_att)\n", - "\n", - "loss_att = model.criterion_att(decoder_out, ys_out_pad)\n", - "print(loss_att)\n", - "\n", - "print(model.criterion_att.size)\n", - "print(model.criterion_att.padding_idx)\n", - "print(model.criterion_att.smoothing)\n", - "print(model.criterion_att.normalize_length)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "growing-tooth", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "going-hungary", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "naughty-citizenship", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "experimental-emerald", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "adverse-saskatchewan", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 27, - "id": "speaking-shelf", - "metadata": {}, - "outputs": [], - "source": [ - "from typing import List\n", - "from typing import Optional\n", - "from typing import Tuple\n", - "\n", - "import paddle\n", - "from paddle import nn\n", - "from typeguard import check_argument_types\n", - "\n", - "from deepspeech.modules.activation import get_activation\n", - "from deepspeech.modules.attention import MultiHeadedAttention\n", - "from deepspeech.modules.attention import RelPositionMultiHeadedAttention\n", - "from deepspeech.modules.conformer_convolution import ConvolutionModule\n", - "from deepspeech.modules.embedding import PositionalEncoding\n", - "from 
deepspeech.modules.embedding import RelPositionalEncoding\n", - "from deepspeech.modules.encoder_layer import ConformerEncoderLayer\n", - "from deepspeech.modules.encoder_layer import TransformerEncoderLayer\n", - "from deepspeech.modules.mask import add_optional_chunk_mask\n", - "from deepspeech.modules.mask import make_non_pad_mask\n", - "from deepspeech.modules.positionwise_feed_forward import PositionwiseFeedForward\n", - "from deepspeech.modules.subsampling import Conv2dSubsampling4\n", - "from deepspeech.modules.subsampling import Conv2dSubsampling6\n", - "from deepspeech.modules.subsampling import Conv2dSubsampling8\n", - "from deepspeech.modules.subsampling import LinearNoSubsampling\n", - "\n", - "class BaseEncoder(nn.Layer):\n", - " def __init__(\n", - " self,\n", - " input_size: int,\n", - " output_size: int=256,\n", - " attention_heads: int=4,\n", - " linear_units: int=2048,\n", - " num_blocks: int=6,\n", - " dropout_rate: float=0.1,\n", - " positional_dropout_rate: float=0.1,\n", - " attention_dropout_rate: float=0.0,\n", - " input_layer: str=\"conv2d\",\n", - " pos_enc_layer_type: str=\"abs_pos\",\n", - " normalize_before: bool=True,\n", - " concat_after: bool=False,\n", - " static_chunk_size: int=0,\n", - " use_dynamic_chunk: bool=False,\n", - " global_cmvn: paddle.nn.Layer=None,\n", - " use_dynamic_left_chunk: bool=False, ):\n", - " \"\"\"\n", - " Args:\n", - " input_size (int): input dim, d_feature\n", - " output_size (int): dimension of attention, d_model\n", - " attention_heads (int): the number of heads of multi head attention\n", - " linear_units (int): the hidden units number of position-wise feed\n", - " forward\n", - " num_blocks (int): the number of encoder blocks\n", - " dropout_rate (float): dropout rate\n", - " attention_dropout_rate (float): dropout rate in attention\n", - " positional_dropout_rate (float): dropout rate after adding\n", - " positional encoding\n", - " input_layer (str): input layer type.\n", - " optional [linear, conv2d, conv2d6, conv2d8]\n", - " pos_enc_layer_type (str): Encoder positional encoding layer type.\n", - " opitonal [abs_pos, scaled_abs_pos, rel_pos]\n", - " normalize_before (bool):\n", - " True: use layer_norm before each sub-block of a layer.\n", - " False: use layer_norm after each sub-block of a layer.\n", - " concat_after (bool): whether to concat attention layer's input\n", - " and output.\n", - " True: x -> x + linear(concat(x, att(x)))\n", - " False: x -> x + att(x)\n", - " static_chunk_size (int): chunk size for static chunk training and\n", - " decoding\n", - " use_dynamic_chunk (bool): whether use dynamic chunk size for\n", - " training or not, You can only use fixed chunk(chunk_size > 0)\n", - " or dyanmic chunk size(use_dynamic_chunk = True)\n", - " global_cmvn (Optional[paddle.nn.Layer]): Optional GlobalCMVN layer\n", - " use_dynamic_left_chunk (bool): whether use dynamic left chunk in\n", - " dynamic chunk training\n", - " \"\"\"\n", - " assert check_argument_types()\n", - " super().__init__()\n", - " self._output_size = output_size\n", - "\n", - " if pos_enc_layer_type == \"abs_pos\":\n", - " pos_enc_class = PositionalEncoding\n", - " elif pos_enc_layer_type == \"rel_pos\":\n", - " pos_enc_class = RelPositionalEncoding\n", - " else:\n", - " raise ValueError(\"unknown pos_enc_layer: \" + pos_enc_layer_type)\n", - "\n", - " if input_layer == \"linear\":\n", - " subsampling_class = LinearNoSubsampling\n", - " elif input_layer == \"conv2d\":\n", - " subsampling_class = Conv2dSubsampling4\n", - " elif input_layer == 
\"conv2d6\":\n", - " subsampling_class = Conv2dSubsampling6\n", - " elif input_layer == \"conv2d8\":\n", - " subsampling_class = Conv2dSubsampling8\n", - " else:\n", - " raise ValueError(\"unknown input_layer: \" + input_layer)\n", - "\n", - " self.global_cmvn = global_cmvn\n", - " self.embed = subsampling_class(\n", - " idim=input_size,\n", - " odim=output_size,\n", - " dropout_rate=dropout_rate,\n", - " pos_enc_class=pos_enc_class(\n", - " d_model=output_size, dropout_rate=positional_dropout_rate), )\n", - "\n", - " self.normalize_before = normalize_before\n", - " self.after_norm = nn.LayerNorm(output_size, epsilon=1e-12)\n", - " self.static_chunk_size = static_chunk_size\n", - " self.use_dynamic_chunk = use_dynamic_chunk\n", - " self.use_dynamic_left_chunk = use_dynamic_left_chunk\n", - "\n", - " def output_size(self) -> int:\n", - " return self._output_size\n", - "\n", - " def forward(\n", - " self,\n", - " xs: paddle.Tensor,\n", - " xs_lens: paddle.Tensor,\n", - " decoding_chunk_size: int=0,\n", - " num_decoding_left_chunks: int=-1,\n", - " ) -> Tuple[paddle.Tensor, paddle.Tensor]:\n", - " \"\"\"Embed positions in tensor.\n", - " Args:\n", - " xs: padded input tensor (B, L, D)\n", - " xs_lens: input length (B)\n", - " decoding_chunk_size: decoding chunk size for dynamic chunk\n", - " 0: default for training, use random dynamic chunk.\n", - " <0: for decoding, use full chunk.\n", - " >0: for decoding, use fixed chunk size as set.\n", - " num_decoding_left_chunks: number of left chunks, this is for decoding,\n", - " the chunk size is decoding_chunk_size.\n", - " >=0: use num_decoding_left_chunks\n", - " <0: use all left chunks\n", - " Returns:\n", - " encoder output tensor, lens and mask\n", - " \"\"\"\n", - " masks = make_non_pad_mask(xs_lens).unsqueeze(1) # (B, 1, L)\n", - "\n", - " if self.global_cmvn is not None:\n", - " xs = self.global_cmvn(xs)\n", - " #TODO(Hui Zhang): self.embed(xs, masks, offset=0), stride_slice not support bool tensor\n", - " xs, pos_emb, masks = self.embed(xs, masks.type_as(xs), offset=0)\n", - " #TODO(Hui Zhang): remove mask.astype, stride_slice not support bool tensor\n", - " masks = masks.astype(paddle.bool)\n", - " #TODO(Hui Zhang): mask_pad = ~masks\n", - " mask_pad = masks.logical_not()\n", - " chunk_masks = add_optional_chunk_mask(\n", - " xs, masks, self.use_dynamic_chunk, self.use_dynamic_left_chunk,\n", - " decoding_chunk_size, self.static_chunk_size,\n", - " num_decoding_left_chunks)\n", - " for layer in self.encoders:\n", - " xs, chunk_masks, _ = layer(xs, chunk_masks, pos_emb, mask_pad)\n", - " if self.normalize_before:\n", - " xs = self.after_norm(xs)\n", - " # Here we assume the mask is not changed in encoder layers, so just\n", - " # return the masks before encoder layers, and the masks will be used\n", - " # for cross attention with decoder later\n", - " return xs, masks" - ] - }, - { - "cell_type": "code", - "execution_count": 28, - "id": "sharp-municipality", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "class ConformerEncoder(BaseEncoder):\n", - " \"\"\"Conformer encoder module.\"\"\"\n", - "\n", - " def __init__(\n", - " self,\n", - " input_size: int,\n", - " output_size: int=256,\n", - " attention_heads: int=4,\n", - " linear_units: int=2048,\n", - " num_blocks: int=6,\n", - " dropout_rate: float=0.1,\n", - " positional_dropout_rate: float=0.1,\n", - " attention_dropout_rate: float=0.0,\n", - " input_layer: str=\"conv2d\",\n", - " pos_enc_layer_type: str=\"rel_pos\",\n", - " normalize_before: bool=True,\n", - " 
concat_after: bool=False,\n", - " static_chunk_size: int=0,\n", - " use_dynamic_chunk: bool=False,\n", - " global_cmvn: nn.Layer=None,\n", - " use_dynamic_left_chunk: bool=False,\n", - " positionwise_conv_kernel_size: int=1,\n", - " macaron_style: bool=True,\n", - " selfattention_layer_type: str=\"rel_selfattn\",\n", - " activation_type: str=\"swish\",\n", - " use_cnn_module: bool=True,\n", - " cnn_module_kernel: int=15,\n", - " causal: bool=False,\n", - " cnn_module_norm: str=\"batch_norm\", ):\n", - " \"\"\"Construct ConformerEncoder\n", - " Args:\n", - " input_size to use_dynamic_chunk, see in BaseEncoder\n", - " positionwise_conv_kernel_size (int): Kernel size of positionwise\n", - " conv1d layer.\n", - " macaron_style (bool): Whether to use macaron style for\n", - " positionwise layer.\n", - " selfattention_layer_type (str): Encoder attention layer type,\n", - " the parameter has no effect now, it's just for configure\n", - " compatibility.\n", - " activation_type (str): Encoder activation function type.\n", - " use_cnn_module (bool): Whether to use convolution module.\n", - " cnn_module_kernel (int): Kernel size of convolution module.\n", - " causal (bool): whether to use causal convolution or not.\n", - " cnn_module_norm (str): cnn conv norm type, Optional['batch_norm','layer_norm']\n", - " \"\"\"\n", - " assert check_argument_types()\n", - " super().__init__(input_size, output_size, attention_heads, linear_units,\n", - " num_blocks, dropout_rate, positional_dropout_rate,\n", - " attention_dropout_rate, input_layer,\n", - " pos_enc_layer_type, normalize_before, concat_after,\n", - " static_chunk_size, use_dynamic_chunk, global_cmvn,\n", - " use_dynamic_left_chunk)\n", - " activation = get_activation(activation_type)\n", - "\n", - " # self-attention module definition\n", - " encoder_selfattn_layer = RelPositionMultiHeadedAttention\n", - " encoder_selfattn_layer_args = (attention_heads, output_size,\n", - " attention_dropout_rate)\n", - " # feed-forward module definition\n", - " positionwise_layer = PositionwiseFeedForward\n", - " positionwise_layer_args = (output_size, linear_units, dropout_rate,\n", - " activation)\n", - " # convolution module definition\n", - " convolution_layer = ConvolutionModule\n", - " convolution_layer_args = (output_size, cnn_module_kernel, activation,\n", - " cnn_module_norm, causal)\n", - "\n", - " self.encoders = nn.ModuleList([\n", - " ConformerEncoderLayer(\n", - " size=output_size,\n", - " self_attn=encoder_selfattn_layer(*encoder_selfattn_layer_args),\n", - " feed_forward=positionwise_layer(*positionwise_layer_args),\n", - " feed_forward_macaron=positionwise_layer(\n", - " *positionwise_layer_args) if macaron_style else None,\n", - " conv_module=convolution_layer(*convolution_layer_args)\n", - " if use_cnn_module else None,\n", - " dropout_rate=dropout_rate,\n", - " normalize_before=normalize_before,\n", - " concat_after=concat_after) for _ in range(num_blocks)\n", - " ])\n" - ] - }, - { - "cell_type": "code", - "execution_count": 29, - "id": "tutorial-syndication", - "metadata": {}, - "outputs": [], - "source": [ - "from deepspeech.frontend.utility import load_cmvn\n", - "from deepspeech.modules.cmvn import GlobalCMVN\n", - "\n", - "configs=cfg.model\n", - "mean, istd = load_cmvn(configs['cmvn_file'],\n", - " configs['cmvn_file_type'])\n", - "global_cmvn = GlobalCMVN(\n", - " paddle.to_tensor(mean, dtype=paddle.float),\n", - " paddle.to_tensor(istd, dtype=paddle.float))\n", - "\n", - "\n", - "input_dim = configs['input_dim']\n", - "vocab_size = 
configs['output_dim']\n", - "encoder_type = configs.get('encoder', 'transformer')\n", - " \n", - "encoder = ConformerEncoder(\n", - " input_dim, global_cmvn=global_cmvn, **configs['encoder_conf'])" - ] - }, - { - "cell_type": "code", - "execution_count": 30, - "id": "fuzzy-register", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "True\n" - ] - } - ], - "source": [ - "o = global_cmvn(feat)\n", - "o2 = model.encoder.global_cmvn(feat)\n", - "print(np.allclose(o.numpy(), o2.numpy()))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "explicit-triumph", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "humanitarian-belgium", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "dying-proposal", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "honest-quick", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "bound-cholesterol", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "viral-packaging", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 203, - "id": "balanced-locator", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Tensor(shape=[16, 1, 207], dtype=bool, place=CUDAPlace(0), stop_gradient=True,\n", - " [[[True , True , True , ..., True , True , True ]],\n", - "\n", - " [[True , True , True , ..., True , True , True ]],\n", - "\n", - " [[True , True , True , ..., True , False, False]],\n", - "\n", - " ...,\n", - "\n", - " [[True , True , True , ..., False, False, False]],\n", - "\n", - " [[True , True , True , ..., False, False, False]],\n", - "\n", - " [[True , True , True , ..., False, False, False]]])\n" - ] - } - ], - "source": [ - "from deepspeech.modules.mask import make_non_pad_mask\n", - "from deepspeech.modules.mask import make_pad_mask\n", - "masks = make_non_pad_mask(feat_len).unsqueeze(1)\n", - "print(masks)" - ] - }, - { - "cell_type": "code", - "execution_count": 204, - "id": "induced-proposition", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Tensor(shape=[16, 207, 80], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n", - " [[[-0.53697914, -0.19910523, -0.34997201, ..., -0.82427669, -1.02650309, -0.96300691],\n", - " [-0.04464225, 0.23176001, -0.32538742, ..., -0.90158713, -1.03248465, -0.75986791],\n", - " [ 0.50035292, 0.22691160, -0.73052198, ..., -1.00552964, -0.87123060, -1.03062117],\n", - " ...,\n", - " [-0.40023831, -0.14325078, -0.57947433, ..., -1.07178426, -1.28059900, -1.05180073],\n", - " [ 0.15755332, -0.00184949, -0.28702953, ..., -1.10898709, -0.94518697, -0.72506356],\n", - " [-0.47520429, -1.39415145, -0.25754252, ..., -1.13649082, -1.19430351, -1.22903371]],\n", - "\n", - " [[ 0.95454037, 0.36427975, -1.38908529, ..., -1.16366839, -1.28453600, -1.20151031],\n", - " [-0.08573537, -1.05785275, -0.89172721, ..., -0.96440506, -1.12547100, -1.25990939],\n", - " [ 0.47653601, 0.32886592, -0.59200549, ..., -1.19421589, -1.14302588, -1.02422845],\n", - " ...,\n", - " [-0.47431335, -0.33558893, -0.72325647, ..., -1.45058632, -1.39574063, -1.04641151],\n", - " [ 0.36112556, 0.10380996, -1.15994537, ..., 
-1.04394984, -1.02212358, -1.02083635],\n", - " [-1.27172923, -2.14601755, -0.75676596, ..., -0.97822225, -0.93785471, -1.03707945]],\n", - "\n", - " [[-1.54652190, -1.01517177, -0.88900733, ..., -0.48522446, -0.75163364, -0.67765164],\n", - " [-0.76100892, -0.73351598, -0.91587651, ..., -0.24835993, -0.58927339, -0.73722762],\n", - " [-0.02471367, 0.17015894, -0.42326337, ..., -0.33203802, -0.76695800, -0.71651691],\n", - " ...,\n", - " [-1.70319796, -1.25910866, -1.14492917, ..., -1.18101490, -1.11631835, -0.93108195],\n", - " [-6.04343224, -4.93973970, -3.42354989, ..., -3.99492049, -3.98687553, -3.67971063],\n", - " [-6.04343224, -4.93973970, -3.42354989, ..., -3.99492049, -3.98687553, -3.67971063]],\n", - "\n", - " ...,\n", - "\n", - " [[ 0.64982772, 0.26116797, -0.84196597, ..., -0.87213463, -1.10728693, -1.32531130],\n", - " [ 0.35391113, -0.01584581, -0.40424931, ..., -0.99173468, -1.07270539, -1.19239008],\n", - " [ 0.37704495, -0.06278508, -0.11467686, ..., -1.10212946, -1.09524000, -1.11815071],\n", - " ...,\n", - " [-6.04343224, -4.93973970, -3.42354989, ..., -3.99492049, -3.98687553, -3.67971063],\n", - " [-6.04343224, -4.93973970, -3.42354989, ..., -3.99492049, -3.98687553, -3.67971063],\n", - " [-6.04343224, -4.93973970, -3.42354989, ..., -3.99492049, -3.98687553, -3.67971063]],\n", - "\n", - " [[ 0.04445776, -0.17546852, -0.67475224, ..., -0.49801198, -0.56782746, -0.77852231],\n", - " [-1.34279025, -0.80342549, -0.90457231, ..., -0.65901577, -0.72549772, -0.62796098],\n", - " [-0.76252806, -0.13071291, -0.13280024, ..., -0.56132573, -0.60587686, -0.72114766],\n", - " ...,\n", - " [-6.04343224, -4.93973970, -3.42354989, ..., -3.99492049, -3.98687553, -3.67971063],\n", - " [-6.04343224, -4.93973970, -3.42354989, ..., -3.99492049, -3.98687553, -3.67971063],\n", - " [-6.04343224, -4.93973970, -3.42354989, ..., -3.99492049, -3.98687553, -3.67971063]],\n", - "\n", - " [[-1.07980299, -1.08341801, -1.17969072, ..., -0.17757270, -0.43746525, -0.04000654],\n", - " [ 0.92353648, 0.63770926, -0.52810186, ..., -0.12927933, -0.20342292, 0.16655664],\n", - " [ 0.49337494, -0.00911332, -0.73301607, ..., 0.10074048, -0.09811471, -0.00923573],\n", - " ...,\n", - " [-6.04343224, -4.93973970, -3.42354989, ..., -3.99492049, -3.98687553, -3.67971063],\n", - " [-6.04343224, -4.93973970, -3.42354989, ..., -3.99492049, -3.98687553, -3.67971063],\n", - " [-6.04343224, -4.93973970, -3.42354989, ..., -3.99492049, -3.98687553, -3.67971063]]])\n" - ] - } - ], - "source": [ - "xs = model.encoder.global_cmvn(feat)\n", - "print(xs)" - ] - }, - { - "cell_type": "code", - "execution_count": 205, - "id": "cutting-julian", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Tensor(shape=[16, 256, 51, 19], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n", - " [[[[0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " ...,\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ]],\n", - "\n", - " [[0. , 0. , 0. , ..., 0. , 0. , 0.00209083],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " ...,\n", - " [0. , 0.01194306, 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0.04610471, 0. ],\n", - " [0. , 0. , 0. , ..., 0.00967231, 0.04613467, 0. ]],\n", - "\n", - " [[0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. 
],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " ...,\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ]],\n", - "\n", - " ...,\n", - "\n", - " [[0.22816099, 0.24614786, 0.25304127, ..., 0.20401822, 0.23248228, 0.31190544],\n", - " [0.13587360, 0.28877240, 0.27991283, ..., 0.19210319, 0.20346391, 0.19934426],\n", - " [0.25739068, 0.39348233, 0.27877361, ..., 0.27482539, 0.19302306, 0.23810163],\n", - " ...,\n", - " [0.11939213, 0.28473237, 0.33082074, ..., 0.23838061, 0.22104350, 0.23905794],\n", - " [0.17387670, 0.20402060, 0.40263173, ..., 0.24782266, 0.26742202, 0.15426503],\n", - " [0. , 0.29080707, 0.27725950, ..., 0.17539823, 0.18478745, 0.22483408]],\n", - "\n", - " [[0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " ...,\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ]],\n", - "\n", - " [[0.35446781, 0.38861471, 0.39724261, ..., 0.38680089, 0.33568040, 0.34552398],\n", - " [0.41739127, 0.51038563, 0.41729912, ..., 0.33992639, 0.37081629, 0.35109508],\n", - " [0.36116859, 0.40744874, 0.48490953, ..., 0.34848654, 0.32321057, 0.35188958],\n", - " ...,\n", - " [0.23143977, 0.38021481, 0.51526314, ..., 0.36499465, 0.37411752, 0.39986172],\n", - " [0.34678638, 0.40238205, 0.50076538, ..., 0.36184520, 0.31596646, 0.36334658],\n", - " [0.36498138, 0.37943166, 0.51718897, ..., 0.31798238, 0.33656698, 0.34130475]]],\n", - "\n", - "\n", - " [[[0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " ...,\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ]],\n", - "\n", - " [[0.01456045, 0.09447514, 0. , ..., 0. , 0. , 0. ],\n", - " [0.01500242, 0.02963220, 0. , ..., 0. , 0. , 0. ],\n", - " [0.03295187, 0. , 0. , ..., 0.04584959, 0.02043908, 0. ],\n", - " ...,\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0.04425837],\n", - " [0. , 0. , 0.02556529, ..., 0. , 0.00900441, 0.04908358]],\n", - "\n", - " [[0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0.11141267, 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " ...,\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ]],\n", - "\n", - " ...,\n", - "\n", - " [[0.33696529, 0.38526866, 0.32900479, ..., 0.28703830, 0.23351061, 0.19004467],\n", - " [0.13575366, 0.35783342, 0.33573425, ..., 0.22081660, 0.15854910, 0.13587447],\n", - " [0.21928655, 0.28900093, 0.28255141, ..., 0.20602837, 0.23927397, 0.21909429],\n", - " ...,\n", - " [0.23291890, 0.39096734, 0.36399242, ..., 0.20598020, 0.25373828, 0.23137446],\n", - " [0.18739152, 0.30793777, 0.30296701, ..., 0.27250600, 0.25191751, 0.20836820],\n", - " [0.22454213, 0.41402060, 0.54082996, ..., 0.31874508, 0.25079906, 0.25938687]],\n", - "\n", - " [[0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " ...,\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. 
]],\n", - "\n", - " [[0.26456982, 0.49519050, 0.56702250, ..., 0.30954638, 0.35292268, 0.32668519],\n", - " [0.21576807, 0.51833367, 0.49183372, ..., 0.36043224, 0.38523889, 0.36154741],\n", - " [0.20067888, 0.42784205, 0.52817714, ..., 0.31871423, 0.32452232, 0.31036487],\n", - " ...,\n", - " [0.49855131, 0.51001430, 0.52278662, ..., 0.36450142, 0.34338164, 0.33602941],\n", - " [0.41233343, 0.55517823, 0.52827710, ..., 0.40675971, 0.33873138, 0.36724189],\n", - " [0.40820011, 0.46187383, 0.47338152, ..., 0.38690975, 0.36039269, 0.38022059]]],\n", - "\n", - "\n", - " [[[0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " ...,\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ]],\n", - "\n", - " [[0. , 0.00578516, 0. , ..., 0.00748384, 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0.03035110, 0. , 0.00026720],\n", - " [0.00094807, 0. , 0. , ..., 0.00795512, 0. , 0. ],\n", - " ...,\n", - " [0.02032628, 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0.01080076, 0. ],\n", - " [0.18470290, 0. , 0. , ..., 0.05058352, 0.09475817, 0.05914564]],\n", - "\n", - " [[0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " ...,\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ]],\n", - "\n", - " ...,\n", - "\n", - " [[0.38708323, 0.28021947, 0.35892880, ..., 0.16595127, 0.16031364, 0.21136315],\n", - " [0.15595171, 0.30544323, 0.24666184, ..., 0.22675267, 0.25765014, 0.19682154],\n", - " [0.29517862, 0.41209796, 0.20063159, ..., 0.17595036, 0.22536841, 0.22214051],\n", - " ...,\n", - " [0.24744980, 0.26258564, 0.38654143, ..., 0.23620218, 0.23157144, 0.18514194],\n", - " [0.25714791, 0.29592845, 0.47744542, ..., 0.23545510, 0.25072727, 0.20976165],\n", - " [1.20154655, 0.84644288, 0.73385584, ..., 1.02517247, 0.95309550, 1.00134516]],\n", - "\n", - " [[0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " ...,\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ]],\n", - "\n", - " [[0.45013186, 0.47484034, 0.40540054, ..., 0.19346163, 0.17825794, 0.14776605],\n", - " [0.47545874, 0.48186573, 0.36760187, ..., 0.27809089, 0.32997063, 0.32337096],\n", - " [0.46160024, 0.40050328, 0.39060861, ..., 0.36612910, 0.35242686, 0.29738861],\n", - " ...,\n", - " [0.55148494, 0.51017821, 0.40132499, ..., 0.38948193, 0.35737294, 0.33088297],\n", - " [0.41972569, 0.45475486, 0.45320493, ..., 0.38343129, 0.40125814, 0.36180776],\n", - " [0.34279808, 0.31606171, 0.44701228, ..., 0.21665487, 0.23984617, 0.23903391]]],\n", - "\n", - "\n", - " ...,\n", - "\n", - "\n", - " [[[0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " ...,\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ]],\n", - "\n", - " [[0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0.04178291, 0. , 0.01580476, ..., 0. , 0.02250817, 0. ],\n", - " [0.04323414, 0.07786420, 0. , ..., 0.01634724, 0. , 0. ],\n", - " ...,\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. 
],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ]],\n", - "\n", - " [[0.03209178, 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0.13563479, 0. , 0. , ..., 0. , 0. , 0. ],\n", - " ...,\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ]],\n", - "\n", - " ...,\n", - "\n", - " [[0. , 0.25187218, 0.24979387, ..., 0.24774717, 0.22354351, 0.19149347],\n", - " [0.16540922, 0.19585510, 0.19812922, ..., 0.27344131, 0.20928150, 0.26150429],\n", - " [0.10494646, 0.06329897, 0.33843631, ..., 0.25138417, 0.12470355, 0.23926635],\n", - " ...,\n", - " [1.12572610, 0.87340784, 0.78169060, ..., 1.04576325, 1.00935984, 1.02209163],\n", - " [1.12572610, 0.87340784, 0.78169060, ..., 1.04576325, 1.00935984, 1.02209163],\n", - " [1.12572610, 0.87340784, 0.78169060, ..., 1.04576325, 1.00935984, 1.02209163]],\n", - "\n", - " [[0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " ...,\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ]],\n", - "\n", - " [[0.11428106, 0.45667490, 0.46820879, ..., 0.32057840, 0.33578536, 0.39012644],\n", - " [0.10441341, 0.45739070, 0.46107352, ..., 0.38467997, 0.38291249, 0.36685589],\n", - " [0.19867736, 0.35519636, 0.44313061, ..., 0.40679252, 0.38067645, 0.30645671],\n", - " ...,\n", - " [1.44883108, 1.02119160, 0.94472742, ..., 1.23630035, 1.21888959, 1.23804700],\n", - " [1.44883108, 1.02119160, 0.94472742, ..., 1.23630035, 1.21888959, 1.23804700],\n", - " [1.44883108, 1.02119160, 0.94472742, ..., 1.23630035, 1.21888959, 1.23804700]]],\n", - "\n", - "\n", - " [[[0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " ...,\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ]],\n", - "\n", - " [[0.02465414, 0. , 0. , ..., 0. , 0. , 0.03390232],\n", - " [0. , 0. , 0.01830704, ..., 0.05166877, 0.00948385, 0.07453502],\n", - " [0.09921519, 0. , 0.01587192, ..., 0.01620276, 0.05140074, 0.00192392],\n", - " ...,\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ]],\n", - "\n", - " [[0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " ...,\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ]],\n", - "\n", - " ...,\n", - "\n", - " [[0.40034360, 0.25306445, 0.20217699, ..., 0.09816189, 0.07064310, 0.04974059],\n", - " [0.12567598, 0.21030979, 0.11181555, ..., 0.04278110, 0.11968569, 0.12005232],\n", - " [0.28786880, 0.24030517, 0.22565845, ..., 0. , 0.06418110, 0.05872961],\n", - " ...,\n", - " [1.12572610, 0.87340784, 0.78169060, ..., 1.04576325, 1.00935984, 1.02209163],\n", - " [1.12572610, 0.87340784, 0.78169060, ..., 1.04576325, 1.00935984, 1.02209163],\n", - " [1.12572610, 0.87340784, 0.78169060, ..., 1.04576325, 1.00935984, 1.02209163]],\n", - "\n", - " [[0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " ...,\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. 
, ..., 0. , 0. , 0. ]],\n", - "\n", - " [[0.38404641, 0.30990323, 0.37156230, ..., 0.18125033, 0.15050662, 0.19619957],\n", - " [0.47285745, 0.40528792, 0.39718056, ..., 0.24709940, 0.04565683, 0.11500744],\n", - " [0.32620737, 0.30072594, 0.30477354, ..., 0.23529193, 0.21356541, 0.16985542],\n", - " ...,\n", - " [1.44883108, 1.02119160, 0.94472742, ..., 1.23630035, 1.21888959, 1.23804700],\n", - " [1.44883108, 1.02119160, 0.94472742, ..., 1.23630035, 1.21888959, 1.23804700],\n", - " [1.44883108, 1.02119160, 0.94472742, ..., 1.23630035, 1.21888959, 1.23804700]]],\n", - "\n", - "\n", - " [[[0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " ...,\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ]],\n", - "\n", - " [[0.03343770, 0.00123780, 0.05297198, ..., 0.07271163, 0.08656286, 0.14493589],\n", - " [0.11043239, 0.06143146, 0.06362963, ..., 0.08127750, 0.06259022, 0.08315435],\n", - " [0.01767678, 0.00201111, 0.07875030, ..., 0.06963293, 0.08979890, 0.05326346],\n", - " ...,\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ]],\n", - "\n", - " [[0.10033827, 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0.15627117, 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0.05144687, 0. , 0. , ..., 0. , 0. , 0.00436414],\n", - " ...,\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ]],\n", - "\n", - " ...,\n", - "\n", - " [[0.25142455, 0.45964020, 0.37346074, ..., 0.04763087, 0. , 0. ],\n", - " [0.19760093, 0.26626948, 0.11190540, ..., 0.03044968, 0. , 0. ],\n", - " [0.16340607, 0.32938001, 0.25689697, ..., 0.05569421, 0. , 0. ],\n", - " ...,\n", - " [1.12572610, 0.87340784, 0.78169060, ..., 1.04576325, 1.00935984, 1.02209163],\n", - " [1.12572610, 0.87340784, 0.78169060, ..., 1.04576325, 1.00935984, 1.02209163],\n", - " [1.12572610, 0.87340784, 0.78169060, ..., 1.04576325, 1.00935984, 1.02209163]],\n", - "\n", - " [[0. , 0. , 0. , ..., 0. , 0.02218930, 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0.02848953],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " ...,\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. ],\n", - " [0. , 0. , 0. , ..., 0. , 0. , 0. 
]],\n", - "\n", - " [[0.25810039, 0.63016868, 0.37037861, ..., 0.18704373, 0.08269356, 0.09912672],\n", - " [0.17292863, 0.50678611, 0.40738991, ..., 0.16006103, 0.11725381, 0.09940521],\n", - " [0.24175072, 0.41616210, 0.41256818, ..., 0.13519743, 0.07912572, 0.12846369],\n", - " ...,\n", - " [1.44883108, 1.02119160, 0.94472742, ..., 1.23630035, 1.21888959, 1.23804700],\n", - " [1.44883108, 1.02119160, 0.94472742, ..., 1.23630035, 1.21888959, 1.23804700],\n", - " [1.44883108, 1.02119160, 0.94472742, ..., 1.23630035, 1.21888959, 1.23804700]]]])\n" - ] - } - ], - "source": [ - "xs = model.encoder.global_cmvn(feat)\n", - "masks = make_non_pad_mask(feat_len).unsqueeze(1)\n", - "\n", - "\n", - "#xs, pos_emb, masks = model.encoder.embed(xs, masks.type_as(xs), offset=0)\n", - "# print(xs)\n", - "\n", - "x = xs.unsqueeze(1)\n", - "x = model.encoder.embed.conv(x)\n", - "print(x)" - ] - }, - { - "cell_type": "code", - "execution_count": 206, - "id": "friendly-nightlife", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Tensor(shape=[16, 51, 256], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n", - " [[[-0.03426375, 0.14291267, -0.06718873, ..., 0.09064753, 0.01809387, -0.04340880],\n", - " [-0.05007839, 0.11054724, -0.10399298, ..., 0.11457238, 0.04244684, -0.01249714],\n", - " [-0.10695291, 0.16910909, -0.08352133, ..., 0.07710276, 0.01168563, -0.03584499],\n", - " ...,\n", - " [-0.06060536, 0.14455931, -0.05470302, ..., 0.05364908, 0.03033342, -0.02610814],\n", - " [-0.08505894, 0.13611752, -0.11132983, ..., 0.13079923, 0.01580139, -0.02281028],\n", - " [-0.10604677, 0.14714901, -0.10885533, ..., 0.08543444, 0.03719445, -0.04634233]],\n", - "\n", - " [[-0.12392755, 0.14486063, -0.05674079, ..., 0.02573164, 0.03128851, 0.00545091],\n", - " [-0.04775286, 0.08473608, -0.08507854, ..., 0.04573154, 0.04240163, 0.01053247],\n", - " [-0.05940291, 0.10023535, -0.08143730, ..., 0.03596500, 0.01673085, 0.02089563],\n", - " ...,\n", - " [-0.09222981, 0.15823206, -0.07700447, ..., 0.08122957, 0.03136991, -0.00646474],\n", - " [-0.07331756, 0.14482647, -0.07838815, ..., 0.10869440, 0.01356864, -0.02777974],\n", - " [-0.07937264, 0.20143102, -0.05544947, ..., 0.10287814, 0.00608235, -0.04799180]],\n", - "\n", - " [[-0.03670349, 0.08931590, -0.08718812, ..., 0.01314050, 0.00642052, 0.00573716],\n", - " [ 0.01089254, 0.11146393, -0.10263617, ..., 0.05070438, 0.01960694, 0.03521532],\n", - " [-0.02182280, 0.11443964, -0.06678198, ..., 0.04327708, 0.00861394, 0.02871092],\n", - " ...,\n", - " [-0.06792898, 0.14376275, -0.07899005, ..., 0.11248926, 0.03208683, -0.03264240],\n", - " [-0.07884051, 0.17024788, -0.08583611, ..., 0.09028331, 0.03588808, -0.02075090],\n", - " [-0.13792302, 0.27163863, -0.23930418, ..., 0.13391261, 0.07521040, -0.08621951]],\n", - "\n", - " ...,\n", - "\n", - " [[-0.02446348, 0.11595841, -0.03591986, ..., 0.06288970, 0.02895011, -0.06532725],\n", - " [-0.05378424, 0.12607370, -0.09023033, ..., 0.09078894, 0.01035743, 0.03701983],\n", - " [-0.04566649, 0.14275314, -0.06686870, ..., 0.09890588, -0.00612222, 0.03439377],\n", - " ...,\n", - " [-0.31763062, 0.53700209, -0.26335421, ..., 0.39182857, 0.00337184, -0.18293698],\n", - " [-0.31763062, 0.53700209, -0.26335421, ..., 0.39182857, 0.00337184, -0.18293698],\n", - " [-0.31763062, 0.53700209, -0.26335421, ..., 0.39182857, 0.00337184, -0.18293698]],\n", - "\n", - " [[-0.01012144, 0.03909408, -0.07077143, ..., 0.00452683, -0.01377654, 0.02897627],\n", - " [-0.00519154, 0.03594019, 
-0.06831125, ..., 0.05693541, -0.00406374, 0.04561640],\n", - " [-0.01762631, 0.00500899, -0.05886075, ..., 0.02112178, -0.00729015, 0.02782153],\n", - " ...,\n", - " [-0.31763062, 0.53700209, -0.26335421, ..., 0.39182857, 0.00337184, -0.18293698],\n", - " [-0.31763062, 0.53700209, -0.26335421, ..., 0.39182857, 0.00337184, -0.18293698],\n", - " [-0.31763062, 0.53700209, -0.26335421, ..., 0.39182857, 0.00337184, -0.18293698]],\n", - "\n", - " [[-0.03411558, -0.04318277, -0.08497842, ..., -0.04886402, 0.04296734, 0.06151697],\n", - " [ 0.00263296, -0.06913657, -0.08993219, ..., -0.00149064, 0.05696633, 0.03304394],\n", - " [-0.01818341, -0.01178640, -0.09679577, ..., -0.00870231, 0.00362198, 0.01916483],\n", - " ...,\n", - " [-0.31763062, 0.53700209, -0.26335421, ..., 0.39182857, 0.00337184, -0.18293698],\n", - " [-0.31763062, 0.53700209, -0.26335421, ..., 0.39182857, 0.00337184, -0.18293698],\n", - " [-0.31763062, 0.53700209, -0.26335421, ..., 0.39182857, 0.00337184, -0.18293698]]])\n", - "Tensor(shape=[16, 51, 256], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n", - " [[[-0.54821998, 2.28660274, -1.07501972, ..., 1.45036042, 0.28950194, -0.69454080],\n", - " [-0.80125421, 1.76875579, -1.66388774, ..., 1.83315802, 0.67914939, -0.19995420],\n", - " [-1.71124649, 2.70574546, -1.33634126, ..., 1.23364413, 0.18697014, -0.57351983],\n", - " ...,\n", - " [-0.96968573, 2.31294894, -0.87524825, ..., 0.85838526, 0.48533469, -0.41773027],\n", - " [-1.36094308, 2.17788029, -1.78127730, ..., 2.09278774, 0.25282228, -0.36496443],\n", - " [-1.69674826, 2.35438418, -1.74168527, ..., 1.36695099, 0.59511113, -0.74147725]],\n", - "\n", - " [[-1.98284078, 2.31777000, -0.90785271, ..., 0.41170627, 0.50061619, 0.08721463],\n", - " [-0.76404583, 1.35577726, -1.36125672, ..., 0.73170459, 0.67842603, 0.16851945],\n", - " [-0.95044655, 1.60376561, -1.30299675, ..., 0.57544005, 0.26769355, 0.33433008],\n", - " ...,\n", - " [-1.47567701, 2.53171301, -1.23207152, ..., 1.29967308, 0.50191855, -0.10343577],\n", - " [-1.17308092, 2.31722355, -1.25421047, ..., 1.73911047, 0.21709818, -0.44447583],\n", - " [-1.26996231, 3.22289634, -0.88719147, ..., 1.64605021, 0.09731755, -0.76786882]],\n", - "\n", - " [[-0.58725590, 1.42905438, -1.39500988, ..., 0.21024795, 0.10272825, 0.09179455],\n", - " [ 0.17428070, 1.78342295, -1.64217877, ..., 0.81127012, 0.31371105, 0.56344515],\n", - " [-0.34916472, 1.83103430, -1.06851172, ..., 0.69243336, 0.13782299, 0.45937473],\n", - " ...,\n", - " [-1.08686376, 2.30020404, -1.26384079, ..., 1.79982817, 0.51338923, -0.52227837],\n", - " [-1.26144814, 2.72396612, -1.37337780, ..., 1.44453299, 0.57420933, -0.33201432],\n", - " [-2.20676827, 4.34621811, -3.82886696, ..., 2.14260173, 1.20336640, -1.37951219]],\n", - "\n", - " ...,\n", - "\n", - " [[-0.39141566, 1.85533464, -0.57471782, ..., 1.00623512, 0.46320182, -1.04523599],\n", - " [-0.86054784, 2.01717925, -1.44368529, ..., 1.45262301, 0.16571884, 0.59231722],\n", - " [-0.73066384, 2.28405023, -1.06989920, ..., 1.58249414, -0.09795550, 0.55030036],\n", - " ...,\n", - " [-5.08208990, 8.59203339, -4.21366739, ..., 6.26925707, 0.05394945, -2.92699170],\n", - " [-5.08208990, 8.59203339, -4.21366739, ..., 6.26925707, 0.05394945, -2.92699170],\n", - " [-5.08208990, 8.59203339, -4.21366739, ..., 6.26925707, 0.05394945, -2.92699170]],\n", - "\n", - " [[-0.16194311, 0.62550521, -1.13234293, ..., 0.07242929, -0.22042468, 0.46362036],\n", - " [-0.08306468, 0.57504302, -1.09298003, ..., 0.91096652, -0.06501988, 0.72986233],\n", - " 
[-0.28202093, 0.08014385, -0.94177192, ..., 0.33794850, -0.11664233, 0.44514441],\n", - " ...,\n", - " [-5.08208990, 8.59203339, -4.21366739, ..., 6.26925707, 0.05394945, -2.92699170],\n", - " [-5.08208990, 8.59203339, -4.21366739, ..., 6.26925707, 0.05394945, -2.92699170],\n", - " [-5.08208990, 8.59203339, -4.21366739, ..., 6.26925707, 0.05394945, -2.92699170]],\n", - "\n", - " [[-0.54584920, -0.69092435, -1.35965478, ..., -0.78182435, 0.68747747, 0.98427159],\n", - " [ 0.04212743, -1.10618520, -1.43891501, ..., -0.02385022, 0.91146135, 0.52870303],\n", - " [-0.29093450, -0.18858244, -1.54873240, ..., -0.13923697, 0.05795169, 0.30663735],\n", - " ...,\n", - " [-5.08208990, 8.59203339, -4.21366739, ..., 6.26925707, 0.05394945, -2.92699170],\n", - " [-5.08208990, 8.59203339, -4.21366739, ..., 6.26925707, 0.05394945, -2.92699170],\n", - " [-5.08208990, 8.59203339, -4.21366739, ..., 6.26925707, 0.05394945, -2.92699170]]])\n", - "Tensor(shape=[1, 51, 256], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n", - " [[[ 0. , 1. , 0. , ..., 1. , 0. , 1. ],\n", - " [ 0.84147102, 0.54030228, 0.80196184, ..., 1. , 0.00010746, 1. ],\n", - " [ 0.90929747, -0.41614681, 0.95814437, ..., 1. , 0.00021492, 1. ],\n", - " ...,\n", - " [-0.76825470, -0.64014435, 0.63279730, ..., 0.99998462, 0.00515809, 0.99998671],\n", - " [-0.95375264, 0.30059254, 0.99899054, ..., 0.99998397, 0.00526555, 0.99998611],\n", - " [-0.26237485, 0.96496606, 0.56074661, ..., 0.99998331, 0.00537301, 0.99998558]]])\n" - ] - } - ], - "source": [ - "b, c, t, f = paddle.shape(x)\n", - "x = model.encoder.embed.out(x.transpose([0, 2, 1, 3]).reshape([b, t, c * f]))\n", - "print(x)\n", - "x, pos_emb = model.encoder.embed.pos_enc(x, 0)\n", - "print(x)\n", - "print(pos_emb)" - ] - }, - { - "cell_type": "code", - "execution_count": 207, - "id": "guilty-cache", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Tensor(shape=[1, 51, 256], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n", - " [[[ 0. , 1. , 0. , ..., 1. , 0. , 1. ],\n", - " [ 0.84147102, 0.54030228, 0.80196184, ..., 1. , 0.00010746, 1. ],\n", - " [ 0.90929747, -0.41614681, 0.95814437, ..., 1. , 0.00021492, 1. ],\n", - " ...,\n", - " [-0.76825470, -0.64014435, 0.63279730, ..., 0.99998462, 0.00515809, 0.99998671],\n", - " [-0.95375264, 0.30059254, 0.99899054, ..., 0.99998397, 0.00526555, 0.99998611],\n", - " [-0.26237485, 0.96496606, 0.56074661, ..., 0.99998331, 0.00537301, 0.99998558]]])\n" - ] - } - ], - "source": [ - "print(pos_emb)" - ] - }, - { - "cell_type": "code", - "execution_count": 208, - "id": "iraqi-payday", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[[[ 0.0000000e+00 1.0000000e+00 0.0000000e+00 ... 1.0000000e+00\n", - " 0.0000000e+00 1.0000000e+00]\n", - " [ 8.4147096e-01 5.4030234e-01 8.0196178e-01 ... 1.0000000e+00\n", - " 1.0746076e-04 1.0000000e+00]\n", - " [ 9.0929741e-01 -4.1614684e-01 9.5814437e-01 ... 1.0000000e+00\n", - " 2.1492151e-04 1.0000000e+00]\n", - " ...\n", - " [ 9.5625257e-01 -2.9254240e-01 4.8925215e-01 ... 8.3807874e-01\n", - " 5.1154459e-01 8.5925674e-01]\n", - " [ 2.7049953e-01 -9.6272010e-01 9.9170387e-01 ... 8.3801574e-01\n", - " 5.1163691e-01 8.5920173e-01]\n", - " [-6.6394955e-01 -7.4777740e-01 6.9544029e-01 ... 
8.3795273e-01\n", - " 5.1172924e-01 8.5914677e-01]]]\n", - "[1, 5000, 256]\n" - ] - } - ], - "source": [ - "import torch\n", - "import math\n", - "import numpy as np\n", - "\n", - "max_len=5000\n", - "d_model=256\n", - "\n", - "pe = torch.zeros(max_len, d_model)\n", - "position = torch.arange(0, max_len,\n", - " dtype=torch.float32).unsqueeze(1)\n", - "toruch_position = position\n", - "div_term = torch.exp(\n", - " torch.arange(0, d_model, 2, dtype=torch.float32) *\n", - " -(math.log(10000.0) / d_model))\n", - "tourch_div_term = div_term.cpu().detach().numpy()\n", - "\n", - "torhc_sin = torch.sin(position * div_term)\n", - "torhc_cos = torch.cos(position * div_term)\n", - "\n", - "np_sin = np.sin((position * div_term).cpu().detach().numpy())\n", - "np_cos = np.cos((position * div_term).cpu().detach().numpy())\n", - "pe[:, 0::2] = torhc_sin\n", - "pe[:, 1::2] = torhc_cos\n", - "pe = pe.unsqueeze(0) \n", - "tourch_pe = pe.cpu().detach().numpy()\n", - "print(tourch_pe)\n", - "bak_pe = model.encoder.embed.pos_enc.pe\n", - "print(bak_pe.shape)\n", - "model.encoder.embed.pos_enc.pe = paddle.to_tensor(tourch_pe)" - ] - }, - { - "cell_type": "code", - "execution_count": 210, - "id": "exempt-cloud", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "True\n", - "True\n" - ] - } - ], - "source": [ - "xs = model.encoder.global_cmvn(feat)\n", - "masks = make_non_pad_mask(feat_len).unsqueeze(1)\n", - "\n", - "xs, pos_emb, masks = model.encoder.embed(xs, masks.type_as(xs), offset=0)\n", - "#print(xs)\n", - "data = np.load(\".notebook/enc_embed.npz\")\n", - "torch_pos_emb=data['pos_emb']\n", - "torch_xs = data['embed_out']\n", - "print(np.allclose(xs.numpy(), torch_xs))\n", - "print(np.allclose(pos_emb.numpy(), torch_pos_emb))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "composite-involvement", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 269, - "id": "handed-harris", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "True\n", - "True\n", - "True\n", - "True\n", - "True\n", - "True\n", - "True\n", - "False\n", - "True\n", - "[256, 2048]\n", - "[2048]\n", - "[2048, 256]\n", - "[256]\n", - "--------ff-------\n", - "True\n", - "False\n", - "False\n", - "False\n", - "False\n", - "True\n", - "linear_714.w_0 True\n", - "linear_714.b_0 True\n", - "linear_715.w_0 True\n", - "linear_715.b_0 True\n", - "False\n", - "True\n" - ] - } - ], - "source": [ - "xs = model.encoder.global_cmvn(feat)\n", - "masks = make_non_pad_mask(feat_len).unsqueeze(1)\n", - "\n", - "xs, pos_emb, masks = model.encoder.embed(xs, masks.type_as(xs), offset=0)\n", - "masks = masks.astype(paddle.bool)\n", - "mask_pad = masks.logical_not()\n", - "decoding_chunk_size=0\n", - "num_decoding_left_chunks=-1\n", - "chunk_masks = add_optional_chunk_mask(\n", - " xs, masks, model.encoder.use_dynamic_chunk, model.encoder.use_dynamic_left_chunk,\n", - " decoding_chunk_size, model.encoder.static_chunk_size,\n", - " num_decoding_left_chunks)\n", - "\n", - "#print(chunk_masks)\n", - "data = np.load(\".notebook/enc_embed.npz\")\n", - "torch_pos_emb=data['pos_emb']\n", - "torch_xs = data['embed_out']\n", - "torch_chunk_masks = data['chunk_masks']\n", - "torch_mask_pad = data['mask_pad']\n", - "print(np.allclose(xs.numpy(), torch_xs))\n", - "print(np.allclose(pos_emb.numpy(), torch_pos_emb))\n", - "np.testing.assert_equal(chunk_masks.numpy(), torch_chunk_masks)\n", - 
"np.testing.assert_equal(mask_pad.numpy(), ~torch_mask_pad)\n", - "\n", - "for layer in model.encoder.encoders:\n", - " #xs, chunk_masks, _ = layer(xs, chunk_masks, pos_emb, mask_pad)\n", - " print(layer.feed_forward_macaron is not None)\n", - " print(layer.normalize_before)\n", - " \n", - " data = np.load('.notebook/enc_0_norm_ff.npz')\n", - " t_norm_ff = data['norm_ff']\n", - " t_xs = data['xs']\n", - " \n", - " \n", - " x = xs\n", - " print(np.allclose(t_xs, x.numpy()))\n", - " residual = x\n", - " print(np.allclose(t_xs, residual.numpy()))\n", - " x_nrom = layer.norm_ff_macaron(x)\n", - " print(np.allclose(t.numpy(), x_nrom.numpy()))\n", - " print(np.allclose(t_norm_ff, x_nrom.numpy()))\n", - "# for n, p in layer.norm_ff_macaron.state_dict().items():\n", - "# print(n, p)\n", - "# pass\n", - "\n", - " layer.eval()\n", - " x_nrom = paddle.to_tensor(t_norm_ff)\n", - " print(np.allclose(t_norm_ff, x_nrom.numpy()))\n", - " x = residual + layer.ff_scale * layer.feed_forward_macaron(x_nrom)\n", - " \n", - " ps=[]\n", - " for n, p in layer.feed_forward_macaron.state_dict().items():\n", - " #print(n, p)\n", - " ps.append(p)\n", - " print(p.shape)\n", - " pass\n", - "\n", - " x_nrom = paddle.to_tensor(t_norm_ff)\n", - " ff_l_x = layer.feed_forward_macaron.w_1(x_nrom)\n", - " ff_l_a_x = layer.feed_forward_macaron.activation(ff_l_x)\n", - " ff_l_a_l_x = layer.feed_forward_macaron.w_2(ff_l_a_x)\n", - " data = np.load('.notebook/enc_0_ff_out.npz', allow_pickle=True)\n", - " t_norm_ff = data['norm_ff']\n", - " t_ff_out = data['ff_out']\n", - " t_ff_l_x = data['ff_l_x']\n", - " t_ff_l_a_x = data['ff_l_a_x']\n", - " t_ff_l_a_l_x = data['ff_l_a_l_x']\n", - " t_ps = data['ps']\n", - " \n", - " print(\"--------ff-------\")\n", - " print(np.allclose(x_nrom.numpy(), t_norm_ff))\n", - " print(np.allclose(x.numpy(), t_ff_out))\n", - " print(np.allclose(ff_l_x.numpy(), t_ff_l_x))\n", - " print(np.allclose(ff_l_a_x.numpy(), t_ff_l_a_x))\n", - " print(np.allclose(ff_l_a_l_x.numpy(), t_ff_l_a_l_x))\n", - " \n", - " print(np.allclose(ff_l_x.numpy(), t_ff_l_x, atol=1e-6))\n", - " for p, t_p in zip(ps, t_ps):\n", - " print(p.name, np.allclose(p.numpy(), t_p.T))\n", - " \n", - " \n", - "# residual = x\n", - "# x = layer.norm_mha(x)\n", - "# x_q = x\n", - " \n", - " data = np.load('.notebook/enc_0_selattn_out.npz', allow_pickle=True)\n", - " tx_q = data['x_q']\n", - " tx = data['x']\n", - " tpos_emb=data['pos_emb']\n", - " tmask=data['mask']\n", - " tt_x_att=data['x_att']\n", - " x_q = paddle.to_tensor(tx_q)\n", - " x = paddle.to_tensor(tx)\n", - " pos_emb = paddle.to_tensor(tpos_emb)\n", - " mask = paddle.to_tensor(tmask)\n", - " \n", - " x_att = layer.self_attn(x_q, x, x, pos_emb, mask)\n", - " print(np.allclose(x_att.numpy(), t_x_att))\n", - " print(np.allclose(x_att.numpy(), t_x_att, atol=1e-6))\n", - " \n", - " break" - ] - }, - { - "cell_type": "code", - "execution_count": 270, - "id": "sonic-thumb", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "True\n", - "True\n", - "False\n", - "True\n" - ] - } - ], - "source": [ - "xs = model.encoder.global_cmvn(feat)\n", - "masks = make_non_pad_mask(feat_len).unsqueeze(1)\n", - "\n", - "xs, pos_emb, masks = model.encoder.embed(xs, masks.type_as(xs), offset=0)\n", - "masks = masks.astype(paddle.bool)\n", - "mask_pad = masks.logical_not()\n", - "decoding_chunk_size=0\n", - "num_decoding_left_chunks=-1\n", - "chunk_masks = add_optional_chunk_mask(\n", - " xs, masks, model.encoder.use_dynamic_chunk, 
model.encoder.use_dynamic_left_chunk,\n", - " decoding_chunk_size, model.encoder.static_chunk_size,\n", - " num_decoding_left_chunks)\n", - "\n", - "#print(chunk_masks)\n", - "data = np.load(\".notebook/enc_embed.npz\")\n", - "torch_pos_emb=data['pos_emb']\n", - "torch_xs = data['embed_out']\n", - "torch_chunk_masks = data['chunk_masks']\n", - "torch_mask_pad = data['mask_pad']\n", - "print(np.allclose(xs.numpy(), torch_xs))\n", - "print(np.allclose(pos_emb.numpy(), torch_pos_emb))\n", - "np.testing.assert_equal(chunk_masks.numpy(), torch_chunk_masks)\n", - "np.testing.assert_equal(mask_pad.numpy(), ~torch_mask_pad)\n", - "\n", - "\n", - "for layer in model.encoder.encoders:\n", - " xs, chunk_masks, _ = layer(xs, chunk_masks, pos_emb, mask_pad)\n", - " break\n", - "data = np.load('.notebook/enc_0.npz')\n", - "torch_xs = data['enc_0']\n", - "print(np.allclose(xs.numpy(), torch_xs))\n", - "print(np.allclose(xs.numpy(), torch_xs, atol=1e-6))\n" - ] - }, - { - "cell_type": "code", - "execution_count": 273, - "id": "brave-latino", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "True\n", - "True\n", - "--------layers_______\n", - "False\n", - "True\n", - "[[-0.70194244 0.56254214 0.6880346 ... 1.1237319 0.7803924\n", - " 1.1369387 ]\n", - " [-0.7787783 0.3912667 0.71887773 ... 1.251882 0.886168\n", - " 1.3173451 ]\n", - " [-0.95908964 0.6346029 0.87671334 ... 0.98183745 0.7440111\n", - " 1.2903278 ]\n", - " ...\n", - " [-1.0732255 0.67236906 0.92303115 ... 0.9075458 0.8176712\n", - " 1.3239655 ]\n", - " [-1.1654118 0.6819967 0.6939453 ... 1.2238353 0.8028295\n", - " 1.4506507 ]\n", - " [-1.2732092 0.7145806 0.75819594 ... 0.94154835 0.8774845\n", - " 1.2623049 ]]\n", - "xxxxxx\n", - "[[-0.7019424 0.56254166 0.6880345 ... 1.1237322 0.78039217\n", - " 1.1369387 ]\n", - " [-0.778778 0.39126638 0.7188779 ... 1.2518823 0.8861681\n", - " 1.3173454 ]\n", - " [-0.9590891 0.6346026 0.87671363 ... 0.9818373 0.74401116\n", - " 1.2903274 ]\n", - " ...\n", - " [-1.0732253 0.6723689 0.9230311 ... 0.9075457 0.8176713\n", - " 1.3239657 ]\n", - " [-1.165412 0.6819976 0.69394535 ... 1.2238353 0.80282927\n", - " 1.4506509 ]\n", - " [-1.273209 0.71458095 0.75819623 ... 
0.9415484 0.8774842\n", - " 1.2623055 ]]\n" - ] - } - ], - "source": [ - "xs = model.encoder.global_cmvn(feat)\n", - "masks = make_non_pad_mask(feat_len).unsqueeze(1)\n", - "\n", - "xs, pos_emb, masks = model.encoder.embed(xs, masks.type_as(xs), offset=0)\n", - "masks = masks.astype(paddle.bool)\n", - "mask_pad = masks.logical_not()\n", - "decoding_chunk_size=0\n", - "num_decoding_left_chunks=-1\n", - "chunk_masks = add_optional_chunk_mask(\n", - " xs, masks, model.encoder.use_dynamic_chunk, model.encoder.use_dynamic_left_chunk,\n", - " decoding_chunk_size, model.encoder.static_chunk_size,\n", - " num_decoding_left_chunks)\n", - "\n", - "#print(chunk_masks)\n", - "data = np.load(\".notebook/enc_embed.npz\")\n", - "torch_pos_emb=data['pos_emb']\n", - "torch_xs = data['embed_out']\n", - "torch_chunk_masks = data['chunk_masks']\n", - "torch_mask_pad = data['mask_pad']\n", - "print(np.allclose(xs.numpy(), torch_xs))\n", - "print(np.allclose(pos_emb.numpy(), torch_pos_emb))\n", - "np.testing.assert_equal(chunk_masks.numpy(), torch_chunk_masks)\n", - "np.testing.assert_equal(mask_pad.numpy(), ~torch_mask_pad)\n", - "\n", - "print(\"--------layers_______\")\n", - "i =0\n", - "for layer in model.encoder.encoders:\n", - " xs, chunk_masks, _ = layer(xs, chunk_masks, pos_emb, mask_pad)\n", - " i+=1\n", - "# if i == 2:\n", - "# data = np.load('.notebook/enc_2.npz')\n", - "# torch_xs = data['enc_2']\n", - "# print(np.allclose(xs.numpy(), torch_xs))\n", - "# print(np.allclose(xs.numpy(), torch_xs, atol=1e-5))\n", - "# print(xs[0].numpy())\n", - "# print('xxxxxx')\n", - "# print(torch_xs[0])\n", - "# print('----i==2')\n", - "data = np.load('.notebook/enc_all.npz')\n", - "torch_xs = data['enc_all']\n", - "print(np.allclose(xs.numpy(), torch_xs))\n", - "print(np.allclose(xs.numpy(), torch_xs, atol=1e-5))\n", - "print(xs[0].numpy())\n", - "print('xxxxxx')\n", - "print(torch_xs[0])" - ] - }, - { - "cell_type": "code", - "execution_count": 64, - "id": "municipal-stock", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 278, - "id": "macro-season", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[[-0.7019424 0.5625421 0.68803453 ... 1.1237317 0.7803923\n", - " 1.1369386 ]\n", - " [-0.7787783 0.39126673 0.71887773 ... 1.251882 0.886168\n", - " 1.3173451 ]\n", - " [-0.95908964 0.6346029 0.87671334 ... 0.98183745 0.7440111\n", - " 1.2903278 ]\n", - " ...\n", - " [-1.0732255 0.67236906 0.92303115 ... 0.9075458 0.8176712\n", - " 1.3239655 ]\n", - " [-1.1654117 0.68199664 0.6939452 ... 1.2238352 0.8028294\n", - " 1.4506506 ]\n", - " [-1.2732091 0.71458054 0.7581958 ... 0.9415482 0.8774844\n", - " 1.2623048 ]]\n", - "---\n", - "[[-0.7019424 0.56254166 0.6880345 ... 1.1237322 0.78039217\n", - " 1.1369387 ]\n", - " [-0.778778 0.39126638 0.7188779 ... 1.2518823 0.8861681\n", - " 1.3173454 ]\n", - " [-0.9590891 0.6346026 0.87671363 ... 0.9818373 0.74401116\n", - " 1.2903274 ]\n", - " ...\n", - " [-1.0732253 0.6723689 0.9230311 ... 0.9075457 0.8176713\n", - " 1.3239657 ]\n", - " [-1.165412 0.6819976 0.69394535 ... 1.2238353 0.80282927\n", - " 1.4506509 ]\n", - " [-1.2732087 0.71458083 0.7581961 ... 
0.9415482 0.877484\n", - " 1.2623053 ]]\n", - "False\n", - "True\n", - "False\n" - ] - } - ], - "source": [ - "encoder_out, mask = model.encoder(feat, feat_len)\n", - "print(encoder_out.numpy()[0])\n", - "print(\"---\")\n", - "print(torch_encoder_out[0])\n", - "print(np.allclose(torch_encoder_out, encoder_out.numpy()))\n", - "print(np.allclose(torch_encoder_out, encoder_out.numpy(), atol=1e-5))\n", - "print(np.allclose(torch_encoder_out, encoder_out.numpy(), atol=1e-6))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "associate-sampling", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.0" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/.notebook/u2_tansformer_model_espnet.ipynb b/.notebook/u2_tansformer_model_espnet.ipynb deleted file mode 100644 index 75c2ea5c6..000000000 --- a/.notebook/u2_tansformer_model_espnet.ipynb +++ /dev/null @@ -1,1672 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "choice-grade", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "/workspace/DeepSpeech-2.x\n" - ] - }, - { - "data": { - "text/plain": [ - "'/workspace/DeepSpeech-2.x'" - ] - }, - "execution_count": 1, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "%cd ..\n", - "%pwd" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "broke-broad", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/layers/utils.py:26: DeprecationWarning: `np.int` is a deprecated alias for the builtin `int`. To silence this warning, use `int` by itself. Doing this will not modify any behavior and is safe. When replacing `np.int`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. 
If you wish to review your current use, check the release note link for additional information.\n", - "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", - " def convert_to_list(value, n, name, dtype=np.int):\n", - "register user softmax to paddle, remove this when fixed!\n", - "register user log_softmax to paddle, remove this when fixed!\n", - "register user sigmoid to paddle, remove this when fixed!\n", - "register user log_sigmoid to paddle, remove this when fixed!\n", - "register user relu to paddle, remove this when fixed!\n", - "override cat of paddle if exists or register, remove this when fixed!\n", - "override item of paddle.Tensor if exists or register, remove this when fixed!\n", - "override long of paddle.Tensor if exists or register, remove this when fixed!\n", - "override new_full of paddle.Tensor if exists or register, remove this when fixed!\n", - "override eq of paddle.Tensor if exists or register, remove this when fixed!\n", - "override eq of paddle if exists or register, remove this when fixed!\n", - "override contiguous of paddle.Tensor if exists or register, remove this when fixed!\n", - "override size of paddle.Tensor (`to_static` do not process `size` property, maybe some `paddle` api dependent on it), remove this when fixed!\n", - "register user view to paddle.Tensor, remove this when fixed!\n", - "register user view_as to paddle.Tensor, remove this when fixed!\n", - "register user masked_fill to paddle.Tensor, remove this when fixed!\n", - "register user masked_fill_ to paddle.Tensor, remove this when fixed!\n", - "register user fill_ to paddle.Tensor, remove this when fixed!\n", - "register user repeat to paddle.Tensor, remove this when fixed!\n", - "register user softmax to paddle.Tensor, remove this when fixed!\n", - "register user sigmoid to paddle.Tensor, remove this when fixed!\n", - "register user relu to paddle.Tensor, remove this when fixed!\n", - "register user type_as to paddle.Tensor, remove this when fixed!\n", - "register user to to paddle.Tensor, remove this when fixed!\n", - "register user float to paddle.Tensor, remove this when fixed!\n", - "register user tolist to paddle.Tensor, remove this when fixed!\n", - "register user glu to paddle.nn.functional, remove this when fixed!\n", - "override ctc_loss of paddle.nn.functional if exists, remove this when fixed!\n", - "register user Module to paddle.nn, remove this when fixed!\n", - "register user ModuleList to paddle.nn, remove this when fixed!\n", - "register user GLU to paddle.nn, remove this when fixed!\n", - "register user ConstantPad2d to paddle.nn, remove this when fixed!\n", - "register user export to paddle.jit, remove this when fixed!\n" - ] - } - ], - "source": [ - "import numpy as np\n", - "import paddle\n", - "from yacs.config import CfgNode as CN\n", - "\n", - "from deepspeech.models.u2 import U2Model\n", - "from deepspeech.utils.layer_tools import print_params\n", - "from deepspeech.utils.layer_tools import summary" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "permanent-summary", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. 
Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n", - " and should_run_async(code)\n", - "[INFO 2021/05/31 03:23:22 u2.py:839] U2 Encoder type: transformer\n", - "[INFO 2021/05/31 03:23:22 u2.py:840] attention_dropout_rate: 0.0\n", - "attention_heads: 4\n", - "dropout_rate: 0.1\n", - "input_layer: conv2d\n", - "linear_units: 2048\n", - "normalize_before: True\n", - "num_blocks: 12\n", - "output_size: 256\n", - "positional_dropout_rate: 0.1\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "encoder.embed.conv.0.weight | [256, 1, 3, 3] | 2304 | True\n", - "encoder.embed.conv.0.bias | [256] | 256 | True\n", - "encoder.embed.conv.2.weight | [256, 256, 3, 3] | 589824 | True\n", - "encoder.embed.conv.2.bias | [256] | 256 | True\n", - "encoder.embed.out.0.weight | [5120, 256] | 1310720 | True\n", - "encoder.embed.out.0.bias | [256] | 256 | True\n", - "encoder.after_norm.weight | [256] | 256 | True\n", - "encoder.after_norm.bias | [256] | 256 | True\n", - "encoder.encoders.0.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.0.self_attn.linear_q.bias | [256] | 256 | True\n", - "encoder.encoders.0.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.0.self_attn.linear_k.bias | [256] | 256 | True\n", - "encoder.encoders.0.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.0.self_attn.linear_v.bias | [256] | 256 | True\n", - "encoder.encoders.0.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.0.self_attn.linear_out.bias | [256] | 256 | True\n", - "encoder.encoders.0.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.0.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.0.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.0.feed_forward.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.0.norm1.weight | [256] | 256 | True\n", - "encoder.encoders.0.norm1.bias | [256] | 256 | True\n", - "encoder.encoders.0.norm2.weight | [256] | 256 | True\n", - "encoder.encoders.0.norm2.bias | [256] | 256 | True\n", - "encoder.encoders.0.concat_linear.weight | [512, 256] | 131072 | True\n", - "encoder.encoders.0.concat_linear.bias | [256] | 256 | True\n", - "encoder.encoders.1.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.1.self_attn.linear_q.bias | [256] | 256 | True\n", - "encoder.encoders.1.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.1.self_attn.linear_k.bias | [256] | 256 | True\n", - "encoder.encoders.1.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.1.self_attn.linear_v.bias | [256] | 256 | True\n", - "encoder.encoders.1.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.1.self_attn.linear_out.bias | [256] | 256 | True\n", - "encoder.encoders.1.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.1.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.1.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.1.feed_forward.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.1.norm1.weight | [256] | 256 | True\n", - "encoder.encoders.1.norm1.bias | [256] | 256 | True\n", - "encoder.encoders.1.norm2.weight | [256] | 256 | True\n", - "encoder.encoders.1.norm2.bias | [256] | 256 | True\n", - 
"encoder.encoders.1.concat_linear.weight | [512, 256] | 131072 | True\n", - "encoder.encoders.1.concat_linear.bias | [256] | 256 | True\n", - "encoder.encoders.2.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.2.self_attn.linear_q.bias | [256] | 256 | True\n", - "encoder.encoders.2.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.2.self_attn.linear_k.bias | [256] | 256 | True\n", - "encoder.encoders.2.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.2.self_attn.linear_v.bias | [256] | 256 | True\n", - "encoder.encoders.2.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.2.self_attn.linear_out.bias | [256] | 256 | True\n", - "encoder.encoders.2.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.2.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.2.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.2.feed_forward.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.2.norm1.weight | [256] | 256 | True\n", - "encoder.encoders.2.norm1.bias | [256] | 256 | True\n", - "encoder.encoders.2.norm2.weight | [256] | 256 | True\n", - "encoder.encoders.2.norm2.bias | [256] | 256 | True\n", - "encoder.encoders.2.concat_linear.weight | [512, 256] | 131072 | True\n", - "encoder.encoders.2.concat_linear.bias | [256] | 256 | True\n", - "encoder.encoders.3.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.3.self_attn.linear_q.bias | [256] | 256 | True\n", - "encoder.encoders.3.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.3.self_attn.linear_k.bias | [256] | 256 | True\n", - "encoder.encoders.3.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.3.self_attn.linear_v.bias | [256] | 256 | True\n", - "encoder.encoders.3.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.3.self_attn.linear_out.bias | [256] | 256 | True\n", - "encoder.encoders.3.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.3.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.3.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.3.feed_forward.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.3.norm1.weight | [256] | 256 | True\n", - "encoder.encoders.3.norm1.bias | [256] | 256 | True\n", - "encoder.encoders.3.norm2.weight | [256] | 256 | True\n", - "encoder.encoders.3.norm2.bias | [256] | 256 | True\n", - "encoder.encoders.3.concat_linear.weight | [512, 256] | 131072 | True\n", - "encoder.encoders.3.concat_linear.bias | [256] | 256 | True\n", - "encoder.encoders.4.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.4.self_attn.linear_q.bias | [256] | 256 | True\n", - "encoder.encoders.4.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.4.self_attn.linear_k.bias | [256] | 256 | True\n", - "encoder.encoders.4.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.4.self_attn.linear_v.bias | [256] | 256 | True\n", - "encoder.encoders.4.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.4.self_attn.linear_out.bias | [256] | 256 | True\n", - "encoder.encoders.4.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.4.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.4.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - 
"encoder.encoders.4.feed_forward.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.4.norm1.weight | [256] | 256 | True\n", - "encoder.encoders.4.norm1.bias | [256] | 256 | True\n", - "encoder.encoders.4.norm2.weight | [256] | 256 | True\n", - "encoder.encoders.4.norm2.bias | [256] | 256 | True\n", - "encoder.encoders.4.concat_linear.weight | [512, 256] | 131072 | True\n", - "encoder.encoders.4.concat_linear.bias | [256] | 256 | True\n", - "encoder.encoders.5.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.5.self_attn.linear_q.bias | [256] | 256 | True\n", - "encoder.encoders.5.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.5.self_attn.linear_k.bias | [256] | 256 | True\n", - "encoder.encoders.5.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.5.self_attn.linear_v.bias | [256] | 256 | True\n", - "encoder.encoders.5.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.5.self_attn.linear_out.bias | [256] | 256 | True\n", - "encoder.encoders.5.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.5.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.5.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.5.feed_forward.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.5.norm1.weight | [256] | 256 | True\n", - "encoder.encoders.5.norm1.bias | [256] | 256 | True\n", - "encoder.encoders.5.norm2.weight | [256] | 256 | True\n", - "encoder.encoders.5.norm2.bias | [256] | 256 | True\n", - "encoder.encoders.5.concat_linear.weight | [512, 256] | 131072 | True\n", - "encoder.encoders.5.concat_linear.bias | [256] | 256 | True\n", - "encoder.encoders.6.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.6.self_attn.linear_q.bias | [256] | 256 | True\n", - "encoder.encoders.6.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.6.self_attn.linear_k.bias | [256] | 256 | True\n", - "encoder.encoders.6.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.6.self_attn.linear_v.bias | [256] | 256 | True\n", - "encoder.encoders.6.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.6.self_attn.linear_out.bias | [256] | 256 | True\n", - "encoder.encoders.6.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.6.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.6.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.6.feed_forward.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.6.norm1.weight | [256] | 256 | True\n", - "encoder.encoders.6.norm1.bias | [256] | 256 | True\n", - "encoder.encoders.6.norm2.weight | [256] | 256 | True\n", - "encoder.encoders.6.norm2.bias | [256] | 256 | True\n", - "encoder.encoders.6.concat_linear.weight | [512, 256] | 131072 | True\n", - "encoder.encoders.6.concat_linear.bias | [256] | 256 | True\n", - "encoder.encoders.7.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.7.self_attn.linear_q.bias | [256] | 256 | True\n", - "encoder.encoders.7.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.7.self_attn.linear_k.bias | [256] | 256 | True\n", - "encoder.encoders.7.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.7.self_attn.linear_v.bias | [256] | 256 | True\n", - "encoder.encoders.7.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - 
"encoder.encoders.7.self_attn.linear_out.bias | [256] | 256 | True\n", - "encoder.encoders.7.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.7.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.7.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.7.feed_forward.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.7.norm1.weight | [256] | 256 | True\n", - "encoder.encoders.7.norm1.bias | [256] | 256 | True\n", - "encoder.encoders.7.norm2.weight | [256] | 256 | True\n", - "encoder.encoders.7.norm2.bias | [256] | 256 | True\n", - "encoder.encoders.7.concat_linear.weight | [512, 256] | 131072 | True\n", - "encoder.encoders.7.concat_linear.bias | [256] | 256 | True\n", - "encoder.encoders.8.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.8.self_attn.linear_q.bias | [256] | 256 | True\n", - "encoder.encoders.8.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.8.self_attn.linear_k.bias | [256] | 256 | True\n", - "encoder.encoders.8.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.8.self_attn.linear_v.bias | [256] | 256 | True\n", - "encoder.encoders.8.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.8.self_attn.linear_out.bias | [256] | 256 | True\n", - "encoder.encoders.8.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.8.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.8.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.8.feed_forward.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.8.norm1.weight | [256] | 256 | True\n", - "encoder.encoders.8.norm1.bias | [256] | 256 | True\n", - "encoder.encoders.8.norm2.weight | [256] | 256 | True\n", - "encoder.encoders.8.norm2.bias | [256] | 256 | True\n", - "encoder.encoders.8.concat_linear.weight | [512, 256] | 131072 | True\n", - "encoder.encoders.8.concat_linear.bias | [256] | 256 | True\n", - "encoder.encoders.9.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.9.self_attn.linear_q.bias | [256] | 256 | True\n", - "encoder.encoders.9.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.9.self_attn.linear_k.bias | [256] | 256 | True\n", - "encoder.encoders.9.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.9.self_attn.linear_v.bias | [256] | 256 | True\n", - "encoder.encoders.9.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.9.self_attn.linear_out.bias | [256] | 256 | True\n", - "encoder.encoders.9.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.9.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.9.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.9.feed_forward.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.9.norm1.weight | [256] | 256 | True\n", - "encoder.encoders.9.norm1.bias | [256] | 256 | True\n", - "encoder.encoders.9.norm2.weight | [256] | 256 | True\n", - "encoder.encoders.9.norm2.bias | [256] | 256 | True\n", - "encoder.encoders.9.concat_linear.weight | [512, 256] | 131072 | True\n", - "encoder.encoders.9.concat_linear.bias | [256] | 256 | True\n", - "encoder.encoders.10.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.10.self_attn.linear_q.bias | [256] | 256 | True\n", - "encoder.encoders.10.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", 
- "encoder.encoders.10.self_attn.linear_k.bias | [256] | 256 | True\n", - "encoder.encoders.10.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.10.self_attn.linear_v.bias | [256] | 256 | True\n", - "encoder.encoders.10.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.10.self_attn.linear_out.bias | [256] | 256 | True\n", - "encoder.encoders.10.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.10.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.10.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.10.feed_forward.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.10.norm1.weight | [256] | 256 | True\n", - "encoder.encoders.10.norm1.bias | [256] | 256 | True\n", - "encoder.encoders.10.norm2.weight | [256] | 256 | True\n", - "encoder.encoders.10.norm2.bias | [256] | 256 | True\n", - "encoder.encoders.10.concat_linear.weight | [512, 256] | 131072 | True\n", - "encoder.encoders.10.concat_linear.bias | [256] | 256 | True\n", - "encoder.encoders.11.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.11.self_attn.linear_q.bias | [256] | 256 | True\n", - "encoder.encoders.11.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.11.self_attn.linear_k.bias | [256] | 256 | True\n", - "encoder.encoders.11.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.11.self_attn.linear_v.bias | [256] | 256 | True\n", - "encoder.encoders.11.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "encoder.encoders.11.self_attn.linear_out.bias | [256] | 256 | True\n", - "encoder.encoders.11.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "encoder.encoders.11.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "encoder.encoders.11.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "encoder.encoders.11.feed_forward.w_2.bias | [256] | 256 | True\n", - "encoder.encoders.11.norm1.weight | [256] | 256 | True\n", - "encoder.encoders.11.norm1.bias | [256] | 256 | True\n", - "encoder.encoders.11.norm2.weight | [256] | 256 | True\n", - "encoder.encoders.11.norm2.bias | [256] | 256 | True\n", - "encoder.encoders.11.concat_linear.weight | [512, 256] | 131072 | True\n", - "encoder.encoders.11.concat_linear.bias | [256] | 256 | True\n", - "decoder.embed.0.weight | [4233, 256] | 1083648 | True\n", - "decoder.after_norm.weight | [256] | 256 | True\n", - "decoder.after_norm.bias | [256] | 256 | True\n", - "decoder.output_layer.weight | [256, 4233] | 1083648 | True\n", - "decoder.output_layer.bias | [4233] | 4233 | True\n", - "decoder.decoders.0.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.0.self_attn.linear_q.bias | [256] | 256 | True\n", - "decoder.decoders.0.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.0.self_attn.linear_k.bias | [256] | 256 | True\n", - "decoder.decoders.0.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.0.self_attn.linear_v.bias | [256] | 256 | True\n", - "decoder.decoders.0.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.0.self_attn.linear_out.bias | [256] | 256 | True\n", - "decoder.decoders.0.src_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.0.src_attn.linear_q.bias | [256] | 256 | True\n", - "decoder.decoders.0.src_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.0.src_attn.linear_k.bias | [256] | 
256 | True\n", - "decoder.decoders.0.src_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.0.src_attn.linear_v.bias | [256] | 256 | True\n", - "decoder.decoders.0.src_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.0.src_attn.linear_out.bias | [256] | 256 | True\n", - "decoder.decoders.0.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "decoder.decoders.0.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "decoder.decoders.0.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "decoder.decoders.0.feed_forward.w_2.bias | [256] | 256 | True\n", - "decoder.decoders.0.norm1.weight | [256] | 256 | True\n", - "decoder.decoders.0.norm1.bias | [256] | 256 | True\n", - "decoder.decoders.0.norm2.weight | [256] | 256 | True\n", - "decoder.decoders.0.norm2.bias | [256] | 256 | True\n", - "decoder.decoders.0.norm3.weight | [256] | 256 | True\n", - "decoder.decoders.0.norm3.bias | [256] | 256 | True\n", - "decoder.decoders.0.concat_linear1.weight | [512, 256] | 131072 | True\n", - "decoder.decoders.0.concat_linear1.bias | [256] | 256 | True\n", - "decoder.decoders.0.concat_linear2.weight | [512, 256] | 131072 | True\n", - "decoder.decoders.0.concat_linear2.bias | [256] | 256 | True\n", - "decoder.decoders.1.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.1.self_attn.linear_q.bias | [256] | 256 | True\n", - "decoder.decoders.1.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.1.self_attn.linear_k.bias | [256] | 256 | True\n", - "decoder.decoders.1.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.1.self_attn.linear_v.bias | [256] | 256 | True\n", - "decoder.decoders.1.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.1.self_attn.linear_out.bias | [256] | 256 | True\n", - "decoder.decoders.1.src_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.1.src_attn.linear_q.bias | [256] | 256 | True\n", - "decoder.decoders.1.src_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.1.src_attn.linear_k.bias | [256] | 256 | True\n", - "decoder.decoders.1.src_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.1.src_attn.linear_v.bias | [256] | 256 | True\n", - "decoder.decoders.1.src_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.1.src_attn.linear_out.bias | [256] | 256 | True\n", - "decoder.decoders.1.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "decoder.decoders.1.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "decoder.decoders.1.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "decoder.decoders.1.feed_forward.w_2.bias | [256] | 256 | True\n", - "decoder.decoders.1.norm1.weight | [256] | 256 | True\n", - "decoder.decoders.1.norm1.bias | [256] | 256 | True\n", - "decoder.decoders.1.norm2.weight | [256] | 256 | True\n", - "decoder.decoders.1.norm2.bias | [256] | 256 | True\n", - "decoder.decoders.1.norm3.weight | [256] | 256 | True\n", - "decoder.decoders.1.norm3.bias | [256] | 256 | True\n", - "decoder.decoders.1.concat_linear1.weight | [512, 256] | 131072 | True\n", - "decoder.decoders.1.concat_linear1.bias | [256] | 256 | True\n", - "decoder.decoders.1.concat_linear2.weight | [512, 256] | 131072 | True\n", - "decoder.decoders.1.concat_linear2.bias | [256] | 256 | True\n", - "decoder.decoders.2.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.2.self_attn.linear_q.bias | [256] | 256 | 
True\n", - "decoder.decoders.2.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.2.self_attn.linear_k.bias | [256] | 256 | True\n", - "decoder.decoders.2.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.2.self_attn.linear_v.bias | [256] | 256 | True\n", - "decoder.decoders.2.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.2.self_attn.linear_out.bias | [256] | 256 | True\n", - "decoder.decoders.2.src_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.2.src_attn.linear_q.bias | [256] | 256 | True\n", - "decoder.decoders.2.src_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.2.src_attn.linear_k.bias | [256] | 256 | True\n", - "decoder.decoders.2.src_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.2.src_attn.linear_v.bias | [256] | 256 | True\n", - "decoder.decoders.2.src_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.2.src_attn.linear_out.bias | [256] | 256 | True\n", - "decoder.decoders.2.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "decoder.decoders.2.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "decoder.decoders.2.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "decoder.decoders.2.feed_forward.w_2.bias | [256] | 256 | True\n", - "decoder.decoders.2.norm1.weight | [256] | 256 | True\n", - "decoder.decoders.2.norm1.bias | [256] | 256 | True\n", - "decoder.decoders.2.norm2.weight | [256] | 256 | True\n", - "decoder.decoders.2.norm2.bias | [256] | 256 | True\n", - "decoder.decoders.2.norm3.weight | [256] | 256 | True\n", - "decoder.decoders.2.norm3.bias | [256] | 256 | True\n", - "decoder.decoders.2.concat_linear1.weight | [512, 256] | 131072 | True\n", - "decoder.decoders.2.concat_linear1.bias | [256] | 256 | True\n", - "decoder.decoders.2.concat_linear2.weight | [512, 256] | 131072 | True\n", - "decoder.decoders.2.concat_linear2.bias | [256] | 256 | True\n", - "decoder.decoders.3.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.3.self_attn.linear_q.bias | [256] | 256 | True\n", - "decoder.decoders.3.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.3.self_attn.linear_k.bias | [256] | 256 | True\n", - "decoder.decoders.3.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.3.self_attn.linear_v.bias | [256] | 256 | True\n", - "decoder.decoders.3.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.3.self_attn.linear_out.bias | [256] | 256 | True\n", - "decoder.decoders.3.src_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.3.src_attn.linear_q.bias | [256] | 256 | True\n", - "decoder.decoders.3.src_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.3.src_attn.linear_k.bias | [256] | 256 | True\n", - "decoder.decoders.3.src_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.3.src_attn.linear_v.bias | [256] | 256 | True\n", - "decoder.decoders.3.src_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.3.src_attn.linear_out.bias | [256] | 256 | True\n", - "decoder.decoders.3.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "decoder.decoders.3.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "decoder.decoders.3.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "decoder.decoders.3.feed_forward.w_2.bias | [256] | 256 | True\n", - "decoder.decoders.3.norm1.weight | 
[256] | 256 | True\n", - "decoder.decoders.3.norm1.bias | [256] | 256 | True\n", - "decoder.decoders.3.norm2.weight | [256] | 256 | True\n", - "decoder.decoders.3.norm2.bias | [256] | 256 | True\n", - "decoder.decoders.3.norm3.weight | [256] | 256 | True\n", - "decoder.decoders.3.norm3.bias | [256] | 256 | True\n", - "decoder.decoders.3.concat_linear1.weight | [512, 256] | 131072 | True\n", - "decoder.decoders.3.concat_linear1.bias | [256] | 256 | True\n", - "decoder.decoders.3.concat_linear2.weight | [512, 256] | 131072 | True\n", - "decoder.decoders.3.concat_linear2.bias | [256] | 256 | True\n", - "decoder.decoders.4.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.4.self_attn.linear_q.bias | [256] | 256 | True\n", - "decoder.decoders.4.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.4.self_attn.linear_k.bias | [256] | 256 | True\n", - "decoder.decoders.4.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.4.self_attn.linear_v.bias | [256] | 256 | True\n", - "decoder.decoders.4.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.4.self_attn.linear_out.bias | [256] | 256 | True\n", - "decoder.decoders.4.src_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.4.src_attn.linear_q.bias | [256] | 256 | True\n", - "decoder.decoders.4.src_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.4.src_attn.linear_k.bias | [256] | 256 | True\n", - "decoder.decoders.4.src_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.4.src_attn.linear_v.bias | [256] | 256 | True\n", - "decoder.decoders.4.src_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.4.src_attn.linear_out.bias | [256] | 256 | True\n", - "decoder.decoders.4.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "decoder.decoders.4.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "decoder.decoders.4.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "decoder.decoders.4.feed_forward.w_2.bias | [256] | 256 | True\n", - "decoder.decoders.4.norm1.weight | [256] | 256 | True\n", - "decoder.decoders.4.norm1.bias | [256] | 256 | True\n", - "decoder.decoders.4.norm2.weight | [256] | 256 | True\n", - "decoder.decoders.4.norm2.bias | [256] | 256 | True\n", - "decoder.decoders.4.norm3.weight | [256] | 256 | True\n", - "decoder.decoders.4.norm3.bias | [256] | 256 | True\n", - "decoder.decoders.4.concat_linear1.weight | [512, 256] | 131072 | True\n", - "decoder.decoders.4.concat_linear1.bias | [256] | 256 | True\n", - "decoder.decoders.4.concat_linear2.weight | [512, 256] | 131072 | True\n", - "decoder.decoders.4.concat_linear2.bias | [256] | 256 | True\n", - "decoder.decoders.5.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.5.self_attn.linear_q.bias | [256] | 256 | True\n", - "decoder.decoders.5.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.5.self_attn.linear_k.bias | [256] | 256 | True\n", - "decoder.decoders.5.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.5.self_attn.linear_v.bias | [256] | 256 | True\n", - "decoder.decoders.5.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.5.self_attn.linear_out.bias | [256] | 256 | True\n", - "decoder.decoders.5.src_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.5.src_attn.linear_q.bias | [256] | 256 | True\n", - 
"decoder.decoders.5.src_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.5.src_attn.linear_k.bias | [256] | 256 | True\n", - "decoder.decoders.5.src_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.5.src_attn.linear_v.bias | [256] | 256 | True\n", - "decoder.decoders.5.src_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "decoder.decoders.5.src_attn.linear_out.bias | [256] | 256 | True\n", - "decoder.decoders.5.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "decoder.decoders.5.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "decoder.decoders.5.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "decoder.decoders.5.feed_forward.w_2.bias | [256] | 256 | True\n", - "decoder.decoders.5.norm1.weight | [256] | 256 | True\n", - "decoder.decoders.5.norm1.bias | [256] | 256 | True\n", - "decoder.decoders.5.norm2.weight | [256] | 256 | True\n", - "decoder.decoders.5.norm2.bias | [256] | 256 | True\n", - "decoder.decoders.5.norm3.weight | [256] | 256 | True\n", - "decoder.decoders.5.norm3.bias | [256] | 256 | True\n", - "decoder.decoders.5.concat_linear1.weight | [512, 256] | 131072 | True\n", - "decoder.decoders.5.concat_linear1.bias | [256] | 256 | True\n", - "decoder.decoders.5.concat_linear2.weight | [512, 256] | 131072 | True\n", - "decoder.decoders.5.concat_linear2.bias | [256] | 256 | True\n", - "ctc.ctc_lo.weight | [256, 4233] | 1083648 | True\n", - "ctc.ctc_lo.bias | [4233] | 4233 | True\n", - "Total parameters: 411.0, 32.01M elements.\n" - ] - } - ], - "source": [ - "conf_str='examples/tiny/s1/conf/transformer.yaml'\n", - "cfg = CN().load_cfg(open(conf_str))\n", - "cfg.model.input_dim = 83\n", - "cfg.model.output_dim = 4233\n", - "cfg.model.cmvn_file = None\n", - "cfg.model.cmvn_file_type = 'json'\n", - "#cfg.model.encoder_conf.concat_after=True\n", - "cfg.freeze()\n", - "model = U2Model(cfg.model)\n", - "\n", - "print_params(model)\n" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "sapphire-agent", - "metadata": {}, - "outputs": [], - "source": [ - "#summary(model)\n", - "#print(model)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ruled-invitation", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "fossil-means", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "embed.npz feat.npz l1.npz l11.npz l3.npz l5.npz l7.npz l9.npz\r\n", - "encoder.npz l0.npz l10.npz l2.npz l4.npz l6.npz l8.npz model.npz\r\n" - ] - } - ], - "source": [ - "%ls .notebook/espnet" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "45c2b75f", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "state\n", - "odict_keys(['mask_feature', 'encoder.embed.conv.0.weight', 'encoder.embed.conv.0.bias', 'encoder.embed.conv.2.weight', 'encoder.embed.conv.2.bias', 'encoder.embed.out.0.weight', 'encoder.embed.out.0.bias', 'encoder.encoders.0.self_attn.linear_q.weight', 'encoder.encoders.0.self_attn.linear_q.bias', 'encoder.encoders.0.self_attn.linear_k.weight', 'encoder.encoders.0.self_attn.linear_k.bias', 'encoder.encoders.0.self_attn.linear_v.weight', 'encoder.encoders.0.self_attn.linear_v.bias', 'encoder.encoders.0.self_attn.linear_out.weight', 'encoder.encoders.0.self_attn.linear_out.bias', 'encoder.encoders.0.feed_forward.w_1.weight', 'encoder.encoders.0.feed_forward.w_1.bias', 'encoder.encoders.0.feed_forward.w_2.weight', 
'encoder.encoders.0.feed_forward.w_2.bias', 'encoder.encoders.0.norm1.weight', 'encoder.encoders.0.norm1.bias', 'encoder.encoders.0.norm2.weight', 'encoder.encoders.0.norm2.bias', 'encoder.encoders.1.self_attn.linear_q.weight', 'encoder.encoders.1.self_attn.linear_q.bias', 'encoder.encoders.1.self_attn.linear_k.weight', 'encoder.encoders.1.self_attn.linear_k.bias', 'encoder.encoders.1.self_attn.linear_v.weight', 'encoder.encoders.1.self_attn.linear_v.bias', 'encoder.encoders.1.self_attn.linear_out.weight', 'encoder.encoders.1.self_attn.linear_out.bias', 'encoder.encoders.1.feed_forward.w_1.weight', 'encoder.encoders.1.feed_forward.w_1.bias', 'encoder.encoders.1.feed_forward.w_2.weight', 'encoder.encoders.1.feed_forward.w_2.bias', 'encoder.encoders.1.norm1.weight', 'encoder.encoders.1.norm1.bias', 'encoder.encoders.1.norm2.weight', 'encoder.encoders.1.norm2.bias', 'encoder.encoders.2.self_attn.linear_q.weight', 'encoder.encoders.2.self_attn.linear_q.bias', 'encoder.encoders.2.self_attn.linear_k.weight', 'encoder.encoders.2.self_attn.linear_k.bias', 'encoder.encoders.2.self_attn.linear_v.weight', 'encoder.encoders.2.self_attn.linear_v.bias', 'encoder.encoders.2.self_attn.linear_out.weight', 'encoder.encoders.2.self_attn.linear_out.bias', 'encoder.encoders.2.feed_forward.w_1.weight', 'encoder.encoders.2.feed_forward.w_1.bias', 'encoder.encoders.2.feed_forward.w_2.weight', 'encoder.encoders.2.feed_forward.w_2.bias', 'encoder.encoders.2.norm1.weight', 'encoder.encoders.2.norm1.bias', 'encoder.encoders.2.norm2.weight', 'encoder.encoders.2.norm2.bias', 'encoder.encoders.3.self_attn.linear_q.weight', 'encoder.encoders.3.self_attn.linear_q.bias', 'encoder.encoders.3.self_attn.linear_k.weight', 'encoder.encoders.3.self_attn.linear_k.bias', 'encoder.encoders.3.self_attn.linear_v.weight', 'encoder.encoders.3.self_attn.linear_v.bias', 'encoder.encoders.3.self_attn.linear_out.weight', 'encoder.encoders.3.self_attn.linear_out.bias', 'encoder.encoders.3.feed_forward.w_1.weight', 'encoder.encoders.3.feed_forward.w_1.bias', 'encoder.encoders.3.feed_forward.w_2.weight', 'encoder.encoders.3.feed_forward.w_2.bias', 'encoder.encoders.3.norm1.weight', 'encoder.encoders.3.norm1.bias', 'encoder.encoders.3.norm2.weight', 'encoder.encoders.3.norm2.bias', 'encoder.encoders.4.self_attn.linear_q.weight', 'encoder.encoders.4.self_attn.linear_q.bias', 'encoder.encoders.4.self_attn.linear_k.weight', 'encoder.encoders.4.self_attn.linear_k.bias', 'encoder.encoders.4.self_attn.linear_v.weight', 'encoder.encoders.4.self_attn.linear_v.bias', 'encoder.encoders.4.self_attn.linear_out.weight', 'encoder.encoders.4.self_attn.linear_out.bias', 'encoder.encoders.4.feed_forward.w_1.weight', 'encoder.encoders.4.feed_forward.w_1.bias', 'encoder.encoders.4.feed_forward.w_2.weight', 'encoder.encoders.4.feed_forward.w_2.bias', 'encoder.encoders.4.norm1.weight', 'encoder.encoders.4.norm1.bias', 'encoder.encoders.4.norm2.weight', 'encoder.encoders.4.norm2.bias', 'encoder.encoders.5.self_attn.linear_q.weight', 'encoder.encoders.5.self_attn.linear_q.bias', 'encoder.encoders.5.self_attn.linear_k.weight', 'encoder.encoders.5.self_attn.linear_k.bias', 'encoder.encoders.5.self_attn.linear_v.weight', 'encoder.encoders.5.self_attn.linear_v.bias', 'encoder.encoders.5.self_attn.linear_out.weight', 'encoder.encoders.5.self_attn.linear_out.bias', 'encoder.encoders.5.feed_forward.w_1.weight', 'encoder.encoders.5.feed_forward.w_1.bias', 'encoder.encoders.5.feed_forward.w_2.weight', 'encoder.encoders.5.feed_forward.w_2.bias', 
'encoder.encoders.5.norm1.weight', 'encoder.encoders.5.norm1.bias', 'encoder.encoders.5.norm2.weight', 'encoder.encoders.5.norm2.bias', 'encoder.encoders.6.self_attn.linear_q.weight', 'encoder.encoders.6.self_attn.linear_q.bias', 'encoder.encoders.6.self_attn.linear_k.weight', 'encoder.encoders.6.self_attn.linear_k.bias', 'encoder.encoders.6.self_attn.linear_v.weight', 'encoder.encoders.6.self_attn.linear_v.bias', 'encoder.encoders.6.self_attn.linear_out.weight', 'encoder.encoders.6.self_attn.linear_out.bias', 'encoder.encoders.6.feed_forward.w_1.weight', 'encoder.encoders.6.feed_forward.w_1.bias', 'encoder.encoders.6.feed_forward.w_2.weight', 'encoder.encoders.6.feed_forward.w_2.bias', 'encoder.encoders.6.norm1.weight', 'encoder.encoders.6.norm1.bias', 'encoder.encoders.6.norm2.weight', 'encoder.encoders.6.norm2.bias', 'encoder.encoders.7.self_attn.linear_q.weight', 'encoder.encoders.7.self_attn.linear_q.bias', 'encoder.encoders.7.self_attn.linear_k.weight', 'encoder.encoders.7.self_attn.linear_k.bias', 'encoder.encoders.7.self_attn.linear_v.weight', 'encoder.encoders.7.self_attn.linear_v.bias', 'encoder.encoders.7.self_attn.linear_out.weight', 'encoder.encoders.7.self_attn.linear_out.bias', 'encoder.encoders.7.feed_forward.w_1.weight', 'encoder.encoders.7.feed_forward.w_1.bias', 'encoder.encoders.7.feed_forward.w_2.weight', 'encoder.encoders.7.feed_forward.w_2.bias', 'encoder.encoders.7.norm1.weight', 'encoder.encoders.7.norm1.bias', 'encoder.encoders.7.norm2.weight', 'encoder.encoders.7.norm2.bias', 'encoder.encoders.8.self_attn.linear_q.weight', 'encoder.encoders.8.self_attn.linear_q.bias', 'encoder.encoders.8.self_attn.linear_k.weight', 'encoder.encoders.8.self_attn.linear_k.bias', 'encoder.encoders.8.self_attn.linear_v.weight', 'encoder.encoders.8.self_attn.linear_v.bias', 'encoder.encoders.8.self_attn.linear_out.weight', 'encoder.encoders.8.self_attn.linear_out.bias', 'encoder.encoders.8.feed_forward.w_1.weight', 'encoder.encoders.8.feed_forward.w_1.bias', 'encoder.encoders.8.feed_forward.w_2.weight', 'encoder.encoders.8.feed_forward.w_2.bias', 'encoder.encoders.8.norm1.weight', 'encoder.encoders.8.norm1.bias', 'encoder.encoders.8.norm2.weight', 'encoder.encoders.8.norm2.bias', 'encoder.encoders.9.self_attn.linear_q.weight', 'encoder.encoders.9.self_attn.linear_q.bias', 'encoder.encoders.9.self_attn.linear_k.weight', 'encoder.encoders.9.self_attn.linear_k.bias', 'encoder.encoders.9.self_attn.linear_v.weight', 'encoder.encoders.9.self_attn.linear_v.bias', 'encoder.encoders.9.self_attn.linear_out.weight', 'encoder.encoders.9.self_attn.linear_out.bias', 'encoder.encoders.9.feed_forward.w_1.weight', 'encoder.encoders.9.feed_forward.w_1.bias', 'encoder.encoders.9.feed_forward.w_2.weight', 'encoder.encoders.9.feed_forward.w_2.bias', 'encoder.encoders.9.norm1.weight', 'encoder.encoders.9.norm1.bias', 'encoder.encoders.9.norm2.weight', 'encoder.encoders.9.norm2.bias', 'encoder.encoders.10.self_attn.linear_q.weight', 'encoder.encoders.10.self_attn.linear_q.bias', 'encoder.encoders.10.self_attn.linear_k.weight', 'encoder.encoders.10.self_attn.linear_k.bias', 'encoder.encoders.10.self_attn.linear_v.weight', 'encoder.encoders.10.self_attn.linear_v.bias', 'encoder.encoders.10.self_attn.linear_out.weight', 'encoder.encoders.10.self_attn.linear_out.bias', 'encoder.encoders.10.feed_forward.w_1.weight', 'encoder.encoders.10.feed_forward.w_1.bias', 'encoder.encoders.10.feed_forward.w_2.weight', 'encoder.encoders.10.feed_forward.w_2.bias', 'encoder.encoders.10.norm1.weight', 
'encoder.encoders.10.norm1.bias', 'encoder.encoders.10.norm2.weight', 'encoder.encoders.10.norm2.bias', 'encoder.encoders.11.self_attn.linear_q.weight', 'encoder.encoders.11.self_attn.linear_q.bias', 'encoder.encoders.11.self_attn.linear_k.weight', 'encoder.encoders.11.self_attn.linear_k.bias', 'encoder.encoders.11.self_attn.linear_v.weight', 'encoder.encoders.11.self_attn.linear_v.bias', 'encoder.encoders.11.self_attn.linear_out.weight', 'encoder.encoders.11.self_attn.linear_out.bias', 'encoder.encoders.11.feed_forward.w_1.weight', 'encoder.encoders.11.feed_forward.w_1.bias', 'encoder.encoders.11.feed_forward.w_2.weight', 'encoder.encoders.11.feed_forward.w_2.bias', 'encoder.encoders.11.norm1.weight', 'encoder.encoders.11.norm1.bias', 'encoder.encoders.11.norm2.weight', 'encoder.encoders.11.norm2.bias', 'encoder.after_norm.weight', 'encoder.after_norm.bias', 'decoder.embed.0.weight', 'decoder.decoders.0.self_attn.linear_q.weight', 'decoder.decoders.0.self_attn.linear_q.bias', 'decoder.decoders.0.self_attn.linear_k.weight', 'decoder.decoders.0.self_attn.linear_k.bias', 'decoder.decoders.0.self_attn.linear_v.weight', 'decoder.decoders.0.self_attn.linear_v.bias', 'decoder.decoders.0.self_attn.linear_out.weight', 'decoder.decoders.0.self_attn.linear_out.bias', 'decoder.decoders.0.src_attn.linear_q.weight', 'decoder.decoders.0.src_attn.linear_q.bias', 'decoder.decoders.0.src_attn.linear_k.weight', 'decoder.decoders.0.src_attn.linear_k.bias', 'decoder.decoders.0.src_attn.linear_v.weight', 'decoder.decoders.0.src_attn.linear_v.bias', 'decoder.decoders.0.src_attn.linear_out.weight', 'decoder.decoders.0.src_attn.linear_out.bias', 'decoder.decoders.0.feed_forward.w_1.weight', 'decoder.decoders.0.feed_forward.w_1.bias', 'decoder.decoders.0.feed_forward.w_2.weight', 'decoder.decoders.0.feed_forward.w_2.bias', 'decoder.decoders.0.norm1.weight', 'decoder.decoders.0.norm1.bias', 'decoder.decoders.0.norm2.weight', 'decoder.decoders.0.norm2.bias', 'decoder.decoders.0.norm3.weight', 'decoder.decoders.0.norm3.bias', 'decoder.after_norm.weight', 'decoder.after_norm.bias', 'decoder.output_layer.weight', 'decoder.output_layer.bias', 'sfc.weight', 'sfc.bias', 'deconv.0.weight', 'deconv.0.bias', 'deconv.1.weight', 'deconv.1.bias', 'xlm_embed.0.weight', 'xlm_pred.weight', 'xlm_pred.bias'])\n" - ] - } - ], - "source": [ - "#!pip install torch\n", - "import torch\n", - "\n", - "e_model = np.load('.notebook/espnet/model.npz',allow_pickle=True)\n", - "for k in e_model.files:\n", - " print(k)\n", - "state_dict = e_model['state']\n", - "state_dict = state_dict.tolist()\n", - "print(state_dict.keys())" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "f187bb55", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/ipykernel/ipkernel.py:283: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. 
Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n", - " and should_run_async(code)\n" - ] - } - ], - "source": [ - "# embed.conv.0.weight None torch.Size([256, 1, 3, 3]) \tencoder.embed.conv.0.weight | [256, 1, 3, 3] | 2304 | True\n", - "# embed.conv.0.bias None torch.Size([256]) \tencoder.embed.conv.0.bias | [256] | 256 | True\n", - "# embed.conv.2.weight None torch.Size([256, 256, 3, 3]) \tencoder.embed.conv.2.weight | [256, 256, 3, 3] | 589824 | True\n", - "# embed.conv.2.bias None torch.Size([256]) \tencoder.embed.conv.2.bias | [256] | 256 | True\n", - "# embed.out.0.weight None torch.Size([256, 5120]) 83 feature\tencoder.embed.out.0.weight | [4864, 256] | 1245184 | True 80 feature\n", - "# embed.out.0.bias None torch.Size([256]) \tencoder.embed.out.0.bias | [256] | 256 | True\n", - "# after_norm.weight None torch.Size([256]) \tencoder.after_norm.weight | [256] | 256 | True\n", - "# after_norm.bias None torch.Size([256]) \tencoder.after_norm.bias | [256] | 256 | True\n", - "# encoders.9.self_attn.linear_q.weight None torch.Size([256, 256]) \tencoder.encoders.0.self_attn.linear_q.weight | [256, 256] | 65536 | True\n", - "# encoders.9.self_attn.linear_q.bias None torch.Size([256]) \tencoder.encoders.0.self_attn.linear_q.bias | [256] | 256 | True\n", - "# encoders.9.self_attn.linear_k.weight None torch.Size([256, 256]) \tencoder.encoders.0.self_attn.linear_k.weight | [256, 256] | 65536 | True\n", - "# encoders.9.self_attn.linear_k.bias None torch.Size([256]) \tencoder.encoders.0.self_attn.linear_k.bias | [256] | 256 | True\n", - "# encoders.9.self_attn.linear_v.weight None torch.Size([256, 256]) \tencoder.encoders.0.self_attn.linear_v.weight | [256, 256] | 65536 | True\n", - "# encoders.9.self_attn.linear_v.bias None torch.Size([256]) \tencoder.encoders.0.self_attn.linear_v.bias | [256] | 256 | True\n", - "# encoders.9.self_attn.linear_out.weight None torch.Size([256, 256]) \tencoder.encoders.0.self_attn.linear_out.weight | [256, 256] | 65536 | True\n", - "# encoders.9.self_attn.linear_out.bias None torch.Size([256]) \tencoder.encoders.0.self_attn.linear_out.bias | [256] | 256 | True\n", - "# encoders.9.feed_forward.w_1.weight None torch.Size([2048, 256]) \tencoder.encoders.0.feed_forward.w_1.weight | [256, 2048] | 524288 | True\n", - "# encoders.9.feed_forward.w_1.bias None torch.Size([2048]) \tencoder.encoders.0.feed_forward.w_1.bias | [2048] | 2048 | True\n", - "# encoders.9.feed_forward.w_2.weight None torch.Size([256, 2048]) \tencoder.encoders.0.feed_forward.w_2.weight | [2048, 256] | 524288 | True\n", - "# encoders.9.feed_forward.w_2.bias None torch.Size([256]) \tencoder.encoders.0.feed_forward.w_2.bias | [256] | 256 | True\n", - "# encoders.9.norm1.weight None torch.Size([256]) \tencoder.encoders.0.norm1.weight | [256] | 256 | True\n", - "# encoders.9.norm1.bias None torch.Size([256]) \tencoder.encoders.0.norm1.bias | [256] | 256 | True\n", - "# encoders.9.norm2.weight None torch.Size([256]) \tencoder.encoders.0.norm2.weight | [256] | 256 | True\n", - "# encoders.9.norm2.bias None torch.Size([256]) \tencoder.encoders.0.norm2.bias | [256] | 256 | True\n", - "# \tencoder.encoders.0.concat_linear.weight | [512, 256] | 131072 | True\n", - "# \tencoder.encoders.0.concat_linear.bias | [256] | 256 | True\n", - "# espnet transformer\tconcat_linear只是保存了,但是未使用\n", - "\t\n", - "# \tpaddle transformer" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "2a0428ae", - 
"metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "-> encoder.embed.conv.0.weight\n", - "-> encoder.embed.conv.0.bias\n", - "-> encoder.embed.conv.2.weight\n", - "-> encoder.embed.conv.2.bias\n", - "-> encoder.embed.out.0.weight\n", - "encoder.embed.out.0.weight: (256, 5120) -> (5120, 256)\n", - "-> encoder.embed.out.0.bias\n", - "-> encoder.encoders.0.self_attn.linear_q.weight\n", - "encoder.encoders.0.self_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.0.self_attn.linear_q.bias\n", - "-> encoder.encoders.0.self_attn.linear_k.weight\n", - "encoder.encoders.0.self_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.0.self_attn.linear_k.bias\n", - "-> encoder.encoders.0.self_attn.linear_v.weight\n", - "encoder.encoders.0.self_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.0.self_attn.linear_v.bias\n", - "-> encoder.encoders.0.self_attn.linear_out.weight\n", - "encoder.encoders.0.self_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.0.self_attn.linear_out.bias\n", - "-> encoder.encoders.0.feed_forward.w_1.weight\n", - "encoder.encoders.0.feed_forward.w_1.weight: (2048, 256) -> (256, 2048)\n", - "-> encoder.encoders.0.feed_forward.w_1.bias\n", - "-> encoder.encoders.0.feed_forward.w_2.weight\n", - "encoder.encoders.0.feed_forward.w_2.weight: (256, 2048) -> (2048, 256)\n", - "-> encoder.encoders.0.feed_forward.w_2.bias\n", - "-> encoder.encoders.0.norm1.weight\n", - "-> encoder.encoders.0.norm1.bias\n", - "-> encoder.encoders.0.norm2.weight\n", - "-> encoder.encoders.0.norm2.bias\n", - "-> encoder.encoders.1.self_attn.linear_q.weight\n", - "encoder.encoders.1.self_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.1.self_attn.linear_q.bias\n", - "-> encoder.encoders.1.self_attn.linear_k.weight\n", - "encoder.encoders.1.self_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.1.self_attn.linear_k.bias\n", - "-> encoder.encoders.1.self_attn.linear_v.weight\n", - "encoder.encoders.1.self_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.1.self_attn.linear_v.bias\n", - "-> encoder.encoders.1.self_attn.linear_out.weight\n", - "encoder.encoders.1.self_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.1.self_attn.linear_out.bias\n", - "-> encoder.encoders.1.feed_forward.w_1.weight\n", - "encoder.encoders.1.feed_forward.w_1.weight: (2048, 256) -> (256, 2048)\n", - "-> encoder.encoders.1.feed_forward.w_1.bias\n", - "-> encoder.encoders.1.feed_forward.w_2.weight\n", - "encoder.encoders.1.feed_forward.w_2.weight: (256, 2048) -> (2048, 256)\n", - "-> encoder.encoders.1.feed_forward.w_2.bias\n", - "-> encoder.encoders.1.norm1.weight\n", - "-> encoder.encoders.1.norm1.bias\n", - "-> encoder.encoders.1.norm2.weight\n", - "-> encoder.encoders.1.norm2.bias\n", - "-> encoder.encoders.2.self_attn.linear_q.weight\n", - "encoder.encoders.2.self_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.2.self_attn.linear_q.bias\n", - "-> encoder.encoders.2.self_attn.linear_k.weight\n", - "encoder.encoders.2.self_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.2.self_attn.linear_k.bias\n", - "-> encoder.encoders.2.self_attn.linear_v.weight\n", - "encoder.encoders.2.self_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.2.self_attn.linear_v.bias\n", - "-> encoder.encoders.2.self_attn.linear_out.weight\n", - 
"encoder.encoders.2.self_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.2.self_attn.linear_out.bias\n", - "-> encoder.encoders.2.feed_forward.w_1.weight\n", - "encoder.encoders.2.feed_forward.w_1.weight: (2048, 256) -> (256, 2048)\n", - "-> encoder.encoders.2.feed_forward.w_1.bias\n", - "-> encoder.encoders.2.feed_forward.w_2.weight\n", - "encoder.encoders.2.feed_forward.w_2.weight: (256, 2048) -> (2048, 256)\n", - "-> encoder.encoders.2.feed_forward.w_2.bias\n", - "-> encoder.encoders.2.norm1.weight\n", - "-> encoder.encoders.2.norm1.bias\n", - "-> encoder.encoders.2.norm2.weight\n", - "-> encoder.encoders.2.norm2.bias\n", - "-> encoder.encoders.3.self_attn.linear_q.weight\n", - "encoder.encoders.3.self_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.3.self_attn.linear_q.bias\n", - "-> encoder.encoders.3.self_attn.linear_k.weight\n", - "encoder.encoders.3.self_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.3.self_attn.linear_k.bias\n", - "-> encoder.encoders.3.self_attn.linear_v.weight\n", - "encoder.encoders.3.self_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.3.self_attn.linear_v.bias\n", - "-> encoder.encoders.3.self_attn.linear_out.weight\n", - "encoder.encoders.3.self_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.3.self_attn.linear_out.bias\n", - "-> encoder.encoders.3.feed_forward.w_1.weight\n", - "encoder.encoders.3.feed_forward.w_1.weight: (2048, 256) -> (256, 2048)\n", - "-> encoder.encoders.3.feed_forward.w_1.bias\n", - "-> encoder.encoders.3.feed_forward.w_2.weight\n", - "encoder.encoders.3.feed_forward.w_2.weight: (256, 2048) -> (2048, 256)\n", - "-> encoder.encoders.3.feed_forward.w_2.bias\n", - "-> encoder.encoders.3.norm1.weight\n", - "-> encoder.encoders.3.norm1.bias\n", - "-> encoder.encoders.3.norm2.weight\n", - "-> encoder.encoders.3.norm2.bias\n", - "-> encoder.encoders.4.self_attn.linear_q.weight\n", - "encoder.encoders.4.self_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.4.self_attn.linear_q.bias\n", - "-> encoder.encoders.4.self_attn.linear_k.weight\n", - "encoder.encoders.4.self_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.4.self_attn.linear_k.bias\n", - "-> encoder.encoders.4.self_attn.linear_v.weight\n", - "encoder.encoders.4.self_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.4.self_attn.linear_v.bias\n", - "-> encoder.encoders.4.self_attn.linear_out.weight\n", - "encoder.encoders.4.self_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.4.self_attn.linear_out.bias\n", - "-> encoder.encoders.4.feed_forward.w_1.weight\n", - "encoder.encoders.4.feed_forward.w_1.weight: (2048, 256) -> (256, 2048)\n", - "-> encoder.encoders.4.feed_forward.w_1.bias\n", - "-> encoder.encoders.4.feed_forward.w_2.weight\n", - "encoder.encoders.4.feed_forward.w_2.weight: (256, 2048) -> (2048, 256)\n", - "-> encoder.encoders.4.feed_forward.w_2.bias\n", - "-> encoder.encoders.4.norm1.weight\n", - "-> encoder.encoders.4.norm1.bias\n", - "-> encoder.encoders.4.norm2.weight\n", - "-> encoder.encoders.4.norm2.bias\n", - "-> encoder.encoders.5.self_attn.linear_q.weight\n", - "encoder.encoders.5.self_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.5.self_attn.linear_q.bias\n", - "-> encoder.encoders.5.self_attn.linear_k.weight\n", - "encoder.encoders.5.self_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "-> 
encoder.encoders.5.self_attn.linear_k.bias\n", - "-> encoder.encoders.5.self_attn.linear_v.weight\n", - "encoder.encoders.5.self_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.5.self_attn.linear_v.bias\n", - "-> encoder.encoders.5.self_attn.linear_out.weight\n", - "encoder.encoders.5.self_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.5.self_attn.linear_out.bias\n", - "-> encoder.encoders.5.feed_forward.w_1.weight\n", - "encoder.encoders.5.feed_forward.w_1.weight: (2048, 256) -> (256, 2048)\n", - "-> encoder.encoders.5.feed_forward.w_1.bias\n", - "-> encoder.encoders.5.feed_forward.w_2.weight\n", - "encoder.encoders.5.feed_forward.w_2.weight: (256, 2048) -> (2048, 256)\n", - "-> encoder.encoders.5.feed_forward.w_2.bias\n", - "-> encoder.encoders.5.norm1.weight\n", - "-> encoder.encoders.5.norm1.bias\n", - "-> encoder.encoders.5.norm2.weight\n", - "-> encoder.encoders.5.norm2.bias\n", - "-> encoder.encoders.6.self_attn.linear_q.weight\n", - "encoder.encoders.6.self_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.6.self_attn.linear_q.bias\n", - "-> encoder.encoders.6.self_attn.linear_k.weight\n", - "encoder.encoders.6.self_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.6.self_attn.linear_k.bias\n", - "-> encoder.encoders.6.self_attn.linear_v.weight\n", - "encoder.encoders.6.self_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.6.self_attn.linear_v.bias\n", - "-> encoder.encoders.6.self_attn.linear_out.weight\n", - "encoder.encoders.6.self_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.6.self_attn.linear_out.bias\n", - "-> encoder.encoders.6.feed_forward.w_1.weight\n", - "encoder.encoders.6.feed_forward.w_1.weight: (2048, 256) -> (256, 2048)\n", - "-> encoder.encoders.6.feed_forward.w_1.bias\n", - "-> encoder.encoders.6.feed_forward.w_2.weight\n", - "encoder.encoders.6.feed_forward.w_2.weight: (256, 2048) -> (2048, 256)\n", - "-> encoder.encoders.6.feed_forward.w_2.bias\n", - "-> encoder.encoders.6.norm1.weight\n", - "-> encoder.encoders.6.norm1.bias\n", - "-> encoder.encoders.6.norm2.weight\n", - "-> encoder.encoders.6.norm2.bias\n", - "-> encoder.encoders.7.self_attn.linear_q.weight\n", - "encoder.encoders.7.self_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.7.self_attn.linear_q.bias\n", - "-> encoder.encoders.7.self_attn.linear_k.weight\n", - "encoder.encoders.7.self_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.7.self_attn.linear_k.bias\n", - "-> encoder.encoders.7.self_attn.linear_v.weight\n", - "encoder.encoders.7.self_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.7.self_attn.linear_v.bias\n", - "-> encoder.encoders.7.self_attn.linear_out.weight\n", - "encoder.encoders.7.self_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.7.self_attn.linear_out.bias\n", - "-> encoder.encoders.7.feed_forward.w_1.weight\n", - "encoder.encoders.7.feed_forward.w_1.weight: (2048, 256) -> (256, 2048)\n", - "-> encoder.encoders.7.feed_forward.w_1.bias\n", - "-> encoder.encoders.7.feed_forward.w_2.weight\n", - "encoder.encoders.7.feed_forward.w_2.weight: (256, 2048) -> (2048, 256)\n", - "-> encoder.encoders.7.feed_forward.w_2.bias\n", - "-> encoder.encoders.7.norm1.weight\n", - "-> encoder.encoders.7.norm1.bias\n", - "-> encoder.encoders.7.norm2.weight\n", - "-> encoder.encoders.7.norm2.bias\n", - "-> 
encoder.encoders.8.self_attn.linear_q.weight\n", - "encoder.encoders.8.self_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.8.self_attn.linear_q.bias\n", - "-> encoder.encoders.8.self_attn.linear_k.weight\n", - "encoder.encoders.8.self_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.8.self_attn.linear_k.bias\n", - "-> encoder.encoders.8.self_attn.linear_v.weight\n", - "encoder.encoders.8.self_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.8.self_attn.linear_v.bias\n", - "-> encoder.encoders.8.self_attn.linear_out.weight\n", - "encoder.encoders.8.self_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.8.self_attn.linear_out.bias\n", - "-> encoder.encoders.8.feed_forward.w_1.weight\n", - "encoder.encoders.8.feed_forward.w_1.weight: (2048, 256) -> (256, 2048)\n", - "-> encoder.encoders.8.feed_forward.w_1.bias\n", - "-> encoder.encoders.8.feed_forward.w_2.weight\n", - "encoder.encoders.8.feed_forward.w_2.weight: (256, 2048) -> (2048, 256)\n", - "-> encoder.encoders.8.feed_forward.w_2.bias\n", - "-> encoder.encoders.8.norm1.weight\n", - "-> encoder.encoders.8.norm1.bias\n", - "-> encoder.encoders.8.norm2.weight\n", - "-> encoder.encoders.8.norm2.bias\n", - "-> encoder.encoders.9.self_attn.linear_q.weight\n", - "encoder.encoders.9.self_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.9.self_attn.linear_q.bias\n", - "-> encoder.encoders.9.self_attn.linear_k.weight\n", - "encoder.encoders.9.self_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.9.self_attn.linear_k.bias\n", - "-> encoder.encoders.9.self_attn.linear_v.weight\n", - "encoder.encoders.9.self_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.9.self_attn.linear_v.bias\n", - "-> encoder.encoders.9.self_attn.linear_out.weight\n", - "encoder.encoders.9.self_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.9.self_attn.linear_out.bias\n", - "-> encoder.encoders.9.feed_forward.w_1.weight\n", - "encoder.encoders.9.feed_forward.w_1.weight: (2048, 256) -> (256, 2048)\n", - "-> encoder.encoders.9.feed_forward.w_1.bias\n", - "-> encoder.encoders.9.feed_forward.w_2.weight\n", - "encoder.encoders.9.feed_forward.w_2.weight: (256, 2048) -> (2048, 256)\n", - "-> encoder.encoders.9.feed_forward.w_2.bias\n", - "-> encoder.encoders.9.norm1.weight\n", - "-> encoder.encoders.9.norm1.bias\n", - "-> encoder.encoders.9.norm2.weight\n", - "-> encoder.encoders.9.norm2.bias\n", - "-> encoder.encoders.10.self_attn.linear_q.weight\n", - "encoder.encoders.10.self_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.10.self_attn.linear_q.bias\n", - "-> encoder.encoders.10.self_attn.linear_k.weight\n", - "encoder.encoders.10.self_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.10.self_attn.linear_k.bias\n", - "-> encoder.encoders.10.self_attn.linear_v.weight\n", - "encoder.encoders.10.self_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.10.self_attn.linear_v.bias\n", - "-> encoder.encoders.10.self_attn.linear_out.weight\n", - "encoder.encoders.10.self_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.10.self_attn.linear_out.bias\n", - "-> encoder.encoders.10.feed_forward.w_1.weight\n", - "encoder.encoders.10.feed_forward.w_1.weight: (2048, 256) -> (256, 2048)\n", - "-> encoder.encoders.10.feed_forward.w_1.bias\n", - "-> encoder.encoders.10.feed_forward.w_2.weight\n", - 
"encoder.encoders.10.feed_forward.w_2.weight: (256, 2048) -> (2048, 256)\n", - "-> encoder.encoders.10.feed_forward.w_2.bias\n", - "-> encoder.encoders.10.norm1.weight\n", - "-> encoder.encoders.10.norm1.bias\n", - "-> encoder.encoders.10.norm2.weight\n", - "-> encoder.encoders.10.norm2.bias\n", - "-> encoder.encoders.11.self_attn.linear_q.weight\n", - "encoder.encoders.11.self_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.11.self_attn.linear_q.bias\n", - "-> encoder.encoders.11.self_attn.linear_k.weight\n", - "encoder.encoders.11.self_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.11.self_attn.linear_k.bias\n", - "-> encoder.encoders.11.self_attn.linear_v.weight\n", - "encoder.encoders.11.self_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.11.self_attn.linear_v.bias\n", - "-> encoder.encoders.11.self_attn.linear_out.weight\n", - "encoder.encoders.11.self_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "-> encoder.encoders.11.self_attn.linear_out.bias\n", - "-> encoder.encoders.11.feed_forward.w_1.weight\n", - "encoder.encoders.11.feed_forward.w_1.weight: (2048, 256) -> (256, 2048)\n", - "-> encoder.encoders.11.feed_forward.w_1.bias\n", - "-> encoder.encoders.11.feed_forward.w_2.weight\n", - "encoder.encoders.11.feed_forward.w_2.weight: (256, 2048) -> (2048, 256)\n", - "-> encoder.encoders.11.feed_forward.w_2.bias\n", - "-> encoder.encoders.11.norm1.weight\n", - "-> encoder.encoders.11.norm1.bias\n", - "-> encoder.encoders.11.norm2.weight\n", - "-> encoder.encoders.11.norm2.bias\n", - "-> encoder.after_norm.weight\n", - "-> encoder.after_norm.bias\n" - ] - } - ], - "source": [ - "# dump torch model to paddle\n", - "#state_dict = model.state_dict()\n", - "paddle_state_dict = {}\n", - "\n", - "for n, p in state_dict.items():\n", - " if 'encoder' not in n:\n", - " continue \n", - " print(f'-> {n}')\n", - " \n", - " \n", - " name_change=True\n", - " if 'norm.running_mean' in n:\n", - " new_n = n.replace('norm.running_', 'norm._')\n", - " elif 'norm.running_var' in n:\n", - " new_n = n.replace('norm.running_var', 'norm._variance')\n", - " else:\n", - " name_change=False\n", - " new_n = n\n", - " if name_change:\n", - " print(f\"{n} -> {new_n}\")\n", - " \n", - " \n", - " p = p.cpu().detach().numpy()\n", - " if n.endswith('weight') and p.ndim == 2:\n", - " new_p = p.T\n", - " print(f\"{n}: {p.shape} -> {new_p.shape}\")\n", - " else:\n", - " new_p = p\n", - " \n", - " if 'global_cmvn.mean' in n:\n", - " print(p, p.dtype)\n", - " \n", - " paddle_state_dict[new_n] = new_p\n", - " \n", - "# np.savez('/workspace/DeepSpeech-2.x/.notebook/model',\n", - "# state=paddle_state_dict)" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "a1d97e9f", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for encoder.encoders.0.concat_linear.weight. encoder.encoders.0.concat_linear.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for encoder.encoders.0.concat_linear.bias. encoder.encoders.0.concat_linear.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. 
\".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for encoder.encoders.1.concat_linear.weight. encoder.encoders.1.concat_linear.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for encoder.encoders.1.concat_linear.bias. encoder.encoders.1.concat_linear.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for encoder.encoders.2.concat_linear.weight. encoder.encoders.2.concat_linear.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for encoder.encoders.2.concat_linear.bias. encoder.encoders.2.concat_linear.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for encoder.encoders.3.concat_linear.weight. encoder.encoders.3.concat_linear.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for encoder.encoders.3.concat_linear.bias. encoder.encoders.3.concat_linear.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for encoder.encoders.4.concat_linear.weight. encoder.encoders.4.concat_linear.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for encoder.encoders.4.concat_linear.bias. encoder.encoders.4.concat_linear.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for encoder.encoders.5.concat_linear.weight. encoder.encoders.5.concat_linear.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for encoder.encoders.5.concat_linear.bias. encoder.encoders.5.concat_linear.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for encoder.encoders.6.concat_linear.weight. 
encoder.encoders.6.concat_linear.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for encoder.encoders.6.concat_linear.bias. encoder.encoders.6.concat_linear.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for encoder.encoders.7.concat_linear.weight. encoder.encoders.7.concat_linear.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for encoder.encoders.7.concat_linear.bias. encoder.encoders.7.concat_linear.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for encoder.encoders.8.concat_linear.weight. encoder.encoders.8.concat_linear.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for encoder.encoders.8.concat_linear.bias. encoder.encoders.8.concat_linear.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for encoder.encoders.9.concat_linear.weight. encoder.encoders.9.concat_linear.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for encoder.encoders.9.concat_linear.bias. encoder.encoders.9.concat_linear.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for encoder.encoders.10.concat_linear.weight. encoder.encoders.10.concat_linear.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for encoder.encoders.10.concat_linear.bias. encoder.encoders.10.concat_linear.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for encoder.encoders.11.concat_linear.weight. encoder.encoders.11.concat_linear.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. 
\".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for encoder.encoders.11.concat_linear.bias. encoder.encoders.11.concat_linear.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.embed.0.weight. decoder.embed.0.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.after_norm.weight. decoder.after_norm.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.after_norm.bias. decoder.after_norm.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.output_layer.weight. decoder.output_layer.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.output_layer.bias. decoder.output_layer.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.0.self_attn.linear_q.weight. decoder.decoders.0.self_attn.linear_q.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.0.self_attn.linear_q.bias. decoder.decoders.0.self_attn.linear_q.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.0.self_attn.linear_k.weight. decoder.decoders.0.self_attn.linear_k.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.0.self_attn.linear_k.bias. decoder.decoders.0.self_attn.linear_k.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.0.self_attn.linear_v.weight. 
decoder.decoders.0.self_attn.linear_v.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.0.self_attn.linear_v.bias. decoder.decoders.0.self_attn.linear_v.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.0.self_attn.linear_out.weight. decoder.decoders.0.self_attn.linear_out.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.0.self_attn.linear_out.bias. decoder.decoders.0.self_attn.linear_out.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.0.src_attn.linear_q.weight. decoder.decoders.0.src_attn.linear_q.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.0.src_attn.linear_q.bias. decoder.decoders.0.src_attn.linear_q.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.0.src_attn.linear_k.weight. decoder.decoders.0.src_attn.linear_k.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.0.src_attn.linear_k.bias. decoder.decoders.0.src_attn.linear_k.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.0.src_attn.linear_v.weight. decoder.decoders.0.src_attn.linear_v.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.0.src_attn.linear_v.bias. decoder.decoders.0.src_attn.linear_v.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.0.src_attn.linear_out.weight. decoder.decoders.0.src_attn.linear_out.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. 
\".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.0.src_attn.linear_out.bias. decoder.decoders.0.src_attn.linear_out.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.0.feed_forward.w_1.weight. decoder.decoders.0.feed_forward.w_1.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.0.feed_forward.w_1.bias. decoder.decoders.0.feed_forward.w_1.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.0.feed_forward.w_2.weight. decoder.decoders.0.feed_forward.w_2.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.0.feed_forward.w_2.bias. decoder.decoders.0.feed_forward.w_2.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.0.norm1.weight. decoder.decoders.0.norm1.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.0.norm1.bias. decoder.decoders.0.norm1.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.0.norm2.weight. decoder.decoders.0.norm2.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.0.norm2.bias. decoder.decoders.0.norm2.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.0.norm3.weight. decoder.decoders.0.norm3.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.0.norm3.bias. 
decoder.decoders.0.norm3.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.0.concat_linear1.weight. decoder.decoders.0.concat_linear1.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.0.concat_linear1.bias. decoder.decoders.0.concat_linear1.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.0.concat_linear2.weight. decoder.decoders.0.concat_linear2.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.0.concat_linear2.bias. decoder.decoders.0.concat_linear2.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.1.self_attn.linear_q.weight. decoder.decoders.1.self_attn.linear_q.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.1.self_attn.linear_q.bias. decoder.decoders.1.self_attn.linear_q.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.1.self_attn.linear_k.weight. decoder.decoders.1.self_attn.linear_k.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.1.self_attn.linear_k.bias. decoder.decoders.1.self_attn.linear_k.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.1.self_attn.linear_v.weight. decoder.decoders.1.self_attn.linear_v.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.1.self_attn.linear_v.bias. decoder.decoders.1.self_attn.linear_v.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. 
\".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.1.self_attn.linear_out.weight. decoder.decoders.1.self_attn.linear_out.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.1.self_attn.linear_out.bias. decoder.decoders.1.self_attn.linear_out.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.1.src_attn.linear_q.weight. decoder.decoders.1.src_attn.linear_q.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.1.src_attn.linear_q.bias. decoder.decoders.1.src_attn.linear_q.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.1.src_attn.linear_k.weight. decoder.decoders.1.src_attn.linear_k.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.1.src_attn.linear_k.bias. decoder.decoders.1.src_attn.linear_k.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.1.src_attn.linear_v.weight. decoder.decoders.1.src_attn.linear_v.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.1.src_attn.linear_v.bias. decoder.decoders.1.src_attn.linear_v.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.1.src_attn.linear_out.weight. decoder.decoders.1.src_attn.linear_out.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.1.src_attn.linear_out.bias. decoder.decoders.1.src_attn.linear_out.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. 
\".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.1.feed_forward.w_1.weight. decoder.decoders.1.feed_forward.w_1.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.1.feed_forward.w_1.bias. decoder.decoders.1.feed_forward.w_1.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.1.feed_forward.w_2.weight. decoder.decoders.1.feed_forward.w_2.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.1.feed_forward.w_2.bias. decoder.decoders.1.feed_forward.w_2.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.1.norm1.weight. decoder.decoders.1.norm1.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.1.norm1.bias. decoder.decoders.1.norm1.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.1.norm2.weight. decoder.decoders.1.norm2.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.1.norm2.bias. decoder.decoders.1.norm2.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.1.norm3.weight. decoder.decoders.1.norm3.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.1.norm3.bias. decoder.decoders.1.norm3.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.1.concat_linear1.weight. 
decoder.decoders.1.concat_linear1.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.1.concat_linear1.bias. decoder.decoders.1.concat_linear1.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.1.concat_linear2.weight. decoder.decoders.1.concat_linear2.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.1.concat_linear2.bias. decoder.decoders.1.concat_linear2.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.2.self_attn.linear_q.weight. decoder.decoders.2.self_attn.linear_q.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.2.self_attn.linear_q.bias. decoder.decoders.2.self_attn.linear_q.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.2.self_attn.linear_k.weight. decoder.decoders.2.self_attn.linear_k.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.2.self_attn.linear_k.bias. decoder.decoders.2.self_attn.linear_k.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.2.self_attn.linear_v.weight. decoder.decoders.2.self_attn.linear_v.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.2.self_attn.linear_v.bias. decoder.decoders.2.self_attn.linear_v.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.2.self_attn.linear_out.weight. decoder.decoders.2.self_attn.linear_out.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. 
\".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.2.self_attn.linear_out.bias. decoder.decoders.2.self_attn.linear_out.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.2.src_attn.linear_q.weight. decoder.decoders.2.src_attn.linear_q.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.2.src_attn.linear_q.bias. decoder.decoders.2.src_attn.linear_q.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.2.src_attn.linear_k.weight. decoder.decoders.2.src_attn.linear_k.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.2.src_attn.linear_k.bias. decoder.decoders.2.src_attn.linear_k.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.2.src_attn.linear_v.weight. decoder.decoders.2.src_attn.linear_v.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.2.src_attn.linear_v.bias. decoder.decoders.2.src_attn.linear_v.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.2.src_attn.linear_out.weight. decoder.decoders.2.src_attn.linear_out.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.2.src_attn.linear_out.bias. decoder.decoders.2.src_attn.linear_out.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.2.feed_forward.w_1.weight. decoder.decoders.2.feed_forward.w_1.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. 
\".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.2.feed_forward.w_1.bias. decoder.decoders.2.feed_forward.w_1.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.2.feed_forward.w_2.weight. decoder.decoders.2.feed_forward.w_2.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.2.feed_forward.w_2.bias. decoder.decoders.2.feed_forward.w_2.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.2.norm1.weight. decoder.decoders.2.norm1.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.2.norm1.bias. decoder.decoders.2.norm1.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.2.norm2.weight. decoder.decoders.2.norm2.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.2.norm2.bias. decoder.decoders.2.norm2.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.2.norm3.weight. decoder.decoders.2.norm3.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.2.norm3.bias. decoder.decoders.2.norm3.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.2.concat_linear1.weight. decoder.decoders.2.concat_linear1.weight is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for decoder.decoders.2.concat_linear1.bias. 
decoder.decoders.2.concat_linear1.bias is not found in the provided dict.\n", -    "      warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n", -    "[... the same UserWarning from paddle/fluid/dygraph/layers.py:1303 repeats for every remaining decoder parameter: decoder.decoders.2.concat_linear2.{weight,bias} and all self_attn, src_attn, feed_forward, norm1/2/3 and concat_linear1/2 weights and biases of decoder.decoders.3, decoder.decoders.4 and decoder.decoders.5 ...]\n", -    "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for ctc.ctc_lo.weight. ctc.ctc_lo.weight is not found in the provided dict.\n", -    "      warnings.warn((\"Skip loading for {}. 
\".format(key) + str(err)))\n", - "/workspace/DeepSpeech-2.x/tools/venv-2p1/lib/python3.7/site-packages/paddle/fluid/dygraph/layers.py:1303: UserWarning: Skip loading for ctc.ctc_lo.bias. ctc.ctc_lo.bias is not found in the provided dict.\n", - " warnings.warn((\"Skip loading for {}. \".format(key) + str(err)))\n" - ] - } - ], - "source": [ - "model.set_state_dict(paddle_state_dict)" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "fc7edf1e", - "metadata": {}, - "outputs": [], - "source": [ - "e_state = model.encoder.state_dict()\n", - "for key, value in e_state.items():\n", - " if 'concat_linear' in key:\n", - " continue\n", - " if not np.allclose(value.numpy(), paddle_state_dict['encoder.' + key]):\n", - " print(key)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "572097d0", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "748250b7", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "91e5deee", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 14, - "id": "fleet-despite", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "embed.npz feat.npz l1.npz l11.npz l3.npz l5.npz l7.npz l9.npz\r\n", - "encoder.npz l0.npz l10.npz l2.npz l4.npz l6.npz l8.npz model.npz\r\n" - ] - } - ], - "source": [ - "%ls .notebook/espnet" - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "id": "abroad-oracle", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "(8, 57, 83)\n", - "(8, 1, 57)\n", - "[57 50 48 38 32 31 28 25]\n" - ] - } - ], - "source": [ - "data = np.load('.notebook/espnet/feat.npz', allow_pickle=True)\n", - "xs=data['xs']\n", - "masks=data['masks']\n", - "print(xs.shape)\n", - "print(masks.shape)\n", - "xs_lens = masks.sum(axis=-1).squeeze()\n", - "print(xs_lens)" - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "id": "false-instrument", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[8, 13, 256]\n", - "[8, 1, 13]\n" - ] - } - ], - "source": [ - "# ecnoder\n", - "xs = paddle.to_tensor(xs, dtype='float32')\n", - "x_lens = paddle.to_tensor(xs_lens, dtype='int32')\n", - "model.eval()\n", - "encoder_out, encoder_mask = model.encoder(xs, x_lens)\n", - "print(encoder_out.shape)\n", - "print(encoder_mask.shape)" - ] - }, - { - "cell_type": "code", - "execution_count": 31, - "id": "arctic-proxy", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "(8, 13, 256)\n", - "(8, 1, 13)\n", - "False\n", - "False\n", - "True\n", - "True\n" - ] - } - ], - "source": [ - "data = np.load('.notebook/espnet/encoder.npz', allow_pickle=True)\n", - "xs = data['xs']\n", - "masks = data['masks']\n", - "print(xs.shape)\n", - "print(masks.shape)\n", - "print(np.allclose(xs, encoder_out.numpy()))\n", - "print(np.allclose(xs, encoder_out.numpy(), atol=1e-6))\n", - "print(np.allclose(xs, encoder_out.numpy(), atol=1e-5))\n", - "print(np.allclose(masks, encoder_mask.numpy()))" - ] - }, - { - "cell_type": "code", - "execution_count": 27, - "id": "seasonal-switch", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[[ 2.1380312 1.8675405 -1.1873871 ... -0.30456656 0.56382173\n", - " -0.6526459 ]\n", - " [ 2.1926146 2.1373641 -0.6548196 ... 
-0.897318 0.6044322\n", - " -0.63332295]\n", - " [ 1.6367635 2.3320658 -0.8848577 ... -0.9640939 1.2420733\n", - " -0.05243584]\n", - " ...\n", - " [ 1.8533031 1.8421621 -0.6728406 ... 0.04810616 0.6459763\n", - " -0.18188554]\n", - " [ 2.0894065 1.7813934 -1.1591585 ... -0.09513803 0.8321831\n", - " -0.72916794]\n", - " [ 1.6488649 2.0984242 -1.3490562 ... 0.42678255 0.5903866\n", - " -0.32597935]]\n", - "Tensor(shape=[13, 256], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n", - " [[ 2.13803196, 1.86753929, -1.18738675, ..., -0.30456796, 0.56382364, -0.65264463],\n", - " [ 2.19261336, 2.13736486, -0.65482187, ..., -0.89731705, 0.60443199, -0.63332343],\n", - " [ 1.63676369, 2.33206534, -0.88485885, ..., -0.96409231, 1.24207270, -0.05243752],\n", - " ...,\n", - " [ 1.85330284, 1.84216177, -0.67284071, ..., 0.04810715, 0.64597648, -0.18188696],\n", - " [ 2.08940673, 1.78139246, -1.15916038, ..., -0.09513779, 0.83218288, -0.72916913],\n", - " [ 1.64886570, 2.09842515, -1.34905660, ..., 0.42678308, 0.59038705, -0.32598034]])\n" - ] - } - ], - "source": [ - "print(xs[0])\n", - "print(encoder_out[0])" - ] - }, - { - "cell_type": "code", - "execution_count": 33, - "id": "defined-brooks", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[[ 2.209824 1.5208759 0.1417884 ... -0.73617566 1.6538682\n", - " -0.16355833]\n", - " [ 2.1441019 1.4377339 0.3629197 ... -0.91226125 1.3739952\n", - " 0.11874156]\n", - " [ 1.8725398 1.5417286 0.38919652 ... -0.89621615 1.1841662\n", - " 0.27621832]\n", - " ...\n", - " [ 2.4591084 0.7238764 -1.1456345 ... -0.24188249 0.8232168\n", - " -0.9794884 ]\n", - " [ 2.5156236 1.1919155 -0.97032744 ... -0.7360675 1.0647209\n", - " -1.3076135 ]\n", - " [ 2.160009 0.98425585 -1.2231126 ... 
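The two printed blocks agree to roughly five decimal places, consistent with the earlier np.allclose results (False at atol=1e-6, True at atol=1e-5). A small follow-up sketch, assuming the same `xs` reference array from encoder.npz and the paddle `encoder_out`, that reports the actual error magnitude rather than a boolean:

import numpy as np

# Element-wise absolute difference between the ESPnet reference and the paddle output.
diff = np.abs(xs - encoder_out.numpy())
print("max abs diff :", diff.max())     # expected to land between 1e-6 and 1e-5 for float32
print("mean abs diff:", diff.mean())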
-0.03393313 1.9141548\n", - " -1.0099151 ]]\n", - "Tensor(shape=[13, 256], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n", - " [[ 2.20982409, 1.52087593, 0.14178854, ..., -0.73617446, 1.65386844, -0.16355731],\n", - " [ 2.14410043, 1.43773460, 0.36291891, ..., -0.91226172, 1.37399518, 0.11874183],\n", - " [ 1.87254059, 1.54172909, 0.38919681, ..., -0.89621687, 1.18416822, 0.27621880],\n", - " ...,\n", - " [ 2.45910931, 0.72387671, -1.14563596, ..., -0.24188218, 0.82321703, -0.97948682],\n", - " [ 2.51562238, 1.19191694, -0.97032893, ..., -0.73606837, 1.06472087, -1.30761123],\n", - " [ 2.16000915, 0.98425680, -1.22311163, ..., -0.03393326, 1.91415381, -1.00991392]])\n" - ] - } - ], - "source": [ - "print(xs[1])\n", - "print(encoder_out[1])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0504e3f8", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.0" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/.notebook/wenet_model.ipynb b/.notebook/wenet_model.ipynb deleted file mode 100644 index 8e10b6c4b..000000000 --- a/.notebook/wenet_model.ipynb +++ /dev/null @@ -1,5015 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "cfb832c0", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "/workspace/wenet\n" - ] - }, - { - "data": { - "text/plain": [ - "'/workspace/wenet'" - ] - }, - "execution_count": 1, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "%cd /workspace/wenet/\n", - "%pwd" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "62277538", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "import argparse\n", - "import copy\n", - "import logging\n", - "import os\n", - "\n", - "import torch\n", - "import torch.distributed as dist\n", - "import torch.optim as optim\n", - "import yaml\n", - "from tensorboardX import SummaryWriter\n", - "from torch.utils.data import DataLoader\n", - "\n", - "from wenet.dataset.dataset import AudioDataset, CollateFunc\n", - "from wenet.transformer.asr_model import init_asr_model\n", - "from wenet.utils.checkpoint import load_checkpoint, save_checkpoint\n", - "from wenet.utils.executor import Executor\n", - "from wenet.utils.scheduler import WarmupLR\n", - "\n", - "os.environ['CUDA_VISIBLE_DEVICES'] = \"0\"" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "2f6ea33a", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{'config': 'examples/aishell/s0/conf/train_conformer.yaml', 'train_data': 'examples/aishell/s0/raw_wav/train/format.data', 'cv_data': 'examples/aishell/s0/raw_wav/dev/format.data', 'gpu': -1, 'model_dir': None, 'checkpoint': None, 'tensorboard_dir': 'tensorboard', 'rank': 0, 'world_size': -1, 'dist_backend': 'nccl', 'init_method': None, 'num_workers': 0, 'pin_memory': False, 'cmvn': 'examples/aishell/s0/raw_wav/train/global_cmvn'}\n" - ] - } - ], - "source": [ - "parser = argparse.ArgumentParser(description='training your network')\n", - "parser.add_argument('--config', default=\"examples/aishell/s0/conf/train_conformer.yaml\", help='config 
file')\n", - "parser.add_argument('--train_data', default=\"examples/aishell/s0/raw_wav/train/format.data\", help='train data file')\n", - "parser.add_argument('--cv_data', default=\"examples/aishell/s0/raw_wav/dev/format.data\", help='cv data file')\n", - "parser.add_argument('--gpu',\n", - " type=int,\n", - " default=-1,\n", - " help='gpu id for this local rank, -1 for cpu')\n", - "parser.add_argument('--model_dir' , help='save model dir')\n", - "parser.add_argument('--checkpoint', help='checkpoint model')\n", - "parser.add_argument('--tensorboard_dir',\n", - " default='tensorboard',\n", - " help='tensorboard log dir')\n", - "parser.add_argument('--ddp.rank',\n", - " dest='rank',\n", - " default=0,\n", - " type=int,\n", - " help='global rank for distributed training')\n", - "parser.add_argument('--ddp.world_size',\n", - " dest='world_size',\n", - " default=-1,\n", - " type=int,\n", - " help='''number of total processes/gpus for\n", - " distributed training''')\n", - "parser.add_argument('--ddp.dist_backend',\n", - " dest='dist_backend',\n", - " default='nccl',\n", - " choices=['nccl', 'gloo'],\n", - " help='distributed backend')\n", - "parser.add_argument('--ddp.init_method',\n", - " dest='init_method',\n", - " default=None,\n", - " help='ddp init method')\n", - "parser.add_argument('--num_workers',\n", - " default=0,\n", - " type=int,\n", - " help='num of subprocess workers for reading')\n", - "parser.add_argument('--pin_memory',\n", - " action='store_true',\n", - " default=False,\n", - " help='Use pinned memory buffers used for reading')\n", - "parser.add_argument('--cmvn', default=\"examples/aishell/s0/raw_wav/train/global_cmvn\", help='global cmvn file')\n", - "\n", - "args = parser.parse_args([])\n", - "print(vars(args))" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "f5d6af9b", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Namespace(checkpoint=None, cmvn='examples/aishell/s0/raw_wav/train/global_cmvn', config='examples/aishell/s0/conf/train_conformer.yaml', cv_data='examples/aishell/s0/raw_wav/dev/format.data', dist_backend='nccl', gpu=-1, init_method=None, model_dir=None, num_workers=0, pin_memory=False, rank=0, tensorboard_dir='tensorboard', train_data='examples/aishell/s0/raw_wav/train/format.data', world_size=-1)\n" - ] - } - ], - "source": [ - "# Set random seed\n", - "torch.manual_seed(777)\n", - "print(args)\n", - "with open(args.config, 'r') as fin:\n", - " configs = yaml.load(fin, Loader=yaml.FullLoader)" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "264bd353", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "7507 batches\n", - "896\n" - ] - } - ], - "source": [ - "raw_wav = configs['raw_wav']\n", - "\n", - "train_collate_func = CollateFunc(**configs['collate_conf'],\n", - " raw_wav=raw_wav)\n", - "\n", - "cv_collate_conf = copy.deepcopy(configs['collate_conf'])\n", - "# no augmenation on cv set\n", - "cv_collate_conf['spec_aug'] = False\n", - "cv_collate_conf['spec_sub'] = False\n", - "if raw_wav:\n", - " cv_collate_conf['feature_dither'] = 0.0\n", - " cv_collate_conf['speed_perturb'] = False\n", - " cv_collate_conf['wav_distortion_conf']['wav_distortion_rate'] = 0\n", - "cv_collate_func = CollateFunc(**cv_collate_conf, raw_wav=raw_wav)\n", - "\n", - "dataset_conf = configs.get('dataset_conf', {})\n", - "train_dataset = AudioDataset(args.train_data,\n", - " **dataset_conf,\n", - " raw_wav=raw_wav)\n", - "cv_dataset = 
AudioDataset(args.cv_data, **dataset_conf, raw_wav=raw_wav)\n", - "# 120098 data/train/wav.scp\n", - "print(len(train_dataset), 'batches')\n", - "# 14326 data/dev/wav.scp\n", - "print(len(cv_dataset))" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "88863d3c", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "896\n" - ] - } - ], - "source": [ - "train_sampler = None\n", - "cv_sampler = None\n", - "train_data_loader = DataLoader(train_dataset,\n", - " collate_fn=train_collate_func,\n", - " sampler=train_sampler,\n", - " #shuffle=(train_sampler is None),\n", - " shuffle=False,\n", - " pin_memory=args.pin_memory,\n", - " batch_size=1,\n", - " num_workers=args.num_workers)\n", - "cv_data_loader = DataLoader(cv_dataset,\n", - " collate_fn=cv_collate_func,\n", - " sampler=cv_sampler,\n", - " shuffle=False,\n", - " batch_size=1,\n", - " pin_memory=args.pin_memory,\n", - " num_workers=args.num_workers)\n", - "print(len(cv_data_loader))" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "10d5acd4", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "4233 vocab\n", - "80 feat dim\n" - ] - } - ], - "source": [ - "if raw_wav:\n", - " input_dim = configs['collate_conf']['feature_extraction_conf'][\n", - " 'mel_bins']\n", - "else:\n", - " input_dim = train_dataset.input_dim\n", - "vocab_size = train_dataset.output_dim\n", - "print(vocab_size, 'vocab')\n", - "print(input_dim , 'feat dim')" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "0380ef5a", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "examples/aishell/s0/raw_wav/train/global_cmvn\n" - ] - } - ], - "source": [ - "# Save configs to model_dir/train.yaml for inference and export\n", - "configs['input_dim'] = input_dim\n", - "configs['output_dim'] = vocab_size\n", - "configs['cmvn_file'] = args.cmvn\n", - "configs['is_json_cmvn'] = raw_wav\n", - "print(args.cmvn)\n" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "15ebf2bf", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "(80,)\n", - "(80,)\n", - "[ 9.87176362 9.93891555 10.23818678 10.85971412 11.68652649 12.2548801\n", - " 12.65768161 12.86138996 12.80733912 12.56625574 12.32007066 12.13879205\n", - " 12.31318868 12.55255216 12.61223855 12.56974526 12.38972728 12.14383338\n", - " 12.09285066 11.79395822 11.62259065 11.9263303 11.8154422 11.95122567\n", - " 11.83180553 11.88788759 11.79014437 11.88072035 11.90005711 11.97348142\n", - " 12.00982189 12.00881339 12.02619706 12.10479646 12.21555081 12.34399304\n", - " 12.45014401 12.4966879 12.48653775 12.3550783 12.39291732 12.2553737\n", - " 12.26496277 12.25314244 12.32545763 12.43359839 12.54867439 12.6763342\n", - " 12.80920698 12.92934681 12.96115138 12.96883353 12.99593057 13.04728142\n", - " 13.0588804 13.05737948 12.99921175 12.93402238 12.87429219 12.71652995\n", - " 12.48942004 12.27478385 12.26163069 12.28631891 12.31956049 12.4229073\n", - " 12.51480191 12.5785164 12.64719411 12.73762568 12.80017069 12.86872766\n", - " 12.96666856 13.06478583 13.15915908 13.27284306 13.31081821 13.23904279\n", - " 12.87936075 11.18310185]\n", - "[0.61219383 0.49700994 0.33439025 0.31503119 0.29640823 0.28411759\n", - " 0.26972922 0.25610475 0.24632936 0.24610228 0.24733299 0.24426536\n", - " 0.23751781 0.22987273 0.22659963 0.2268427 0.23059031 0.23420722\n", - " 0.23771761 0.2411352 
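The cell above only fills input_dim, output_dim, cmvn_file and is_json_cmvn into configs; per its own comment, the full training script then dumps that merged dict to model_dir/train.yaml so inference and export can rebuild the same model. A rough sketch of that step, with a hypothetical output directory since args.model_dir is None in this notebook run:

import os
import yaml

model_dir = "exp/conformer"            # hypothetical; args.model_dir is unset here
os.makedirs(model_dir, exist_ok=True)
with open(os.path.join(model_dir, "train.yaml"), "w") as fout:
    yaml.dump(configs, fout)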
0.24404673 0.24557175 0.24724932 0.25055198\n", - " 0.25482755 0.2602407 0.26363878 0.26503898 0.2648467 0.26435072\n", - " 0.26353625 0.26364794 0.26411054 0.26339948 0.26212082 0.26146597\n", - " 0.26196556 0.26365859 0.26592959 0.26963884 0.27392766 0.27818809\n", - " 0.28313664 0.2863325 0.28713431 0.28649323 0.28636648 0.2867843\n", - " 0.28635904 0.28562022 0.28492711 0.28429201 0.28402977 0.28401045\n", - " 0.28560797 0.28728033 0.28969549 0.29351627 0.29826453 0.30572631\n", - " 0.31811682 0.32887739 0.33288219 0.33326245 0.33014147 0.32403202\n", - " 0.31903576 0.31316258 0.30741037 0.30370692 0.30204833 0.30049064\n", - " 0.29901079 0.29824511 0.29812308 0.29753329 0.29779342 0.30175296\n", - " 0.30955538 0.32904205]\n" - ] - } - ], - "source": [ - "import json\n", - "import math\n", - "import numpy as np\n", - "def _load_json_cmvn(json_cmvn_file):\n", - " \"\"\" Load the json format cmvn stats file and calculate cmvn\n", - "\n", - " Args:\n", - " json_cmvn_file: cmvn stats file in json format\n", - "\n", - " Returns:\n", - " a numpy array of [means, vars]\n", - " \"\"\"\n", - " with open(json_cmvn_file) as f:\n", - " cmvn_stats = json.load(f)\n", - "\n", - " means = cmvn_stats['mean_stat']\n", - " variance = cmvn_stats['var_stat']\n", - " count = cmvn_stats['frame_num']\n", - " for i in range(len(means)):\n", - " means[i] /= count\n", - " variance[i] = variance[i] / count - means[i] * means[i]\n", - " if variance[i] < 1.0e-20:\n", - " variance[i] = 1.0e-20\n", - " variance[i] = 1.0 / math.sqrt(variance[i])\n", - " cmvn = np.array([means, variance])\n", - " return cmvn\n", - "\n", - "\n", - "def _load_kaldi_cmvn(kaldi_cmvn_file):\n", - " \"\"\" Load the kaldi format cmvn stats file and calculate cmvn\n", - "\n", - " Args:\n", - " kaldi_cmvn_file: kaldi text style global cmvn file, which\n", - " is generated by:\n", - " compute-cmvn-stats --binary=false scp:feats.scp global_cmvn\n", - "\n", - " Returns:\n", - " a numpy array of [means, vars]\n", - " \"\"\"\n", - " means = []\n", - " variance = []\n", - " with open(kaldi_cmvn_file, 'r') as fid:\n", - " # kaldi binary file start with '\\0B'\n", - " if fid.read(2) == '\\0B':\n", - " logger.error('kaldi cmvn binary file is not supported, please '\n", - " 'recompute it by: compute-cmvn-stats --binary=false '\n", - " ' scp:feats.scp global_cmvn')\n", - " sys.exit(1)\n", - " fid.seek(0)\n", - " arr = fid.read().split()\n", - " assert (arr[0] == '[')\n", - " assert (arr[-2] == '0')\n", - " assert (arr[-1] == ']')\n", - " feat_dim = int((len(arr) - 2 - 2) / 2)\n", - " for i in range(1, feat_dim + 1):\n", - " means.append(float(arr[i]))\n", - " count = float(arr[feat_dim + 1])\n", - " for i in range(feat_dim + 2, 2 * feat_dim + 2):\n", - " variance.append(float(arr[i]))\n", - "\n", - " for i in range(len(means)):\n", - " means[i] /= count\n", - " variance[i] = variance[i] / count - means[i] * means[i]\n", - " if variance[i] < 1.0e-20:\n", - " variance[i] = 1.0e-20\n", - " variance[i] = 1.0 / math.sqrt(variance[i])\n", - " cmvn = np.array([means, variance])\n", - " return cmvn\n", - "\n", - "\n", - "def _load_npz_cmvn(npz_cmvn_file, eps=1e-20):\n", - " npzfile = np.load(npz_cmvn_file)\n", - " means = npzfile[\"mean\"] #(1, D)\n", - " std = npzfile[\"std\"] #(1, D)\n", - " std = np.clip(std, eps, None)\n", - " variance = 1.0 / std\n", - " cmvn = np.array([means, variance])\n", - " return cmvn\n", - "\n", - "\n", - "def load_cmvn(cmvn_file: str, filetype: str):\n", - " \"\"\"load cmvn from file.\n", - "\n", - " Args:\n", - " cmvn_file (str): 
cmvn path.\n", - " filetype (str): file type, optional[npz, json, kaldi].\n", - "\n", - " Raises:\n", - " ValueError: file type not support.\n", - "\n", - " Returns:\n", - " Tuple[np.ndarray, np.ndarray]: mean, istd\n", - " \"\"\"\n", - " assert filetype in ['npz', 'json', 'kaldi'], filetype\n", - " filetype = filetype.lower()\n", - " if filetype == \"json\":\n", - " cmvn = _load_json_cmvn(cmvn_file)\n", - " elif filetype == \"kaldi\":\n", - " cmvn = _load_kaldi_cmvn(cmvn_file)\n", - " elif filetype == \"npz\":\n", - " cmvn = _load_npz_cmvn(cmvn_file)\n", - " else:\n", - " raise ValueError(f\"cmvn file type no support: {filetype}\")\n", - " return cmvn[0], cmvn[1]\n", - "\n", - "mean, istd = load_cmvn(args.cmvn, 'json')\n", - "print(mean.shape)\n", - "print(istd.shape)\n", - "print(mean)\n", - "print(istd)" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "3cfa5e23", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "ASRModel(\n", - " (encoder): ConformerEncoder(\n", - " (global_cmvn): GlobalCMVN()\n", - " (embed): Conv2dSubsampling4(\n", - " (conv): Sequential(\n", - " (0): Conv2d(1, 256, kernel_size=(3, 3), stride=(2, 2))\n", - " (1): ReLU()\n", - " (2): Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2))\n", - " (3): ReLU()\n", - " )\n", - " (out): Sequential(\n", - " (0): Linear(in_features=4864, out_features=256, bias=True)\n", - " )\n", - " (pos_enc): RelPositionalEncoding(\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " )\n", - " (after_norm): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (encoders): ModuleList(\n", - " (0): ConformerEncoderLayer(\n", - " (self_attn): RelPositionMultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_k): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_v): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_out): Linear(in_features=256, out_features=256, bias=True)\n", - " (dropout): Dropout(p=0.0, inplace=False)\n", - " (linear_pos): Linear(in_features=256, out_features=256, bias=False)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, bias=True)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (w_2): Linear(in_features=2048, out_features=256, bias=True)\n", - " )\n", - " (feed_forward_macaron): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, bias=True)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (w_2): Linear(in_features=2048, out_features=256, bias=True)\n", - " )\n", - " (conv_module): ConvolutionModule(\n", - " (pointwise_conv1): Conv1d(256, 512, kernel_size=(1,), stride=(1,))\n", - " (depthwise_conv): Conv1d(256, 256, kernel_size=(15,), stride=(1,), padding=(7,), groups=256)\n", - " (norm): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (pointwise_conv2): Conv1d(256, 256, kernel_size=(1,), stride=(1,))\n", - " (activation): Swish()\n", - " )\n", - " (norm_ff): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_mha): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_ff_macaron): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_conv): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_final): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): 
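load_cmvn above hands back the per-dimension mean and inverse standard deviation, and the GlobalCMVN module at the top of the printed model applies the same statistics to every input frame. A small sketch of the normalisation they imply, using a placeholder fbank matrix (the real features come from the dataloader):

import numpy as np

feats = np.random.randn(100, 80).astype(np.float32)   # placeholder (frames, mel_bins) features
normalized = (feats - mean) * istd                     # subtract the mean, divide by the std
print(normalized.shape)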
Dropout(p=0.1, inplace=False)\n", - " (concat_linear): Linear(in_features=512, out_features=256, bias=True)\n", - " )\n", - " (1): ConformerEncoderLayer(\n", - " (self_attn): RelPositionMultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_k): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_v): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_out): Linear(in_features=256, out_features=256, bias=True)\n", - " (dropout): Dropout(p=0.0, inplace=False)\n", - " (linear_pos): Linear(in_features=256, out_features=256, bias=False)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, bias=True)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (w_2): Linear(in_features=2048, out_features=256, bias=True)\n", - " )\n", - " (feed_forward_macaron): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, bias=True)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (w_2): Linear(in_features=2048, out_features=256, bias=True)\n", - " )\n", - " (conv_module): ConvolutionModule(\n", - " (pointwise_conv1): Conv1d(256, 512, kernel_size=(1,), stride=(1,))\n", - " (depthwise_conv): Conv1d(256, 256, kernel_size=(15,), stride=(1,), padding=(7,), groups=256)\n", - " (norm): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (pointwise_conv2): Conv1d(256, 256, kernel_size=(1,), stride=(1,))\n", - " (activation): Swish()\n", - " )\n", - " (norm_ff): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_mha): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_ff_macaron): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_conv): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_final): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (concat_linear): Linear(in_features=512, out_features=256, bias=True)\n", - " )\n", - " (2): ConformerEncoderLayer(\n", - " (self_attn): RelPositionMultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_k): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_v): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_out): Linear(in_features=256, out_features=256, bias=True)\n", - " (dropout): Dropout(p=0.0, inplace=False)\n", - " (linear_pos): Linear(in_features=256, out_features=256, bias=False)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, bias=True)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (w_2): Linear(in_features=2048, out_features=256, bias=True)\n", - " )\n", - " (feed_forward_macaron): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, bias=True)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (w_2): Linear(in_features=2048, out_features=256, bias=True)\n", - " )\n", - " (conv_module): ConvolutionModule(\n", - " (pointwise_conv1): Conv1d(256, 512, kernel_size=(1,), stride=(1,))\n", - " (depthwise_conv): Conv1d(256, 256, kernel_size=(15,), stride=(1,), padding=(7,), groups=256)\n", - " (norm): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " 
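One detail of the architecture printout worth pinning down: the in_features=4864 of the Conv2dSubsampling4 output projection falls out of the two kernel-3, stride-2 convolutions shrinking the 80-bin fbank axis to 19 and flattening it together with the 256 channels. A quick arithmetic check:

freq = 80                          # mel_bins from the feature config
for _ in range(2):                 # two Conv2d(kernel_size=3, stride=2) layers
    freq = (freq - 3) // 2 + 1     # 80 -> 39 -> 19
print(freq, 256 * freq)            # 19 4864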
(pointwise_conv2): Conv1d(256, 256, kernel_size=(1,), stride=(1,))\n", - " (activation): Swish()\n", - " )\n", - " (norm_ff): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_mha): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_ff_macaron): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_conv): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_final): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (concat_linear): Linear(in_features=512, out_features=256, bias=True)\n", - " )\n", - " (3): ConformerEncoderLayer(\n", - " (self_attn): RelPositionMultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_k): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_v): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_out): Linear(in_features=256, out_features=256, bias=True)\n", - " (dropout): Dropout(p=0.0, inplace=False)\n", - " (linear_pos): Linear(in_features=256, out_features=256, bias=False)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, bias=True)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (w_2): Linear(in_features=2048, out_features=256, bias=True)\n", - " )\n", - " (feed_forward_macaron): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, bias=True)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (w_2): Linear(in_features=2048, out_features=256, bias=True)\n", - " )\n", - " (conv_module): ConvolutionModule(\n", - " (pointwise_conv1): Conv1d(256, 512, kernel_size=(1,), stride=(1,))\n", - " (depthwise_conv): Conv1d(256, 256, kernel_size=(15,), stride=(1,), padding=(7,), groups=256)\n", - " (norm): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (pointwise_conv2): Conv1d(256, 256, kernel_size=(1,), stride=(1,))\n", - " (activation): Swish()\n", - " )\n", - " (norm_ff): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_mha): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_ff_macaron): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_conv): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_final): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (concat_linear): Linear(in_features=512, out_features=256, bias=True)\n", - " )\n", - " (4): ConformerEncoderLayer(\n", - " (self_attn): RelPositionMultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_k): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_v): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_out): Linear(in_features=256, out_features=256, bias=True)\n", - " (dropout): Dropout(p=0.0, inplace=False)\n", - " (linear_pos): Linear(in_features=256, out_features=256, bias=False)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, bias=True)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (w_2): Linear(in_features=2048, out_features=256, bias=True)\n", - " )\n", - " (feed_forward_macaron): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, 
out_features=2048, bias=True)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (w_2): Linear(in_features=2048, out_features=256, bias=True)\n", - " )\n", - " (conv_module): ConvolutionModule(\n", - " (pointwise_conv1): Conv1d(256, 512, kernel_size=(1,), stride=(1,))\n", - " (depthwise_conv): Conv1d(256, 256, kernel_size=(15,), stride=(1,), padding=(7,), groups=256)\n", - " (norm): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (pointwise_conv2): Conv1d(256, 256, kernel_size=(1,), stride=(1,))\n", - " (activation): Swish()\n", - " )\n", - " (norm_ff): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_mha): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_ff_macaron): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_conv): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_final): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (concat_linear): Linear(in_features=512, out_features=256, bias=True)\n", - " )\n", - " (5): ConformerEncoderLayer(\n", - " (self_attn): RelPositionMultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_k): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_v): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_out): Linear(in_features=256, out_features=256, bias=True)\n", - " (dropout): Dropout(p=0.0, inplace=False)\n", - " (linear_pos): Linear(in_features=256, out_features=256, bias=False)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, bias=True)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (w_2): Linear(in_features=2048, out_features=256, bias=True)\n", - " )\n", - " (feed_forward_macaron): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, bias=True)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (w_2): Linear(in_features=2048, out_features=256, bias=True)\n", - " )\n", - " (conv_module): ConvolutionModule(\n", - " (pointwise_conv1): Conv1d(256, 512, kernel_size=(1,), stride=(1,))\n", - " (depthwise_conv): Conv1d(256, 256, kernel_size=(15,), stride=(1,), padding=(7,), groups=256)\n", - " (norm): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (pointwise_conv2): Conv1d(256, 256, kernel_size=(1,), stride=(1,))\n", - " (activation): Swish()\n", - " )\n", - " (norm_ff): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_mha): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_ff_macaron): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_conv): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_final): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (concat_linear): Linear(in_features=512, out_features=256, bias=True)\n", - " )\n", - " (6): ConformerEncoderLayer(\n", - " (self_attn): RelPositionMultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_k): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_v): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_out): Linear(in_features=256, out_features=256, bias=True)\n", - " 
(dropout): Dropout(p=0.0, inplace=False)\n", - " (linear_pos): Linear(in_features=256, out_features=256, bias=False)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, bias=True)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (w_2): Linear(in_features=2048, out_features=256, bias=True)\n", - " )\n", - " (feed_forward_macaron): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, bias=True)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (w_2): Linear(in_features=2048, out_features=256, bias=True)\n", - " )\n", - " (conv_module): ConvolutionModule(\n", - " (pointwise_conv1): Conv1d(256, 512, kernel_size=(1,), stride=(1,))\n", - " (depthwise_conv): Conv1d(256, 256, kernel_size=(15,), stride=(1,), padding=(7,), groups=256)\n", - " (norm): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (pointwise_conv2): Conv1d(256, 256, kernel_size=(1,), stride=(1,))\n", - " (activation): Swish()\n", - " )\n", - " (norm_ff): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_mha): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_ff_macaron): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_conv): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_final): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (concat_linear): Linear(in_features=512, out_features=256, bias=True)\n", - " )\n", - " (7): ConformerEncoderLayer(\n", - " (self_attn): RelPositionMultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_k): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_v): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_out): Linear(in_features=256, out_features=256, bias=True)\n", - " (dropout): Dropout(p=0.0, inplace=False)\n", - " (linear_pos): Linear(in_features=256, out_features=256, bias=False)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, bias=True)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (w_2): Linear(in_features=2048, out_features=256, bias=True)\n", - " )\n", - " (feed_forward_macaron): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, bias=True)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (w_2): Linear(in_features=2048, out_features=256, bias=True)\n", - " )\n", - " (conv_module): ConvolutionModule(\n", - " (pointwise_conv1): Conv1d(256, 512, kernel_size=(1,), stride=(1,))\n", - " (depthwise_conv): Conv1d(256, 256, kernel_size=(15,), stride=(1,), padding=(7,), groups=256)\n", - " (norm): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (pointwise_conv2): Conv1d(256, 256, kernel_size=(1,), stride=(1,))\n", - " (activation): Swish()\n", - " )\n", - " (norm_ff): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_mha): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_ff_macaron): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_conv): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_final): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, 
inplace=False)\n", - " (concat_linear): Linear(in_features=512, out_features=256, bias=True)\n", - " )\n", - " (8): ConformerEncoderLayer(\n", - " (self_attn): RelPositionMultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_k): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_v): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_out): Linear(in_features=256, out_features=256, bias=True)\n", - " (dropout): Dropout(p=0.0, inplace=False)\n", - " (linear_pos): Linear(in_features=256, out_features=256, bias=False)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, bias=True)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (w_2): Linear(in_features=2048, out_features=256, bias=True)\n", - " )\n", - " (feed_forward_macaron): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, bias=True)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (w_2): Linear(in_features=2048, out_features=256, bias=True)\n", - " )\n", - " (conv_module): ConvolutionModule(\n", - " (pointwise_conv1): Conv1d(256, 512, kernel_size=(1,), stride=(1,))\n", - " (depthwise_conv): Conv1d(256, 256, kernel_size=(15,), stride=(1,), padding=(7,), groups=256)\n", - " (norm): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (pointwise_conv2): Conv1d(256, 256, kernel_size=(1,), stride=(1,))\n", - " (activation): Swish()\n", - " )\n", - " (norm_ff): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_mha): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_ff_macaron): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_conv): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_final): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (concat_linear): Linear(in_features=512, out_features=256, bias=True)\n", - " )\n", - " (9): ConformerEncoderLayer(\n", - " (self_attn): RelPositionMultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_k): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_v): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_out): Linear(in_features=256, out_features=256, bias=True)\n", - " (dropout): Dropout(p=0.0, inplace=False)\n", - " (linear_pos): Linear(in_features=256, out_features=256, bias=False)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, bias=True)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (w_2): Linear(in_features=2048, out_features=256, bias=True)\n", - " )\n", - " (feed_forward_macaron): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, bias=True)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (w_2): Linear(in_features=2048, out_features=256, bias=True)\n", - " )\n", - " (conv_module): ConvolutionModule(\n", - " (pointwise_conv1): Conv1d(256, 512, kernel_size=(1,), stride=(1,))\n", - " (depthwise_conv): Conv1d(256, 256, kernel_size=(15,), stride=(1,), padding=(7,), groups=256)\n", - " (norm): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (pointwise_conv2): 
Conv1d(256, 256, kernel_size=(1,), stride=(1,))\n", - " (activation): Swish()\n", - " )\n", - " (norm_ff): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_mha): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_ff_macaron): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_conv): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_final): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (concat_linear): Linear(in_features=512, out_features=256, bias=True)\n", - " )\n", - " (10): ConformerEncoderLayer(\n", - " (self_attn): RelPositionMultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_k): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_v): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_out): Linear(in_features=256, out_features=256, bias=True)\n", - " (dropout): Dropout(p=0.0, inplace=False)\n", - " (linear_pos): Linear(in_features=256, out_features=256, bias=False)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, bias=True)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (w_2): Linear(in_features=2048, out_features=256, bias=True)\n", - " )\n", - " (feed_forward_macaron): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, bias=True)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (w_2): Linear(in_features=2048, out_features=256, bias=True)\n", - " )\n", - " (conv_module): ConvolutionModule(\n", - " (pointwise_conv1): Conv1d(256, 512, kernel_size=(1,), stride=(1,))\n", - " (depthwise_conv): Conv1d(256, 256, kernel_size=(15,), stride=(1,), padding=(7,), groups=256)\n", - " (norm): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (pointwise_conv2): Conv1d(256, 256, kernel_size=(1,), stride=(1,))\n", - " (activation): Swish()\n", - " )\n", - " (norm_ff): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_mha): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_ff_macaron): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_conv): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_final): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (concat_linear): Linear(in_features=512, out_features=256, bias=True)\n", - " )\n", - " (11): ConformerEncoderLayer(\n", - " (self_attn): RelPositionMultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_k): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_v): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_out): Linear(in_features=256, out_features=256, bias=True)\n", - " (dropout): Dropout(p=0.0, inplace=False)\n", - " (linear_pos): Linear(in_features=256, out_features=256, bias=False)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, bias=True)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (w_2): Linear(in_features=2048, out_features=256, bias=True)\n", - " )\n", - " (feed_forward_macaron): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, 
bias=True)\n", - " (activation): Swish()\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (w_2): Linear(in_features=2048, out_features=256, bias=True)\n", - " )\n", - " (conv_module): ConvolutionModule(\n", - " (pointwise_conv1): Conv1d(256, 512, kernel_size=(1,), stride=(1,))\n", - " (depthwise_conv): Conv1d(256, 256, kernel_size=(15,), stride=(1,), padding=(7,), groups=256)\n", - " (norm): BatchNorm1d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (pointwise_conv2): Conv1d(256, 256, kernel_size=(1,), stride=(1,))\n", - " (activation): Swish()\n", - " )\n", - " (norm_ff): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_mha): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_ff_macaron): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_conv): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm_final): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (concat_linear): Linear(in_features=512, out_features=256, bias=True)\n", - " )\n", - " )\n", - " )\n", - " (decoder): TransformerDecoder(\n", - " (embed): Sequential(\n", - " (0): Embedding(4233, 256)\n", - " (1): PositionalEncoding(\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " )\n", - " )\n", - " (after_norm): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (output_layer): Linear(in_features=256, out_features=4233, bias=True)\n", - " (decoders): ModuleList(\n", - " (0): DecoderLayer(\n", - " (self_attn): MultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_k): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_v): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_out): Linear(in_features=256, out_features=256, bias=True)\n", - " (dropout): Dropout(p=0.0, inplace=False)\n", - " )\n", - " (src_attn): MultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_k): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_v): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_out): Linear(in_features=256, out_features=256, bias=True)\n", - " (dropout): Dropout(p=0.0, inplace=False)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, bias=True)\n", - " (activation): ReLU()\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (w_2): Linear(in_features=2048, out_features=256, bias=True)\n", - " )\n", - " (norm1): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm2): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm3): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (concat_linear1): Linear(in_features=512, out_features=256, bias=True)\n", - " (concat_linear2): Linear(in_features=512, out_features=256, bias=True)\n", - " )\n", - " (1): DecoderLayer(\n", - " (self_attn): MultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_k): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_v): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_out): Linear(in_features=256, out_features=256, bias=True)\n", - " (dropout): Dropout(p=0.0, inplace=False)\n", - " )\n", - " (src_attn): MultiHeadedAttention(\n", - " (linear_q): 
Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_k): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_v): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_out): Linear(in_features=256, out_features=256, bias=True)\n", - " (dropout): Dropout(p=0.0, inplace=False)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, bias=True)\n", - " (activation): ReLU()\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (w_2): Linear(in_features=2048, out_features=256, bias=True)\n", - " )\n", - " (norm1): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm2): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm3): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (concat_linear1): Linear(in_features=512, out_features=256, bias=True)\n", - " (concat_linear2): Linear(in_features=512, out_features=256, bias=True)\n", - " )\n", - " (2): DecoderLayer(\n", - " (self_attn): MultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_k): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_v): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_out): Linear(in_features=256, out_features=256, bias=True)\n", - " (dropout): Dropout(p=0.0, inplace=False)\n", - " )\n", - " (src_attn): MultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_k): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_v): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_out): Linear(in_features=256, out_features=256, bias=True)\n", - " (dropout): Dropout(p=0.0, inplace=False)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, bias=True)\n", - " (activation): ReLU()\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (w_2): Linear(in_features=2048, out_features=256, bias=True)\n", - " )\n", - " (norm1): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm2): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm3): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (concat_linear1): Linear(in_features=512, out_features=256, bias=True)\n", - " (concat_linear2): Linear(in_features=512, out_features=256, bias=True)\n", - " )\n", - " (3): DecoderLayer(\n", - " (self_attn): MultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_k): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_v): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_out): Linear(in_features=256, out_features=256, bias=True)\n", - " (dropout): Dropout(p=0.0, inplace=False)\n", - " )\n", - " (src_attn): MultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_k): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_v): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_out): Linear(in_features=256, out_features=256, bias=True)\n", - " (dropout): Dropout(p=0.0, inplace=False)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, bias=True)\n", - " (activation): ReLU()\n", - " (dropout): 
Dropout(p=0.1, inplace=False)\n", - " (w_2): Linear(in_features=2048, out_features=256, bias=True)\n", - " )\n", - " (norm1): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm2): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm3): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (concat_linear1): Linear(in_features=512, out_features=256, bias=True)\n", - " (concat_linear2): Linear(in_features=512, out_features=256, bias=True)\n", - " )\n", - " (4): DecoderLayer(\n", - " (self_attn): MultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_k): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_v): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_out): Linear(in_features=256, out_features=256, bias=True)\n", - " (dropout): Dropout(p=0.0, inplace=False)\n", - " )\n", - " (src_attn): MultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_k): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_v): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_out): Linear(in_features=256, out_features=256, bias=True)\n", - " (dropout): Dropout(p=0.0, inplace=False)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, bias=True)\n", - " (activation): ReLU()\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (w_2): Linear(in_features=2048, out_features=256, bias=True)\n", - " )\n", - " (norm1): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm2): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm3): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (concat_linear1): Linear(in_features=512, out_features=256, bias=True)\n", - " (concat_linear2): Linear(in_features=512, out_features=256, bias=True)\n", - " )\n", - " (5): DecoderLayer(\n", - " (self_attn): MultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_k): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_v): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_out): Linear(in_features=256, out_features=256, bias=True)\n", - " (dropout): Dropout(p=0.0, inplace=False)\n", - " )\n", - " (src_attn): MultiHeadedAttention(\n", - " (linear_q): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_k): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_v): Linear(in_features=256, out_features=256, bias=True)\n", - " (linear_out): Linear(in_features=256, out_features=256, bias=True)\n", - " (dropout): Dropout(p=0.0, inplace=False)\n", - " )\n", - " (feed_forward): PositionwiseFeedForward(\n", - " (w_1): Linear(in_features=256, out_features=2048, bias=True)\n", - " (activation): ReLU()\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (w_2): Linear(in_features=2048, out_features=256, bias=True)\n", - " )\n", - " (norm1): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm2): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (norm3): LayerNorm((256,), eps=1e-12, elementwise_affine=True)\n", - " (dropout): Dropout(p=0.1, inplace=False)\n", - " (concat_linear1): Linear(in_features=512, out_features=256, bias=True)\n", - " (concat_linear2): Linear(in_features=512, 
out_features=256, bias=True)\n", - " )\n", - " )\n", - " )\n", - " (ctc): CTC(\n", - " (ctc_lo): Linear(in_features=256, out_features=4233, bias=True)\n", - " (ctc_loss): CTCLoss()\n", - " )\n", - " (criterion_att): LabelSmoothingLoss(\n", - " (criterion): KLDivLoss()\n", - " )\n", - ")\n" - ] - } - ], - "source": [ - "# Init asr model from configs\n", - "model = init_asr_model(configs)\n", - "print(model)" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "3c780af5", - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "def summary(layer, print_func=print):\n", - " num_params = num_elements = 0\n", - " for name, param in layer.state_dict().items():\n", - " if print_func:\n", - " print_func(\n", - " \"{} | {} | {}\".format(name, param.shape, np.prod(param.shape)))\n", - " num_elements += np.prod(param.shape)\n", - " num_params += 1\n", - " if print_func:\n", - " print_func(\n", - " f\"Total parameters: {num_params}, {num_elements} elements.\"\n", - " )\n", - " \n", - "def print_params(model, print_func=print):\n", - " if print_func is None:\n", - " return\n", - " total = 0.0\n", - " num_params = 0.0\n", - " for n, p in model.named_parameters():\n", - " msg = f\"{n} | {p.shape} | {np.prod(p.shape)} | {p.requires_grad}\"\n", - " total += np.prod(p.shape)\n", - " num_params += 1\n", - " if print_func:\n", - " print_func(msg)\n", - " if print_func:\n", - " print_func(f\"Total parameters: {num_params}, {total} elements.\")" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "e159a200", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "encoder.global_cmvn.mean | torch.Size([80]) | 80\n", - "encoder.global_cmvn.istd | torch.Size([80]) | 80\n", - "encoder.embed.conv.0.weight | torch.Size([256, 1, 3, 3]) | 2304\n", - "encoder.embed.conv.0.bias | torch.Size([256]) | 256\n", - "encoder.embed.conv.2.weight | torch.Size([256, 256, 3, 3]) | 589824\n", - "encoder.embed.conv.2.bias | torch.Size([256]) | 256\n", - "encoder.embed.out.0.weight | torch.Size([256, 4864]) | 1245184\n", - "encoder.embed.out.0.bias | torch.Size([256]) | 256\n", - "encoder.after_norm.weight | torch.Size([256]) | 256\n", - "encoder.after_norm.bias | torch.Size([256]) | 256\n", - "encoder.encoders.0.self_attn.pos_bias_u | torch.Size([4, 64]) | 256\n", - "encoder.encoders.0.self_attn.pos_bias_v | torch.Size([4, 64]) | 256\n", - "encoder.encoders.0.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.0.self_attn.linear_q.bias | torch.Size([256]) | 256\n", - "encoder.encoders.0.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.0.self_attn.linear_k.bias | torch.Size([256]) | 256\n", - "encoder.encoders.0.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.0.self_attn.linear_v.bias | torch.Size([256]) | 256\n", - "encoder.encoders.0.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.0.self_attn.linear_out.bias | torch.Size([256]) | 256\n", - "encoder.encoders.0.self_attn.linear_pos.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.0.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288\n", - "encoder.encoders.0.feed_forward.w_1.bias | torch.Size([2048]) | 2048\n", - "encoder.encoders.0.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288\n", - "encoder.encoders.0.feed_forward.w_2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.0.feed_forward_macaron.w_1.weight | torch.Size([2048, 256]) | 
524288\n", - "encoder.encoders.0.feed_forward_macaron.w_1.bias | torch.Size([2048]) | 2048\n", - "encoder.encoders.0.feed_forward_macaron.w_2.weight | torch.Size([256, 2048]) | 524288\n", - "encoder.encoders.0.feed_forward_macaron.w_2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.0.conv_module.pointwise_conv1.weight | torch.Size([512, 256, 1]) | 131072\n", - "encoder.encoders.0.conv_module.pointwise_conv1.bias | torch.Size([512]) | 512\n", - "encoder.encoders.0.conv_module.depthwise_conv.weight | torch.Size([256, 1, 15]) | 3840\n", - "encoder.encoders.0.conv_module.depthwise_conv.bias | torch.Size([256]) | 256\n", - "encoder.encoders.0.conv_module.norm.weight | torch.Size([256]) | 256\n", - "encoder.encoders.0.conv_module.norm.bias | torch.Size([256]) | 256\n", - "encoder.encoders.0.conv_module.norm.running_mean | torch.Size([256]) | 256\n", - "encoder.encoders.0.conv_module.norm.running_var | torch.Size([256]) | 256\n", - "encoder.encoders.0.conv_module.norm.num_batches_tracked | torch.Size([]) | 1.0\n", - "encoder.encoders.0.conv_module.pointwise_conv2.weight | torch.Size([256, 256, 1]) | 65536\n", - "encoder.encoders.0.conv_module.pointwise_conv2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.0.norm_ff.weight | torch.Size([256]) | 256\n", - "encoder.encoders.0.norm_ff.bias | torch.Size([256]) | 256\n", - "encoder.encoders.0.norm_mha.weight | torch.Size([256]) | 256\n", - "encoder.encoders.0.norm_mha.bias | torch.Size([256]) | 256\n", - "encoder.encoders.0.norm_ff_macaron.weight | torch.Size([256]) | 256\n", - "encoder.encoders.0.norm_ff_macaron.bias | torch.Size([256]) | 256\n", - "encoder.encoders.0.norm_conv.weight | torch.Size([256]) | 256\n", - "encoder.encoders.0.norm_conv.bias | torch.Size([256]) | 256\n", - "encoder.encoders.0.norm_final.weight | torch.Size([256]) | 256\n", - "encoder.encoders.0.norm_final.bias | torch.Size([256]) | 256\n", - "encoder.encoders.0.concat_linear.weight | torch.Size([256, 512]) | 131072\n", - "encoder.encoders.0.concat_linear.bias | torch.Size([256]) | 256\n", - "encoder.encoders.1.self_attn.pos_bias_u | torch.Size([4, 64]) | 256\n", - "encoder.encoders.1.self_attn.pos_bias_v | torch.Size([4, 64]) | 256\n", - "encoder.encoders.1.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.1.self_attn.linear_q.bias | torch.Size([256]) | 256\n", - "encoder.encoders.1.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.1.self_attn.linear_k.bias | torch.Size([256]) | 256\n", - "encoder.encoders.1.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.1.self_attn.linear_v.bias | torch.Size([256]) | 256\n", - "encoder.encoders.1.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.1.self_attn.linear_out.bias | torch.Size([256]) | 256\n", - "encoder.encoders.1.self_attn.linear_pos.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.1.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288\n", - "encoder.encoders.1.feed_forward.w_1.bias | torch.Size([2048]) | 2048\n", - "encoder.encoders.1.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288\n", - "encoder.encoders.1.feed_forward.w_2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.1.feed_forward_macaron.w_1.weight | torch.Size([2048, 256]) | 524288\n", - "encoder.encoders.1.feed_forward_macaron.w_1.bias | torch.Size([2048]) | 2048\n", - "encoder.encoders.1.feed_forward_macaron.w_2.weight | torch.Size([256, 2048]) | 524288\n", - 
"encoder.encoders.1.feed_forward_macaron.w_2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.1.conv_module.pointwise_conv1.weight | torch.Size([512, 256, 1]) | 131072\n", - "encoder.encoders.1.conv_module.pointwise_conv1.bias | torch.Size([512]) | 512\n", - "encoder.encoders.1.conv_module.depthwise_conv.weight | torch.Size([256, 1, 15]) | 3840\n", - "encoder.encoders.1.conv_module.depthwise_conv.bias | torch.Size([256]) | 256\n", - "encoder.encoders.1.conv_module.norm.weight | torch.Size([256]) | 256\n", - "encoder.encoders.1.conv_module.norm.bias | torch.Size([256]) | 256\n", - "encoder.encoders.1.conv_module.norm.running_mean | torch.Size([256]) | 256\n", - "encoder.encoders.1.conv_module.norm.running_var | torch.Size([256]) | 256\n", - "encoder.encoders.1.conv_module.norm.num_batches_tracked | torch.Size([]) | 1.0\n", - "encoder.encoders.1.conv_module.pointwise_conv2.weight | torch.Size([256, 256, 1]) | 65536\n", - "encoder.encoders.1.conv_module.pointwise_conv2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.1.norm_ff.weight | torch.Size([256]) | 256\n", - "encoder.encoders.1.norm_ff.bias | torch.Size([256]) | 256\n", - "encoder.encoders.1.norm_mha.weight | torch.Size([256]) | 256\n", - "encoder.encoders.1.norm_mha.bias | torch.Size([256]) | 256\n", - "encoder.encoders.1.norm_ff_macaron.weight | torch.Size([256]) | 256\n", - "encoder.encoders.1.norm_ff_macaron.bias | torch.Size([256]) | 256\n", - "encoder.encoders.1.norm_conv.weight | torch.Size([256]) | 256\n", - "encoder.encoders.1.norm_conv.bias | torch.Size([256]) | 256\n", - "encoder.encoders.1.norm_final.weight | torch.Size([256]) | 256\n", - "encoder.encoders.1.norm_final.bias | torch.Size([256]) | 256\n", - "encoder.encoders.1.concat_linear.weight | torch.Size([256, 512]) | 131072\n", - "encoder.encoders.1.concat_linear.bias | torch.Size([256]) | 256\n", - "encoder.encoders.2.self_attn.pos_bias_u | torch.Size([4, 64]) | 256\n", - "encoder.encoders.2.self_attn.pos_bias_v | torch.Size([4, 64]) | 256\n", - "encoder.encoders.2.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.2.self_attn.linear_q.bias | torch.Size([256]) | 256\n", - "encoder.encoders.2.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.2.self_attn.linear_k.bias | torch.Size([256]) | 256\n", - "encoder.encoders.2.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.2.self_attn.linear_v.bias | torch.Size([256]) | 256\n", - "encoder.encoders.2.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.2.self_attn.linear_out.bias | torch.Size([256]) | 256\n", - "encoder.encoders.2.self_attn.linear_pos.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.2.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288\n", - "encoder.encoders.2.feed_forward.w_1.bias | torch.Size([2048]) | 2048\n", - "encoder.encoders.2.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288\n", - "encoder.encoders.2.feed_forward.w_2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.2.feed_forward_macaron.w_1.weight | torch.Size([2048, 256]) | 524288\n", - "encoder.encoders.2.feed_forward_macaron.w_1.bias | torch.Size([2048]) | 2048\n", - "encoder.encoders.2.feed_forward_macaron.w_2.weight | torch.Size([256, 2048]) | 524288\n", - "encoder.encoders.2.feed_forward_macaron.w_2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.2.conv_module.pointwise_conv1.weight | torch.Size([512, 256, 1]) | 131072\n", - 
"encoder.encoders.2.conv_module.pointwise_conv1.bias | torch.Size([512]) | 512\n", - "encoder.encoders.2.conv_module.depthwise_conv.weight | torch.Size([256, 1, 15]) | 3840\n", - "encoder.encoders.2.conv_module.depthwise_conv.bias | torch.Size([256]) | 256\n", - "encoder.encoders.2.conv_module.norm.weight | torch.Size([256]) | 256\n", - "encoder.encoders.2.conv_module.norm.bias | torch.Size([256]) | 256\n", - "encoder.encoders.2.conv_module.norm.running_mean | torch.Size([256]) | 256\n", - "encoder.encoders.2.conv_module.norm.running_var | torch.Size([256]) | 256\n", - "encoder.encoders.2.conv_module.norm.num_batches_tracked | torch.Size([]) | 1.0\n", - "encoder.encoders.2.conv_module.pointwise_conv2.weight | torch.Size([256, 256, 1]) | 65536\n", - "encoder.encoders.2.conv_module.pointwise_conv2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.2.norm_ff.weight | torch.Size([256]) | 256\n", - "encoder.encoders.2.norm_ff.bias | torch.Size([256]) | 256\n", - "encoder.encoders.2.norm_mha.weight | torch.Size([256]) | 256\n", - "encoder.encoders.2.norm_mha.bias | torch.Size([256]) | 256\n", - "encoder.encoders.2.norm_ff_macaron.weight | torch.Size([256]) | 256\n", - "encoder.encoders.2.norm_ff_macaron.bias | torch.Size([256]) | 256\n", - "encoder.encoders.2.norm_conv.weight | torch.Size([256]) | 256\n", - "encoder.encoders.2.norm_conv.bias | torch.Size([256]) | 256\n", - "encoder.encoders.2.norm_final.weight | torch.Size([256]) | 256\n", - "encoder.encoders.2.norm_final.bias | torch.Size([256]) | 256\n", - "encoder.encoders.2.concat_linear.weight | torch.Size([256, 512]) | 131072\n", - "encoder.encoders.2.concat_linear.bias | torch.Size([256]) | 256\n", - "encoder.encoders.3.self_attn.pos_bias_u | torch.Size([4, 64]) | 256\n", - "encoder.encoders.3.self_attn.pos_bias_v | torch.Size([4, 64]) | 256\n", - "encoder.encoders.3.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.3.self_attn.linear_q.bias | torch.Size([256]) | 256\n", - "encoder.encoders.3.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.3.self_attn.linear_k.bias | torch.Size([256]) | 256\n", - "encoder.encoders.3.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.3.self_attn.linear_v.bias | torch.Size([256]) | 256\n", - "encoder.encoders.3.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.3.self_attn.linear_out.bias | torch.Size([256]) | 256\n", - "encoder.encoders.3.self_attn.linear_pos.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.3.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288\n", - "encoder.encoders.3.feed_forward.w_1.bias | torch.Size([2048]) | 2048\n", - "encoder.encoders.3.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288\n", - "encoder.encoders.3.feed_forward.w_2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.3.feed_forward_macaron.w_1.weight | torch.Size([2048, 256]) | 524288\n", - "encoder.encoders.3.feed_forward_macaron.w_1.bias | torch.Size([2048]) | 2048\n", - "encoder.encoders.3.feed_forward_macaron.w_2.weight | torch.Size([256, 2048]) | 524288\n", - "encoder.encoders.3.feed_forward_macaron.w_2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.3.conv_module.pointwise_conv1.weight | torch.Size([512, 256, 1]) | 131072\n", - "encoder.encoders.3.conv_module.pointwise_conv1.bias | torch.Size([512]) | 512\n", - "encoder.encoders.3.conv_module.depthwise_conv.weight | torch.Size([256, 1, 15]) | 3840\n", - 
"encoder.encoders.3.conv_module.depthwise_conv.bias | torch.Size([256]) | 256\n", - "encoder.encoders.3.conv_module.norm.weight | torch.Size([256]) | 256\n", - "encoder.encoders.3.conv_module.norm.bias | torch.Size([256]) | 256\n", - "encoder.encoders.3.conv_module.norm.running_mean | torch.Size([256]) | 256\n", - "encoder.encoders.3.conv_module.norm.running_var | torch.Size([256]) | 256\n", - "encoder.encoders.3.conv_module.norm.num_batches_tracked | torch.Size([]) | 1.0\n", - "encoder.encoders.3.conv_module.pointwise_conv2.weight | torch.Size([256, 256, 1]) | 65536\n", - "encoder.encoders.3.conv_module.pointwise_conv2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.3.norm_ff.weight | torch.Size([256]) | 256\n", - "encoder.encoders.3.norm_ff.bias | torch.Size([256]) | 256\n", - "encoder.encoders.3.norm_mha.weight | torch.Size([256]) | 256\n", - "encoder.encoders.3.norm_mha.bias | torch.Size([256]) | 256\n", - "encoder.encoders.3.norm_ff_macaron.weight | torch.Size([256]) | 256\n", - "encoder.encoders.3.norm_ff_macaron.bias | torch.Size([256]) | 256\n", - "encoder.encoders.3.norm_conv.weight | torch.Size([256]) | 256\n", - "encoder.encoders.3.norm_conv.bias | torch.Size([256]) | 256\n", - "encoder.encoders.3.norm_final.weight | torch.Size([256]) | 256\n", - "encoder.encoders.3.norm_final.bias | torch.Size([256]) | 256\n", - "encoder.encoders.3.concat_linear.weight | torch.Size([256, 512]) | 131072\n", - "encoder.encoders.3.concat_linear.bias | torch.Size([256]) | 256\n", - "encoder.encoders.4.self_attn.pos_bias_u | torch.Size([4, 64]) | 256\n", - "encoder.encoders.4.self_attn.pos_bias_v | torch.Size([4, 64]) | 256\n", - "encoder.encoders.4.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.4.self_attn.linear_q.bias | torch.Size([256]) | 256\n", - "encoder.encoders.4.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.4.self_attn.linear_k.bias | torch.Size([256]) | 256\n", - "encoder.encoders.4.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.4.self_attn.linear_v.bias | torch.Size([256]) | 256\n", - "encoder.encoders.4.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.4.self_attn.linear_out.bias | torch.Size([256]) | 256\n", - "encoder.encoders.4.self_attn.linear_pos.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.4.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288\n", - "encoder.encoders.4.feed_forward.w_1.bias | torch.Size([2048]) | 2048\n", - "encoder.encoders.4.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288\n", - "encoder.encoders.4.feed_forward.w_2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.4.feed_forward_macaron.w_1.weight | torch.Size([2048, 256]) | 524288\n", - "encoder.encoders.4.feed_forward_macaron.w_1.bias | torch.Size([2048]) | 2048\n", - "encoder.encoders.4.feed_forward_macaron.w_2.weight | torch.Size([256, 2048]) | 524288\n", - "encoder.encoders.4.feed_forward_macaron.w_2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.4.conv_module.pointwise_conv1.weight | torch.Size([512, 256, 1]) | 131072\n", - "encoder.encoders.4.conv_module.pointwise_conv1.bias | torch.Size([512]) | 512\n", - "encoder.encoders.4.conv_module.depthwise_conv.weight | torch.Size([256, 1, 15]) | 3840\n", - "encoder.encoders.4.conv_module.depthwise_conv.bias | torch.Size([256]) | 256\n", - "encoder.encoders.4.conv_module.norm.weight | torch.Size([256]) | 256\n", - "encoder.encoders.4.conv_module.norm.bias | 
torch.Size([256]) | 256\n", - "encoder.encoders.4.conv_module.norm.running_mean | torch.Size([256]) | 256\n", - "encoder.encoders.4.conv_module.norm.running_var | torch.Size([256]) | 256\n", - "encoder.encoders.4.conv_module.norm.num_batches_tracked | torch.Size([]) | 1.0\n", - "encoder.encoders.4.conv_module.pointwise_conv2.weight | torch.Size([256, 256, 1]) | 65536\n", - "encoder.encoders.4.conv_module.pointwise_conv2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.4.norm_ff.weight | torch.Size([256]) | 256\n", - "encoder.encoders.4.norm_ff.bias | torch.Size([256]) | 256\n", - "encoder.encoders.4.norm_mha.weight | torch.Size([256]) | 256\n", - "encoder.encoders.4.norm_mha.bias | torch.Size([256]) | 256\n", - "encoder.encoders.4.norm_ff_macaron.weight | torch.Size([256]) | 256\n", - "encoder.encoders.4.norm_ff_macaron.bias | torch.Size([256]) | 256\n", - "encoder.encoders.4.norm_conv.weight | torch.Size([256]) | 256\n", - "encoder.encoders.4.norm_conv.bias | torch.Size([256]) | 256\n", - "encoder.encoders.4.norm_final.weight | torch.Size([256]) | 256\n", - "encoder.encoders.4.norm_final.bias | torch.Size([256]) | 256\n", - "encoder.encoders.4.concat_linear.weight | torch.Size([256, 512]) | 131072\n", - "encoder.encoders.4.concat_linear.bias | torch.Size([256]) | 256\n", - "encoder.encoders.5.self_attn.pos_bias_u | torch.Size([4, 64]) | 256\n", - "encoder.encoders.5.self_attn.pos_bias_v | torch.Size([4, 64]) | 256\n", - "encoder.encoders.5.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.5.self_attn.linear_q.bias | torch.Size([256]) | 256\n", - "encoder.encoders.5.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.5.self_attn.linear_k.bias | torch.Size([256]) | 256\n", - "encoder.encoders.5.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.5.self_attn.linear_v.bias | torch.Size([256]) | 256\n", - "encoder.encoders.5.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.5.self_attn.linear_out.bias | torch.Size([256]) | 256\n", - "encoder.encoders.5.self_attn.linear_pos.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.5.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288\n", - "encoder.encoders.5.feed_forward.w_1.bias | torch.Size([2048]) | 2048\n", - "encoder.encoders.5.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288\n", - "encoder.encoders.5.feed_forward.w_2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.5.feed_forward_macaron.w_1.weight | torch.Size([2048, 256]) | 524288\n", - "encoder.encoders.5.feed_forward_macaron.w_1.bias | torch.Size([2048]) | 2048\n", - "encoder.encoders.5.feed_forward_macaron.w_2.weight | torch.Size([256, 2048]) | 524288\n", - "encoder.encoders.5.feed_forward_macaron.w_2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.5.conv_module.pointwise_conv1.weight | torch.Size([512, 256, 1]) | 131072\n", - "encoder.encoders.5.conv_module.pointwise_conv1.bias | torch.Size([512]) | 512\n", - "encoder.encoders.5.conv_module.depthwise_conv.weight | torch.Size([256, 1, 15]) | 3840\n", - "encoder.encoders.5.conv_module.depthwise_conv.bias | torch.Size([256]) | 256\n", - "encoder.encoders.5.conv_module.norm.weight | torch.Size([256]) | 256\n", - "encoder.encoders.5.conv_module.norm.bias | torch.Size([256]) | 256\n", - "encoder.encoders.5.conv_module.norm.running_mean | torch.Size([256]) | 256\n", - "encoder.encoders.5.conv_module.norm.running_var | torch.Size([256]) | 256\n", - 
"encoder.encoders.5.conv_module.norm.num_batches_tracked | torch.Size([]) | 1.0\n", - "encoder.encoders.5.conv_module.pointwise_conv2.weight | torch.Size([256, 256, 1]) | 65536\n", - "encoder.encoders.5.conv_module.pointwise_conv2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.5.norm_ff.weight | torch.Size([256]) | 256\n", - "encoder.encoders.5.norm_ff.bias | torch.Size([256]) | 256\n", - "encoder.encoders.5.norm_mha.weight | torch.Size([256]) | 256\n", - "encoder.encoders.5.norm_mha.bias | torch.Size([256]) | 256\n", - "encoder.encoders.5.norm_ff_macaron.weight | torch.Size([256]) | 256\n", - "encoder.encoders.5.norm_ff_macaron.bias | torch.Size([256]) | 256\n", - "encoder.encoders.5.norm_conv.weight | torch.Size([256]) | 256\n", - "encoder.encoders.5.norm_conv.bias | torch.Size([256]) | 256\n", - "encoder.encoders.5.norm_final.weight | torch.Size([256]) | 256\n", - "encoder.encoders.5.norm_final.bias | torch.Size([256]) | 256\n", - "encoder.encoders.5.concat_linear.weight | torch.Size([256, 512]) | 131072\n", - "encoder.encoders.5.concat_linear.bias | torch.Size([256]) | 256\n", - "encoder.encoders.6.self_attn.pos_bias_u | torch.Size([4, 64]) | 256\n", - "encoder.encoders.6.self_attn.pos_bias_v | torch.Size([4, 64]) | 256\n", - "encoder.encoders.6.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.6.self_attn.linear_q.bias | torch.Size([256]) | 256\n", - "encoder.encoders.6.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.6.self_attn.linear_k.bias | torch.Size([256]) | 256\n", - "encoder.encoders.6.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.6.self_attn.linear_v.bias | torch.Size([256]) | 256\n", - "encoder.encoders.6.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.6.self_attn.linear_out.bias | torch.Size([256]) | 256\n", - "encoder.encoders.6.self_attn.linear_pos.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.6.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288\n", - "encoder.encoders.6.feed_forward.w_1.bias | torch.Size([2048]) | 2048\n", - "encoder.encoders.6.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288\n", - "encoder.encoders.6.feed_forward.w_2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.6.feed_forward_macaron.w_1.weight | torch.Size([2048, 256]) | 524288\n", - "encoder.encoders.6.feed_forward_macaron.w_1.bias | torch.Size([2048]) | 2048\n", - "encoder.encoders.6.feed_forward_macaron.w_2.weight | torch.Size([256, 2048]) | 524288\n", - "encoder.encoders.6.feed_forward_macaron.w_2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.6.conv_module.pointwise_conv1.weight | torch.Size([512, 256, 1]) | 131072\n", - "encoder.encoders.6.conv_module.pointwise_conv1.bias | torch.Size([512]) | 512\n", - "encoder.encoders.6.conv_module.depthwise_conv.weight | torch.Size([256, 1, 15]) | 3840\n", - "encoder.encoders.6.conv_module.depthwise_conv.bias | torch.Size([256]) | 256\n", - "encoder.encoders.6.conv_module.norm.weight | torch.Size([256]) | 256\n", - "encoder.encoders.6.conv_module.norm.bias | torch.Size([256]) | 256\n", - "encoder.encoders.6.conv_module.norm.running_mean | torch.Size([256]) | 256\n", - "encoder.encoders.6.conv_module.norm.running_var | torch.Size([256]) | 256\n", - "encoder.encoders.6.conv_module.norm.num_batches_tracked | torch.Size([]) | 1.0\n", - "encoder.encoders.6.conv_module.pointwise_conv2.weight | torch.Size([256, 256, 1]) | 65536\n", - 
"encoder.encoders.6.conv_module.pointwise_conv2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.6.norm_ff.weight | torch.Size([256]) | 256\n", - "encoder.encoders.6.norm_ff.bias | torch.Size([256]) | 256\n", - "encoder.encoders.6.norm_mha.weight | torch.Size([256]) | 256\n", - "encoder.encoders.6.norm_mha.bias | torch.Size([256]) | 256\n", - "encoder.encoders.6.norm_ff_macaron.weight | torch.Size([256]) | 256\n", - "encoder.encoders.6.norm_ff_macaron.bias | torch.Size([256]) | 256\n", - "encoder.encoders.6.norm_conv.weight | torch.Size([256]) | 256\n", - "encoder.encoders.6.norm_conv.bias | torch.Size([256]) | 256\n", - "encoder.encoders.6.norm_final.weight | torch.Size([256]) | 256\n", - "encoder.encoders.6.norm_final.bias | torch.Size([256]) | 256\n", - "encoder.encoders.6.concat_linear.weight | torch.Size([256, 512]) | 131072\n", - "encoder.encoders.6.concat_linear.bias | torch.Size([256]) | 256\n", - "encoder.encoders.7.self_attn.pos_bias_u | torch.Size([4, 64]) | 256\n", - "encoder.encoders.7.self_attn.pos_bias_v | torch.Size([4, 64]) | 256\n", - "encoder.encoders.7.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.7.self_attn.linear_q.bias | torch.Size([256]) | 256\n", - "encoder.encoders.7.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.7.self_attn.linear_k.bias | torch.Size([256]) | 256\n", - "encoder.encoders.7.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.7.self_attn.linear_v.bias | torch.Size([256]) | 256\n", - "encoder.encoders.7.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.7.self_attn.linear_out.bias | torch.Size([256]) | 256\n", - "encoder.encoders.7.self_attn.linear_pos.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.7.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288\n", - "encoder.encoders.7.feed_forward.w_1.bias | torch.Size([2048]) | 2048\n", - "encoder.encoders.7.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288\n", - "encoder.encoders.7.feed_forward.w_2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.7.feed_forward_macaron.w_1.weight | torch.Size([2048, 256]) | 524288\n", - "encoder.encoders.7.feed_forward_macaron.w_1.bias | torch.Size([2048]) | 2048\n", - "encoder.encoders.7.feed_forward_macaron.w_2.weight | torch.Size([256, 2048]) | 524288\n", - "encoder.encoders.7.feed_forward_macaron.w_2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.7.conv_module.pointwise_conv1.weight | torch.Size([512, 256, 1]) | 131072\n", - "encoder.encoders.7.conv_module.pointwise_conv1.bias | torch.Size([512]) | 512\n", - "encoder.encoders.7.conv_module.depthwise_conv.weight | torch.Size([256, 1, 15]) | 3840\n", - "encoder.encoders.7.conv_module.depthwise_conv.bias | torch.Size([256]) | 256\n", - "encoder.encoders.7.conv_module.norm.weight | torch.Size([256]) | 256\n", - "encoder.encoders.7.conv_module.norm.bias | torch.Size([256]) | 256\n", - "encoder.encoders.7.conv_module.norm.running_mean | torch.Size([256]) | 256\n", - "encoder.encoders.7.conv_module.norm.running_var | torch.Size([256]) | 256\n", - "encoder.encoders.7.conv_module.norm.num_batches_tracked | torch.Size([]) | 1.0\n", - "encoder.encoders.7.conv_module.pointwise_conv2.weight | torch.Size([256, 256, 1]) | 65536\n", - "encoder.encoders.7.conv_module.pointwise_conv2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.7.norm_ff.weight | torch.Size([256]) | 256\n", - "encoder.encoders.7.norm_ff.bias | torch.Size([256]) | 256\n", 
- "encoder.encoders.7.norm_mha.weight | torch.Size([256]) | 256\n", - "encoder.encoders.7.norm_mha.bias | torch.Size([256]) | 256\n", - "encoder.encoders.7.norm_ff_macaron.weight | torch.Size([256]) | 256\n", - "encoder.encoders.7.norm_ff_macaron.bias | torch.Size([256]) | 256\n", - "encoder.encoders.7.norm_conv.weight | torch.Size([256]) | 256\n", - "encoder.encoders.7.norm_conv.bias | torch.Size([256]) | 256\n", - "encoder.encoders.7.norm_final.weight | torch.Size([256]) | 256\n", - "encoder.encoders.7.norm_final.bias | torch.Size([256]) | 256\n", - "encoder.encoders.7.concat_linear.weight | torch.Size([256, 512]) | 131072\n", - "encoder.encoders.7.concat_linear.bias | torch.Size([256]) | 256\n", - "encoder.encoders.8.self_attn.pos_bias_u | torch.Size([4, 64]) | 256\n", - "encoder.encoders.8.self_attn.pos_bias_v | torch.Size([4, 64]) | 256\n", - "encoder.encoders.8.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.8.self_attn.linear_q.bias | torch.Size([256]) | 256\n", - "encoder.encoders.8.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.8.self_attn.linear_k.bias | torch.Size([256]) | 256\n", - "encoder.encoders.8.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.8.self_attn.linear_v.bias | torch.Size([256]) | 256\n", - "encoder.encoders.8.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.8.self_attn.linear_out.bias | torch.Size([256]) | 256\n", - "encoder.encoders.8.self_attn.linear_pos.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.8.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288\n", - "encoder.encoders.8.feed_forward.w_1.bias | torch.Size([2048]) | 2048\n", - "encoder.encoders.8.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288\n", - "encoder.encoders.8.feed_forward.w_2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.8.feed_forward_macaron.w_1.weight | torch.Size([2048, 256]) | 524288\n", - "encoder.encoders.8.feed_forward_macaron.w_1.bias | torch.Size([2048]) | 2048\n", - "encoder.encoders.8.feed_forward_macaron.w_2.weight | torch.Size([256, 2048]) | 524288\n", - "encoder.encoders.8.feed_forward_macaron.w_2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.8.conv_module.pointwise_conv1.weight | torch.Size([512, 256, 1]) | 131072\n", - "encoder.encoders.8.conv_module.pointwise_conv1.bias | torch.Size([512]) | 512\n", - "encoder.encoders.8.conv_module.depthwise_conv.weight | torch.Size([256, 1, 15]) | 3840\n", - "encoder.encoders.8.conv_module.depthwise_conv.bias | torch.Size([256]) | 256\n", - "encoder.encoders.8.conv_module.norm.weight | torch.Size([256]) | 256\n", - "encoder.encoders.8.conv_module.norm.bias | torch.Size([256]) | 256\n", - "encoder.encoders.8.conv_module.norm.running_mean | torch.Size([256]) | 256\n", - "encoder.encoders.8.conv_module.norm.running_var | torch.Size([256]) | 256\n", - "encoder.encoders.8.conv_module.norm.num_batches_tracked | torch.Size([]) | 1.0\n", - "encoder.encoders.8.conv_module.pointwise_conv2.weight | torch.Size([256, 256, 1]) | 65536\n", - "encoder.encoders.8.conv_module.pointwise_conv2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.8.norm_ff.weight | torch.Size([256]) | 256\n", - "encoder.encoders.8.norm_ff.bias | torch.Size([256]) | 256\n", - "encoder.encoders.8.norm_mha.weight | torch.Size([256]) | 256\n", - "encoder.encoders.8.norm_mha.bias | torch.Size([256]) | 256\n", - "encoder.encoders.8.norm_ff_macaron.weight | torch.Size([256]) | 256\n", - 
"encoder.encoders.8.norm_ff_macaron.bias | torch.Size([256]) | 256\n", - "encoder.encoders.8.norm_conv.weight | torch.Size([256]) | 256\n", - "encoder.encoders.8.norm_conv.bias | torch.Size([256]) | 256\n", - "encoder.encoders.8.norm_final.weight | torch.Size([256]) | 256\n", - "encoder.encoders.8.norm_final.bias | torch.Size([256]) | 256\n", - "encoder.encoders.8.concat_linear.weight | torch.Size([256, 512]) | 131072\n", - "encoder.encoders.8.concat_linear.bias | torch.Size([256]) | 256\n", - "encoder.encoders.9.self_attn.pos_bias_u | torch.Size([4, 64]) | 256\n", - "encoder.encoders.9.self_attn.pos_bias_v | torch.Size([4, 64]) | 256\n", - "encoder.encoders.9.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.9.self_attn.linear_q.bias | torch.Size([256]) | 256\n", - "encoder.encoders.9.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.9.self_attn.linear_k.bias | torch.Size([256]) | 256\n", - "encoder.encoders.9.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.9.self_attn.linear_v.bias | torch.Size([256]) | 256\n", - "encoder.encoders.9.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.9.self_attn.linear_out.bias | torch.Size([256]) | 256\n", - "encoder.encoders.9.self_attn.linear_pos.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.9.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288\n", - "encoder.encoders.9.feed_forward.w_1.bias | torch.Size([2048]) | 2048\n", - "encoder.encoders.9.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288\n", - "encoder.encoders.9.feed_forward.w_2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.9.feed_forward_macaron.w_1.weight | torch.Size([2048, 256]) | 524288\n", - "encoder.encoders.9.feed_forward_macaron.w_1.bias | torch.Size([2048]) | 2048\n", - "encoder.encoders.9.feed_forward_macaron.w_2.weight | torch.Size([256, 2048]) | 524288\n", - "encoder.encoders.9.feed_forward_macaron.w_2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.9.conv_module.pointwise_conv1.weight | torch.Size([512, 256, 1]) | 131072\n", - "encoder.encoders.9.conv_module.pointwise_conv1.bias | torch.Size([512]) | 512\n", - "encoder.encoders.9.conv_module.depthwise_conv.weight | torch.Size([256, 1, 15]) | 3840\n", - "encoder.encoders.9.conv_module.depthwise_conv.bias | torch.Size([256]) | 256\n", - "encoder.encoders.9.conv_module.norm.weight | torch.Size([256]) | 256\n", - "encoder.encoders.9.conv_module.norm.bias | torch.Size([256]) | 256\n", - "encoder.encoders.9.conv_module.norm.running_mean | torch.Size([256]) | 256\n", - "encoder.encoders.9.conv_module.norm.running_var | torch.Size([256]) | 256\n", - "encoder.encoders.9.conv_module.norm.num_batches_tracked | torch.Size([]) | 1.0\n", - "encoder.encoders.9.conv_module.pointwise_conv2.weight | torch.Size([256, 256, 1]) | 65536\n", - "encoder.encoders.9.conv_module.pointwise_conv2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.9.norm_ff.weight | torch.Size([256]) | 256\n", - "encoder.encoders.9.norm_ff.bias | torch.Size([256]) | 256\n", - "encoder.encoders.9.norm_mha.weight | torch.Size([256]) | 256\n", - "encoder.encoders.9.norm_mha.bias | torch.Size([256]) | 256\n", - "encoder.encoders.9.norm_ff_macaron.weight | torch.Size([256]) | 256\n", - "encoder.encoders.9.norm_ff_macaron.bias | torch.Size([256]) | 256\n", - "encoder.encoders.9.norm_conv.weight | torch.Size([256]) | 256\n", - "encoder.encoders.9.norm_conv.bias | torch.Size([256]) | 256\n", - 
"encoder.encoders.9.norm_final.weight | torch.Size([256]) | 256\n", - "encoder.encoders.9.norm_final.bias | torch.Size([256]) | 256\n", - "encoder.encoders.9.concat_linear.weight | torch.Size([256, 512]) | 131072\n", - "encoder.encoders.9.concat_linear.bias | torch.Size([256]) | 256\n", - "encoder.encoders.10.self_attn.pos_bias_u | torch.Size([4, 64]) | 256\n", - "encoder.encoders.10.self_attn.pos_bias_v | torch.Size([4, 64]) | 256\n", - "encoder.encoders.10.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.10.self_attn.linear_q.bias | torch.Size([256]) | 256\n", - "encoder.encoders.10.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.10.self_attn.linear_k.bias | torch.Size([256]) | 256\n", - "encoder.encoders.10.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.10.self_attn.linear_v.bias | torch.Size([256]) | 256\n", - "encoder.encoders.10.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.10.self_attn.linear_out.bias | torch.Size([256]) | 256\n", - "encoder.encoders.10.self_attn.linear_pos.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.10.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288\n", - "encoder.encoders.10.feed_forward.w_1.bias | torch.Size([2048]) | 2048\n", - "encoder.encoders.10.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288\n", - "encoder.encoders.10.feed_forward.w_2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.10.feed_forward_macaron.w_1.weight | torch.Size([2048, 256]) | 524288\n", - "encoder.encoders.10.feed_forward_macaron.w_1.bias | torch.Size([2048]) | 2048\n", - "encoder.encoders.10.feed_forward_macaron.w_2.weight | torch.Size([256, 2048]) | 524288\n", - "encoder.encoders.10.feed_forward_macaron.w_2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.10.conv_module.pointwise_conv1.weight | torch.Size([512, 256, 1]) | 131072\n", - "encoder.encoders.10.conv_module.pointwise_conv1.bias | torch.Size([512]) | 512\n", - "encoder.encoders.10.conv_module.depthwise_conv.weight | torch.Size([256, 1, 15]) | 3840\n", - "encoder.encoders.10.conv_module.depthwise_conv.bias | torch.Size([256]) | 256\n", - "encoder.encoders.10.conv_module.norm.weight | torch.Size([256]) | 256\n", - "encoder.encoders.10.conv_module.norm.bias | torch.Size([256]) | 256\n", - "encoder.encoders.10.conv_module.norm.running_mean | torch.Size([256]) | 256\n", - "encoder.encoders.10.conv_module.norm.running_var | torch.Size([256]) | 256\n", - "encoder.encoders.10.conv_module.norm.num_batches_tracked | torch.Size([]) | 1.0\n", - "encoder.encoders.10.conv_module.pointwise_conv2.weight | torch.Size([256, 256, 1]) | 65536\n", - "encoder.encoders.10.conv_module.pointwise_conv2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.10.norm_ff.weight | torch.Size([256]) | 256\n", - "encoder.encoders.10.norm_ff.bias | torch.Size([256]) | 256\n", - "encoder.encoders.10.norm_mha.weight | torch.Size([256]) | 256\n", - "encoder.encoders.10.norm_mha.bias | torch.Size([256]) | 256\n", - "encoder.encoders.10.norm_ff_macaron.weight | torch.Size([256]) | 256\n", - "encoder.encoders.10.norm_ff_macaron.bias | torch.Size([256]) | 256\n", - "encoder.encoders.10.norm_conv.weight | torch.Size([256]) | 256\n", - "encoder.encoders.10.norm_conv.bias | torch.Size([256]) | 256\n", - "encoder.encoders.10.norm_final.weight | torch.Size([256]) | 256\n", - "encoder.encoders.10.norm_final.bias | torch.Size([256]) | 256\n", - 
"encoder.encoders.10.concat_linear.weight | torch.Size([256, 512]) | 131072\n", - "encoder.encoders.10.concat_linear.bias | torch.Size([256]) | 256\n", - "encoder.encoders.11.self_attn.pos_bias_u | torch.Size([4, 64]) | 256\n", - "encoder.encoders.11.self_attn.pos_bias_v | torch.Size([4, 64]) | 256\n", - "encoder.encoders.11.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.11.self_attn.linear_q.bias | torch.Size([256]) | 256\n", - "encoder.encoders.11.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.11.self_attn.linear_k.bias | torch.Size([256]) | 256\n", - "encoder.encoders.11.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.11.self_attn.linear_v.bias | torch.Size([256]) | 256\n", - "encoder.encoders.11.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.11.self_attn.linear_out.bias | torch.Size([256]) | 256\n", - "encoder.encoders.11.self_attn.linear_pos.weight | torch.Size([256, 256]) | 65536\n", - "encoder.encoders.11.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288\n", - "encoder.encoders.11.feed_forward.w_1.bias | torch.Size([2048]) | 2048\n", - "encoder.encoders.11.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288\n", - "encoder.encoders.11.feed_forward.w_2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.11.feed_forward_macaron.w_1.weight | torch.Size([2048, 256]) | 524288\n", - "encoder.encoders.11.feed_forward_macaron.w_1.bias | torch.Size([2048]) | 2048\n", - "encoder.encoders.11.feed_forward_macaron.w_2.weight | torch.Size([256, 2048]) | 524288\n", - "encoder.encoders.11.feed_forward_macaron.w_2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.11.conv_module.pointwise_conv1.weight | torch.Size([512, 256, 1]) | 131072\n", - "encoder.encoders.11.conv_module.pointwise_conv1.bias | torch.Size([512]) | 512\n", - "encoder.encoders.11.conv_module.depthwise_conv.weight | torch.Size([256, 1, 15]) | 3840\n", - "encoder.encoders.11.conv_module.depthwise_conv.bias | torch.Size([256]) | 256\n", - "encoder.encoders.11.conv_module.norm.weight | torch.Size([256]) | 256\n", - "encoder.encoders.11.conv_module.norm.bias | torch.Size([256]) | 256\n", - "encoder.encoders.11.conv_module.norm.running_mean | torch.Size([256]) | 256\n", - "encoder.encoders.11.conv_module.norm.running_var | torch.Size([256]) | 256\n", - "encoder.encoders.11.conv_module.norm.num_batches_tracked | torch.Size([]) | 1.0\n", - "encoder.encoders.11.conv_module.pointwise_conv2.weight | torch.Size([256, 256, 1]) | 65536\n", - "encoder.encoders.11.conv_module.pointwise_conv2.bias | torch.Size([256]) | 256\n", - "encoder.encoders.11.norm_ff.weight | torch.Size([256]) | 256\n", - "encoder.encoders.11.norm_ff.bias | torch.Size([256]) | 256\n", - "encoder.encoders.11.norm_mha.weight | torch.Size([256]) | 256\n", - "encoder.encoders.11.norm_mha.bias | torch.Size([256]) | 256\n", - "encoder.encoders.11.norm_ff_macaron.weight | torch.Size([256]) | 256\n", - "encoder.encoders.11.norm_ff_macaron.bias | torch.Size([256]) | 256\n", - "encoder.encoders.11.norm_conv.weight | torch.Size([256]) | 256\n", - "encoder.encoders.11.norm_conv.bias | torch.Size([256]) | 256\n", - "encoder.encoders.11.norm_final.weight | torch.Size([256]) | 256\n", - "encoder.encoders.11.norm_final.bias | torch.Size([256]) | 256\n", - "encoder.encoders.11.concat_linear.weight | torch.Size([256, 512]) | 131072\n", - "encoder.encoders.11.concat_linear.bias | torch.Size([256]) | 256\n", - 
"decoder.embed.0.weight | torch.Size([4233, 256]) | 1083648\n", - "decoder.after_norm.weight | torch.Size([256]) | 256\n", - "decoder.after_norm.bias | torch.Size([256]) | 256\n", - "decoder.output_layer.weight | torch.Size([4233, 256]) | 1083648\n", - "decoder.output_layer.bias | torch.Size([4233]) | 4233\n", - "decoder.decoders.0.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.0.self_attn.linear_q.bias | torch.Size([256]) | 256\n", - "decoder.decoders.0.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.0.self_attn.linear_k.bias | torch.Size([256]) | 256\n", - "decoder.decoders.0.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.0.self_attn.linear_v.bias | torch.Size([256]) | 256\n", - "decoder.decoders.0.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.0.self_attn.linear_out.bias | torch.Size([256]) | 256\n", - "decoder.decoders.0.src_attn.linear_q.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.0.src_attn.linear_q.bias | torch.Size([256]) | 256\n", - "decoder.decoders.0.src_attn.linear_k.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.0.src_attn.linear_k.bias | torch.Size([256]) | 256\n", - "decoder.decoders.0.src_attn.linear_v.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.0.src_attn.linear_v.bias | torch.Size([256]) | 256\n", - "decoder.decoders.0.src_attn.linear_out.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.0.src_attn.linear_out.bias | torch.Size([256]) | 256\n", - "decoder.decoders.0.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288\n", - "decoder.decoders.0.feed_forward.w_1.bias | torch.Size([2048]) | 2048\n", - "decoder.decoders.0.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288\n", - "decoder.decoders.0.feed_forward.w_2.bias | torch.Size([256]) | 256\n", - "decoder.decoders.0.norm1.weight | torch.Size([256]) | 256\n", - "decoder.decoders.0.norm1.bias | torch.Size([256]) | 256\n", - "decoder.decoders.0.norm2.weight | torch.Size([256]) | 256\n", - "decoder.decoders.0.norm2.bias | torch.Size([256]) | 256\n", - "decoder.decoders.0.norm3.weight | torch.Size([256]) | 256\n", - "decoder.decoders.0.norm3.bias | torch.Size([256]) | 256\n", - "decoder.decoders.0.concat_linear1.weight | torch.Size([256, 512]) | 131072\n", - "decoder.decoders.0.concat_linear1.bias | torch.Size([256]) | 256\n", - "decoder.decoders.0.concat_linear2.weight | torch.Size([256, 512]) | 131072\n", - "decoder.decoders.0.concat_linear2.bias | torch.Size([256]) | 256\n", - "decoder.decoders.1.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.1.self_attn.linear_q.bias | torch.Size([256]) | 256\n", - "decoder.decoders.1.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.1.self_attn.linear_k.bias | torch.Size([256]) | 256\n", - "decoder.decoders.1.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.1.self_attn.linear_v.bias | torch.Size([256]) | 256\n", - "decoder.decoders.1.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.1.self_attn.linear_out.bias | torch.Size([256]) | 256\n", - "decoder.decoders.1.src_attn.linear_q.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.1.src_attn.linear_q.bias | torch.Size([256]) | 256\n", - "decoder.decoders.1.src_attn.linear_k.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.1.src_attn.linear_k.bias | 
torch.Size([256]) | 256\n", - "decoder.decoders.1.src_attn.linear_v.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.1.src_attn.linear_v.bias | torch.Size([256]) | 256\n", - "decoder.decoders.1.src_attn.linear_out.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.1.src_attn.linear_out.bias | torch.Size([256]) | 256\n", - "decoder.decoders.1.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288\n", - "decoder.decoders.1.feed_forward.w_1.bias | torch.Size([2048]) | 2048\n", - "decoder.decoders.1.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288\n", - "decoder.decoders.1.feed_forward.w_2.bias | torch.Size([256]) | 256\n", - "decoder.decoders.1.norm1.weight | torch.Size([256]) | 256\n", - "decoder.decoders.1.norm1.bias | torch.Size([256]) | 256\n", - "decoder.decoders.1.norm2.weight | torch.Size([256]) | 256\n", - "decoder.decoders.1.norm2.bias | torch.Size([256]) | 256\n", - "decoder.decoders.1.norm3.weight | torch.Size([256]) | 256\n", - "decoder.decoders.1.norm3.bias | torch.Size([256]) | 256\n", - "decoder.decoders.1.concat_linear1.weight | torch.Size([256, 512]) | 131072\n", - "decoder.decoders.1.concat_linear1.bias | torch.Size([256]) | 256\n", - "decoder.decoders.1.concat_linear2.weight | torch.Size([256, 512]) | 131072\n", - "decoder.decoders.1.concat_linear2.bias | torch.Size([256]) | 256\n", - "decoder.decoders.2.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.2.self_attn.linear_q.bias | torch.Size([256]) | 256\n", - "decoder.decoders.2.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.2.self_attn.linear_k.bias | torch.Size([256]) | 256\n", - "decoder.decoders.2.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.2.self_attn.linear_v.bias | torch.Size([256]) | 256\n", - "decoder.decoders.2.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.2.self_attn.linear_out.bias | torch.Size([256]) | 256\n", - "decoder.decoders.2.src_attn.linear_q.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.2.src_attn.linear_q.bias | torch.Size([256]) | 256\n", - "decoder.decoders.2.src_attn.linear_k.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.2.src_attn.linear_k.bias | torch.Size([256]) | 256\n", - "decoder.decoders.2.src_attn.linear_v.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.2.src_attn.linear_v.bias | torch.Size([256]) | 256\n", - "decoder.decoders.2.src_attn.linear_out.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.2.src_attn.linear_out.bias | torch.Size([256]) | 256\n", - "decoder.decoders.2.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288\n", - "decoder.decoders.2.feed_forward.w_1.bias | torch.Size([2048]) | 2048\n", - "decoder.decoders.2.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288\n", - "decoder.decoders.2.feed_forward.w_2.bias | torch.Size([256]) | 256\n", - "decoder.decoders.2.norm1.weight | torch.Size([256]) | 256\n", - "decoder.decoders.2.norm1.bias | torch.Size([256]) | 256\n", - "decoder.decoders.2.norm2.weight | torch.Size([256]) | 256\n", - "decoder.decoders.2.norm2.bias | torch.Size([256]) | 256\n", - "decoder.decoders.2.norm3.weight | torch.Size([256]) | 256\n", - "decoder.decoders.2.norm3.bias | torch.Size([256]) | 256\n", - "decoder.decoders.2.concat_linear1.weight | torch.Size([256, 512]) | 131072\n", - "decoder.decoders.2.concat_linear1.bias | torch.Size([256]) | 256\n", - "decoder.decoders.2.concat_linear2.weight | 
torch.Size([256, 512]) | 131072\n", - "decoder.decoders.2.concat_linear2.bias | torch.Size([256]) | 256\n", - "decoder.decoders.3.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.3.self_attn.linear_q.bias | torch.Size([256]) | 256\n", - "decoder.decoders.3.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.3.self_attn.linear_k.bias | torch.Size([256]) | 256\n", - "decoder.decoders.3.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.3.self_attn.linear_v.bias | torch.Size([256]) | 256\n", - "decoder.decoders.3.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.3.self_attn.linear_out.bias | torch.Size([256]) | 256\n", - "decoder.decoders.3.src_attn.linear_q.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.3.src_attn.linear_q.bias | torch.Size([256]) | 256\n", - "decoder.decoders.3.src_attn.linear_k.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.3.src_attn.linear_k.bias | torch.Size([256]) | 256\n", - "decoder.decoders.3.src_attn.linear_v.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.3.src_attn.linear_v.bias | torch.Size([256]) | 256\n", - "decoder.decoders.3.src_attn.linear_out.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.3.src_attn.linear_out.bias | torch.Size([256]) | 256\n", - "decoder.decoders.3.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288\n", - "decoder.decoders.3.feed_forward.w_1.bias | torch.Size([2048]) | 2048\n", - "decoder.decoders.3.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288\n", - "decoder.decoders.3.feed_forward.w_2.bias | torch.Size([256]) | 256\n", - "decoder.decoders.3.norm1.weight | torch.Size([256]) | 256\n", - "decoder.decoders.3.norm1.bias | torch.Size([256]) | 256\n", - "decoder.decoders.3.norm2.weight | torch.Size([256]) | 256\n", - "decoder.decoders.3.norm2.bias | torch.Size([256]) | 256\n", - "decoder.decoders.3.norm3.weight | torch.Size([256]) | 256\n", - "decoder.decoders.3.norm3.bias | torch.Size([256]) | 256\n", - "decoder.decoders.3.concat_linear1.weight | torch.Size([256, 512]) | 131072\n", - "decoder.decoders.3.concat_linear1.bias | torch.Size([256]) | 256\n", - "decoder.decoders.3.concat_linear2.weight | torch.Size([256, 512]) | 131072\n", - "decoder.decoders.3.concat_linear2.bias | torch.Size([256]) | 256\n", - "decoder.decoders.4.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.4.self_attn.linear_q.bias | torch.Size([256]) | 256\n", - "decoder.decoders.4.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.4.self_attn.linear_k.bias | torch.Size([256]) | 256\n", - "decoder.decoders.4.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.4.self_attn.linear_v.bias | torch.Size([256]) | 256\n", - "decoder.decoders.4.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.4.self_attn.linear_out.bias | torch.Size([256]) | 256\n", - "decoder.decoders.4.src_attn.linear_q.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.4.src_attn.linear_q.bias | torch.Size([256]) | 256\n", - "decoder.decoders.4.src_attn.linear_k.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.4.src_attn.linear_k.bias | torch.Size([256]) | 256\n", - "decoder.decoders.4.src_attn.linear_v.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.4.src_attn.linear_v.bias | torch.Size([256]) | 256\n", - 
"decoder.decoders.4.src_attn.linear_out.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.4.src_attn.linear_out.bias | torch.Size([256]) | 256\n", - "decoder.decoders.4.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288\n", - "decoder.decoders.4.feed_forward.w_1.bias | torch.Size([2048]) | 2048\n", - "decoder.decoders.4.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288\n", - "decoder.decoders.4.feed_forward.w_2.bias | torch.Size([256]) | 256\n", - "decoder.decoders.4.norm1.weight | torch.Size([256]) | 256\n", - "decoder.decoders.4.norm1.bias | torch.Size([256]) | 256\n", - "decoder.decoders.4.norm2.weight | torch.Size([256]) | 256\n", - "decoder.decoders.4.norm2.bias | torch.Size([256]) | 256\n", - "decoder.decoders.4.norm3.weight | torch.Size([256]) | 256\n", - "decoder.decoders.4.norm3.bias | torch.Size([256]) | 256\n", - "decoder.decoders.4.concat_linear1.weight | torch.Size([256, 512]) | 131072\n", - "decoder.decoders.4.concat_linear1.bias | torch.Size([256]) | 256\n", - "decoder.decoders.4.concat_linear2.weight | torch.Size([256, 512]) | 131072\n", - "decoder.decoders.4.concat_linear2.bias | torch.Size([256]) | 256\n", - "decoder.decoders.5.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.5.self_attn.linear_q.bias | torch.Size([256]) | 256\n", - "decoder.decoders.5.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.5.self_attn.linear_k.bias | torch.Size([256]) | 256\n", - "decoder.decoders.5.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.5.self_attn.linear_v.bias | torch.Size([256]) | 256\n", - "decoder.decoders.5.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.5.self_attn.linear_out.bias | torch.Size([256]) | 256\n", - "decoder.decoders.5.src_attn.linear_q.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.5.src_attn.linear_q.bias | torch.Size([256]) | 256\n", - "decoder.decoders.5.src_attn.linear_k.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.5.src_attn.linear_k.bias | torch.Size([256]) | 256\n", - "decoder.decoders.5.src_attn.linear_v.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.5.src_attn.linear_v.bias | torch.Size([256]) | 256\n", - "decoder.decoders.5.src_attn.linear_out.weight | torch.Size([256, 256]) | 65536\n", - "decoder.decoders.5.src_attn.linear_out.bias | torch.Size([256]) | 256\n", - "decoder.decoders.5.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288\n", - "decoder.decoders.5.feed_forward.w_1.bias | torch.Size([2048]) | 2048\n", - "decoder.decoders.5.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288\n", - "decoder.decoders.5.feed_forward.w_2.bias | torch.Size([256]) | 256\n", - "decoder.decoders.5.norm1.weight | torch.Size([256]) | 256\n", - "decoder.decoders.5.norm1.bias | torch.Size([256]) | 256\n", - "decoder.decoders.5.norm2.weight | torch.Size([256]) | 256\n", - "decoder.decoders.5.norm2.bias | torch.Size([256]) | 256\n", - "decoder.decoders.5.norm3.weight | torch.Size([256]) | 256\n", - "decoder.decoders.5.norm3.bias | torch.Size([256]) | 256\n", - "decoder.decoders.5.concat_linear1.weight | torch.Size([256, 512]) | 131072\n", - "decoder.decoders.5.concat_linear1.bias | torch.Size([256]) | 256\n", - "decoder.decoders.5.concat_linear2.weight | torch.Size([256, 512]) | 131072\n", - "decoder.decoders.5.concat_linear2.bias | torch.Size([256]) | 256\n", - "ctc.ctc_lo.weight | torch.Size([4233, 256]) | 1083648\n", - "ctc.ctc_lo.bias | 
torch.Size([4233]) | 4233\n", - "Total parameters: 701, 49355454.0 elements.\n" - ] - } - ], - "source": [ - "summary(model)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8494c6ab", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "0648a969", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "encoder.embed.conv.0.weight | torch.Size([256, 1, 3, 3]) | 2304 | True\n", - "encoder.embed.conv.0.bias | torch.Size([256]) | 256 | True\n", - "encoder.embed.conv.2.weight | torch.Size([256, 256, 3, 3]) | 589824 | True\n", - "encoder.embed.conv.2.bias | torch.Size([256]) | 256 | True\n", - "encoder.embed.out.0.weight | torch.Size([256, 4864]) | 1245184 | True\n", - "encoder.embed.out.0.bias | torch.Size([256]) | 256 | True\n", - "encoder.after_norm.weight | torch.Size([256]) | 256 | True\n", - "encoder.after_norm.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.0.self_attn.pos_bias_u | torch.Size([4, 64]) | 256 | True\n", - "encoder.encoders.0.self_attn.pos_bias_v | torch.Size([4, 64]) | 256 | True\n", - "encoder.encoders.0.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.0.self_attn.linear_q.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.0.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.0.self_attn.linear_k.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.0.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.0.self_attn.linear_v.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.0.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.0.self_attn.linear_out.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.0.self_attn.linear_pos.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.0.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288 | True\n", - "encoder.encoders.0.feed_forward.w_1.bias | torch.Size([2048]) | 2048 | True\n", - "encoder.encoders.0.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288 | True\n", - "encoder.encoders.0.feed_forward.w_2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.0.feed_forward_macaron.w_1.weight | torch.Size([2048, 256]) | 524288 | True\n", - "encoder.encoders.0.feed_forward_macaron.w_1.bias | torch.Size([2048]) | 2048 | True\n", - "encoder.encoders.0.feed_forward_macaron.w_2.weight | torch.Size([256, 2048]) | 524288 | True\n", - "encoder.encoders.0.feed_forward_macaron.w_2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.0.conv_module.pointwise_conv1.weight | torch.Size([512, 256, 1]) | 131072 | True\n", - "encoder.encoders.0.conv_module.pointwise_conv1.bias | torch.Size([512]) | 512 | True\n", - "encoder.encoders.0.conv_module.depthwise_conv.weight | torch.Size([256, 1, 15]) | 3840 | True\n", - "encoder.encoders.0.conv_module.depthwise_conv.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.0.conv_module.norm.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.0.conv_module.norm.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.0.conv_module.pointwise_conv2.weight | torch.Size([256, 256, 1]) | 65536 | True\n", - "encoder.encoders.0.conv_module.pointwise_conv2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.0.norm_ff.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.0.norm_ff.bias | 
torch.Size([256]) | 256 | True\n", - "encoder.encoders.0.norm_mha.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.0.norm_mha.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.0.norm_ff_macaron.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.0.norm_ff_macaron.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.0.norm_conv.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.0.norm_conv.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.0.norm_final.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.0.norm_final.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.0.concat_linear.weight | torch.Size([256, 512]) | 131072 | True\n", - "encoder.encoders.0.concat_linear.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.1.self_attn.pos_bias_u | torch.Size([4, 64]) | 256 | True\n", - "encoder.encoders.1.self_attn.pos_bias_v | torch.Size([4, 64]) | 256 | True\n", - "encoder.encoders.1.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.1.self_attn.linear_q.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.1.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.1.self_attn.linear_k.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.1.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.1.self_attn.linear_v.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.1.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.1.self_attn.linear_out.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.1.self_attn.linear_pos.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.1.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288 | True\n", - "encoder.encoders.1.feed_forward.w_1.bias | torch.Size([2048]) | 2048 | True\n", - "encoder.encoders.1.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288 | True\n", - "encoder.encoders.1.feed_forward.w_2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.1.feed_forward_macaron.w_1.weight | torch.Size([2048, 256]) | 524288 | True\n", - "encoder.encoders.1.feed_forward_macaron.w_1.bias | torch.Size([2048]) | 2048 | True\n", - "encoder.encoders.1.feed_forward_macaron.w_2.weight | torch.Size([256, 2048]) | 524288 | True\n", - "encoder.encoders.1.feed_forward_macaron.w_2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.1.conv_module.pointwise_conv1.weight | torch.Size([512, 256, 1]) | 131072 | True\n", - "encoder.encoders.1.conv_module.pointwise_conv1.bias | torch.Size([512]) | 512 | True\n", - "encoder.encoders.1.conv_module.depthwise_conv.weight | torch.Size([256, 1, 15]) | 3840 | True\n", - "encoder.encoders.1.conv_module.depthwise_conv.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.1.conv_module.norm.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.1.conv_module.norm.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.1.conv_module.pointwise_conv2.weight | torch.Size([256, 256, 1]) | 65536 | True\n", - "encoder.encoders.1.conv_module.pointwise_conv2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.1.norm_ff.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.1.norm_ff.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.1.norm_mha.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.1.norm_mha.bias | torch.Size([256]) | 256 | True\n", - 
"encoder.encoders.1.norm_ff_macaron.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.1.norm_ff_macaron.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.1.norm_conv.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.1.norm_conv.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.1.norm_final.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.1.norm_final.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.1.concat_linear.weight | torch.Size([256, 512]) | 131072 | True\n", - "encoder.encoders.1.concat_linear.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.2.self_attn.pos_bias_u | torch.Size([4, 64]) | 256 | True\n", - "encoder.encoders.2.self_attn.pos_bias_v | torch.Size([4, 64]) | 256 | True\n", - "encoder.encoders.2.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.2.self_attn.linear_q.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.2.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.2.self_attn.linear_k.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.2.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.2.self_attn.linear_v.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.2.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.2.self_attn.linear_out.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.2.self_attn.linear_pos.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.2.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288 | True\n", - "encoder.encoders.2.feed_forward.w_1.bias | torch.Size([2048]) | 2048 | True\n", - "encoder.encoders.2.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288 | True\n", - "encoder.encoders.2.feed_forward.w_2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.2.feed_forward_macaron.w_1.weight | torch.Size([2048, 256]) | 524288 | True\n", - "encoder.encoders.2.feed_forward_macaron.w_1.bias | torch.Size([2048]) | 2048 | True\n", - "encoder.encoders.2.feed_forward_macaron.w_2.weight | torch.Size([256, 2048]) | 524288 | True\n", - "encoder.encoders.2.feed_forward_macaron.w_2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.2.conv_module.pointwise_conv1.weight | torch.Size([512, 256, 1]) | 131072 | True\n", - "encoder.encoders.2.conv_module.pointwise_conv1.bias | torch.Size([512]) | 512 | True\n", - "encoder.encoders.2.conv_module.depthwise_conv.weight | torch.Size([256, 1, 15]) | 3840 | True\n", - "encoder.encoders.2.conv_module.depthwise_conv.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.2.conv_module.norm.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.2.conv_module.norm.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.2.conv_module.pointwise_conv2.weight | torch.Size([256, 256, 1]) | 65536 | True\n", - "encoder.encoders.2.conv_module.pointwise_conv2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.2.norm_ff.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.2.norm_ff.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.2.norm_mha.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.2.norm_mha.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.2.norm_ff_macaron.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.2.norm_ff_macaron.bias | torch.Size([256]) | 256 | True\n", - 
"encoder.encoders.2.norm_conv.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.2.norm_conv.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.2.norm_final.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.2.norm_final.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.2.concat_linear.weight | torch.Size([256, 512]) | 131072 | True\n", - "encoder.encoders.2.concat_linear.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.3.self_attn.pos_bias_u | torch.Size([4, 64]) | 256 | True\n", - "encoder.encoders.3.self_attn.pos_bias_v | torch.Size([4, 64]) | 256 | True\n", - "encoder.encoders.3.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.3.self_attn.linear_q.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.3.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.3.self_attn.linear_k.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.3.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.3.self_attn.linear_v.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.3.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.3.self_attn.linear_out.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.3.self_attn.linear_pos.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.3.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288 | True\n", - "encoder.encoders.3.feed_forward.w_1.bias | torch.Size([2048]) | 2048 | True\n", - "encoder.encoders.3.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288 | True\n", - "encoder.encoders.3.feed_forward.w_2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.3.feed_forward_macaron.w_1.weight | torch.Size([2048, 256]) | 524288 | True\n", - "encoder.encoders.3.feed_forward_macaron.w_1.bias | torch.Size([2048]) | 2048 | True\n", - "encoder.encoders.3.feed_forward_macaron.w_2.weight | torch.Size([256, 2048]) | 524288 | True\n", - "encoder.encoders.3.feed_forward_macaron.w_2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.3.conv_module.pointwise_conv1.weight | torch.Size([512, 256, 1]) | 131072 | True\n", - "encoder.encoders.3.conv_module.pointwise_conv1.bias | torch.Size([512]) | 512 | True\n", - "encoder.encoders.3.conv_module.depthwise_conv.weight | torch.Size([256, 1, 15]) | 3840 | True\n", - "encoder.encoders.3.conv_module.depthwise_conv.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.3.conv_module.norm.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.3.conv_module.norm.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.3.conv_module.pointwise_conv2.weight | torch.Size([256, 256, 1]) | 65536 | True\n", - "encoder.encoders.3.conv_module.pointwise_conv2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.3.norm_ff.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.3.norm_ff.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.3.norm_mha.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.3.norm_mha.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.3.norm_ff_macaron.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.3.norm_ff_macaron.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.3.norm_conv.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.3.norm_conv.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.3.norm_final.weight | 
torch.Size([256]) | 256 | True\n", - "encoder.encoders.3.norm_final.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.3.concat_linear.weight | torch.Size([256, 512]) | 131072 | True\n", - "encoder.encoders.3.concat_linear.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.4.self_attn.pos_bias_u | torch.Size([4, 64]) | 256 | True\n", - "encoder.encoders.4.self_attn.pos_bias_v | torch.Size([4, 64]) | 256 | True\n", - "encoder.encoders.4.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.4.self_attn.linear_q.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.4.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.4.self_attn.linear_k.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.4.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.4.self_attn.linear_v.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.4.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.4.self_attn.linear_out.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.4.self_attn.linear_pos.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.4.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288 | True\n", - "encoder.encoders.4.feed_forward.w_1.bias | torch.Size([2048]) | 2048 | True\n", - "encoder.encoders.4.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288 | True\n", - "encoder.encoders.4.feed_forward.w_2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.4.feed_forward_macaron.w_1.weight | torch.Size([2048, 256]) | 524288 | True\n", - "encoder.encoders.4.feed_forward_macaron.w_1.bias | torch.Size([2048]) | 2048 | True\n", - "encoder.encoders.4.feed_forward_macaron.w_2.weight | torch.Size([256, 2048]) | 524288 | True\n", - "encoder.encoders.4.feed_forward_macaron.w_2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.4.conv_module.pointwise_conv1.weight | torch.Size([512, 256, 1]) | 131072 | True\n", - "encoder.encoders.4.conv_module.pointwise_conv1.bias | torch.Size([512]) | 512 | True\n", - "encoder.encoders.4.conv_module.depthwise_conv.weight | torch.Size([256, 1, 15]) | 3840 | True\n", - "encoder.encoders.4.conv_module.depthwise_conv.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.4.conv_module.norm.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.4.conv_module.norm.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.4.conv_module.pointwise_conv2.weight | torch.Size([256, 256, 1]) | 65536 | True\n", - "encoder.encoders.4.conv_module.pointwise_conv2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.4.norm_ff.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.4.norm_ff.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.4.norm_mha.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.4.norm_mha.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.4.norm_ff_macaron.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.4.norm_ff_macaron.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.4.norm_conv.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.4.norm_conv.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.4.norm_final.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.4.norm_final.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.4.concat_linear.weight | torch.Size([256, 512]) | 131072 | 
True\n", - "encoder.encoders.4.concat_linear.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.5.self_attn.pos_bias_u | torch.Size([4, 64]) | 256 | True\n", - "encoder.encoders.5.self_attn.pos_bias_v | torch.Size([4, 64]) | 256 | True\n", - "encoder.encoders.5.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.5.self_attn.linear_q.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.5.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.5.self_attn.linear_k.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.5.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.5.self_attn.linear_v.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.5.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.5.self_attn.linear_out.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.5.self_attn.linear_pos.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.5.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288 | True\n", - "encoder.encoders.5.feed_forward.w_1.bias | torch.Size([2048]) | 2048 | True\n", - "encoder.encoders.5.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288 | True\n", - "encoder.encoders.5.feed_forward.w_2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.5.feed_forward_macaron.w_1.weight | torch.Size([2048, 256]) | 524288 | True\n", - "encoder.encoders.5.feed_forward_macaron.w_1.bias | torch.Size([2048]) | 2048 | True\n", - "encoder.encoders.5.feed_forward_macaron.w_2.weight | torch.Size([256, 2048]) | 524288 | True\n", - "encoder.encoders.5.feed_forward_macaron.w_2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.5.conv_module.pointwise_conv1.weight | torch.Size([512, 256, 1]) | 131072 | True\n", - "encoder.encoders.5.conv_module.pointwise_conv1.bias | torch.Size([512]) | 512 | True\n", - "encoder.encoders.5.conv_module.depthwise_conv.weight | torch.Size([256, 1, 15]) | 3840 | True\n", - "encoder.encoders.5.conv_module.depthwise_conv.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.5.conv_module.norm.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.5.conv_module.norm.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.5.conv_module.pointwise_conv2.weight | torch.Size([256, 256, 1]) | 65536 | True\n", - "encoder.encoders.5.conv_module.pointwise_conv2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.5.norm_ff.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.5.norm_ff.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.5.norm_mha.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.5.norm_mha.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.5.norm_ff_macaron.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.5.norm_ff_macaron.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.5.norm_conv.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.5.norm_conv.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.5.norm_final.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.5.norm_final.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.5.concat_linear.weight | torch.Size([256, 512]) | 131072 | True\n", - "encoder.encoders.5.concat_linear.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.6.self_attn.pos_bias_u | torch.Size([4, 64]) | 256 | True\n", - 
"encoder.encoders.6.self_attn.pos_bias_v | torch.Size([4, 64]) | 256 | True\n", - "encoder.encoders.6.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.6.self_attn.linear_q.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.6.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.6.self_attn.linear_k.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.6.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.6.self_attn.linear_v.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.6.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.6.self_attn.linear_out.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.6.self_attn.linear_pos.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.6.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288 | True\n", - "encoder.encoders.6.feed_forward.w_1.bias | torch.Size([2048]) | 2048 | True\n", - "encoder.encoders.6.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288 | True\n", - "encoder.encoders.6.feed_forward.w_2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.6.feed_forward_macaron.w_1.weight | torch.Size([2048, 256]) | 524288 | True\n", - "encoder.encoders.6.feed_forward_macaron.w_1.bias | torch.Size([2048]) | 2048 | True\n", - "encoder.encoders.6.feed_forward_macaron.w_2.weight | torch.Size([256, 2048]) | 524288 | True\n", - "encoder.encoders.6.feed_forward_macaron.w_2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.6.conv_module.pointwise_conv1.weight | torch.Size([512, 256, 1]) | 131072 | True\n", - "encoder.encoders.6.conv_module.pointwise_conv1.bias | torch.Size([512]) | 512 | True\n", - "encoder.encoders.6.conv_module.depthwise_conv.weight | torch.Size([256, 1, 15]) | 3840 | True\n", - "encoder.encoders.6.conv_module.depthwise_conv.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.6.conv_module.norm.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.6.conv_module.norm.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.6.conv_module.pointwise_conv2.weight | torch.Size([256, 256, 1]) | 65536 | True\n", - "encoder.encoders.6.conv_module.pointwise_conv2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.6.norm_ff.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.6.norm_ff.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.6.norm_mha.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.6.norm_mha.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.6.norm_ff_macaron.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.6.norm_ff_macaron.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.6.norm_conv.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.6.norm_conv.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.6.norm_final.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.6.norm_final.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.6.concat_linear.weight | torch.Size([256, 512]) | 131072 | True\n", - "encoder.encoders.6.concat_linear.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.7.self_attn.pos_bias_u | torch.Size([4, 64]) | 256 | True\n", - "encoder.encoders.7.self_attn.pos_bias_v | torch.Size([4, 64]) | 256 | True\n", - "encoder.encoders.7.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536 | True\n", - 
"encoder.encoders.7.self_attn.linear_q.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.7.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.7.self_attn.linear_k.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.7.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.7.self_attn.linear_v.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.7.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.7.self_attn.linear_out.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.7.self_attn.linear_pos.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.7.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288 | True\n", - "encoder.encoders.7.feed_forward.w_1.bias | torch.Size([2048]) | 2048 | True\n", - "encoder.encoders.7.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288 | True\n", - "encoder.encoders.7.feed_forward.w_2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.7.feed_forward_macaron.w_1.weight | torch.Size([2048, 256]) | 524288 | True\n", - "encoder.encoders.7.feed_forward_macaron.w_1.bias | torch.Size([2048]) | 2048 | True\n", - "encoder.encoders.7.feed_forward_macaron.w_2.weight | torch.Size([256, 2048]) | 524288 | True\n", - "encoder.encoders.7.feed_forward_macaron.w_2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.7.conv_module.pointwise_conv1.weight | torch.Size([512, 256, 1]) | 131072 | True\n", - "encoder.encoders.7.conv_module.pointwise_conv1.bias | torch.Size([512]) | 512 | True\n", - "encoder.encoders.7.conv_module.depthwise_conv.weight | torch.Size([256, 1, 15]) | 3840 | True\n", - "encoder.encoders.7.conv_module.depthwise_conv.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.7.conv_module.norm.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.7.conv_module.norm.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.7.conv_module.pointwise_conv2.weight | torch.Size([256, 256, 1]) | 65536 | True\n", - "encoder.encoders.7.conv_module.pointwise_conv2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.7.norm_ff.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.7.norm_ff.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.7.norm_mha.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.7.norm_mha.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.7.norm_ff_macaron.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.7.norm_ff_macaron.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.7.norm_conv.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.7.norm_conv.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.7.norm_final.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.7.norm_final.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.7.concat_linear.weight | torch.Size([256, 512]) | 131072 | True\n", - "encoder.encoders.7.concat_linear.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.8.self_attn.pos_bias_u | torch.Size([4, 64]) | 256 | True\n", - "encoder.encoders.8.self_attn.pos_bias_v | torch.Size([4, 64]) | 256 | True\n", - "encoder.encoders.8.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.8.self_attn.linear_q.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.8.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536 | True\n", - 
"encoder.encoders.8.self_attn.linear_k.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.8.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.8.self_attn.linear_v.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.8.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.8.self_attn.linear_out.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.8.self_attn.linear_pos.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.8.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288 | True\n", - "encoder.encoders.8.feed_forward.w_1.bias | torch.Size([2048]) | 2048 | True\n", - "encoder.encoders.8.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288 | True\n", - "encoder.encoders.8.feed_forward.w_2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.8.feed_forward_macaron.w_1.weight | torch.Size([2048, 256]) | 524288 | True\n", - "encoder.encoders.8.feed_forward_macaron.w_1.bias | torch.Size([2048]) | 2048 | True\n", - "encoder.encoders.8.feed_forward_macaron.w_2.weight | torch.Size([256, 2048]) | 524288 | True\n", - "encoder.encoders.8.feed_forward_macaron.w_2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.8.conv_module.pointwise_conv1.weight | torch.Size([512, 256, 1]) | 131072 | True\n", - "encoder.encoders.8.conv_module.pointwise_conv1.bias | torch.Size([512]) | 512 | True\n", - "encoder.encoders.8.conv_module.depthwise_conv.weight | torch.Size([256, 1, 15]) | 3840 | True\n", - "encoder.encoders.8.conv_module.depthwise_conv.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.8.conv_module.norm.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.8.conv_module.norm.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.8.conv_module.pointwise_conv2.weight | torch.Size([256, 256, 1]) | 65536 | True\n", - "encoder.encoders.8.conv_module.pointwise_conv2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.8.norm_ff.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.8.norm_ff.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.8.norm_mha.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.8.norm_mha.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.8.norm_ff_macaron.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.8.norm_ff_macaron.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.8.norm_conv.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.8.norm_conv.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.8.norm_final.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.8.norm_final.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.8.concat_linear.weight | torch.Size([256, 512]) | 131072 | True\n", - "encoder.encoders.8.concat_linear.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.9.self_attn.pos_bias_u | torch.Size([4, 64]) | 256 | True\n", - "encoder.encoders.9.self_attn.pos_bias_v | torch.Size([4, 64]) | 256 | True\n", - "encoder.encoders.9.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.9.self_attn.linear_q.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.9.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.9.self_attn.linear_k.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.9.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536 | True\n", - 
"encoder.encoders.9.self_attn.linear_v.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.9.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.9.self_attn.linear_out.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.9.self_attn.linear_pos.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.9.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288 | True\n", - "encoder.encoders.9.feed_forward.w_1.bias | torch.Size([2048]) | 2048 | True\n", - "encoder.encoders.9.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288 | True\n", - "encoder.encoders.9.feed_forward.w_2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.9.feed_forward_macaron.w_1.weight | torch.Size([2048, 256]) | 524288 | True\n", - "encoder.encoders.9.feed_forward_macaron.w_1.bias | torch.Size([2048]) | 2048 | True\n", - "encoder.encoders.9.feed_forward_macaron.w_2.weight | torch.Size([256, 2048]) | 524288 | True\n", - "encoder.encoders.9.feed_forward_macaron.w_2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.9.conv_module.pointwise_conv1.weight | torch.Size([512, 256, 1]) | 131072 | True\n", - "encoder.encoders.9.conv_module.pointwise_conv1.bias | torch.Size([512]) | 512 | True\n", - "encoder.encoders.9.conv_module.depthwise_conv.weight | torch.Size([256, 1, 15]) | 3840 | True\n", - "encoder.encoders.9.conv_module.depthwise_conv.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.9.conv_module.norm.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.9.conv_module.norm.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.9.conv_module.pointwise_conv2.weight | torch.Size([256, 256, 1]) | 65536 | True\n", - "encoder.encoders.9.conv_module.pointwise_conv2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.9.norm_ff.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.9.norm_ff.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.9.norm_mha.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.9.norm_mha.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.9.norm_ff_macaron.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.9.norm_ff_macaron.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.9.norm_conv.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.9.norm_conv.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.9.norm_final.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.9.norm_final.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.9.concat_linear.weight | torch.Size([256, 512]) | 131072 | True\n", - "encoder.encoders.9.concat_linear.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.10.self_attn.pos_bias_u | torch.Size([4, 64]) | 256 | True\n", - "encoder.encoders.10.self_attn.pos_bias_v | torch.Size([4, 64]) | 256 | True\n", - "encoder.encoders.10.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.10.self_attn.linear_q.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.10.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.10.self_attn.linear_k.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.10.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.10.self_attn.linear_v.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.10.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536 | True\n", - 
"encoder.encoders.10.self_attn.linear_out.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.10.self_attn.linear_pos.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.10.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288 | True\n", - "encoder.encoders.10.feed_forward.w_1.bias | torch.Size([2048]) | 2048 | True\n", - "encoder.encoders.10.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288 | True\n", - "encoder.encoders.10.feed_forward.w_2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.10.feed_forward_macaron.w_1.weight | torch.Size([2048, 256]) | 524288 | True\n", - "encoder.encoders.10.feed_forward_macaron.w_1.bias | torch.Size([2048]) | 2048 | True\n", - "encoder.encoders.10.feed_forward_macaron.w_2.weight | torch.Size([256, 2048]) | 524288 | True\n", - "encoder.encoders.10.feed_forward_macaron.w_2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.10.conv_module.pointwise_conv1.weight | torch.Size([512, 256, 1]) | 131072 | True\n", - "encoder.encoders.10.conv_module.pointwise_conv1.bias | torch.Size([512]) | 512 | True\n", - "encoder.encoders.10.conv_module.depthwise_conv.weight | torch.Size([256, 1, 15]) | 3840 | True\n", - "encoder.encoders.10.conv_module.depthwise_conv.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.10.conv_module.norm.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.10.conv_module.norm.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.10.conv_module.pointwise_conv2.weight | torch.Size([256, 256, 1]) | 65536 | True\n", - "encoder.encoders.10.conv_module.pointwise_conv2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.10.norm_ff.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.10.norm_ff.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.10.norm_mha.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.10.norm_mha.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.10.norm_ff_macaron.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.10.norm_ff_macaron.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.10.norm_conv.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.10.norm_conv.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.10.norm_final.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.10.norm_final.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.10.concat_linear.weight | torch.Size([256, 512]) | 131072 | True\n", - "encoder.encoders.10.concat_linear.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.11.self_attn.pos_bias_u | torch.Size([4, 64]) | 256 | True\n", - "encoder.encoders.11.self_attn.pos_bias_v | torch.Size([4, 64]) | 256 | True\n", - "encoder.encoders.11.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.11.self_attn.linear_q.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.11.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.11.self_attn.linear_k.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.11.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.11.self_attn.linear_v.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.11.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.11.self_attn.linear_out.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.11.self_attn.linear_pos.weight | 
torch.Size([256, 256]) | 65536 | True\n", - "encoder.encoders.11.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288 | True\n", - "encoder.encoders.11.feed_forward.w_1.bias | torch.Size([2048]) | 2048 | True\n", - "encoder.encoders.11.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288 | True\n", - "encoder.encoders.11.feed_forward.w_2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.11.feed_forward_macaron.w_1.weight | torch.Size([2048, 256]) | 524288 | True\n", - "encoder.encoders.11.feed_forward_macaron.w_1.bias | torch.Size([2048]) | 2048 | True\n", - "encoder.encoders.11.feed_forward_macaron.w_2.weight | torch.Size([256, 2048]) | 524288 | True\n", - "encoder.encoders.11.feed_forward_macaron.w_2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.11.conv_module.pointwise_conv1.weight | torch.Size([512, 256, 1]) | 131072 | True\n", - "encoder.encoders.11.conv_module.pointwise_conv1.bias | torch.Size([512]) | 512 | True\n", - "encoder.encoders.11.conv_module.depthwise_conv.weight | torch.Size([256, 1, 15]) | 3840 | True\n", - "encoder.encoders.11.conv_module.depthwise_conv.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.11.conv_module.norm.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.11.conv_module.norm.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.11.conv_module.pointwise_conv2.weight | torch.Size([256, 256, 1]) | 65536 | True\n", - "encoder.encoders.11.conv_module.pointwise_conv2.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.11.norm_ff.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.11.norm_ff.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.11.norm_mha.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.11.norm_mha.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.11.norm_ff_macaron.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.11.norm_ff_macaron.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.11.norm_conv.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.11.norm_conv.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.11.norm_final.weight | torch.Size([256]) | 256 | True\n", - "encoder.encoders.11.norm_final.bias | torch.Size([256]) | 256 | True\n", - "encoder.encoders.11.concat_linear.weight | torch.Size([256, 512]) | 131072 | True\n", - "encoder.encoders.11.concat_linear.bias | torch.Size([256]) | 256 | True\n", - "decoder.embed.0.weight | torch.Size([4233, 256]) | 1083648 | True\n", - "decoder.after_norm.weight | torch.Size([256]) | 256 | True\n", - "decoder.after_norm.bias | torch.Size([256]) | 256 | True\n", - "decoder.output_layer.weight | torch.Size([4233, 256]) | 1083648 | True\n", - "decoder.output_layer.bias | torch.Size([4233]) | 4233 | True\n", - "decoder.decoders.0.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.0.self_attn.linear_q.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.0.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.0.self_attn.linear_k.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.0.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.0.self_attn.linear_v.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.0.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.0.self_attn.linear_out.bias | torch.Size([256]) | 256 | True\n", - 
"decoder.decoders.0.src_attn.linear_q.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.0.src_attn.linear_q.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.0.src_attn.linear_k.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.0.src_attn.linear_k.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.0.src_attn.linear_v.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.0.src_attn.linear_v.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.0.src_attn.linear_out.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.0.src_attn.linear_out.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.0.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288 | True\n", - "decoder.decoders.0.feed_forward.w_1.bias | torch.Size([2048]) | 2048 | True\n", - "decoder.decoders.0.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288 | True\n", - "decoder.decoders.0.feed_forward.w_2.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.0.norm1.weight | torch.Size([256]) | 256 | True\n", - "decoder.decoders.0.norm1.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.0.norm2.weight | torch.Size([256]) | 256 | True\n", - "decoder.decoders.0.norm2.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.0.norm3.weight | torch.Size([256]) | 256 | True\n", - "decoder.decoders.0.norm3.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.0.concat_linear1.weight | torch.Size([256, 512]) | 131072 | True\n", - "decoder.decoders.0.concat_linear1.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.0.concat_linear2.weight | torch.Size([256, 512]) | 131072 | True\n", - "decoder.decoders.0.concat_linear2.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.1.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.1.self_attn.linear_q.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.1.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.1.self_attn.linear_k.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.1.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.1.self_attn.linear_v.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.1.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.1.self_attn.linear_out.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.1.src_attn.linear_q.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.1.src_attn.linear_q.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.1.src_attn.linear_k.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.1.src_attn.linear_k.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.1.src_attn.linear_v.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.1.src_attn.linear_v.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.1.src_attn.linear_out.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.1.src_attn.linear_out.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.1.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288 | True\n", - "decoder.decoders.1.feed_forward.w_1.bias | torch.Size([2048]) | 2048 | True\n", - "decoder.decoders.1.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288 | True\n", - "decoder.decoders.1.feed_forward.w_2.bias | torch.Size([256]) | 256 | True\n", - 
"decoder.decoders.1.norm1.weight | torch.Size([256]) | 256 | True\n", - "decoder.decoders.1.norm1.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.1.norm2.weight | torch.Size([256]) | 256 | True\n", - "decoder.decoders.1.norm2.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.1.norm3.weight | torch.Size([256]) | 256 | True\n", - "decoder.decoders.1.norm3.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.1.concat_linear1.weight | torch.Size([256, 512]) | 131072 | True\n", - "decoder.decoders.1.concat_linear1.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.1.concat_linear2.weight | torch.Size([256, 512]) | 131072 | True\n", - "decoder.decoders.1.concat_linear2.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.2.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.2.self_attn.linear_q.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.2.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.2.self_attn.linear_k.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.2.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.2.self_attn.linear_v.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.2.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.2.self_attn.linear_out.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.2.src_attn.linear_q.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.2.src_attn.linear_q.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.2.src_attn.linear_k.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.2.src_attn.linear_k.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.2.src_attn.linear_v.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.2.src_attn.linear_v.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.2.src_attn.linear_out.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.2.src_attn.linear_out.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.2.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288 | True\n", - "decoder.decoders.2.feed_forward.w_1.bias | torch.Size([2048]) | 2048 | True\n", - "decoder.decoders.2.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288 | True\n", - "decoder.decoders.2.feed_forward.w_2.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.2.norm1.weight | torch.Size([256]) | 256 | True\n", - "decoder.decoders.2.norm1.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.2.norm2.weight | torch.Size([256]) | 256 | True\n", - "decoder.decoders.2.norm2.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.2.norm3.weight | torch.Size([256]) | 256 | True\n", - "decoder.decoders.2.norm3.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.2.concat_linear1.weight | torch.Size([256, 512]) | 131072 | True\n", - "decoder.decoders.2.concat_linear1.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.2.concat_linear2.weight | torch.Size([256, 512]) | 131072 | True\n", - "decoder.decoders.2.concat_linear2.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.3.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.3.self_attn.linear_q.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.3.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536 | True\n", - 
"decoder.decoders.3.self_attn.linear_k.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.3.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.3.self_attn.linear_v.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.3.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.3.self_attn.linear_out.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.3.src_attn.linear_q.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.3.src_attn.linear_q.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.3.src_attn.linear_k.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.3.src_attn.linear_k.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.3.src_attn.linear_v.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.3.src_attn.linear_v.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.3.src_attn.linear_out.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.3.src_attn.linear_out.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.3.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288 | True\n", - "decoder.decoders.3.feed_forward.w_1.bias | torch.Size([2048]) | 2048 | True\n", - "decoder.decoders.3.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288 | True\n", - "decoder.decoders.3.feed_forward.w_2.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.3.norm1.weight | torch.Size([256]) | 256 | True\n", - "decoder.decoders.3.norm1.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.3.norm2.weight | torch.Size([256]) | 256 | True\n", - "decoder.decoders.3.norm2.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.3.norm3.weight | torch.Size([256]) | 256 | True\n", - "decoder.decoders.3.norm3.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.3.concat_linear1.weight | torch.Size([256, 512]) | 131072 | True\n", - "decoder.decoders.3.concat_linear1.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.3.concat_linear2.weight | torch.Size([256, 512]) | 131072 | True\n", - "decoder.decoders.3.concat_linear2.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.4.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.4.self_attn.linear_q.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.4.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.4.self_attn.linear_k.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.4.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.4.self_attn.linear_v.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.4.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.4.self_attn.linear_out.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.4.src_attn.linear_q.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.4.src_attn.linear_q.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.4.src_attn.linear_k.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.4.src_attn.linear_k.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.4.src_attn.linear_v.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.4.src_attn.linear_v.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.4.src_attn.linear_out.weight | torch.Size([256, 256]) | 65536 | 
True\n", - "decoder.decoders.4.src_attn.linear_out.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.4.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288 | True\n", - "decoder.decoders.4.feed_forward.w_1.bias | torch.Size([2048]) | 2048 | True\n", - "decoder.decoders.4.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288 | True\n", - "decoder.decoders.4.feed_forward.w_2.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.4.norm1.weight | torch.Size([256]) | 256 | True\n", - "decoder.decoders.4.norm1.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.4.norm2.weight | torch.Size([256]) | 256 | True\n", - "decoder.decoders.4.norm2.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.4.norm3.weight | torch.Size([256]) | 256 | True\n", - "decoder.decoders.4.norm3.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.4.concat_linear1.weight | torch.Size([256, 512]) | 131072 | True\n", - "decoder.decoders.4.concat_linear1.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.4.concat_linear2.weight | torch.Size([256, 512]) | 131072 | True\n", - "decoder.decoders.4.concat_linear2.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.5.self_attn.linear_q.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.5.self_attn.linear_q.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.5.self_attn.linear_k.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.5.self_attn.linear_k.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.5.self_attn.linear_v.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.5.self_attn.linear_v.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.5.self_attn.linear_out.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.5.self_attn.linear_out.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.5.src_attn.linear_q.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.5.src_attn.linear_q.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.5.src_attn.linear_k.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.5.src_attn.linear_k.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.5.src_attn.linear_v.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.5.src_attn.linear_v.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.5.src_attn.linear_out.weight | torch.Size([256, 256]) | 65536 | True\n", - "decoder.decoders.5.src_attn.linear_out.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.5.feed_forward.w_1.weight | torch.Size([2048, 256]) | 524288 | True\n", - "decoder.decoders.5.feed_forward.w_1.bias | torch.Size([2048]) | 2048 | True\n", - "decoder.decoders.5.feed_forward.w_2.weight | torch.Size([256, 2048]) | 524288 | True\n", - "decoder.decoders.5.feed_forward.w_2.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.5.norm1.weight | torch.Size([256]) | 256 | True\n", - "decoder.decoders.5.norm1.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.5.norm2.weight | torch.Size([256]) | 256 | True\n", - "decoder.decoders.5.norm2.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.5.norm3.weight | torch.Size([256]) | 256 | True\n", - "decoder.decoders.5.norm3.bias | torch.Size([256]) | 256 | True\n", - "decoder.decoders.5.concat_linear1.weight | torch.Size([256, 512]) | 131072 | True\n", - "decoder.decoders.5.concat_linear1.bias | torch.Size([256]) | 256 | True\n", - 
"decoder.decoders.5.concat_linear2.weight | torch.Size([256, 512]) | 131072 | True\n", - "decoder.decoders.5.concat_linear2.bias | torch.Size([256]) | 256 | True\n", - "ctc.ctc_lo.weight | torch.Size([4233, 256]) | 1083648 | True\n", - "ctc.ctc_lo.bias | torch.Size([4233]) | 4233 | True\n", - "Total parameters: 663.0, 49349138.0 elements.\n" - ] - } - ], - "source": [ - "print_params(model)" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "id": "5ad6de2a", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "['BAC009S0739W0246', 'BAC009S0727W0424', 'BAC009S0753W0412', 'BAC009S0756W0206', 'BAC009S0740W0414', 'BAC009S0728W0426', 'BAC009S0739W0214', 'BAC009S0753W0423', 'BAC009S0734W0201', 'BAC009S0740W0427', 'BAC009S0730W0423', 'BAC009S0728W0367', 'BAC009S0730W0418', 'BAC009S0727W0157', 'BAC009S0749W0409', 'BAC009S0727W0418']\n", - "torch.Size([16, 207, 80])\n", - "tensor([[[ 8.9946, 9.5383, 9.1916, ..., 10.5074, 9.5633, 8.2564],\n", - " [ 9.7988, 10.4052, 9.2651, ..., 10.2512, 9.5440, 8.8738],\n", - " [10.6891, 10.3955, 8.0535, ..., 9.9067, 10.0649, 8.0509],\n", - " ...,\n", - " [ 9.2180, 9.6507, 8.5053, ..., 9.6872, 8.7425, 7.9865],\n", - " [10.1291, 9.9352, 9.3798, ..., 9.5639, 9.8260, 8.9795],\n", - " [ 9.0955, 7.1338, 9.4680, ..., 9.4727, 9.0212, 7.4479]],\n", - "\n", - " [[11.4310, 10.6719, 6.0841, ..., 9.3827, 8.7297, 7.5316],\n", - " [ 9.7317, 7.8105, 7.5715, ..., 10.0430, 9.2436, 7.3541],\n", - " [10.6502, 10.6006, 8.4678, ..., 9.2814, 9.1869, 8.0703],\n", - " ...,\n", - " [ 9.0970, 9.2637, 8.0753, ..., 8.4318, 8.3705, 8.0029],\n", - " [10.4617, 10.1478, 6.7693, ..., 9.7794, 9.5775, 8.0807],\n", - " [ 7.7944, 5.6211, 7.9751, ..., 9.9972, 9.8497, 8.0313]],\n", - "\n", - " [[ 7.3456, 7.8964, 7.5796, ..., 11.6310, 10.4513, 9.1236],\n", - " [ 8.6287, 8.4631, 7.4992, ..., 12.4160, 10.9757, 8.9426],\n", - " [ 9.8314, 10.2813, 8.9724, ..., 12.1387, 10.4017, 9.0055],\n", - " ...,\n", - " [ 7.0896, 7.4055, 6.8143, ..., 9.3252, 9.2732, 8.3534],\n", - " [ 0.0000, 0.0000, 0.0000, ..., 0.0000, 0.0000, 0.0000],\n", - " [ 0.0000, 0.0000, 0.0000, ..., 0.0000, 0.0000, 0.0000]],\n", - "\n", - " ...,\n", - "\n", - " [[10.9332, 10.4644, 7.7203, ..., 10.3488, 9.3023, 7.1553],\n", - " [10.4499, 9.9070, 9.0293, ..., 9.9525, 9.4141, 7.5593],\n", - " [10.4877, 9.8126, 9.8952, ..., 9.5866, 9.3413, 7.7849],\n", - " ...,\n", - " [ 0.0000, 0.0000, 0.0000, ..., 0.0000, 0.0000, 0.0000],\n", - " [ 0.0000, 0.0000, 0.0000, ..., 0.0000, 0.0000, 0.0000],\n", - " [ 0.0000, 0.0000, 0.0000, ..., 0.0000, 0.0000, 0.0000]],\n", - "\n", - " [[ 9.9444, 9.5859, 8.2203, ..., 11.5886, 11.0450, 8.8171],\n", - " [ 7.6784, 8.3224, 7.5330, ..., 11.0551, 10.5357, 9.2746],\n", - " [ 8.6262, 9.6759, 9.8410, ..., 11.3788, 10.9221, 8.9914],\n", - " ...,\n", - " [ 0.0000, 0.0000, 0.0000, ..., 0.0000, 0.0000, 0.0000],\n", - " [ 0.0000, 0.0000, 0.0000, ..., 0.0000, 0.0000, 0.0000],\n", - " [ 0.0000, 0.0000, 0.0000, ..., 0.0000, 0.0000, 0.0000]],\n", - "\n", - " [[ 8.1079, 7.7590, 6.7103, ..., 12.6506, 11.4662, 11.0615],\n", - " [11.3803, 11.2220, 8.6589, ..., 12.8106, 12.2222, 11.6893],\n", - " [10.6777, 9.9206, 8.0461, ..., 13.5729, 12.5624, 11.1550],\n", - " ...,\n", - " [ 0.0000, 0.0000, 0.0000, ..., 0.0000, 0.0000, 0.0000],\n", - " [ 0.0000, 0.0000, 0.0000, ..., 0.0000, 0.0000, 0.0000],\n", - " [ 0.0000, 0.0000, 0.0000, ..., 0.0000, 0.0000, 0.0000]]])\n", - "tensor([207, 207, 205, 205, 203, 203, 198, 197, 195, 188, 186, 186, 185, 180,\n", - " 166, 163], dtype=torch.int32)\n", - 
"tensor([[2995, 3116, 1209, 565, -1, -1],\n", - " [ 236, 1176, 331, 66, 3925, 4077],\n", - " [2693, 524, 234, 1145, 366, -1],\n", - " [3875, 4211, 3062, 700, -1, -1],\n", - " [ 272, 987, 1134, 494, 2959, -1],\n", - " [1936, 3715, 120, 2553, 2695, 2710],\n", - " [ 25, 1149, 3930, -1, -1, -1],\n", - " [1753, 1778, 1237, 482, 3925, 110],\n", - " [3703, 2, 565, 3827, -1, -1],\n", - " [1150, 2734, 10, 2478, 3490, -1],\n", - " [ 426, 811, 95, 489, 144, -1],\n", - " [2313, 2006, 489, 975, -1, -1],\n", - " [3702, 3414, 205, 1488, 2966, 1347],\n", - " [ 70, 1741, 702, 1666, -1, -1],\n", - " [ 703, 1778, 1030, 849, -1, -1],\n", - " [ 814, 1674, 115, 3827, -1, -1]], dtype=torch.int32)\n", - "tensor([4, 6, 5, 4, 5, 6, 3, 6, 4, 5, 5, 4, 6, 4, 4, 4], dtype=torch.int32)\n" - ] - } - ], - "source": [ - "for batch in cv_data_loader:\n", - " keys, feat, text, feat_len, text_len = batch\n", - " print(keys)\n", - " print(feat.shape)\n", - " print(feat)\n", - " print(feat_len)\n", - " print(text)\n", - " print(text_len)\n", - " np.savez('data.npz', keys=keys, feat=feat.numpy(), feat_len=feat_len.numpy(), text=text.numpy(), text_len=text_len.numpy())\n", - " break" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "852a9c95", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "CODE_OF_CONDUCT.md data.npz install.sh README.md\t tools\r\n", - "CONTRIBUTING.md docs LICENSE\t requirements.txt venv\r\n", - "CPPLINT.cfg\t examples Makefile\t runtime\t wenet\r\n" - ] - } - ], - "source": [ - "!ls\n", - "!cp data.npz /workspace/DeepSpeech-2.x/.notebook" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "id": "cde24c4e", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "tensor(111.9988)\n", - "tensor(830.9634, grad_fn=)\n", - "tensor([False, False, False, False, False, True, True, False, False, False,\n", - " False, False, False, False, False, False, False, False, False, False,\n", - " True, False, False, False, False, False, True, True, False, False,\n", - " False, False, False, False, True, False, False, False, False, False,\n", - " False, False, False, False, False, False, True, True, True, False,\n", - " False, False, False, False, False, False, False, False, False, False,\n", - " False, True, True, False, False, False, False, False, False, True,\n", - " False, False, False, False, False, False, True, False, False, False,\n", - " False, False, True, True, False, False, False, False, False, False,\n", - " False, False, False, False, False, False, True, True, False, False,\n", - " False, False, False, True, True, False, False, False, False, False,\n", - " True, True])\n", - "tensor(669.4633, grad_fn=)\n", - "tensor(142.4888, grad_fn=) tensor(41.8415, grad_fn=) tensor(377.3326, grad_fn=)\n" - ] - } - ], - "source": [ - "model.cpu().eval()\n", - "total_loss, attention_loss, ctc_loss = model(feat, feat_len,\n", - " text, text_len)\n", - "print(total_loss, attention_loss, ctc_loss )" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "id": "be5b2a2c", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "cpu\n" - ] - } - ], - "source": [ - "print(total_loss.device)" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "id": "5b791771", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "tensor(112., device='cuda:0')\n", - "tensor(830.9634, device='cuda:0', grad_fn=)\n", - 
"tensor([False, False, False, False, False, True, True, False, False, False,\n", - " False, False, False, False, False, False, False, False, False, False,\n", - " True, False, False, False, False, False, True, True, False, False,\n", - " False, False, False, False, True, False, False, False, False, False,\n", - " False, False, False, False, False, False, True, True, True, False,\n", - " False, False, False, False, False, False, False, False, False, False,\n", - " False, True, True, False, False, False, False, False, False, True,\n", - " False, False, False, False, False, False, True, False, False, False,\n", - " False, False, True, True, False, False, False, False, False, False,\n", - " False, False, False, False, False, False, True, True, False, False,\n", - " False, False, False, True, True, False, False, False, False, False,\n", - " True, True], device='cuda:0')\n", - "tensor(669.4634, device='cuda:0', grad_fn=)\n", - "cuda:0\n", - "142.4888 41.84146 377.33258\n" - ] - } - ], - "source": [ - "model.cuda().eval()\n", - "feat=feat.cuda()\n", - "feat_len=feat_len.cuda()\n", - "text=text.cuda()\n", - "text_len=text_len.cuda()\n", - "\n", - "total_loss, attention_loss, ctc_loss = model(feat, feat_len,\n", - " text, text_len)\n", - "print(total_loss.device)\n", - "print(total_loss.cpu().data.numpy(), attention_loss.cpu().data.numpy(), ctc_loss.cpu().data.numpy() )" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "id": "1baef537", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "torch.Size([16, 51, 256])\n", - "torch.Size([16, 1, 51])\n", - "tensor([[-0.7019, 0.5625, 0.6880, ..., 1.1237, 0.7804, 1.1369],\n", - " [-0.7788, 0.3913, 0.7189, ..., 1.2519, 0.8862, 1.3173],\n", - " [-0.9591, 0.6346, 0.8767, ..., 0.9818, 0.7440, 1.2903],\n", - " ...,\n", - " [-1.0732, 0.6724, 0.9230, ..., 0.9075, 0.8177, 1.3240],\n", - " [-1.1654, 0.6820, 0.6939, ..., 1.2238, 0.8028, 1.4507],\n", - " [-1.2732, 0.7146, 0.7582, ..., 0.9415, 0.8775, 1.2623]],\n", - " device='cuda:0', grad_fn=)\n" - ] - } - ], - "source": [ - "encoder_out, encoder_mask = model.encoder(feat, feat_len)\n", - "print(encoder_out.shape)\n", - "print(encoder_mask.shape)\n", - "print(encoder_out[0])\n", - "\n", - "np.savez('/workspace/DeepSpeech-2.x/.notebook/encoder.npz',\n", - " mask=encoder_mask.cpu().detach().numpy(), \n", - " out=encoder_out.cpu().detach().numpy())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3e22c782", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 20, - "id": "30b6b946", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[ 9.871763 9.938915 10.238187 10.8597145 11.686526 12.25488\n", - " 12.657681 12.86139 12.807339 12.566256 12.32007 12.138792\n", - " 12.313189 12.552552 12.612239 12.569745 12.389728 12.143833\n", - " 12.092851 11.793959 11.622591 11.926331 11.815442 11.951225\n", - " 11.831805 11.887888 11.790144 11.88072 11.900057 11.973481\n", - " 12.009822 12.008814 12.026197 12.104796 12.21555 12.343993\n", - " 12.450144 12.496688 12.486538 12.355079 12.392918 12.255374\n", - " 12.264963 12.253142 12.325458 12.4335985 12.548675 12.676334\n", - " 12.809207 12.929347 12.961151 12.968834 12.995931 13.047281\n", - " 13.058881 13.05738 12.999211 12.934022 12.874292 12.71653\n", - " 12.48942 12.274784 12.261631 12.286319 12.31956 12.422907\n", - " 12.514802 12.578516 12.647194 12.737626 12.800171 12.868728\n", - " 12.966668 
13.064786 13.159159 13.272843 13.310819 13.239043\n", - " 12.879361 11.183102 ] float32\n", - "encoder.embed.out.0.weight: (256, 4864) -> (4864, 256)\n", - "encoder.encoders.0.self_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.0.self_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.0.self_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.0.self_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.0.self_attn.linear_pos.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.0.feed_forward.w_1.weight: (2048, 256) -> (256, 2048)\n", - "encoder.encoders.0.feed_forward.w_2.weight: (256, 2048) -> (2048, 256)\n", - "encoder.encoders.0.feed_forward_macaron.w_1.weight: (2048, 256) -> (256, 2048)\n", - "encoder.encoders.0.feed_forward_macaron.w_2.weight: (256, 2048) -> (2048, 256)\n", - "encoder.encoders.0.conv_module.norm.running_mean -> encoder.encoders.0.conv_module.norm._mean\n", - "encoder.encoders.0.conv_module.norm.running_var -> encoder.encoders.0.conv_module.norm._variance\n", - "encoder.encoders.0.concat_linear.weight: (256, 512) -> (512, 256)\n", - "encoder.encoders.1.self_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.1.self_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.1.self_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.1.self_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.1.self_attn.linear_pos.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.1.feed_forward.w_1.weight: (2048, 256) -> (256, 2048)\n", - "encoder.encoders.1.feed_forward.w_2.weight: (256, 2048) -> (2048, 256)\n", - "encoder.encoders.1.feed_forward_macaron.w_1.weight: (2048, 256) -> (256, 2048)\n", - "encoder.encoders.1.feed_forward_macaron.w_2.weight: (256, 2048) -> (2048, 256)\n", - "encoder.encoders.1.conv_module.norm.running_mean -> encoder.encoders.1.conv_module.norm._mean\n", - "encoder.encoders.1.conv_module.norm.running_var -> encoder.encoders.1.conv_module.norm._variance\n", - "encoder.encoders.1.concat_linear.weight: (256, 512) -> (512, 256)\n", - "encoder.encoders.2.self_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.2.self_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.2.self_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.2.self_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.2.self_attn.linear_pos.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.2.feed_forward.w_1.weight: (2048, 256) -> (256, 2048)\n", - "encoder.encoders.2.feed_forward.w_2.weight: (256, 2048) -> (2048, 256)\n", - "encoder.encoders.2.feed_forward_macaron.w_1.weight: (2048, 256) -> (256, 2048)\n", - "encoder.encoders.2.feed_forward_macaron.w_2.weight: (256, 2048) -> (2048, 256)\n", - "encoder.encoders.2.conv_module.norm.running_mean -> encoder.encoders.2.conv_module.norm._mean\n", - "encoder.encoders.2.conv_module.norm.running_var -> encoder.encoders.2.conv_module.norm._variance\n", - "encoder.encoders.2.concat_linear.weight: (256, 512) -> (512, 256)\n", - "encoder.encoders.3.self_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.3.self_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.3.self_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.3.self_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.3.self_attn.linear_pos.weight: (256, 256) -> (256, 256)\n", - 
"encoder.encoders.3.feed_forward.w_1.weight: (2048, 256) -> (256, 2048)\n", - "encoder.encoders.3.feed_forward.w_2.weight: (256, 2048) -> (2048, 256)\n", - "encoder.encoders.3.feed_forward_macaron.w_1.weight: (2048, 256) -> (256, 2048)\n", - "encoder.encoders.3.feed_forward_macaron.w_2.weight: (256, 2048) -> (2048, 256)\n", - "encoder.encoders.3.conv_module.norm.running_mean -> encoder.encoders.3.conv_module.norm._mean\n", - "encoder.encoders.3.conv_module.norm.running_var -> encoder.encoders.3.conv_module.norm._variance\n", - "encoder.encoders.3.concat_linear.weight: (256, 512) -> (512, 256)\n", - "encoder.encoders.4.self_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.4.self_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.4.self_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.4.self_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.4.self_attn.linear_pos.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.4.feed_forward.w_1.weight: (2048, 256) -> (256, 2048)\n", - "encoder.encoders.4.feed_forward.w_2.weight: (256, 2048) -> (2048, 256)\n", - "encoder.encoders.4.feed_forward_macaron.w_1.weight: (2048, 256) -> (256, 2048)\n", - "encoder.encoders.4.feed_forward_macaron.w_2.weight: (256, 2048) -> (2048, 256)\n", - "encoder.encoders.4.conv_module.norm.running_mean -> encoder.encoders.4.conv_module.norm._mean\n", - "encoder.encoders.4.conv_module.norm.running_var -> encoder.encoders.4.conv_module.norm._variance\n", - "encoder.encoders.4.concat_linear.weight: (256, 512) -> (512, 256)\n", - "encoder.encoders.5.self_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.5.self_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.5.self_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.5.self_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.5.self_attn.linear_pos.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.5.feed_forward.w_1.weight: (2048, 256) -> (256, 2048)\n", - "encoder.encoders.5.feed_forward.w_2.weight: (256, 2048) -> (2048, 256)\n", - "encoder.encoders.5.feed_forward_macaron.w_1.weight: (2048, 256) -> (256, 2048)\n", - "encoder.encoders.5.feed_forward_macaron.w_2.weight: (256, 2048) -> (2048, 256)\n", - "encoder.encoders.5.conv_module.norm.running_mean -> encoder.encoders.5.conv_module.norm._mean\n", - "encoder.encoders.5.conv_module.norm.running_var -> encoder.encoders.5.conv_module.norm._variance\n", - "encoder.encoders.5.concat_linear.weight: (256, 512) -> (512, 256)\n", - "encoder.encoders.6.self_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.6.self_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.6.self_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.6.self_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.6.self_attn.linear_pos.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.6.feed_forward.w_1.weight: (2048, 256) -> (256, 2048)\n", - "encoder.encoders.6.feed_forward.w_2.weight: (256, 2048) -> (2048, 256)\n", - "encoder.encoders.6.feed_forward_macaron.w_1.weight: (2048, 256) -> (256, 2048)\n", - "encoder.encoders.6.feed_forward_macaron.w_2.weight: (256, 2048) -> (2048, 256)\n", - "encoder.encoders.6.conv_module.norm.running_mean -> encoder.encoders.6.conv_module.norm._mean\n", - "encoder.encoders.6.conv_module.norm.running_var -> encoder.encoders.6.conv_module.norm._variance\n", - 
"encoder.encoders.6.concat_linear.weight: (256, 512) -> (512, 256)\n", - "encoder.encoders.7.self_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.7.self_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.7.self_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.7.self_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.7.self_attn.linear_pos.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.7.feed_forward.w_1.weight: (2048, 256) -> (256, 2048)\n", - "encoder.encoders.7.feed_forward.w_2.weight: (256, 2048) -> (2048, 256)\n", - "encoder.encoders.7.feed_forward_macaron.w_1.weight: (2048, 256) -> (256, 2048)\n", - "encoder.encoders.7.feed_forward_macaron.w_2.weight: (256, 2048) -> (2048, 256)\n", - "encoder.encoders.7.conv_module.norm.running_mean -> encoder.encoders.7.conv_module.norm._mean\n", - "encoder.encoders.7.conv_module.norm.running_var -> encoder.encoders.7.conv_module.norm._variance\n", - "encoder.encoders.7.concat_linear.weight: (256, 512) -> (512, 256)\n", - "encoder.encoders.8.self_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.8.self_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.8.self_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.8.self_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.8.self_attn.linear_pos.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.8.feed_forward.w_1.weight: (2048, 256) -> (256, 2048)\n", - "encoder.encoders.8.feed_forward.w_2.weight: (256, 2048) -> (2048, 256)\n", - "encoder.encoders.8.feed_forward_macaron.w_1.weight: (2048, 256) -> (256, 2048)\n", - "encoder.encoders.8.feed_forward_macaron.w_2.weight: (256, 2048) -> (2048, 256)\n", - "encoder.encoders.8.conv_module.norm.running_mean -> encoder.encoders.8.conv_module.norm._mean\n", - "encoder.encoders.8.conv_module.norm.running_var -> encoder.encoders.8.conv_module.norm._variance\n", - "encoder.encoders.8.concat_linear.weight: (256, 512) -> (512, 256)\n", - "encoder.encoders.9.self_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.9.self_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.9.self_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.9.self_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.9.self_attn.linear_pos.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.9.feed_forward.w_1.weight: (2048, 256) -> (256, 2048)\n", - "encoder.encoders.9.feed_forward.w_2.weight: (256, 2048) -> (2048, 256)\n", - "encoder.encoders.9.feed_forward_macaron.w_1.weight: (2048, 256) -> (256, 2048)\n", - "encoder.encoders.9.feed_forward_macaron.w_2.weight: (256, 2048) -> (2048, 256)\n", - "encoder.encoders.9.conv_module.norm.running_mean -> encoder.encoders.9.conv_module.norm._mean\n", - "encoder.encoders.9.conv_module.norm.running_var -> encoder.encoders.9.conv_module.norm._variance\n", - "encoder.encoders.9.concat_linear.weight: (256, 512) -> (512, 256)\n", - "encoder.encoders.10.self_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.10.self_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.10.self_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.10.self_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.10.self_attn.linear_pos.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.10.feed_forward.w_1.weight: (2048, 256) -> (256, 2048)\n", - 
"encoder.encoders.10.feed_forward.w_2.weight: (256, 2048) -> (2048, 256)\n", - "encoder.encoders.10.feed_forward_macaron.w_1.weight: (2048, 256) -> (256, 2048)\n", - "encoder.encoders.10.feed_forward_macaron.w_2.weight: (256, 2048) -> (2048, 256)\n", - "encoder.encoders.10.conv_module.norm.running_mean -> encoder.encoders.10.conv_module.norm._mean\n", - "encoder.encoders.10.conv_module.norm.running_var -> encoder.encoders.10.conv_module.norm._variance\n", - "encoder.encoders.10.concat_linear.weight: (256, 512) -> (512, 256)\n", - "encoder.encoders.11.self_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.11.self_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.11.self_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.11.self_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.11.self_attn.linear_pos.weight: (256, 256) -> (256, 256)\n", - "encoder.encoders.11.feed_forward.w_1.weight: (2048, 256) -> (256, 2048)\n", - "encoder.encoders.11.feed_forward.w_2.weight: (256, 2048) -> (2048, 256)\n", - "encoder.encoders.11.feed_forward_macaron.w_1.weight: (2048, 256) -> (256, 2048)\n", - "encoder.encoders.11.feed_forward_macaron.w_2.weight: (256, 2048) -> (2048, 256)\n", - "encoder.encoders.11.conv_module.norm.running_mean -> encoder.encoders.11.conv_module.norm._mean\n", - "encoder.encoders.11.conv_module.norm.running_var -> encoder.encoders.11.conv_module.norm._variance\n", - "encoder.encoders.11.concat_linear.weight: (256, 512) -> (512, 256)\n", - "decoder.output_layer.weight: (4233, 256) -> (256, 4233)\n", - "decoder.decoders.0.self_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.0.self_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.0.self_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.0.self_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.0.src_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.0.src_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.0.src_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.0.src_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.0.feed_forward.w_1.weight: (2048, 256) -> (256, 2048)\n", - "decoder.decoders.0.feed_forward.w_2.weight: (256, 2048) -> (2048, 256)\n", - "decoder.decoders.0.concat_linear1.weight: (256, 512) -> (512, 256)\n", - "decoder.decoders.0.concat_linear2.weight: (256, 512) -> (512, 256)\n", - "decoder.decoders.1.self_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.1.self_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.1.self_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.1.self_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.1.src_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.1.src_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.1.src_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.1.src_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.1.feed_forward.w_1.weight: (2048, 256) -> (256, 2048)\n", - "decoder.decoders.1.feed_forward.w_2.weight: (256, 2048) -> (2048, 256)\n", - "decoder.decoders.1.concat_linear1.weight: (256, 512) -> (512, 256)\n", - "decoder.decoders.1.concat_linear2.weight: (256, 512) -> (512, 256)\n", - "decoder.decoders.2.self_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - 
"decoder.decoders.2.self_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.2.self_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.2.self_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.2.src_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.2.src_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.2.src_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.2.src_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.2.feed_forward.w_1.weight: (2048, 256) -> (256, 2048)\n", - "decoder.decoders.2.feed_forward.w_2.weight: (256, 2048) -> (2048, 256)\n", - "decoder.decoders.2.concat_linear1.weight: (256, 512) -> (512, 256)\n", - "decoder.decoders.2.concat_linear2.weight: (256, 512) -> (512, 256)\n", - "decoder.decoders.3.self_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.3.self_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.3.self_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.3.self_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.3.src_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.3.src_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.3.src_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.3.src_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.3.feed_forward.w_1.weight: (2048, 256) -> (256, 2048)\n", - "decoder.decoders.3.feed_forward.w_2.weight: (256, 2048) -> (2048, 256)\n", - "decoder.decoders.3.concat_linear1.weight: (256, 512) -> (512, 256)\n", - "decoder.decoders.3.concat_linear2.weight: (256, 512) -> (512, 256)\n", - "decoder.decoders.4.self_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.4.self_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.4.self_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.4.self_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.4.src_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.4.src_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.4.src_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.4.src_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.4.feed_forward.w_1.weight: (2048, 256) -> (256, 2048)\n", - "decoder.decoders.4.feed_forward.w_2.weight: (256, 2048) -> (2048, 256)\n", - "decoder.decoders.4.concat_linear1.weight: (256, 512) -> (512, 256)\n", - "decoder.decoders.4.concat_linear2.weight: (256, 512) -> (512, 256)\n", - "decoder.decoders.5.self_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.5.self_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.5.self_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.5.self_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.5.src_attn.linear_q.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.5.src_attn.linear_k.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.5.src_attn.linear_v.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.5.src_attn.linear_out.weight: (256, 256) -> (256, 256)\n", - "decoder.decoders.5.feed_forward.w_1.weight: (2048, 256) -> (256, 2048)\n", - "decoder.decoders.5.feed_forward.w_2.weight: (256, 2048) -> (2048, 256)\n", - "decoder.decoders.5.concat_linear1.weight: (256, 512) -> (512, 
256)\n", - "decoder.decoders.5.concat_linear2.weight: (256, 512) -> (512, 256)\n", - "ctc.ctc_lo.weight: (4233, 256) -> (256, 4233)\n" - ] - } - ], - "source": [ - "# dump torch model to paddle\n", - "import numpy as np\n", - "state_dict = model.state_dict()\n", - "paddle_state_dict = {}\n", - "\n", - "for n, p in state_dict.items():\n", - " name_change=True\n", - "\n", - " if 'norm.running_mean' in n:\n", - " new_n = n.replace('norm.running_', 'norm._')\n", - " elif 'norm.running_var' in n:\n", - " new_n = n.replace('norm.running_var', 'norm._variance')\n", - " else:\n", - " name_change=False\n", - " new_n = n\n", - " \n", - " if name_change:\n", - " print(f\"{n} -> {new_n}\")\n", - " \n", - " p = p.cpu().detach().numpy()\n", - " if n.endswith('weight') and p.ndim == 2 and 'embed.0.weight' not in n:\n", - " new_p = p.T\n", - " print(f\"{n}: {p.shape} -> {new_p.shape}\")\n", - " else:\n", - " new_p = p\n", - " \n", - " if 'global_cmvn.mean' in n:\n", - " print(p, p.dtype)\n", - " \n", - " paddle_state_dict[new_n] = new_p\n", - " \n", - "np.savez('/workspace/DeepSpeech-2.x/.notebook/model',\n", - " state=paddle_state_dict)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7307dc5b", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 21, - "id": "d99b29bc", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "tensor(377.3326, device='cuda:0', grad_fn=)\n", - "None\n", - "[[ 3.16902351e+00 -1.51765049e-02 4.91097234e-02 ... -2.47973716e-03\n", - " -5.93366381e-03 -7.26613170e-03]\n", - " [-1.74185038e+00 7.75875803e-03 -4.49435972e-02 ... 9.92415240e-04\n", - " 2.46338220e-03 2.31891591e-03]\n", - " [-2.33343077e+00 1.30476682e-02 -2.66557615e-02 ... 2.27533933e-03\n", - " 5.76929189e-03 7.48792710e-03]\n", - " ...\n", - " [-4.30356789e+00 2.46056803e-02 -9.00955945e-02 ... 4.43160534e-03\n", - " 1.16123557e-02 1.44716976e-02]\n", - " [-3.36919212e+00 1.73155665e-02 -6.36875406e-02 ... 3.28367390e-03\n", - " 8.58021621e-03 1.07796099e-02]\n", - " [-6.62039661e+00 3.49958315e-02 -1.23963736e-01 ... 6.36674836e-03\n", - " 1.60815325e-02 2.03892551e-02]]\n", - "[-4.3777566e+00 2.3245990e-02 -9.3339972e-02 ... 4.2569702e-03\n", - " 1.0920014e-02 1.3787906e-02]\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - ":6: UserWarning: The .grad attribute of a Tensor that is not a leaf Tensor is being accessed. Its .grad attribute won't be populated during autograd.backward(). If you indeed want the gradient for a non-leaf Tensor, use .retain_grad() on the non-leaf Tensor. If you access the non-leaf Tensor by mistake, make sure you access the leaf Tensor instead. 
See github.com/pytorch/pytorch/pull/30531 for more informations.\n", - " print(loss_ctc.grad)\n" - ] - } - ], - "source": [ - "encoder_out_lens = encoder_mask.squeeze(1).sum(1)\n", - "loss_ctc = model.ctc(encoder_out, encoder_out_lens, text, text_len)\n", - "print(loss_ctc)\n", - "dir(loss_ctc)\n", - "loss_ctc.backward()\n", - "print(loss_ctc.grad)\n", - "#print(model.ctc.ctc_lo.weight.grad)\n", - "print(model.ctc.ctc_lo.weight.grad.T.cpu().data.numpy())\n", - "print(model.ctc.ctc_lo.bias.grad.cpu().data.numpy())" - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "id": "49b05d6d", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "tensor(112., device='cuda:0')\n", - "tensor(830.9634, device='cuda:0', grad_fn=)\n", - "tensor([False, False, False, False, False, True, True, False, False, False,\n", - " False, False, False, False, False, False, False, False, False, False,\n", - " True, False, False, False, False, False, True, True, False, False,\n", - " False, False, False, False, True, False, False, False, False, False,\n", - " False, False, False, False, False, False, True, True, True, False,\n", - " False, False, False, False, False, False, False, False, False, False,\n", - " False, True, True, False, False, False, False, False, False, True,\n", - " False, False, False, False, False, False, True, False, False, False,\n", - " False, False, True, True, False, False, False, False, False, False,\n", - " False, False, False, False, False, False, True, True, False, False,\n", - " False, False, False, True, True, False, False, False, False, False,\n", - " True, True], device='cuda:0')\n", - "tensor(669.4634, device='cuda:0', grad_fn=)\n", - "tensor(41.8415, device='cuda:0', grad_fn=) 0.0\n" - ] - } - ], - "source": [ - "loss_att, acc_att = model._calc_att_loss(encoder_out, encoder_mask,\n", - " text, text_len)\n", - "print(loss_att, acc_att)" - ] - }, - { - "cell_type": "code", - "execution_count": 25, - "id": "413b413f", - "metadata": {}, - "outputs": [], - "source": [ - "def pad_list(xs, pad_value: int):\n", - " n_batch = len(xs)\n", - " max_len = max([x.size(0) for x in xs])\n", - " pad = torch.zeros(n_batch, max_len, dtype=xs[0].dtype, device=xs[0].device)\n", - " pad = pad.fill_(pad_value)\n", - " for i in range(n_batch):\n", - " pad[i, :xs[i].size(0)] = xs[i]\n", - "\n", - " return pad\n", - "\n", - "def add_sos_eos(ys_pad: torch.Tensor, sos: int, eos: int,\n", - " ignore_id: int):\n", - "\n", - " _sos = torch.tensor([sos],\n", - " dtype=torch.long,\n", - " requires_grad=False,\n", - " device=ys_pad.device)\n", - " _eos = torch.tensor([eos],\n", - " dtype=torch.long,\n", - " requires_grad=False,\n", - " device=ys_pad.device)\n", - " ys = [y[y != ignore_id] for y in ys_pad] # parse padded ys\n", - " ys_in = [torch.cat([_sos, y], dim=0) for y in ys]\n", - " ys_out = [torch.cat([y, _eos], dim=0) for y in ys]\n", - " return pad_list(ys_in, eos), pad_list(ys_out, ignore_id)" - ] - }, - { - "cell_type": "code", - "execution_count": 26, - "id": "ff0c2400", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "tensor([[4232, 2995, 3116, 1209, 565, 4232, 4232],\n", - " [4232, 236, 1176, 331, 66, 3925, 4077],\n", - " [4232, 2693, 524, 234, 1145, 366, 4232],\n", - " [4232, 3875, 4211, 3062, 700, 4232, 4232],\n", - " [4232, 272, 987, 1134, 494, 2959, 4232],\n", - " [4232, 1936, 3715, 120, 2553, 2695, 2710],\n", - " [4232, 25, 1149, 3930, 4232, 4232, 4232],\n", - " [4232, 1753, 1778, 1237, 482, 3925, 
110],\n", - " [4232, 3703, 2, 565, 3827, 4232, 4232],\n", - " [4232, 1150, 2734, 10, 2478, 3490, 4232],\n", - " [4232, 426, 811, 95, 489, 144, 4232],\n", - " [4232, 2313, 2006, 489, 975, 4232, 4232],\n", - " [4232, 3702, 3414, 205, 1488, 2966, 1347],\n", - " [4232, 70, 1741, 702, 1666, 4232, 4232],\n", - " [4232, 703, 1778, 1030, 849, 4232, 4232],\n", - " [4232, 814, 1674, 115, 3827, 4232, 4232]], device='cuda:0')\n", - "tensor([[2995, 3116, 1209, 565, 4232, -1, -1],\n", - " [ 236, 1176, 331, 66, 3925, 4077, 4232],\n", - " [2693, 524, 234, 1145, 366, 4232, -1],\n", - " [3875, 4211, 3062, 700, 4232, -1, -1],\n", - " [ 272, 987, 1134, 494, 2959, 4232, -1],\n", - " [1936, 3715, 120, 2553, 2695, 2710, 4232],\n", - " [ 25, 1149, 3930, 4232, -1, -1, -1],\n", - " [1753, 1778, 1237, 482, 3925, 110, 4232],\n", - " [3703, 2, 565, 3827, 4232, -1, -1],\n", - " [1150, 2734, 10, 2478, 3490, 4232, -1],\n", - " [ 426, 811, 95, 489, 144, 4232, -1],\n", - " [2313, 2006, 489, 975, 4232, -1, -1],\n", - " [3702, 3414, 205, 1488, 2966, 1347, 4232],\n", - " [ 70, 1741, 702, 1666, 4232, -1, -1],\n", - " [ 703, 1778, 1030, 849, 4232, -1, -1],\n", - " [ 814, 1674, 115, 3827, 4232, -1, -1]], device='cuda:0')\n" - ] - } - ], - "source": [ - "ys_pad = text\n", - "ys_pad_lens = text_len\n", - "ys_in_pad, ys_out_pad = add_sos_eos(ys_pad, model.sos, model.eos,\n", - " model.ignore_id)\n", - "ys_in_lens = ys_pad_lens + 1\n", - "print(ys_in_pad)\n", - "print(ys_out_pad)" - ] - }, - { - "cell_type": "code", - "execution_count": 27, - "id": "3e84da38", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "torch.Size([16, 7, 4233])\n", - "tensor([[-3.7639e-01, -8.2272e-01, 7.4276e-01, ..., 3.4201e-01,\n", - " 1.5035e-02, 4.0337e-01],\n", - " [-8.7386e-01, -3.1389e-01, 4.1988e-01, ..., 3.7724e-01,\n", - " -1.4353e-01, -1.0024e+00],\n", - " [-4.3505e-01, 3.4505e-02, -2.8710e-01, ..., 7.7274e-02,\n", - " -1.1672e+00, -2.6849e-01],\n", - " ...,\n", - " [ 4.2471e-01, 5.8886e-01, 2.0204e-02, ..., 3.7405e-01,\n", - " 4.5470e-02, -3.7139e-01],\n", - " [-3.7978e-01, -8.1084e-01, 7.5725e-01, ..., 2.6039e-01,\n", - " -7.9347e-04, 4.2538e-01],\n", - " [-3.8280e-01, -8.1207e-01, 7.4943e-01, ..., 2.6173e-01,\n", - " -1.0499e-03, 4.2679e-01]], device='cuda:0', grad_fn=)\n" - ] - } - ], - "source": [ - "decoder_out, _ = model.decoder(encoder_out, encoder_mask, ys_in_pad,\n", - " ys_in_lens)\n", - "print(decoder_out.shape)\n", - "print(decoder_out[0])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "aac441ea", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 28, - "id": "5ddbca73", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "torch.float32\n", - "torch.int64\n", - "tensor(112., device='cuda:0')\n", - "tensor(830.9634, device='cuda:0', grad_fn=)\n", - "tensor([False, False, False, False, False, True, True, False, False, False,\n", - " False, False, False, False, False, False, False, False, False, False,\n", - " True, False, False, False, False, False, True, True, False, False,\n", - " False, False, False, False, True, False, False, False, False, False,\n", - " False, False, False, False, False, False, True, True, True, False,\n", - " False, False, False, False, False, False, False, False, False, False,\n", - " False, True, True, False, False, False, False, False, False, True,\n", - " False, False, False, False, False, False, True, False, False, False,\n", - " False, 
False, True, True, False, False, False, False, False, False,\n", - " False, False, False, False, False, False, True, True, False, False,\n", - " False, False, False, True, True, False, False, False, False, False,\n", - " True, True], device='cuda:0')\n", - "tensor(669.4634, device='cuda:0', grad_fn=)\n", - "tensor(41.8415, device='cuda:0', grad_fn=)\n", - "tensor([[2995, 3116, 1209, 565, 4232, -1, -1],\n", - " [ 236, 1176, 331, 66, 3925, 4077, 4232],\n", - " [2693, 524, 234, 1145, 366, 4232, -1],\n", - " [3875, 4211, 3062, 700, 4232, -1, -1],\n", - " [ 272, 987, 1134, 494, 2959, 4232, -1],\n", - " [1936, 3715, 120, 2553, 2695, 2710, 4232],\n", - " [ 25, 1149, 3930, 4232, -1, -1, -1],\n", - " [1753, 1778, 1237, 482, 3925, 110, 4232],\n", - " [3703, 2, 565, 3827, 4232, -1, -1],\n", - " [1150, 2734, 10, 2478, 3490, 4232, -1],\n", - " [ 426, 811, 95, 489, 144, 4232, -1],\n", - " [2313, 2006, 489, 975, 4232, -1, -1],\n", - " [3702, 3414, 205, 1488, 2966, 1347, 4232],\n", - " [ 70, 1741, 702, 1666, 4232, -1, -1],\n", - " [ 703, 1778, 1030, 849, 4232, -1, -1],\n", - " [ 814, 1674, 115, 3827, 4232, -1, -1]], device='cuda:0')\n", - "tensor([[-3.7639e-01, -8.2272e-01, 7.4276e-01, ..., 3.4201e-01,\n", - " 1.5035e-02, 4.0337e-01],\n", - " [-8.7386e-01, -3.1389e-01, 4.1988e-01, ..., 3.7724e-01,\n", - " -1.4353e-01, -1.0024e+00],\n", - " [-4.3505e-01, 3.4505e-02, -2.8710e-01, ..., 7.7274e-02,\n", - " -1.1672e+00, -2.6849e-01],\n", - " ...,\n", - " [ 4.2471e-01, 5.8886e-01, 2.0204e-02, ..., 3.7405e-01,\n", - " 4.5470e-02, -3.7139e-01],\n", - " [-3.7978e-01, -8.1084e-01, 7.5725e-01, ..., 2.6039e-01,\n", - " -7.9347e-04, 4.2538e-01],\n", - " [-3.8280e-01, -8.1207e-01, 7.4943e-01, ..., 2.6173e-01,\n", - " -1.0499e-03, 4.2679e-01]], device='cuda:0', grad_fn=)\n" - ] - } - ], - "source": [ - "print(decoder_out.dtype)\n", - "print(ys_out_pad.dtype)\n", - "loss_att = model.criterion_att(decoder_out, ys_out_pad)\n", - "print(loss_att)\n", - "print(ys_out_pad)\n", - "print(decoder_out[0])\n", - "np.savez('/workspace/DeepSpeech-2.x/.notebook/decoder',\n", - " decoder_out=decoder_out.cpu().detach().numpy())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "78f98c0b", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 29, - "id": "8d968cd3", - "metadata": {}, - "outputs": [], - "source": [ - "import torch\n", - "from torch import nn\n", - "\n", - "\n", - "class LabelSmoothingLoss(nn.Module):\n", - " def __init__(self,\n", - " size: int,\n", - " padding_idx: int,\n", - " smoothing: float,\n", - " normalize_length: bool = False):\n", - " \"\"\"Construct an LabelSmoothingLoss object.\"\"\"\n", - " super(LabelSmoothingLoss, self).__init__()\n", - " self.criterion = nn.KLDivLoss(reduction=\"none\")\n", - " self.padding_idx = padding_idx\n", - " self.confidence = 1.0 - smoothing\n", - " self.smoothing = smoothing\n", - " self.size = size\n", - " self.normalize_length = normalize_length\n", - "\n", - " def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n", - " \"\"\"Compute loss between x and target.\n", - "\n", - " The model outputs and data labels tensors are flatten to\n", - " (batch*seqlen, class) shape and a mask is applied to the\n", - " padding part which should not be calculated for loss.\n", - "\n", - " Args:\n", - " x (torch.Tensor): prediction (batch, seqlen, class)\n", - " target (torch.Tensor):\n", - " target signal masked with self.padding_id (batch, seqlen)\n", - " Returns:\n", - " loss (torch.Tensor) : The KL 
loss, scalar float value\n", - " \"\"\"\n", - " assert x.size(2) == self.size\n", - " batch_size = x.size(0)\n", - " x = x.view(-1, self.size)\n", - " target = target.view(-1)\n", - " # use zeros_like instead of torch.no_grad() for true_dist,\n", - " # since no_grad() can not be exported by JIT\n", - " true_dist = torch.zeros_like(x)\n", - " true_dist.fill_(self.smoothing / (self.size - 1))\n", - " ignore = target == self.padding_idx # (B,)\n", - " print(self.smoothing / (self.size - 1))\n", - " print(true_dist)\n", - " total = len(target) - ignore.sum().item()\n", - " target = target.masked_fill(ignore, 0) # avoid -1 index\n", - " true_dist.scatter_(1, target.unsqueeze(1), self.confidence)\n", - " print(true_dist.dtype)\n", - " print(true_dist.square().sum())\n", - " kl = self.criterion(torch.log_softmax(x, dim=1), true_dist)\n", - " print(kl.sum())\n", - " denom = total if self.normalize_length else batch_size\n", - " print(ignore)\n", - " numer= kl.masked_fill(ignore.unsqueeze(1), 0).sum()\n", - " print(numer)\n", - " return numer /denom" - ] - }, - { - "cell_type": "code", - "execution_count": 30, - "id": "3df340ec", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "2.3629489603024576e-05\n", - "tensor([[2.3629e-05, 2.3629e-05, 2.3629e-05, ..., 2.3629e-05, 2.3629e-05,\n", - " 2.3629e-05],\n", - " [2.3629e-05, 2.3629e-05, 2.3629e-05, ..., 2.3629e-05, 2.3629e-05,\n", - " 2.3629e-05],\n", - " [2.3629e-05, 2.3629e-05, 2.3629e-05, ..., 2.3629e-05, 2.3629e-05,\n", - " 2.3629e-05],\n", - " ...,\n", - " [2.3629e-05, 2.3629e-05, 2.3629e-05, ..., 2.3629e-05, 2.3629e-05,\n", - " 2.3629e-05],\n", - " [2.3629e-05, 2.3629e-05, 2.3629e-05, ..., 2.3629e-05, 2.3629e-05,\n", - " 2.3629e-05],\n", - " [2.3629e-05, 2.3629e-05, 2.3629e-05, ..., 2.3629e-05, 2.3629e-05,\n", - " 2.3629e-05]], device='cuda:0')\n", - "torch.float32\n", - "tensor(90.7203, device='cuda:0')\n", - "tensor(830.9634, device='cuda:0', grad_fn=)\n", - "tensor([False, False, False, False, False, True, True, False, False, False,\n", - " False, False, False, False, False, False, False, False, False, False,\n", - " True, False, False, False, False, False, True, True, False, False,\n", - " False, False, False, False, True, False, False, False, False, False,\n", - " False, False, False, False, False, False, True, True, True, False,\n", - " False, False, False, False, False, False, False, False, False, False,\n", - " False, True, True, False, False, False, False, False, False, True,\n", - " False, False, False, False, False, False, True, False, False, False,\n", - " False, False, True, True, False, False, False, False, False, False,\n", - " False, False, False, False, False, False, True, True, False, False,\n", - " False, False, False, True, True, False, False, False, False, False,\n", - " True, True], device='cuda:0')\n", - "tensor(669.4634, device='cuda:0', grad_fn=)\n", - "tensor(41.8415, device='cuda:0', grad_fn=)\n", - "torch.int64\n" - ] - } - ], - "source": [ - "criteron = LabelSmoothingLoss(4233, -1, 0.1, False)\n", - "loss_att = criteron(decoder_out, ys_out_pad)\n", - "print(loss_att)\n", - "print(ys_out_pad.dtype)" - ] - }, - { - "cell_type": "code", - "execution_count": 31, - "id": "badc410d", - "metadata": {}, - "outputs": [ - { - "ename": "RuntimeError", - "evalue": "Trying to backward through the graph a second time, but the saved intermediate results have already been freed. 
Specify retain_graph=True when calling backward the first time.", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mRuntimeError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mloss_att\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mloss_att\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgrad\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdecoder_out\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgrad\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/workspace/wenet/venv/lib/python3.8/site-packages/torch/tensor.py\u001b[0m in \u001b[0;36mbackward\u001b[0;34m(self, gradient, retain_graph, create_graph)\u001b[0m\n\u001b[1;32m 183\u001b[0m \u001b[0mproducts\u001b[0m\u001b[0;34m.\u001b[0m \u001b[0mDefaults\u001b[0m \u001b[0mto\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 184\u001b[0m \"\"\"\n\u001b[0;32m--> 185\u001b[0;31m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mautograd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgradient\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mretain_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 186\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 187\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mregister_hook\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhook\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/workspace/wenet/venv/lib/python3.8/site-packages/torch/autograd/__init__.py\u001b[0m in \u001b[0;36mbackward\u001b[0;34m(tensors, grad_tensors, retain_graph, create_graph, grad_variables)\u001b[0m\n\u001b[1;32m 123\u001b[0m \u001b[0mretain_graph\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 124\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 125\u001b[0;31m Variable._execution_engine.run_backward(\n\u001b[0m\u001b[1;32m 126\u001b[0m \u001b[0mtensors\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgrad_tensors\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mretain_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 127\u001b[0m allow_unreachable=True) # allow_unreachable flag\n", - "\u001b[0;31mRuntimeError\u001b[0m: Trying to backward through the graph a second time, but the saved intermediate results have already been freed. Specify retain_graph=True when calling backward the first time." 
- ] - } - ], - "source": [ - "loss_att.backward()\n", - "print(loss_att.grad)\n", - "print(decoder_out.grad)" - ] - }, - { - "cell_type": "code", - "execution_count": 32, - "id": "219eb41f", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "tensor([ 0.0024, 0.0019, -0.1098, ..., 0.0028, 0.0020, -1.7978],\n", - " device='cuda:0')\n", - "tensor([[ 6.5052e-04, 6.4419e-05, -6.1955e-06, ..., 9.8220e-04,\n", - " -2.5918e-05, 3.3754e-04],\n", - " [ 3.9305e-04, 4.5799e-04, 1.4362e-04, ..., 4.6800e-04,\n", - " 1.6911e-04, 2.7067e-04],\n", - " [-1.3593e-01, 5.2201e-02, 3.2895e-02, ..., 2.4580e-02,\n", - " 1.4590e-01, -4.6850e-02],\n", - " ...,\n", - " [ 1.0434e-03, 4.2251e-04, 6.5688e-04, ..., 1.2144e-03,\n", - " 2.1159e-04, 6.6838e-04],\n", - " [ 6.4997e-04, 4.4301e-04, 4.1550e-04, ..., 1.0420e-03,\n", - " 2.4114e-04, 1.5338e-04],\n", - " [-9.9337e-01, 5.4573e-01, -1.1371e-02, ..., -4.3175e-01,\n", - " -2.7850e-01, -4.4679e-01]], device='cuda:0')\n" - ] - } - ], - "source": [ - "print(model.decoder.output_layer.bias.grad)\n", - "print(model.decoder.output_layer.weight.grad)" - ] - }, - { - "cell_type": "code", - "execution_count": 42, - "id": "40d00a54", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "tensor([[[-5.3698e-01, -1.9911e-01, -3.4997e-01, ..., -8.2428e-01,\n", - " -1.0265e+00, -9.6301e-01],\n", - " [-4.4642e-02, 2.3176e-01, -3.2539e-01, ..., -9.0159e-01,\n", - " -1.0325e+00, -7.5987e-01],\n", - " [ 5.0035e-01, 2.2691e-01, -7.3052e-01, ..., -1.0055e+00,\n", - " -8.7123e-01, -1.0306e+00],\n", - " ...,\n", - " [-4.0024e-01, -1.4325e-01, -5.7947e-01, ..., -1.0718e+00,\n", - " -1.2806e+00, -1.0518e+00],\n", - " [ 1.5755e-01, -1.8495e-03, -2.8703e-01, ..., -1.1090e+00,\n", - " -9.4519e-01, -7.2506e-01],\n", - " [-4.7520e-01, -1.3942e+00, -2.5754e-01, ..., -1.1365e+00,\n", - " -1.1943e+00, -1.2290e+00]],\n", - "\n", - " [[ 9.5454e-01, 3.6428e-01, -1.3891e+00, ..., -1.1637e+00,\n", - " -1.2845e+00, -1.2015e+00],\n", - " [-8.5735e-02, -1.0579e+00, -8.9173e-01, ..., -9.6441e-01,\n", - " -1.1255e+00, -1.2599e+00],\n", - " [ 4.7654e-01, 3.2887e-01, -5.9201e-01, ..., -1.1942e+00,\n", - " -1.1430e+00, -1.0242e+00],\n", - " ...,\n", - " [-4.7431e-01, -3.3559e-01, -7.2326e-01, ..., -1.4506e+00,\n", - " -1.3957e+00, -1.0464e+00],\n", - " [ 3.6113e-01, 1.0381e-01, -1.1599e+00, ..., -1.0439e+00,\n", - " -1.0221e+00, -1.0208e+00],\n", - " [-1.2717e+00, -2.1460e+00, -7.5677e-01, ..., -9.7822e-01,\n", - " -9.3785e-01, -1.0371e+00]],\n", - "\n", - " [[-1.5465e+00, -1.0152e+00, -8.8901e-01, ..., -4.8522e-01,\n", - " -7.5163e-01, -6.7765e-01],\n", - " [-7.6101e-01, -7.3352e-01, -9.1588e-01, ..., -2.4836e-01,\n", - " -5.8927e-01, -7.3723e-01],\n", - " [-2.4714e-02, 1.7016e-01, -4.2326e-01, ..., -3.3204e-01,\n", - " -7.6696e-01, -7.1652e-01],\n", - " ...,\n", - " [-1.7032e+00, -1.2591e+00, -1.1449e+00, ..., -1.1810e+00,\n", - " -1.1163e+00, -9.3108e-01],\n", - " [-6.0434e+00, -4.9397e+00, -3.4235e+00, ..., -3.9949e+00,\n", - " -3.9869e+00, -3.6797e+00],\n", - " [-6.0434e+00, -4.9397e+00, -3.4235e+00, ..., -3.9949e+00,\n", - " -3.9869e+00, -3.6797e+00]],\n", - "\n", - " ...,\n", - "\n", - " [[ 6.4983e-01, 2.6117e-01, -8.4197e-01, ..., -8.7213e-01,\n", - " -1.1073e+00, -1.3253e+00],\n", - " [ 3.5391e-01, -1.5846e-02, -4.0425e-01, ..., -9.9173e-01,\n", - " -1.0727e+00, -1.1924e+00],\n", - " [ 3.7704e-01, -6.2785e-02, -1.1468e-01, ..., -1.1021e+00,\n", - " -1.0952e+00, -1.1182e+00],\n", - " ...,\n", - " [-6.0434e+00, 
-4.9397e+00, -3.4235e+00, ..., -3.9949e+00,\n", - " -3.9869e+00, -3.6797e+00],\n", - " [-6.0434e+00, -4.9397e+00, -3.4235e+00, ..., -3.9949e+00,\n", - " -3.9869e+00, -3.6797e+00],\n", - " [-6.0434e+00, -4.9397e+00, -3.4235e+00, ..., -3.9949e+00,\n", - " -3.9869e+00, -3.6797e+00]],\n", - "\n", - " [[ 4.4458e-02, -1.7547e-01, -6.7475e-01, ..., -4.9801e-01,\n", - " -5.6783e-01, -7.7852e-01],\n", - " [-1.3428e+00, -8.0343e-01, -9.0457e-01, ..., -6.5902e-01,\n", - " -7.2550e-01, -6.2796e-01],\n", - " [-7.6253e-01, -1.3071e-01, -1.3280e-01, ..., -5.6133e-01,\n", - " -6.0588e-01, -7.2115e-01],\n", - " ...,\n", - " [-6.0434e+00, -4.9397e+00, -3.4235e+00, ..., -3.9949e+00,\n", - " -3.9869e+00, -3.6797e+00],\n", - " [-6.0434e+00, -4.9397e+00, -3.4235e+00, ..., -3.9949e+00,\n", - " -3.9869e+00, -3.6797e+00],\n", - " [-6.0434e+00, -4.9397e+00, -3.4235e+00, ..., -3.9949e+00,\n", - " -3.9869e+00, -3.6797e+00]],\n", - "\n", - " [[-1.0798e+00, -1.0834e+00, -1.1797e+00, ..., -1.7757e-01,\n", - " -4.3747e-01, -4.0007e-02],\n", - " [ 9.2354e-01, 6.3771e-01, -5.2810e-01, ..., -1.2928e-01,\n", - " -2.0342e-01, 1.6656e-01],\n", - " [ 4.9337e-01, -9.1133e-03, -7.3302e-01, ..., 1.0074e-01,\n", - " -9.8115e-02, -9.2357e-03],\n", - " ...,\n", - " [-6.0434e+00, -4.9397e+00, -3.4235e+00, ..., -3.9949e+00,\n", - " -3.9869e+00, -3.6797e+00],\n", - " [-6.0434e+00, -4.9397e+00, -3.4235e+00, ..., -3.9949e+00,\n", - " -3.9869e+00, -3.6797e+00],\n", - " [-6.0434e+00, -4.9397e+00, -3.4235e+00, ..., -3.9949e+00,\n", - " -3.9869e+00, -3.6797e+00]]], device='cuda:0')\n" - ] - } - ], - "source": [ - "xs = model.encoder.global_cmvn(feat)\n", - "print(xs)" - ] - }, - { - "cell_type": "code", - "execution_count": 43, - "id": "505ca294", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "tensor([[[ True, True, True, ..., True, True, True]],\n", - "\n", - " [[ True, True, True, ..., True, True, True]],\n", - "\n", - " [[ True, True, True, ..., True, False, False]],\n", - "\n", - " ...,\n", - "\n", - " [[ True, True, True, ..., False, False, False]],\n", - "\n", - " [[ True, True, True, ..., False, False, False]],\n", - "\n", - " [[ True, True, True, ..., False, False, False]]], device='cuda:0')\n" - ] - } - ], - "source": [ - "from wenet.utils.mask import make_pad_mask\n", - "masks = ~make_pad_mask(feat_len).unsqueeze(1) # (B, 1, L)\n", - "print(masks)" - ] - }, - { - "cell_type": "code", - "execution_count": 44, - "id": "aa03c2b9", - "metadata": {}, - "outputs": [], - "source": [ - "xs, pos_emb, masks = model.encoder.embed(xs, masks)" - ] - }, - { - "cell_type": "code", - "execution_count": 45, - "id": "ebc0ea12", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "tensor([[[-0.5482, 2.2866, -1.0750, ..., 1.4504, 0.2895, -0.6945],\n", - " [-0.8013, 1.7688, -1.6639, ..., 1.8332, 0.6791, -0.2000],\n", - " [-1.7112, 2.7057, -1.3363, ..., 1.2336, 0.1870, -0.5735],\n", - " ...,\n", - " [-0.9697, 2.3129, -0.8752, ..., 0.8584, 0.4853, -0.4177],\n", - " [-1.3609, 2.1779, -1.7813, ..., 2.0928, 0.2528, -0.3650],\n", - " [-1.6967, 2.3544, -1.7417, ..., 1.3670, 0.5951, -0.7415]],\n", - "\n", - " [[-1.9828, 2.3178, -0.9079, ..., 0.4117, 0.5006, 0.0872],\n", - " [-0.7640, 1.3558, -1.3613, ..., 0.7317, 0.6784, 0.1685],\n", - " [-0.9504, 1.6038, -1.3030, ..., 0.5754, 0.2677, 0.3343],\n", - " ...,\n", - " [-1.4757, 2.5317, -1.2321, ..., 1.2997, 0.5019, -0.1034],\n", - " [-1.1731, 2.3172, -1.2542, ..., 1.7391, 0.2171, -0.4445],\n", - " [-1.2700, 3.2229, 
-0.8872, ..., 1.6461, 0.0973, -0.7679]],\n", - "\n", - " [[-0.5873, 1.4291, -1.3950, ..., 0.2102, 0.1027, 0.0918],\n", - " [ 0.1743, 1.7834, -1.6422, ..., 0.8113, 0.3137, 0.5634],\n", - " [-0.3492, 1.8310, -1.0685, ..., 0.6924, 0.1378, 0.4594],\n", - " ...,\n", - " [-1.0869, 2.3002, -1.2638, ..., 1.7998, 0.5134, -0.5223],\n", - " [-1.2614, 2.7240, -1.3734, ..., 1.4445, 0.5742, -0.3320],\n", - " [-2.2068, 4.3462, -3.8289, ..., 2.1426, 1.2034, -1.3795]],\n", - "\n", - " ...,\n", - "\n", - " [[-0.3914, 1.8553, -0.5747, ..., 1.0062, 0.4632, -1.0452],\n", - " [-0.8605, 2.0172, -1.4437, ..., 1.4526, 0.1657, 0.5923],\n", - " [-0.7307, 2.2841, -1.0699, ..., 1.5825, -0.0980, 0.5503],\n", - " ...,\n", - " [-5.0821, 8.5920, -4.2137, ..., 6.2693, 0.0539, -2.9270],\n", - " [-5.0821, 8.5920, -4.2137, ..., 6.2693, 0.0539, -2.9270],\n", - " [-5.0821, 8.5920, -4.2137, ..., 6.2693, 0.0539, -2.9270]],\n", - "\n", - " [[-0.1619, 0.6255, -1.1323, ..., 0.0724, -0.2204, 0.4636],\n", - " [-0.0831, 0.5750, -1.0930, ..., 0.9110, -0.0650, 0.7299],\n", - " [-0.2820, 0.0801, -0.9418, ..., 0.3379, -0.1166, 0.4451],\n", - " ...,\n", - " [-5.0821, 8.5920, -4.2137, ..., 6.2693, 0.0539, -2.9270],\n", - " [-5.0821, 8.5920, -4.2137, ..., 6.2693, 0.0539, -2.9270],\n", - " [-5.0821, 8.5920, -4.2137, ..., 6.2693, 0.0539, -2.9270]],\n", - "\n", - " [[-0.5458, -0.6909, -1.3597, ..., -0.7818, 0.6875, 0.9843],\n", - " [ 0.0421, -1.1062, -1.4389, ..., -0.0239, 0.9115, 0.5287],\n", - " [-0.2909, -0.1886, -1.5487, ..., -0.1392, 0.0580, 0.3066],\n", - " ...,\n", - " [-5.0821, 8.5920, -4.2137, ..., 6.2693, 0.0539, -2.9270],\n", - " [-5.0821, 8.5920, -4.2137, ..., 6.2693, 0.0539, -2.9270],\n", - " [-5.0821, 8.5920, -4.2137, ..., 6.2693, 0.0539, -2.9270]]],\n", - " device='cuda:0', grad_fn=)\n", - "tensor([[[ 0.0000e+00, 1.0000e+00, 0.0000e+00, ..., 1.0000e+00,\n", - " 0.0000e+00, 1.0000e+00],\n", - " [ 8.4147e-01, 5.4030e-01, 8.0196e-01, ..., 1.0000e+00,\n", - " 1.0746e-04, 1.0000e+00],\n", - " [ 9.0930e-01, -4.1615e-01, 9.5814e-01, ..., 1.0000e+00,\n", - " 2.1492e-04, 1.0000e+00],\n", - " ...,\n", - " [-7.6825e-01, -6.4014e-01, 6.3280e-01, ..., 9.9998e-01,\n", - " 5.1581e-03, 9.9999e-01],\n", - " [-9.5375e-01, 3.0059e-01, 9.9899e-01, ..., 9.9998e-01,\n", - " 5.2656e-03, 9.9999e-01],\n", - " [-2.6237e-01, 9.6497e-01, 5.6075e-01, ..., 9.9998e-01,\n", - " 5.3730e-03, 9.9999e-01]]], device='cuda:0')\n", - "tensor([[[ True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True]],\n", - "\n", - " [[ True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True]],\n", - "\n", - " [[ True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True]],\n", - "\n", - " [[ True, True, True, True, True, True, True, 
True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True]],\n", - "\n", - " [[ True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True]],\n", - "\n", - " [[ True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True]],\n", - "\n", - " [[ True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " False]],\n", - "\n", - " [[ True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " False]],\n", - "\n", - " [[ True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, False,\n", - " False]],\n", - "\n", - " [[ True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, False, False, False,\n", - " False]],\n", - "\n", - " [[ True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, False, False, False,\n", - " False]],\n", - "\n", - " [[ True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, False, False, False,\n", - " False]],\n", - "\n", - " [[ True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, 
True, True, True, True, True, True, False, False, False,\n", - " False]],\n", - "\n", - " [[ True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, False, False, False, False, False,\n", - " False]],\n", - "\n", - " [[ True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, False, False, False, False, False, False, False, False,\n", - " False]],\n", - "\n", - " [[ True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, True, True, True, True, True, True, True, True, True,\n", - " True, False, False, False, False, False, False, False, False, False,\n", - " False]]], device='cuda:0')\n", - "torch.Size([16, 1, 51])\n" - ] - } - ], - "source": [ - "print(xs)\n", - "print(pos_emb)\n", - "print(masks)\n", - "print(masks.shape)" - ] - }, - { - "cell_type": "code", - "execution_count": 46, - "id": "4289461b", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[[[-0.54822 2.2866027 -1.0750197 ... 1.4503604 0.28950194\n", - " -0.6945408 ]\n", - " [-0.8012542 1.7687558 -1.6638877 ... 1.833158 0.6791494\n", - " -0.1999542 ]\n", - " [-1.7112465 2.7057455 -1.3363413 ... 1.2336441 0.18697014\n", - " -0.5735198 ]\n", - " ...\n", - " [-0.96968573 2.312949 -0.87524825 ... 0.85838526 0.4853347\n", - " -0.41773027]\n", - " [-1.3609431 2.1778803 -1.7812773 ... 2.0927877 0.25282228\n", - " -0.36496443]\n", - " [-1.6967483 2.3543842 -1.7416853 ... 1.366951 0.59511113\n", - " -0.74147725]]\n", - "\n", - " [[-1.9828408 2.31777 -0.9078527 ... 0.41170627 0.5006162\n", - " 0.08721463]\n", - " [-0.76404583 1.3557773 -1.3612567 ... 0.7317046 0.678426\n", - " 0.16851945]\n", - " [-0.95044655 1.6037656 -1.3029968 ... 0.57544005 0.26769355\n", - " 0.33433008]\n", - " ...\n", - " [-1.475677 2.531713 -1.2320715 ... 1.2996731 0.50191855\n", - " -0.10343577]\n", - " [-1.1730809 2.3172235 -1.2542105 ... 1.7391105 0.21709818\n", - " -0.44447583]\n", - " [-1.2699623 3.2228963 -0.8871915 ... 1.6460502 0.09731755\n", - " -0.7678688 ]]\n", - "\n", - " [[-0.5872559 1.4290544 -1.3950099 ... 0.21024795 0.10272825\n", - " 0.09179455]\n", - " [ 0.1742807 1.783423 -1.6421788 ... 0.8112701 0.31371105\n", - " 0.56344515]\n", - " [-0.34916472 1.8310343 -1.0685117 ... 0.69243336 0.13782299\n", - " 0.45937473]\n", - " ...\n", - " [-1.0868638 2.300204 -1.2638408 ... 1.7998282 0.5133892\n", - " -0.52227837]\n", - " [-1.2614481 2.7239661 -1.3733778 ... 1.444533 0.57420933\n", - " -0.33201432]\n", - " [-2.2067683 4.346218 -3.828867 ... 2.1426017 1.2033664\n", - " -1.3795122 ]]\n", - "\n", - " ...\n", - "\n", - " [[-0.39141566 1.8553346 -0.5747178 ... 1.0062351 0.46320182\n", - " -1.045236 ]\n", - " [-0.86054784 2.0171793 -1.4436853 ... 1.452623 0.16571884\n", - " 0.5923172 ]\n", - " [-0.73066384 2.2840502 -1.0698992 ... 1.5824941 -0.0979555\n", - " 0.55030036]\n", - " ...\n", - " [-5.08209 8.592033 -4.2136674 ... 
6.269257 0.05394945\n", - " -2.9269917 ]\n", - " [-5.08209 8.592033 -4.2136674 ... 6.269257 0.05394945\n", - " -2.9269917 ]\n", - " [-5.08209 8.592033 -4.2136674 ... 6.269257 0.05394945\n", - " -2.9269917 ]]\n", - "\n", - " [[-0.16194311 0.6255052 -1.1323429 ... 0.07242929 -0.22042468\n", - " 0.46362036]\n", - " [-0.08306468 0.575043 -1.09298 ... 0.9109665 -0.06501988\n", - " 0.72986233]\n", - " [-0.28202093 0.08014385 -0.9417719 ... 0.3379485 -0.11664233\n", - " 0.44514441]\n", - " ...\n", - " [-5.08209 8.592033 -4.2136674 ... 6.269257 0.05394945\n", - " -2.9269917 ]\n", - " [-5.08209 8.592033 -4.2136674 ... 6.269257 0.05394945\n", - " -2.9269917 ]\n", - " [-5.08209 8.592033 -4.2136674 ... 6.269257 0.05394945\n", - " -2.9269917 ]]\n", - "\n", - " [[-0.5458492 -0.69092435 -1.3596548 ... -0.78182435 0.68747747\n", - " 0.9842716 ]\n", - " [ 0.04212743 -1.1061852 -1.438915 ... -0.02385022 0.91146135\n", - " 0.52870303]\n", - " [-0.2909345 -0.18858244 -1.5487324 ... -0.13923697 0.05795169\n", - " 0.30663735]\n", - " ...\n", - " [-5.08209 8.592033 -4.2136674 ... 6.269257 0.05394945\n", - " -2.9269917 ]\n", - " [-5.08209 8.592033 -4.2136674 ... 6.269257 0.05394945\n", - " -2.9269917 ]\n", - " [-5.08209 8.592033 -4.2136674 ... 6.269257 0.05394945\n", - " -2.9269917 ]]]\n" - ] - } - ], - "source": [ - "xs = model.encoder.global_cmvn(feat)\n", - "masks = ~make_pad_mask(feat_len).unsqueeze(1) # (B, 1, L)\n", - "xs, pos_emb, masks = model.encoder.embed(xs, masks, offset=0)\n", - "print(xs.cpu().detach().numpy())" - ] - }, - { - "cell_type": "code", - "execution_count": 47, - "id": "67e10d73", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "tensor([[[[0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " ...,\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00]],\n", - "\n", - " [[0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 2.0908e-03],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " ...,\n", - " [0.0000e+00, 1.1943e-02, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 4.6105e-02, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 9.6723e-03,\n", - " 4.6135e-02, 0.0000e+00]],\n", - "\n", - " [[0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " ...,\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00]],\n", - "\n", - " ...,\n", - "\n", - " [[2.2816e-01, 2.4615e-01, 2.5304e-01, 
..., 2.0402e-01,\n", - " 2.3248e-01, 3.1191e-01],\n", - " [1.3587e-01, 2.8877e-01, 2.7991e-01, ..., 1.9210e-01,\n", - " 2.0346e-01, 1.9934e-01],\n", - " [2.5739e-01, 3.9348e-01, 2.7877e-01, ..., 2.7483e-01,\n", - " 1.9302e-01, 2.3810e-01],\n", - " ...,\n", - " [1.1939e-01, 2.8473e-01, 3.3082e-01, ..., 2.3838e-01,\n", - " 2.2104e-01, 2.3906e-01],\n", - " [1.7388e-01, 2.0402e-01, 4.0263e-01, ..., 2.4782e-01,\n", - " 2.6742e-01, 1.5427e-01],\n", - " [0.0000e+00, 2.9081e-01, 2.7726e-01, ..., 1.7540e-01,\n", - " 1.8479e-01, 2.2483e-01]],\n", - "\n", - " [[0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " ...,\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00]],\n", - "\n", - " [[3.5447e-01, 3.8861e-01, 3.9724e-01, ..., 3.8680e-01,\n", - " 3.3568e-01, 3.4552e-01],\n", - " [4.1739e-01, 5.1039e-01, 4.1730e-01, ..., 3.3993e-01,\n", - " 3.7082e-01, 3.5110e-01],\n", - " [3.6117e-01, 4.0745e-01, 4.8491e-01, ..., 3.4849e-01,\n", - " 3.2321e-01, 3.5189e-01],\n", - " ...,\n", - " [2.3144e-01, 3.8021e-01, 5.1526e-01, ..., 3.6499e-01,\n", - " 3.7412e-01, 3.9986e-01],\n", - " [3.4679e-01, 4.0238e-01, 5.0077e-01, ..., 3.6185e-01,\n", - " 3.1597e-01, 3.6335e-01],\n", - " [3.6498e-01, 3.7943e-01, 5.1719e-01, ..., 3.1798e-01,\n", - " 3.3657e-01, 3.4130e-01]]],\n", - "\n", - "\n", - " [[[0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " ...,\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00]],\n", - "\n", - " [[1.4560e-02, 9.4475e-02, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [1.5002e-02, 2.9632e-02, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [3.2952e-02, 0.0000e+00, 0.0000e+00, ..., 4.5850e-02,\n", - " 2.0439e-02, 0.0000e+00],\n", - " ...,\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 4.4258e-02],\n", - " [0.0000e+00, 0.0000e+00, 2.5565e-02, ..., 0.0000e+00,\n", - " 9.0044e-03, 4.9084e-02]],\n", - "\n", - " [[0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [1.1141e-01, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " ...,\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00]],\n", - "\n", - " ...,\n", - "\n", - " 
[[3.3697e-01, 3.8527e-01, 3.2900e-01, ..., 2.8704e-01,\n", - " 2.3351e-01, 1.9004e-01],\n", - " [1.3575e-01, 3.5783e-01, 3.3573e-01, ..., 2.2082e-01,\n", - " 1.5855e-01, 1.3587e-01],\n", - " [2.1929e-01, 2.8900e-01, 2.8255e-01, ..., 2.0603e-01,\n", - " 2.3927e-01, 2.1909e-01],\n", - " ...,\n", - " [2.3292e-01, 3.9097e-01, 3.6399e-01, ..., 2.0598e-01,\n", - " 2.5374e-01, 2.3137e-01],\n", - " [1.8739e-01, 3.0794e-01, 3.0297e-01, ..., 2.7251e-01,\n", - " 2.5192e-01, 2.0837e-01],\n", - " [2.2454e-01, 4.1402e-01, 5.4083e-01, ..., 3.1875e-01,\n", - " 2.5080e-01, 2.5939e-01]],\n", - "\n", - " [[0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " ...,\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00]],\n", - "\n", - " [[2.6457e-01, 4.9519e-01, 5.6702e-01, ..., 3.0955e-01,\n", - " 3.5292e-01, 3.2669e-01],\n", - " [2.1577e-01, 5.1833e-01, 4.9183e-01, ..., 3.6043e-01,\n", - " 3.8524e-01, 3.6155e-01],\n", - " [2.0068e-01, 4.2784e-01, 5.2818e-01, ..., 3.1871e-01,\n", - " 3.2452e-01, 3.1036e-01],\n", - " ...,\n", - " [4.9855e-01, 5.1001e-01, 5.2279e-01, ..., 3.6450e-01,\n", - " 3.4338e-01, 3.3603e-01],\n", - " [4.1233e-01, 5.5518e-01, 5.2828e-01, ..., 4.0676e-01,\n", - " 3.3873e-01, 3.6724e-01],\n", - " [4.0820e-01, 4.6187e-01, 4.7338e-01, ..., 3.8691e-01,\n", - " 3.6039e-01, 3.8022e-01]]],\n", - "\n", - "\n", - " [[[0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " ...,\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00]],\n", - "\n", - " [[0.0000e+00, 5.7852e-03, 0.0000e+00, ..., 7.4838e-03,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 3.0351e-02,\n", - " 0.0000e+00, 2.6720e-04],\n", - " [9.4807e-04, 0.0000e+00, 0.0000e+00, ..., 7.9551e-03,\n", - " 0.0000e+00, 0.0000e+00],\n", - " ...,\n", - " [2.0326e-02, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 1.0801e-02, 0.0000e+00],\n", - " [1.8470e-01, 0.0000e+00, 0.0000e+00, ..., 5.0584e-02,\n", - " 9.4758e-02, 5.9146e-02]],\n", - "\n", - " [[0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " ...,\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 
0.0000e+00]],\n", - "\n", - " ...,\n", - "\n", - " [[3.8708e-01, 2.8022e-01, 3.5893e-01, ..., 1.6595e-01,\n", - " 1.6031e-01, 2.1136e-01],\n", - " [1.5595e-01, 3.0544e-01, 2.4666e-01, ..., 2.2675e-01,\n", - " 2.5765e-01, 1.9682e-01],\n", - " [2.9518e-01, 4.1210e-01, 2.0063e-01, ..., 1.7595e-01,\n", - " 2.2537e-01, 2.2214e-01],\n", - " ...,\n", - " [2.4745e-01, 2.6259e-01, 3.8654e-01, ..., 2.3620e-01,\n", - " 2.3157e-01, 1.8514e-01],\n", - " [2.5715e-01, 2.9593e-01, 4.7745e-01, ..., 2.3546e-01,\n", - " 2.5073e-01, 2.0976e-01],\n", - " [1.2015e+00, 8.4644e-01, 7.3386e-01, ..., 1.0252e+00,\n", - " 9.5310e-01, 1.0013e+00]],\n", - "\n", - " [[0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " ...,\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00]],\n", - "\n", - " [[4.5013e-01, 4.7484e-01, 4.0540e-01, ..., 1.9346e-01,\n", - " 1.7826e-01, 1.4777e-01],\n", - " [4.7546e-01, 4.8187e-01, 3.6760e-01, ..., 2.7809e-01,\n", - " 3.2997e-01, 3.2337e-01],\n", - " [4.6160e-01, 4.0050e-01, 3.9061e-01, ..., 3.6613e-01,\n", - " 3.5243e-01, 2.9739e-01],\n", - " ...,\n", - " [5.5148e-01, 5.1018e-01, 4.0132e-01, ..., 3.8948e-01,\n", - " 3.5737e-01, 3.3088e-01],\n", - " [4.1973e-01, 4.5475e-01, 4.5320e-01, ..., 3.8343e-01,\n", - " 4.0126e-01, 3.6181e-01],\n", - " [3.4280e-01, 3.1606e-01, 4.4701e-01, ..., 2.1665e-01,\n", - " 2.3985e-01, 2.3903e-01]]],\n", - "\n", - "\n", - " ...,\n", - "\n", - "\n", - " [[[0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " ...,\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00]],\n", - "\n", - " [[0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [4.1783e-02, 0.0000e+00, 1.5805e-02, ..., 0.0000e+00,\n", - " 2.2508e-02, 0.0000e+00],\n", - " [4.3234e-02, 7.7864e-02, 0.0000e+00, ..., 1.6347e-02,\n", - " 0.0000e+00, 0.0000e+00],\n", - " ...,\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00]],\n", - "\n", - " [[3.2092e-02, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [1.3563e-01, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " ...,\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " 
[0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00]],\n", - "\n", - " ...,\n", - "\n", - " [[0.0000e+00, 2.5187e-01, 2.4979e-01, ..., 2.4775e-01,\n", - " 2.2354e-01, 1.9149e-01],\n", - " [1.6541e-01, 1.9586e-01, 1.9813e-01, ..., 2.7344e-01,\n", - " 2.0928e-01, 2.6150e-01],\n", - " [1.0495e-01, 6.3299e-02, 3.3844e-01, ..., 2.5138e-01,\n", - " 1.2470e-01, 2.3927e-01],\n", - " ...,\n", - " [1.1257e+00, 8.7341e-01, 7.8169e-01, ..., 1.0458e+00,\n", - " 1.0094e+00, 1.0221e+00],\n", - " [1.1257e+00, 8.7341e-01, 7.8169e-01, ..., 1.0458e+00,\n", - " 1.0094e+00, 1.0221e+00],\n", - " [1.1257e+00, 8.7341e-01, 7.8169e-01, ..., 1.0458e+00,\n", - " 1.0094e+00, 1.0221e+00]],\n", - "\n", - " [[0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " ...,\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00]],\n", - "\n", - " [[1.1428e-01, 4.5667e-01, 4.6821e-01, ..., 3.2058e-01,\n", - " 3.3579e-01, 3.9013e-01],\n", - " [1.0441e-01, 4.5739e-01, 4.6107e-01, ..., 3.8468e-01,\n", - " 3.8291e-01, 3.6686e-01],\n", - " [1.9868e-01, 3.5520e-01, 4.4313e-01, ..., 4.0679e-01,\n", - " 3.8068e-01, 3.0646e-01],\n", - " ...,\n", - " [1.4488e+00, 1.0212e+00, 9.4473e-01, ..., 1.2363e+00,\n", - " 1.2189e+00, 1.2380e+00],\n", - " [1.4488e+00, 1.0212e+00, 9.4473e-01, ..., 1.2363e+00,\n", - " 1.2189e+00, 1.2380e+00],\n", - " [1.4488e+00, 1.0212e+00, 9.4473e-01, ..., 1.2363e+00,\n", - " 1.2189e+00, 1.2380e+00]]],\n", - "\n", - "\n", - " [[[0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " ...,\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00]],\n", - "\n", - " [[2.4654e-02, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 3.3902e-02],\n", - " [0.0000e+00, 0.0000e+00, 1.8307e-02, ..., 5.1669e-02,\n", - " 9.4838e-03, 7.4535e-02],\n", - " [9.9215e-02, 0.0000e+00, 1.5872e-02, ..., 1.6203e-02,\n", - " 5.1401e-02, 1.9239e-03],\n", - " ...,\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00]],\n", - "\n", - " [[0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " ...,\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - 
" 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00]],\n", - "\n", - " ...,\n", - "\n", - " [[4.0034e-01, 2.5306e-01, 2.0218e-01, ..., 9.8162e-02,\n", - " 7.0643e-02, 4.9741e-02],\n", - " [1.2568e-01, 2.1031e-01, 1.1182e-01, ..., 4.2781e-02,\n", - " 1.1969e-01, 1.2005e-01],\n", - " [2.8787e-01, 2.4031e-01, 2.2566e-01, ..., 0.0000e+00,\n", - " 6.4181e-02, 5.8730e-02],\n", - " ...,\n", - " [1.1257e+00, 8.7341e-01, 7.8169e-01, ..., 1.0458e+00,\n", - " 1.0094e+00, 1.0221e+00],\n", - " [1.1257e+00, 8.7341e-01, 7.8169e-01, ..., 1.0458e+00,\n", - " 1.0094e+00, 1.0221e+00],\n", - " [1.1257e+00, 8.7341e-01, 7.8169e-01, ..., 1.0458e+00,\n", - " 1.0094e+00, 1.0221e+00]],\n", - "\n", - " [[0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " ...,\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00]],\n", - "\n", - " [[3.8405e-01, 3.0990e-01, 3.7156e-01, ..., 1.8125e-01,\n", - " 1.5051e-01, 1.9620e-01],\n", - " [4.7286e-01, 4.0529e-01, 3.9718e-01, ..., 2.4710e-01,\n", - " 4.5657e-02, 1.1501e-01],\n", - " [3.2621e-01, 3.0073e-01, 3.0477e-01, ..., 2.3529e-01,\n", - " 2.1357e-01, 1.6986e-01],\n", - " ...,\n", - " [1.4488e+00, 1.0212e+00, 9.4473e-01, ..., 1.2363e+00,\n", - " 1.2189e+00, 1.2380e+00],\n", - " [1.4488e+00, 1.0212e+00, 9.4473e-01, ..., 1.2363e+00,\n", - " 1.2189e+00, 1.2380e+00],\n", - " [1.4488e+00, 1.0212e+00, 9.4473e-01, ..., 1.2363e+00,\n", - " 1.2189e+00, 1.2380e+00]]],\n", - "\n", - "\n", - " [[[0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " ...,\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00]],\n", - "\n", - " [[3.3438e-02, 1.2378e-03, 5.2972e-02, ..., 7.2712e-02,\n", - " 8.6563e-02, 1.4494e-01],\n", - " [1.1043e-01, 6.1431e-02, 6.3630e-02, ..., 8.1278e-02,\n", - " 6.2590e-02, 8.3154e-02],\n", - " [1.7677e-02, 2.0111e-03, 7.8750e-02, ..., 6.9633e-02,\n", - " 8.9799e-02, 5.3263e-02],\n", - " ...,\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00]],\n", - "\n", - " [[1.0034e-01, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [1.5627e-01, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [5.1447e-02, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 4.3641e-03],\n", - " ...,\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 
0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00]],\n", - "\n", - " ...,\n", - "\n", - " [[2.5142e-01, 4.5964e-01, 3.7346e-01, ..., 4.7631e-02,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [1.9760e-01, 2.6627e-01, 1.1191e-01, ..., 3.0450e-02,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [1.6341e-01, 3.2938e-01, 2.5690e-01, ..., 5.5694e-02,\n", - " 0.0000e+00, 0.0000e+00],\n", - " ...,\n", - " [1.1257e+00, 8.7341e-01, 7.8169e-01, ..., 1.0458e+00,\n", - " 1.0094e+00, 1.0221e+00],\n", - " [1.1257e+00, 8.7341e-01, 7.8169e-01, ..., 1.0458e+00,\n", - " 1.0094e+00, 1.0221e+00],\n", - " [1.1257e+00, 8.7341e-01, 7.8169e-01, ..., 1.0458e+00,\n", - " 1.0094e+00, 1.0221e+00]],\n", - "\n", - " [[0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 2.2189e-02, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 2.8490e-02],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " ...,\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00],\n", - " [0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00,\n", - " 0.0000e+00, 0.0000e+00]],\n", - "\n", - " [[2.5810e-01, 6.3017e-01, 3.7038e-01, ..., 1.8704e-01,\n", - " 8.2694e-02, 9.9127e-02],\n", - " [1.7293e-01, 5.0679e-01, 4.0739e-01, ..., 1.6006e-01,\n", - " 1.1725e-01, 9.9405e-02],\n", - " [2.4175e-01, 4.1616e-01, 4.1257e-01, ..., 1.3520e-01,\n", - " 7.9126e-02, 1.2846e-01],\n", - " ...,\n", - " [1.4488e+00, 1.0212e+00, 9.4473e-01, ..., 1.2363e+00,\n", - " 1.2189e+00, 1.2380e+00],\n", - " [1.4488e+00, 1.0212e+00, 9.4473e-01, ..., 1.2363e+00,\n", - " 1.2189e+00, 1.2380e+00],\n", - " [1.4488e+00, 1.0212e+00, 9.4473e-01, ..., 1.2363e+00,\n", - " 1.2189e+00, 1.2380e+00]]]], device='cuda:0',\n", - " grad_fn=)\n" - ] - } - ], - "source": [ - "xs = model.encoder.global_cmvn(feat)\n", - "masks = ~make_pad_mask(feat_len).unsqueeze(1) # (B, 1, L)\n", - "\n", - "x = xs.unsqueeze(1)\n", - "x = model.encoder.embed.conv(x)\n", - "print(x)" - ] - }, - { - "cell_type": "code", - "execution_count": 48, - "id": "9a9478ad", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[[[-0.03426375 0.14291267 -0.06718873 ... 0.09064753 0.01809387\n", - " -0.0434088 ]\n", - " [-0.05007839 0.11054724 -0.10399298 ... 0.11457238 0.04244684\n", - " -0.01249714]\n", - " [-0.10695291 0.16910909 -0.08352133 ... 0.07710276 0.01168563\n", - " -0.03584499]\n", - " ...\n", - " [-0.06060536 0.14455931 -0.05470302 ... 0.05364908 0.03033342\n", - " -0.02610814]\n", - " [-0.08505894 0.13611752 -0.11132983 ... 0.13079923 0.01580139\n", - " -0.02281028]\n", - " [-0.10604677 0.14714901 -0.10885533 ... 0.08543444 0.03719445\n", - " -0.04634233]]\n", - "\n", - " [[-0.12392755 0.14486063 -0.05674079 ... 0.02573164 0.03128851\n", - " 0.00545091]\n", - " [-0.04775286 0.08473608 -0.08507854 ... 0.04573154 0.04240163\n", - " 0.01053247]\n", - " [-0.05940291 0.10023535 -0.0814373 ... 0.035965 0.01673085\n", - " 0.02089563]\n", - " ...\n", - " [-0.09222981 0.15823206 -0.07700447 ... 0.08122957 0.03136991\n", - " -0.00646474]\n", - " [-0.07331756 0.14482647 -0.07838815 ... 0.1086944 0.01356864\n", - " -0.02777974]\n", - " [-0.07937264 0.20143102 -0.05544947 ... 
0.10287814 0.00608235\n", - " -0.0479918 ]]\n", - "\n", - " [[-0.03670349 0.0893159 -0.08718812 ... 0.0131405 0.00642052\n", - " 0.00573716]\n", - " [ 0.01089254 0.11146393 -0.10263617 ... 0.05070438 0.01960694\n", - " 0.03521532]\n", - " [-0.0218228 0.11443964 -0.06678198 ... 0.04327708 0.00861394\n", - " 0.02871092]\n", - " ...\n", - " [-0.06792898 0.14376275 -0.07899005 ... 0.11248926 0.03208683\n", - " -0.0326424 ]\n", - " [-0.07884051 0.17024788 -0.08583611 ... 0.09028331 0.03588808\n", - " -0.0207509 ]\n", - " [-0.13792302 0.27163863 -0.23930418 ... 0.13391261 0.0752104\n", - " -0.08621951]]\n", - "\n", - " ...\n", - "\n", - " [[-0.02446348 0.11595841 -0.03591986 ... 0.0628897 0.02895011\n", - " -0.06532725]\n", - " [-0.05378424 0.1260737 -0.09023033 ... 0.09078894 0.01035743\n", - " 0.03701983]\n", - " [-0.04566649 0.14275314 -0.0668687 ... 0.09890588 -0.00612222\n", - " 0.03439377]\n", - " ...\n", - " [-0.31763062 0.5370021 -0.2633542 ... 0.39182857 0.00337184\n", - " -0.18293698]\n", - " [-0.31763062 0.5370021 -0.2633542 ... 0.39182857 0.00337184\n", - " -0.18293698]\n", - " [-0.31763062 0.5370021 -0.2633542 ... 0.39182857 0.00337184\n", - " -0.18293698]]\n", - "\n", - " [[-0.01012144 0.03909408 -0.07077143 ... 0.00452683 -0.01377654\n", - " 0.02897627]\n", - " [-0.00519154 0.03594019 -0.06831125 ... 0.05693541 -0.00406374\n", - " 0.0456164 ]\n", - " [-0.01762631 0.00500899 -0.05886075 ... 0.02112178 -0.00729015\n", - " 0.02782153]\n", - " ...\n", - " [-0.31763062 0.5370021 -0.2633542 ... 0.39182857 0.00337184\n", - " -0.18293698]\n", - " [-0.31763062 0.5370021 -0.2633542 ... 0.39182857 0.00337184\n", - " -0.18293698]\n", - " [-0.31763062 0.5370021 -0.2633542 ... 0.39182857 0.00337184\n", - " -0.18293698]]\n", - "\n", - " [[-0.03411558 -0.04318277 -0.08497842 ... -0.04886402 0.04296734\n", - " 0.06151697]\n", - " [ 0.00263296 -0.06913657 -0.08993219 ... -0.00149064 0.05696633\n", - " 0.03304394]\n", - " [-0.01818341 -0.0117864 -0.09679577 ... -0.00870231 0.00362198\n", - " 0.01916483]\n", - " ...\n", - " [-0.31763062 0.5370021 -0.2633542 ... 0.39182857 0.00337184\n", - " -0.18293698]\n", - " [-0.31763062 0.5370021 -0.2633542 ... 0.39182857 0.00337184\n", - " -0.18293698]\n", - " [-0.31763062 0.5370021 -0.2633542 ... 0.39182857 0.00337184\n", - " -0.18293698]]]\n", - "torch.Size([16, 51, 256])\n" - ] - } - ], - "source": [ - "b, c, t, f = x.size()\n", - "x = model.encoder.embed.out(x.transpose(1, 2).contiguous().view(b, t, c * f))\n", - "print(x.cpu().detach().numpy())\n", - "print(x.shape)" - ] - }, - { - "cell_type": "code", - "execution_count": 49, - "id": "fd69003f", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[[[-0.54822 2.2866027 -1.0750197 ... 1.4503604 0.28950194\n", - " -0.6945408 ]\n", - " [-0.8012542 1.7687558 -1.6638877 ... 1.833158 0.6791494\n", - " -0.1999542 ]\n", - " [-1.7112465 2.7057455 -1.3363413 ... 1.2336441 0.18697014\n", - " -0.5735198 ]\n", - " ...\n", - " [-0.96968573 2.312949 -0.87524825 ... 0.85838526 0.4853347\n", - " -0.41773027]\n", - " [-1.3609431 2.1778803 -1.7812773 ... 2.0927877 0.25282228\n", - " -0.36496443]\n", - " [-1.6967483 2.3543842 -1.7416853 ... 1.366951 0.59511113\n", - " -0.74147725]]\n", - "\n", - " [[-1.9828408 2.31777 -0.9078527 ... 0.41170627 0.5006162\n", - " 0.08721463]\n", - " [-0.76404583 1.3557773 -1.3612567 ... 0.7317046 0.678426\n", - " 0.16851945]\n", - " [-0.95044655 1.6037656 -1.3029968 ... 
0.57544005 0.26769355\n", - " 0.33433008]\n", - " ...\n", - " [-1.475677 2.531713 -1.2320715 ... 1.2996731 0.50191855\n", - " -0.10343577]\n", - " [-1.1730809 2.3172235 -1.2542105 ... 1.7391105 0.21709818\n", - " -0.44447583]\n", - " [-1.2699623 3.2228963 -0.8871915 ... 1.6460502 0.09731755\n", - " -0.7678688 ]]\n", - "\n", - " [[-0.5872559 1.4290544 -1.3950099 ... 0.21024795 0.10272825\n", - " 0.09179455]\n", - " [ 0.1742807 1.783423 -1.6421788 ... 0.8112701 0.31371105\n", - " 0.56344515]\n", - " [-0.34916472 1.8310343 -1.0685117 ... 0.69243336 0.13782299\n", - " 0.45937473]\n", - " ...\n", - " [-1.0868638 2.300204 -1.2638408 ... 1.7998282 0.5133892\n", - " -0.52227837]\n", - " [-1.2614481 2.7239661 -1.3733778 ... 1.444533 0.57420933\n", - " -0.33201432]\n", - " [-2.2067683 4.346218 -3.828867 ... 2.1426017 1.2033664\n", - " -1.3795122 ]]\n", - "\n", - " ...\n", - "\n", - " [[-0.39141566 1.8553346 -0.5747178 ... 1.0062351 0.46320182\n", - " -1.045236 ]\n", - " [-0.86054784 2.0171793 -1.4436853 ... 1.452623 0.16571884\n", - " 0.5923172 ]\n", - " [-0.73066384 2.2840502 -1.0698992 ... 1.5824941 -0.0979555\n", - " 0.55030036]\n", - " ...\n", - " [-5.08209 8.592033 -4.2136674 ... 6.269257 0.05394945\n", - " -2.9269917 ]\n", - " [-5.08209 8.592033 -4.2136674 ... 6.269257 0.05394945\n", - " -2.9269917 ]\n", - " [-5.08209 8.592033 -4.2136674 ... 6.269257 0.05394945\n", - " -2.9269917 ]]\n", - "\n", - " [[-0.16194311 0.6255052 -1.1323429 ... 0.07242929 -0.22042468\n", - " 0.46362036]\n", - " [-0.08306468 0.575043 -1.09298 ... 0.9109665 -0.06501988\n", - " 0.72986233]\n", - " [-0.28202093 0.08014385 -0.9417719 ... 0.3379485 -0.11664233\n", - " 0.44514441]\n", - " ...\n", - " [-5.08209 8.592033 -4.2136674 ... 6.269257 0.05394945\n", - " -2.9269917 ]\n", - " [-5.08209 8.592033 -4.2136674 ... 6.269257 0.05394945\n", - " -2.9269917 ]\n", - " [-5.08209 8.592033 -4.2136674 ... 6.269257 0.05394945\n", - " -2.9269917 ]]\n", - "\n", - " [[-0.5458492 -0.69092435 -1.3596548 ... -0.78182435 0.68747747\n", - " 0.9842716 ]\n", - " [ 0.04212743 -1.1061852 -1.438915 ... -0.02385022 0.91146135\n", - " 0.52870303]\n", - " [-0.2909345 -0.18858244 -1.5487324 ... -0.13923697 0.05795169\n", - " 0.30663735]\n", - " ...\n", - " [-5.08209 8.592033 -4.2136674 ... 6.269257 0.05394945\n", - " -2.9269917 ]\n", - " [-5.08209 8.592033 -4.2136674 ... 6.269257 0.05394945\n", - " -2.9269917 ]\n", - " [-5.08209 8.592033 -4.2136674 ... 6.269257 0.05394945\n", - " -2.9269917 ]]]\n" - ] - } - ], - "source": [ - "x, pos_emb = model.encoder.embed.pos_enc(x, 0)\n", - "print(x.cpu().detach().numpy())" - ] - }, - { - "cell_type": "code", - "execution_count": 50, - "id": "8ed88489", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "torch.float32\n", - "[[[ 0.0000000e+00 1.0000000e+00 0.0000000e+00 ... 1.0000000e+00\n", - " 0.0000000e+00 1.0000000e+00]\n", - " [ 8.4147096e-01 5.4030234e-01 8.0196178e-01 ... 1.0000000e+00\n", - " 1.0746076e-04 1.0000000e+00]\n", - " [ 9.0929741e-01 -4.1614684e-01 9.5814437e-01 ... 1.0000000e+00\n", - " 2.1492151e-04 1.0000000e+00]\n", - " ...\n", - " [-7.6825464e-01 -6.4014435e-01 6.3279724e-01 ... 9.9998462e-01\n", - " 5.1580933e-03 9.9998671e-01]\n", - " [-9.5375264e-01 3.0059254e-01 9.9899054e-01 ... 9.9998397e-01\n", - " 5.2655530e-03 9.9998611e-01]\n", - " [-2.6237485e-01 9.6496606e-01 5.6074661e-01 ... 
9.9998331e-01\n", - " 5.3730118e-03 9.9998558e-01]]]\n" - ] - } - ], - "source": [ - "print(pos_emb.dtype)\n", - "print(pos_emb.cpu().detach().numpy())" - ] - }, - { - "cell_type": "code", - "execution_count": 54, - "id": "5e277881", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "torch.Size([16, 51, 256])\n" - ] - }, - { - "ename": "NameError", - "evalue": "name 'mask' is not defined", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 141\u001b[0m \u001b[0mx\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcpu\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdetach\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnumpy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 142\u001b[0m \u001b[0mpos_emb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpos_emb\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcpu\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdetach\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnumpy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 143\u001b[0;31m \u001b[0mmask\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmask\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcpu\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdetach\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnumpy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 144\u001b[0m \u001b[0mx_att\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mx_att\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcpu\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdetach\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnumpy\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 145\u001b[0m )\n", - "\u001b[0;31mNameError\u001b[0m: name 'mask' is not defined" - ] - } - ], - "source": [ - "def add_optional_chunk_mask(xs: torch.Tensor, masks: torch.Tensor,\n", - " use_dynamic_chunk: bool,\n", - " use_dynamic_left_chunk: bool,\n", - " decoding_chunk_size: int, static_chunk_size: int,\n", - " num_decoding_left_chunks: int):\n", - " \"\"\" Apply optional mask for encoder.\n", - " Args:\n", - " xs (torch.Tensor): padded input, (B, L, D), L for max length\n", - " mask (torch.Tensor): mask for xs, (B, 1, L)\n", - " use_dynamic_chunk (bool): whether to use dynamic chunk or not\n", - " use_dynamic_left_chunk (bool): whether to use dynamic left chunk for\n", - " training.\n", - " decoding_chunk_size (int): decoding chunk size for dynamic chunk, it's\n", - " 0: default for training, use random dynamic chunk.\n", - " <0: for decoding, use full chunk.\n", - " >0: for decoding, use fixed chunk size as set.\n", - " static_chunk_size (int): chunk size for static chunk training/decoding\n", - " if it's greater than 0, if use_dynamic_chunk is 
true,\n", - " this parameter will be ignored\n", - " num_decoding_left_chunks: number of left chunks, this is for decoding,\n", - " the chunk size is decoding_chunk_size.\n", - " >=0: use num_decoding_left_chunks\n", - " <0: use all left chunks\n", - " Returns:\n", - " torch.Tensor: chunk mask of the input xs.\n", - " \"\"\"\n", - " # Whether to use chunk mask or not\n", - " if use_dynamic_chunk:\n", - " max_len = xs.size(1)\n", - " if decoding_chunk_size < 0:\n", - " chunk_size = max_len\n", - " num_left_chunks = -1\n", - " elif decoding_chunk_size > 0:\n", - " chunk_size = decoding_chunk_size\n", - " num_left_chunks = num_decoding_left_chunks\n", - " else:\n", - " # chunk size is either [1, 25] or full context(max_len).\n", - " # Since we use 4 times subsampling and allow up to 1s(100 frames)\n", - " # delay, the maximum frame is 100 / 4 = 25.\n", - " chunk_size = torch.randint(1, max_len, (1, )).item()\n", - " num_left_chunks = -1\n", - " if chunk_size > max_len // 2:\n", - " chunk_size = max_len\n", - " else:\n", - " chunk_size = chunk_size % 25 + 1\n", - " if use_dynamic_left_chunk:\n", - " max_left_chunks = (max_len - 1) // chunk_size\n", - " num_left_chunks = torch.randint(0, max_left_chunks,\n", - " (1, )).item()\n", - " chunk_masks = subsequent_chunk_mask(xs.size(1), chunk_size,\n", - " num_left_chunks,\n", - " xs.device) # (L, L)\n", - " chunk_masks = chunk_masks.unsqueeze(0) # (1, L, L)\n", - " chunk_masks = masks & chunk_masks # (B, L, L)\n", - " elif static_chunk_size > 0:\n", - " num_left_chunks = num_decoding_left_chunks\n", - " chunk_masks = subsequent_chunk_mask(xs.size(1), static_chunk_size,\n", - " num_left_chunks,\n", - " xs.device) # (L, L)\n", - " chunk_masks = chunk_masks.unsqueeze(0) # (1, L, L)\n", - " chunk_masks = masks & chunk_masks # (B, L, L)\n", - " else:\n", - " chunk_masks = masks\n", - " return chunk_masks\n", - "\n", - "from wenet.utils.mask import make_pad_mask\n", - "\n", - "\n", - "masks = ~make_pad_mask(feat_len).unsqueeze(1)\n", - "xs = model.encoder.global_cmvn(feat)\n", - "xs, pos_emb, masks = model.encoder.embed(xs, masks, offset=0)\n", - "\n", - "mask_pad = masks\n", - "decoding_chunk_size=0\n", - "num_decoding_left_chunks=-1\n", - "use_dynamic_left_chunk=-1\n", - "use_dynamic_chunk=False\n", - "static_chunk_size=-1\n", - "chunk_masks = add_optional_chunk_mask(\n", - " xs, \n", - " masks, \n", - " use_dynamic_chunk,\n", - " use_dynamic_left_chunk,\n", - " decoding_chunk_size, \n", - " static_chunk_size,\n", - " num_decoding_left_chunks)\n", - "\n", - "np.savez('/workspace/DeepSpeech-2.x/.notebook/enc_embed', \n", - " embed_out=xs.cpu().detach().numpy(), \n", - " pos_emb=pos_emb.cpu().detach().numpy(),\n", - " chunk_masks=chunk_masks.cpu().detach().numpy(),\n", - " mask_pad=mask_pad.cpu().detach().numpy())\n", - "\n", - "model.eval()\n", - "# print(chunk_masks)\n", - "print(xs.shape)\n", - "for layer in model.encoder.encoders:\n", - " #xs, chunk_masks, _ = layer(xs, chunk_masks, pos_emb, mask_pad)\n", - " #np.savez('/workspace/DeepSpeech-2.x/.notebook/enc_0', enc_0=xs.cpu().detach().numpy())\n", - " \n", - " x = xs\n", - " residual = x\n", - " x_norm = layer.norm_ff_macaron(x)\n", - " !rm /workspace/DeepSpeech-2.x/.notebook/enc_0_norm_ff.npz\n", - " np.savez('/workspace/DeepSpeech-2.x/.notebook/enc_0_norm_ff', \n", - " norm_ff=x_norm.cpu().detach().numpy(),\n", - " xs=xs.cpu().detach().numpy())\n", - " #print(x.cpu().detach().numpy())\n", - " for p in layer.norm_ff_macaron.parameters():\n", - " #print(p, p.sum())\n", - " pass\n", - " \n", - " x = 
residual + layer.ff_scale * layer.feed_forward_macaron(x_norm)\n", - " \n", - " ps = []\n", - " for n, p in layer.feed_forward_macaron.state_dict().items():\n", - " #print(n, p.cpu().data.numpy())\n", - " ps.append(p.cpu().data.numpy())\n", - " pass\n", - "\n", - " ff_l_x = layer.feed_forward_macaron.w_1(x_norm)\n", - " ff_l_a_x = layer.feed_forward_macaron.activation(ff_l_x)\n", - " ff_l_a_l_x = layer.feed_forward_macaron.w_2(ff_l_a_x)\n", - " np.savez('/workspace/DeepSpeech-2.x/.notebook/enc_0_ff_out', \n", - " norm_ff=x_norm.cpu().detach().numpy(),\n", - " ff_out=x.cpu().detach().numpy(),\n", - " ff_l_x = ff_l_x.cpu().detach().numpy(),\n", - " ff_l_a_x=ff_l_a_x.cpu().detach().numpy(),\n", - " ff_l_a_l_x=ff_l_a_l_x.cpu().detach().numpy(),\n", - " ps=ps,\n", - " )\n", - " \n", - " \n", - " residual = x\n", - " x = layer.norm_mha(x)\n", - " x_q = x\n", - " \n", - " x_att = layer.self_attn(x_q, x, x, pos_emb, masks)\n", - " np.savez('/workspace/DeepSpeech-2.x/.notebook/enc_0_selattn_out', \n", - " x_q=x_q.cpu().detach().numpy(),\n", - " x=x.cpu().detach().numpy(),\n", - " pos_emb = pos_emb.cpu().detach().numpy(),\n", - " mask=mask.cpu().detach().numpy(),\n", - " x_att=x_att.cpu().detach().numpy(),\n", - " )\n", - " \n", - " break\n", - "#print(xs.cpu().detach().numpy())\n", - "\n", - "\n", - "i = 0\n", - "for layer in model.encoder.encoders:\n", - " xs, chunk_masks, _ = layer(xs, chunk_masks, pos_emb, mask_pad)\n", - " i += 1\n", - " if i == 2:\n", - " np.savez('/workspace/DeepSpeech-2.x/.notebook/enc_2', enc_2=xs.cpu().detach().numpy())\n", - " \n", - "np.savez('/workspace/DeepSpeech-2.x/.notebook/enc_all', enc_all=xs.cpu().detach().numpy())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c43fd4f1", - "metadata": {}, - "outputs": [], - "source": [ - "out, mask = model.encoder(feat, feat_len)\n", - "#print(out.cpu().detach().numpy())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0e73db22", - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8f506114", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.0" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/README.md b/README.md index 424dc485e..71bc63638 100644 --- a/README.md +++ b/README.md @@ -1,39 +1,37 @@ -[中文版](README_cn.md) - -# PaddlePaddle ASR toolkit +# PaddlePaddle Speech to Any toolkit ![License](https://img.shields.io/badge/license-Apache%202-red.svg) ![python version](https://img.shields.io/badge/python-3.7+-orange.svg) ![support os](https://img.shields.io/badge/os-linux-yellow.svg) -*PaddleASR* is an open-source implementation of end-to-end Automatic Speech Recognition (ASR) engine, with [PaddlePaddle](https://github.com/PaddlePaddle/Paddle) platform. Our vision is to empower both industrial application and academic research on speech recognition, via an easy-to-use, efficient, samller and scalable implementation, including training, inference & testing module, and deployment. 
+*DeepSpeech* is an open-source implementation of an end-to-end Automatic Speech Recognition engine, built on the [PaddlePaddle](https://github.com/PaddlePaddle/Paddle) platform. Our vision is to empower both industrial application and academic research on speech recognition, via an easy-to-use, efficient, smaller and scalable implementation, including training, inference & testing modules, and deployment. ## Features - See [feature list](doc/src/feature_list.md) for more information. + See [feature list](docs/src/feature_list.md) for more information. ## Setup +All tested under: +* Ubuntu 16.04 * python>=3.7 -* paddlepaddle>=2.1.0 +* paddlepaddle>=2.2.0rc -Please see [install](doc/src/install.md). +Please see [install](docs/src/install.md). ## Getting Started -Please see [Getting Started](doc/src/getting_started.md) and [tiny egs](examples/tiny/s0/README.md). +Please see [Getting Started](docs/src/getting_started.md) and [tiny egs](examples/tiny/s0/README.md). ## More Information -* [Data Prepration](doc/src/data_preparation.md) -* [Data Augmentation](doc/src/augmentation.md) -* [Ngram LM](doc/src/ngram_lm.md) -* [Server Demo](doc/src/server.md) -* [Benchmark](doc/src/benchmark.md) -* [Relased Model](doc/src/released_model.md) -* [FAQ](doc/src/faq.md) +* [Data Preparation](docs/src/data_preparation.md) +* [Data Augmentation](docs/src/augmentation.md) +* [Ngram LM](docs/src/ngram_lm.md) +* [Benchmark](docs/src/benchmark.md) +* [Released Model](docs/src/released_model.md) ## Questions and Help @@ -43,8 +41,8 @@ You are welcome to submit questions in [Github Discussions](https://github.com/P ## License -DeepASR is provided under the [Apache-2.0 License](./LICENSE). +DeepSpeech is provided under the [Apache-2.0 License](./LICENSE). ## Acknowledgement -We depends on many open source repos. See [References](doc/src/reference.md) for more information. +We depend on many open-source repos. See [References](docs/src/reference.md) for more information.
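The updated Setup section above pins the tested environment to Python >= 3.7 and paddlepaddle >= 2.2.0rc. As a quick, hedged illustration (not code from this repository), a local environment could be checked against those pins like this:

```python
# Illustrative sketch only (not part of the repo): verify the interpreter and
# PaddlePaddle versions against the requirements listed in the README
# (python>=3.7, paddlepaddle>=2.2.0rc).
import sys

import paddle  # assumes paddlepaddle is already installed

assert sys.version_info >= (3, 7), "README requires Python 3.7 or newer"
print("python:", sys.version.split()[0])
print("paddlepaddle:", paddle.__version__)  # expect 2.2.0rc0 or newer
```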
diff --git a/README_cn.md b/README_cn.md deleted file mode 100644 index d762ec2ba..000000000 --- a/README_cn.md +++ /dev/null @@ -1,48 +0,0 @@ -[English](README.md) - -# PaddlePaddle ASR toolkit - -![License](https://img.shields.io/badge/license-Apache%202-red.svg) -![python version](https://img.shields.io/badge/python-3.7+-orange.svg) -![support os](https://img.shields.io/badge/os-linux-yellow.svg) - -*PaddleASR*是一个采用[PaddlePaddle](https://github.com/PaddlePaddle/Paddle)平台的端到端自动语音识别(ASR)引擎的开源项目, -我们的愿景是为语音识别在工业应用和学术研究上,提供易于使用、高效、小型化和可扩展的工具,包括训练,推理,以及 部署。 - -## 特性 - - 参看 [特性列表](doc/src/feature_list.md)。 - - -## 安装 - -* python>=3.7 -* paddlepaddle>=2.1.0 - -参看 [安装](doc/src/install.md)。 - -## 开始 - -请查看 [开始](doc/src/getting_started.md) 和 [tiny egs](examples/tiny/s0/README.md)。 - -## 更多信息 - -* [数据处理](doc/src/data_preparation.md) -* [数据增强](doc/src/augmentation.md) -* [语言模型](doc/src/ngram_lm.md) -* [服务部署](doc/src/server.md) -* [Benchmark](doc/src/benchmark.md) -* [Relased Model](doc/src/released_model.md) -* [FAQ](doc/src/faq.md) - -## 问题和帮助 - -欢迎您在[Github讨论](https://github.com/PaddlePaddle/DeepSpeech/discussions)提交问题,[Github问题](https://github.com/PaddlePaddle/models/issues)中反馈bug。也欢迎您为这个项目做出贡献。 - -## License - -DeepASR 遵循[Apache-2.0开源协议](./LICENSE)。 - -## 感谢 - -开发中参考一些优秀的仓库,详情参见 [References](doc/src/reference.md)。 diff --git a/deepspeech/__init__.py b/deepspeech/__init__.py index 37531657e..5505ecbf0 100644 --- a/deepspeech/__init__.py +++ b/deepspeech/__init__.py @@ -30,24 +30,13 @@ logger = Log(__name__).getlog() logger.warn = logger.warning ########### hcak paddle ############# -paddle.bool = 'bool' -paddle.float16 = 'float16' paddle.half = 'float16' -paddle.float32 = 'float32' paddle.float = 'float32' -paddle.float64 = 'float64' paddle.double = 'float64' -paddle.int8 = 'int8' -paddle.int16 = 'int16' paddle.short = 'int16' -paddle.int32 = 'int32' paddle.int = 'int32' -paddle.int64 = 'int64' paddle.long = 'int64' -paddle.uint8 = 'uint8' paddle.uint16 = 'uint16' -paddle.complex64 = 'complex64' -paddle.complex128 = 'complex128' paddle.cdouble = 'complex128' @@ -91,23 +80,23 @@ def convert_dtype_to_string(tensor_dtype): if not hasattr(paddle, 'softmax'): - logger.warn("register user softmax to paddle, remove this when fixed!") + logger.debug("register user softmax to paddle, remove this when fixed!") setattr(paddle, 'softmax', paddle.nn.functional.softmax) if not hasattr(paddle, 'log_softmax'): - logger.warn("register user log_softmax to paddle, remove this when fixed!") + logger.debug("register user log_softmax to paddle, remove this when fixed!") setattr(paddle, 'log_softmax', paddle.nn.functional.log_softmax) if not hasattr(paddle, 'sigmoid'): - logger.warn("register user sigmoid to paddle, remove this when fixed!") + logger.debug("register user sigmoid to paddle, remove this when fixed!") setattr(paddle, 'sigmoid', paddle.nn.functional.sigmoid) if not hasattr(paddle, 'log_sigmoid'): - logger.warn("register user log_sigmoid to paddle, remove this when fixed!") + logger.debug("register user log_sigmoid to paddle, remove this when fixed!") setattr(paddle, 'log_sigmoid', paddle.nn.functional.log_sigmoid) if not hasattr(paddle, 'relu'): - logger.warn("register user relu to paddle, remove this when fixed!") + logger.debug("register user relu to paddle, remove this when fixed!") setattr(paddle, 'relu', paddle.nn.functional.relu) @@ -116,7 +105,7 @@ def cat(xs, dim=0): if not hasattr(paddle, 'cat'): - logger.warn( + logger.debug( "override cat of paddle if exists or register, remove this when 
fixed!") paddle.cat = cat @@ -127,7 +116,7 @@ def item(x: paddle.Tensor): if not hasattr(paddle.Tensor, 'item'): - logger.warn( + logger.debug( "override item of paddle.Tensor if exists or register, remove this when fixed!" ) paddle.Tensor.item = item @@ -138,13 +127,13 @@ def func_long(x: paddle.Tensor): if not hasattr(paddle.Tensor, 'long'): - logger.warn( + logger.debug( "override long of paddle.Tensor if exists or register, remove this when fixed!" ) paddle.Tensor.long = func_long if not hasattr(paddle.Tensor, 'numel'): - logger.warn( + logger.debug( "override numel of paddle.Tensor if exists or register, remove this when fixed!" ) paddle.Tensor.numel = paddle.numel @@ -158,7 +147,7 @@ def new_full(x: paddle.Tensor, if not hasattr(paddle.Tensor, 'new_full'): - logger.warn( + logger.debug( "override new_full of paddle.Tensor if exists or register, remove this when fixed!" ) paddle.Tensor.new_full = new_full @@ -173,13 +162,13 @@ def eq(xs: paddle.Tensor, ys: Union[paddle.Tensor, float]) -> paddle.Tensor: if not hasattr(paddle.Tensor, 'eq'): - logger.warn( + logger.debug( "override eq of paddle.Tensor if exists or register, remove this when fixed!" ) paddle.Tensor.eq = eq if not hasattr(paddle, 'eq'): - logger.warn( + logger.debug( "override eq of paddle if exists or register, remove this when fixed!") paddle.eq = eq @@ -189,7 +178,7 @@ def contiguous(xs: paddle.Tensor) -> paddle.Tensor: if not hasattr(paddle.Tensor, 'contiguous'): - logger.warn( + logger.debug( "override contiguous of paddle.Tensor if exists or register, remove this when fixed!" ) paddle.Tensor.contiguous = contiguous @@ -206,7 +195,7 @@ def size(xs: paddle.Tensor, *args: int) -> paddle.Tensor: #`to_static` do not process `size` property, maybe some `paddle` api dependent on it. -logger.warn( +logger.debug( "override size of paddle.Tensor " "(`to_static` do not process `size` property, maybe some `paddle` api dependent on it), remove this when fixed!" 
) @@ -218,7 +207,7 @@ def view(xs: paddle.Tensor, *args: int) -> paddle.Tensor: if not hasattr(paddle.Tensor, 'view'): - logger.warn("register user view to paddle.Tensor, remove this when fixed!") + logger.debug("register user view to paddle.Tensor, remove this when fixed!") paddle.Tensor.view = view @@ -227,7 +216,7 @@ def view_as(xs: paddle.Tensor, ys: paddle.Tensor) -> paddle.Tensor: if not hasattr(paddle.Tensor, 'view_as'): - logger.warn( + logger.debug( "register user view_as to paddle.Tensor, remove this when fixed!") paddle.Tensor.view_as = view_as @@ -253,7 +242,7 @@ def masked_fill(xs: paddle.Tensor, if not hasattr(paddle.Tensor, 'masked_fill'): - logger.warn( + logger.debug( "register user masked_fill to paddle.Tensor, remove this when fixed!") paddle.Tensor.masked_fill = masked_fill @@ -271,7 +260,7 @@ def masked_fill_(xs: paddle.Tensor, if not hasattr(paddle.Tensor, 'masked_fill_'): - logger.warn( + logger.debug( "register user masked_fill_ to paddle.Tensor, remove this when fixed!") paddle.Tensor.masked_fill_ = masked_fill_ @@ -283,7 +272,8 @@ def fill_(xs: paddle.Tensor, value: Union[float, int]) -> paddle.Tensor: if not hasattr(paddle.Tensor, 'fill_'): - logger.warn("register user fill_ to paddle.Tensor, remove this when fixed!") + logger.debug( + "register user fill_ to paddle.Tensor, remove this when fixed!") paddle.Tensor.fill_ = fill_ @@ -292,22 +282,22 @@ def repeat(xs: paddle.Tensor, *size: Any) -> paddle.Tensor: if not hasattr(paddle.Tensor, 'repeat'): - logger.warn( + logger.debug( "register user repeat to paddle.Tensor, remove this when fixed!") paddle.Tensor.repeat = repeat if not hasattr(paddle.Tensor, 'softmax'): - logger.warn( + logger.debug( "register user softmax to paddle.Tensor, remove this when fixed!") setattr(paddle.Tensor, 'softmax', paddle.nn.functional.softmax) if not hasattr(paddle.Tensor, 'sigmoid'): - logger.warn( + logger.debug( "register user sigmoid to paddle.Tensor, remove this when fixed!") setattr(paddle.Tensor, 'sigmoid', paddle.nn.functional.sigmoid) if not hasattr(paddle.Tensor, 'relu'): - logger.warn("register user relu to paddle.Tensor, remove this when fixed!") + logger.debug("register user relu to paddle.Tensor, remove this when fixed!") setattr(paddle.Tensor, 'relu', paddle.nn.functional.relu) @@ -316,7 +306,7 @@ def type_as(x: paddle.Tensor, other: paddle.Tensor) -> paddle.Tensor: if not hasattr(paddle.Tensor, 'type_as'): - logger.warn( + logger.debug( "register user type_as to paddle.Tensor, remove this when fixed!") setattr(paddle.Tensor, 'type_as', type_as) @@ -332,7 +322,7 @@ def to(x: paddle.Tensor, *args, **kwargs) -> paddle.Tensor: if not hasattr(paddle.Tensor, 'to'): - logger.warn("register user to to paddle.Tensor, remove this when fixed!") + logger.debug("register user to to paddle.Tensor, remove this when fixed!") setattr(paddle.Tensor, 'to', to) @@ -341,7 +331,8 @@ def func_float(x: paddle.Tensor) -> paddle.Tensor: if not hasattr(paddle.Tensor, 'float'): - logger.warn("register user float to paddle.Tensor, remove this when fixed!") + logger.debug( + "register user float to paddle.Tensor, remove this when fixed!") setattr(paddle.Tensor, 'float', func_float) @@ -350,7 +341,7 @@ def func_int(x: paddle.Tensor) -> paddle.Tensor: if not hasattr(paddle.Tensor, 'int'): - logger.warn("register user int to paddle.Tensor, remove this when fixed!") + logger.debug("register user int to paddle.Tensor, remove this when fixed!") setattr(paddle.Tensor, 'int', func_int) @@ -359,139 +350,6 @@ def tolist(x: paddle.Tensor) -> List[Any]: if not 
hasattr(paddle.Tensor, 'tolist'): - logger.warn( + logger.debug( "register user tolist to paddle.Tensor, remove this when fixed!") setattr(paddle.Tensor, 'tolist', tolist) - -########### hcak paddle.nn.functional ############# - - -def glu(x: paddle.Tensor, axis=-1) -> paddle.Tensor: - """The gated linear unit (GLU) activation.""" - a, b = x.split(2, axis=axis) - act_b = F.sigmoid(b) - return a * act_b - - -if not hasattr(paddle.nn.functional, 'glu'): - logger.warn( - "register user glu to paddle.nn.functional, remove this when fixed!") - setattr(paddle.nn.functional, 'glu', glu) - -# def softplus(x): -# """Softplus function.""" -# if hasattr(paddle.nn.functional, 'softplus'): -# #return paddle.nn.functional.softplus(x.float()).type_as(x) -# return paddle.nn.functional.softplus(x) -# else: -# raise NotImplementedError - -# def gelu_accurate(x): -# """Gaussian Error Linear Units (GELU) activation.""" -# # [reference] https://github.com/pytorch/fairseq/blob/e75cff5f2c1d62f12dc911e0bf420025eb1a4e33/fairseq/modules/gelu.py -# if not hasattr(gelu_accurate, "_a"): -# gelu_accurate._a = math.sqrt(2 / math.pi) -# return 0.5 * x * (1 + paddle.tanh(gelu_accurate._a * -# (x + 0.044715 * paddle.pow(x, 3)))) - -# def gelu(x): -# """Gaussian Error Linear Units (GELU) activation.""" -# if hasattr(nn.functional, 'gelu'): -# #return nn.functional.gelu(x.float()).type_as(x) -# return nn.functional.gelu(x) -# else: -# return x * 0.5 * (1.0 + paddle.erf(x / math.sqrt(2.0))) - - -# hack loss -def ctc_loss(logits, - labels, - input_lengths, - label_lengths, - blank=0, - reduction='mean', - norm_by_times=True): - #logger.info("my ctc loss with norm by times") - ## https://github.com/PaddlePaddle/Paddle/blob/f5ca2db2cc/paddle/fluid/operators/warpctc_op.h#L403 - loss_out = paddle.fluid.layers.warpctc(logits, labels, blank, norm_by_times, - input_lengths, label_lengths) - - loss_out = paddle.fluid.layers.squeeze(loss_out, [-1]) - assert reduction in ['mean', 'sum', 'none'] - if reduction == 'mean': - loss_out = paddle.mean(loss_out / label_lengths) - elif reduction == 'sum': - loss_out = paddle.sum(loss_out) - return loss_out - - -logger.warn( - "override ctc_loss of paddle.nn.functional if exists, remove this when fixed!" -) -F.ctc_loss = ctc_loss - -########### hcak paddle.nn ############# -if not hasattr(paddle.nn, 'Module'): - logger.warn("register user Module to paddle.nn, remove this when fixed!") - setattr(paddle.nn, 'Module', paddle.nn.Layer) - -# maybe cause assert isinstance(sublayer, core.Layer) -if not hasattr(paddle.nn, 'ModuleList'): - logger.warn( - "register user ModuleList to paddle.nn, remove this when fixed!") - setattr(paddle.nn, 'ModuleList', paddle.nn.LayerList) - - -class GLU(nn.Layer): - """Gated Linear Units (GLU) Layer""" - - def __init__(self, dim: int=-1): - super().__init__() - self.dim = dim - - def forward(self, xs): - return glu(xs, dim=self.dim) - - -if not hasattr(paddle.nn, 'GLU'): - logger.warn("register user GLU to paddle.nn, remove this when fixed!") - setattr(paddle.nn, 'GLU', GLU) - - -# TODO(Hui Zhang): remove this Layer -class ConstantPad2d(nn.Layer): - """Pads the input tensor boundaries with a constant value. - For N-dimensional padding, use paddle.nn.functional.pad(). - """ - - def __init__(self, padding: Union[tuple, list, int], value: float): - """ - Args: - paddle ([tuple]): the size of the padding. - If is int, uses the same padding in all boundaries. 
- If a 4-tuple, uses (padding_left, padding_right, padding_top, padding_bottom) - value ([flaot]): pad value - """ - self.padding = padding if isinstance(padding, - [tuple, list]) else [padding] * 4 - self.value = value - - def forward(self, xs: paddle.Tensor) -> paddle.Tensor: - return nn.functional.pad( - xs, - self.padding, - mode='constant', - value=self.value, - data_format='NCHW') - - -if not hasattr(paddle.nn, 'ConstantPad2d'): - logger.warn( - "register user ConstantPad2d to paddle.nn, remove this when fixed!") - setattr(paddle.nn, 'ConstantPad2d', ConstantPad2d) - -########### hcak paddle.jit ############# - -if not hasattr(paddle.jit, 'export'): - logger.warn("register user export to paddle.jit, remove this when fixed!") - setattr(paddle.jit, 'export', paddle.jit.to_static) diff --git a/deepspeech/decoders/swig/ctc_beam_search_decoder.cpp b/deepspeech/decoders/swig/ctc_beam_search_decoder.cpp index 4dcc7c899..fcb1f7642 100644 --- a/deepspeech/decoders/swig/ctc_beam_search_decoder.cpp +++ b/deepspeech/decoders/swig/ctc_beam_search_decoder.cpp @@ -35,7 +35,8 @@ std::vector> ctc_beam_search_decoder( size_t beam_size, double cutoff_prob, size_t cutoff_top_n, - Scorer *ext_scorer) { + Scorer *ext_scorer, + size_t blank_id) { // dimension check size_t num_time_steps = probs_seq.size(); for (size_t i = 0; i < num_time_steps; ++i) { @@ -48,7 +49,7 @@ std::vector> ctc_beam_search_decoder( // assign blank id // size_t blank_id = vocabulary.size(); - size_t blank_id = 0; + // size_t blank_id = 0; // assign space id auto it = std::find(vocabulary.begin(), vocabulary.end(), " "); @@ -57,7 +58,6 @@ std::vector> ctc_beam_search_decoder( if ((size_t)space_id >= vocabulary.size()) { space_id = -2; } - // init prefixes' root PathTrie root; root.score = root.log_prob_b_prev = 0.0; @@ -218,7 +218,8 @@ ctc_beam_search_decoder_batch( size_t num_processes, double cutoff_prob, size_t cutoff_top_n, - Scorer *ext_scorer) { + Scorer *ext_scorer, + size_t blank_id) { VALID_CHECK_GT(num_processes, 0, "num_processes must be nonnegative!"); // thread pool ThreadPool pool(num_processes); @@ -234,7 +235,8 @@ ctc_beam_search_decoder_batch( beam_size, cutoff_prob, cutoff_top_n, - ext_scorer)); + ext_scorer, + blank_id)); } // get decoding results diff --git a/deepspeech/decoders/swig/ctc_beam_search_decoder.h b/deepspeech/decoders/swig/ctc_beam_search_decoder.h index c31510da3..eaba9da8c 100644 --- a/deepspeech/decoders/swig/ctc_beam_search_decoder.h +++ b/deepspeech/decoders/swig/ctc_beam_search_decoder.h @@ -43,7 +43,8 @@ std::vector> ctc_beam_search_decoder( size_t beam_size, double cutoff_prob = 1.0, size_t cutoff_top_n = 40, - Scorer *ext_scorer = nullptr); + Scorer *ext_scorer = nullptr, + size_t blank_id = 0); /* CTC Beam Search Decoder for batch data @@ -70,6 +71,7 @@ ctc_beam_search_decoder_batch( size_t num_processes, double cutoff_prob = 1.0, size_t cutoff_top_n = 40, - Scorer *ext_scorer = nullptr); + Scorer *ext_scorer = nullptr, + size_t blank_id = 0); #endif // CTC_BEAM_SEARCH_DECODER_H_ diff --git a/deepspeech/decoders/swig/ctc_greedy_decoder.cpp b/deepspeech/decoders/swig/ctc_greedy_decoder.cpp index 1c735c424..18008cced 100644 --- a/deepspeech/decoders/swig/ctc_greedy_decoder.cpp +++ b/deepspeech/decoders/swig/ctc_greedy_decoder.cpp @@ -17,17 +17,18 @@ std::string ctc_greedy_decoder( const std::vector> &probs_seq, - const std::vector &vocabulary) { + const std::vector &vocabulary, + size_t blank_id) { // dimension check size_t num_time_steps = probs_seq.size(); for (size_t i = 0; i < 
num_time_steps; ++i) { VALID_CHECK_EQ(probs_seq[i].size(), - vocabulary.size() + 1, + vocabulary.size(), "The shape of probs_seq does not match with " "the shape of the vocabulary"); } - size_t blank_id = vocabulary.size(); + // size_t blank_id = vocabulary.size(); std::vector max_idx_vec(num_time_steps, 0); std::vector idx_vec; diff --git a/deepspeech/decoders/swig/ctc_greedy_decoder.h b/deepspeech/decoders/swig/ctc_greedy_decoder.h index 5e8c5c251..dd1b33315 100644 --- a/deepspeech/decoders/swig/ctc_greedy_decoder.h +++ b/deepspeech/decoders/swig/ctc_greedy_decoder.h @@ -29,6 +29,7 @@ */ std::string ctc_greedy_decoder( const std::vector>& probs_seq, - const std::vector& vocabulary); + const std::vector& vocabulary, + size_t blank_id); #endif // CTC_GREEDY_DECODER_H diff --git a/deepspeech/decoders/swig/setup.py b/deepspeech/decoders/swig/setup.py index 86af475af..c089f96cd 100644 --- a/deepspeech/decoders/swig/setup.py +++ b/deepspeech/decoders/swig/setup.py @@ -83,10 +83,12 @@ FILES = glob.glob('kenlm/util/*.cc') \ FILES += glob.glob('openfst-1.6.3/src/lib/*.cc') +# yapf: disable FILES = [ fn for fn in FILES if not (fn.endswith('main.cc') or fn.endswith('test.cc') or fn.endswith('unittest.cc')) ] +# yapf: enable LIBS = ['stdc++'] if platform.system() != 'Darwin': diff --git a/deepspeech/decoders/swig_wrapper.py b/deepspeech/decoders/swig_wrapper.py index 3ffdb9c74..d883d430c 100644 --- a/deepspeech/decoders/swig_wrapper.py +++ b/deepspeech/decoders/swig_wrapper.py @@ -32,7 +32,7 @@ class Scorer(swig_decoders.Scorer): swig_decoders.Scorer.__init__(self, alpha, beta, model_path, vocabulary) -def ctc_greedy_decoder(probs_seq, vocabulary): +def ctc_greedy_decoder(probs_seq, vocabulary, blank_id): """Wrapper for ctc best path decoder in swig. :param probs_seq: 2-D list of probability distributions over each time @@ -44,7 +44,8 @@ def ctc_greedy_decoder(probs_seq, vocabulary): :return: Decoding result string. :rtype: str """ - result = swig_decoders.ctc_greedy_decoder(probs_seq.tolist(), vocabulary) + result = swig_decoders.ctc_greedy_decoder(probs_seq.tolist(), vocabulary, + blank_id) return result @@ -53,7 +54,8 @@ def ctc_beam_search_decoder(probs_seq, beam_size, cutoff_prob=1.0, cutoff_top_n=40, - ext_scoring_func=None): + ext_scoring_func=None, + blank_id=0): """Wrapper for the CTC Beam Search Decoder. :param probs_seq: 2-D list of probability distributions over each time @@ -81,7 +83,7 @@ def ctc_beam_search_decoder(probs_seq, """ beam_results = swig_decoders.ctc_beam_search_decoder( probs_seq.tolist(), vocabulary, beam_size, cutoff_prob, cutoff_top_n, - ext_scoring_func) + ext_scoring_func, blank_id) beam_results = [(res[0], res[1].decode('utf-8')) for res in beam_results] return beam_results @@ -92,7 +94,8 @@ def ctc_beam_search_decoder_batch(probs_split, num_processes, cutoff_prob=1.0, cutoff_top_n=40, - ext_scoring_func=None): + ext_scoring_func=None, + blank_id=0): """Wrapper for the batched CTC beam search decoder. 
:param probs_seq: 3-D list with each element as an instance of 2-D list @@ -125,7 +128,7 @@ def ctc_beam_search_decoder_batch(probs_split, batch_beam_results = swig_decoders.ctc_beam_search_decoder_batch( probs_split, vocabulary, beam_size, num_processes, cutoff_prob, - cutoff_top_n, ext_scoring_func) + cutoff_top_n, ext_scoring_func, blank_id) batch_beam_results = [[(res[0], res[1]) for res in beam_results] for beam_results in batch_beam_results] return batch_beam_results diff --git a/deepspeech/exps/deepspeech2/bin/deploy/runtime.py b/deepspeech/exps/deepspeech2/bin/deploy/runtime.py index f3125e04d..21ffa6bf4 100644 --- a/deepspeech/exps/deepspeech2/bin/deploy/runtime.py +++ b/deepspeech/exps/deepspeech2/bin/deploy/runtime.py @@ -18,10 +18,12 @@ import numpy as np import paddle from paddle.inference import Config from paddle.inference import create_predictor +from paddle.io import DataLoader from deepspeech.exps.deepspeech2.config import get_cfg_defaults +from deepspeech.io.collator import SpeechCollator from deepspeech.io.dataset import ManifestDataset -from deepspeech.models.deepspeech2 import DeepSpeech2Model +from deepspeech.models.ds2 import DeepSpeech2Model from deepspeech.training.cli import default_argument_parser from deepspeech.utils.socket_server import AsrRequestHandler from deepspeech.utils.socket_server import AsrTCPServer @@ -78,26 +80,31 @@ def inference(config, args): def start_server(config, args): """Start the ASR server""" config.defrost() - config.data.manfiest = config.data.test_manifest - config.data.augmentation_config = "" - config.data.keep_transcription_text = True + config.data.manifest = config.data.test_manifest dataset = ManifestDataset.from_config(config) - model = DeepSpeech2Model.from_pretrained(dataset, config, + config.collator.augmentation_config = "" + config.collator.keep_transcription_text = True + config.collator.batch_size = 1 + config.collator.num_workers = 0 + collate_fn = SpeechCollator.from_config(config) + test_loader = DataLoader(dataset, collate_fn=collate_fn, num_workers=0) + + model = DeepSpeech2Model.from_pretrained(test_loader, config, args.checkpoint_path) model.eval() # prepare ASR inference handler def file_to_transcript(filename): - feature = dataset.process_utterance(filename, "") - audio = np.array([feature[0]]).astype('float32') #[1, D, T] - audio_len = feature[0].shape[1] + feature = test_loader.collate_fn.process_utterance(filename, "") + audio = np.array([feature[0]]).astype('float32') #[1, T, D] + audio_len = feature[0].shape[0] audio_len = np.array([audio_len]).astype('int64') # [1] result_transcript = model.decode( paddle.to_tensor(audio), paddle.to_tensor(audio_len), - vocab_list=dataset.vocab_list, + vocab_list=test_loader.collate_fn.vocab_list, decoding_method=config.decoding.decoding_method, lang_model_path=config.decoding.lang_model_path, beam_alpha=config.decoding.alpha, @@ -138,7 +145,7 @@ if __name__ == "__main__": add_arg('host_ip', str, 'localhost', "Server's IP address.") - add_arg('host_port', int, 8086, "Server's IP port.") + add_arg('host_port', int, 8089, "Server's IP port.") add_arg('speech_save_dir', str, 'demo_cache', "Directory to save demo audios.") diff --git a/deepspeech/exps/deepspeech2/bin/deploy/server.py b/deepspeech/exps/deepspeech2/bin/deploy/server.py index b2ff37e06..583e90950 100644 --- a/deepspeech/exps/deepspeech2/bin/deploy/server.py +++ b/deepspeech/exps/deepspeech2/bin/deploy/server.py @@ -16,10 +16,12 @@ import functools import numpy as np import paddle +from paddle.io import 
DataLoader from deepspeech.exps.deepspeech2.config import get_cfg_defaults +from deepspeech.io.collator import SpeechCollator from deepspeech.io.dataset import ManifestDataset -from deepspeech.models.deepspeech2 import DeepSpeech2Model +from deepspeech.models.ds2 import DeepSpeech2Model from deepspeech.training.cli import default_argument_parser from deepspeech.utils.socket_server import AsrRequestHandler from deepspeech.utils.socket_server import AsrTCPServer @@ -31,26 +33,35 @@ from deepspeech.utils.utility import print_arguments def start_server(config, args): """Start the ASR server""" config.defrost() - config.data.manfiest = config.data.test_manifest - config.data.augmentation_config = "" - config.data.keep_transcription_text = True + config.data.manifest = config.data.test_manifest dataset = ManifestDataset.from_config(config) - model = DeepSpeech2Model.from_pretrained(dataset, config, + config.collator.augmentation_config = "" + config.collator.keep_transcription_text = True + config.collator.batch_size = 1 + config.collator.num_workers = 0 + collate_fn = SpeechCollator.from_config(config) + test_loader = DataLoader(dataset, collate_fn=collate_fn, num_workers=0) + + model = DeepSpeech2Model.from_pretrained(test_loader, config, args.checkpoint_path) model.eval() # prepare ASR inference handler def file_to_transcript(filename): - feature = dataset.process_utterance(filename, "") - audio = np.array([feature[0]]).astype('float32') #[1, D, T] - audio_len = feature[0].shape[1] + feature = test_loader.collate_fn.process_utterance(filename, "") + audio = np.array([feature[0]]).astype('float32') #[1, T, D] + # audio = audio.swapaxes(1,2) + print('---file_to_transcript feature----') + print(audio.shape) + audio_len = feature[0].shape[0] + print(audio_len) audio_len = np.array([audio_len]).astype('int64') # [1] result_transcript = model.decode( paddle.to_tensor(audio), paddle.to_tensor(audio_len), - vocab_list=dataset.vocab_list, + vocab_list=test_loader.collate_fn.vocab_list, decoding_method=config.decoding.decoding_method, lang_model_path=config.decoding.lang_model_path, beam_alpha=config.decoding.alpha, @@ -91,7 +102,7 @@ if __name__ == "__main__": add_arg('host_ip', str, 'localhost', "Server's IP address.") - add_arg('host_port', int, 8086, "Server's IP port.") + add_arg('host_port', int, 8088, "Server's IP port.") add_arg('speech_save_dir', str, 'demo_cache', "Directory to save demo audios.") diff --git a/deepspeech/exps/deepspeech2/bin/export.py b/deepspeech/exps/deepspeech2/bin/export.py index a1607d583..7962d4fc0 100644 --- a/deepspeech/exps/deepspeech2/bin/export.py +++ b/deepspeech/exps/deepspeech2/bin/export.py @@ -30,11 +30,18 @@ def main(config, args): if __name__ == "__main__": parser = default_argument_parser() + # save jit model to + parser.add_argument( + "--export_path", type=str, help="path of the jit model to save") + parser.add_argument("--model_type") args = parser.parse_args() + if args.model_type is None: + args.model_type = 'offline' + print("model_type:{}".format(args.model_type)) print_arguments(args) # https://yaml.org/type/float.html - config = get_cfg_defaults() + config = get_cfg_defaults(args.model_type) if args.config: config.merge_from_file(args.config) if args.opts: diff --git a/deepspeech/exps/deepspeech2/bin/test.py b/deepspeech/exps/deepspeech2/bin/test.py index f4edf08a8..f2fd3a394 100644 --- a/deepspeech/exps/deepspeech2/bin/test.py +++ b/deepspeech/exps/deepspeech2/bin/test.py @@ -30,11 +30,18 @@ def main(config, args): if __name__ == "__main__": 
parser = default_argument_parser() + parser.add_argument("--model_type") + # save asr result to + parser.add_argument( + "--result_file", type=str, help="path of save the asr result") args = parser.parse_args() print_arguments(args, globals()) + if args.model_type is None: + args.model_type = 'offline' + print("model_type:{}".format(args.model_type)) # https://yaml.org/type/float.html - config = get_cfg_defaults() + config = get_cfg_defaults(args.model_type) if args.config: config.merge_from_file(args.config) if args.opts: diff --git a/deepspeech/exps/deepspeech2/bin/test_export.py b/deepspeech/exps/deepspeech2/bin/test_export.py new file mode 100644 index 000000000..7a012144d --- /dev/null +++ b/deepspeech/exps/deepspeech2/bin/test_export.py @@ -0,0 +1,58 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Evaluation for DeepSpeech2 model.""" +from deepspeech.exps.deepspeech2.config import get_cfg_defaults +from deepspeech.exps.deepspeech2.model import DeepSpeech2ExportTester as ExportTester +from deepspeech.training.cli import default_argument_parser +from deepspeech.utils.utility import print_arguments + + +def main_sp(config, args): + exp = ExportTester(config, args) + exp.setup() + exp.run_test() + + +def main(config, args): + main_sp(config, args) + + +if __name__ == "__main__": + parser = default_argument_parser() + # save asr result to + parser.add_argument( + "--result_file", type=str, help="path of save the asr result") + #load jit model from + parser.add_argument( + "--export_path", type=str, help="path of the jit model to save") + parser.add_argument("--model_type") + args = parser.parse_args() + print_arguments(args, globals()) + if args.model_type is None: + args.model_type = 'offline' + print("model_type:{}".format(args.model_type)) + + # https://yaml.org/type/float.html + config = get_cfg_defaults(args.model_type) + if args.config: + config.merge_from_file(args.config) + if args.opts: + config.merge_from_list(args.opts) + config.freeze() + print(config) + if args.dump_config: + with open(args.dump_config, 'w') as f: + print(config, file=f) + + main(config, args) diff --git a/deepspeech/exps/deepspeech2/bin/train.py b/deepspeech/exps/deepspeech2/bin/train.py index 5e5c1e2a4..69ff043a0 100644 --- a/deepspeech/exps/deepspeech2/bin/train.py +++ b/deepspeech/exps/deepspeech2/bin/train.py @@ -35,11 +35,15 @@ def main(config, args): if __name__ == "__main__": parser = default_argument_parser() + parser.add_argument("--model_type") args = parser.parse_args() + if args.model_type is None: + args.model_type = 'offline' + print("model_type:{}".format(args.model_type)) print_arguments(args, globals()) # https://yaml.org/type/float.html - config = get_cfg_defaults() + config = get_cfg_defaults(args.model_type) if args.config: config.merge_from_file(args.config) if args.opts: diff --git a/deepspeech/exps/deepspeech2/bin/tune.py b/deepspeech/exps/deepspeech2/bin/tune.py deleted file mode 100644 index 02e329a11..000000000 --- 
a/deepspeech/exps/deepspeech2/bin/tune.py +++ /dev/null @@ -1,191 +0,0 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Beam search parameters tuning for DeepSpeech2 model.""" -import functools -import sys - -import numpy as np -from paddle.io import DataLoader - -from deepspeech.exps.deepspeech2.config import get_cfg_defaults -from deepspeech.io.collator import SpeechCollator -from deepspeech.io.dataset import ManifestDataset -from deepspeech.models.deepspeech2 import DeepSpeech2Model -from deepspeech.training.cli import default_argument_parser -from deepspeech.utils import error_rate -from deepspeech.utils.utility import add_arguments -from deepspeech.utils.utility import print_arguments - - -def tune(config, args): - """Tune parameters alpha and beta incrementally.""" - if not args.num_alphas >= 0: - raise ValueError("num_alphas must be non-negative!") - if not args.num_betas >= 0: - raise ValueError("num_betas must be non-negative!") - config.defrost() - config.data.manfiest = config.data.dev_manifest - config.data.augmentation_config = "" - config.data.keep_transcription_text = True - dev_dataset = ManifestDataset.from_config(config) - - valid_loader = DataLoader( - dev_dataset, - batch_size=config.data.batch_size, - shuffle=False, - drop_last=False, - collate_fn=SpeechCollator(keep_transcription_text=True)) - - model = DeepSpeech2Model.from_pretrained(dev_dataset, config, - args.checkpoint_path) - model.eval() - - # decoders only accept string encoded in utf-8 - vocab_list = valid_loader.dataset.vocab_list - errors_func = error_rate.char_errors if config.decoding.error_rate_type == 'cer' else error_rate.word_errors - - # create grid for search - cand_alphas = np.linspace(args.alpha_from, args.alpha_to, args.num_alphas) - cand_betas = np.linspace(args.beta_from, args.beta_to, args.num_betas) - params_grid = [(alpha, beta) for alpha in cand_alphas - for beta in cand_betas] - - err_sum = [0.0 for i in range(len(params_grid))] - err_ave = [0.0 for i in range(len(params_grid))] - - num_ins, len_refs, cur_batch = 0, 0, 0 - # initialize external scorer - model.decoder.init_decode(args.alpha_from, args.beta_from, - config.decoding.lang_model_path, vocab_list, - config.decoding.decoding_method) - ## incremental tuning parameters over multiple batches - print("start tuning ...") - for infer_data in valid_loader(): - if (args.num_batches >= 0) and (cur_batch >= args.num_batches): - break - - def ordid2token(texts, texts_len): - """ ord() id to chr() chr """ - trans = [] - for text, n in zip(texts, texts_len): - n = n.numpy().item() - ids = text[:n] - trans.append(''.join([chr(i) for i in ids])) - return trans - - audio, audio_len, text, text_len = infer_data - target_transcripts = ordid2token(text, text_len) - num_ins += audio.shape[0] - - # model infer - eouts, eouts_len = model.encoder(audio, audio_len) - probs = model.decoder.softmax(eouts) - - # grid search - for index, (alpha, beta) in enumerate(params_grid): - 
print(f"tuneing: alpha={alpha} beta={beta}") - result_transcripts = model.decoder.decode_probs( - probs.numpy(), eouts_len, vocab_list, - config.decoding.decoding_method, - config.decoding.lang_model_path, alpha, beta, - config.decoding.beam_size, config.decoding.cutoff_prob, - config.decoding.cutoff_top_n, config.decoding.num_proc_bsearch) - - for target, result in zip(target_transcripts, result_transcripts): - errors, len_ref = errors_func(target, result) - err_sum[index] += errors - - # accumulate the length of references of every batchπ - # in the first iteration - if args.alpha_from == alpha and args.beta_from == beta: - len_refs += len_ref - - err_ave[index] = err_sum[index] / len_refs - if index % 2 == 0: - sys.stdout.write('.') - sys.stdout.flush() - print("tuneing: one grid done!") - - # output on-line tuning result at the end of current batch - err_ave_min = min(err_ave) - min_index = err_ave.index(err_ave_min) - print("\nBatch %d [%d/?], current opt (alpha, beta) = (%s, %s), " - " min [%s] = %f" % - (cur_batch, num_ins, "%.3f" % params_grid[min_index][0], - "%.3f" % params_grid[min_index][1], - config.decoding.error_rate_type, err_ave_min)) - cur_batch += 1 - - # output WER/CER at every (alpha, beta) - print("\nFinal %s:\n" % config.decoding.error_rate_type) - for index in range(len(params_grid)): - print("(alpha, beta) = (%s, %s), [%s] = %f" % - ("%.3f" % params_grid[index][0], "%.3f" % params_grid[index][1], - config.decoding.error_rate_type, err_ave[index])) - - err_ave_min = min(err_ave) - min_index = err_ave.index(err_ave_min) - print("\nFinish tuning on %d batches, final opt (alpha, beta) = (%s, %s)" % - (cur_batch, "%.3f" % params_grid[min_index][0], - "%.3f" % params_grid[min_index][1])) - - print("finish tuning") - - -def main(config, args): - tune(config, args) - - -if __name__ == "__main__": - parser = default_argument_parser() - add_arg = functools.partial(add_arguments, argparser=parser) - add_arg('num_batches', int, -1, "# of batches tuning on. 
" - "Default -1, on whole dev set.") - add_arg('num_alphas', int, 45, "# of alpha candidates for tuning.") - add_arg('num_betas', int, 8, "# of beta candidates for tuning.") - add_arg('alpha_from', float, 1.0, "Where alpha starts tuning from.") - add_arg('alpha_to', float, 3.2, "Where alpha ends tuning with.") - add_arg('beta_from', float, 0.1, "Where beta starts tuning from.") - add_arg('beta_to', float, 0.45, "Where beta ends tuning with.") - - add_arg('batch_size', int, 256, "# of samples per batch.") - add_arg('beam_size', int, 500, "Beam search width.") - add_arg('num_proc_bsearch', int, 8, "# of CPUs for beam search.") - add_arg('cutoff_prob', float, 1.0, "Cutoff probability for pruning.") - add_arg('cutoff_top_n', int, 40, "Cutoff number for pruning.") - - args = parser.parse_args() - print_arguments(args, globals()) - - # https://yaml.org/type/float.html - config = get_cfg_defaults() - if args.config: - config.merge_from_file(args.config) - if args.opts: - config.merge_from_list(args.opts) - - config.data.batch_size = args.batch_size - config.decoding.beam_size = args.beam_size - config.decoding.num_proc_bsearch = args.num_proc_bsearch - config.decoding.cutoff_prob = args.cutoff_prob - config.decoding.cutoff_top_n = args.cutoff_top_n - - config.freeze() - print(config) - - if args.dump_config: - with open(args.dump_config, 'w') as f: - print(config, file=f) - - main(config, args) diff --git a/deepspeech/exps/deepspeech2/config.py b/deepspeech/exps/deepspeech2/config.py index a8d452a99..38b7d0e4d 100644 --- a/deepspeech/exps/deepspeech2/config.py +++ b/deepspeech/exps/deepspeech2/config.py @@ -11,77 +11,26 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from yacs.config import CfgNode as CN - -from deepspeech.models.deepspeech2 import DeepSpeech2Model - -_C = CN() -_C.data = CN( - dict( - train_manifest="", - dev_manifest="", - test_manifest="", - unit_type="char", - vocab_filepath="", - spm_model_prefix="", - mean_std_filepath="", - augmentation_config="", - max_duration=float('inf'), - min_duration=0.0, - stride_ms=10.0, # ms - window_ms=20.0, # ms - n_fft=None, # fft points - max_freq=None, # None for samplerate/2 - specgram_type='linear', # 'linear', 'mfcc', 'fbank' - feat_dim=0, # 'mfcc', 'fbank' - delat_delta=False, # 'mfcc', 'fbank' - target_sample_rate=16000, # target sample rate - use_dB_normalization=True, - target_dB=-20, - random_seed=0, - keep_transcription_text=False, - batch_size=32, # batch size - num_workers=0, # data loader workers - sortagrad=False, # sorted in first epoch when True - shuffle_method="batch_shuffle", # 'batch_shuffle', 'instance_shuffle' - )) - -_C.model = CN( - dict( - num_conv_layers=2, #Number of stacking convolution layers. - num_rnn_layers=3, #Number of stacking RNN layers. - rnn_layer_size=1024, #RNN layer size (number of RNN cells). - use_gru=True, #Use gru if set True. Use simple rnn if set False. - share_rnn_weights=True #Whether to share input-hidden weights between forward and backward directional RNNs.Notice that for GRU, weight sharing is not supported. - )) - -DeepSpeech2Model.params(_C.model) - -_C.training = CN( - dict( - lr=5e-4, # learning rate - lr_decay=1.0, # learning rate decay - weight_decay=1e-6, # the coeff of weight decay - global_grad_clip=5.0, # the global norm clip - n_epoch=50, # train epochs - )) - -_C.decoding = CN( - dict( - alpha=2.5, # Coef of LM for beam search. 
- beta=0.3, # Coef of WC for beam search. - cutoff_prob=1.0, # Cutoff probability for pruning. - cutoff_top_n=40, # Cutoff number for pruning. - lang_model_path='models/lm/common_crawl_00.prune01111.trie.klm', # Filepath for language model. - decoding_method='ctc_beam_search', # Decoding method. Options: ctc_beam_search, ctc_greedy - error_rate_type='wer', # Error rate type for evaluation. Options `wer`, 'cer' - num_proc_bsearch=8, # # of CPUs for beam search. - beam_size=500, # Beam search width. - batch_size=128, # decoding batch size - )) - - -def get_cfg_defaults(): +from yacs.config import CfgNode + +from deepspeech.exps.deepspeech2.model import DeepSpeech2Tester +from deepspeech.exps.deepspeech2.model import DeepSpeech2Trainer +from deepspeech.io.collator import SpeechCollator +from deepspeech.io.dataset import ManifestDataset +from deepspeech.models.ds2 import DeepSpeech2Model +from deepspeech.models.ds2_online import DeepSpeech2ModelOnline + + +def get_cfg_defaults(model_type='offline'): + _C = CfgNode() + _C.data = ManifestDataset.params() + _C.collator = SpeechCollator.params() + _C.training = DeepSpeech2Trainer.params() + _C.decoding = DeepSpeech2Tester.params() + if model_type == 'offline': + _C.model = DeepSpeech2Model.params() + else: + _C.model = DeepSpeech2ModelOnline.params() """Get a yacs CfgNode object with default values for my_project.""" # Return a clone so that the defaults will not be altered # This is for the "local variable" use pattern diff --git a/deepspeech/exps/deepspeech2/model.py b/deepspeech/exps/deepspeech2/model.py index e3a22463b..7bf029300 100644 --- a/deepspeech/exps/deepspeech2/model.py +++ b/deepspeech/exps/deepspeech2/model.py @@ -11,60 +11,109 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-"""Contains DeepSpeech2 model.""" +"""Contains DeepSpeech2 and DeepSpeech2Online model.""" +import os import time from collections import defaultdict +from contextlib import nullcontext from pathlib import Path +from typing import Optional import numpy as np import paddle from paddle import distributed as dist +from paddle import inference from paddle.io import DataLoader +from yacs.config import CfgNode from deepspeech.io.collator import SpeechCollator from deepspeech.io.dataset import ManifestDataset from deepspeech.io.sampler import SortagradBatchSampler from deepspeech.io.sampler import SortagradDistributedBatchSampler -from deepspeech.models.deepspeech2 import DeepSpeech2InferModel -from deepspeech.models.deepspeech2 import DeepSpeech2Model +from deepspeech.models.ds2 import DeepSpeech2InferModel +from deepspeech.models.ds2 import DeepSpeech2Model +from deepspeech.models.ds2_online import DeepSpeech2InferModelOnline +from deepspeech.models.ds2_online import DeepSpeech2ModelOnline from deepspeech.training.gradclip import ClipGradByGlobalNormWithLog +from deepspeech.training.reporter import report from deepspeech.training.trainer import Trainer from deepspeech.utils import error_rate from deepspeech.utils import layer_tools from deepspeech.utils import mp_tools +from deepspeech.utils.log import Autolog from deepspeech.utils.log import Log +from deepspeech.utils.utility import UpdateConfig logger = Log(__name__).getlog() class DeepSpeech2Trainer(Trainer): + @classmethod + def params(cls, config: Optional[CfgNode]=None) -> CfgNode: + # training config + default = CfgNode( + dict( + lr=5e-4, # learning rate + lr_decay=1.0, # learning rate decay + weight_decay=1e-6, # the coeff of weight decay + global_grad_clip=5.0, # the global norm clip + n_epoch=50, # train epochs + )) + + if config is not None: + config.merge_from_other_cfg(default) + return default + def __init__(self, config, args): super().__init__(config, args) def train_batch(self, batch_index, batch_data, msg): + batch_size = self.config.collator.batch_size + accum_grad = self.config.training.accum_grad + start = time.time() + + # forward utt, audio, audio_len, text, text_len = batch_data loss = self.model(audio, audio_len, text, text_len) - loss.backward() - layer_tools.print_grads(self.model, print_func=None) - self.optimizer.step() - self.optimizer.clear_grad() - iteration_time = time.time() - start - losses_np = { 'train_loss': float(loss), } - msg += "train time: {:>.3f}s, ".format(iteration_time) - msg += "batch size: {}, ".format(self.config.data.batch_size) - msg += ', '.join('{}: {:>.6f}'.format(k, v) - for k, v in losses_np.items()) - logger.info(msg) + + # loss backward + if (batch_index + 1) % accum_grad != 0: + # Disable gradient synchronizations across DDP processes. + # Within this context, gradients will be accumulated on module + # variables, which will later be synchronized. + context = self.model.no_sync + else: + # Used for single gpu training and DDP gradient synchronization + # processes. 
+ context = nullcontext + + with context(): + loss.backward() + layer_tools.print_grads(self.model, print_func=None) + + # optimizer step + if (batch_index + 1) % accum_grad == 0: + self.optimizer.step() + self.optimizer.clear_grad() + self.iteration += 1 + + iteration_time = time.time() - start + + for k, v in losses_np.items(): + report(k, v) + report("batch_size", batch_size) + report("accum", accum_grad) + report("step_cost", iteration_time) if dist.get_rank() == 0 and self.visualizer: for k, v in losses_np.items(): + # `step -1` since we update `step` after optimizer.step(). self.visualizer.add_scalar("train/{}".format(k), v, - self.iteration) - self.iteration += 1 + self.iteration - 1) @paddle.no_grad() def valid(self): @@ -100,16 +149,17 @@ class DeepSpeech2Trainer(Trainer): return total_loss, num_seen_utts def setup_model(self): - config = self.config - model = DeepSpeech2Model( - feat_size=self.train_loader.dataset.feature_size, - dict_size=self.train_loader.dataset.vocab_size, - num_conv_layers=config.model.num_conv_layers, - num_rnn_layers=config.model.num_rnn_layers, - rnn_size=config.model.rnn_layer_size, - use_gru=config.model.use_gru, - share_rnn_weights=config.model.share_rnn_weights) - + config = self.config.clone() + with UpdateConfig(config): + config.model.feat_size = self.train_loader.collate_fn.feature_size + config.model.dict_size = self.train_loader.collate_fn.vocab_size + + if self.args.model_type == 'offline': + model = DeepSpeech2Model.from_config(config.model) + elif self.args.model_type == 'online': + model = DeepSpeech2ModelOnline.from_config(config.model) + else: + raise Exception("wrong model type") if self.parallel: model = paddle.DataParallel(model) @@ -137,50 +187,87 @@ class DeepSpeech2Trainer(Trainer): def setup_dataloader(self): config = self.config.clone() config.defrost() - config.data.keep_transcription_text = False + config.collator.keep_transcription_text = False config.data.manifest = config.data.train_manifest train_dataset = ManifestDataset.from_config(config) config.data.manifest = config.data.dev_manifest - config.data.augmentation_config = "" dev_dataset = ManifestDataset.from_config(config) + config.data.manifest = config.data.test_manifest + test_dataset = ManifestDataset.from_config(config) + if self.parallel: batch_sampler = SortagradDistributedBatchSampler( train_dataset, - batch_size=config.data.batch_size, + batch_size=config.collator.batch_size, num_replicas=None, rank=None, shuffle=True, drop_last=True, - sortagrad=config.data.sortagrad, - shuffle_method=config.data.shuffle_method) + sortagrad=config.collator.sortagrad, + shuffle_method=config.collator.shuffle_method) else: batch_sampler = SortagradBatchSampler( train_dataset, shuffle=True, - batch_size=config.data.batch_size, + batch_size=config.collator.batch_size, drop_last=True, - sortagrad=config.data.sortagrad, - shuffle_method=config.data.shuffle_method) + sortagrad=config.collator.sortagrad, + shuffle_method=config.collator.shuffle_method) + + collate_fn_train = SpeechCollator.from_config(config) + + config.collator.augmentation_config = "" + collate_fn_dev = SpeechCollator.from_config(config) + + config.collator.keep_transcription_text = True + config.collator.augmentation_config = "" + collate_fn_test = SpeechCollator.from_config(config) - collate_fn = SpeechCollator(keep_transcription_text=False) self.train_loader = DataLoader( train_dataset, batch_sampler=batch_sampler, - collate_fn=collate_fn, - num_workers=config.data.num_workers) + collate_fn=collate_fn_train, + 
num_workers=config.collator.num_workers) self.valid_loader = DataLoader( dev_dataset, - batch_size=config.data.batch_size, + batch_size=config.collator.batch_size, shuffle=False, drop_last=False, - collate_fn=collate_fn) - logger.info("Setup train/valid Dataloader!") + collate_fn=collate_fn_dev) + self.test_loader = DataLoader( + test_dataset, + batch_size=config.decoding.batch_size, + shuffle=False, + drop_last=False, + collate_fn=collate_fn_test) + logger.info("Setup train/valid/test Dataloader!") class DeepSpeech2Tester(DeepSpeech2Trainer): + @classmethod + def params(cls, config: Optional[CfgNode]=None) -> CfgNode: + # testing config + default = CfgNode( + dict( + alpha=2.5, # Coef of LM for beam search. + beta=0.3, # Coef of WC for beam search. + cutoff_prob=1.0, # Cutoff probability for pruning. + cutoff_top_n=40, # Cutoff number for pruning. + lang_model_path='models/lm/common_crawl_00.prune01111.trie.klm', # Filepath for language model. + decoding_method='ctc_beam_search', # Decoding method. Options: ctc_beam_search, ctc_greedy + error_rate_type='wer', # Error rate type for evaluation. Options `wer`, 'cer' + num_proc_bsearch=8, # # of CPUs for beam search. + beam_size=500, # Beam search width. + batch_size=128, # decoding batch size + )) + + if config is not None: + config.merge_from_other_cfg(default) + return default + def __init__(self, config, args): super().__init__(config, args) @@ -205,22 +292,12 @@ class DeepSpeech2Tester(DeepSpeech2Trainer): errors_func = error_rate.char_errors if cfg.error_rate_type == 'cer' else error_rate.word_errors error_rate_func = error_rate.cer if cfg.error_rate_type == 'cer' else error_rate.wer - vocab_list = self.test_loader.dataset.vocab_list + vocab_list = self.test_loader.collate_fn.vocab_list target_transcripts = self.ordid2token(texts, texts_len) - result_transcripts = self.model.decode( - audio, - audio_len, - vocab_list, - decoding_method=cfg.decoding_method, - lang_model_path=cfg.lang_model_path, - beam_alpha=cfg.alpha, - beam_beta=cfg.beta, - beam_size=cfg.beam_size, - cutoff_prob=cfg.cutoff_prob, - cutoff_top_n=cfg.cutoff_top_n, - num_processes=cfg.num_proc_bsearch) + result_transcripts = self.compute_result_transcripts(audio, audio_len, + vocab_list, cfg) for utt, target, result in zip(utts, target_transcripts, result_transcripts): errors, len_ref = errors_func(target, result) @@ -241,10 +318,34 @@ class DeepSpeech2Tester(DeepSpeech2Trainer): error_rate=errors_sum / len_refs, error_rate_type=cfg.error_rate_type) + def compute_result_transcripts(self, audio, audio_len, vocab_list, cfg): + self.autolog.times.start() + self.autolog.times.stamp() + result_transcripts = self.model.decode( + audio, + audio_len, + vocab_list, + decoding_method=cfg.decoding_method, + lang_model_path=cfg.lang_model_path, + beam_alpha=cfg.alpha, + beam_beta=cfg.beta, + beam_size=cfg.beam_size, + cutoff_prob=cfg.cutoff_prob, + cutoff_top_n=cfg.cutoff_top_n, + num_processes=cfg.num_proc_bsearch) + self.autolog.times.stamp() + self.autolog.times.stamp() + self.autolog.times.end() + return result_transcripts + @mp_tools.rank_zero_only @paddle.no_grad() def test(self): logger.info(f"Test Total Examples: {len(self.test_loader.dataset)}") + self.autolog = Autolog( + batch_size=self.config.decoding.batch_size, + model_name="deepspeech2", + model_precision="fp32").getlog() self.model.eval() cfg = self.config error_rate_type = None @@ -268,6 +369,7 @@ class DeepSpeech2Tester(DeepSpeech2Trainer): msg += "Final error rate [%s] (%d/%d) = %f" % ( error_rate_type, num_ins, 
num_ins, errors_sum / len_refs) logger.info(msg) + self.autolog.report() def run_test(self): self.resume_or_scratch() @@ -277,19 +379,18 @@ class DeepSpeech2Tester(DeepSpeech2Trainer): exit(-1) def export(self): - infer_model = DeepSpeech2InferModel.from_pretrained( - self.test_loader.dataset, self.config, self.args.checkpoint_path) + if self.args.model_type == 'offline': + infer_model = DeepSpeech2InferModel.from_pretrained( + self.test_loader, self.config, self.args.checkpoint_path) + elif self.args.model_type == 'online': + infer_model = DeepSpeech2InferModelOnline.from_pretrained( + self.test_loader, self.config, self.args.checkpoint_path) + else: + raise Exception("wrong model type") + infer_model.eval() - feat_dim = self.test_loader.dataset.feature_size - static_model = paddle.jit.to_static( - infer_model, - input_spec=[ - paddle.static.InputSpec( - shape=[None, None, feat_dim], - dtype='float32'), # audio, [B,T,D] - paddle.static.InputSpec(shape=[None], - dtype='int64'), # audio_length, [B] - ]) + feat_dim = self.test_loader.collate_fn.feature_size + static_model = infer_model.export() logger.info(f"Export code: {static_model.forward.code}") paddle.jit.save(static_model, self.args.export_path) @@ -313,45 +414,236 @@ class DeepSpeech2Tester(DeepSpeech2Trainer): self.iteration = 0 self.epoch = 0 - def setup_model(self): - config = self.config - model = DeepSpeech2Model( - feat_size=self.test_loader.dataset.feature_size, - dict_size=self.test_loader.dataset.vocab_size, - num_conv_layers=config.model.num_conv_layers, - num_rnn_layers=config.model.num_rnn_layers, - rnn_size=config.model.rnn_layer_size, - use_gru=config.model.use_gru, - share_rnn_weights=config.model.share_rnn_weights) - self.model = model - logger.info("Setup model!") + def setup_output_dir(self): + """Create a directory used for output. + """ + # output dir + if self.args.output: + output_dir = Path(self.args.output).expanduser() + output_dir.mkdir(parents=True, exist_ok=True) + else: + output_dir = Path( + self.args.checkpoint_path).expanduser().parent.parent + output_dir.mkdir(parents=True, exist_ok=True) - def setup_dataloader(self): - config = self.config.clone() - config.defrost() - # return raw text + self.output_dir = output_dir - config.data.manifest = config.data.test_manifest - config.data.keep_transcription_text = True - config.data.augmentation_config = "" - # filter test examples, will cause less examples, but no mismatch with training - # and can use large batch size , save training time, so filter test egs now. 
- # config.data.min_input_len = 0.0 # second - # config.data.max_input_len = float('inf') # second - # config.data.min_output_len = 0.0 # tokens - # config.data.max_output_len = float('inf') # tokens - # config.data.min_output_input_ratio = 0.00 - # config.data.max_output_input_ratio = float('inf') - test_dataset = ManifestDataset.from_config(config) - # return text ord id - self.test_loader = DataLoader( - test_dataset, - batch_size=config.decoding.batch_size, - shuffle=False, - drop_last=False, - collate_fn=SpeechCollator(keep_transcription_text=True)) - logger.info("Setup test Dataloader!") +class DeepSpeech2ExportTester(DeepSpeech2Tester): + def __init__(self, config, args): + super().__init__(config, args) + + def compute_result_transcripts(self, audio, audio_len, vocab_list, cfg): + if self.args.model_type == "online": + output_probs, output_lens = self.static_forward_online(audio, + audio_len) + elif self.args.model_type == "offline": + output_probs, output_lens = self.static_forward_offline(audio, + audio_len) + else: + raise Exception("wrong model type") + + self.predictor.clear_intermediate_tensor() + self.predictor.try_shrink_memory() + + self.model.decoder.init_decode(cfg.alpha, cfg.beta, cfg.lang_model_path, + vocab_list, cfg.decoding_method) + + result_transcripts = self.model.decoder.decode_probs( + output_probs, output_lens, vocab_list, cfg.decoding_method, + cfg.lang_model_path, cfg.alpha, cfg.beta, cfg.beam_size, + cfg.cutoff_prob, cfg.cutoff_top_n, cfg.num_proc_bsearch) + + return result_transcripts + + def static_forward_online(self, audio, audio_len, + decoder_chunk_size: int=1): + """ + Parameters + ---------- + audio (Tensor): shape[B, T, D] + audio_len (Tensor): shape[B] + decoder_chunk_size(int) + Returns + ------- + output_probs(numpy.array): shape[B, T, vocab_size] + output_lens(numpy.array): shape[B] + """ + output_probs_list = [] + output_lens_list = [] + subsampling_rate = self.model.encoder.conv.subsampling_rate + receptive_field_length = self.model.encoder.conv.receptive_field_length + chunk_stride = subsampling_rate * decoder_chunk_size + chunk_size = (decoder_chunk_size - 1 + ) * subsampling_rate + receptive_field_length + + x_batch = audio.numpy() + batch_size, Tmax, x_dim = x_batch.shape + x_len_batch = audio_len.numpy().astype(np.int64) + if (Tmax - chunk_size) % chunk_stride != 0: + padding_len_batch = chunk_stride - ( + Tmax - chunk_size + ) % chunk_stride # The length of padding for the batch + else: + padding_len_batch = 0 + x_list = np.split(x_batch, batch_size, axis=0) + x_len_list = np.split(x_len_batch, batch_size, axis=0) + + for x, x_len in zip(x_list, x_len_list): + self.autolog.times.start() + self.autolog.times.stamp() + x_len = x_len[0] + assert (chunk_size <= x_len) + + if (x_len - chunk_size) % chunk_stride != 0: + padding_len_x = chunk_stride - (x_len - chunk_size + ) % chunk_stride + else: + padding_len_x = 0 + + padding = np.zeros( + (x.shape[0], padding_len_x, x.shape[2]), dtype=x.dtype) + padded_x = np.concatenate([x, padding], axis=1) + + num_chunk = (x_len + padding_len_x - chunk_size) / chunk_stride + 1 + num_chunk = int(num_chunk) + + chunk_state_h_box = np.zeros( + (self.config.model.num_rnn_layers, 1, + self.config.model.rnn_layer_size), + dtype=x.dtype) + chunk_state_c_box = np.zeros( + (self.config.model.num_rnn_layers, 1, + self.config.model.rnn_layer_size), + dtype=x.dtype) + + input_names = self.predictor.get_input_names() + audio_handle = self.predictor.get_input_handle(input_names[0]) + audio_len_handle = 
self.predictor.get_input_handle(input_names[1]) + h_box_handle = self.predictor.get_input_handle(input_names[2]) + c_box_handle = self.predictor.get_input_handle(input_names[3]) + + probs_chunk_list = [] + probs_chunk_lens_list = [] + for i in range(0, num_chunk): + start = i * chunk_stride + end = start + chunk_size + x_chunk = padded_x[:, start:end, :] + if x_len < i * chunk_stride: + x_chunk_lens = 0 + else: + x_chunk_lens = min(x_len - i * chunk_stride, chunk_size) + + if (x_chunk_lens < + receptive_field_length): #means the number of input frames in the chunk is not enough for predicting one prob + break + x_chunk_lens = np.array([x_chunk_lens]) + audio_handle.reshape(x_chunk.shape) + audio_handle.copy_from_cpu(x_chunk) + + audio_len_handle.reshape(x_chunk_lens.shape) + audio_len_handle.copy_from_cpu(x_chunk_lens) + + h_box_handle.reshape(chunk_state_h_box.shape) + h_box_handle.copy_from_cpu(chunk_state_h_box) + + c_box_handle.reshape(chunk_state_c_box.shape) + c_box_handle.copy_from_cpu(chunk_state_c_box) + + output_names = self.predictor.get_output_names() + output_handle = self.predictor.get_output_handle( + output_names[0]) + output_lens_handle = self.predictor.get_output_handle( + output_names[1]) + output_state_h_handle = self.predictor.get_output_handle( + output_names[2]) + output_state_c_handle = self.predictor.get_output_handle( + output_names[3]) + self.predictor.run() + output_chunk_probs = output_handle.copy_to_cpu() + output_chunk_lens = output_lens_handle.copy_to_cpu() + chunk_state_h_box = output_state_h_handle.copy_to_cpu() + chunk_state_c_box = output_state_c_handle.copy_to_cpu() + + probs_chunk_list.append(output_chunk_probs) + probs_chunk_lens_list.append(output_chunk_lens) + output_probs = np.concatenate(probs_chunk_list, axis=1) + output_lens = np.sum(probs_chunk_lens_list, axis=0) + vocab_size = output_probs.shape[2] + output_probs_padding_len = Tmax + padding_len_batch - output_probs.shape[ + 1] + output_probs_padding = np.zeros( + (1, output_probs_padding_len, vocab_size), + dtype=output_probs. 
+ dtype) # The prob padding for a piece of utterance + output_probs = np.concatenate( + [output_probs, output_probs_padding], axis=1) + output_probs_list.append(output_probs) + output_lens_list.append(output_lens) + self.autolog.times.stamp() + self.autolog.times.stamp() + self.autolog.times.end() + output_probs = np.concatenate(output_probs_list, axis=0) + output_lens = np.concatenate(output_lens_list, axis=0) + return output_probs, output_lens + + def static_forward_offline(self, audio, audio_len): + """ + Parameters + ---------- + audio (Tensor): shape[B, T, D] + audio_len (Tensor): shape[B] + + Returns + ------- + output_probs(numpy.array): shape[B, T, vocab_size] + output_lens(numpy.array): shape[B] + """ + x = audio.numpy() + x_len = audio_len.numpy().astype(np.int64) + + input_names = self.predictor.get_input_names() + audio_handle = self.predictor.get_input_handle(input_names[0]) + audio_len_handle = self.predictor.get_input_handle(input_names[1]) + + audio_handle.reshape(x.shape) + audio_handle.copy_from_cpu(x) + + audio_len_handle.reshape(x_len.shape) + audio_len_handle.copy_from_cpu(x_len) + + self.autolog.times.start() + self.autolog.times.stamp() + self.predictor.run() + self.autolog.times.stamp() + self.autolog.times.stamp() + self.autolog.times.end() + + output_names = self.predictor.get_output_names() + output_handle = self.predictor.get_output_handle(output_names[0]) + output_lens_handle = self.predictor.get_output_handle(output_names[1]) + output_probs = output_handle.copy_to_cpu() + output_lens = output_lens_handle.copy_to_cpu() + return output_probs, output_lens + + def run_test(self): + try: + self.test() + except KeyboardInterrupt: + exit(-1) + + def setup(self): + """Setup the experiment. + """ + paddle.set_device(self.args.device) + + self.setup_output_dir() + + self.setup_dataloader() + self.setup_model() + + self.iteration = 0 + self.epoch = 0 def setup_output_dir(self): """Create a directory used for output. 
@@ -361,8 +653,18 @@ class DeepSpeech2Tester(DeepSpeech2Trainer): output_dir = Path(self.args.output).expanduser() output_dir.mkdir(parents=True, exist_ok=True) else: - output_dir = Path( - self.args.checkpoint_path).expanduser().parent.parent + output_dir = Path(self.args.export_path).expanduser().parent.parent output_dir.mkdir(parents=True, exist_ok=True) self.output_dir = output_dir + + def setup_model(self): + super().setup_model() + speedyspeech_config = inference.Config( + self.args.export_path + ".pdmodel", + self.args.export_path + ".pdiparams") + if (os.environ['CUDA_VISIBLE_DEVICES'].strip() != ''): + speedyspeech_config.enable_use_gpu(100, 0) + speedyspeech_config.enable_memory_optim() + speedyspeech_predictor = inference.create_predictor(speedyspeech_config) + self.predictor = speedyspeech_predictor diff --git a/deepspeech/exps/u2/bin/alignment.py b/deepspeech/exps/u2/bin/alignment.py index c1c9582f8..cef9d1ab9 100644 --- a/deepspeech/exps/u2/bin/alignment.py +++ b/deepspeech/exps/u2/bin/alignment.py @@ -30,6 +30,9 @@ def main(config, args): if __name__ == "__main__": parser = default_argument_parser() + # save asr result to + parser.add_argument( + "--result_file", type=str, help="path of save the asr result") args = parser.parse_args() print_arguments(args, globals()) diff --git a/deepspeech/exps/u2/bin/export.py b/deepspeech/exps/u2/bin/export.py index 292c78389..3dc41b706 100644 --- a/deepspeech/exps/u2/bin/export.py +++ b/deepspeech/exps/u2/bin/export.py @@ -30,6 +30,9 @@ def main(config, args): if __name__ == "__main__": parser = default_argument_parser() + # save jit model to + parser.add_argument( + "--export_path", type=str, help="path of the jit model to save") args = parser.parse_args() print_arguments(args, globals()) diff --git a/deepspeech/exps/u2/bin/test.py b/deepspeech/exps/u2/bin/test.py index c47f932c7..f6127675e 100644 --- a/deepspeech/exps/u2/bin/test.py +++ b/deepspeech/exps/u2/bin/test.py @@ -34,6 +34,9 @@ def main(config, args): if __name__ == "__main__": parser = default_argument_parser() + # save asr result to + parser.add_argument( + "--result_file", type=str, help="path of save the asr result") args = parser.parse_args() print_arguments(args, globals()) diff --git a/deepspeech/exps/u2/bin/train.py b/deepspeech/exps/u2/bin/train.py index 9dd0041dd..b664401a2 100644 --- a/deepspeech/exps/u2/bin/train.py +++ b/deepspeech/exps/u2/bin/train.py @@ -22,6 +22,8 @@ from deepspeech.exps.u2.model import U2Trainer as Trainer from deepspeech.training.cli import default_argument_parser from deepspeech.utils.utility import print_arguments +# from deepspeech.exps.u2.trainer import U2Trainer as Trainer + def main_sp(config, args): exp = Trainer(config, args) diff --git a/deepspeech/exps/u2/config.py b/deepspeech/exps/u2/config.py index 5a0b53f9a..4ec7bd190 100644 --- a/deepspeech/exps/u2/config.py +++ b/deepspeech/exps/u2/config.py @@ -15,6 +15,7 @@ from yacs.config import CfgNode from deepspeech.exps.u2.model import U2Tester from deepspeech.exps.u2.model import U2Trainer +from deepspeech.io.collator import SpeechCollator from deepspeech.io.dataset import ManifestDataset from deepspeech.models.u2 import U2Model @@ -22,6 +23,8 @@ _C = CfgNode() _C.data = ManifestDataset.params() +_C.collator = SpeechCollator.params() + _C.model = U2Model.params() _C.training = U2Trainer.params() diff --git a/deepspeech/exps/u2/model.py b/deepspeech/exps/u2/model.py index 8fabd9ffd..2e512ef1e 100644 --- a/deepspeech/exps/u2/model.py +++ b/deepspeech/exps/u2/model.py @@ -17,6 +17,8 @@ 
import os import sys import time from collections import defaultdict +from collections import OrderedDict +from contextlib import nullcontext from pathlib import Path from typing import Optional @@ -31,13 +33,20 @@ from deepspeech.io.dataset import ManifestDataset from deepspeech.io.sampler import SortagradBatchSampler from deepspeech.io.sampler import SortagradDistributedBatchSampler from deepspeech.models.u2 import U2Model -from deepspeech.training.gradclip import ClipGradByGlobalNormWithLog -from deepspeech.training.scheduler import WarmupLR +from deepspeech.training.optimizer import OptimizerFactory +from deepspeech.training.reporter import ObsScope +from deepspeech.training.reporter import report +from deepspeech.training.scheduler import LRSchedulerFactory +from deepspeech.training.timer import Timer from deepspeech.training.trainer import Trainer +from deepspeech.utils import ctc_utils from deepspeech.utils import error_rate from deepspeech.utils import layer_tools from deepspeech.utils import mp_tools +from deepspeech.utils import text_grid +from deepspeech.utils import utility from deepspeech.utils.log import Log +from deepspeech.utils.utility import UpdateConfig logger = Log(__name__).getlog() @@ -76,21 +85,36 @@ class U2Trainer(Trainer): def train_batch(self, batch_index, batch_data, msg): train_conf = self.config.training start = time.time() - utt, audio, audio_len, text, text_len = batch_data + # forward + utt, audio, audio_len, text, text_len = batch_data loss, attention_loss, ctc_loss = self.model(audio, audio_len, text, text_len) + # loss div by `batch_size * accum_grad` loss /= train_conf.accum_grad - loss.backward() - layer_tools.print_grads(self.model, print_func=None) - losses_np = {'loss': float(loss) * train_conf.accum_grad} if attention_loss: losses_np['att_loss'] = float(attention_loss) if ctc_loss: losses_np['ctc_loss'] = float(ctc_loss) + # loss backward + if (batch_index + 1) % train_conf.accum_grad != 0: + # Disable gradient synchronizations across DDP processes. + # Within this context, gradients will be accumulated on module + # variables, which will later be synchronized. + # When using cpu w/o DDP, model does not have `no_sync` + context = self.model.no_sync if self.parallel else nullcontext + else: + # Used for single gpu training and DDP gradient synchronization + # processes. + context = nullcontext + with context(): + loss.backward() + layer_tools.print_grads(self.model, print_func=None) + + # optimizer step if (batch_index + 1) % train_conf.accum_grad == 0: self.optimizer.step() self.optimizer.clear_grad() @@ -100,12 +124,11 @@ class U2Trainer(Trainer): iteration_time = time.time() - start if (batch_index + 1) % train_conf.log_interval == 0: - msg += "train time: {:>.3f}s, ".format(iteration_time) - msg += "batch size: {}, ".format(self.config.data.batch_size) - msg += "accum: {}, ".format(train_conf.accum_grad) - msg += ', '.join('{}: {:>.6f}'.format(k, v) - for k, v in losses_np.items()) - logger.info(msg) + for k, v in losses_np.items(): + report(k, v) + report("batch_size", self.config.collator.batch_size) + report("accum", train_conf.accum_grad) + report("step_cost", iteration_time) if dist.get_rank() == 0 and self.visualizer: losses_np_v = losses_np.copy() @@ -163,43 +186,61 @@ class U2Trainer(Trainer): from_scratch = self.resume_or_scratch() if from_scratch: # save init model, i.e. 
0 epoch - self.save(tag='init') + self.save(tag='init', infos=None) - self.lr_scheduler.step(self.iteration) - if self.parallel: + # lr will resotre from optimizer ckpt + # self.lr_scheduler.step(self.iteration) + if self.parallel and hasattr(self.train_loader, 'batch_sampler'): self.train_loader.batch_sampler.set_epoch(self.epoch) logger.info(f"Train Total Examples: {len(self.train_loader.dataset)}") while self.epoch < self.config.training.n_epoch: - self.model.train() - try: - data_start_time = time.time() - for batch_index, batch in enumerate(self.train_loader): - dataload_time = time.time() - data_start_time - msg = "Train: Rank: {}, ".format(dist.get_rank()) - msg += "epoch: {}, ".format(self.epoch) - msg += "step: {}, ".format(self.iteration) - msg += "batch : {}/{}, ".format(batch_index + 1, - len(self.train_loader)) - msg += "lr: {:>.8f}, ".format(self.lr_scheduler()) - msg += "data time: {:>.3f}s, ".format(dataload_time) - self.train_batch(batch_index, batch, msg) + with Timer("Epoch-Train Time Cost: {}"): + self.model.train() + try: data_start_time = time.time() - except Exception as e: - logger.error(e) - raise e - - total_loss, num_seen_utts = self.valid() - if dist.get_world_size() > 1: - num_seen_utts = paddle.to_tensor(num_seen_utts) - # the default operator in all_reduce function is sum. - dist.all_reduce(num_seen_utts) - total_loss = paddle.to_tensor(total_loss) - dist.all_reduce(total_loss) - cv_loss = total_loss / num_seen_utts - cv_loss = float(cv_loss) - else: - cv_loss = total_loss / num_seen_utts + for batch_index, batch in enumerate(self.train_loader): + dataload_time = time.time() - data_start_time + msg = "Train:" + observation = OrderedDict() + with ObsScope(observation): + report("Rank", dist.get_rank()) + report("epoch", self.epoch) + report('step', self.iteration) + report('step/total', + (batch_index + 1) / len(self.train_loader)) + report("lr", self.lr_scheduler()) + self.train_batch(batch_index, batch, msg) + self.after_train_batch() + report('reader_cost', dataload_time) + observation['batch_cost'] = observation[ + 'reader_cost'] + observation['step_cost'] + observation['samples'] = observation['batch_size'] + observation['ips[sent./sec]'] = observation[ + 'batch_size'] / observation['batch_cost'] + for k, v in observation.items(): + msg += f" {k}: " + msg += f"{v:>.8f}" if isinstance(v, + float) else f"{v}" + msg += "," + logger.info(msg) + data_start_time = time.time() + except Exception as e: + logger.error(e) + raise e + + with Timer("Eval Time Cost: {}"): + total_loss, num_seen_utts = self.valid() + if dist.get_world_size() > 1: + num_seen_utts = paddle.to_tensor(num_seen_utts) + # the default operator in all_reduce function is sum. 
+ dist.all_reduce(num_seen_utts) + total_loss = paddle.to_tensor(total_loss) + dist.all_reduce(total_loss) + cv_loss = total_loss / num_seen_utts + cv_loss = float(cv_loss) + else: + cv_loss = total_loss / num_seen_utts logger.info( 'Epoch {} Val info val_loss {}'.format(self.epoch, cv_loss)) @@ -213,76 +254,89 @@ class U2Trainer(Trainer): def setup_dataloader(self): config = self.config.clone() config.defrost() - config.data.keep_transcription_text = False + config.collator.keep_transcription_text = False # train/valid dataset, return token ids config.data.manifest = config.data.train_manifest train_dataset = ManifestDataset.from_config(config) config.data.manifest = config.data.dev_manifest - config.data.augmentation_config = "" dev_dataset = ManifestDataset.from_config(config) - collate_fn = SpeechCollator(keep_transcription_text=False) + collate_fn_train = SpeechCollator.from_config(config) + + config.collator.augmentation_config = "" + collate_fn_dev = SpeechCollator.from_config(config) + if self.parallel: batch_sampler = SortagradDistributedBatchSampler( train_dataset, - batch_size=config.data.batch_size, + batch_size=config.collator.batch_size, num_replicas=None, rank=None, shuffle=True, drop_last=True, - sortagrad=config.data.sortagrad, - shuffle_method=config.data.shuffle_method) + sortagrad=config.collator.sortagrad, + shuffle_method=config.collator.shuffle_method) else: batch_sampler = SortagradBatchSampler( train_dataset, shuffle=True, - batch_size=config.data.batch_size, + batch_size=config.collator.batch_size, drop_last=True, - sortagrad=config.data.sortagrad, - shuffle_method=config.data.shuffle_method) + sortagrad=config.collator.sortagrad, + shuffle_method=config.collator.shuffle_method) self.train_loader = DataLoader( train_dataset, batch_sampler=batch_sampler, - collate_fn=collate_fn, - num_workers=config.data.num_workers, ) + collate_fn=collate_fn_train, + num_workers=config.collator.num_workers, ) self.valid_loader = DataLoader( dev_dataset, - batch_size=config.data.batch_size, + batch_size=config.collator.batch_size, shuffle=False, drop_last=False, - collate_fn=collate_fn) + collate_fn=collate_fn_dev) # test dataset, return raw text config.data.manifest = config.data.test_manifest - config.data.keep_transcription_text = True - config.data.augmentation_config = "" # filter test examples, will cause less examples, but no mismatch with training # and can use large batch size , save training time, so filter test egs now. 
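# A minimal, self-contained sketch of the gradient-accumulation pattern used by
# train_batch() above: divide the loss by accum_grad, skip DDP gradient sync on
# non-update steps, and step the optimizer once every accum_grad batches. The toy
# model, the accum_grad value and the random inputs are illustrative assumptions only.
from contextlib import nullcontext

import paddle

accum_grad = 4
model = paddle.nn.Linear(8, 2)
optimizer = paddle.optimizer.SGD(learning_rate=0.1, parameters=model.parameters())
is_parallel = isinstance(model, paddle.DataParallel)  # False in this single-process sketch

for step in range(8):
    x = paddle.randn([16, 8])
    loss = model(x).mean() / accum_grad  # loss div by accum_grad
    # only paddle.DataParallel exposes no_sync(); plain layers fall back to nullcontext
    if is_parallel and (step + 1) % accum_grad != 0:
        context = model.no_sync
    else:
        context = nullcontext
    with context():
        loss.backward()
    if (step + 1) % accum_grad == 0:
        optimizer.step()
        optimizer.clear_grad()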
- # config.data.min_input_len = 0.0 # second - # config.data.max_input_len = float('inf') # second - # config.data.min_output_len = 0.0 # tokens - # config.data.max_output_len = float('inf') # tokens - # config.data.min_output_input_ratio = 0.00 - # config.data.max_output_input_ratio = float('inf') + config.data.min_input_len = 0.0 # second + config.data.max_input_len = float('inf') # second + config.data.min_output_len = 0.0 # tokens + config.data.max_output_len = float('inf') # tokens + config.data.min_output_input_ratio = 0.00 + config.data.max_output_input_ratio = float('inf') + test_dataset = ManifestDataset.from_config(config) # return text ord id + config.collator.keep_transcription_text = True + config.collator.augmentation_config = "" self.test_loader = DataLoader( test_dataset, batch_size=config.decoding.batch_size, shuffle=False, drop_last=False, - collate_fn=SpeechCollator(keep_transcription_text=True)) - logger.info("Setup train/valid/test Dataloader!") + collate_fn=SpeechCollator.from_config(config)) + # return text token id + config.collator.keep_transcription_text = False + self.align_loader = DataLoader( + test_dataset, + batch_size=config.decoding.batch_size, + shuffle=False, + drop_last=False, + collate_fn=SpeechCollator.from_config(config)) + logger.info("Setup train/valid/test/align Dataloader!") def setup_model(self): config = self.config model_conf = config.model - model_conf.defrost() - model_conf.input_dim = self.train_loader.dataset.feature_size - model_conf.output_dim = self.train_loader.dataset.vocab_size - model_conf.freeze() + + with UpdateConfig(model_conf): + model_conf.input_dim = self.train_loader.collate_fn.feature_size + model_conf.output_dim = self.train_loader.collate_fn.vocab_size + model = U2Model.from_config(model_conf) if self.parallel: @@ -297,30 +351,38 @@ class U2Trainer(Trainer): scheduler_type = train_config.scheduler scheduler_conf = train_config.scheduler_conf - grad_clip = ClipGradByGlobalNormWithLog(train_config.global_grad_clip) - weight_decay = paddle.regularizer.L2Decay(optim_conf.weight_decay) - - if scheduler_type == 'expdecaylr': - lr_scheduler = paddle.optimizer.lr.ExponentialDecay( - learning_rate=optim_conf.lr, - gamma=scheduler_conf.lr_decay, - verbose=False) - elif scheduler_type == 'warmuplr': - lr_scheduler = WarmupLR( - learning_rate=optim_conf.lr, - warmup_steps=scheduler_conf.warmup_steps, - verbose=False) - else: - raise ValueError(f"Not support scheduler: {scheduler_type}") - - if optim_type == 'adam': - optimizer = paddle.optimizer.Adam( - learning_rate=lr_scheduler, - parameters=model.parameters(), - weight_decay=weight_decay, - grad_clip=grad_clip) - else: - raise ValueError(f"Not support optim: {optim_type}") + scheduler_args = { + "learning_rate": optim_conf.lr, + "verbose": False, + "warmup_steps": scheduler_conf.warmup_steps, + "gamma": scheduler_conf.lr_decay, + "d_model": model_conf.encoder_conf.output_size, + } + lr_scheduler = LRSchedulerFactory.from_args(scheduler_type, + scheduler_args) + + def optimizer_args( + config, + parameters, + lr_scheduler=None, ): + train_config = config.training + optim_type = train_config.optim + optim_conf = train_config.optim_conf + scheduler_type = train_config.scheduler + scheduler_conf = train_config.scheduler_conf + return { + "grad_clip": train_config.global_grad_clip, + "weight_decay": optim_conf.weight_decay, + "learning_rate": lr_scheduler + if lr_scheduler else optim_conf.lr, + "parameters": parameters, + "epsilon": 1e-9 if optim_type == 'noam' else None, + "beta1": 0.9 
if optim_type == 'noam' else None, + "beat2": 0.98 if optim_type == 'noam' else None, + } + + optimzer_args = optimizer_args(config, model.parameters(), lr_scheduler) + optimizer = OptimizerFactory.from_args(optim_type, optimzer_args) self.model = model self.optimizer = optimizer @@ -349,7 +411,7 @@ class U2Tester(U2Trainer): decoding_chunk_size=-1, # decoding chunk size. Defaults to -1. # <0: for decoding, use full chunk. # >0: for decoding, use fixed chunk size as set. - # 0: used for training, it's prohibited here. + # 0: used for training, it's prohibited here. num_decoding_left_chunks=-1, # number of left chunks for decoding. Defaults to -1. simulate_streaming=False, # simulate streaming inference. Defaults to False. )) @@ -383,7 +445,7 @@ class U2Tester(U2Trainer): error_rate_func = error_rate.cer if cfg.error_rate_type == 'cer' else error_rate.wer start_time = time.time() - text_feature = self.test_loader.dataset.text_feature + text_feature = self.test_loader.collate_fn.text_feature target_transcripts = self.ordid2token(texts, texts_len) result_transcripts = self.model.decode( audio, @@ -432,7 +494,7 @@ class U2Tester(U2Trainer): self.model.eval() logger.info(f"Test Total Examples: {len(self.test_loader.dataset)}") - stride_ms = self.test_loader.dataset.stride_ms + stride_ms = self.test_loader.collate_fn.stride_ms error_rate_type = None errors_sum, len_refs, num_ins = 0.0, 0, 0 num_frames = 0.0 @@ -494,6 +556,73 @@ class U2Tester(U2Trainer): except KeyboardInterrupt: sys.exit(-1) + @paddle.no_grad() + def align(self): + if self.config.decoding.batch_size > 1: + logger.fatal('alignment mode must be running with batch_size == 1') + sys.exit(1) + + # xxx.align + assert self.args.result_file and self.args.result_file.endswith( + '.align') + + self.model.eval() + logger.info(f"Align Total Examples: {len(self.align_loader.dataset)}") + + stride_ms = self.align_loader.collate_fn.stride_ms + token_dict = self.align_loader.collate_fn.vocab_list + with open(self.args.result_file, 'w') as fout: + # one example in batch + for i, batch in enumerate(self.align_loader): + key, feat, feats_length, target, target_length = batch + + # 1. Encoder + encoder_out, encoder_mask = self.model._forward_encoder( + feat, feats_length) # (B, maxlen, encoder_dim) + maxlen = encoder_out.size(1) + ctc_probs = self.model.ctc.log_softmax( + encoder_out) # (1, maxlen, vocab_size) + + # 2. alignment + ctc_probs = ctc_probs.squeeze(0) + target = target.squeeze(0) + alignment = ctc_utils.forced_align(ctc_probs, target) + logger.info("align ids", key[0], alignment) + fout.write('{} {}\n'.format(key[0], alignment)) + + # 3. gen praat + # segment alignment + align_segs = text_grid.segment_alignment(alignment) + logger.info("align tokens", key[0], align_segs) + # IntervalTier, List["start end token\n"] + subsample = utility.get_subsample(self.config) + tierformat = text_grid.align_to_tierformat( + align_segs, subsample, token_dict) + # write tier + align_output_path = os.path.join( + os.path.dirname(self.args.result_file), "align") + tier_path = os.path.join(align_output_path, key[0] + ".tier") + with open(tier_path, 'w') as f: + f.writelines(tierformat) + # write textgrid + textgrid_path = os.path.join(align_output_path, + key[0] + ".TextGrid") + second_per_frame = 1. / (1000. 
/ + stride_ms) # 25ms window, 10ms stride + second_per_example = ( + len(alignment) + 1) * subsample * second_per_frame + text_grid.generate_textgrid( + maxtime=second_per_example, + intervals=tierformat, + output=textgrid_path) + + def run_align(self): + self.resume_or_scratch() + try: + self.align() + except KeyboardInterrupt: + sys.exit(-1) + def load_inferspec(self): """infer model and input spec. @@ -502,15 +631,14 @@ class U2Tester(U2Trainer): List[paddle.static.InputSpec]: input spec. """ from deepspeech.models.u2 import U2InferModel - infer_model = U2InferModel.from_pretrained(self.test_loader.dataset, + infer_model = U2InferModel.from_pretrained(self.test_loader, self.config.model.clone(), self.args.checkpoint_path) - feat_dim = self.test_loader.dataset.feature_size + feat_dim = self.test_loader.collate_fn.feature_size input_spec = [ - paddle.static.InputSpec( - shape=[None, feat_dim, None], - dtype='float32'), # audio, [B,D,T] - paddle.static.InputSpec(shape=[None], + paddle.static.InputSpec(shape=[1, None, feat_dim], + dtype='float32'), # audio, [B,T,D] + paddle.static.InputSpec(shape=[1], dtype='int64'), # audio_length, [B] ] return infer_model, input_spec diff --git a/deepspeech/exps/u2/trainer.py b/deepspeech/exps/u2/trainer.py new file mode 100644 index 000000000..8e8634ac3 --- /dev/null +++ b/deepspeech/exps/u2/trainer.py @@ -0,0 +1,220 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
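# A small worked example of the frame-to-time arithmetic that align() above applies
# when writing TextGrid files. The 10 ms stride, the subsampling factor of 4 and the
# toy alignment are illustrative assumptions, not values read from any config here.
stride_ms = 10.0                    # feature frame shift in milliseconds
subsample = 4                       # total subsampling factor of the encoder front-end
alignment = [0, 0, 12, 12, 0, 7]    # hypothetical CTC forced-alignment token ids

second_per_frame = 1.0 / (1000.0 / stride_ms)  # 0.01 s of audio per input frame
second_per_example = (len(alignment) + 1) * subsample * second_per_frame
print(second_per_frame, second_per_example)    # 0.01 0.28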
+"""Contains U2 model.""" +import paddle +from paddle import distributed as dist +from paddle.io import DataLoader + +from deepspeech.io.collator import SpeechCollator +from deepspeech.io.dataset import ManifestDataset +from deepspeech.io.sampler import SortagradBatchSampler +from deepspeech.io.sampler import SortagradDistributedBatchSampler +from deepspeech.models.u2 import U2Evaluator +from deepspeech.models.u2 import U2Model +from deepspeech.models.u2 import U2Updater +from deepspeech.training.extensions.snapshot import Snapshot +from deepspeech.training.extensions.visualizer import VisualDL +from deepspeech.training.optimizer import OptimizerFactory +from deepspeech.training.scheduler import LRSchedulerFactory +from deepspeech.training.timer import Timer +from deepspeech.training.trainer import Trainer +from deepspeech.training.updaters.trainer import Trainer as NewTrainer +from deepspeech.utils import layer_tools +from deepspeech.utils.log import Log +from deepspeech.utils.utility import UpdateConfig + +logger = Log(__name__).getlog() + + +class U2Trainer(Trainer): + def __init__(self, config, args): + super().__init__(config, args) + + def setup_dataloader(self): + config = self.config.clone() + config.defrost() + config.collator.keep_transcription_text = False + + # train/valid dataset, return token ids + config.data.manifest = config.data.train_manifest + train_dataset = ManifestDataset.from_config(config) + + config.data.manifest = config.data.dev_manifest + dev_dataset = ManifestDataset.from_config(config) + + collate_fn_train = SpeechCollator.from_config(config) + + config.collator.augmentation_config = "" + collate_fn_dev = SpeechCollator.from_config(config) + + if self.parallel: + batch_sampler = SortagradDistributedBatchSampler( + train_dataset, + batch_size=config.collator.batch_size, + num_replicas=None, + rank=None, + shuffle=True, + drop_last=True, + sortagrad=config.collator.sortagrad, + shuffle_method=config.collator.shuffle_method) + else: + batch_sampler = SortagradBatchSampler( + train_dataset, + shuffle=True, + batch_size=config.collator.batch_size, + drop_last=True, + sortagrad=config.collator.sortagrad, + shuffle_method=config.collator.shuffle_method) + self.train_loader = DataLoader( + train_dataset, + batch_sampler=batch_sampler, + collate_fn=collate_fn_train, + num_workers=config.collator.num_workers, ) + self.valid_loader = DataLoader( + dev_dataset, + batch_size=config.collator.batch_size, + shuffle=False, + drop_last=False, + collate_fn=collate_fn_dev) + + # test dataset, return raw text + config.data.manifest = config.data.test_manifest + # filter test examples, will cause less examples, but no mismatch with training + # and can use large batch size , save training time, so filter test egs now. 
+ config.data.min_input_len = 0.0 # second + config.data.max_input_len = float('inf') # second + config.data.min_output_len = 0.0 # tokens + config.data.max_output_len = float('inf') # tokens + config.data.min_output_input_ratio = 0.00 + config.data.max_output_input_ratio = float('inf') + + test_dataset = ManifestDataset.from_config(config) + # return text ord id + config.collator.keep_transcription_text = True + config.collator.augmentation_config = "" + self.test_loader = DataLoader( + test_dataset, + batch_size=config.decoding.batch_size, + shuffle=False, + drop_last=False, + collate_fn=SpeechCollator.from_config(config)) + # return text token id + config.collator.keep_transcription_text = False + self.align_loader = DataLoader( + test_dataset, + batch_size=config.decoding.batch_size, + shuffle=False, + drop_last=False, + collate_fn=SpeechCollator.from_config(config)) + logger.info("Setup train/valid/test/align Dataloader!") + + def setup_model(self): + config = self.config + model_conf = config.model + with UpdateConfig(model_conf): + model_conf.input_dim = self.train_loader.collate_fn.feature_size + model_conf.output_dim = self.train_loader.collate_fn.vocab_size + + model = U2Model.from_config(model_conf) + + if self.parallel: + model = paddle.DataParallel(model) + + model.train() + logger.info(f"{model}") + layer_tools.print_params(model, logger.info) + + train_config = config.training + optim_type = train_config.optim + optim_conf = train_config.optim_conf + scheduler_type = train_config.scheduler + scheduler_conf = train_config.scheduler_conf + + scheduler_args = { + "learning_rate": optim_conf.lr, + "verbose": False, + "warmup_steps": scheduler_conf.warmup_steps, + "gamma": scheduler_conf.lr_decay, + "d_model": model_conf.encoder_conf.output_size, + } + lr_scheduler = LRSchedulerFactory.from_args(scheduler_type, + scheduler_args) + + def optimizer_args( + config, + parameters, + lr_scheduler=None, ): + train_config = config.training + optim_type = train_config.optim + optim_conf = train_config.optim_conf + scheduler_type = train_config.scheduler + scheduler_conf = train_config.scheduler_conf + return { + "grad_clip": train_config.global_grad_clip, + "weight_decay": optim_conf.weight_decay, + "learning_rate": lr_scheduler + if lr_scheduler else optim_conf.lr, + "parameters": parameters, + "epsilon": 1e-9 if optim_type == 'noam' else None, + "beta1": 0.9 if optim_type == 'noam' else None, + "beat2": 0.98 if optim_type == 'noam' else None, + } + + optimzer_args = optimizer_args(config, model.parameters(), lr_scheduler) + optimizer = OptimizerFactory.from_args(optim_type, optimzer_args) + + self.model = model + self.optimizer = optimizer + self.lr_scheduler = lr_scheduler + logger.info("Setup model/optimizer/lr_scheduler!") + + def setup_updater(self): + output_dir = self.output_dir + config = self.config.training + + updater = U2Updater( + model=self.model, + optimizer=self.optimizer, + scheduler=self.lr_scheduler, + dataloader=self.train_loader, + output_dir=output_dir, + accum_grad=config.accum_grad) + + trainer = NewTrainer(updater, (config.n_epoch, 'epoch'), output_dir) + + evaluator = U2Evaluator(self.model, self.valid_loader) + + trainer.extend(evaluator, trigger=(1, "epoch")) + + if dist.get_rank() == 0: + trainer.extend(VisualDL(output_dir), trigger=(1, "iteration")) + num_snapshots = config.checkpoint.kbest_n + trainer.extend( + Snapshot( + mode='kbest', + max_size=num_snapshots, + indicator='VALID/LOSS', + less_better=True), + trigger=(1, 'epoch')) + # 
print(trainer.extensions) + # trainer.run() + self.trainer = trainer + + def run(self): + """The routine of the experiment after setup. This method is intended + to be used by the user. + """ + self.setup_updater() + with Timer("Training Done: {}"): + self.trainer.run() diff --git a/deepspeech/exps/u2_kaldi/__init__.py b/deepspeech/exps/u2_kaldi/__init__.py new file mode 100644 index 000000000..185a92b8d --- /dev/null +++ b/deepspeech/exps/u2_kaldi/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/deepspeech/exps/u2_kaldi/bin/test.py b/deepspeech/exps/u2_kaldi/bin/test.py new file mode 100644 index 000000000..93a29ab15 --- /dev/null +++ b/deepspeech/exps/u2_kaldi/bin/test.py @@ -0,0 +1,83 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Evaluation for U2 model.""" +import cProfile + +from yacs.config import CfgNode + +from deepspeech.training.cli import default_argument_parser +from deepspeech.utils.dynamic_import import dynamic_import +from deepspeech.utils.utility import print_arguments + +model_test_alias = { + "u2": "deepspeech.exps.u2.model:U2Tester", + "u2_kaldi": "deepspeech.exps.u2_kaldi.model:U2Tester", +} + + +def main_sp(config, args): + class_obj = dynamic_import(args.model_name, model_test_alias) + exp = class_obj(config, args) + exp.setup() + + if args.run_mode == 'test': + exp.run_test() + elif args.run_mode == 'export': + exp.run_export() + elif args.run_mode == 'align': + exp.run_align() + + +def main(config, args): + main_sp(config, args) + + +if __name__ == "__main__": + parser = default_argument_parser() + parser.add_argument( + '--model-name', + type=str, + default='u2_kaldi', + help='model name, e.g: deepspeech2, u2, u2_kaldi, u2_st') + parser.add_argument( + '--run-mode', + type=str, + default='test', + help='run mode, e.g. 
test, align, export') + parser.add_argument( + '--dict-path', type=str, default=None, help='dict path.') + # save asr result to + parser.add_argument( + "--result-file", type=str, help="path of save the asr result") + # save jit model to + parser.add_argument( + "--export-path", type=str, help="path of the jit model to save") + args = parser.parse_args() + print_arguments(args, globals()) + + config = CfgNode() + config.set_new_allowed(True) + config.merge_from_file(args.config) + if args.opts: + config.merge_from_list(args.opts) + config.freeze() + print(config) + if args.dump_config: + with open(args.dump_config, 'w') as f: + print(config, file=f) + + # Setting for profiling + pr = cProfile.Profile() + pr.runcall(main, config, args) + pr.dump_stats('test.profile') diff --git a/deepspeech/exps/u2_kaldi/bin/train.py b/deepspeech/exps/u2_kaldi/bin/train.py new file mode 100644 index 000000000..1dcd154d3 --- /dev/null +++ b/deepspeech/exps/u2_kaldi/bin/train.py @@ -0,0 +1,69 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Trainer for U2 model.""" +import cProfile +import os + +from paddle import distributed as dist +from yacs.config import CfgNode + +from deepspeech.training.cli import default_argument_parser +from deepspeech.utils.dynamic_import import dynamic_import +from deepspeech.utils.utility import print_arguments + +model_train_alias = { + "u2": "deepspeech.exps.u2.model:U2Trainer", + "u2_kaldi": "deepspeech.exps.u2_kaldi.model:U2Trainer", +} + + +def main_sp(config, args): + class_obj = dynamic_import(args.model_name, model_train_alias) + exp = class_obj(config, args) + exp.setup() + exp.run() + + +def main(config, args): + if args.device == "gpu" and args.nprocs > 1: + dist.spawn(main_sp, args=(config, args), nprocs=args.nprocs) + else: + main_sp(config, args) + + +if __name__ == "__main__": + parser = default_argument_parser() + parser.add_argument( + '--model-name', + type=str, + default='u2_kaldi', + help='model name, e.g: deepspeech2, u2, u2_kaldi, u2_st') + args = parser.parse_args() + print_arguments(args, globals()) + + config = CfgNode() + config.set_new_allowed(True) + config.merge_from_file(args.config) + if args.opts: + config.merge_from_list(args.opts) + config.freeze() + print(config) + if args.dump_config: + with open(args.dump_config, 'w') as f: + print(config, file=f) + + # Setting for profiling + pr = cProfile.Profile() + pr.runcall(main, config, args) + pr.dump_stats(os.path.join(args.output, 'train.profile')) diff --git a/deepspeech/exps/u2_kaldi/model.py b/deepspeech/exps/u2_kaldi/model.py new file mode 100644 index 000000000..edcc34012 --- /dev/null +++ b/deepspeech/exps/u2_kaldi/model.py @@ -0,0 +1,671 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains U2 model.""" +import json +import os +import sys +import time +from collections import defaultdict +from contextlib import nullcontext +from pathlib import Path +from typing import Optional + +import numpy as np +import paddle +from paddle import distributed as dist +from yacs.config import CfgNode + +from deepspeech.frontend.featurizer import TextFeaturizer +from deepspeech.frontend.utility import load_dict +from deepspeech.io.dataloader import BatchDataLoader +from deepspeech.models.u2 import U2Model +from deepspeech.training.optimizer import OptimizerFactory +from deepspeech.training.scheduler import LRSchedulerFactory +from deepspeech.training.timer import Timer +from deepspeech.training.trainer import Trainer +from deepspeech.utils import ctc_utils +from deepspeech.utils import error_rate +from deepspeech.utils import layer_tools +from deepspeech.utils import mp_tools +from deepspeech.utils import text_grid +from deepspeech.utils import utility +from deepspeech.utils.log import Log +from deepspeech.utils.utility import UpdateConfig + +logger = Log(__name__).getlog() + + +def get_cfg_defaults(): + """Get a yacs CfgNode object with default values for my_project.""" + # Return a clone so that the defaults will not be altered + # This is for the "local variable" use pattern + _C = CfgNode() + + _C.model = U2Model.params() + + _C.training = U2Trainer.params() + + _C.decoding = U2Tester.params() + + config = _C.clone() + config.set_new_allowed(True) + return config + + +class U2Trainer(Trainer): + @classmethod + def params(cls, config: Optional[CfgNode]=None) -> CfgNode: + # training config + default = CfgNode( + dict( + n_epoch=50, # train epochs + log_interval=100, # steps + accum_grad=1, # accum grad by # steps + checkpoint=dict( + kbest_n=50, + latest_n=5, ), )) + if config is not None: + config.merge_from_other_cfg(default) + return default + + def __init__(self, config, args): + super().__init__(config, args) + + def train_batch(self, batch_index, batch_data, msg): + train_conf = self.config.training + start = time.time() + + # forward + utt, audio, audio_len, text, text_len = batch_data + loss, attention_loss, ctc_loss = self.model(audio, audio_len, text, + text_len) + + # loss div by `batch_size * accum_grad` + loss /= train_conf.accum_grad + losses_np = {'loss': float(loss) * train_conf.accum_grad} + if attention_loss: + losses_np['att_loss'] = float(attention_loss) + if ctc_loss: + losses_np['ctc_loss'] = float(ctc_loss) + + # loss backward + if (batch_index + 1) % train_conf.accum_grad != 0: + # Disable gradient synchronizations across DDP processes. + # Within this context, gradients will be accumulated on module + # variables, which will later be synchronized. + context = self.model.no_sync + else: + # Used for single gpu training and DDP gradient synchronization + # processes. 
+ context = nullcontext + with context(): + loss.backward() + layer_tools.print_grads(self.model, print_func=None) + + # optimizer step + if (batch_index + 1) % train_conf.accum_grad == 0: + self.optimizer.step() + self.optimizer.clear_grad() + self.lr_scheduler.step() + self.iteration += 1 + + iteration_time = time.time() - start + + if (batch_index + 1) % train_conf.log_interval == 0: + msg += "train time: {:>.3f}s, ".format(iteration_time) + msg += "batch size: {}, ".format(self.config.collator.batch_size) + msg += "accum: {}, ".format(train_conf.accum_grad) + msg += ', '.join('{}: {:>.6f}'.format(k, v) + for k, v in losses_np.items()) + logger.info(msg) + + if dist.get_rank() == 0 and self.visualizer: + losses_np_v = losses_np.copy() + losses_np_v.update({"lr": self.lr_scheduler()}) + self.visualizer.add_scalars("step", losses_np_v, + self.iteration - 1) + + @paddle.no_grad() + def valid(self): + self.model.eval() + logger.info(f"Valid Total Examples: {len(self.valid_loader.dataset)}") + valid_losses = defaultdict(list) + num_seen_utts = 1 + total_loss = 0.0 + + for i, batch in enumerate(self.valid_loader): + utt, audio, audio_len, text, text_len = batch + loss, attention_loss, ctc_loss = self.model(audio, audio_len, text, + text_len) + if paddle.isfinite(loss): + num_utts = batch[1].shape[0] + num_seen_utts += num_utts + total_loss += float(loss) * num_utts + valid_losses['val_loss'].append(float(loss)) + if attention_loss: + valid_losses['val_att_loss'].append(float(attention_loss)) + if ctc_loss: + valid_losses['val_ctc_loss'].append(float(ctc_loss)) + + if (i + 1) % self.config.training.log_interval == 0: + valid_dump = {k: np.mean(v) for k, v in valid_losses.items()} + valid_dump['val_history_loss'] = total_loss / num_seen_utts + + # logging + msg = f"Valid: Rank: {dist.get_rank()}, " + msg += "epoch: {}, ".format(self.epoch) + msg += "step: {}, ".format(self.iteration) + msg += "batch: {}/{}, ".format(i + 1, len(self.valid_loader)) + msg += ', '.join('{}: {:>.6f}'.format(k, v) + for k, v in valid_dump.items()) + logger.info(msg) + + logger.info('Rank {} Val info val_loss {}'.format( + dist.get_rank(), total_loss / num_seen_utts)) + return total_loss, num_seen_utts + + def train(self): + """The training process control by step.""" + # !!!IMPORTANT!!! + # Try to export the model by script, if fails, we should refine + # the code to satisfy the script export requirements + # script_model = paddle.jit.to_static(self.model) + # script_model_path = str(self.checkpoint_dir / 'init') + # paddle.jit.save(script_model, script_model_path) + + from_scratch = self.resume_or_scratch() + if from_scratch: + # save init model, i.e. 
0 epoch + self.save(tag='init') + self.lr_scheduler.step(self.iteration) + + logger.info(f"Train Total Examples: {len(self.train_loader.dataset)}") + while self.epoch < self.config.training.n_epoch: + with Timer("Epoch-Train Time Cost: {}"): + self.model.train() + try: + data_start_time = time.time() + for batch_index, batch in enumerate(self.train_loader): + dataload_time = time.time() - data_start_time + msg = "Train: Rank: {}, ".format(dist.get_rank()) + msg += "epoch: {}, ".format(self.epoch) + msg += "step: {}, ".format(self.iteration) + msg += "batch : {}/{}, ".format(batch_index + 1, + len(self.train_loader)) + msg += "lr: {:>.8f}, ".format(self.lr_scheduler()) + msg += "data time: {:>.3f}s, ".format(dataload_time) + self.train_batch(batch_index, batch, msg) + self.after_train_batch() + data_start_time = time.time() + except Exception as e: + logger.error(e) + raise e + + with Timer("Eval Time Cost: {}"): + total_loss, num_seen_utts = self.valid() + if dist.get_world_size() > 1: + num_seen_utts = paddle.to_tensor(num_seen_utts) + # the default operator in all_reduce function is sum. + dist.all_reduce(num_seen_utts) + total_loss = paddle.to_tensor(total_loss) + dist.all_reduce(total_loss) + cv_loss = total_loss / num_seen_utts + cv_loss = float(cv_loss) + else: + cv_loss = total_loss / num_seen_utts + + logger.info( + 'Epoch {} Val info val_loss {}'.format(self.epoch, cv_loss)) + if self.visualizer: + self.visualizer.add_scalars( + 'epoch', {'cv_loss': cv_loss, + 'lr': self.lr_scheduler()}, self.epoch) + self.save(tag=self.epoch, infos={'val_loss': cv_loss}) + self.new_epoch() + + def setup_dataloader(self): + config = self.config.clone() + # train/valid dataset, return token ids + self.train_loader = BatchDataLoader( + json_file=config.data.train_manifest, + train_mode=True, + sortagrad=False, + batch_size=config.collator.batch_size, + maxlen_in=float('inf'), + maxlen_out=float('inf'), + minibatches=0, + mini_batch_size=self.args.nprocs, + batch_count='auto', + batch_bins=0, + batch_frames_in=0, + batch_frames_out=0, + batch_frames_inout=0, + preprocess_conf=config.collator.augmentation_config, + n_iter_processes=config.collator.num_workers, + subsampling_factor=1, + num_encs=1) + + self.valid_loader = BatchDataLoader( + json_file=config.data.dev_manifest, + train_mode=False, + sortagrad=False, + batch_size=config.collator.batch_size, + maxlen_in=float('inf'), + maxlen_out=float('inf'), + minibatches=0, + mini_batch_size=self.args.nprocs, + batch_count='auto', + batch_bins=0, + batch_frames_in=0, + batch_frames_out=0, + batch_frames_inout=0, + preprocess_conf=None, + n_iter_processes=1, + subsampling_factor=1, + num_encs=1) + + # test dataset, return raw text + self.test_loader = BatchDataLoader( + json_file=config.data.test_manifest, + train_mode=False, + sortagrad=False, + batch_size=config.decoding.batch_size, + maxlen_in=float('inf'), + maxlen_out=float('inf'), + minibatches=0, + mini_batch_size=1, + batch_count='auto', + batch_bins=0, + batch_frames_in=0, + batch_frames_out=0, + batch_frames_inout=0, + preprocess_conf=None, + n_iter_processes=1, + subsampling_factor=1, + num_encs=1) + + self.align_loader = BatchDataLoader( + json_file=config.data.test_manifest, + train_mode=False, + sortagrad=False, + batch_size=config.decoding.batch_size, + maxlen_in=float('inf'), + maxlen_out=float('inf'), + minibatches=0, + mini_batch_size=1, + batch_count='auto', + batch_bins=0, + batch_frames_in=0, + batch_frames_out=0, + batch_frames_inout=0, + preprocess_conf=None, + n_iter_processes=1, 
+ subsampling_factor=1, + num_encs=1) + logger.info("Setup train/valid/test/align Dataloader!") + + def setup_model(self): + config = self.config + + # model + model_conf = config.model + with UpdateConfig(model_conf): + model_conf.input_dim = self.train_loader.feat_dim + model_conf.output_dim = self.train_loader.vocab_size + + model = U2Model.from_config(model_conf) + if self.parallel: + model = paddle.DataParallel(model) + logger.info(f"{model}") + layer_tools.print_params(model, logger.info) + + # lr + scheduler_conf = config.scheduler_conf + scheduler_args = { + "learning_rate": scheduler_conf.lr, + "warmup_steps": scheduler_conf.warmup_steps, + "gamma": scheduler_conf.lr_decay, + "d_model": model_conf.encoder_conf.output_size, + "verbose": False, + } + lr_scheduler = LRSchedulerFactory.from_args(config.scheduler, + scheduler_args) + + # opt + def optimizer_args( + config, + parameters, + lr_scheduler=None, ): + optim_conf = config.optim_conf + return { + "grad_clip": optim_conf.global_grad_clip, + "weight_decay": optim_conf.weight_decay, + "learning_rate": lr_scheduler, + "parameters": parameters, + } + + optimzer_args = optimizer_args(config, model.parameters(), lr_scheduler) + optimizer = OptimizerFactory.from_args(config.optim, optimzer_args) + + self.model = model + self.lr_scheduler = lr_scheduler + self.optimizer = optimizer + logger.info("Setup model/optimizer/lr_scheduler!") + + +class U2Tester(U2Trainer): + @classmethod + def params(cls, config: Optional[CfgNode]=None) -> CfgNode: + # decoding config + default = CfgNode( + dict( + alpha=2.5, # Coef of LM for beam search. + beta=0.3, # Coef of WC for beam search. + cutoff_prob=1.0, # Cutoff probability for pruning. + cutoff_top_n=40, # Cutoff number for pruning. + lang_model_path='models/lm/common_crawl_00.prune01111.trie.klm', # Filepath for language model. + decoding_method='attention', # Decoding method. Options: 'attention', 'ctc_greedy_search', + # 'ctc_prefix_beam_search', 'attention_rescoring' + error_rate_type='wer', # Error rate type for evaluation. Options `wer`, 'cer' + num_proc_bsearch=8, # # of CPUs for beam search. + beam_size=10, # Beam search width. + batch_size=16, # decoding batch size + ctc_weight=0.0, # ctc weight for attention rescoring decode mode. + decoding_chunk_size=-1, # decoding chunk size. Defaults to -1. + # <0: for decoding, use full chunk. + # >0: for decoding, use fixed chunk size as set. + # 0: used for training, it's prohibited here. + num_decoding_left_chunks=-1, # number of left chunks for decoding. Defaults to -1. + simulate_streaming=False, # simulate streaming inference. Defaults to False. 
+ )) + + if config is not None: + config.merge_from_other_cfg(default) + return default + + def __init__(self, config, args): + super().__init__(config, args) + + def id2token(self, texts, texts_len, text_feature): + """ ord() id to chr() chr """ + trans = [] + for text, n in zip(texts, texts_len): + n = n.numpy().item() + ids = text[:n] + trans.append(text_feature.defeaturize(ids.numpy().tolist())) + return trans + + def compute_metrics(self, + utts, + audio, + audio_len, + texts, + texts_len, + fout=None): + cfg = self.config.decoding + errors_sum, len_refs, num_ins = 0.0, 0, 0 + errors_func = error_rate.char_errors if cfg.error_rate_type == 'cer' else error_rate.word_errors + error_rate_func = error_rate.cer if cfg.error_rate_type == 'cer' else error_rate.wer + + start_time = time.time() + text_feature = TextFeaturizer( + unit_type=self.config.collator.unit_type, + vocab_filepath=self.config.collator.vocab_filepath, + spm_model_prefix=self.config.collator.spm_model_prefix) + target_transcripts = self.id2token(texts, texts_len, text_feature) + result_transcripts = self.model.decode( + audio, + audio_len, + text_feature=text_feature, + decoding_method=cfg.decoding_method, + lang_model_path=cfg.lang_model_path, + beam_alpha=cfg.alpha, + beam_beta=cfg.beta, + beam_size=cfg.beam_size, + cutoff_prob=cfg.cutoff_prob, + cutoff_top_n=cfg.cutoff_top_n, + num_processes=cfg.num_proc_bsearch, + ctc_weight=cfg.ctc_weight, + decoding_chunk_size=cfg.decoding_chunk_size, + num_decoding_left_chunks=cfg.num_decoding_left_chunks, + simulate_streaming=cfg.simulate_streaming) + decode_time = time.time() - start_time + + for utt, target, result in zip(utts, target_transcripts, + result_transcripts): + errors, len_ref = errors_func(target, result) + errors_sum += errors + len_refs += len_ref + num_ins += 1 + if fout: + fout.write(utt + " " + result + "\n") + logger.info("\nTarget Transcription: %s\nOutput Transcription: %s" % + (target, result)) + logger.info("One example error rate [%s] = %f" % + (cfg.error_rate_type, error_rate_func(target, result))) + + return dict( + errors_sum=errors_sum, + len_refs=len_refs, + num_ins=num_ins, # num examples + error_rate=errors_sum / len_refs, + error_rate_type=cfg.error_rate_type, + num_frames=audio_len.sum().numpy().item(), + decode_time=decode_time) + + @mp_tools.rank_zero_only + @paddle.no_grad() + def test(self): + assert self.args.result_file + self.model.eval() + logger.info(f"Test Total Examples: {len(self.test_loader.dataset)}") + + stride_ms = self.config.collator.stride_ms + error_rate_type = None + errors_sum, len_refs, num_ins = 0.0, 0, 0 + num_frames = 0.0 + num_time = 0.0 + with open(self.args.result_file, 'w') as fout: + for i, batch in enumerate(self.test_loader): + metrics = self.compute_metrics(*batch, fout=fout) + num_frames += metrics['num_frames'] + num_time += metrics["decode_time"] + errors_sum += metrics['errors_sum'] + len_refs += metrics['len_refs'] + num_ins += metrics['num_ins'] + error_rate_type = metrics['error_rate_type'] + rtf = num_time / (num_frames * stride_ms) + logger.info( + "RTF: %f, Error rate [%s] (%d/?) 
= %f" % + (rtf, error_rate_type, num_ins, errors_sum / len_refs)) + + rtf = num_time / (num_frames * stride_ms) + msg = "Test: " + msg += "epoch: {}, ".format(self.epoch) + msg += "step: {}, ".format(self.iteration) + msg += "RTF: {}, ".format(rtf) + msg += "Final error rate [%s] (%d/%d) = %f" % ( + error_rate_type, num_ins, num_ins, errors_sum / len_refs) + logger.info(msg) + + # test meta results + err_meta_path = os.path.splitext(self.args.result_file)[0] + '.err' + err_type_str = "{}".format(error_rate_type) + with open(err_meta_path, 'w') as f: + data = json.dumps({ + "epoch": + self.epoch, + "step": + self.iteration, + "rtf": + rtf, + error_rate_type: + errors_sum / len_refs, + "dataset_hour": (num_frames * stride_ms) / 1000.0 / 3600.0, + "process_hour": + num_time / 1000.0 / 3600.0, + "num_examples": + num_ins, + "err_sum": + errors_sum, + "ref_len": + len_refs, + "decode_method": + self.config.decoding.decoding_method, + }) + f.write(data + '\n') + + def run_test(self): + self.resume_or_scratch() + try: + self.test() + except KeyboardInterrupt: + sys.exit(-1) + + @paddle.no_grad() + def align(self): + if self.config.decoding.batch_size > 1: + logger.fatal('alignment mode must be running with batch_size == 1') + sys.exit(1) + + # xxx.align + assert self.args.result_file and self.args.result_file.endswith( + '.align') + + self.model.eval() + logger.info(f"Align Total Examples: {len(self.align_loader.dataset)}") + + stride_ms = self.config.collater.stride_ms + token_dict = self.args.char_list + + with open(self.args.result_file, 'w') as fout: + # one example in batch + for i, batch in enumerate(self.align_loader): + key, feat, feats_length, target, target_length = batch + + # 1. Encoder + encoder_out, encoder_mask = self.model._forward_encoder( + feat, feats_length) # (B, maxlen, encoder_dim) + maxlen = encoder_out.size(1) + ctc_probs = self.model.ctc.log_softmax( + encoder_out) # (1, maxlen, vocab_size) + + # 2. alignment + ctc_probs = ctc_probs.squeeze(0) + target = target.squeeze(0) + alignment = ctc_utils.forced_align(ctc_probs, target) + logger.info("align ids", key[0], alignment) + fout.write('{} {}\n'.format(key[0], alignment)) + + # 3. gen praat + # segment alignment + align_segs = text_grid.segment_alignment(alignment) + logger.info("align tokens", key[0], align_segs) + # IntervalTier, List["start end token\n"] + subsample = utility.get_subsample(self.config) + tierformat = text_grid.align_to_tierformat( + align_segs, subsample, token_dict) + # write tier + align_output_path = os.path.join( + os.path.dirname(self.args.result_file), "align") + tier_path = os.path.join(align_output_path, key[0] + ".tier") + with open(tier_path, 'w') as f: + f.writelines(tierformat) + # write textgrid + textgrid_path = os.path.join(align_output_path, + key[0] + ".TextGrid") + second_per_frame = 1. / (1000. / + stride_ms) # 25ms window, 10ms stride + second_per_example = ( + len(alignment) + 1) * subsample * second_per_frame + text_grid.generate_textgrid( + maxtime=second_per_example, + intervals=tierformat, + output=textgrid_path) + + def run_align(self): + self.resume_or_scratch() + try: + self.align() + except KeyboardInterrupt: + sys.exit(-1) + + def load_inferspec(self): + """infer model and input spec. + + Returns: + nn.Layer: inference model + List[paddle.static.InputSpec]: input spec. 
+ """ + from deepspeech.models.u2 import U2InferModel + infer_model = U2InferModel.from_pretrained(self.test_loader, + self.config.model.clone(), + self.args.checkpoint_path) + feat_dim = self.test_loader.feat_dim + input_spec = [ + paddle.static.InputSpec(shape=[1, None, feat_dim], + dtype='float32'), # audio, [B,T,D] + paddle.static.InputSpec(shape=[1], + dtype='int64'), # audio_length, [B] + ] + return infer_model, input_spec + + def export(self): + infer_model, input_spec = self.load_inferspec() + assert isinstance(input_spec, list), type(input_spec) + infer_model.eval() + static_model = paddle.jit.to_static(infer_model, input_spec=input_spec) + logger.info(f"Export code: {static_model.forward.code}") + paddle.jit.save(static_model, self.args.export_path) + + def run_export(self): + try: + self.export() + except KeyboardInterrupt: + sys.exit(-1) + + def setup_dict(self): + # load dictionary for debug log + self.args.char_list = load_dict(self.args.dict_path, + "maskctc" in self.args.model_name) + + def setup(self): + """Setup the experiment. + """ + paddle.set_device(self.args.device) + + self.setup_output_dir() + self.setup_checkpointer() + + self.setup_dataloader() + self.setup_model() + + self.setup_dict() + + self.iteration = 0 + self.epoch = 0 + + def setup_output_dir(self): + """Create a directory used for output. + """ + # output dir + if self.args.output: + output_dir = Path(self.args.output).expanduser() + output_dir.mkdir(parents=True, exist_ok=True) + else: + output_dir = Path( + self.args.checkpoint_path).expanduser().parent.parent + output_dir.mkdir(parents=True, exist_ok=True) + + self.output_dir = output_dir diff --git a/deepspeech/exps/u2_st/__init__.py b/deepspeech/exps/u2_st/__init__.py new file mode 100644 index 000000000..185a92b8d --- /dev/null +++ b/deepspeech/exps/u2_st/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/deepspeech/exps/u2_st/bin/export.py b/deepspeech/exps/u2_st/bin/export.py new file mode 100644 index 000000000..c7eb5d03b --- /dev/null +++ b/deepspeech/exps/u2_st/bin/export.py @@ -0,0 +1,51 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Export for U2 model.""" +from deepspeech.exps.u2_st.config import get_cfg_defaults +from deepspeech.exps.u2_st.model import U2STTester as Tester +from deepspeech.training.cli import default_argument_parser +from deepspeech.utils.utility import print_arguments + + +def main_sp(config, args): + exp = Tester(config, args) + exp.setup() + exp.run_export() + + +def main(config, args): + main_sp(config, args) + + +if __name__ == "__main__": + parser = default_argument_parser() + # save jit model to + parser.add_argument( + "--export_path", type=str, help="path of the jit model to save") + args = parser.parse_args() + print_arguments(args, globals()) + + # https://yaml.org/type/float.html + config = get_cfg_defaults() + if args.config: + config.merge_from_file(args.config) + if args.opts: + config.merge_from_list(args.opts) + config.freeze() + print(config) + if args.dump_config: + with open(args.dump_config, 'w') as f: + print(config, file=f) + + main(config, args) diff --git a/deepspeech/exps/u2_st/bin/test.py b/deepspeech/exps/u2_st/bin/test.py new file mode 100644 index 000000000..81197decf --- /dev/null +++ b/deepspeech/exps/u2_st/bin/test.py @@ -0,0 +1,58 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Evaluation for U2 model.""" +import cProfile + +from deepspeech.exps.u2_st.config import get_cfg_defaults +from deepspeech.exps.u2_st.model import U2STTester as Tester +from deepspeech.training.cli import default_argument_parser +from deepspeech.utils.utility import print_arguments + +# TODO(hui zhang): dynamic load + + +def main_sp(config, args): + exp = Tester(config, args) + exp.setup() + exp.run_test() + + +def main(config, args): + main_sp(config, args) + + +if __name__ == "__main__": + parser = default_argument_parser() + # save asr result to + parser.add_argument( + "--result_file", type=str, help="path of save the asr result") + args = parser.parse_args() + print_arguments(args, globals()) + + # https://yaml.org/type/float.html + config = get_cfg_defaults() + if args.config: + config.merge_from_file(args.config) + if args.opts: + config.merge_from_list(args.opts) + config.freeze() + print(config) + if args.dump_config: + with open(args.dump_config, 'w') as f: + print(config, file=f) + + # Setting for profiling + pr = cProfile.Profile() + pr.runcall(main, config, args) + pr.dump_stats('test.profile') diff --git a/deepspeech/exps/u2_st/bin/train.py b/deepspeech/exps/u2_st/bin/train.py new file mode 100644 index 000000000..86a0f0000 --- /dev/null +++ b/deepspeech/exps/u2_st/bin/train.py @@ -0,0 +1,59 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Trainer for U2 model.""" +import cProfile +import os + +from paddle import distributed as dist + +from deepspeech.exps.u2_st.config import get_cfg_defaults +from deepspeech.exps.u2_st.model import U2STTrainer as Trainer +from deepspeech.training.cli import default_argument_parser +from deepspeech.utils.utility import print_arguments + + +def main_sp(config, args): + exp = Trainer(config, args) + exp.setup() + exp.run() + + +def main(config, args): + if args.device == "gpu" and args.nprocs > 1: + dist.spawn(main_sp, args=(config, args), nprocs=args.nprocs) + else: + main_sp(config, args) + + +if __name__ == "__main__": + parser = default_argument_parser() + args = parser.parse_args() + print_arguments(args, globals()) + + # https://yaml.org/type/float.html + config = get_cfg_defaults() + if args.config: + config.merge_from_file(args.config) + if args.opts: + config.merge_from_list(args.opts) + config.freeze() + print(config) + if args.dump_config: + with open(args.dump_config, 'w') as f: + print(config, file=f) + + # Setting for profiling + pr = cProfile.Profile() + pr.runcall(main, config, args) + pr.dump_stats(os.path.join(args.output, 'train.profile')) diff --git a/deepspeech/exps/u2_st/config.py b/deepspeech/exps/u2_st/config.py new file mode 100644 index 000000000..b1b7b357d --- /dev/null +++ b/deepspeech/exps/u2_st/config.py @@ -0,0 +1,41 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from yacs.config import CfgNode + +from deepspeech.exps.u2_st.model import U2STTester +from deepspeech.exps.u2_st.model import U2STTrainer +from deepspeech.io.collator_st import SpeechCollator +from deepspeech.io.dataset import ManifestDataset +from deepspeech.models.u2_st import U2STModel + +_C = CfgNode() + +_C.data = ManifestDataset.params() + +_C.collator = SpeechCollator.params() + +_C.model = U2STModel.params() + +_C.training = U2STTrainer.params() + +_C.decoding = U2STTester.params() + + +def get_cfg_defaults(): + """Get a yacs CfgNode object with default values for my_project.""" + # Return a clone so that the defaults will not be altered + # This is for the "local variable" use pattern + config = _C.clone() + config.set_new_allowed(True) + return config diff --git a/deepspeech/exps/u2_st/model.py b/deepspeech/exps/u2_st/model.py new file mode 100644 index 000000000..0fa8ed735 --- /dev/null +++ b/deepspeech/exps/u2_st/model.py @@ -0,0 +1,695 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains U2 model.""" +import json +import os +import sys +import time +from collections import defaultdict +from contextlib import nullcontext +from pathlib import Path +from typing import Optional + +import numpy as np +import paddle +from paddle import distributed as dist +from paddle.io import DataLoader +from yacs.config import CfgNode + +from deepspeech.io.collator_st import KaldiPrePorocessedCollator +from deepspeech.io.collator_st import SpeechCollator +from deepspeech.io.collator_st import TripletKaldiPrePorocessedCollator +from deepspeech.io.collator_st import TripletSpeechCollator +from deepspeech.io.dataset import ManifestDataset +from deepspeech.io.dataset import TripletManifestDataset +from deepspeech.io.sampler import SortagradBatchSampler +from deepspeech.io.sampler import SortagradDistributedBatchSampler +from deepspeech.models.u2_st import U2STModel +from deepspeech.training.gradclip import ClipGradByGlobalNormWithLog +from deepspeech.training.scheduler import WarmupLR +from deepspeech.training.timer import Timer +from deepspeech.training.trainer import Trainer +from deepspeech.utils import bleu_score +from deepspeech.utils import ctc_utils +from deepspeech.utils import layer_tools +from deepspeech.utils import mp_tools +from deepspeech.utils import text_grid +from deepspeech.utils import utility +from deepspeech.utils.log import Log +from deepspeech.utils.utility import UpdateConfig + +logger = Log(__name__).getlog() + + +class U2STTrainer(Trainer): + @classmethod + def params(cls, config: Optional[CfgNode]=None) -> CfgNode: + # training config + default = CfgNode( + dict( + n_epoch=50, # train epochs + log_interval=100, # steps + accum_grad=1, # accum grad by # steps + global_grad_clip=5.0, # the global norm clip + )) + default.optim = 'adam' + default.optim_conf = CfgNode( + dict( + lr=5e-4, # learning rate + weight_decay=1e-6, # the coeff of weight decay + )) + default.scheduler = 'warmuplr' + default.scheduler_conf = CfgNode( + dict( + warmup_steps=25000, + lr_decay=1.0, # learning rate decay + )) + + if config is not None: + config.merge_from_other_cfg(default) + return default + + def __init__(self, config, args): + super().__init__(config, args) + + def train_batch(self, batch_index, batch_data, msg): + train_conf = self.config.training + start = time.time() + # forward + utt, audio, audio_len, text, text_len = batch_data + if isinstance(text, list) and isinstance(text_len, list): + # joint training with ASR. 
Two decoding texts [translation, transcription] + text, text_transcript = text + text_len, text_transcript_len = text_len + loss, st_loss, attention_loss, ctc_loss = self.model( + audio, audio_len, text, text_len, text_transcript, + text_transcript_len) + else: + loss, st_loss, attention_loss, ctc_loss = self.model( + audio, audio_len, text, text_len) + + # loss div by `batch_size * accum_grad` + loss /= train_conf.accum_grad + losses_np = {'loss': float(loss) * train_conf.accum_grad} + if attention_loss: + losses_np['att_loss'] = float(attention_loss) + if ctc_loss: + losses_np['ctc_loss'] = float(ctc_loss) + + # loss backward + if (batch_index + 1) % train_conf.accum_grad != 0: + # Disable gradient synchronizations across DDP processes. + # Within this context, gradients will be accumulated on module + # variables, which will later be synchronized. + context = self.model.no_sync + else: + # Used for single gpu training and DDP gradient synchronization + # processes. + context = nullcontext + with context(): + loss.backward() + layer_tools.print_grads(self.model, print_func=None) + + # optimizer step + if (batch_index + 1) % train_conf.accum_grad == 0: + self.optimizer.step() + self.optimizer.clear_grad() + self.lr_scheduler.step() + self.iteration += 1 + + iteration_time = time.time() - start + + if (batch_index + 1) % train_conf.log_interval == 0: + msg += "train time: {:>.3f}s, ".format(iteration_time) + msg += "batch size: {}, ".format(self.config.collator.batch_size) + msg += "accum: {}, ".format(train_conf.accum_grad) + msg += ', '.join('{}: {:>.6f}'.format(k, v) + for k, v in losses_np.items()) + logger.info(msg) + + if dist.get_rank() == 0 and self.visualizer: + losses_np_v = losses_np.copy() + losses_np_v.update({"lr": self.lr_scheduler()}) + self.visualizer.add_scalars("step", losses_np_v, + self.iteration - 1) + + @paddle.no_grad() + def valid(self): + self.model.eval() + logger.info(f"Valid Total Examples: {len(self.valid_loader.dataset)}") + valid_losses = defaultdict(list) + num_seen_utts = 1 + total_loss = 0.0 + for i, batch in enumerate(self.valid_loader): + utt, audio, audio_len, text, text_len = batch + if isinstance(text, list) and isinstance(text_len, list): + text, text_transcript = text + text_len, text_transcript_len = text_len + loss, st_loss, attention_loss, ctc_loss = self.model( + audio, audio_len, text, text_len, text_transcript, + text_transcript_len) + else: + loss, st_loss, attention_loss, ctc_loss = self.model( + audio, audio_len, text, text_len) + if paddle.isfinite(loss): + num_utts = batch[1].shape[0] + num_seen_utts += num_utts + total_loss += float(st_loss) * num_utts + valid_losses['val_loss'].append(float(st_loss)) + if attention_loss: + valid_losses['val_att_loss'].append(float(attention_loss)) + if ctc_loss: + valid_losses['val_ctc_loss'].append(float(ctc_loss)) + + if (i + 1) % self.config.training.log_interval == 0: + valid_dump = {k: np.mean(v) for k, v in valid_losses.items()} + valid_dump['val_history_st_loss'] = total_loss / num_seen_utts + + # logging + msg = f"Valid: Rank: {dist.get_rank()}, " + msg += "epoch: {}, ".format(self.epoch) + msg += "step: {}, ".format(self.iteration) + msg += "batch: {}/{}, ".format(i + 1, len(self.valid_loader)) + msg += ', '.join('{}: {:>.6f}'.format(k, v) + for k, v in valid_dump.items()) + logger.info(msg) + + logger.info('Rank {} Val info st_val_loss {}'.format( + dist.get_rank(), total_loss / num_seen_utts)) + return total_loss, num_seen_utts + + def train(self): + """The training process control by 
step.""" + # !!!IMPORTANT!!! + # Try to export the model by script, if fails, we should refine + # the code to satisfy the script export requirements + # script_model = paddle.jit.to_static(self.model) + # script_model_path = str(self.checkpoint_dir / 'init') + # paddle.jit.save(script_model, script_model_path) + + from_scratch = self.resume_or_scratch() + if from_scratch: + # save init model, i.e. 0 epoch + self.save(tag='init') + + self.lr_scheduler.step(self.iteration) + if self.parallel: + self.train_loader.batch_sampler.set_epoch(self.epoch) + + logger.info(f"Train Total Examples: {len(self.train_loader.dataset)}") + while self.epoch < self.config.training.n_epoch: + with Timer("Epoch-Train Time Cost: {}"): + self.model.train() + try: + data_start_time = time.time() + for batch_index, batch in enumerate(self.train_loader): + dataload_time = time.time() - data_start_time + msg = "Train: Rank: {}, ".format(dist.get_rank()) + msg += "epoch: {}, ".format(self.epoch) + msg += "step: {}, ".format(self.iteration) + msg += "batch : {}/{}, ".format(batch_index + 1, + len(self.train_loader)) + msg += "lr: {:>.8f}, ".format(self.lr_scheduler()) + msg += "data time: {:>.3f}s, ".format(dataload_time) + self.train_batch(batch_index, batch, msg) + self.after_train_batch() + data_start_time = time.time() + except Exception as e: + logger.error(e) + raise e + + with Timer("Eval Time Cost: {}"): + total_loss, num_seen_utts = self.valid() + if dist.get_world_size() > 1: + num_seen_utts = paddle.to_tensor(num_seen_utts) + # the default operator in all_reduce function is sum. + dist.all_reduce(num_seen_utts) + total_loss = paddle.to_tensor(total_loss) + dist.all_reduce(total_loss) + cv_loss = total_loss / num_seen_utts + cv_loss = float(cv_loss) + else: + cv_loss = total_loss / num_seen_utts + + logger.info( + 'Epoch {} Val info val_loss {}'.format(self.epoch, cv_loss)) + if self.visualizer: + self.visualizer.add_scalars( + 'epoch', {'cv_loss': cv_loss, + 'lr': self.lr_scheduler()}, self.epoch) + self.save(tag=self.epoch, infos={'val_loss': cv_loss}) + self.new_epoch() + + def setup_dataloader(self): + config = self.config.clone() + config.defrost() + config.collator.keep_transcription_text = False + + # train/valid dataset, return token ids + Dataset = TripletManifestDataset if config.model.model_conf.asr_weight > 0. else ManifestDataset + config.data.manifest = config.data.train_manifest + train_dataset = Dataset.from_config(config) + + config.data.manifest = config.data.dev_manifest + dev_dataset = Dataset.from_config(config) + + if config.collator.raw_wav: + if config.model.model_conf.asr_weight > 0.: + Collator = TripletSpeechCollator + TestCollator = SpeechCollator + else: + TestCollator = Collator = SpeechCollator + # Not yet implement the mtl loader for raw_wav. 
+ else: + if config.model.model_conf.asr_weight > 0.: + Collator = TripletKaldiPrePorocessedCollator + TestCollator = KaldiPrePorocessedCollator + else: + TestCollator = Collator = KaldiPrePorocessedCollator + + collate_fn_train = Collator.from_config(config) + + config.collator.augmentation_config = "" + collate_fn_dev = Collator.from_config(config) + + if self.parallel: + batch_sampler = SortagradDistributedBatchSampler( + train_dataset, + batch_size=config.collator.batch_size, + num_replicas=None, + rank=None, + shuffle=True, + drop_last=True, + sortagrad=config.collator.sortagrad, + shuffle_method=config.collator.shuffle_method) + else: + batch_sampler = SortagradBatchSampler( + train_dataset, + shuffle=True, + batch_size=config.collator.batch_size, + drop_last=True, + sortagrad=config.collator.sortagrad, + shuffle_method=config.collator.shuffle_method) + self.train_loader = DataLoader( + train_dataset, + batch_sampler=batch_sampler, + collate_fn=collate_fn_train, + num_workers=config.collator.num_workers, ) + self.valid_loader = DataLoader( + dev_dataset, + batch_size=config.collator.batch_size, + shuffle=False, + drop_last=False, + collate_fn=collate_fn_dev) + + # test dataset, return raw text + config.data.manifest = config.data.test_manifest + # filter test examples, will cause less examples, but no mismatch with training + # and can use large batch size , save training time, so filter test egs now. + # config.data.min_input_len = 0.0 # second + # config.data.max_input_len = float('inf') # second + # config.data.min_output_len = 0.0 # tokens + # config.data.max_output_len = float('inf') # tokens + # config.data.min_output_input_ratio = 0.00 + # config.data.max_output_input_ratio = float('inf') + test_dataset = ManifestDataset.from_config(config) + # return text ord id + config.collator.keep_transcription_text = True + config.collator.augmentation_config = "" + self.test_loader = DataLoader( + test_dataset, + batch_size=config.decoding.batch_size, + shuffle=False, + drop_last=False, + collate_fn=TestCollator.from_config(config)) + # return text token id + config.collator.keep_transcription_text = False + self.align_loader = DataLoader( + test_dataset, + batch_size=config.decoding.batch_size, + shuffle=False, + drop_last=False, + collate_fn=TestCollator.from_config(config)) + logger.info("Setup train/valid/test/align Dataloader!") + + def setup_model(self): + config = self.config + model_conf = config.model + with UpdateConfig(model_conf): + model_conf.input_dim = self.train_loader.collate_fn.feature_size + model_conf.output_dim = self.train_loader.collate_fn.vocab_size + + model = U2STModel.from_config(model_conf) + + if self.parallel: + model = paddle.DataParallel(model) + + logger.info(f"{model}") + layer_tools.print_params(model, logger.info) + + train_config = config.training + optim_type = train_config.optim + optim_conf = train_config.optim_conf + scheduler_type = train_config.scheduler + scheduler_conf = train_config.scheduler_conf + + if scheduler_type == 'expdecaylr': + lr_scheduler = paddle.optimizer.lr.ExponentialDecay( + learning_rate=optim_conf.lr, + gamma=scheduler_conf.lr_decay, + verbose=False) + elif scheduler_type == 'warmuplr': + lr_scheduler = WarmupLR( + learning_rate=optim_conf.lr, + warmup_steps=scheduler_conf.warmup_steps, + verbose=False) + elif scheduler_type == 'noam': + lr_scheduler = paddle.optimizer.lr.NoamDecay( + learning_rate=optim_conf.lr, + d_model=model_conf.encoder_conf.output_size, + warmup_steps=scheduler_conf.warmup_steps, + verbose=False) + 
else: + raise ValueError(f"Not support scheduler: {scheduler_type}") + + grad_clip = ClipGradByGlobalNormWithLog(train_config.global_grad_clip) + weight_decay = paddle.regularizer.L2Decay(optim_conf.weight_decay) + if optim_type == 'adam': + optimizer = paddle.optimizer.Adam( + learning_rate=lr_scheduler, + parameters=model.parameters(), + weight_decay=weight_decay, + grad_clip=grad_clip) + else: + raise ValueError(f"Not support optim: {optim_type}") + + self.model = model + self.optimizer = optimizer + self.lr_scheduler = lr_scheduler + logger.info("Setup model/optimizer/lr_scheduler!") + + +class U2STTester(U2STTrainer): + @classmethod + def params(cls, config: Optional[CfgNode]=None) -> CfgNode: + # decoding config + default = CfgNode( + dict( + alpha=2.5, # Coef of LM for beam search. + beta=0.3, # Coef of WC for beam search. + cutoff_prob=1.0, # Cutoff probability for pruning. + cutoff_top_n=40, # Cutoff number for pruning. + lang_model_path='models/lm/common_crawl_00.prune01111.trie.klm', # Filepath for language model. + decoding_method='attention', # Decoding method. Options: 'attention', 'ctc_greedy_search', + # 'ctc_prefix_beam_search', 'attention_rescoring' + error_rate_type='bleu', # Error rate type for evaluation. Options `bleu`, 'char_bleu' + num_proc_bsearch=8, # # of CPUs for beam search. + beam_size=10, # Beam search width. + batch_size=16, # decoding batch size + ctc_weight=0.0, # ctc weight for attention rescoring decode mode. + decoding_chunk_size=-1, # decoding chunk size. Defaults to -1. + # <0: for decoding, use full chunk. + # >0: for decoding, use fixed chunk size as set. + # 0: used for training, it's prohibited here. + num_decoding_left_chunks=-1, # number of left chunks for decoding. Defaults to -1. + simulate_streaming=False, # simulate streaming inference. Defaults to False. 
+ )) + + if config is not None: + config.merge_from_other_cfg(default) + return default + + def __init__(self, config, args): + super().__init__(config, args) + + def ordid2token(self, texts, texts_len): + """ ord() id to chr() chr """ + trans = [] + for text, n in zip(texts, texts_len): + n = n.numpy().item() + ids = text[:n] + trans.append(''.join([chr(i) for i in ids])) + return trans + + def compute_translation_metrics(self, + utts, + audio, + audio_len, + texts, + texts_len, + bleu_func, + fout=None): + cfg = self.config.decoding + len_refs, num_ins = 0, 0 + + start_time = time.time() + text_feature = self.test_loader.collate_fn.text_feature + + refs = [ + "".join(chr(t) for t in text[:text_len]) + for text, text_len in zip(texts, texts_len) + ] + # from IPython import embed + # import os + # embed() + # os._exit(0) + hyps = self.model.decode( + audio, + audio_len, + text_feature=text_feature, + decoding_method=cfg.decoding_method, + lang_model_path=cfg.lang_model_path, + beam_alpha=cfg.alpha, + beam_beta=cfg.beta, + beam_size=cfg.beam_size, + cutoff_prob=cfg.cutoff_prob, + cutoff_top_n=cfg.cutoff_top_n, + num_processes=cfg.num_proc_bsearch, + ctc_weight=cfg.ctc_weight, + decoding_chunk_size=cfg.decoding_chunk_size, + num_decoding_left_chunks=cfg.num_decoding_left_chunks, + simulate_streaming=cfg.simulate_streaming) + decode_time = time.time() - start_time + + for utt, target, result in zip(utts, refs, hyps): + len_refs += len(target.split()) + num_ins += 1 + if fout: + fout.write(utt + " " + result + "\n") + logger.info("\nReference: %s\nHypothesis: %s" % (target, result)) + logger.info("One example BLEU = %s" % + (bleu_func([result], [[target]]).prec_str)) + + return dict( + hyps=hyps, + refs=refs, + bleu=bleu_func(hyps, [refs]).score, + len_refs=len_refs, + num_ins=num_ins, # num examples + num_frames=audio_len.sum().numpy().item(), + decode_time=decode_time) + + @mp_tools.rank_zero_only + @paddle.no_grad() + def test(self): + assert self.args.result_file + self.model.eval() + logger.info(f"Test Total Examples: {len(self.test_loader.dataset)}") + + cfg = self.config.decoding + bleu_func = bleu_score.char_bleu if cfg.error_rate_type == 'char-bleu' else bleu_score.bleu + + stride_ms = self.test_loader.collate_fn.stride_ms + hyps, refs = [], [] + len_refs, num_ins = 0, 0 + num_frames = 0.0 + num_time = 0.0 + with open(self.args.result_file, 'w') as fout: + for i, batch in enumerate(self.test_loader): + metrics = self.compute_translation_metrics( + *batch, bleu_func=bleu_func, fout=fout) + hyps += metrics['hyps'] + refs += metrics['refs'] + bleu = metrics['bleu'] + num_frames += metrics['num_frames'] + num_time += metrics["decode_time"] + len_refs += metrics['len_refs'] + num_ins += metrics['num_ins'] + rtf = num_time / (num_frames * stride_ms) + logger.info("RTF: %f, BELU (%d) = %f" % (rtf, num_ins, bleu)) + + rtf = num_time / (num_frames * stride_ms) + msg = "Test: " + msg += "epoch: {}, ".format(self.epoch) + msg += "step: {}, ".format(self.iteration) + msg += "RTF: {}, ".format(rtf) + msg += "Test set [%s]: %s" % (len(hyps), str(bleu_func(hyps, [refs]))) + logger.info(msg) + bleu_meta_path = os.path.splitext(self.args.result_file)[0] + '.bleu' + err_type_str = "BLEU" + with open(bleu_meta_path, 'w') as f: + data = json.dumps({ + "epoch": + self.epoch, + "step": + self.iteration, + "rtf": + rtf, + err_type_str: + bleu_func(hyps, [refs]).score, + "dataset_hour": (num_frames * stride_ms) / 1000.0 / 3600.0, + "process_hour": + num_time / 1000.0 / 3600.0, + "num_examples": + num_ins, + 
"decode_method": + self.config.decoding.decoding_method, + }) + f.write(data + '\n') + + def run_test(self): + self.resume_or_scratch() + try: + self.test() + except KeyboardInterrupt: + sys.exit(-1) + + @paddle.no_grad() + def align(self): + if self.config.decoding.batch_size > 1: + logger.fatal('alignment mode must be running with batch_size == 1') + sys.exit(1) + + # xxx.align + assert self.args.result_file and self.args.result_file.endswith( + '.align') + + self.model.eval() + logger.info(f"Align Total Examples: {len(self.align_loader.dataset)}") + + stride_ms = self.align_loader.collate_fn.stride_ms + token_dict = self.align_loader.collate_fn.vocab_list + with open(self.args.result_file, 'w') as fout: + # one example in batch + for i, batch in enumerate(self.align_loader): + key, feat, feats_length, target, target_length = batch + + # 1. Encoder + encoder_out, encoder_mask = self.model._forward_encoder( + feat, feats_length) # (B, maxlen, encoder_dim) + maxlen = encoder_out.size(1) + ctc_probs = self.model.ctc.log_softmax( + encoder_out) # (1, maxlen, vocab_size) + + # 2. alignment + ctc_probs = ctc_probs.squeeze(0) + target = target.squeeze(0) + alignment = ctc_utils.forced_align(ctc_probs, target) + logger.info("align ids", key[0], alignment) + fout.write('{} {}\n'.format(key[0], alignment)) + + # 3. gen praat + # segment alignment + align_segs = text_grid.segment_alignment(alignment) + logger.info("align tokens", key[0], align_segs) + # IntervalTier, List["start end token\n"] + subsample = utility.get_subsample(self.config) + tierformat = text_grid.align_to_tierformat( + align_segs, subsample, token_dict) + # write tier + align_output_path = os.path.join( + os.path.dirname(self.args.result_file), "align") + tier_path = os.path.join(align_output_path, key[0] + ".tier") + with open(tier_path, 'w') as f: + f.writelines(tierformat) + # write textgrid + textgrid_path = os.path.join(align_output_path, + key[0] + ".TextGrid") + second_per_frame = 1. / (1000. / + stride_ms) # 25ms window, 10ms stride + second_per_example = ( + len(alignment) + 1) * subsample * second_per_frame + text_grid.generate_textgrid( + maxtime=second_per_example, + intervals=tierformat, + output=textgrid_path) + + def run_align(self): + self.resume_or_scratch() + try: + self.align() + except KeyboardInterrupt: + sys.exit(-1) + + def load_inferspec(self): + """infer model and input spec. + + Returns: + nn.Layer: inference model + List[paddle.static.InputSpec]: input spec. + """ + from deepspeech.models.u2 import U2InferModel + infer_model = U2InferModel.from_pretrained(self.test_loader, + self.config.model.clone(), + self.args.checkpoint_path) + feat_dim = self.test_loader.collate_fn.feature_size + input_spec = [ + paddle.static.InputSpec(shape=[1, None, feat_dim], + dtype='float32'), # audio, [B,T,D] + paddle.static.InputSpec(shape=[1], + dtype='int64'), # audio_length, [B] + ] + return infer_model, input_spec + + def export(self): + infer_model, input_spec = self.load_inferspec() + assert isinstance(input_spec, list), type(input_spec) + infer_model.eval() + static_model = paddle.jit.to_static(infer_model, input_spec=input_spec) + logger.info(f"Export code: {static_model.forward.code}") + paddle.jit.save(static_model, self.args.export_path) + + def run_export(self): + try: + self.export() + except KeyboardInterrupt: + sys.exit(-1) + + def setup(self): + """Setup the experiment. 
+ """ + paddle.set_device(self.args.device) + + self.setup_output_dir() + self.setup_checkpointer() + + self.setup_dataloader() + self.setup_model() + + self.iteration = 0 + self.epoch = 0 + + def setup_output_dir(self): + """Create a directory used for output. + """ + # output dir + if self.args.output: + output_dir = Path(self.args.output).expanduser() + output_dir.mkdir(parents=True, exist_ok=True) + else: + output_dir = Path( + self.args.checkpoint_path).expanduser().parent.parent + output_dir.mkdir(parents=True, exist_ok=True) + + self.output_dir = output_dir diff --git a/deepspeech/frontend/augmentor/augmentation.py b/deepspeech/frontend/augmentor/augmentation.py index cc0564daf..17abcf605 100644 --- a/deepspeech/frontend/augmentor/augmentation.py +++ b/deepspeech/frontend/augmentor/augmentation.py @@ -13,18 +13,28 @@ # limitations under the License. """Contains the data augmentation pipeline.""" import json +from collections.abc import Sequence +from inspect import signature import numpy as np -from deepspeech.frontend.augmentor.impulse_response import ImpulseResponseAugmentor -from deepspeech.frontend.augmentor.noise_perturb import NoisePerturbAugmentor -from deepspeech.frontend.augmentor.online_bayesian_normalization import \ - OnlineBayesianNormalizationAugmentor -from deepspeech.frontend.augmentor.resample import ResampleAugmentor -from deepspeech.frontend.augmentor.shift_perturb import ShiftPerturbAugmentor -from deepspeech.frontend.augmentor.spec_augment import SpecAugmentor -from deepspeech.frontend.augmentor.speed_perturb import SpeedPerturbAugmentor -from deepspeech.frontend.augmentor.volume_perturb import VolumePerturbAugmentor +from deepspeech.frontend.augmentor.base import AugmentorBase +from deepspeech.utils.dynamic_import import dynamic_import +from deepspeech.utils.log import Log + +__all__ = ["AugmentationPipeline"] + +logger = Log(__name__).getlog() + +import_alias = dict( + volume="deepspeech.frontend.augmentor.impulse_response:VolumePerturbAugmentor", + shift="deepspeech.frontend.augmentor.shift_perturb:ShiftPerturbAugmentor", + speed="deepspeech.frontend.augmentor.speed_perturb:SpeedPerturbAugmentor", + resample="deepspeech.frontend.augmentor.resample:ResampleAugmentor", + bayesian_normal="deepspeech.frontend.augmentor.online_bayesian_normalization:OnlineBayesianNormalizationAugmentor", + noise="deepspeech.frontend.augmentor.noise_perturb:NoisePerturbAugmentor", + impulse="deepspeech.frontend.augmentor.impulse_response:ImpulseResponseAugmentor", + specaug="deepspeech.frontend.augmentor.spec_augment:SpecAugmentor", ) class AugmentationPipeline(): @@ -78,20 +88,74 @@ class AugmentationPipeline(): augmentor to take effect. If "prob" is zero, the augmentor does not take effect. - :param augmentation_config: Augmentation configuration in json string. - :type augmentation_config: str - :param random_seed: Random seed. - :type random_seed: int - :raises ValueError: If the augmentation json config is in incorrect format". + Params: + augmentation_config(str): Augmentation configuration in json string. + random_seed(int): Random seed. + train(bool): whether is train mode. + + Raises: + ValueError: If the augmentation json config is in incorrect format". 
""" - def __init__(self, augmentation_config: str, random_seed=0): + SPEC_TYPES = {'specaug'} + + def __init__(self, augmentation_config: str, random_seed: int=0): self._rng = np.random.RandomState(random_seed) - self._spec_types = ('specaug') - self._augmentors, self._rates = self._parse_pipeline_from( - augmentation_config, 'audio') + self.conf = {'mode': 'sequential', 'process': []} + if augmentation_config: + process = json.loads(augmentation_config) + self.conf['process'] += process + + self._augmentors, self._rates = self._parse_pipeline_from('all') + self._audio_augmentors, self._audio_rates = self._parse_pipeline_from( + 'audio') self._spec_augmentors, self._spec_rates = self._parse_pipeline_from( - augmentation_config, 'feature') + 'feature') + + def __call__(self, xs, uttid_list=None, **kwargs): + if not isinstance(xs, Sequence): + is_batch = False + xs = [xs] + else: + is_batch = True + + if isinstance(uttid_list, str): + uttid_list = [uttid_list for _ in range(len(xs))] + + if self.conf.get("mode", "sequential") == "sequential": + for idx, (func, rate) in enumerate( + zip(self._augmentors, self._rates), 0): + if self._rng.uniform(0., 1.) >= rate: + continue + + # Derive only the args which the func has + try: + param = signature(func).parameters + except ValueError: + # Some function, e.g. built-in function, are failed + param = {} + _kwargs = {k: v for k, v in kwargs.items() if k in param} + + try: + if uttid_list is not None and "uttid" in param: + xs = [ + func(x, u, **_kwargs) + for x, u in zip(xs, uttid_list) + ] + else: + xs = [func(x, **_kwargs) for x in xs] + except Exception: + logger.fatal("Catch a exception from {}th func: {}".format( + idx, func)) + raise + else: + raise NotImplementedError( + "Not supporting mode={}".format(self.conf["mode"])) + + if is_batch: + return xs + else: + return xs[0] def transform_audio(self, audio_segment): """Run the pre-processing pipeline for data augmentation. @@ -101,7 +165,7 @@ class AugmentationPipeline(): :param audio_segment: Audio segment to process. :type audio_segment: AudioSegmenet|SpeechSegment """ - for augmentor, rate in zip(self._augmentors, self._rates): + for augmentor, rate in zip(self._audio_augmentors, self._audio_rates): if self._rng.uniform(0., 1.) 
< rate: augmentor.transform_audio(audio_segment) @@ -116,52 +180,39 @@ class AugmentationPipeline(): spec_segment = augmentor.transform_feature(spec_segment) return spec_segment - def _parse_pipeline_from(self, config_json, aug_type='audio'): + def _parse_pipeline_from(self, aug_type='all'): """Parse the config json to build a augmentation pipelien.""" - assert aug_type in ('audio', 'feature'), aug_type - try: - configs = json.loads(config_json) - audio_confs = [] - feature_confs = [] - for config in configs: - if config["type"] in self._spec_types: - feature_confs.append(config) - else: - audio_confs.append(config) - - if aug_type == 'audio': - aug_confs = audio_confs - elif aug_type == 'feature': - aug_confs = feature_confs - - augmentors = [ - self._get_augmentor(config["type"], config["params"]) - for config in aug_confs - ] - rates = [config["prob"] for config in aug_confs] - - except Exception as e: - raise ValueError("Failed to parse the augmentation config json: " - "%s" % str(e)) + assert aug_type in ('audio', 'feature', 'all'), aug_type + audio_confs = [] + feature_confs = [] + all_confs = [] + for config in self.conf['process']: + all_confs.append(config) + if config["type"] in self.SPEC_TYPES: + feature_confs.append(config) + else: + audio_confs.append(config) + + if aug_type == 'audio': + aug_confs = audio_confs + elif aug_type == 'feature': + aug_confs = feature_confs + else: + aug_confs = all_confs + + augmentors = [ + self._get_augmentor(config["type"], config["params"]) + for config in aug_confs + ] + rates = [config["prob"] for config in aug_confs] return augmentors, rates def _get_augmentor(self, augmentor_type, params): """Return an augmentation model by the type name, and pass in params.""" - if augmentor_type == "volume": - return VolumePerturbAugmentor(self._rng, **params) - elif augmentor_type == "shift": - return ShiftPerturbAugmentor(self._rng, **params) - elif augmentor_type == "speed": - return SpeedPerturbAugmentor(self._rng, **params) - elif augmentor_type == "resample": - return ResampleAugmentor(self._rng, **params) - elif augmentor_type == "bayesian_normal": - return OnlineBayesianNormalizationAugmentor(self._rng, **params) - elif augmentor_type == "noise": - return NoisePerturbAugmentor(self._rng, **params) - elif augmentor_type == "impulse": - return ImpulseResponseAugmentor(self._rng, **params) - elif augmentor_type == "specaug": - return SpecAugmentor(self._rng, **params) - else: + class_obj = dynamic_import(augmentor_type, import_alias) + assert issubclass(class_obj, AugmentorBase) + try: + obj = class_obj(self._rng, **params) + except Exception: raise ValueError("Unknown augmentor type [%s]." % augmentor_type) + return obj diff --git a/deepspeech/frontend/augmentor/base.py b/deepspeech/frontend/augmentor/base.py index e6f5c1e9f..18d003c0b 100644 --- a/deepspeech/frontend/augmentor/base.py +++ b/deepspeech/frontend/augmentor/base.py @@ -28,6 +28,10 @@ class AugmentorBase(): def __init__(self): pass + @abstractmethod + def __call__(self, xs): + raise NotImplementedError("AugmentorBase: Not impl __call__") + @abstractmethod def transform_audio(self, audio_segment): """Adds various effects to the input audio segment. Such effects @@ -40,7 +44,7 @@ class AugmentorBase(): :param audio_segment: Audio segment to add effects to. 
:type audio_segment: AudioSegmenet|SpeechSegment """ - raise NotImplementedError + raise NotImplementedError("AugmentorBase: Not impl transform_audio") @abstractmethod def transform_feature(self, spec_segment): @@ -52,4 +56,4 @@ class AugmentorBase(): Args: spec_segment (Spectrogram): Spectrogram segment to add effects to. """ - raise NotImplementedError + raise NotImplementedError("AugmentorBase: Not impl transform_feature") diff --git a/deepspeech/frontend/augmentor/impulse_response.py b/deepspeech/frontend/augmentor/impulse_response.py index fbd617b42..818251ed8 100644 --- a/deepspeech/frontend/augmentor/impulse_response.py +++ b/deepspeech/frontend/augmentor/impulse_response.py @@ -30,6 +30,12 @@ class ImpulseResponseAugmentor(AugmentorBase): self._rng = rng self._impulse_manifest = read_manifest(impulse_manifest_path) + def __call__(self, x, uttid=None, train=True): + if not train: + return x + self.transform_audio(x) + return x + def transform_audio(self, audio_segment): """Add impulse response effect. diff --git a/deepspeech/frontend/augmentor/noise_perturb.py b/deepspeech/frontend/augmentor/noise_perturb.py index b3c07f5c1..790b0c396 100644 --- a/deepspeech/frontend/augmentor/noise_perturb.py +++ b/deepspeech/frontend/augmentor/noise_perturb.py @@ -36,6 +36,12 @@ class NoisePerturbAugmentor(AugmentorBase): self._rng = rng self._noise_manifest = read_manifest(manifest_path=noise_manifest_path) + def __call__(self, x, uttid=None, train=True): + if not train: + return x + self.transform_audio(x) + return x + def transform_audio(self, audio_segment): """Add background noise audio. diff --git a/deepspeech/frontend/augmentor/online_bayesian_normalization.py b/deepspeech/frontend/augmentor/online_bayesian_normalization.py index 5af3b9b03..0f9d3ef6f 100644 --- a/deepspeech/frontend/augmentor/online_bayesian_normalization.py +++ b/deepspeech/frontend/augmentor/online_bayesian_normalization.py @@ -44,6 +44,12 @@ class OnlineBayesianNormalizationAugmentor(AugmentorBase): self._rng = rng self._startup_delay = startup_delay + def __call__(self, x, uttid=None, train=True): + if not train: + return x + self.transform_audio(x) + return x + def transform_audio(self, audio_segment): """Normalizes the input audio using the online Bayesian approach. diff --git a/deepspeech/frontend/augmentor/resample.py b/deepspeech/frontend/augmentor/resample.py index 9afce635d..509fe003d 100644 --- a/deepspeech/frontend/augmentor/resample.py +++ b/deepspeech/frontend/augmentor/resample.py @@ -31,6 +31,12 @@ class ResampleAugmentor(AugmentorBase): self._new_sample_rate = new_sample_rate self._rng = rng + def __call__(self, x, uttid=None, train=True): + if not train: + return x + self.transform_audio(x) + return x + def transform_audio(self, audio_segment): """Resamples the input audio to a target sample rate. diff --git a/deepspeech/frontend/augmentor/shift_perturb.py b/deepspeech/frontend/augmentor/shift_perturb.py index 9cc3fe2d0..8b7439fe5 100644 --- a/deepspeech/frontend/augmentor/shift_perturb.py +++ b/deepspeech/frontend/augmentor/shift_perturb.py @@ -31,6 +31,12 @@ class ShiftPerturbAugmentor(AugmentorBase): self._max_shift_ms = max_shift_ms self._rng = rng + def __call__(self, x, uttid=None, train=True): + if not train: + return x + self.transform_audio(x) + return x + def transform_audio(self, audio_segment): """Shift audio. 
diff --git a/deepspeech/frontend/augmentor/spec_augment.py b/deepspeech/frontend/augmentor/spec_augment.py index 1c2e09fc7..26c94d416 100644 --- a/deepspeech/frontend/augmentor/spec_augment.py +++ b/deepspeech/frontend/augmentor/spec_augment.py @@ -12,7 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. """Contains the volume perturb augmentation model.""" +import random + import numpy as np +from PIL import Image +from PIL.Image import BICUBIC from deepspeech.frontend.augmentor.base import AugmentorBase from deepspeech.utils.log import Log @@ -41,7 +45,9 @@ class SpecAugmentor(AugmentorBase): W=40, adaptive_number_ratio=0, adaptive_size_ratio=0, - max_n_time_masks=20): + max_n_time_masks=20, + replace_with_zero=True, + warp_mode='PIL'): """SpecAugment class. Args: rng (random.Random): random generator object. @@ -54,17 +60,22 @@ class SpecAugmentor(AugmentorBase): adaptive_number_ratio (float): adaptive multiplicity ratio for time masking adaptive_size_ratio (float): adaptive size ratio for time masking max_n_time_masks (int): maximum number of time masking + replace_with_zero (bool): pad zero on mask if true else use mean + warp_mode (str): "PIL" (default, fast, not differentiable) + or "sparse_image_warp" (slow, differentiable) """ super().__init__() self._rng = rng + self.inplace = True + self.replace_with_zero = replace_with_zero + self.mode = warp_mode self.W = W self.F = F self.T = T self.n_freq_masks = n_freq_masks self.n_time_masks = n_time_masks self.p = p - #logger.info(f"specaug: F-{F}, T-{T}, F-n-{n_freq_masks}, T-n-{n_time_masks}") # adaptive SpecAugment self.adaptive_number_ratio = adaptive_number_ratio @@ -121,21 +132,86 @@ class SpecAugmentor(AugmentorBase): def time_mask(self): return self._time_mask - def time_warp(xs, W=40): - raise NotImplementedError + def __repr__(self): + return f"specaug: F-{F}, T-{T}, F-n-{n_freq_masks}, T-n-{n_time_masks}" + + def time_warp(self, x, mode='PIL'): + """time warp for spec augment + move random center frame by the random width ~ uniform(-window, window) + + Args: + x (np.ndarray): spectrogram (time, freq) + mode (str): PIL or sparse_image_warp + + Raises: + NotImplementedError: [description] + NotImplementedError: [description] + + Returns: + np.ndarray: time warped spectrogram (time, freq) + """ + window = max_time_warp = self.W + if window == 0: + return x + + if mode == "PIL": + t = x.shape[0] + if t - window <= window: + return x + # NOTE: randrange(a, b) emits a, a + 1, ..., b - 1 + center = random.randrange(window, t - window) + warped = random.randrange(center - window, center + + window) + 1 # 1 ... t - 1 + + left = Image.fromarray(x[:center]).resize((x.shape[1], warped), + BICUBIC) + right = Image.fromarray(x[center:]).resize((x.shape[1], t - warped), + BICUBIC) + if self.inplace: + x[:warped] = left + x[warped:] = right + return x + return np.concatenate((left, right), 0) + elif mode == "sparse_image_warp": + raise NotImplementedError('sparse_image_warp') + else: + raise NotImplementedError( + "unknown resize mode: " + mode + + ", choose one from (PIL, sparse_image_warp).") + + def mask_freq(self, x, replace_with_zero=False): + """freq mask + + Args: + x (np.ndarray): spectrogram (time, freq) + replace_with_zero (bool, optional): Defaults to False. 
- def mask_freq(self, xs, replace_with_zero=False): - n_bins = xs.shape[0] + Returns: + np.ndarray: freq mask spectrogram (time, freq) + """ + n_bins = x.shape[1] for i in range(0, self.n_freq_masks): f = int(self._rng.uniform(low=0, high=self.F)) f_0 = int(self._rng.uniform(low=0, high=n_bins - f)) - xs[f_0:f_0 + f, :] = 0 assert f_0 <= f_0 + f + if replace_with_zero: + x[:, f_0:f_0 + f] = 0 + else: + x[:, f_0:f_0 + f] = x.mean() self._freq_mask = (f_0, f_0 + f) - return xs + return x - def mask_time(self, xs, replace_with_zero=False): - n_frames = xs.shape[1] + def mask_time(self, x, replace_with_zero=False): + """time mask + + Args: + x (np.ndarray): spectrogram (time, freq) + replace_with_zero (bool, optional): Defaults to False. + + Returns: + np.ndarray: time mask spectrogram (time, freq) + """ + n_frames = x.shape[0] if self.adaptive_number_ratio > 0: n_masks = int(n_frames * self.adaptive_number_ratio) @@ -152,19 +228,29 @@ class SpecAugmentor(AugmentorBase): t = int(self._rng.uniform(low=0, high=T)) t = min(t, int(n_frames * self.p)) t_0 = int(self._rng.uniform(low=0, high=n_frames - t)) - xs[:, t_0:t_0 + t] = 0 assert t_0 <= t_0 + t + if replace_with_zero: + x[t_0:t_0 + t, :] = 0 + else: + x[t_0:t_0 + t, :] = x.mean() self._time_mask = (t_0, t_0 + t) - return xs + return x + + def __call__(self, x, train=True): + if not train: + return x + return self.transform_feature(x) - def transform_feature(self, xs: np.ndarray): + def transform_feature(self, x: np.ndarray): """ Args: - xs (FloatTensor): `[F, T]` + x (np.ndarray): `[T, F]` Returns: - xs (FloatTensor): `[F, T]` + x (np.ndarray): `[T, F]` """ - # xs = self.time_warp(xs) - xs = self.mask_freq(xs) - xs = self.mask_time(xs) - return xs + assert isinstance(x, np.ndarray) + assert x.ndim == 2 + x = self.time_warp(x, self.mode) + x = self.mask_freq(x, self.replace_with_zero) + x = self.mask_time(x, self.replace_with_zero) + return x diff --git a/deepspeech/frontend/augmentor/speed_perturb.py b/deepspeech/frontend/augmentor/speed_perturb.py index d0977c131..ce8dfde0a 100644 --- a/deepspeech/frontend/augmentor/speed_perturb.py +++ b/deepspeech/frontend/augmentor/speed_perturb.py @@ -79,6 +79,12 @@ class SpeedPerturbAugmentor(AugmentorBase): self._rates = np.linspace( self._min_rate, self._max_rate, self._num_rates, endpoint=True) + def __call__(self, x, uttid=None, train=True): + if not train: + return x + self.transform_audio(x) + return x + def transform_audio(self, audio_segment): """Sample a new speed rate from the given range and changes the speed of the given audio clip. diff --git a/deepspeech/frontend/augmentor/volume_perturb.py b/deepspeech/frontend/augmentor/volume_perturb.py index 0d76e7a05..70cb28897 100644 --- a/deepspeech/frontend/augmentor/volume_perturb.py +++ b/deepspeech/frontend/augmentor/volume_perturb.py @@ -37,6 +37,12 @@ class VolumePerturbAugmentor(AugmentorBase): self._max_gain_dBFS = max_gain_dBFS self._rng = rng + def __call__(self, x, uttid=None, train=True): + if not train: + return x + self.transform_audio(x) + return x + def transform_audio(self, audio_segment): """Change audio loadness. diff --git a/deepspeech/frontend/featurizer/__init__.py b/deepspeech/frontend/featurizer/__init__.py index 185a92b8d..6992700d9 100644 --- a/deepspeech/frontend/featurizer/__init__.py +++ b/deepspeech/frontend/featurizer/__init__.py @@ -11,3 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. +from .audio_featurizer import AudioFeaturizer #noqa: F401 +from .speech_featurizer import SpeechFeaturizer +from .text_featurizer import TextFeaturizer diff --git a/deepspeech/frontend/featurizer/audio_featurizer.py b/deepspeech/frontend/featurizer/audio_featurizer.py index 11c1fa2d4..4c40c8472 100644 --- a/deepspeech/frontend/featurizer/audio_featurizer.py +++ b/deepspeech/frontend/featurizer/audio_featurizer.py @@ -18,7 +18,7 @@ from python_speech_features import logfbank from python_speech_features import mfcc -class AudioFeaturizer(object): +class AudioFeaturizer(): """Audio featurizer, for extracting features from audio contents of AudioSegment or SpeechSegment. @@ -167,32 +167,6 @@ class AudioFeaturizer(object): raise ValueError("Unknown specgram_type %s. " "Supported values: linear." % self._specgram_type) - def _compute_linear_specgram(self, - samples, - sample_rate, - stride_ms=10.0, - window_ms=20.0, - max_freq=None, - eps=1e-14): - """Compute the linear spectrogram from FFT energy.""" - if max_freq is None: - max_freq = sample_rate / 2 - if max_freq > sample_rate / 2: - raise ValueError("max_freq must not be greater than half of " - "sample rate.") - if stride_ms > window_ms: - raise ValueError("Stride size must not be greater than " - "window size.") - stride_size = int(0.001 * sample_rate * stride_ms) - window_size = int(0.001 * sample_rate * window_ms) - specgram, freqs = self._specgram_real( - samples, - window_size=window_size, - stride_size=stride_size, - sample_rate=sample_rate) - ind = np.where(freqs <= max_freq)[0][-1] + 1 - return np.log(specgram[:ind, :] + eps) - def _specgram_real(self, samples, window_size, stride_size, sample_rate): """Compute the spectrogram for samples from a real signal.""" # extract strided windows @@ -217,26 +191,65 @@ class AudioFeaturizer(object): freqs = float(sample_rate) / window_size * np.arange(fft.shape[0]) return fft, freqs + def _compute_linear_specgram(self, + samples, + sample_rate, + stride_ms=10.0, + window_ms=20.0, + max_freq=None, + eps=1e-14): + """Compute the linear spectrogram from FFT energy. + + Args: + samples ([type]): [description] + sample_rate ([type]): [description] + stride_ms (float, optional): [description]. Defaults to 10.0. + window_ms (float, optional): [description]. Defaults to 20.0. + max_freq ([type], optional): [description]. Defaults to None. + eps ([type], optional): [description]. Defaults to 1e-14. + + Raises: + ValueError: [description] + ValueError: [description] + + Returns: + np.ndarray: log spectrogram, (time, freq) + """ + if max_freq is None: + max_freq = sample_rate / 2 + if max_freq > sample_rate / 2: + raise ValueError("max_freq must not be greater than half of " + "sample rate.") + if stride_ms > window_ms: + raise ValueError("Stride size must not be greater than " + "window size.") + stride_size = int(0.001 * sample_rate * stride_ms) + window_size = int(0.001 * sample_rate * window_ms) + specgram, freqs = self._specgram_real( + samples, + window_size=window_size, + stride_size=stride_size, + sample_rate=sample_rate) + ind = np.where(freqs <= max_freq)[0][-1] + 1 + # (freq, time) + spec = np.log(specgram[:ind, :] + eps) + return np.transpose(spec) + def _concat_delta_delta(self, feat): """append delat, delta-delta feature. 
Args: - feat (np.ndarray): (D, T) + feat (np.ndarray): (T, D) Returns: - np.ndarray: feat with delta-delta, (3*D, T) + np.ndarray: feat with delta-delta, (T, 3*D) """ - feat = np.transpose(feat) # Deltas d_feat = delta(feat, 2) # Deltas-Deltas dd_feat = delta(feat, 2) - # transpose - feat = np.transpose(feat) - d_feat = np.transpose(d_feat) - dd_feat = np.transpose(dd_feat) # concat above three features - concat_feat = np.concatenate((feat, d_feat, dd_feat)) + concat_feat = np.concatenate((feat, d_feat, dd_feat), axis=1) return concat_feat def _compute_mfcc(self, @@ -292,7 +305,6 @@ class AudioFeaturizer(object): ceplifter=22, useEnergy=True, winfunc='povey') - mfcc_feat = np.transpose(mfcc_feat) if delta_delta: mfcc_feat = self._concat_delta_delta(mfcc_feat) return mfcc_feat @@ -346,8 +358,6 @@ class AudioFeaturizer(object): remove_dc_offset=True, preemph=0.97, wintype='povey') - - fbank_feat = np.transpose(fbank_feat) if delta_delta: fbank_feat = self._concat_delta_delta(fbank_feat) return fbank_feat diff --git a/deepspeech/frontend/featurizer/speech_featurizer.py b/deepspeech/frontend/featurizer/speech_featurizer.py index e6761cb52..5082850d6 100644 --- a/deepspeech/frontend/featurizer/speech_featurizer.py +++ b/deepspeech/frontend/featurizer/speech_featurizer.py @@ -16,7 +16,7 @@ from deepspeech.frontend.featurizer.audio_featurizer import AudioFeaturizer from deepspeech.frontend.featurizer.text_featurizer import TextFeaturizer -class SpeechFeaturizer(object): +class SpeechFeaturizer(): """Speech featurizer, for extracting features from both audio and transcript contents of SpeechSegment. @@ -107,7 +107,6 @@ class SpeechFeaturizer(object): @property def vocab_size(self): """Return the vocabulary size. - Returns: int: Vocabulary size. """ @@ -116,7 +115,6 @@ class SpeechFeaturizer(object): @property def vocab_list(self): """Return the vocabulary in list. - Returns: List[str]: """ @@ -125,7 +123,6 @@ class SpeechFeaturizer(object): @property def vocab_dict(self): """Return the vocabulary in dict. - Returns: Dict[str, int]: """ @@ -134,7 +131,6 @@ class SpeechFeaturizer(object): @property def feature_size(self): """Return the audio feature size. - Returns: int: audio feature size. """ @@ -143,7 +139,6 @@ class SpeechFeaturizer(object): @property def stride_ms(self): """time length in `ms` unit per frame - Returns: float: time(ms)/frame """ @@ -152,7 +147,6 @@ class SpeechFeaturizer(object): @property def text_feature(self): """Return the text feature object. - Returns: TextFeaturizer: object. """ diff --git a/deepspeech/frontend/featurizer/text_featurizer.py b/deepspeech/frontend/featurizer/text_featurizer.py index 1ba6ac7f9..e4364f70a 100644 --- a/deepspeech/frontend/featurizer/text_featurizer.py +++ b/deepspeech/frontend/featurizer/text_featurizer.py @@ -14,12 +14,19 @@ """Contains the text featurizer class.""" import sentencepiece as spm -from deepspeech.frontend.utility import EOS -from deepspeech.frontend.utility import UNK +from ..utility import EOS +from ..utility import load_dict +from ..utility import UNK +__all__ = ["TextFeaturizer"] -class TextFeaturizer(object): - def __init__(self, unit_type, vocab_filepath, spm_model_prefix=None): + +class TextFeaturizer(): + def __init__(self, + unit_type, + vocab_filepath, + spm_model_prefix=None, + maskctc=False): """Text featurizer, for processing or extracting features from text. 
Currently, it supports char/word/sentence-piece level tokenizing and conversion into @@ -34,11 +41,12 @@ class TextFeaturizer(object): assert unit_type in ('char', 'spm', 'word') self.unit_type = unit_type self.unk = UNK + self.maskctc = maskctc + if vocab_filepath: - self._vocab_dict, self._id2token, self._vocab_list = self._load_vocabulary_from_file( - vocab_filepath) - self.unk_id = self._vocab_list.index(self.unk) - self.eos_id = self._vocab_list.index(EOS) + self.vocab_dict, self._id2token, self.vocab_list, self.unk_id, self.eos_id = self._load_vocabulary_from_file( + vocab_filepath, maskctc) + self.vocab_size = len(self.vocab_list) if unit_type == 'spm': spm_model = spm_model_prefix + '.model' @@ -67,7 +75,7 @@ class TextFeaturizer(object): """Convert text string to a list of token indices. Args: - text (str): Text to process. + text (str): Text. Returns: List[int]: List of token indices. @@ -75,8 +83,8 @@ class TextFeaturizer(object): tokens = self.tokenize(text) ids = [] for token in tokens: - token = token if token in self._vocab_dict else self.unk - ids.append(self._vocab_dict[token]) + token = token if token in self.vocab_dict else self.unk + ids.append(self.vocab_dict[token]) return ids def defeaturize(self, idxs): @@ -87,7 +95,7 @@ class TextFeaturizer(object): idxs (List[int]): List of token indices. Returns: - str: Text to process. + str: Text. """ tokens = [] for idx in idxs: @@ -97,33 +105,6 @@ class TextFeaturizer(object): text = self.detokenize(tokens) return text - @property - def vocab_size(self): - """Return the vocabulary size. - - :return: Vocabulary size. - :rtype: int - """ - return len(self._vocab_list) - - @property - def vocab_list(self): - """Return the vocabulary in list. - - Returns: - List[str]: tokens. - """ - return self._vocab_list - - @property - def vocab_dict(self): - """Return the vocabulary in dict. - - Returns: - Dict[str, int]: token str -> int - """ - return self._vocab_dict - def char_tokenize(self, text): """Character tokenizer. 
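The featurize/defeaturize pair above boils down to a dictionary lookup with an unknown-token fallback. A self-contained sketch with a made-up character vocabulary; the special-token strings below are placeholders, not the project's actual symbols:

vocab_list = ["<blank>", "a", "b", "c", "<unk>", "<eos>"]   # toy vocabulary
token2id = {tok: idx for idx, tok in enumerate(vocab_list)}
id2token = {idx: tok for idx, tok in enumerate(vocab_list)}
unk = "<unk>"

def featurize(text):
    # char-level tokens; anything out of vocabulary maps to the UNK id
    return [token2id[ch if ch in token2id else unk] for ch in text]

def defeaturize(ids):
    return "".join(id2token[i] for i in ids)

ids = featurize("abz")       # 'z' is out of vocabulary
print(ids)                   # [1, 2, 4]
print(defeaturize(ids))      # ab<unk>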
@@ -206,14 +187,16 @@ class TextFeaturizer(object): return decode(tokens) - def _load_vocabulary_from_file(self, vocab_filepath): + def _load_vocabulary_from_file(self, vocab_filepath: str, maskctc: bool): """Load vocabulary from file.""" - vocab_lines = [] - with open(vocab_filepath, 'r', encoding='utf-8') as file: - vocab_lines.extend(file.readlines()) - vocab_list = [line[:-1] for line in vocab_lines] + vocab_list = load_dict(vocab_filepath, maskctc) + assert vocab_list is not None + id2token = dict( [(idx, token) for (idx, token) in enumerate(vocab_list)]) token2id = dict( [(token, idx) for (idx, token) in enumerate(vocab_list)]) - return token2id, id2token, vocab_list + + unk_id = vocab_list.index(UNK) + eos_id = vocab_list.index(EOS) + return token2id, id2token, vocab_list, unk_id, eos_id diff --git a/deepspeech/frontend/normalizer.py b/deepspeech/frontend/normalizer.py index 287b51e58..73b3a4ba6 100644 --- a/deepspeech/frontend/normalizer.py +++ b/deepspeech/frontend/normalizer.py @@ -40,21 +40,21 @@ class CollateFunc(object): number = 0 for item in batch: audioseg = AudioSegment.from_file(item['feat']) - feat = self.feature_func(audioseg) #(D, T) + feat = self.feature_func(audioseg) #(T, D) - sums = np.sum(feat, axis=1) + sums = np.sum(feat, axis=0) if mean_stat is None: mean_stat = sums else: mean_stat += sums - square_sums = np.sum(np.square(feat), axis=1) + square_sums = np.sum(np.square(feat), axis=0) if var_stat is None: var_stat = square_sums else: var_stat += square_sums - number += feat.shape[1] + number += feat.shape[0] return number, mean_stat, var_stat @@ -120,7 +120,7 @@ class FeatureNormalizer(object): """Normalize features to be of zero mean and unit stddev. :param features: Input features to be normalized. - :type features: ndarray, shape (D, T) + :type features: ndarray, shape (T, D) :param eps: added to stddev to provide numerical stablibity. :type eps: float :return: Normalized features. @@ -131,8 +131,8 @@ class FeatureNormalizer(object): def _read_mean_std_from_file(self, filepath, eps=1e-20): """Load mean and std from file.""" mean, istd = load_cmvn(filepath, filetype='json') - self._mean = np.expand_dims(mean, axis=-1) - self._istd = np.expand_dims(istd, axis=-1) + self._mean = np.expand_dims(mean, axis=0) + self._istd = np.expand_dims(istd, axis=0) def write_to_file(self, filepath): """Write the mean and stddev to the file. 
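With features now time-major, the CMVN statistics above are accumulated over axis 0 and applied by broadcasting a (1, D) mean/inverse-std over the frames. A minimal numpy sketch of that bookkeeping on synthetic features (no manifest or file I/O, and a simplified std estimate):

import numpy as np

feats = [np.random.rand(t, 80) for t in (50, 70, 60)]          # a few (T, D) utterances

mean_stat = sum(np.sum(f, axis=0) for f in feats)              # (D,)
var_stat = sum(np.sum(np.square(f), axis=0) for f in feats)    # (D,)
number = sum(f.shape[0] for f in feats)                        # total frame count

mean = mean_stat / number
istd = 1.0 / (np.sqrt(var_stat / number - np.square(mean)) + 1e-20)

# apply: (T, D) minus (1, D) broadcasts over the time axis
normalized = (feats[0] - mean[np.newaxis, :]) * istd[np.newaxis, :]
print(normalized.shape)   # (50, 80)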
diff --git a/deepspeech/frontend/utility.py b/deepspeech/frontend/utility.py index b2dd9601f..72dfc98dd 100644 --- a/deepspeech/frontend/utility.py +++ b/deepspeech/frontend/utility.py @@ -15,6 +15,9 @@ import codecs import json import math +from typing import List +from typing import Optional +from typing import Text import numpy as np @@ -23,16 +26,35 @@ from deepspeech.utils.log import Log logger = Log(__name__).getlog() __all__ = [ - "load_cmvn", "read_manifest", "rms_to_db", "rms_to_dbfs", "max_dbfs", - "mean_dbfs", "gain_db_to_ratio", "normalize_audio", "SOS", "EOS", "UNK", - "BLANK" + "load_dict", "load_cmvn", "read_manifest", "rms_to_db", "rms_to_dbfs", + "max_dbfs", "mean_dbfs", "gain_db_to_ratio", "normalize_audio", "SOS", + "EOS", "UNK", "BLANK", "MASKCTC" ] IGNORE_ID = -1 -SOS = "" +# `sos` and `eos` using same token +SOS = "" EOS = SOS UNK = "" BLANK = "" +MASKCTC = "" + + +def load_dict(dict_path: Optional[Text], maskctc=False) -> Optional[List[Text]]: + if dict_path is None: + return None + + with open(dict_path, "r") as f: + dictionary = f.readlines() + char_list = [entry.strip().split(" ")[0] for entry in dictionary] + if BLANK not in char_list: + char_list.insert(0, BLANK) + if EOS not in char_list: + char_list.append(EOS) + # for non-autoregressive maskctc model + if maskctc and MASKCTC not in char_list: + char_list.append(MASKCTC) + return char_list def read_manifest( @@ -47,12 +69,20 @@ def read_manifest( Args: manifest_path ([type]): Manifest file to load and parse. - max_input_len ([type], optional): maximum output seq length, in seconds for raw wav, in frame numbers for feature data. Defaults to float('inf'). - min_input_len (float, optional): minimum input seq length, in seconds for raw wav, in frame numbers for feature data. Defaults to 0.0. - max_output_len (float, optional): maximum input seq length, in modeling units. Defaults to 500.0. - min_output_len (float, optional): minimum input seq length, in modeling units. Defaults to 0.0. - max_output_input_ratio (float, optional): maximum output seq length/output seq length ratio. Defaults to 10.0. - min_output_input_ratio (float, optional): minimum output seq length/output seq length ratio. Defaults to 0.05. + max_input_len ([type], optional): maximum output seq length, + in seconds for raw wav, in frame numbers for feature data. + Defaults to float('inf'). + min_input_len (float, optional): minimum input seq length, + in seconds for raw wav, in frame numbers for feature data. + Defaults to 0.0. + max_output_len (float, optional): maximum input seq length, + in modeling units. Defaults to 500.0. + min_output_len (float, optional): minimum input seq length, + in modeling units. Defaults to 0.0. + max_output_input_ratio (float, optional): + maximum output seq length/output seq length ratio. Defaults to 10.0. + min_output_input_ratio (float, optional): + minimum output seq length/output seq length ratio. Defaults to 0.05. Raises: IOError: If failed to parse the manifest. diff --git a/deepspeech/io/__init__.py b/deepspeech/io/__init__.py index e180f18ee..185a92b8d 100644 --- a/deepspeech/io/__init__.py +++ b/deepspeech/io/__init__.py @@ -11,139 +11,3 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
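A small usage sketch for the new load_dict helper, assuming the deepspeech package is importable; vocab.txt is a hypothetical file with one token per line (only the first whitespace-separated field of each line is kept):

from deepspeech.frontend.utility import BLANK, EOS, MASKCTC, load_dict

char_list = load_dict("vocab.txt", maskctc=True)   # hypothetical path
# BLANK is inserted at index 0 and EOS appended if the file lacks them;
# MASKCTC is appended only because maskctc=True.
print(char_list[0] == BLANK, EOS in char_list, MASKCTC in char_list)
print(len(char_list))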
-import numpy as np -from paddle.io import DataLoader - -from deepspeech.io.collator import SpeechCollator -from deepspeech.io.dataset import ManifestDataset -from deepspeech.io.sampler import SortagradBatchSampler -from deepspeech.io.sampler import SortagradDistributedBatchSampler - - -def create_dataloader(manifest_path, - unit_type, - vocab_filepath, - mean_std_filepath, - spm_model_prefix, - augmentation_config='{}', - max_input_len=float('inf'), - min_input_len=0.0, - max_output_len=float('inf'), - min_output_len=0.0, - max_output_input_ratio=float('inf'), - min_output_input_ratio=0.0, - stride_ms=10.0, - window_ms=20.0, - max_freq=None, - specgram_type='linear', - feat_dim=None, - delta_delta=False, - use_dB_normalization=True, - random_seed=0, - keep_transcription_text=False, - is_training=False, - batch_size=1, - num_workers=0, - sortagrad=False, - shuffle_method=None, - dist=False): - - dataset = ManifestDataset( - manifest_path=manifest_path, - unit_type=unit_type, - vocab_filepath=vocab_filepath, - mean_std_filepath=mean_std_filepath, - spm_model_prefix=spm_model_prefix, - augmentation_config=augmentation_config, - max_input_len=max_input_len, - min_input_len=min_input_len, - max_output_len=max_output_len, - min_output_len=min_output_len, - max_output_input_ratio=max_output_input_ratio, - min_output_input_ratio=min_output_input_ratio, - stride_ms=stride_ms, - window_ms=window_ms, - max_freq=max_freq, - specgram_type=specgram_type, - feat_dim=feat_dim, - delta_delta=delta_delta, - use_dB_normalization=use_dB_normalization, - random_seed=random_seed, - keep_transcription_text=keep_transcription_text) - - if dist: - batch_sampler = SortagradDistributedBatchSampler( - dataset, - batch_size, - num_replicas=None, - rank=None, - shuffle=is_training, - drop_last=is_training, - sortagrad=is_training, - shuffle_method=shuffle_method) - else: - batch_sampler = SortagradBatchSampler( - dataset, - shuffle=is_training, - batch_size=batch_size, - drop_last=is_training, - sortagrad=is_training, - shuffle_method=shuffle_method) - - def padding_batch(batch, - padding_to=-1, - flatten=False, - keep_transcription_text=True): - """ - Padding audio features with zeros to make them have the same shape (or - a user-defined shape) within one bach. - - If ``padding_to`` is -1, the maximun shape in the batch will be used - as the target shape for padding. Otherwise, `padding_to` will be the - target shape (only refers to the second axis). - - If `flatten` is True, features will be flatten to 1darray. 
- """ - new_batch = [] - # get target shape - max_length = max([audio.shape[1] for audio, text in batch]) - if padding_to != -1: - if padding_to < max_length: - raise ValueError("If padding_to is not -1, it should be larger " - "than any instance's shape in the batch") - max_length = padding_to - max_text_length = max([len(text) for audio, text in batch]) - # padding - padded_audios = [] - audio_lens = [] - texts, text_lens = [], [] - for audio, text in batch: - padded_audio = np.zeros([audio.shape[0], max_length]) - padded_audio[:, :audio.shape[1]] = audio - if flatten: - padded_audio = padded_audio.flatten() - padded_audios.append(padded_audio) - audio_lens.append(audio.shape[1]) - - padded_text = np.zeros([max_text_length]) - if keep_transcription_text: - padded_text[:len(text)] = [ord(t) for t in text] # string - else: - padded_text[:len(text)] = text # ids - texts.append(padded_text) - text_lens.append(len(text)) - - padded_audios = np.array(padded_audios).astype('float32') - audio_lens = np.array(audio_lens).astype('int64') - texts = np.array(texts).astype('int32') - text_lens = np.array(text_lens).astype('int64') - return padded_audios, audio_lens, texts, text_lens - - # collate_fn=functools.partial(padding_batch, keep_transcription_text=keep_transcription_text), - collate_fn = SpeechCollator(keep_transcription_text=keep_transcription_text) - loader = DataLoader( - dataset, - batch_sampler=batch_sampler, - collate_fn=collate_fn, - num_workers=num_workers) - return loader diff --git a/deepspeech/io/batchfy.py b/deepspeech/io/batchfy.py new file mode 100644 index 000000000..de29d0546 --- /dev/null +++ b/deepspeech/io/batchfy.py @@ -0,0 +1,469 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import itertools + +import numpy as np + +from deepspeech.utils.log import Log + +__all__ = ["make_batchset"] + +logger = Log(__name__).getlog() + + +def batchfy_by_seq( + sorted_data, + batch_size, + max_length_in, + max_length_out, + min_batch_size=1, + shortest_first=False, + ikey="input", + iaxis=0, + okey="output", + oaxis=0, ): + """Make batch set from json dictionary + + :param List[(str, Dict[str, Any])] sorted_data: dictionary loaded from data.json + :param int batch_size: batch size + :param int max_length_in: maximum length of input to decide adaptive batch size + :param int max_length_out: maximum length of output to decide adaptive batch size + :param int min_batch_size: mininum batch size (for multi-gpu) + :param bool shortest_first: Sort from batch with shortest samples + to longest if true, otherwise reverse + :param str ikey: key to access input + (for ASR ikey="input", for TTS, MT ikey="output".) + :param int iaxis: dimension to access input + (for ASR, TTS iaxis=0, for MT iaxis="1".) + :param str okey: key to access output + (for ASR, MT okey="output". for TTS okey="input".) + :param int oaxis: dimension to access output + (for ASR, TTS, MT oaxis=0, reserved for future research, -1 means all axis.) 
+ :return: List[List[Tuple[str, dict]]] list of batches + """ + if batch_size <= 0: + raise ValueError(f"Invalid batch_size={batch_size}") + + # check #utts is more than min_batch_size + if len(sorted_data) < min_batch_size: + raise ValueError( + f"#utts({len(sorted_data)}) is less than min_batch_size({min_batch_size})." + ) + + # make list of minibatches + minibatches = [] + start = 0 + while True: + _, info = sorted_data[start] + ilen = int(info[ikey][iaxis]["shape"][0]) + olen = (int(info[okey][oaxis]["shape"][0]) if oaxis >= 0 else + max(map(lambda x: int(x["shape"][0]), info[okey]))) + factor = max(int(ilen / max_length_in), int(olen / max_length_out)) + # change batchsize depending on the input and output length + # if ilen = 1000 and max_length_in = 800 + # then b = batchsize / 2 + # and max(min_batches, .) avoids batchsize = 0 + bs = max(min_batch_size, int(batch_size / (1 + factor))) + end = min(len(sorted_data), start + bs) + minibatch = sorted_data[start:end] + if shortest_first: + minibatch.reverse() + + # check each batch is more than minimum batchsize + if len(minibatch) < min_batch_size: + mod = min_batch_size - len(minibatch) % min_batch_size + additional_minibatch = [ + sorted_data[i] for i in np.random.randint(0, start, mod) + ] + if shortest_first: + additional_minibatch.reverse() + minibatch.extend(additional_minibatch) + minibatches.append(minibatch) + + if end == len(sorted_data): + break + start = end + + # batch: List[List[Tuple[str, dict]]] + return minibatches + + +def batchfy_by_bin( + sorted_data, + batch_bins, + num_batches=0, + min_batch_size=1, + shortest_first=False, + ikey="input", + okey="output", ): + """Make variably sized batch set, which maximizes + + the number of bins up to `batch_bins`. + + :param List[(str, Dict[str, Any])] sorted_data: dictionary loaded from data.json + :param int batch_bins: Maximum frames of a batch + :param int num_batches: # number of batches to use (for debug) + :param int min_batch_size: minimum batch size (for multi-gpu) + :param int test: Return only every `test` batches + :param bool shortest_first: Sort from batch with shortest samples + to longest if true, otherwise reverse + + :param str ikey: key to access input (for ASR ikey="input", for TTS ikey="output".) + :param str okey: key to access output (for ASR okey="output". for TTS okey="input".) 
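The adaptive batch size in batchfy_by_seq is plain integer arithmetic on the (longest-first) lengths; a worked sketch of the factor computation, matching the inline comment about ilen=1000 with max_length_in=800 halving the batch:

def adaptive_bs(batch_size, ilen, olen, max_length_in, max_length_out,
                min_batch_size=1):
    factor = max(int(ilen / max_length_in), int(olen / max_length_out))
    return max(min_batch_size, int(batch_size / (1 + factor)))

print(adaptive_bs(batch_size=32, ilen=1000, olen=50,
                  max_length_in=800, max_length_out=150))   # factor=1 -> 16
print(adaptive_bs(batch_size=32, ilen=600, olen=50,
                  max_length_in=800, max_length_out=150))   # factor=0 -> 32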
+ + :return: List[Tuple[str, Dict[str, List[Dict[str, Any]]]] list of batches + """ + if batch_bins <= 0: + raise ValueError(f"invalid batch_bins={batch_bins}") + length = len(sorted_data) + idim = int(sorted_data[0][1][ikey][0]["shape"][1]) + odim = int(sorted_data[0][1][okey][0]["shape"][1]) + logger.info("# utts: " + str(len(sorted_data))) + minibatches = [] + start = 0 + n = 0 + while True: + # Dynamic batch size depending on size of samples + b = 0 + next_size = 0 + max_olen = 0 + while next_size < batch_bins and (start + b) < length: + ilen = int(sorted_data[start + b][1][ikey][0]["shape"][0]) * idim + olen = int(sorted_data[start + b][1][okey][0]["shape"][0]) * odim + if olen > max_olen: + max_olen = olen + next_size = (max_olen + ilen) * (b + 1) + if next_size <= batch_bins: + b += 1 + elif next_size == 0: + raise ValueError( + f"Can't fit one sample in batch_bins ({batch_bins}): " + f"Please increase the value") + end = min(length, start + max(min_batch_size, b)) + batch = sorted_data[start:end] + if shortest_first: + batch.reverse() + minibatches.append(batch) + # Check for min_batch_size and fixes the batches if needed + i = -1 + while len(minibatches[i]) < min_batch_size: + missing = min_batch_size - len(minibatches[i]) + if -i == len(minibatches): + minibatches[i + 1].extend(minibatches[i]) + minibatches = minibatches[1:] + break + else: + minibatches[i].extend(minibatches[i - 1][:missing]) + minibatches[i - 1] = minibatches[i - 1][missing:] + i -= 1 + if end == length: + break + start = end + n += 1 + if num_batches > 0: + minibatches = minibatches[:num_batches] + lengths = [len(x) for x in minibatches] + logger.info( + str(len(minibatches)) + " batches containing from " + str(min(lengths)) + + " to " + str(max(lengths)) + " samples " + "(avg " + str( + int(np.mean(lengths))) + " samples).") + return minibatches + + +def batchfy_by_frame( + sorted_data, + max_frames_in, + max_frames_out, + max_frames_inout, + num_batches=0, + min_batch_size=1, + shortest_first=False, + ikey="input", + okey="output", ): + """Make variable batch set, which maximizes the number of frames to max_batch_frame. + + :param List[(str, Dict[str, Any])] sorteddata: dictionary loaded from data.json + :param int max_frames_in: Maximum input frames of a batch + :param int max_frames_out: Maximum output frames of a batch + :param int max_frames_inout: Maximum input+output frames of a batch + :param int num_batches: # number of batches to use (for debug) + :param int min_batch_size: minimum batch size (for multi-gpu) + :param int test: Return only every `test` batches + :param bool shortest_first: Sort from batch with shortest samples + to longest if true, otherwise reverse + + :param str ikey: key to access input (for ASR ikey="input", for TTS ikey="output".) + :param str okey: key to access output (for ASR okey="output". for TTS okey="input".) 
+ + :return: List[Tuple[str, Dict[str, List[Dict[str, Any]]]] list of batches + """ + if max_frames_in <= 0 and max_frames_out <= 0 and max_frames_inout <= 0: + raise ValueError( + "At least, one of `--batch-frames-in`, `--batch-frames-out` or " + "`--batch-frames-inout` should be > 0") + length = len(sorted_data) + minibatches = [] + start = 0 + end = 0 + while end != length: + # Dynamic batch size depending on size of samples + b = 0 + max_olen = 0 + max_ilen = 0 + while (start + b) < length: + ilen = int(sorted_data[start + b][1][ikey][0]["shape"][0]) + if ilen > max_frames_in and max_frames_in != 0: + raise ValueError( + f"Can't fit one sample in --batch-frames-in ({max_frames_in}): " + f"Please increase the value") + olen = int(sorted_data[start + b][1][okey][0]["shape"][0]) + if olen > max_frames_out and max_frames_out != 0: + raise ValueError( + f"Can't fit one sample in --batch-frames-out ({max_frames_out}): " + f"Please increase the value") + if ilen + olen > max_frames_inout and max_frames_inout != 0: + raise ValueError( + f"Can't fit one sample in --batch-frames-out ({max_frames_inout}): " + f"Please increase the value") + max_olen = max(max_olen, olen) + max_ilen = max(max_ilen, ilen) + in_ok = max_ilen * (b + 1) <= max_frames_in or max_frames_in == 0 + out_ok = max_olen * (b + 1) <= max_frames_out or max_frames_out == 0 + inout_ok = (max_ilen + max_olen) * ( + b + 1) <= max_frames_inout or max_frames_inout == 0 + if in_ok and out_ok and inout_ok: + # add more seq in the minibatch + b += 1 + else: + # no more seq in the minibatch + break + end = min(length, start + b) + batch = sorted_data[start:end] + if shortest_first: + batch.reverse() + minibatches.append(batch) + # Check for min_batch_size and fixes the batches if needed + i = -1 + while len(minibatches[i]) < min_batch_size: + missing = min_batch_size - len(minibatches[i]) + if -i == len(minibatches): + minibatches[i + 1].extend(minibatches[i]) + minibatches = minibatches[1:] + break + else: + minibatches[i].extend(minibatches[i - 1][:missing]) + minibatches[i - 1] = minibatches[i - 1][missing:] + i -= 1 + start = end + if num_batches > 0: + minibatches = minibatches[:num_batches] + lengths = [len(x) for x in minibatches] + logger.info( + str(len(minibatches)) + " batches containing from " + str(min(lengths)) + + " to " + str(max(lengths)) + " samples" + "(avg " + str( + int(np.mean(lengths))) + " samples).") + + return minibatches + + +def batchfy_shuffle(data, batch_size, min_batch_size, num_batches, + shortest_first): + import random + + logger.info("use shuffled batch.") + sorted_data = random.sample(data.items(), len(data.items())) + logger.info("# utts: " + str(len(sorted_data))) + # make list of minibatches + minibatches = [] + start = 0 + while True: + end = min(len(sorted_data), start + batch_size) + # check each batch is more than minimum batchsize + minibatch = sorted_data[start:end] + if shortest_first: + minibatch.reverse() + if len(minibatch) < min_batch_size: + mod = min_batch_size - len(minibatch) % min_batch_size + additional_minibatch = [ + sorted_data[i] for i in np.random.randint(0, start, mod) + ] + if shortest_first: + additional_minibatch.reverse() + minibatch.extend(additional_minibatch) + minibatches.append(minibatch) + if end == len(sorted_data): + break + start = end + + # for debugging + if num_batches > 0: + minibatches = minibatches[:num_batches] + logger.info("# minibatches: " + str(len(minibatches))) + return minibatches + + +BATCH_COUNT_CHOICES = ["auto", "seq", "bin", "frame"] 
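A numeric sketch of the bin accounting used by batchfy_by_bin: the running cost is (longest output so far + current input), each in frames times dims, multiplied by the would-be batch size, and the inner loop stops once that exceeds batch_bins. The sizes below are invented:

batch_bins = 500_000
idim, odim = 80, 5000
utts = [(900, 30), (850, 28), (820, 25), (800, 40)]   # (input frames, output tokens), longest first

b, max_olen, next_size = 0, 0, 0
while next_size < batch_bins and b < len(utts):
    ilen = utts[b][0] * idim
    olen = utts[b][1] * odim
    max_olen = max(max_olen, olen)
    next_size = (max_olen + ilen) * (b + 1)
    if next_size <= batch_bins:
        b += 1
print(b)   # 2: only the two longest utterances fit under this bin budget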
+BATCH_SORT_KEY_CHOICES = ["input", "output", "shuffle"] + + +def make_batchset( + data, + batch_size=0, + max_length_in=float("inf"), + max_length_out=float("inf"), + num_batches=0, + min_batch_size=1, + shortest_first=False, + batch_sort_key="input", + count="auto", + batch_bins=0, + batch_frames_in=0, + batch_frames_out=0, + batch_frames_inout=0, + iaxis=0, + oaxis=0, ): + """Make batch set from json dictionary + + if utts have "category" value, + + >>> data = [{'category': 'A', 'input': ..., 'utt':'utt1'}, + ... {'category': 'B', 'input': ..., 'utt':'utt2'}, + ... {'category': 'B', 'input': ..., 'utt':'utt3'}, + ... {'category': 'A', 'input': ..., 'utt':'utt4'}] + >>> make_batchset(data, batchsize=2, ...) + [[('utt1', ...), ('utt4', ...)], [('utt2', ...), ('utt3': ...)]] + + Note that if any utts doesn't have "category", + perform as same as batchfy_by_{count} + + :param List[Dict[str, Any]] data: dictionary loaded from data.json + :param int batch_size: maximum number of sequences in a minibatch. + :param int batch_bins: maximum number of bins (frames x dim) in a minibatch. + :param int batch_frames_in: maximum number of input frames in a minibatch. + :param int batch_frames_out: maximum number of output frames in a minibatch. + :param int batch_frames_out: maximum number of input+output frames in a minibatch. + :param str count: strategy to count maximum size of batch. + For choices, see espnet.asr.batchfy.BATCH_COUNT_CHOICES + + :param int max_length_in: maximum length of input to decide adaptive batch size + :param int max_length_out: maximum length of output to decide adaptive batch size + :param int num_batches: # number of batches to use (for debug) + :param int min_batch_size: minimum batch size (for multi-gpu) + :param bool shortest_first: Sort from batch with shortest samples + to longest if true, otherwise reverse + :param str batch_sort_key: how to sort data before creating minibatches + ["input", "output", "shuffle"] + :param bool swap_io: if True, use "input" as output and "output" + as input in `data` dict + :param bool mt: if True, use 0-axis of "output" as output and 1-axis of "output" + as input in `data` dict + :param int iaxis: dimension to access input + (for ASR, TTS iaxis=0, for MT iaxis="1".) + :param int oaxis: dimension to access output (for ASR, TTS, MT oaxis=0, + reserved for future research, -1 means all axis.) 
+ :return: List[List[Tuple[str, dict]]] list of batches + """ + # check args + if count not in BATCH_COUNT_CHOICES: + raise ValueError( + f"arg 'count' ({count}) should be one of {BATCH_COUNT_CHOICES}") + if batch_sort_key not in BATCH_SORT_KEY_CHOICES: + raise ValueError(f"arg 'batch_sort_key' ({batch_sort_key}) should be " + f"one of {BATCH_SORT_KEY_CHOICES}") + + ikey = "input" + okey = "output" + batch_sort_axis = 0 # index of list + if count == "auto": + if batch_size != 0: + count = "seq" + elif batch_bins != 0: + count = "bin" + elif batch_frames_in != 0 or batch_frames_out != 0 or batch_frames_inout != 0: + count = "frame" + else: + raise ValueError( + f"cannot detect `count` manually set one of {BATCH_COUNT_CHOICES}" + ) + logger.info(f"count is auto detected as {count}") + + if count != "seq" and batch_sort_key == "shuffle": + raise ValueError( + "batch_sort_key=shuffle is only available if batch_count=seq") + + category2data = {} # Dict[str, dict] + for v in data: + k = v['utt'] + category2data.setdefault(v.get("category"), {})[k] = v + + batches_list = [] # List[List[List[Tuple[str, dict]]]] + for d in category2data.values(): + if batch_sort_key == "shuffle": + batches = batchfy_shuffle(d, batch_size, min_batch_size, + num_batches, shortest_first) + batches_list.append(batches) + continue + + # sort it by input lengths (long to short) + sorted_data = sorted( + d.items(), + key=lambda data: int(data[1][batch_sort_key][batch_sort_axis]["shape"][0]), + reverse=not shortest_first, ) + logger.info("# utts: " + str(len(sorted_data))) + + if count == "seq": + batches = batchfy_by_seq( + sorted_data, + batch_size=batch_size, + max_length_in=max_length_in, + max_length_out=max_length_out, + min_batch_size=min_batch_size, + shortest_first=shortest_first, + ikey=ikey, + iaxis=iaxis, + okey=okey, + oaxis=oaxis, ) + if count == "bin": + batches = batchfy_by_bin( + sorted_data, + batch_bins=batch_bins, + min_batch_size=min_batch_size, + shortest_first=shortest_first, + ikey=ikey, + okey=okey, ) + if count == "frame": + batches = batchfy_by_frame( + sorted_data, + max_frames_in=batch_frames_in, + max_frames_out=batch_frames_out, + max_frames_inout=batch_frames_inout, + min_batch_size=min_batch_size, + shortest_first=shortest_first, + ikey=ikey, + okey=okey, ) + batches_list.append(batches) + + if len(batches_list) == 1: + batches = batches_list[0] + else: + # Concat list. This way is faster than "sum(batch_list, [])" + batches = list(itertools.chain(*batches_list)) + + # for debugging + if num_batches > 0: + batches = batches[:num_batches] + logger.info("# minibatches: " + str(len(batches))) + + # batch: List[List[Tuple[str, dict]]] + return batches diff --git a/deepspeech/io/collator.py b/deepspeech/io/collator.py index 3bec9875f..df3004790 100644 --- a/deepspeech/io/collator.py +++ b/deepspeech/io/collator.py @@ -11,33 +11,245 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
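A usage sketch for make_batchset, assuming the deepspeech package is importable. The entries below are hand-rolled and only fill the fields this code actually indexes ('utt' plus input/output 'shape'); real data.json entries carry more:

from deepspeech.io.batchfy import make_batchset

data = [
    {"utt": "utt1", "input": [{"shape": [1200, 80]}], "output": [{"shape": [30, 5000]}]},
    {"utt": "utt2", "input": [{"shape": [600, 80]}], "output": [{"shape": [15, 5000]}]},
    {"utt": "utt3", "input": [{"shape": [650, 80]}], "output": [{"shape": [18, 5000]}]},
]

# batch_size != 0, so count is auto-detected as "seq"; the long utt1 gets a
# batch of its own because its length factor halves the requested batch size.
batches = make_batchset(data, batch_size=2, max_length_in=800, max_length_out=150)
for batch in batches:
    print([utt for utt, _ in batch])   # ['utt1'] then ['utt3', 'utt2']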
+import io +from collections import namedtuple +from typing import Optional + import numpy as np +from yacs.config import CfgNode +from deepspeech.frontend.augmentor.augmentation import AugmentationPipeline +from deepspeech.frontend.featurizer.speech_featurizer import SpeechFeaturizer +from deepspeech.frontend.normalizer import FeatureNormalizer +from deepspeech.frontend.speech import SpeechSegment from deepspeech.frontend.utility import IGNORE_ID -from deepspeech.io.utility import pad_sequence +from deepspeech.io.utility import pad_list from deepspeech.utils.log import Log __all__ = ["SpeechCollator"] logger = Log(__name__).getlog() +# namedtupe need global for pickle. +TarLocalData = namedtuple('TarLocalData', ['tar2info', 'tar2object']) + class SpeechCollator(): - def __init__(self, keep_transcription_text=True): + @classmethod + def params(cls, config: Optional[CfgNode]=None) -> CfgNode: + default = CfgNode( + dict( + augmentation_config="", + random_seed=0, + mean_std_filepath="", + unit_type="char", + vocab_filepath="", + spm_model_prefix="", + specgram_type='linear', # 'linear', 'mfcc', 'fbank' + feat_dim=0, # 'mfcc', 'fbank' + delta_delta=False, # 'mfcc', 'fbank' + stride_ms=10.0, # ms + window_ms=20.0, # ms + n_fft=None, # fft points + max_freq=None, # None for samplerate/2 + target_sample_rate=16000, # target sample rate + use_dB_normalization=True, + target_dB=-20, + dither=1.0, # feature dither + keep_transcription_text=False)) + + if config is not None: + config.merge_from_other_cfg(default) + return default + + @classmethod + def from_config(cls, config): + """Build a SpeechCollator object from a config. + + Args: + config (yacs.config.CfgNode): configs object. + + Returns: + SpeechCollator: collator object. """ - Padding audio features with zeros to make them have the same shape (or - a user-defined shape) within one bach. + assert 'augmentation_config' in config.collator + assert 'keep_transcription_text' in config.collator + assert 'mean_std_filepath' in config.collator + assert 'vocab_filepath' in config.collator + assert 'specgram_type' in config.collator + assert 'n_fft' in config.collator + assert config.collator - if ``keep_transcription_text`` is False, text is token ids else is raw string. 
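The params() hook above makes the collator self-describing; a sketch of pulling those defaults into a yacs config node, assuming the deepspeech package is importable (no files are read until from_config is called with real paths):

from yacs.config import CfgNode
from deepspeech.io.collator import SpeechCollator

config = CfgNode()
config.collator = SpeechCollator.params()   # defaults from the classmethod above
print(config.collator.specgram_type, config.collator.stride_ms)   # linear 10.0
# SpeechCollator.from_config(config) additionally needs real vocab_filepath /
# mean_std_filepath values filled in under config.collator.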
+ if isinstance(config.collator.augmentation_config, (str, bytes)): + if config.collator.augmentation_config: + aug_file = io.open( + config.collator.augmentation_config, + mode='r', + encoding='utf8') + else: + aug_file = io.StringIO(initial_value='{}', newline='') + else: + aug_file = config.collator.augmentation_config + assert isinstance(aug_file, io.StringIO) + + speech_collator = cls( + aug_file=aug_file, + random_seed=0, + mean_std_filepath=config.collator.mean_std_filepath, + unit_type=config.collator.unit_type, + vocab_filepath=config.collator.vocab_filepath, + spm_model_prefix=config.collator.spm_model_prefix, + specgram_type=config.collator.specgram_type, + feat_dim=config.collator.feat_dim, + delta_delta=config.collator.delta_delta, + stride_ms=config.collator.stride_ms, + window_ms=config.collator.window_ms, + n_fft=config.collator.n_fft, + max_freq=config.collator.max_freq, + target_sample_rate=config.collator.target_sample_rate, + use_dB_normalization=config.collator.use_dB_normalization, + target_dB=config.collator.target_dB, + dither=config.collator.dither, + keep_transcription_text=config.collator.keep_transcription_text) + return speech_collator + + def __init__( + self, + aug_file, + mean_std_filepath, + vocab_filepath, + spm_model_prefix, + random_seed=0, + unit_type="char", + specgram_type='linear', # 'linear', 'mfcc', 'fbank' + feat_dim=0, # 'mfcc', 'fbank' + delta_delta=False, # 'mfcc', 'fbank' + stride_ms=10.0, # ms + window_ms=20.0, # ms + n_fft=None, # fft points + max_freq=None, # None for samplerate/2 + target_sample_rate=16000, # target sample rate + use_dB_normalization=True, + target_dB=-20, + dither=1.0, + keep_transcription_text=True): + """SpeechCollator Collator + + Args: + unit_type(str): token unit type, e.g. char, word, spm + vocab_filepath (str): vocab file path. + mean_std_filepath (str): mean and std file path, which suffix is *.npy + spm_model_prefix (str): spm model prefix, need if `unit_type` is spm. + augmentation_config (str, optional): augmentation json str. Defaults to '{}'. + stride_ms (float, optional): stride size in ms. Defaults to 10.0. + window_ms (float, optional): window size in ms. Defaults to 20.0. + n_fft (int, optional): fft points for rfft. Defaults to None. + max_freq (int, optional): max cut freq. Defaults to None. + target_sample_rate (int, optional): target sample rate which used for training. Defaults to 16000. + specgram_type (str, optional): 'linear', 'mfcc' or 'fbank'. Defaults to 'linear'. + feat_dim (int, optional): audio feature dim, using by 'mfcc' or 'fbank'. Defaults to None. + delta_delta (bool, optional): audio feature with delta-delta, using by 'fbank' or 'mfcc'. Defaults to False. + use_dB_normalization (bool, optional): do dB normalization. Defaults to True. + target_dB (int, optional): target dB. Defaults to -20. + random_seed (int, optional): for random generator. Defaults to 0. + keep_transcription_text (bool, optional): True, when not in training mode, will not do tokenizer; Defaults to False. + if ``keep_transcription_text`` is False, text is token ids else is raw string. + + Do augmentations + Padding audio features with zeros to make them have the same shape (or + a user-defined shape) within one batch. 
""" self._keep_transcription_text = keep_transcription_text + self._local_data = TarLocalData(tar2info={}, tar2object={}) + self._augmentation_pipeline = AugmentationPipeline( + augmentation_config=aug_file.read(), random_seed=random_seed) + + self._normalizer = FeatureNormalizer( + mean_std_filepath) if mean_std_filepath else None + + self._stride_ms = stride_ms + self._target_sample_rate = target_sample_rate + + self._speech_featurizer = SpeechFeaturizer( + unit_type=unit_type, + vocab_filepath=vocab_filepath, + spm_model_prefix=spm_model_prefix, + specgram_type=specgram_type, + feat_dim=feat_dim, + delta_delta=delta_delta, + stride_ms=stride_ms, + window_ms=window_ms, + n_fft=n_fft, + max_freq=max_freq, + target_sample_rate=target_sample_rate, + use_dB_normalization=use_dB_normalization, + target_dB=target_dB, + dither=dither) + + def _parse_tar(self, file): + """Parse a tar file to get a tarfile object + and a map containing tarinfoes + """ + result = {} + f = tarfile.open(file) + for tarinfo in f.getmembers(): + result[tarinfo.name] = tarinfo + return f, result + + def _subfile_from_tar(self, file): + """Get subfile object from tar. + + It will return a subfile object from tar file + and cached tar file info for next reading request. + """ + tarpath, filename = file.split(':', 1)[1].split('#', 1) + if 'tar2info' not in self._local_data.__dict__: + self._local_data.tar2info = {} + if 'tar2object' not in self._local_data.__dict__: + self._local_data.tar2object = {} + if tarpath not in self._local_data.tar2info: + object, infoes = self._parse_tar(tarpath) + self._local_data.tar2info[tarpath] = infoes + self._local_data.tar2object[tarpath] = object + return self._local_data.tar2object[tarpath].extractfile( + self._local_data.tar2info[tarpath][filename]) + + def process_utterance(self, audio_file, transcript): + """Load, augment, featurize and normalize for speech data. + + :param audio_file: Filepath or file object of audio file. + :type audio_file: str | file + :param transcript: Transcription text. + :type transcript: str + :return: Tuple of audio feature tensor and data of transcription part, + where transcription part could be token ids or text. 
+ :rtype: tuple of (2darray, list) + """ + if isinstance(audio_file, str) and audio_file.startswith('tar:'): + speech_segment = SpeechSegment.from_file( + self._subfile_from_tar(audio_file), transcript) + else: + speech_segment = SpeechSegment.from_file(audio_file, transcript) + + # audio augment + self._augmentation_pipeline.transform_audio(speech_segment) + + specgram, transcript_part = self._speech_featurizer.featurize( + speech_segment, self._keep_transcription_text) + if self._normalizer: + specgram = self._normalizer.apply(specgram) + + # specgram augment + specgram = self._augmentation_pipeline.transform_feature(specgram) + return specgram, transcript_part + def __call__(self, batch): """batch examples Args: batch ([List]): batch is (audio, text) - audio (np.ndarray) shape (D, T) + audio (np.ndarray) shape (T, D) text (List[int] or str): shape (U,) Returns: @@ -53,11 +265,12 @@ class SpeechCollator(): text_lens = [] utts = [] for utt, audio, text in batch: + audio, text = self.process_utterance(audio, text) #utt utts.append(utt) # audio - audios.append(audio.T) # [T, D] - audio_lens.append(audio.shape[1]) + audios.append(audio) # [T, D] + audio_lens.append(audio.shape[0]) # text # for training, text is token ids # else text is string, convert to unicode ord @@ -72,10 +285,37 @@ class SpeechCollator(): texts.append(tokens) text_lens.append(tokens.shape[0]) - padded_audios = pad_sequence( - audios, padding_value=0.0).astype(np.float32) #[B, T, D] - audio_lens = np.array(audio_lens).astype(np.int64) - padded_texts = pad_sequence( - texts, padding_value=IGNORE_ID).astype(np.int64) - text_lens = np.array(text_lens).astype(np.int64) - return utts, padded_audios, audio_lens, padded_texts, text_lens + #[B, T, D] + xs_pad = pad_list(audios, 0.0).astype(np.float32) + ilens = np.array(audio_lens).astype(np.int64) + ys_pad = pad_list(texts, IGNORE_ID).astype(np.int64) + olens = np.array(text_lens).astype(np.int64) + return utts, xs_pad, ilens, ys_pad, olens + + @property + def manifest(self): + return self._manifest + + @property + def vocab_size(self): + return self._speech_featurizer.vocab_size + + @property + def vocab_list(self): + return self._speech_featurizer.vocab_list + + @property + def vocab_dict(self): + return self._speech_featurizer.vocab_dict + + @property + def text_feature(self): + return self._speech_featurizer.text_feature + + @property + def feature_size(self): + return self._speech_featurizer.feature_size + + @property + def stride_ms(self): + return self._speech_featurizer.stride_ms diff --git a/deepspeech/io/collator_st.py b/deepspeech/io/collator_st.py new file mode 100644 index 000000000..28573366b --- /dev/null +++ b/deepspeech/io/collator_st.py @@ -0,0 +1,631 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
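The batching in __call__ above reduces to right-padding variable-length arrays to the longest one in the batch. A stand-in for pad_list (not the project's implementation) that reproduces the shapes it produces:

import numpy as np

def pad_to_longest(xs, pad_value):
    # right-pad a list of (T_i, ...) arrays into one (B, T_max, ...) array
    max_len = max(x.shape[0] for x in xs)
    out = np.full((len(xs), max_len) + xs[0].shape[1:], pad_value, dtype=xs[0].dtype)
    for i, x in enumerate(xs):
        out[i, :x.shape[0]] = x
    return out

audios = [np.random.rand(50, 80), np.random.rand(73, 80)]   # (T, D) features
texts = [np.array([2, 7, 9]), np.array([4, 5])]

print(pad_to_longest(audios, 0.0).shape)   # (2, 73, 80)
print(pad_to_longest(texts, -1))           # -1 plays the role of IGNORE_ID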
+import io +from collections import namedtuple +from typing import Optional + +import kaldiio +import numpy as np +from yacs.config import CfgNode + +from deepspeech.frontend.augmentor.augmentation import AugmentationPipeline +from deepspeech.frontend.featurizer.speech_featurizer import SpeechFeaturizer +from deepspeech.frontend.featurizer.text_featurizer import TextFeaturizer +from deepspeech.frontend.normalizer import FeatureNormalizer +from deepspeech.frontend.speech import SpeechSegment +from deepspeech.frontend.utility import IGNORE_ID +from deepspeech.io.utility import pad_sequence +from deepspeech.utils.log import Log + +__all__ = ["SpeechCollator", "KaldiPrePorocessedCollator"] + +logger = Log(__name__).getlog() + +# namedtupe need global for pickle. +TarLocalData = namedtuple('TarLocalData', ['tar2info', 'tar2object']) + + +class SpeechCollator(): + @classmethod + def params(cls, config: Optional[CfgNode]=None) -> CfgNode: + default = CfgNode( + dict( + augmentation_config="", + random_seed=0, + mean_std_filepath="", + unit_type="char", + vocab_filepath="", + spm_model_prefix="", + specgram_type='linear', # 'linear', 'mfcc', 'fbank' + feat_dim=0, # 'mfcc', 'fbank' + delta_delta=False, # 'mfcc', 'fbank' + stride_ms=10.0, # ms + window_ms=20.0, # ms + n_fft=None, # fft points + max_freq=None, # None for samplerate/2 + target_sample_rate=16000, # target sample rate + use_dB_normalization=True, + target_dB=-20, + dither=1.0, # feature dither + keep_transcription_text=False)) + + if config is not None: + config.merge_from_other_cfg(default) + return default + + @classmethod + def from_config(cls, config): + """Build a SpeechCollator object from a config. + + Args: + config (yacs.config.CfgNode): configs object. + + Returns: + SpeechCollator: collator object. 
+ """ + assert 'augmentation_config' in config.collator + assert 'keep_transcription_text' in config.collator + assert 'mean_std_filepath' in config.collator + assert 'vocab_filepath' in config.collator + assert 'specgram_type' in config.collator + assert 'n_fft' in config.collator + assert config.collator + + if isinstance(config.collator.augmentation_config, (str, bytes)): + if config.collator.augmentation_config: + aug_file = io.open( + config.collator.augmentation_config, + mode='r', + encoding='utf8') + else: + aug_file = io.StringIO(initial_value='{}', newline='') + else: + aug_file = config.collator.augmentation_config + assert isinstance(aug_file, io.StringIO) + + speech_collator = cls( + aug_file=aug_file, + random_seed=0, + mean_std_filepath=config.collator.mean_std_filepath, + unit_type=config.collator.unit_type, + vocab_filepath=config.collator.vocab_filepath, + spm_model_prefix=config.collator.spm_model_prefix, + specgram_type=config.collator.specgram_type, + feat_dim=config.collator.feat_dim, + delta_delta=config.collator.delta_delta, + stride_ms=config.collator.stride_ms, + window_ms=config.collator.window_ms, + n_fft=config.collator.n_fft, + max_freq=config.collator.max_freq, + target_sample_rate=config.collator.target_sample_rate, + use_dB_normalization=config.collator.use_dB_normalization, + target_dB=config.collator.target_dB, + dither=config.collator.dither, + keep_transcription_text=config.collator.keep_transcription_text) + return speech_collator + + def __init__( + self, + aug_file, + mean_std_filepath, + vocab_filepath, + spm_model_prefix, + random_seed=0, + unit_type="char", + specgram_type='linear', # 'linear', 'mfcc', 'fbank' + feat_dim=0, # 'mfcc', 'fbank' + delta_delta=False, # 'mfcc', 'fbank' + stride_ms=10.0, # ms + window_ms=20.0, # ms + n_fft=None, # fft points + max_freq=None, # None for samplerate/2 + target_sample_rate=16000, # target sample rate + use_dB_normalization=True, + target_dB=-20, + dither=1.0, + keep_transcription_text=True): + """SpeechCollator Collator + + Args: + unit_type(str): token unit type, e.g. char, word, spm + vocab_filepath (str): vocab file path. + mean_std_filepath (str): mean and std file path, which suffix is *.npy + spm_model_prefix (str): spm model prefix, need if `unit_type` is spm. + augmentation_config (str, optional): augmentation json str. Defaults to '{}'. + stride_ms (float, optional): stride size in ms. Defaults to 10.0. + window_ms (float, optional): window size in ms. Defaults to 20.0. + n_fft (int, optional): fft points for rfft. Defaults to None. + max_freq (int, optional): max cut freq. Defaults to None. + target_sample_rate (int, optional): target sample rate which used for training. Defaults to 16000. + specgram_type (str, optional): 'linear', 'mfcc' or 'fbank'. Defaults to 'linear'. + feat_dim (int, optional): audio feature dim, using by 'mfcc' or 'fbank'. Defaults to None. + delta_delta (bool, optional): audio feature with delta-delta, using by 'fbank' or 'mfcc'. Defaults to False. + use_dB_normalization (bool, optional): do dB normalization. Defaults to True. + target_dB (int, optional): target dB. Defaults to -20. + random_seed (int, optional): for random generator. Defaults to 0. + keep_transcription_text (bool, optional): True, when not in training mode, will not do tokenizer; Defaults to False. + if ``keep_transcription_text`` is False, text is token ids else is raw string. 
+ + Do augmentations + Padding audio features with zeros to make them have the same shape (or + a user-defined shape) within one batch. + """ + self._keep_transcription_text = keep_transcription_text + + self._local_data = TarLocalData(tar2info={}, tar2object={}) + self._augmentation_pipeline = AugmentationPipeline( + augmentation_config=aug_file.read(), random_seed=random_seed) + + self._normalizer = FeatureNormalizer( + mean_std_filepath) if mean_std_filepath else None + + self._stride_ms = stride_ms + self._target_sample_rate = target_sample_rate + + self._speech_featurizer = SpeechFeaturizer( + unit_type=unit_type, + vocab_filepath=vocab_filepath, + spm_model_prefix=spm_model_prefix, + specgram_type=specgram_type, + feat_dim=feat_dim, + delta_delta=delta_delta, + stride_ms=stride_ms, + window_ms=window_ms, + n_fft=n_fft, + max_freq=max_freq, + target_sample_rate=target_sample_rate, + use_dB_normalization=use_dB_normalization, + target_dB=target_dB, + dither=dither) + + def _parse_tar(self, file): + """Parse a tar file to get a tarfile object + and a map containing tarinfoes + """ + result = {} + f = tarfile.open(file) + for tarinfo in f.getmembers(): + result[tarinfo.name] = tarinfo + return f, result + + def _subfile_from_tar(self, file): + """Get subfile object from tar. + + It will return a subfile object from tar file + and cached tar file info for next reading request. + """ + tarpath, filename = file.split(':', 1)[1].split('#', 1) + if 'tar2info' not in self._local_data.__dict__: + self._local_data.tar2info = {} + if 'tar2object' not in self._local_data.__dict__: + self._local_data.tar2object = {} + if tarpath not in self._local_data.tar2info: + object, infoes = self._parse_tar(tarpath) + self._local_data.tar2info[tarpath] = infoes + self._local_data.tar2object[tarpath] = object + return self._local_data.tar2object[tarpath].extractfile( + self._local_data.tar2info[tarpath][filename]) + + @property + def manifest(self): + return self._manifest + + @property + def vocab_size(self): + return self._speech_featurizer.vocab_size + + @property + def vocab_list(self): + return self._speech_featurizer.vocab_list + + @property + def vocab_dict(self): + return self._speech_featurizer.vocab_dict + + @property + def text_feature(self): + return self._speech_featurizer.text_feature + + @property + def feature_size(self): + return self._speech_featurizer.feature_size + + @property + def stride_ms(self): + return self._speech_featurizer.stride_ms + + def process_utterance(self, audio_file, translation): + """Load, augment, featurize and normalize for speech data. + + :param audio_file: Filepath or file object of audio file. + :type audio_file: str | file + :param translation: translation text. + :type translation: str + :return: Tuple of audio feature tensor and data of translation part, + where translation part could be token ids or text. 
+ :rtype: tuple of (2darray, list) + """ + if isinstance(audio_file, str) and audio_file.startswith('tar:'): + speech_segment = SpeechSegment.from_file( + self._subfile_from_tar(audio_file), translation) + else: + speech_segment = SpeechSegment.from_file(audio_file, translation) + + # audio augment + self._augmentation_pipeline.transform_audio(speech_segment) + + specgram, translation_part = self._speech_featurizer.featurize( + speech_segment, self._keep_transcription_text) + if self._normalizer: + specgram = self._normalizer.apply(specgram) + + # specgram augment + specgram = self._augmentation_pipeline.transform_feature(specgram) + return specgram, translation_part + + def __call__(self, batch): + """batch examples + + Args: + batch ([List]): batch is (audio, text) + audio (np.ndarray) shape (T, D) + text (List[int] or str): shape (U,) + + Returns: + tuple(audio, text, audio_lens, text_lens): batched data. + audio : (B, Tmax, D) + audio_lens: (B) + text : (B, Umax) + text_lens: (B) + """ + audios = [] + audio_lens = [] + texts = [] + text_lens = [] + utts = [] + for utt, audio, text in batch: + audio, text = self.process_utterance(audio, text) + #utt + utts.append(utt) + # audio + audios.append(audio) # [T, D] + audio_lens.append(audio.shape[0]) + # text + # for training, text is token ids + # else text is string, convert to unicode ord + tokens = [] + if self._keep_transcription_text: + assert isinstance(text, str), (type(text), text) + tokens = [ord(t) for t in text] + else: + tokens = text # token ids + tokens = tokens if isinstance(tokens, np.ndarray) else np.array( + tokens, dtype=np.int64) + texts.append(tokens) + text_lens.append(tokens.shape[0]) + + padded_audios = pad_sequence( + audios, padding_value=0.0).astype(np.float32) #[B, T, D] + audio_lens = np.array(audio_lens).astype(np.int64) + padded_texts = pad_sequence( + texts, padding_value=IGNORE_ID).astype(np.int64) + text_lens = np.array(text_lens).astype(np.int64) + return utts, padded_audios, audio_lens, padded_texts, text_lens + + +class TripletSpeechCollator(SpeechCollator): + def process_utterance(self, audio_file, translation, transcript): + """Load, augment, featurize and normalize for speech data. + + :param audio_file: Filepath or file object of audio file. + :type audio_file: str | file + :param translation: translation text. + :type translation: str + :return: Tuple of audio feature tensor and data of translation part, + where translation part could be token ids or text. + :rtype: tuple of (2darray, list) + """ + if isinstance(audio_file, str) and audio_file.startswith('tar:'): + speech_segment = SpeechSegment.from_file( + self._subfile_from_tar(audio_file), translation) + else: + speech_segment = SpeechSegment.from_file(audio_file, translation) + + # audio augment + self._augmentation_pipeline.transform_audio(speech_segment) + + specgram, translation_part = self._speech_featurizer.featurize( + speech_segment, self._keep_transcription_text) + transcript_part = self._speech_featurizer._text_featurizer.featurize( + transcript) + if self._normalizer: + specgram = self._normalizer.apply(specgram) + + # specgram augment + specgram = self._augmentation_pipeline.transform_feature(specgram) + return specgram, translation_part, transcript_part + + def __call__(self, batch): + """batch examples + + Args: + batch ([List]): batch is (audio, text) + audio (np.ndarray) shape (T, D) + text (List[int] or str): shape (U,) + + Returns: + tuple(audio, text, audio_lens, text_lens): batched data. 
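When keep_transcription_text is enabled, the collators above ship raw text as Unicode code points instead of vocabulary ids; the mapping is lossless and reversible with chr:

text = "hello 你好"
tokens = [ord(t) for t in text]
print(tokens)                            # [104, 101, 108, 108, 111, 32, 20320, 22909]
print("".join(chr(t) for t in tokens))   # hello 你好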
+ audio : (B, Tmax, D) + audio_lens: (B) + text : (B, Umax) + text_lens: (B) + """ + audios = [] + audio_lens = [] + translation_text = [] + translation_text_lens = [] + transcription_text = [] + transcription_text_lens = [] + + utts = [] + for utt, audio, translation, transcription in batch: + audio, translation, transcription = self.process_utterance( + audio, translation, transcription) + #utt + utts.append(utt) + # audio + audios.append(audio) # [T, D] + audio_lens.append(audio.shape[0]) + # text + # for training, text is token ids + # else text is string, convert to unicode ord + tokens = [[], []] + for idx, text in enumerate([translation, transcription]): + if self._keep_transcription_text: + assert isinstance(text, str), (type(text), text) + tokens[idx] = [ord(t) for t in text] + else: + tokens[idx] = text # token ids + tokens[idx] = tokens[idx] if isinstance( + tokens[idx], np.ndarray) else np.array( + tokens[idx], dtype=np.int64) + translation_text.append(tokens[0]) + translation_text_lens.append(tokens[0].shape[0]) + transcription_text.append(tokens[1]) + transcription_text_lens.append(tokens[1].shape[0]) + + padded_audios = pad_sequence( + audios, padding_value=0.0).astype(np.float32) #[B, T, D] + audio_lens = np.array(audio_lens).astype(np.int64) + padded_translation = pad_sequence( + translation_text, padding_value=IGNORE_ID).astype(np.int64) + translation_lens = np.array(translation_text_lens).astype(np.int64) + padded_transcription = pad_sequence( + transcription_text, padding_value=IGNORE_ID).astype(np.int64) + transcription_lens = np.array(transcription_text_lens).astype(np.int64) + return utts, padded_audios, audio_lens, ( + padded_translation, padded_transcription), (translation_lens, + transcription_lens) + + +class KaldiPrePorocessedCollator(SpeechCollator): + @classmethod + def params(cls, config: Optional[CfgNode]=None) -> CfgNode: + default = CfgNode( + dict( + augmentation_config="", + random_seed=0, + unit_type="char", + vocab_filepath="", + spm_model_prefix="", + feat_dim=0, + stride_ms=10.0, + keep_transcription_text=False)) + + if config is not None: + config.merge_from_other_cfg(default) + return default + + @classmethod + def from_config(cls, config): + """Build a SpeechCollator object from a config. + + Args: + config (yacs.config.CfgNode): configs object. + + Returns: + SpeechCollator: collator object. + """ + assert 'augmentation_config' in config.collator + assert 'keep_transcription_text' in config.collator + assert 'vocab_filepath' in config.collator + assert config.collator + + if isinstance(config.collator.augmentation_config, (str, bytes)): + if config.collator.augmentation_config: + aug_file = io.open( + config.collator.augmentation_config, + mode='r', + encoding='utf8') + else: + aug_file = io.StringIO(initial_value='{}', newline='') + else: + aug_file = config.collator.augmentation_config + assert isinstance(aug_file, io.StringIO) + + speech_collator = cls( + aug_file=aug_file, + random_seed=0, + unit_type=config.collator.unit_type, + vocab_filepath=config.collator.vocab_filepath, + spm_model_prefix=config.collator.spm_model_prefix, + feat_dim=config.collator.feat_dim, + stride_ms=config.collator.stride_ms, + keep_transcription_text=config.collator.keep_transcription_text) + return speech_collator + + def __init__(self, + aug_file, + vocab_filepath, + spm_model_prefix, + random_seed=0, + unit_type="char", + feat_dim=0, + stride_ms=10.0, + keep_transcription_text=True): + """SpeechCollator Collator + + Args: + unit_type(str): token unit type, e.g. 
char, word, spm + vocab_filepath (str): vocab file path. + spm_model_prefix (str): spm model prefix, need if `unit_type` is spm. + augmentation_config (str, optional): augmentation json str. Defaults to '{}'. + random_seed (int, optional): for random generator. Defaults to 0. + keep_transcription_text (bool, optional): True, when not in training mode, will not do tokenizer; Defaults to False. + if ``keep_transcription_text`` is False, text is token ids else is raw string. + + Do augmentations + Padding audio features with zeros to make them have the same shape (or + a user-defined shape) within one batch. + """ + self._keep_transcription_text = keep_transcription_text + self._feat_dim = feat_dim + self._stride_ms = stride_ms + + self._local_data = TarLocalData(tar2info={}, tar2object={}) + self._augmentation_pipeline = AugmentationPipeline( + augmentation_config=aug_file.read(), random_seed=random_seed) + + self._text_featurizer = TextFeaturizer(unit_type, vocab_filepath, + spm_model_prefix) + + def process_utterance(self, audio_file, translation): + """Load, augment, featurize and normalize for speech data. + + :param audio_file: Filepath or file object of kaldi processed feature. + :type audio_file: str | file + :param translation: Translation text. + :type translation: str + :return: Tuple of audio feature tensor and data of translation part, + where translation part could be token ids or text. + :rtype: tuple of (2darray, list) + """ + specgram = kaldiio.load_mat(audio_file) + assert specgram.shape[ + 1] == self._feat_dim, 'expect feat dim {}, but got {}'.format( + self._feat_dim, specgram.shape[1]) + + # specgram augment + specgram = self._augmentation_pipeline.transform_feature(specgram) + + if self._keep_transcription_text: + return specgram, translation + else: + text_ids = self._text_featurizer.featurize(translation) + return specgram, text_ids + + +class TripletKaldiPrePorocessedCollator(KaldiPrePorocessedCollator): + def process_utterance(self, audio_file, translation, transcript): + """Load, augment, featurize and normalize for speech data. + + :param audio_file: Filepath or file object of kali processed feature. + :type audio_file: str | file + :param translation: Translation text. + :type translation: str + :param transcript: Transcription text. + :type transcript: str + :return: Tuple of audio feature tensor and data of translation and transcription parts, + where translation and transcription parts could be token ids or text. + :rtype: tuple of (2darray, (list, list)) + """ + specgram = kaldiio.load_mat(audio_file) + assert specgram.shape[ + 1] == self._feat_dim, 'expect feat dim {}, but got {}'.format( + self._feat_dim, specgram.shape[1]) + + # specgram augment + specgram = self._augmentation_pipeline.transform_feature(specgram) + + if self._keep_transcription_text: + return specgram, translation, transcript + else: + translation_text_ids = self._text_featurizer.featurize(translation) + transcript_text_ids = self._text_featurizer.featurize(transcript) + return specgram, translation_text_ids, transcript_text_ids + + def __call__(self, batch): + """batch examples + + Args: + batch ([List]): batch is (audio, text) + audio (np.ndarray) shape (T, D) + translation (List[int] or str): shape (U,) + transcription (List[int] or str): shape (V,) + + Returns: + tuple(audio, text, audio_lens, text_lens): batched data. 
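For the Kaldi-preprocessed collators, the manifest's audio field is already a feature specifier that kaldiio can resolve directly; a sketch of the load-and-check step, where the ark path, offset and feature dimension are placeholders:

import kaldiio

feat_dim = 83                       # e.g. 80 fbank + 3 pitch, set by the config
audio_file = "data/feats.ark:42"    # hypothetical "<ark path>:<byte offset>" specifier
specgram = kaldiio.load_mat(audio_file)   # numpy array of shape (T, feat_dim)
assert specgram.shape[1] == feat_dim, \
    'expect feat dim {}, but got {}'.format(feat_dim, specgram.shape[1])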
+ audio : (B, Tmax, D) + audio_lens: (B) + translation_text : (B, Umax) + translation_text_lens: (B) + transcription_text : (B, Vmax) + transcription_text_lens: (B) + """ + audios = [] + audio_lens = [] + translation_text = [] + translation_text_lens = [] + transcription_text = [] + transcription_text_lens = [] + + utts = [] + for utt, audio, translation, transcription in batch: + audio, translation, transcription = self.process_utterance( + audio, translation, transcription) + #utt + utts.append(utt) + # audio + audios.append(audio) # [T, D] + audio_lens.append(audio.shape[0]) + # text + # for training, text is token ids + # else text is string, convert to unicode ord + tokens = [[], []] + for idx, text in enumerate([translation, transcription]): + if self._keep_transcription_text: + assert isinstance(text, str), (type(text), text) + tokens[idx] = [ord(t) for t in text] + else: + tokens[idx] = text # token ids + tokens[idx] = tokens[idx] if isinstance( + tokens[idx], np.ndarray) else np.array( + tokens[idx], dtype=np.int64) + translation_text.append(tokens[0]) + translation_text_lens.append(tokens[0].shape[0]) + transcription_text.append(tokens[1]) + transcription_text_lens.append(tokens[1].shape[0]) + + padded_audios = pad_sequence( + audios, padding_value=0.0).astype(np.float32) #[B, T, D] + audio_lens = np.array(audio_lens).astype(np.int64) + padded_translation = pad_sequence( + translation_text, padding_value=IGNORE_ID).astype(np.int64) + translation_lens = np.array(translation_text_lens).astype(np.int64) + padded_transcription = pad_sequence( + transcription_text, padding_value=IGNORE_ID).astype(np.int64) + transcription_lens = np.array(transcription_text_lens).astype(np.int64) + return utts, padded_audios, audio_lens, ( + padded_translation, padded_transcription), (translation_lens, + transcription_lens) diff --git a/deepspeech/io/converter.py b/deepspeech/io/converter.py new file mode 100644 index 000000000..b80c7b204 --- /dev/null +++ b/deepspeech/io/converter.py @@ -0,0 +1,81 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import numpy as np + +from deepspeech.io.utility import pad_list +from deepspeech.utils.log import Log + +__all__ = ["CustomConverter"] + +logger = Log(__name__).getlog() + + +class CustomConverter(): + """Custom batch converter. + + Args: + subsampling_factor (int): The subsampling factor. + dtype (np.dtype): Data type to convert. + + """ + + def __init__(self, subsampling_factor=1, dtype=np.float32): + """Construct a CustomConverter object.""" + self.subsampling_factor = subsampling_factor + self.ignore_id = -1 + self.dtype = dtype + + def __call__(self, batch): + """Transform a batch and send it to a device. + + Args: + batch (list): The batch to transform. + + Returns: + tuple(np.ndarray, nn.ndarray, nn.ndarray) + + """ + # batch should be located in list + assert len(batch) == 1 + (xs, ys), utts = batch[0] + assert xs[0] is not None, "please check Reader and Augmentation impl." 
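Both collators above and the CustomConverter below share the same right-padding convention: audio features are zero-padded to the longest utterance in the batch ([B, Tmax, D]), while target token ids are padded with an ignore index (IGNORE_ID in the collators, -1 in CustomConverter) so padded positions can be skipped later. A minimal numpy sketch of that behaviour; the helper name and the sample data are illustrative only, not part of this patch:

import numpy as np

IGNORE_ID = -1  # padding value for target token ids, as used by the collators above

def pad_token_ids(texts, padding_value=IGNORE_ID):
    # Right-pad a list of 1-D int64 arrays to the longest length -> (B, Umax).
    umax = max(len(t) for t in texts)
    padded = np.full((len(texts), umax), padding_value, dtype=np.int64)
    for i, t in enumerate(texts):
        padded[i, :len(t)] = t
    lens = np.array([len(t) for t in texts], dtype=np.int64)
    return padded, lens

texts = [np.array([2, 7, 9]), np.array([4, 5])]
padded, lens = pad_token_ids(texts)
# padded -> [[2, 7, 9], [4, 5, -1]], lens -> [3, 2]
# pad_sequence(..., padding_value=IGNORE_ID) in the collators yields the same layout.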
+ + # perform subsampling + if self.subsampling_factor > 1: + xs = [x[::self.subsampling_factor, :] for x in xs] + + # get batch of lengths of input sequences + ilens = np.array([x.shape[0] for x in xs]) + + # perform padding and convert to tensor + # currently only support real number + if xs[0].dtype.kind == "c": + xs_pad_real = pad_list([x.real for x in xs], 0).astype(self.dtype) + xs_pad_imag = pad_list([x.imag for x in xs], 0).astype(self.dtype) + # Note(kamo): + # {'real': ..., 'imag': ...} will be changed to ComplexTensor in E2E. + # Don't create ComplexTensor and give it E2E here + # because torch.nn.DataParellel can't handle it. + xs_pad = {"real": xs_pad_real, "imag": xs_pad_imag} + else: + xs_pad = pad_list(xs, 0).astype(self.dtype) + + # NOTE: this is for multi-output (e.g., speech translation) + ys_pad = pad_list( + [np.array(y[0][:]) if isinstance(y, tuple) else y for y in ys], + self.ignore_id) + + olens = np.array( + [y[0].shape[0] if isinstance(y, tuple) else y.shape[0] for y in ys]) + return utts, xs_pad, ilens, ys_pad, olens diff --git a/deepspeech/io/dataloader.py b/deepspeech/io/dataloader.py new file mode 100644 index 000000000..310f5f581 --- /dev/null +++ b/deepspeech/io/dataloader.py @@ -0,0 +1,170 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any +from typing import Dict +from typing import List +from typing import Text + +import numpy as np +from paddle.io import DataLoader + +from deepspeech.frontend.utility import read_manifest +from deepspeech.io.batchfy import make_batchset +from deepspeech.io.converter import CustomConverter +from deepspeech.io.dataset import TransformDataset +from deepspeech.io.reader import LoadInputsAndTargets +from deepspeech.utils.log import Log + +__all__ = ["BatchDataLoader"] + +logger = Log(__name__).getlog() + + +def feat_dim_and_vocab_size(data_json: List[Dict[Text, Any]], + mode: Text="asr", + iaxis=0, + oaxis=0): + if mode == 'asr': + feat_dim = data_json[0]['input'][oaxis]['shape'][1] + vocab_size = data_json[0]['output'][oaxis]['shape'][1] + else: + raise ValueError(f"{mode} mode not support!") + return feat_dim, vocab_size + + +def batch_collate(x): + """de-minibatch, since user compose batch. 
+ + Args: + x (List[Tuple]): [(utts, xs, ilens, ys, olens)] + + Returns: + Tuple: (utts, xs, ilens, ys, olens) + """ + return x[0] + + +class BatchDataLoader(): + def __init__(self, + json_file: str, + train_mode: bool, + sortagrad: bool=False, + batch_size: int=0, + maxlen_in: float=float('inf'), + maxlen_out: float=float('inf'), + minibatches: int=0, + mini_batch_size: int=1, + batch_count: str='auto', + batch_bins: int=0, + batch_frames_in: int=0, + batch_frames_out: int=0, + batch_frames_inout: int=0, + preprocess_conf=None, + n_iter_processes: int=1, + subsampling_factor: int=1, + num_encs: int=1): + self.json_file = json_file + self.train_mode = train_mode + self.use_sortagrad = sortagrad == -1 or sortagrad > 0 + self.batch_size = batch_size + self.maxlen_in = maxlen_in + self.maxlen_out = maxlen_out + self.batch_count = batch_count + self.batch_bins = batch_bins + self.batch_frames_in = batch_frames_in + self.batch_frames_out = batch_frames_out + self.batch_frames_inout = batch_frames_inout + self.subsampling_factor = subsampling_factor + self.num_encs = num_encs + self.preprocess_conf = preprocess_conf + self.n_iter_processes = n_iter_processes + + # read json data + self.data_json = read_manifest(json_file) + self.feat_dim, self.vocab_size = feat_dim_and_vocab_size( + self.data_json, mode='asr') + + # make minibatch list (variable length) + self.minibaches = make_batchset( + self.data_json, + batch_size, + maxlen_in, + maxlen_out, + minibatches, # for debug + min_batch_size=mini_batch_size, + shortest_first=self.use_sortagrad, + count=batch_count, + batch_bins=batch_bins, + batch_frames_in=batch_frames_in, + batch_frames_out=batch_frames_out, + batch_frames_inout=batch_frames_inout, + iaxis=0, + oaxis=0, ) + + # data reader + self.reader = LoadInputsAndTargets( + mode="asr", + load_output=True, + preprocess_conf=preprocess_conf, + preprocess_args={"train": + train_mode}, # Switch the mode of preprocessing + ) + + # Setup a converter + if num_encs == 1: + self.converter = CustomConverter( + subsampling_factor=subsampling_factor, dtype=np.float32) + else: + assert NotImplementedError("not impl CustomConverterMulEnc.") + + # hack to make batchsize argument as 1 + # actual bathsize is included in a list + # default collate function converts numpy array to pytorch tensor + # we used an empty collate function instead which returns list + self.dataset = TransformDataset(self.minibaches, self.converter, + self.reader) + + self.dataloader = DataLoader( + dataset=self.dataset, + batch_size=1, + shuffle=not self.use_sortagrad if self.train_mode else False, + collate_fn=batch_collate, + num_workers=self.n_iter_processes, ) + + def __repr__(self): + echo = f"<{self.__class__.__module__}.{self.__class__.__name__} object at {hex(id(self))}> " + echo += f"train_mode: {self.train_mode}, " + echo += f"sortagrad: {self.use_sortagrad}, " + echo += f"batch_size: {self.batch_size}, " + echo += f"maxlen_in: {self.maxlen_in}, " + echo += f"maxlen_out: {self.maxlen_out}, " + echo += f"batch_count: {self.batch_count}, " + echo += f"batch_bins: {self.batch_bins}, " + echo += f"batch_frames_in: {self.batch_frames_in}, " + echo += f"batch_frames_out: {self.batch_frames_out}, " + echo += f"batch_frames_inout: {self.batch_frames_inout}, " + echo += f"subsampling_factor: {self.subsampling_factor}, " + echo += f"num_encs: {self.num_encs}, " + echo += f"num_workers: {self.n_iter_processes}, " + echo += f"file: {self.json_file}" + return echo + + def __len__(self): + return len(self.dataloader) + + def 
__iter__(self): + return self.dataloader.__iter__() + + def __call__(self): + return self.__iter__() diff --git a/deepspeech/io/dataset.py b/deepspeech/io/dataset.py index bd5f630d2..e58e03b4e 100644 --- a/deepspeech/io/dataset.py +++ b/deepspeech/io/dataset.py @@ -11,72 +11,31 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import io -import tarfile -import time -from collections import namedtuple from typing import Optional -import numpy as np from paddle.io import Dataset from yacs.config import CfgNode -from deepspeech.frontend.augmentor.augmentation import AugmentationPipeline -from deepspeech.frontend.featurizer.speech_featurizer import SpeechFeaturizer -from deepspeech.frontend.normalizer import FeatureNormalizer -from deepspeech.frontend.speech import SpeechSegment from deepspeech.frontend.utility import read_manifest from deepspeech.utils.log import Log -__all__ = [ - "ManifestDataset", -] +__all__ = ["ManifestDataset", "TripletManifestDataset", "TransformDataset"] logger = Log(__name__).getlog() -# namedtupe need global for pickle. -TarLocalData = namedtuple('TarLocalData', ['tar2info', 'tar2object']) - class ManifestDataset(Dataset): @classmethod def params(cls, config: Optional[CfgNode]=None) -> CfgNode: default = CfgNode( dict( - train_manifest="", - dev_manifest="", - test_manifest="", manifest="", - unit_type="char", - vocab_filepath="", - spm_model_prefix="", - mean_std_filepath="", - augmentation_config="", max_input_len=27.0, min_input_len=0.0, max_output_len=float('inf'), min_output_len=0.0, max_output_input_ratio=float('inf'), - min_output_input_ratio=0.0, - stride_ms=10.0, # ms - window_ms=20.0, # ms - n_fft=None, # fft points - max_freq=None, # None for samplerate/2 - raw_wav=True, # use raw_wav or kaldi feature - specgram_type='linear', # 'linear', 'mfcc', 'fbank' - feat_dim=0, # 'mfcc', 'fbank' - delta_delta=False, # 'mfcc', 'fbank' - dither=1.0, # feature dither - target_sample_rate=16000, # target sample rate - use_dB_normalization=True, - target_dB=-20, - random_seed=0, - keep_transcription_text=False, - batch_size=32, # batch size - num_workers=0, # data loader workers - sortagrad=False, # sorted in first epoch when True - shuffle_method="batch_shuffle", # 'batch_shuffle', 'instance_shuffle' - )) + min_output_input_ratio=0.0, )) if config is not None: config.merge_from_other_cfg(default) @@ -94,128 +53,44 @@ class ManifestDataset(Dataset): """ assert 'manifest' in config.data assert config.data.manifest - assert 'keep_transcription_text' in config.data - - if isinstance(config.data.augmentation_config, (str, bytes)): - if config.data.augmentation_config: - aug_file = io.open( - config.data.augmentation_config, mode='r', encoding='utf8') - else: - aug_file = io.StringIO(initial_value='{}', newline='') - else: - aug_file = config.data.augmentation_config - assert isinstance(aug_file, io.StringIO) dataset = cls( manifest_path=config.data.manifest, - unit_type=config.data.unit_type, - vocab_filepath=config.data.vocab_filepath, - mean_std_filepath=config.data.mean_std_filepath, - spm_model_prefix=config.data.spm_model_prefix, - augmentation_config=aug_file.read(), max_input_len=config.data.max_input_len, min_input_len=config.data.min_input_len, max_output_len=config.data.max_output_len, min_output_len=config.data.min_output_len, max_output_input_ratio=config.data.max_output_input_ratio, - 
min_output_input_ratio=config.data.min_output_input_ratio, - stride_ms=config.data.stride_ms, - window_ms=config.data.window_ms, - n_fft=config.data.n_fft, - max_freq=config.data.max_freq, - target_sample_rate=config.data.target_sample_rate, - specgram_type=config.data.specgram_type, - feat_dim=config.data.feat_dim, - delta_delta=config.data.delta_delta, - dither=config.data.dither, - use_dB_normalization=config.data.use_dB_normalization, - target_dB=config.data.target_dB, - random_seed=config.data.random_seed, - keep_transcription_text=config.data.keep_transcription_text) + min_output_input_ratio=config.data.min_output_input_ratio, ) return dataset def __init__(self, manifest_path, - unit_type, - vocab_filepath, - mean_std_filepath, - spm_model_prefix=None, - augmentation_config='{}', max_input_len=float('inf'), min_input_len=0.0, max_output_len=float('inf'), min_output_len=0.0, max_output_input_ratio=float('inf'), - min_output_input_ratio=0.0, - stride_ms=10.0, - window_ms=20.0, - n_fft=None, - max_freq=None, - target_sample_rate=16000, - specgram_type='linear', - feat_dim=None, - delta_delta=False, - dither=1.0, - use_dB_normalization=True, - target_dB=-20, - random_seed=0, - keep_transcription_text=False): + min_output_input_ratio=0.0): """Manifest Dataset Args: manifest_path (str): manifest josn file path - unit_type(str): token unit type, e.g. char, word, spm - vocab_filepath (str): vocab file path. - mean_std_filepath (str): mean and std file path, which suffix is *.npy - spm_model_prefix (str): spm model prefix, need if `unit_type` is spm. - augmentation_config (str, optional): augmentation json str. Defaults to '{}'. - max_input_len ([type], optional): maximum output seq length, in seconds for raw wav, in frame numbers for feature data. Defaults to float('inf'). - min_input_len (float, optional): minimum input seq length, in seconds for raw wav, in frame numbers for feature data. Defaults to 0.0. - max_output_len (float, optional): maximum input seq length, in modeling units. Defaults to 500.0. - min_output_len (float, optional): minimum input seq length, in modeling units. Defaults to 0.0. - max_output_input_ratio (float, optional): maximum output seq length/output seq length ratio. Defaults to 10.0. - min_output_input_ratio (float, optional): minimum output seq length/output seq length ratio. Defaults to 0.05. - stride_ms (float, optional): stride size in ms. Defaults to 10.0. - window_ms (float, optional): window size in ms. Defaults to 20.0. - n_fft (int, optional): fft points for rfft. Defaults to None. - max_freq (int, optional): max cut freq. Defaults to None. - target_sample_rate (int, optional): target sample rate which used for training. Defaults to 16000. - specgram_type (str, optional): 'linear', 'mfcc' or 'fbank'. Defaults to 'linear'. - feat_dim (int, optional): audio feature dim, using by 'mfcc' or 'fbank'. Defaults to None. - delta_delta (bool, optional): audio feature with delta-delta, using by 'fbank' or 'mfcc'. Defaults to False. - use_dB_normalization (bool, optional): do dB normalization. Defaults to True. - target_dB (int, optional): target dB. Defaults to -20. - random_seed (int, optional): for random generator. Defaults to 0. - keep_transcription_text (bool, optional): True, when not in training mode, will not do tokenizer; Defaults to False. + max_input_len ([type], optional): maximum output seq length, + in seconds for raw wav, in frame numbers for feature data. Defaults to float('inf'). 
+ min_input_len (float, optional): minimum input seq length, + in seconds for raw wav, in frame numbers for feature data. Defaults to 0.0. + max_output_len (float, optional): maximum input seq length, + in modeling units. Defaults to 500.0. + min_output_len (float, optional): minimum input seq length, + in modeling units. Defaults to 0.0. + max_output_input_ratio (float, optional): maximum output seq length/output seq length ratio. + Defaults to 10.0. + min_output_input_ratio (float, optional): minimum output seq length/output seq length ratio. + Defaults to 0.05. + """ super().__init__() - self._stride_ms = stride_ms - self._target_sample_rate = target_sample_rate - - self._normalizer = FeatureNormalizer( - mean_std_filepath) if mean_std_filepath else None - self._augmentation_pipeline = AugmentationPipeline( - augmentation_config=augmentation_config, random_seed=random_seed) - self._speech_featurizer = SpeechFeaturizer( - unit_type=unit_type, - vocab_filepath=vocab_filepath, - spm_model_prefix=spm_model_prefix, - specgram_type=specgram_type, - feat_dim=feat_dim, - delta_delta=delta_delta, - stride_ms=stride_ms, - window_ms=window_ms, - n_fft=n_fft, - max_freq=max_freq, - target_sample_rate=target_sample_rate, - use_dB_normalization=use_dB_normalization, - target_dB=target_dB, - dither=dither) - - self._rng = np.random.RandomState(random_seed) - self._keep_transcription_text = keep_transcription_text - # for caching tar files info - self._local_data = TarLocalData(tar2info={}, tar2object={}) # read manifest self._manifest = read_manifest( @@ -228,124 +103,47 @@ class ManifestDataset(Dataset): min_output_input_ratio=min_output_input_ratio) self._manifest.sort(key=lambda x: x["feat_shape"][0]) - @property - def manifest(self): - return self._manifest - - @property - def vocab_size(self): - return self._speech_featurizer.vocab_size - - @property - def vocab_list(self): - return self._speech_featurizer.vocab_list - - @property - def vocab_dict(self): - return self._speech_featurizer.vocab_dict - - @property - def text_feature(self): - return self._speech_featurizer.text_feature - - @property - def feature_size(self): - return self._speech_featurizer.feature_size - - @property - def stride_ms(self): - return self._speech_featurizer.stride_ms - - def _parse_tar(self, file): - """Parse a tar file to get a tarfile object - and a map containing tarinfoes - """ - result = {} - f = tarfile.open(file) - for tarinfo in f.getmembers(): - result[tarinfo.name] = tarinfo - return f, result - - def _subfile_from_tar(self, file): - """Get subfile object from tar. - - It will return a subfile object from tar file - and cached tar file info for next reading request. - """ - tarpath, filename = file.split(':', 1)[1].split('#', 1) - if 'tar2info' not in self._local_data.__dict__: - self._local_data.tar2info = {} - if 'tar2object' not in self._local_data.__dict__: - self._local_data.tar2object = {} - if tarpath not in self._local_data.tar2info: - object, infoes = self._parse_tar(tarpath) - self._local_data.tar2info[tarpath] = infoes - self._local_data.tar2object[tarpath] = object - return self._local_data.tar2object[tarpath].extractfile( - self._local_data.tar2info[tarpath][filename]) - - def process_utterance(self, audio_file, transcript): - """Load, augment, featurize and normalize for speech data. + def __len__(self): + return len(self._manifest) - :param audio_file: Filepath or file object of audio file. - :type audio_file: str | file - :param transcript: Transcription text. 
- :type transcript: str - :return: Tuple of audio feature tensor and data of transcription part, - where transcription part could be token ids or text. - :rtype: tuple of (2darray, list) - """ - start_time = time.time() - if isinstance(audio_file, str) and audio_file.startswith('tar:'): - speech_segment = SpeechSegment.from_file( - self._subfile_from_tar(audio_file), transcript) - else: - speech_segment = SpeechSegment.from_file(audio_file, transcript) - load_wav_time = time.time() - start_time - #logger.debug(f"load wav time: {load_wav_time}") + def __getitem__(self, idx): + instance = self._manifest[idx] + return instance["utt"], instance["feat"], instance["text"] - # audio augment - start_time = time.time() - self._augmentation_pipeline.transform_audio(speech_segment) - audio_aug_time = time.time() - start_time - #logger.debug(f"audio augmentation time: {audio_aug_time}") - start_time = time.time() - specgram, transcript_part = self._speech_featurizer.featurize( - speech_segment, self._keep_transcription_text) - if self._normalizer: - specgram = self._normalizer.apply(specgram) - feature_time = time.time() - start_time - #logger.debug(f"audio & test feature time: {feature_time}") +class TripletManifestDataset(ManifestDataset): + """ + For Joint Training of Speech Translation and ASR. + text: translation, + text1: transcript. + """ - # specgram augment - start_time = time.time() - specgram = self._augmentation_pipeline.transform_feature(specgram) - feature_aug_time = time.time() - start_time - #logger.debug(f"audio feature augmentation time: {feature_aug_time}") - return specgram, transcript_part + def __getitem__(self, idx): + instance = self._manifest[idx] + return instance["utt"], instance["feat"], instance["text"], instance[ + "text1"] - def _instance_reader_creator(self, manifest): - """ - Instance reader creator. Create a callable function to produce - instances of data. - Instance: a tuple of ndarray of audio spectrogram and a list of - token indices for transcript. - """ +class TransformDataset(Dataset): + """Transform Dataset. - def reader(): - for instance in manifest: - inst = self.process_utterance(instance["feat"], - instance["text"]) - yield inst + Args: + data: list object from make_batchset + converter: batch function + reader: read data + """ - return reader + def __init__(self, data, converter, reader): + """Init function.""" + super().__init__() + self.data = data + self.converter = converter + self.reader = reader def __len__(self): - return len(self._manifest) + """Len function.""" + return len(self.data) def __getitem__(self, idx): - instance = self._manifest[idx] - feat, text = self.process_utterance(instance["feat"], instance["text"]) - return instance["utt"], feat, text + """[] operator.""" + return self.converter([self.reader(self.data[idx], return_uttid=True)]) diff --git a/deepspeech/io/reader.py b/deepspeech/io/reader.py new file mode 100644 index 000000000..95cdbb951 --- /dev/null +++ b/deepspeech/io/reader.py @@ -0,0 +1,410 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +from collections import OrderedDict + +import kaldiio +import numpy as np +import soundfile + +from deepspeech.frontend.augmentor.augmentation import AugmentationPipeline +from deepspeech.utils.log import Log + +__all__ = ["LoadInputsAndTargets"] + +logger = Log(__name__).getlog() + + +class LoadInputsAndTargets(): + """Create a mini-batch from a list of dicts + + >>> batch = [('utt1', + ... dict(input=[dict(feat='some.ark:123', + ... filetype='mat', + ... name='input1', + ... shape=[100, 80])], + ... output=[dict(tokenid='1 2 3 4', + ... name='target1', + ... shape=[4, 31])]])) + >>> l = LoadInputsAndTargets() + >>> feat, target = l(batch) + + :param: str mode: Specify the task mode, "asr" or "tts" + :param: str preprocess_conf: The path of a json file for pre-processing + :param: bool load_input: If False, not to load the input data + :param: bool load_output: If False, not to load the output data + :param: bool sort_in_input_length: Sort the mini-batch in descending order + of the input length + :param: bool use_speaker_embedding: Used for tts mode only + :param: bool use_second_target: Used for tts mode only + :param: dict preprocess_args: Set some optional arguments for preprocessing + :param: Optional[dict] preprocess_args: Used for tts mode only + """ + + def __init__( + self, + mode="asr", + preprocess_conf=None, + load_input=True, + load_output=True, + sort_in_input_length=True, + preprocess_args=None, + keep_all_data_on_mem=False, ): + self._loaders = {} + + if mode not in ["asr"]: + raise ValueError("Only asr are allowed: mode={}".format(mode)) + + if preprocess_conf is not None: + with open(preprocess_conf, 'r') as fin: + self.preprocessing = AugmentationPipeline(fin.read()) + logger.warning( + "[Experimental feature] Some preprocessing will be done " + "for the mini-batch creation using {}".format( + self.preprocessing)) + else: + # If conf doesn't exist, this function don't touch anything. 
+ self.preprocessing = None + + self.mode = mode + self.load_output = load_output + self.load_input = load_input + self.sort_in_input_length = sort_in_input_length + if preprocess_args is None: + self.preprocess_args = {} + else: + assert isinstance(preprocess_args, dict), type(preprocess_args) + self.preprocess_args = dict(preprocess_args) + + self.keep_all_data_on_mem = keep_all_data_on_mem + + def __call__(self, batch, return_uttid=False): + """Function to load inputs and targets from list of dicts + + :param List[Tuple[str, dict]] batch: list of dict which is subset of + loaded data.json + :param bool return_uttid: return utterance ID information for visualization + :return: list of input token id sequences [(L_1), (L_2), ..., (L_B)] + :return: list of input feature sequences + [(T_1, D), (T_2, D), ..., (T_B, D)] + :rtype: list of float ndarray + :return: list of target token id sequences [(L_1), (L_2), ..., (L_B)] + :rtype: list of int ndarray + + """ + x_feats_dict = OrderedDict() # OrderedDict[str, List[np.ndarray]] + y_feats_dict = OrderedDict() # OrderedDict[str, List[np.ndarray]] + uttid_list = [] # List[str] + + for uttid, info in batch: + uttid_list.append(uttid) + + if self.load_input: + # Note(kamo): This for-loop is for multiple inputs + for idx, inp in enumerate(info["input"]): + # {"input": + # [{"feat": "some/path.h5:F01_050C0101_PED_REAL", + # "filetype": "hdf5", + # "name": "input1", ...}], ...} + x = self._get_from_loader( + filepath=inp["feat"], + filetype=inp.get("filetype", "mat")) + x_feats_dict.setdefault(inp["name"], []).append(x) + + if self.load_output: + for idx, inp in enumerate(info["output"]): + if "tokenid" in inp: + # ======= Legacy format for output ======= + # {"output": [{"tokenid": "1 2 3 4"}]) + x = np.fromiter( + map(int, inp["tokenid"].split()), dtype=np.int64) + else: + # ======= New format ======= + # {"input": + # [{"feat": "some/path.h5:F01_050C0101_PED_REAL", + # "filetype": "hdf5", + # "name": "target1", ...}], ...} + x = self._get_from_loader( + filepath=inp["feat"], + filetype=inp.get("filetype", "mat")) + + y_feats_dict.setdefault(inp["name"], []).append(x) + + if self.mode == "asr": + return_batch, uttid_list = self._create_batch_asr( + x_feats_dict, y_feats_dict, uttid_list) + else: + raise NotImplementedError(self.mode) + + if self.preprocessing is not None: + # Apply pre-processing all input features + for x_name in return_batch.keys(): + if x_name.startswith("input"): + return_batch[x_name] = self.preprocessing( + return_batch[x_name], uttid_list, + **self.preprocess_args) + + if return_uttid: + return tuple(return_batch.values()), uttid_list + + # Doesn't return the names now. + return tuple(return_batch.values()) + + def _create_batch_asr(self, x_feats_dict, y_feats_dict, uttid_list): + """Create a OrderedDict for the mini-batch + + :param OrderedDict x_feats_dict: + e.g. {"input1": [ndarray, ndarray, ...], + "input2": [ndarray, ndarray, ...]} + :param OrderedDict y_feats_dict: + e.g. 
{"target1": [ndarray, ndarray, ...], + "target2": [ndarray, ndarray, ...]} + :param: List[str] uttid_list: + Give uttid_list to sort in the same order as the mini-batch + :return: batch, uttid_list + :rtype: Tuple[OrderedDict, List[str]] + """ + # handle single-input and multi-input (paralell) asr mode + xs = list(x_feats_dict.values()) + + if self.load_output: + ys = list(y_feats_dict.values()) + assert len(xs[0]) == len(ys[0]), (len(xs[0]), len(ys[0])) + + # get index of non-zero length samples + nonzero_idx = list( + filter(lambda i: len(ys[0][i]) > 0, range(len(ys[0])))) + for n in range(1, len(y_feats_dict)): + nonzero_idx = filter(lambda i: len(ys[n][i]) > 0, nonzero_idx) + else: + # Note(kamo): Be careful not to make nonzero_idx to a generator + nonzero_idx = list(range(len(xs[0]))) + + if self.sort_in_input_length: + # sort in input lengths based on the first input + nonzero_sorted_idx = sorted( + nonzero_idx, key=lambda i: -len(xs[0][i])) + else: + nonzero_sorted_idx = nonzero_idx + + if len(nonzero_sorted_idx) != len(xs[0]): + logger.warning( + "Target sequences include empty tokenid (batch {} -> {}).". + format(len(xs[0]), len(nonzero_sorted_idx))) + + # remove zero-length samples + xs = [[x[i] for i in nonzero_sorted_idx] for x in xs] + uttid_list = [uttid_list[i] for i in nonzero_sorted_idx] + + x_names = list(x_feats_dict.keys()) + if self.load_output: + ys = [[y[i] for i in nonzero_sorted_idx] for y in ys] + y_names = list(y_feats_dict.keys()) + + # Keeping x_name and y_name, e.g. input1, for future extension + return_batch = OrderedDict([ + * [(x_name, x) for x_name, x in zip(x_names, xs)], + * [(y_name, y) for y_name, y in zip(y_names, ys)], + ]) + else: + return_batch = OrderedDict( + [(x_name, x) for x_name, x in zip(x_names, xs)]) + return return_batch, uttid_list + + def _get_from_loader(self, filepath, filetype): + """Return ndarray + + In order to make the fds to be opened only at the first referring, + the loader are stored in self._loaders + + >>> ndarray = loader.get_from_loader( + ... 'some/path.h5:F01_050C0101_PED_REAL', filetype='hdf5') + + :param: str filepath: + :param: str filetype: + :return: + :rtype: np.ndarray + """ + if filetype == "hdf5": + # e.g. + # {"input": [{"feat": "some/path.h5:F01_050C0101_PED_REAL", + # "filetype": "hdf5", + # -> filepath = "some/path.h5", key = "F01_050C0101_PED_REAL" + filepath, key = filepath.split(":", 1) + + loader = self._loaders.get(filepath) + if loader is None: + # To avoid disk access, create loader only for the first time + loader = h5py.File(filepath, "r") + self._loaders[filepath] = loader + return loader[key][()] + elif filetype == "sound.hdf5": + # e.g. + # {"input": [{"feat": "some/path.h5:F01_050C0101_PED_REAL", + # "filetype": "sound.hdf5", + # -> filepath = "some/path.h5", key = "F01_050C0101_PED_REAL" + filepath, key = filepath.split(":", 1) + + loader = self._loaders.get(filepath) + if loader is None: + # To avoid disk access, create loader only for the first time + loader = SoundHDF5File(filepath, "r", dtype="int16") + self._loaders[filepath] = loader + array, rate = loader[key] + return array + elif filetype == "sound": + # e.g. + # {"input": [{"feat": "some/path.wav", + # "filetype": "sound"}, + # Assume PCM16 + if not self.keep_all_data_on_mem: + array, _ = soundfile.read(filepath, dtype="int16") + return array + if filepath not in self._loaders: + array, _ = soundfile.read(filepath, dtype="int16") + self._loaders[filepath] = array + return self._loaders[filepath] + elif filetype == "npz": + # e.g. 
+ # {"input": [{"feat": "some/path.npz:F01_050C0101_PED_REAL", + # "filetype": "npz", + filepath, key = filepath.split(":", 1) + + loader = self._loaders.get(filepath) + if loader is None: + # To avoid disk access, create loader only for the first time + loader = np.load(filepath) + self._loaders[filepath] = loader + return loader[key] + elif filetype == "npy": + # e.g. + # {"input": [{"feat": "some/path.npy", + # "filetype": "npy"}, + if not self.keep_all_data_on_mem: + return np.load(filepath) + if filepath not in self._loaders: + self._loaders[filepath] = np.load(filepath) + return self._loaders[filepath] + elif filetype in ["mat", "vec"]: + # e.g. + # {"input": [{"feat": "some/path.ark:123", + # "filetype": "mat"}]}, + # In this case, "123" indicates the starting points of the matrix + # load_mat can load both matrix and vector + if not self.keep_all_data_on_mem: + return kaldiio.load_mat(filepath) + if filepath not in self._loaders: + self._loaders[filepath] = kaldiio.load_mat(filepath) + return self._loaders[filepath] + elif filetype == "scp": + # e.g. + # {"input": [{"feat": "some/path.scp:F01_050C0101_PED_REAL", + # "filetype": "scp", + filepath, key = filepath.split(":", 1) + loader = self._loaders.get(filepath) + if loader is None: + # To avoid disk access, create loader only for the first time + loader = kaldiio.load_scp(filepath) + self._loaders[filepath] = loader + return loader[key] + else: + raise NotImplementedError( + "Not supported: loader_type={}".format(filetype)) + + +class SoundHDF5File(): + """Collecting sound files to a HDF5 file + + >>> f = SoundHDF5File('a.flac.h5', mode='a') + >>> array = np.random.randint(0, 100, 100, dtype=np.int16) + >>> f['id'] = (array, 16000) + >>> array, rate = f['id'] + + + :param: str filepath: + :param: str mode: + :param: str format: The type used when saving wav. flac, nist, htk, etc. 
+ :param: str dtype: + + """ + + def __init__(self, + filepath, + mode="r+", + format=None, + dtype="int16", + **kwargs): + self.filepath = filepath + self.mode = mode + self.dtype = dtype + + self.file = h5py.File(filepath, mode, **kwargs) + if format is None: + # filepath = a.flac.h5 -> format = flac + second_ext = os.path.splitext(os.path.splitext(filepath)[0])[1] + format = second_ext[1:] + if format.upper() not in soundfile.available_formats(): + # If not found, flac is selected + format = "flac" + + # This format affects only saving + self.format = format + + def __repr__(self): + return ''.format( + self.filepath, self.mode, self.format, self.dtype) + + def create_dataset(self, name, shape=None, data=None, **kwds): + f = io.BytesIO() + array, rate = data + soundfile.write(f, array, rate, format=self.format) + self.file.create_dataset( + name, shape=shape, data=np.void(f.getvalue()), **kwds) + + def __setitem__(self, name, data): + self.create_dataset(name, data=data) + + def __getitem__(self, key): + data = self.file[key][()] + f = io.BytesIO(data.tobytes()) + array, rate = soundfile.read(f, dtype=self.dtype) + return array, rate + + def keys(self): + return self.file.keys() + + def values(self): + for k in self.file: + yield self[k] + + def items(self): + for k in self.file: + yield k, self[k] + + def __iter__(self): + return iter(self.file) + + def __contains__(self, item): + return item in self.file + + def __len__(self, item): + return len(self.file) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.file.close() + + def close(self): + self.file.close() diff --git a/deepspeech/io/utility.py b/deepspeech/io/utility.py index 0cd37428b..99487a0af 100644 --- a/deepspeech/io/utility.py +++ b/deepspeech/io/utility.py @@ -17,11 +17,16 @@ import numpy as np from deepspeech.utils.log import Log -__all__ = ["pad_sequence"] +__all__ = ["pad_list", "pad_sequence"] logger = Log(__name__).getlog() +def pad_list(sequences: List[np.ndarray], + padding_value: float=0.0) -> np.ndarray: + return pad_sequence(sequences, True, padding_value) + + def pad_sequence(sequences: List[np.ndarray], batch_first: bool=True, padding_value: float=0.0) -> np.ndarray: diff --git a/deepspeech/models/ds2/__init__.py b/deepspeech/models/ds2/__init__.py new file mode 100644 index 000000000..39bea5bf9 --- /dev/null +++ b/deepspeech/models/ds2/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from .deepspeech2 import DeepSpeech2InferModel +from .deepspeech2 import DeepSpeech2Model + +__all__ = ['DeepSpeech2Model', 'DeepSpeech2InferModel'] diff --git a/deepspeech/models/ds2/conv.py b/deepspeech/models/ds2/conv.py new file mode 100644 index 000000000..9548af0a2 --- /dev/null +++ b/deepspeech/models/ds2/conv.py @@ -0,0 +1,163 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from paddle import nn +from paddle.nn import functional as F + +from deepspeech.modules.activation import brelu +from deepspeech.modules.mask import make_non_pad_mask +from deepspeech.utils.log import Log + +logger = Log(__name__).getlog() + +__all__ = ['ConvStack', "conv_output_size"] + + +def conv_output_size(I, F, P, S): + # https://stanford.edu/~shervine/teaching/cs-230/cheatsheet-convolutional-neural-networks#hyperparameters + # Output size after Conv: + # By noting I the length of the input volume size, + # F the length of the filter, + # P the amount of zero padding, + # S the stride, + # then the output size O of the feature map along that dimension is given by: + # O = (I - F + Pstart + Pend) // S + 1 + # When Pstart == Pend == P, we can replace Pstart + Pend by 2P. + # When Pstart == Pend == 0 + # O = (I - F - S) // S + # https://iq.opengenus.org/output-size-of-convolution/ + # Output height = (Input height + padding height top + padding height bottom - kernel height) / (stride height) + 1 + # Output width = (Output width + padding width right + padding width left - kernel width) / (stride width) + 1 + return (I - F + 2 * P - S) // S + + +class ConvBn(nn.Layer): + """Convolution layer with batch normalization. + + :param kernel_size: The x dimension of a filter kernel. Or input a tuple for + two image dimension. + :type kernel_size: int|tuple|list + :param num_channels_in: Number of input channels. + :type num_channels_in: int + :param num_channels_out: Number of output channels. + :type num_channels_out: int + :param stride: The x dimension of the stride. Or input a tuple for two + image dimension. + :type stride: int|tuple|list + :param padding: The x dimension of the padding. Or input a tuple for two + image dimension. + :type padding: int|tuple|list + :param act: Activation type, relu|brelu + :type act: string + :return: Batch norm layer after convolution layer. 
+ :rtype: Variable + + """ + + def __init__(self, num_channels_in, num_channels_out, kernel_size, stride, + padding, act): + + super().__init__() + assert len(kernel_size) == 2 + assert len(stride) == 2 + assert len(padding) == 2 + self.kernel_size = kernel_size + self.stride = stride + self.padding = padding + + self.conv = nn.Conv2D( + num_channels_in, + num_channels_out, + kernel_size=kernel_size, + stride=stride, + padding=padding, + weight_attr=None, + bias_attr=False, + data_format='NCHW') + + self.bn = nn.BatchNorm2D( + num_channels_out, + weight_attr=None, + bias_attr=None, + data_format='NCHW') + self.act = F.relu if act == 'relu' else brelu + + def forward(self, x, x_len): + """ + x(Tensor): audio, shape [B, C, D, T] + """ + x = self.conv(x) + x = self.bn(x) + x = self.act(x) + + x_len = (x_len - self.kernel_size[1] + 2 * self.padding[1] + ) // self.stride[1] + 1 + + # reset padding part to 0 + masks = make_non_pad_mask(x_len) #[B, T] + masks = masks.unsqueeze(1).unsqueeze(1) # [B, 1, 1, T] + # https://github.com/PaddlePaddle/Paddle/pull/29265 + # rhs will type promote to lhs + x = x * masks + return x, x_len + + +class ConvStack(nn.Layer): + """Convolution group with stacked convolution layers. + + :param feat_size: audio feature dim. + :type feat_size: int + :param num_stacks: Number of stacked convolution layers. + :type num_stacks: int + """ + + def __init__(self, feat_size, num_stacks): + super().__init__() + self.feat_size = feat_size # D + self.num_stacks = num_stacks + + self.conv_in = ConvBn( + num_channels_in=1, + num_channels_out=32, + kernel_size=(41, 11), #[D, T] + stride=(2, 3), + padding=(20, 5), + act='brelu') + + out_channel = 32 + convs = [ + ConvBn( + num_channels_in=32, + num_channels_out=out_channel, + kernel_size=(21, 11), + stride=(2, 1), + padding=(10, 5), + act='brelu') for i in range(num_stacks - 1) + ] + self.conv_stack = nn.LayerList(convs) + + # conv output feat_dim + output_height = (feat_size - 1) // 2 + 1 + for i in range(self.num_stacks - 1): + output_height = (output_height - 1) // 2 + 1 + self.output_height = out_channel * output_height + + def forward(self, x, x_len): + """ + x: shape [B, C, D, T] + x_len : shape [B] + """ + x, x_len = self.conv_in(x, x_len) + for i, conv in enumerate(self.conv_stack): + x, x_len = conv(x, x_len) + return x, x_len diff --git a/deepspeech/models/deepspeech2.py b/deepspeech/models/ds2/deepspeech2.py similarity index 77% rename from deepspeech/models/deepspeech2.py rename to deepspeech/models/ds2/deepspeech2.py index 0ff5514de..dda26358b 100644 --- a/deepspeech/models/deepspeech2.py +++ b/deepspeech/models/ds2/deepspeech2.py @@ -18,16 +18,16 @@ import paddle from paddle import nn from yacs.config import CfgNode -from deepspeech.modules.conv import ConvStack +from deepspeech.models.ds2.conv import ConvStack +from deepspeech.models.ds2.rnn import RNNStack from deepspeech.modules.ctc import CTCDecoder -from deepspeech.modules.rnn import RNNStack -from deepspeech.utils import checkpoint from deepspeech.utils import layer_tools +from deepspeech.utils.checkpoint import Checkpoint from deepspeech.utils.log import Log logger = Log(__name__).getlog() -__all__ = ['DeepSpeech2Model'] +__all__ = ['DeepSpeech2Model', 'DeepSpeech2InferModel'] class CRNNEncoder(nn.Layer): @@ -117,7 +117,7 @@ class DeepSpeech2Model(nn.Layer): :type share_weights: bool :return: A tuple of an output unnormalized log probability layer ( before softmax) and a ctc cost layer. 
- :rtype: tuple of LayerOutput + :rtype: tuple of LayerOutput """ @classmethod @@ -128,8 +128,8 @@ class DeepSpeech2Model(nn.Layer): num_rnn_layers=3, #Number of stacking RNN layers. rnn_layer_size=1024, #RNN layer size (number of RNN cells). use_gru=True, #Use gru if set True. Use simple rnn if set False. - share_rnn_weights=True #Whether to share input-hidden weights between forward and backward directional RNNs.Notice that for GRU, weight sharing is not supported. - )) + share_rnn_weights=True, #Whether to share input-hidden weights between forward and backward directional RNNs.Notice that for GRU, weight sharing is not supported. + ctc_grad_norm_type='instance', )) if config is not None: config.merge_from_other_cfg(default) return default @@ -141,7 +141,9 @@ class DeepSpeech2Model(nn.Layer): num_rnn_layers=3, rnn_size=1024, use_gru=False, - share_rnn_weights=True): + share_rnn_weights=True, + blank_id=0, + ctc_grad_norm_type='instance'): super().__init__() self.encoder = CRNNEncoder( feat_size=feat_size, @@ -156,10 +158,11 @@ class DeepSpeech2Model(nn.Layer): self.decoder = CTCDecoder( odim=dict_size, # is in vocab enc_n_units=self.encoder.output_size, - blank_id=0, # first token is + blank_id=blank_id, dropout_rate=0.0, reduction=True, # sum - batch_average=True) # sum / batch_size + batch_average=True, # sum / batch_size + grad_norm_type=ctc_grad_norm_type) def forward(self, audio, audio_len, text, text_len): """Compute Model loss @@ -198,36 +201,59 @@ class DeepSpeech2Model(nn.Layer): cutoff_top_n, num_processes) @classmethod - def from_pretrained(cls, dataset, config, checkpoint_path): + def from_pretrained(cls, dataloader, config, checkpoint_path): """Build a DeepSpeech2Model model from a pretrained model. Parameters ---------- - dataset: paddle.io.Dataset + dataloader: paddle.io.DataLoader config: yacs.config.CfgNode model configs - + checkpoint_path: Path or str the path of pretrained model checkpoint, without extension name - + Returns ------- DeepSpeech2Model The model built from pretrained result. """ - model = cls(feat_size=dataset.feature_size, - dict_size=dataset.vocab_size, + model = cls(feat_size=dataloader.collate_fn.feature_size, + dict_size=dataloader.collate_fn.vocab_size, num_conv_layers=config.model.num_conv_layers, num_rnn_layers=config.model.num_rnn_layers, rnn_size=config.model.rnn_layer_size, use_gru=config.model.use_gru, - share_rnn_weights=config.model.share_rnn_weights) - infos = checkpoint.load_parameters( + share_rnn_weights=config.model.share_rnn_weights, + blank_id=config.model.blank_id) + infos = Checkpoint().load_parameters( model, checkpoint_path=checkpoint_path) logger.info(f"checkpoint info: {infos}") layer_tools.summary(model) return model + @classmethod + def from_config(cls, config): + """Build a DeepSpeec2Model from config + Parameters + + config: yacs.config.CfgNode + config.model + Returns + ------- + DeepSpeech2Model + The model built from config. 
+ """ + model = cls(feat_size=config.feat_size, + dict_size=config.dict_size, + num_conv_layers=config.num_conv_layers, + num_rnn_layers=config.num_rnn_layers, + rnn_size=config.rnn_layer_size, + use_gru=config.use_gru, + share_rnn_weights=config.share_rnn_weights, + blank_id=config.blank_id) + return model + class DeepSpeech2InferModel(DeepSpeech2Model): def __init__(self, @@ -237,7 +263,8 @@ class DeepSpeech2InferModel(DeepSpeech2Model): num_rnn_layers=3, rnn_size=1024, use_gru=False, - share_rnn_weights=True): + share_rnn_weights=True, + blank_id=0): super().__init__( feat_size=feat_size, dict_size=dict_size, @@ -245,7 +272,8 @@ class DeepSpeech2InferModel(DeepSpeech2Model): num_rnn_layers=num_rnn_layers, rnn_size=rnn_size, use_gru=use_gru, - share_rnn_weights=share_rnn_weights) + share_rnn_weights=share_rnn_weights, + blank_id=blank_id) def forward(self, audio, audio_len): """export model function @@ -259,4 +287,16 @@ class DeepSpeech2InferModel(DeepSpeech2Model): """ eouts, eouts_len = self.encoder(audio, audio_len) probs = self.decoder.softmax(eouts) - return probs + return probs, eouts_len + + def export(self): + static_model = paddle.jit.to_static( + self, + input_spec=[ + paddle.static.InputSpec( + shape=[None, None, self.encoder.feat_size], + dtype='float32'), # audio, [B,T,D] + paddle.static.InputSpec(shape=[None], + dtype='int64'), # audio_length, [B] + ]) + return static_model diff --git a/deepspeech/models/ds2/rnn.py b/deepspeech/models/ds2/rnn.py new file mode 100644 index 000000000..3fc52a378 --- /dev/null +++ b/deepspeech/models/ds2/rnn.py @@ -0,0 +1,315 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import math + +import paddle +from paddle import nn +from paddle.nn import functional as F +from paddle.nn import initializer as I + +from deepspeech.modules.activation import brelu +from deepspeech.modules.mask import make_non_pad_mask +from deepspeech.utils.log import Log + +logger = Log(__name__).getlog() + +__all__ = ['RNNStack'] + + +class RNNCell(nn.RNNCellBase): + r""" + Elman RNN (SimpleRNN) cell. Given the inputs and previous states, it + computes the outputs and updates states. + The formula used is as follows: + .. math:: + h_{t} & = act(x_{t} + b_{ih} + W_{hh}h_{t-1} + b_{hh}) + y_{t} & = h_{t} + + where :math:`act` is for :attr:`activation`. 
+ """ + + def __init__(self, + hidden_size: int, + activation="tanh", + weight_ih_attr=None, + weight_hh_attr=None, + bias_ih_attr=None, + bias_hh_attr=None, + name=None): + super().__init__() + std = 1.0 / math.sqrt(hidden_size) + self.weight_hh = self.create_parameter( + (hidden_size, hidden_size), + weight_hh_attr, + default_initializer=I.Uniform(-std, std)) + self.bias_ih = None + self.bias_hh = self.create_parameter( + (hidden_size, ), + bias_hh_attr, + is_bias=True, + default_initializer=I.Uniform(-std, std)) + + self.hidden_size = hidden_size + if activation not in ["tanh", "relu", "brelu"]: + raise ValueError( + "activation for SimpleRNNCell should be tanh or relu, " + "but get {}".format(activation)) + self.activation = activation + self._activation_fn = paddle.tanh \ + if activation == "tanh" \ + else F.relu + if activation == 'brelu': + self._activation_fn = brelu + + def forward(self, inputs, states=None): + if states is None: + states = self.get_initial_states(inputs, self.state_shape) + pre_h = states + i2h = inputs + if self.bias_ih is not None: + i2h += self.bias_ih + h2h = paddle.matmul(pre_h, self.weight_hh, transpose_y=True) + if self.bias_hh is not None: + h2h += self.bias_hh + h = self._activation_fn(i2h + h2h) + return h, h + + @property + def state_shape(self): + return (self.hidden_size, ) + + +class GRUCell(nn.RNNCellBase): + r""" + Gated Recurrent Unit (GRU) RNN cell. Given the inputs and previous states, + it computes the outputs and updates states. + The formula for GRU used is as follows: + .. math:: + r_{t} & = \sigma(W_{ir}x_{t} + b_{ir} + W_{hr}h_{t-1} + b_{hr}) + z_{t} & = \sigma(W_{iz}x_{t} + b_{iz} + W_{hz}h_{t-1} + b_{hz}) + \widetilde{h}_{t} & = \tanh(W_{ic}x_{t} + b_{ic} + r_{t} * (W_{hc}h_{t-1} + b_{hc})) + h_{t} & = z_{t} * h_{t-1} + (1 - z_{t}) * \widetilde{h}_{t} + y_{t} & = h_{t} + + where :math:`\sigma` is the sigmoid fucntion, and * is the elemetwise + multiplication operator. 
+ """ + + def __init__(self, + input_size: int, + hidden_size: int, + weight_ih_attr=None, + weight_hh_attr=None, + bias_ih_attr=None, + bias_hh_attr=None, + name=None): + super().__init__() + std = 1.0 / math.sqrt(hidden_size) + self.weight_hh = self.create_parameter( + (3 * hidden_size, hidden_size), + weight_hh_attr, + default_initializer=I.Uniform(-std, std)) + self.bias_ih = None + self.bias_hh = self.create_parameter( + (3 * hidden_size, ), + bias_hh_attr, + is_bias=True, + default_initializer=I.Uniform(-std, std)) + + self.hidden_size = hidden_size + self.input_size = input_size + self._gate_activation = F.sigmoid + self._activation = paddle.tanh + + def forward(self, inputs, states=None): + if states is None: + states = self.get_initial_states(inputs, self.state_shape) + + pre_hidden = states + x_gates = inputs + if self.bias_ih is not None: + x_gates = x_gates + self.bias_ih + h_gates = paddle.matmul(pre_hidden, self.weight_hh, transpose_y=True) + if self.bias_hh is not None: + h_gates = h_gates + self.bias_hh + + x_r, x_z, x_c = paddle.split(x_gates, num_or_sections=3, axis=1) + h_r, h_z, h_c = paddle.split(h_gates, num_or_sections=3, axis=1) + + r = self._gate_activation(x_r + h_r) + z = self._gate_activation(x_z + h_z) + c = self._activation(x_c + r * h_c) # apply reset gate after mm + h = (pre_hidden - c) * z + c + # https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/fluid/layers/dynamic_gru_cn.html#dynamic-gru + + return h, h + + @property + def state_shape(self): + r""" + The `state_shape` of GRUCell is a shape `[hidden_size]` (-1 for batch + size would be automatically inserted into shape). The shape corresponds + to the shape of :math:`h_{t-1}`. + """ + return (self.hidden_size, ) + + +class BiRNNWithBN(nn.Layer): + """Bidirectonal simple rnn layer with sequence-wise batch normalization. + The batch normalization is only performed on input-state weights. + + :param size: Dimension of RNN cells. + :type size: int + :param share_weights: Whether to share input-hidden weights between + forward and backward directional RNNs. + :type share_weights: bool + :return: Bidirectional simple rnn layer. + :rtype: Variable + """ + + def __init__(self, i_size: int, h_size: int, share_weights: bool): + super().__init__() + self.share_weights = share_weights + if self.share_weights: + #input-hidden weights shared between bi-directional rnn. 
+ self.fw_fc = nn.Linear(i_size, h_size, bias_attr=False) + # batch norm is only performed on input-state projection + self.fw_bn = nn.BatchNorm1D( + h_size, bias_attr=None, data_format='NLC') + self.bw_fc = self.fw_fc + self.bw_bn = self.fw_bn + else: + self.fw_fc = nn.Linear(i_size, h_size, bias_attr=False) + self.fw_bn = nn.BatchNorm1D( + h_size, bias_attr=None, data_format='NLC') + self.bw_fc = nn.Linear(i_size, h_size, bias_attr=False) + self.bw_bn = nn.BatchNorm1D( + h_size, bias_attr=None, data_format='NLC') + + self.fw_cell = RNNCell(hidden_size=h_size, activation='brelu') + self.bw_cell = RNNCell(hidden_size=h_size, activation='brelu') + self.fw_rnn = nn.RNN( + self.fw_cell, is_reverse=False, time_major=False) #[B, T, D] + self.bw_rnn = nn.RNN( + self.bw_cell, is_reverse=True, time_major=False) #[B, T, D] + + def forward(self, x: paddle.Tensor, x_len: paddle.Tensor): + # x, shape [B, T, D] + fw_x = self.fw_bn(self.fw_fc(x)) + bw_x = self.bw_bn(self.bw_fc(x)) + fw_x, _ = self.fw_rnn(inputs=fw_x, sequence_length=x_len) + bw_x, _ = self.bw_rnn(inputs=bw_x, sequence_length=x_len) + x = paddle.concat([fw_x, bw_x], axis=-1) + return x, x_len + + +class BiGRUWithBN(nn.Layer): + """Bidirectonal gru layer with sequence-wise batch normalization. + The batch normalization is only performed on input-state weights. + + :param name: Name of the layer. + :type name: string + :param input: Input layer. + :type input: Variable + :param size: Dimension of GRU cells. + :type size: int + :param act: Activation type. + :type act: string + :return: Bidirectional GRU layer. + :rtype: Variable + """ + + def __init__(self, i_size: int, h_size: int): + super().__init__() + hidden_size = h_size * 3 + + self.fw_fc = nn.Linear(i_size, hidden_size, bias_attr=False) + self.fw_bn = nn.BatchNorm1D( + hidden_size, bias_attr=None, data_format='NLC') + self.bw_fc = nn.Linear(i_size, hidden_size, bias_attr=False) + self.bw_bn = nn.BatchNorm1D( + hidden_size, bias_attr=None, data_format='NLC') + + self.fw_cell = GRUCell(input_size=hidden_size, hidden_size=h_size) + self.bw_cell = GRUCell(input_size=hidden_size, hidden_size=h_size) + self.fw_rnn = nn.RNN( + self.fw_cell, is_reverse=False, time_major=False) #[B, T, D] + self.bw_rnn = nn.RNN( + self.bw_cell, is_reverse=True, time_major=False) #[B, T, D] + + def forward(self, x, x_len): + # x, shape [B, T, D] + fw_x = self.fw_bn(self.fw_fc(x)) + bw_x = self.bw_bn(self.bw_fc(x)) + fw_x, _ = self.fw_rnn(inputs=fw_x, sequence_length=x_len) + bw_x, _ = self.bw_rnn(inputs=bw_x, sequence_length=x_len) + x = paddle.concat([fw_x, bw_x], axis=-1) + return x, x_len + + +class RNNStack(nn.Layer): + """RNN group with stacked bidirectional simple RNN or GRU layers. + + :param input: Input layer. + :type input: Variable + :param size: Dimension of RNN cells in each layer. + :type size: int + :param num_stacks: Number of stacked rnn layers. + :type num_stacks: int + :param use_gru: Use gru if set True. Use simple rnn if set False. + :type use_gru: bool + :param share_rnn_weights: Whether to share input-hidden weights between + forward and backward directional RNNs. + It is only available when use_gru=False. + :type share_weights: bool + :return: Output layer of the RNN group. 
+ :rtype: Variable + """ + + def __init__(self, + i_size: int, + h_size: int, + num_stacks: int, + use_gru: bool, + share_rnn_weights: bool): + super().__init__() + rnn_stacks = [] + for i in range(num_stacks): + if use_gru: + #default:GRU using tanh + rnn_stacks.append(BiGRUWithBN(i_size=i_size, h_size=h_size)) + else: + rnn_stacks.append( + BiRNNWithBN( + i_size=i_size, + h_size=h_size, + share_weights=share_rnn_weights)) + i_size = h_size * 2 + + self.rnn_stacks = nn.LayerList(rnn_stacks) + + def forward(self, x: paddle.Tensor, x_len: paddle.Tensor): + """ + x: shape [B, T, D] + x_len: shpae [B] + """ + for i, rnn in enumerate(self.rnn_stacks): + x, x_len = rnn(x, x_len) + masks = make_non_pad_mask(x_len) #[B, T] + masks = masks.unsqueeze(-1) # [B, T, 1] + # https://github.com/PaddlePaddle/Paddle/pull/29265 + # rhs will type promote to lhs + x = x * masks + + return x, x_len diff --git a/deepspeech/models/ds2_online/__init__.py b/deepspeech/models/ds2_online/__init__.py new file mode 100644 index 000000000..255000eeb --- /dev/null +++ b/deepspeech/models/ds2_online/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from .deepspeech2 import DeepSpeech2InferModelOnline +from .deepspeech2 import DeepSpeech2ModelOnline + +__all__ = ['DeepSpeech2ModelOnline', 'DeepSpeech2InferModelOnline'] diff --git a/deepspeech/models/ds2_online/conv.py b/deepspeech/models/ds2_online/conv.py new file mode 100644 index 000000000..4a6fd5abd --- /dev/null +++ b/deepspeech/models/ds2_online/conv.py @@ -0,0 +1,33 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
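A minimal sketch of the per-layer masking done in RNNStack.forward, assuming `make_non_pad_mask` lives at the path shown; frames past each utterance length are zeroed so padding does not leak into the next stacked layer (the bool mask type-promotes to the float lhs, per the Paddle PR referenced above).

# Zero out padded frames after each stacked RNN layer (hypothetical shapes).
import paddle
from deepspeech.modules.mask import make_non_pad_mask

x = paddle.randn([2, 5, 8])                      # [B, T, D]
x_len = paddle.to_tensor([5, 3], dtype='int64')
masks = make_non_pad_mask(x_len)                 # [B, T], bool
x = x * masks.unsqueeze(-1)                      # [B, T, 1] broadcasts over D
# frames 3 and 4 of the second utterance are now exactly zero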
+import paddle + +from deepspeech.modules.subsampling import Conv2dSubsampling4 + + +class Conv2dSubsampling4Online(Conv2dSubsampling4): + def __init__(self, idim: int, odim: int, dropout_rate: float): + super().__init__(idim, odim, dropout_rate, None) + self.output_dim = ((idim - 1) // 2 - 1) // 2 * odim + self.receptive_field_length = 2 * ( + 3 - 1) + 3 # stride_1 * (kernel_size_2 - 1) + kerel_size_1 + + def forward(self, x: paddle.Tensor, + x_len: paddle.Tensor) -> [paddle.Tensor, paddle.Tensor]: + x = x.unsqueeze(1) # (b, c=1, t, f) + x = self.conv(x) + #b, c, t, f = paddle.shape(x) #not work under jit + x = x.transpose([0, 2, 1, 3]).reshape([0, 0, -1]) + x_len = ((x_len - 1) // 2 - 1) // 2 + return x, x_len diff --git a/deepspeech/models/ds2_online/deepspeech2.py b/deepspeech/models/ds2_online/deepspeech2.py new file mode 100644 index 000000000..29d207c44 --- /dev/null +++ b/deepspeech/models/ds2_online/deepspeech2.py @@ -0,0 +1,438 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Deepspeech2 ASR Online Model""" +from typing import Optional + +import paddle +import paddle.nn.functional as F +from paddle import nn +from yacs.config import CfgNode + +from deepspeech.models.ds2_online.conv import Conv2dSubsampling4Online +from deepspeech.modules.ctc import CTCDecoder +from deepspeech.utils import layer_tools +from deepspeech.utils.checkpoint import Checkpoint +from deepspeech.utils.log import Log +logger = Log(__name__).getlog() + +__all__ = ['DeepSpeech2ModelOnline', 'DeepSpeech2InferModelOnline'] + + +class CRNNEncoder(nn.Layer): + def __init__(self, + feat_size, + dict_size, + num_conv_layers=2, + num_rnn_layers=4, + rnn_size=1024, + rnn_direction='forward', + num_fc_layers=2, + fc_layers_size_list=[512, 256], + use_gru=False): + super().__init__() + self.rnn_size = rnn_size + self.feat_size = feat_size # 161 for linear + self.dict_size = dict_size + self.num_rnn_layers = num_rnn_layers + self.num_fc_layers = num_fc_layers + self.rnn_direction = rnn_direction + self.fc_layers_size_list = fc_layers_size_list + self.use_gru = use_gru + self.conv = Conv2dSubsampling4Online(feat_size, 32, dropout_rate=0.0) + + self.output_dim = self.conv.output_dim + + i_size = self.conv.output_dim + self.rnn = nn.LayerList() + self.layernorm_list = nn.LayerList() + self.fc_layers_list = nn.LayerList() + if rnn_direction == 'bidirect' or rnn_direction == 'bidirectional': + layernorm_size = 2 * rnn_size + elif rnn_direction == 'forward': + layernorm_size = rnn_size + else: + raise Exception("Wrong rnn direction") + for i in range(0, num_rnn_layers): + if i == 0: + rnn_input_size = i_size + else: + rnn_input_size = layernorm_size + if use_gru is True: + self.rnn.append( + nn.GRU( + input_size=rnn_input_size, + hidden_size=rnn_size, + num_layers=1, + direction=rnn_direction)) + else: + self.rnn.append( + nn.LSTM( + input_size=rnn_input_size, + hidden_size=rnn_size, + num_layers=1, + direction=rnn_direction)) + 
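A worked example of the Conv2dSubsampling4Online arithmetic above, using feat_size=161 and odim=32 as in CRNNEncoder; only the numbers are illustrative.

# Two stride-2 convs shrink both the time and frequency axes by roughly 4x.
idim, odim = 161, 32
freq_after_conv = ((idim - 1) // 2 - 1) // 2   # 39 frequency bins after the conv stack
output_dim = freq_after_conv * odim            # 1248 flattened features per output frame
receptive_field_length = 2 * (3 - 1) + 3       # 7 input frames seen by one output frame
subsampling_rate = 4                           # one output frame per 4 input frames

T_in = 100
T_out = ((T_in - 1) // 2 - 1) // 2             # 24 output frames, same rule as x_len
print(output_dim, receptive_field_length, T_out)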
self.layernorm_list.append(nn.LayerNorm(layernorm_size)) + self.output_dim = layernorm_size + + fc_input_size = layernorm_size + for i in range(self.num_fc_layers): + self.fc_layers_list.append( + nn.Linear(fc_input_size, fc_layers_size_list[i])) + fc_input_size = fc_layers_size_list[i] + self.output_dim = fc_layers_size_list[i] + + @property + def output_size(self): + return self.output_dim + + def forward(self, x, x_lens, init_state_h_box=None, init_state_c_box=None): + """Compute Encoder outputs + + Args: + x (Tensor): [B, T, D] + x_lens (Tensor): [B] + init_state_h_box(Tensor): init_states h for RNN layers: [num_rnn_layers * num_directions, batch_size, hidden_size] + init_state_c_box(Tensor): init_states c for RNN layers: [num_rnn_layers * num_directions, batch_size, hidden_size] + Return: + x (Tensor): encoder outputs, [B, T, D] + x_lens (Tensor): encoder length, [B] + final_state_h_box(Tensor): final_states h for RNN layers: [num_rnn_layers * num_directions, batch_size, hidden_size] + final_state_c_box(Tensor): final_states c for RNN layers: [num_rnn_layers * num_directions, batch_size, hidden_size] + """ + if init_state_h_box is not None: + init_state_list = None + + if self.use_gru is True: + init_state_h_list = paddle.split( + init_state_h_box, self.num_rnn_layers, axis=0) + init_state_list = init_state_h_list + else: + init_state_h_list = paddle.split( + init_state_h_box, self.num_rnn_layers, axis=0) + init_state_c_list = paddle.split( + init_state_c_box, self.num_rnn_layers, axis=0) + init_state_list = [(init_state_h_list[i], init_state_c_list[i]) + for i in range(self.num_rnn_layers)] + else: + init_state_list = [None] * self.num_rnn_layers + + x, x_lens = self.conv(x, x_lens) + final_chunk_state_list = [] + for i in range(0, self.num_rnn_layers): + x, final_state = self.rnn[i](x, init_state_list[i], + x_lens) #[B, T, D] + final_chunk_state_list.append(final_state) + x = self.layernorm_list[i](x) + + for i in range(self.num_fc_layers): + x = self.fc_layers_list[i](x) + x = F.relu(x) + + if self.use_gru is True: + final_chunk_state_h_box = paddle.concat( + final_chunk_state_list, axis=0) + final_chunk_state_c_box = init_state_c_box + else: + final_chunk_state_h_list = [ + final_chunk_state_list[i][0] for i in range(self.num_rnn_layers) + ] + final_chunk_state_c_list = [ + final_chunk_state_list[i][1] for i in range(self.num_rnn_layers) + ] + final_chunk_state_h_box = paddle.concat( + final_chunk_state_h_list, axis=0) + final_chunk_state_c_box = paddle.concat( + final_chunk_state_c_list, axis=0) + + return x, x_lens, final_chunk_state_h_box, final_chunk_state_c_box + + def forward_chunk_by_chunk(self, x, x_lens, decoder_chunk_size=8): + """Compute Encoder outputs + + Args: + x (Tensor): [B, T, D] + x_lens (Tensor): [B] + decoder_chunk_size: The chunk size of decoder + Returns: + eouts_list (List of Tensor): The list of encoder outputs in chunk_size: [B, chunk_size, D] * num_chunks + eouts_lens_list (List of Tensor): The list of encoder length in chunk_size: [B] * num_chunks + final_state_h_box(Tensor): final_states h for RNN layers: [num_rnn_layers * num_directions, batch_size, hidden_size] + final_state_c_box(Tensor): final_states c for RNN layers: [num_rnn_layers * num_directions, batch_size, hidden_size] + """ + subsampling_rate = self.conv.subsampling_rate + receptive_field_length = self.conv.receptive_field_length + chunk_size = (decoder_chunk_size - 1 + ) * subsampling_rate + receptive_field_length + chunk_stride = subsampling_rate * decoder_chunk_size + max_len = 
x.shape[1] + assert (chunk_size <= max_len) + + eouts_chunk_list = [] + eouts_chunk_lens_list = [] + if (max_len - chunk_size) % chunk_stride != 0: + padding_len = chunk_stride - (max_len - chunk_size) % chunk_stride + else: + padding_len = 0 + padding = paddle.zeros((x.shape[0], padding_len, x.shape[2])) + padded_x = paddle.concat([x, padding], axis=1) + num_chunk = (max_len + padding_len - chunk_size) / chunk_stride + 1 + num_chunk = int(num_chunk) + chunk_state_h_box = None + chunk_state_c_box = None + final_state_h_box = None + final_state_c_box = None + for i in range(0, num_chunk): + start = i * chunk_stride + end = start + chunk_size + x_chunk = padded_x[:, start:end, :] + + x_len_left = paddle.where(x_lens - i * chunk_stride < 0, + paddle.zeros_like(x_lens), + x_lens - i * chunk_stride) + x_chunk_len_tmp = paddle.ones_like(x_lens) * chunk_size + x_chunk_lens = paddle.where(x_len_left < x_chunk_len_tmp, + x_len_left, x_chunk_len_tmp) + + eouts_chunk, eouts_chunk_lens, chunk_state_h_box, chunk_state_c_box = self.forward( + x_chunk, x_chunk_lens, chunk_state_h_box, chunk_state_c_box) + + eouts_chunk_list.append(eouts_chunk) + eouts_chunk_lens_list.append(eouts_chunk_lens) + final_state_h_box = chunk_state_h_box + final_state_c_box = chunk_state_c_box + return eouts_chunk_list, eouts_chunk_lens_list, final_state_h_box, final_state_c_box + + +class DeepSpeech2ModelOnline(nn.Layer): + """The DeepSpeech2 network structure for online. + + :param audio: Audio spectrogram data layer. + :type audio: Variable + :param text: Transcription text data layer. + :type text: Variable + :param audio_len: Valid sequence length data layer. + :type audio_len: Variable + :param feat_size: feature size for audio. + :type feat_size: int + :param dict_size: Dictionary size for tokenized transcription. + :type dict_size: int + :param num_conv_layers: Number of stacking convolution layers. + :type num_conv_layers: int + :param num_rnn_layers: Number of stacking RNN layers. + :type num_rnn_layers: int + :param rnn_size: RNN layer size (dimension of RNN cells). + :type rnn_size: int + :param num_fc_layers: Number of stacking FC layers. + :type num_fc_layers: int + :param fc_layers_size_list: The list of FC layer sizes. + :type fc_layers_size_list: [int,] + :param use_gru: Use gru if set True. Use simple rnn if set False. + :type use_gru: bool + :return: A tuple of an output unnormalized log probability layer ( + before softmax) and a ctc cost layer. + :rtype: tuple of LayerOutput + """ + + @classmethod + def params(cls, config: Optional[CfgNode]=None) -> CfgNode: + default = CfgNode( + dict( + num_conv_layers=2, #Number of stacking convolution layers. + num_rnn_layers=4, #Number of stacking RNN layers. + rnn_layer_size=1024, #RNN layer size (number of RNN cells). + num_fc_layers=2, + fc_layers_size_list=[512, 256], + use_gru=True, #Use gru if set True. Use simple rnn if set False. 
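A worked example of the chunking arithmetic in forward_chunk_by_chunk above, with the default decoder_chunk_size=8 and the conv frontend's subsampling_rate=4 and receptive_field_length=7; the utterance length is hypothetical.

# How many input frames each chunk consumes, and how many chunks an utterance yields.
decoder_chunk_size = 8
subsampling_rate = 4
receptive_field_length = 7
chunk_size = (decoder_chunk_size - 1) * subsampling_rate + receptive_field_length  # 35 frames
chunk_stride = subsampling_rate * decoder_chunk_size                               # 32 frames

max_len = 100                                          # hypothetical utterance length
rem = (max_len - chunk_size) % chunk_stride
padding_len = (chunk_stride - rem) % chunk_stride      # 31 frames of zero padding
num_chunk = (max_len + padding_len - chunk_size) // chunk_stride + 1               # 4 chunks
print(chunk_size, chunk_stride, padding_len, num_chunk)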
+ blank_id=0, # index of blank in vocob.txt + )) + if config is not None: + config.merge_from_other_cfg(default) + return default + + def __init__(self, + feat_size, + dict_size, + num_conv_layers=2, + num_rnn_layers=4, + rnn_size=1024, + rnn_direction='forward', + num_fc_layers=2, + fc_layers_size_list=[512, 256], + use_gru=False, + blank_id=0): + super().__init__() + self.encoder = CRNNEncoder( + feat_size=feat_size, + dict_size=dict_size, + num_conv_layers=num_conv_layers, + num_rnn_layers=num_rnn_layers, + rnn_direction=rnn_direction, + num_fc_layers=num_fc_layers, + fc_layers_size_list=fc_layers_size_list, + rnn_size=rnn_size, + use_gru=use_gru) + + self.decoder = CTCDecoder( + odim=dict_size, # is in vocab + enc_n_units=self.encoder.output_size, + blank_id=blank_id, + dropout_rate=0.0, + reduction=True, # sum + batch_average=True, # sum / batch_size + grad_norm_type='instance') + + def forward(self, audio, audio_len, text, text_len): + """Compute Model loss + + Args: + audio (Tenosr): [B, T, D] + audio_len (Tensor): [B] + text (Tensor): [B, U] + text_len (Tensor): [B] + + Returns: + loss (Tenosr): [1] + """ + eouts, eouts_len, final_state_h_box, final_state_c_box = self.encoder( + audio, audio_len, None, None) + loss = self.decoder(eouts, eouts_len, text, text_len) + return loss + + @paddle.no_grad() + def decode(self, audio, audio_len, vocab_list, decoding_method, + lang_model_path, beam_alpha, beam_beta, beam_size, cutoff_prob, + cutoff_top_n, num_processes): + # init once + # decoders only accept string encoded in utf-8 + self.decoder.init_decode( + beam_alpha=beam_alpha, + beam_beta=beam_beta, + lang_model_path=lang_model_path, + vocab_list=vocab_list, + decoding_method=decoding_method) + + eouts, eouts_len, final_state_h_box, final_state_c_box = self.encoder( + audio, audio_len, None, None) + probs = self.decoder.softmax(eouts) + return self.decoder.decode_probs( + probs.numpy(), eouts_len, vocab_list, decoding_method, + lang_model_path, beam_alpha, beam_beta, beam_size, cutoff_prob, + cutoff_top_n, num_processes) + + @classmethod + def from_pretrained(cls, dataloader, config, checkpoint_path): + """Build a DeepSpeech2Model model from a pretrained model. + Parameters + ---------- + dataloader: paddle.io.DataLoader + + config: yacs.config.CfgNode + model configs + + checkpoint_path: Path or str + the path of pretrained model checkpoint, without extension name + + Returns + ------- + DeepSpeech2ModelOnline + The model built from pretrained result. + """ + model = cls(feat_size=dataloader.collate_fn.feature_size, + dict_size=dataloader.collate_fn.vocab_size, + num_conv_layers=config.model.num_conv_layers, + num_rnn_layers=config.model.num_rnn_layers, + rnn_size=config.model.rnn_layer_size, + rnn_direction=config.model.rnn_direction, + num_fc_layers=config.model.num_fc_layers, + fc_layers_size_list=config.model.fc_layers_size_list, + use_gru=config.model.use_gru, + blank_id=config.model.blank_id) + infos = Checkpoint().load_parameters( + model, checkpoint_path=checkpoint_path) + logger.info(f"checkpoint info: {infos}") + layer_tools.summary(model) + return model + + @classmethod + def from_config(cls, config): + """Build a DeepSpeec2ModelOnline from config + Parameters + + config: yacs.config.CfgNode + config.model + Returns + ------- + DeepSpeech2ModelOnline + The model built from config. 
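A minimal usage sketch for the training forward pass of DeepSpeech2ModelOnline, assuming the package from this diff is importable; shapes and the vocabulary size are hypothetical.

# One training step: encoder outputs -> CTC loss (scalar).
import paddle
from deepspeech.models.ds2_online import DeepSpeech2ModelOnline

model = DeepSpeech2ModelOnline(feat_size=161, dict_size=29)
audio = paddle.randn([2, 100, 161])                          # [B, T, D]
audio_len = paddle.to_tensor([100, 80], dtype='int64')       # [B]
text = paddle.randint(1, 29, shape=[2, 12], dtype='int64')   # [B, U], 0 is blank
text_len = paddle.to_tensor([12, 9], dtype='int64')          # [B]

loss = model(audio, audio_len, text, text_len)
loss.backward()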
+ """ + model = cls(feat_size=config.feat_size, + dict_size=config.dict_size, + num_conv_layers=config.num_conv_layers, + num_rnn_layers=config.num_rnn_layers, + rnn_size=config.rnn_layer_size, + rnn_direction=config.rnn_direction, + num_fc_layers=config.num_fc_layers, + fc_layers_size_list=config.fc_layers_size_list, + use_gru=config.use_gru, + blank_id=config.blank_id) + return model + + +class DeepSpeech2InferModelOnline(DeepSpeech2ModelOnline): + def __init__(self, + feat_size, + dict_size, + num_conv_layers=2, + num_rnn_layers=4, + rnn_size=1024, + rnn_direction='forward', + num_fc_layers=2, + fc_layers_size_list=[512, 256], + use_gru=False, + blank_id=0): + super().__init__( + feat_size=feat_size, + dict_size=dict_size, + num_conv_layers=num_conv_layers, + num_rnn_layers=num_rnn_layers, + rnn_size=rnn_size, + rnn_direction=rnn_direction, + num_fc_layers=num_fc_layers, + fc_layers_size_list=fc_layers_size_list, + use_gru=use_gru, + blank_id=blank_id) + + def forward(self, audio_chunk, audio_chunk_lens, chunk_state_h_box, + chunk_state_c_box): + eouts_chunk, eouts_chunk_lens, final_state_h_box, final_state_c_box = self.encoder( + audio_chunk, audio_chunk_lens, chunk_state_h_box, chunk_state_c_box) + probs_chunk = self.decoder.softmax(eouts_chunk) + return probs_chunk, eouts_chunk_lens, final_state_h_box, final_state_c_box + + def export(self): + static_model = paddle.jit.to_static( + self, + input_spec=[ + paddle.static.InputSpec( + shape=[None, None, + self.encoder.feat_size], #[B, chunk_size, feat_dim] + dtype='float32'), + paddle.static.InputSpec(shape=[None], + dtype='int64'), # audio_length, [B] + paddle.static.InputSpec( + shape=[None, None, None], dtype='float32'), + paddle.static.InputSpec( + shape=[None, None, None], dtype='float32') + ]) + return static_model diff --git a/deepspeech/models/u2/__init__.py b/deepspeech/models/u2/__init__.py new file mode 100644 index 000000000..a9010f1d0 --- /dev/null +++ b/deepspeech/models/u2/__init__.py @@ -0,0 +1,19 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from .u2 import U2InferModel +from .u2 import U2Model +from .updater import U2Evaluator +from .updater import U2Updater + +__all__ = ["U2Model", "U2InferModel", "U2Evaluator", "U2Updater"] diff --git a/deepspeech/models/u2.py b/deepspeech/models/u2/u2.py similarity index 94% rename from deepspeech/models/u2.py rename to deepspeech/models/u2/u2.py index 238e2d35c..39ed9d5d1 100644 --- a/deepspeech/models/u2.py +++ b/deepspeech/models/u2/u2.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
"""U2 ASR Model -Unified Streaming and Non-streaming Two-pass End-to-end Model for Speech Recognition +Unified Streaming and Non-streaming Two-pass End-to-end Model for Speech Recognition (https://arxiv.org/pdf/2012.05481.pdf) """ import sys @@ -48,13 +48,14 @@ from deepspeech.utils.tensor_utils import add_sos_eos from deepspeech.utils.tensor_utils import pad_sequence from deepspeech.utils.tensor_utils import th_accuracy from deepspeech.utils.utility import log_add +from deepspeech.utils.utility import UpdateConfig __all__ = ["U2Model", "U2InferModel"] logger = Log(__name__).getlog() -class U2BaseModel(nn.Module): +class U2BaseModel(nn.Layer): """CTC-Attention hybrid Encoder-Decoder model""" @classmethod @@ -83,7 +84,7 @@ class U2BaseModel(nn.Module): # cnn_module_kernel=15, # activation_type='swish', # pos_enc_layer_type='rel_pos', - # selfattention_layer_type='rel_selfattn', + # selfattention_layer_type='rel_selfattn', )) # decoder related default.decoder = 'transformer' @@ -115,7 +116,8 @@ class U2BaseModel(nn.Module): ctc_weight: float=0.5, ignore_id: int=IGNORE_ID, lsm_weight: float=0.0, - length_normalized_loss: bool=False): + length_normalized_loss: bool=False, + **kwargs): assert 0.0 <= ctc_weight <= 1.0, ctc_weight super().__init__() @@ -162,10 +164,7 @@ class U2BaseModel(nn.Module): encoder_out, encoder_mask = self.encoder(speech, speech_lengths) encoder_time = time.time() - start #logger.debug(f"encoder time: {encoder_time}") - #TODO(Hui Zhang): sum not support bool type - #encoder_out_lens = encoder_mask.squeeze(1).sum(1) #[B, 1, T] -> [B] - encoder_out_lens = encoder_mask.squeeze(1).cast(paddle.int64).sum( - 1) #[B, 1, T] -> [B] + encoder_out_lens = encoder_mask.squeeze(1).sum(1) #[B, 1, T] -> [B] # 2a. Attention-decoder branch loss_att = None @@ -244,8 +243,8 @@ class U2BaseModel(nn.Module): simulate_streaming (bool, optional): streaming or not. Defaults to False. Returns: - Tuple[paddle.Tensor, paddle.Tensor]: - encoder hiddens (B, Tmax, D), + Tuple[paddle.Tensor, paddle.Tensor]: + encoder hiddens (B, Tmax, D), encoder hiddens mask (B, 1, Tmax). """ # Let's assume B = batch_size @@ -320,8 +319,7 @@ class U2BaseModel(nn.Module): # 2. 
Decoder forward step by step for i in range(1, maxlen + 1): # Stop if all batch and all beam produce eos - # TODO(Hui Zhang): if end_flag.sum() == running_size: - if end_flag.cast(paddle.int64).sum() == running_size: + if end_flag.sum() == running_size: break # 2.1 Forward decoder step @@ -399,6 +397,7 @@ class U2BaseModel(nn.Module): assert speech.shape[0] == speech_lengths.shape[0] assert decoding_chunk_size != 0 batch_size = speech.shape[0] + # Let's assume B = batch_size # encoder_out: (B, maxlen, encoder_dim) # encoder_mask: (B, 1, Tmax) @@ -406,14 +405,14 @@ class U2BaseModel(nn.Module): speech, speech_lengths, decoding_chunk_size, num_decoding_left_chunks, simulate_streaming) maxlen = encoder_out.size(1) - # (TODO Hui Zhang): bool no support reduce_sum - # encoder_out_lens = encoder_mask.squeeze(1).sum(1) - encoder_out_lens = encoder_mask.squeeze(1).astype(paddle.int).sum(1) + encoder_out_lens = encoder_mask.squeeze(1).sum(1) ctc_probs = self.ctc.log_softmax(encoder_out) # (B, maxlen, vocab_size) + topk_prob, topk_index = ctc_probs.topk(1, axis=2) # (B, maxlen, 1) topk_index = topk_index.view(batch_size, maxlen) # (B, maxlen) pad_mask = make_pad_mask(encoder_out_lens) # (B, maxlen) topk_index = topk_index.masked_fill_(pad_mask, self.eos) # (B, maxlen) + hyps = [hyp.tolist() for hyp in topk_index] hyps = [remove_duplicates_and_blank(hyp) for hyp in hyps] return hyps @@ -449,6 +448,7 @@ class U2BaseModel(nn.Module): batch_size = speech.shape[0] # For CTC prefix beam search, we only support batch_size=1 assert batch_size == 1 + # Let's assume B = batch_size and N = beam_size # 1. Encoder forward and get CTC score encoder_out, encoder_mask = self._forward_encoder( @@ -458,7 +458,9 @@ class U2BaseModel(nn.Module): maxlen = encoder_out.size(1) ctc_probs = self.ctc.log_softmax(encoder_out) # (1, maxlen, vocab_size) ctc_probs = ctc_probs.squeeze(0) + # cur_hyps: (prefix, (blank_ending_score, none_blank_ending_score)) + # blank_ending_score and none_blank_ending_score in ln domain cur_hyps = [(tuple(), (0.0, -float('inf')))] # 2. 
CTC beam search step by step for t in range(0, maxlen): @@ -498,6 +500,7 @@ class U2BaseModel(nn.Module): key=lambda x: log_add(list(x[1])), reverse=True) cur_hyps = next_hyps[:beam_size] + hyps = [(y[0], log_add([y[1][0], y[1][1]])) for y in cur_hyps] return hyps, encoder_out @@ -561,12 +564,13 @@ class U2BaseModel(nn.Module): batch_size = speech.shape[0] # For attention rescoring we only support batch_size=1 assert batch_size == 1 - # encoder_out: (1, maxlen, encoder_dim), len(hyps) = beam_size + + # len(hyps) = beam_size, encoder_out: (1, maxlen, encoder_dim) hyps, encoder_out = self._ctc_prefix_beam_search( speech, speech_lengths, beam_size, decoding_chunk_size, num_decoding_left_chunks, simulate_streaming) - assert len(hyps) == beam_size + hyps_pad = pad_sequence([ paddle.to_tensor(hyp[0], place=device, dtype=paddle.long) for hyp in hyps @@ -576,55 +580,60 @@ class U2BaseModel(nn.Module): dtype=paddle.long) # (beam_size,) hyps_pad, _ = add_sos_eos(hyps_pad, self.sos, self.eos, self.ignore_id) hyps_lens = hyps_lens + 1 # Add at begining + encoder_out = encoder_out.repeat(beam_size, 1, 1) encoder_mask = paddle.ones( (beam_size, 1, encoder_out.size(1)), dtype=paddle.bool) decoder_out, _ = self.decoder( encoder_out, encoder_mask, hyps_pad, hyps_lens) # (beam_size, max_hyps_len, vocab_size) + # ctc score in ln domain decoder_out = paddle.nn.functional.log_softmax(decoder_out, axis=-1) decoder_out = decoder_out.numpy() + # Only use decoder score for rescoring best_score = -float('inf') best_index = 0 + # hyps is List[(Text=List[int], Score=float)], len(hyps)=beam_size for i, hyp in enumerate(hyps): score = 0.0 for j, w in enumerate(hyp[0]): score += decoder_out[i][j][w] + # last decoder output token is `eos`, for laste decoder input token. score += decoder_out[i][len(hyp[0])][self.eos] - # add ctc score + # add ctc score (which in ln domain) score += hyp[1] * ctc_weight if score > best_score: best_score = score best_index = i return hyps[best_index][0] - @jit.export + #@jit.to_static def subsampling_rate(self) -> int: """ Export interface for c++ call, return subsampling_rate of the model """ return self.encoder.embed.subsampling_rate - @jit.export + #@jit.to_static def right_context(self) -> int: """ Export interface for c++ call, return right_context of the model """ return self.encoder.embed.right_context - @jit.export + #@jit.to_static def sos_symbol(self) -> int: """ Export interface for c++ call, return sos symbol id of the model """ return self.sos - @jit.export + #@jit.to_static def eos_symbol(self) -> int: """ Export interface for c++ call, return eos symbol id of the model """ return self.eos - @jit.export + @jit.to_static def forward_encoder_chunk( self, xs: paddle.Tensor, @@ -654,18 +663,18 @@ class U2BaseModel(nn.Module): xs, offset, required_cache_size, subsampling_cache, elayers_output_cache, conformer_cnn_cache) - @jit.export + # @jit.to_static def ctc_activation(self, xs: paddle.Tensor) -> paddle.Tensor: """ Export interface for c++ call, apply linear transform and log softmax before ctc Args: - xs (paddle.Tensor): encoder output + xs (paddle.Tensor): encoder output, (B, T, D) Returns: paddle.Tensor: activation before ctc """ return self.ctc.log_softmax(xs) - @jit.export + @jit.to_static def forward_attention_decoder( self, hyps: paddle.Tensor, @@ -717,8 +726,8 @@ class U2BaseModel(nn.Module): feats (Tenosr): audio features, (B, T, D) feats_lengths (Tenosr): (B) text_feature (TextFeaturizer): text feature object. - decoding_method (str): decoding mode, e.g. 
- 'attention', 'ctc_greedy_search', + decoding_method (str): decoding mode, e.g. + 'attention', 'ctc_greedy_search', 'ctc_prefix_beam_search', 'attention_rescoring' lang_model_path (str): lm path. beam_alpha (float): lm weight. @@ -726,19 +735,19 @@ class U2BaseModel(nn.Module): beam_size (int): beam size for search cutoff_prob (float): for prune. cutoff_top_n (int): for prune. - num_processes (int): + num_processes (int): ctc_weight (float, optional): ctc weight for attention rescoring decode mode. Defaults to 0.0. decoding_chunk_size (int, optional): decoding chunk size. Defaults to -1. <0: for decoding, use full chunk. >0: for decoding, use fixed chunk size as set. - 0: used for training, it's prohibited here. - num_decoding_left_chunks (int, optional): + 0: used for training, it's prohibited here. + num_decoding_left_chunks (int, optional): number of left chunks for decoding. Defaults to -1. simulate_streaming (bool, optional): simulate streaming inference. Defaults to False. Raises: ValueError: when not support decoding_method. - + Returns: List[List[int]]: transcripts. """ @@ -819,8 +828,9 @@ class U2Model(U2BaseModel): ValueError: raise when using not support encoder type. Returns: - int, nn.Layer, nn.Layer, nn.Layer: vocab size, encoder, decoder, ctc + int, nn.Layer, nn.Layer, nn.Layer: vocab size, encoder, decoder, ctc """ + # cmvn if configs['cmvn_file'] is not None: mean, istd = load_cmvn(configs['cmvn_file'], configs['cmvn_file_type']) @@ -830,11 +840,13 @@ class U2Model(U2BaseModel): else: global_cmvn = None + # input & output dim input_dim = configs['input_dim'] vocab_size = configs['output_dim'] assert input_dim != 0, input_dim assert vocab_size != 0, vocab_size + # encoder encoder_type = configs.get('encoder', 'transformer') logger.info(f"U2 Encoder type: {encoder_type}") if encoder_type == 'transformer': @@ -846,16 +858,21 @@ class U2Model(U2BaseModel): else: raise ValueError(f"not support encoder type:{encoder_type}") + # decoder decoder = TransformerDecoder(vocab_size, encoder.output_size(), **configs['decoder_conf']) + + # ctc decoder and ctc loss + model_conf = configs['model_conf'] ctc = CTCDecoder( odim=vocab_size, enc_n_units=encoder.output_size(), blank_id=0, - dropout_rate=0.0, + dropout_rate=model_conf['ctc_dropoutrate'], reduction=True, # sum - batch_average=True) # sum / batch_size + batch_average=True, # sum / batch_size + grad_norm_type=model_conf['ctc_grad_norm_type']) return vocab_size, encoder, decoder, ctc @@ -876,25 +893,25 @@ class U2Model(U2BaseModel): return model @classmethod - def from_pretrained(cls, dataset, config, checkpoint_path): + def from_pretrained(cls, dataloader, config, checkpoint_path): """Build a DeepSpeech2Model model from a pretrained model. Args: - dataset (paddle.io.Dataset): not used. + dataloader (paddle.io.DataLoader): not used. config (yacs.config.CfgNode): model configs checkpoint_path (Path or str): the path of pretrained model checkpoint, without extension name Returns: DeepSpeech2Model: The model built from pretrained result. 
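A minimal sketch of the log-domain score bookkeeping used by the CTC prefix beam search above: each prefix tracks a blank-ending and a non-blank-ending score in ln domain, and its final score is their log-sum. The `log_add` below is written to mirror `deepspeech.utils.utility.log_add`; the probabilities are hypothetical.

# Stable ln(sum(exp(a_i))) and a toy prefix score.
import math

def log_add(args):
    if all(a == -float('inf') for a in args):
        return -float('inf')
    a_max = max(args)
    return a_max + math.log(sum(math.exp(a - a_max) for a in args))

# hypothetical prefix with P(end in blank)=0.2 and P(end in non-blank)=0.3
pb, pnb = math.log(0.2), math.log(0.3)
total = log_add([pb, pnb])     # == ln(0.5)
print(total, math.log(0.5))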
""" - config.defrost() - config.input_dim = dataset.feature_size - config.output_dim = dataset.vocab_size - config.freeze() + with UpdateConfig(config): + config.input_dim = dataloader.collate_fn.feature_size + config.output_dim = dataloader.collate_fn.vocab_size + model = cls.from_config(config) if checkpoint_path: - infos = checkpoint.load_parameters( + infos = checkpoint.Checkpoint().load_parameters( model, checkpoint_path=checkpoint_path) logger.info(f"checkpoint info: {infos}") layer_tools.summary(model) diff --git a/deepspeech/models/u2/updater.py b/deepspeech/models/u2/updater.py new file mode 100644 index 000000000..7b70ca047 --- /dev/null +++ b/deepspeech/models/u2/updater.py @@ -0,0 +1,149 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from contextlib import nullcontext + +import paddle +from paddle import distributed as dist + +from deepspeech.training.extensions.evaluator import StandardEvaluator +from deepspeech.training.reporter import report +from deepspeech.training.timer import Timer +from deepspeech.training.updaters.standard_updater import StandardUpdater +from deepspeech.utils import layer_tools +from deepspeech.utils.log import Log + +logger = Log(__name__).getlog() + + +class U2Evaluator(StandardEvaluator): + def __init__(self, model, dataloader): + super().__init__(model, dataloader) + self.msg = "" + self.num_seen_utts = 0 + self.total_loss = 0.0 + + def evaluate_core(self, batch): + self.msg = "Valid: Rank: {}, ".format(dist.get_rank()) + losses_dict = {} + + loss, attention_loss, ctc_loss = self.model(*batch[1:]) + if paddle.isfinite(loss): + num_utts = batch[1].shape[0] + self.num_seen_utts += num_utts + self.total_loss += float(loss) * num_utts + + losses_dict['loss'] = float(loss) + if attention_loss: + losses_dict['att_loss'] = float(attention_loss) + if ctc_loss: + losses_dict['ctc_loss'] = float(ctc_loss) + + for k, v in losses_dict.items(): + report("eval/" + k, v) + + self.msg += ', '.join('{}: {:>.6f}'.format(k, v) + for k, v in losses_dict.items()) + logger.info(self.msg) + return self.total_loss, self.num_seen_utts + + +class U2Updater(StandardUpdater): + def __init__(self, + model, + optimizer, + scheduler, + dataloader, + init_state=None, + accum_grad=1, + **kwargs): + super().__init__( + model, optimizer, scheduler, dataloader, init_state=init_state) + self.accum_grad = accum_grad + self.forward_count = 0 + self.msg = "" + + def update_core(self, batch): + """One Step + + Args: + batch (List[Object]): utts, xs, xlens, ys, ylens + """ + losses_dict = {} + self.msg = "Rank: {}, ".format(dist.get_rank()) + + # forward + batch_size = batch[1].shape[0] + loss, attention_loss, ctc_loss = self.model(*batch[1:]) + # loss div by `batch_size * accum_grad` + loss /= self.accum_grad + + # loss backward + if (self.forward_count + 1) != self.accum_grad: + # Disable gradient synchronizations across DDP processes. 
+ # Within this context, gradients will be accumulated on module + # variables, which will later be synchronized. + context = self.model.no_sync + else: + # Used for single gpu training and DDP gradient synchronization + # processes. + context = nullcontext + + with context(): + loss.backward() + layer_tools.print_grads(self.model, print_func=None) + + # loss info + losses_dict['loss'] = float(loss) * self.accum_grad + if attention_loss: + losses_dict['att_loss'] = float(attention_loss) + if ctc_loss: + losses_dict['ctc_loss'] = float(ctc_loss) + # report loss + for k, v in losses_dict.items(): + report("train/" + k, v) + # loss msg + self.msg += "batch size: {}, ".format(batch_size) + self.msg += "accum: {}, ".format(self.accum_grad) + self.msg += ', '.join('{}: {:>.6f}'.format(k, v) + for k, v in losses_dict.items()) + + # Truncate the graph + loss.detach() + + # update parameters + self.forward_count += 1 + if self.forward_count != self.accum_grad: + return + self.forward_count = 0 + + self.optimizer.step() + self.optimizer.clear_grad() + self.scheduler.step() + + def update(self): + # model is default in train mode + + # training for a step is implemented here + with Timer("data time cost:{}"): + batch = self.read_batch() + with Timer("step time cost:{}"): + self.update_core(batch) + + # #iterations with accum_grad > 1 + # Ref.: https://github.com/espnet/espnet/issues/777 + if self.forward_count == 0: + self.state.iteration += 1 + if self.updates_per_epoch is not None: + if self.state.iteration % self.updates_per_epoch == 0: + self.state.epoch += 1 diff --git a/deepspeech/models/u2_st.py b/deepspeech/models/u2_st.py new file mode 100644 index 000000000..87ca68b29 --- /dev/null +++ b/deepspeech/models/u2_st.py @@ -0,0 +1,728 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
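A minimal sketch of the accumulate-then-step pattern used by U2Updater.update_core above, assuming the model is wrapped in paddle.DataParallel (whose `no_sync` context defers gradient all-reduce); the model and optimizer here are hypothetical.

# Gradients are accumulated locally for accum_grad - 1 steps, then synchronized
# and applied on the final step of each accumulation window.
from contextlib import nullcontext

def train_step(model, optimizer, batch, forward_count, accum_grad):
    loss = model(*batch)
    loss = loss / accum_grad                          # keep the effective loss scale constant
    sync_this_step = (forward_count + 1) % accum_grad == 0
    context = nullcontext if sync_this_step else model.no_sync
    with context():
        loss.backward()
    if sync_this_step:
        optimizer.step()
        optimizer.clear_grad()
    return (forward_count + 1) % accum_grad           # updated forward_count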
+"""U2 ASR Model +Unified Streaming and Non-streaming Two-pass End-to-end Model for Speech Recognition +(https://arxiv.org/pdf/2012.05481.pdf) +""" +import time +from typing import Dict +from typing import List +from typing import Optional +from typing import Tuple + +import paddle +from paddle import jit +from paddle import nn +from yacs.config import CfgNode + +from deepspeech.frontend.utility import IGNORE_ID +from deepspeech.frontend.utility import load_cmvn +from deepspeech.modules.cmvn import GlobalCMVN +from deepspeech.modules.ctc import CTCDecoder +from deepspeech.modules.decoder import TransformerDecoder +from deepspeech.modules.encoder import ConformerEncoder +from deepspeech.modules.encoder import TransformerEncoder +from deepspeech.modules.loss import LabelSmoothingLoss +from deepspeech.modules.mask import mask_finished_preds +from deepspeech.modules.mask import mask_finished_scores +from deepspeech.modules.mask import subsequent_mask +from deepspeech.utils import checkpoint +from deepspeech.utils import layer_tools +from deepspeech.utils.log import Log +from deepspeech.utils.tensor_utils import add_sos_eos +from deepspeech.utils.tensor_utils import th_accuracy +from deepspeech.utils.utility import UpdateConfig + +__all__ = ["U2STModel", "U2STInferModel"] + +logger = Log(__name__).getlog() + + +class U2STBaseModel(nn.Layer): + """CTC-Attention hybrid Encoder-Decoder model""" + + @classmethod + def params(cls, config: Optional[CfgNode]=None) -> CfgNode: + # network architecture + default = CfgNode() + # allow add new item when merge_with_file + default.cmvn_file = "" + default.cmvn_file_type = "json" + default.input_dim = 0 + default.output_dim = 0 + # encoder related + default.encoder = 'transformer' + default.encoder_conf = CfgNode( + dict( + output_size=256, # dimension of attention + attention_heads=4, + linear_units=2048, # the number of units of position-wise feed forward + num_blocks=12, # the number of encoder blocks + dropout_rate=0.1, + positional_dropout_rate=0.1, + attention_dropout_rate=0.0, + input_layer='conv2d', # encoder input type, you can chose conv2d, conv2d6 and conv2d8 + normalize_before=True, + # use_cnn_module=True, + # cnn_module_kernel=15, + # activation_type='swish', + # pos_enc_layer_type='rel_pos', + # selfattention_layer_type='rel_selfattn', + )) + # decoder related + default.decoder = 'transformer' + default.decoder_conf = CfgNode( + dict( + attention_heads=4, + linear_units=2048, + num_blocks=6, + dropout_rate=0.1, + positional_dropout_rate=0.1, + self_attention_dropout_rate=0.0, + src_attention_dropout_rate=0.0, )) + # hybrid CTC/attention + default.model_conf = CfgNode( + dict( + asr_weight=0.0, + ctc_weight=0.0, + lsm_weight=0.1, # label smoothing option + length_normalized_loss=False, )) + + if config is not None: + config.merge_from_other_cfg(default) + return default + + def __init__(self, + vocab_size: int, + encoder: TransformerEncoder, + st_decoder: TransformerDecoder, + decoder: TransformerDecoder=None, + ctc: CTCDecoder=None, + ctc_weight: float=0.0, + asr_weight: float=0.0, + ignore_id: int=IGNORE_ID, + lsm_weight: float=0.0, + length_normalized_loss: bool=False): + assert 0.0 <= ctc_weight <= 1.0, ctc_weight + + super().__init__() + # note that eos is the same as sos (equivalent ID) + self.sos = vocab_size - 1 + self.eos = vocab_size - 1 + self.vocab_size = vocab_size + self.ignore_id = ignore_id + self.ctc_weight = ctc_weight + self.asr_weight = asr_weight + + self.encoder = encoder + self.st_decoder = st_decoder + self.decoder = 
decoder + self.ctc = ctc + self.criterion_att = LabelSmoothingLoss( + size=vocab_size, + padding_idx=ignore_id, + smoothing=lsm_weight, + normalize_length=length_normalized_loss, ) + + def forward( + self, + speech: paddle.Tensor, + speech_lengths: paddle.Tensor, + text: paddle.Tensor, + text_lengths: paddle.Tensor, + asr_text: paddle.Tensor=None, + asr_text_lengths: paddle.Tensor=None, + ) -> Tuple[Optional[paddle.Tensor], Optional[paddle.Tensor], Optional[ + paddle.Tensor]]: + """Frontend + Encoder + Decoder + Calc loss + Args: + speech: (Batch, Length, ...) + speech_lengths: (Batch, ) + text: (Batch, Length) + text_lengths: (Batch,) + Returns: + total_loss, attention_loss, ctc_loss + """ + assert text_lengths.dim() == 1, text_lengths.shape + # Check that batch_size is unified + assert (speech.shape[0] == speech_lengths.shape[0] == text.shape[0] == + text_lengths.shape[0]), (speech.shape, speech_lengths.shape, + text.shape, text_lengths.shape) + # 1. Encoder + start = time.time() + encoder_out, encoder_mask = self.encoder(speech, speech_lengths) + encoder_time = time.time() - start + #logger.debug(f"encoder time: {encoder_time}") + encoder_out_lens = encoder_mask.squeeze(1).sum(1) #[B, 1, T] -> [B] + + # 2a. ST-decoder branch + start = time.time() + loss_st, acc_st = self._calc_st_loss(encoder_out, encoder_mask, text, + text_lengths) + decoder_time = time.time() - start + + loss_asr_att = None + loss_asr_ctc = None + # 2b. ASR Attention-decoder branch + if self.asr_weight > 0.: + if self.ctc_weight != 1.0: + start = time.time() + loss_asr_att, acc_att = self._calc_att_loss( + encoder_out, encoder_mask, asr_text, asr_text_lengths) + decoder_time = time.time() - start + + # 2c. CTC branch + if self.ctc_weight != 0.0: + start = time.time() + loss_asr_ctc = self.ctc(encoder_out, encoder_out_lens, asr_text, + asr_text_lengths) + ctc_time = time.time() - start + + if loss_asr_ctc is None: + loss_asr = loss_asr_att + elif loss_asr_att is None: + loss_asr = loss_asr_ctc + else: + loss_asr = self.ctc_weight * loss_asr_ctc + (1 - self.ctc_weight + ) * loss_asr_att + loss = self.asr_weight * loss_asr + (1 - self.asr_weight) * loss_st + else: + loss = loss_st + return loss, loss_st, loss_asr_att, loss_asr_ctc + + def _calc_st_loss( + self, + encoder_out: paddle.Tensor, + encoder_mask: paddle.Tensor, + ys_pad: paddle.Tensor, + ys_pad_lens: paddle.Tensor, ) -> Tuple[paddle.Tensor, float]: + """Calc attention loss. + + Args: + encoder_out (paddle.Tensor): [B, Tmax, D] + encoder_mask (paddle.Tensor): [B, 1, Tmax] + ys_pad (paddle.Tensor): [B, Umax] + ys_pad_lens (paddle.Tensor): [B] + + Returns: + Tuple[paddle.Tensor, float]: attention_loss, accuracy rate + """ + ys_in_pad, ys_out_pad = add_sos_eos(ys_pad, self.sos, self.eos, + self.ignore_id) + ys_in_lens = ys_pad_lens + 1 + + # 1. Forward decoder + decoder_out, _ = self.st_decoder(encoder_out, encoder_mask, ys_in_pad, + ys_in_lens) + + # 2. Compute attention loss + loss_att = self.criterion_att(decoder_out, ys_out_pad) + acc_att = th_accuracy( + decoder_out.view(-1, self.vocab_size), + ys_out_pad, + ignore_label=self.ignore_id, ) + return loss_att, acc_att + + def _calc_att_loss( + self, + encoder_out: paddle.Tensor, + encoder_mask: paddle.Tensor, + ys_pad: paddle.Tensor, + ys_pad_lens: paddle.Tensor, ) -> Tuple[paddle.Tensor, float]: + """Calc attention loss. 
+ + Args: + encoder_out (paddle.Tensor): [B, Tmax, D] + encoder_mask (paddle.Tensor): [B, 1, Tmax] + ys_pad (paddle.Tensor): [B, Umax] + ys_pad_lens (paddle.Tensor): [B] + + Returns: + Tuple[paddle.Tensor, float]: attention_loss, accuracy rate + """ + ys_in_pad, ys_out_pad = add_sos_eos(ys_pad, self.sos, self.eos, + self.ignore_id) + ys_in_lens = ys_pad_lens + 1 + + # 1. Forward decoder + decoder_out, _ = self.decoder(encoder_out, encoder_mask, ys_in_pad, + ys_in_lens) + + # 2. Compute attention loss + loss_att = self.criterion_att(decoder_out, ys_out_pad) + acc_att = th_accuracy( + decoder_out.view(-1, self.vocab_size), + ys_out_pad, + ignore_label=self.ignore_id, ) + return loss_att, acc_att + + def _forward_encoder( + self, + speech: paddle.Tensor, + speech_lengths: paddle.Tensor, + decoding_chunk_size: int=-1, + num_decoding_left_chunks: int=-1, + simulate_streaming: bool=False, + ) -> Tuple[paddle.Tensor, paddle.Tensor]: + """Encoder pass. + + Args: + speech (paddle.Tensor): [B, Tmax, D] + speech_lengths (paddle.Tensor): [B] + decoding_chunk_size (int, optional): chuck size. Defaults to -1. + num_decoding_left_chunks (int, optional): nums chunks. Defaults to -1. + simulate_streaming (bool, optional): streaming or not. Defaults to False. + + Returns: + Tuple[paddle.Tensor, paddle.Tensor]: + encoder hiddens (B, Tmax, D), + encoder hiddens mask (B, 1, Tmax). + """ + # Let's assume B = batch_size + # 1. Encoder + if simulate_streaming and decoding_chunk_size > 0: + encoder_out, encoder_mask = self.encoder.forward_chunk_by_chunk( + speech, + decoding_chunk_size=decoding_chunk_size, + num_decoding_left_chunks=num_decoding_left_chunks + ) # (B, maxlen, encoder_dim) + else: + encoder_out, encoder_mask = self.encoder( + speech, + speech_lengths, + decoding_chunk_size=decoding_chunk_size, + num_decoding_left_chunks=num_decoding_left_chunks + ) # (B, maxlen, encoder_dim) + return encoder_out, encoder_mask + + def translate( + self, + speech: paddle.Tensor, + speech_lengths: paddle.Tensor, + beam_size: int=10, + decoding_chunk_size: int=-1, + num_decoding_left_chunks: int=-1, + simulate_streaming: bool=False, ) -> paddle.Tensor: + """ Apply beam search on attention decoder + Args: + speech (paddle.Tensor): (batch, max_len, feat_dim) + speech_length (paddle.Tensor): (batch, ) + beam_size (int): beam size for beam search + decoding_chunk_size (int): decoding chunk for dynamic chunk + trained model. + <0: for decoding, use full chunk. + >0: for decoding, use fixed chunk size as set. + 0: used for training, it's prohibited here + simulate_streaming (bool): whether do encoder forward in a + streaming fashion + Returns: + paddle.Tensor: decoding result, (batch, max_result_len) + """ + assert speech.shape[0] == speech_lengths.shape[0] + assert decoding_chunk_size != 0 + device = speech.place + batch_size = speech.shape[0] + + # Let's assume B = batch_size and N = beam_size + # 1. 
Encoder + encoder_out, encoder_mask = self._forward_encoder( + speech, speech_lengths, decoding_chunk_size, + num_decoding_left_chunks, + simulate_streaming) # (B, maxlen, encoder_dim) + maxlen = encoder_out.size(1) + encoder_dim = encoder_out.size(2) + running_size = batch_size * beam_size + encoder_out = encoder_out.unsqueeze(1).repeat(1, beam_size, 1, 1).view( + running_size, maxlen, encoder_dim) # (B*N, maxlen, encoder_dim) + encoder_mask = encoder_mask.unsqueeze(1).repeat( + 1, beam_size, 1, 1).view(running_size, 1, + maxlen) # (B*N, 1, max_len) + + hyps = paddle.ones( + [running_size, 1], dtype=paddle.long).fill_(self.sos) # (B*N, 1) + # log scale score + scores = paddle.to_tensor( + [0.0] + [-float('inf')] * (beam_size - 1), dtype=paddle.float) + scores = scores.to(device).repeat(batch_size).unsqueeze(1).to( + device) # (B*N, 1) + end_flag = paddle.zeros_like(scores, dtype=paddle.bool) # (B*N, 1) + cache: Optional[List[paddle.Tensor]] = None + # 2. Decoder forward step by step + for i in range(1, maxlen + 1): + # Stop if all batch and all beam produce eos + if end_flag.sum() == running_size: + break + + # 2.1 Forward decoder step + hyps_mask = subsequent_mask(i).unsqueeze(0).repeat( + running_size, 1, 1).to(device) # (B*N, i, i) + # logp: (B*N, vocab) + logp, cache = self.st_decoder.forward_one_step( + encoder_out, encoder_mask, hyps, hyps_mask, cache) + + # 2.2 First beam prune: select topk best prob at current time + top_k_logp, top_k_index = logp.topk(beam_size) # (B*N, N) + top_k_logp = mask_finished_scores(top_k_logp, end_flag) + top_k_index = mask_finished_preds(top_k_index, end_flag, self.eos) + + # 2.3 Seconde beam prune: select topk score with history + scores = scores + top_k_logp # (B*N, N), broadcast add + scores = scores.view(batch_size, beam_size * beam_size) # (B, N*N) + scores, offset_k_index = scores.topk(k=beam_size) # (B, N) + scores = scores.view(-1, 1) # (B*N, 1) + + # 2.4. Compute base index in top_k_index, + # regard top_k_index as (B*N*N),regard offset_k_index as (B*N), + # then find offset_k_index in top_k_index + base_k_index = paddle.arange(batch_size).view(-1, 1).repeat( + 1, beam_size) # (B, N) + base_k_index = base_k_index * beam_size * beam_size + best_k_index = base_k_index.view(-1) + offset_k_index.view( + -1) # (B*N) + + # 2.5 Update best hyps + best_k_pred = paddle.index_select( + top_k_index.view(-1), index=best_k_index, axis=0) # (B*N) + best_hyps_index = best_k_index // beam_size + last_best_k_hyps = paddle.index_select( + hyps, index=best_hyps_index, axis=0) # (B*N, i) + hyps = paddle.cat( + (last_best_k_hyps, best_k_pred.view(-1, 1)), + dim=1) # (B*N, i+1) + + # 2.6 Update end flag + end_flag = paddle.eq(hyps[:, -1], self.eos).view(-1, 1) + + # 3. 
Select best of best + scores = scores.view(batch_size, beam_size) + # TODO: length normalization + best_index = paddle.argmax(scores, axis=-1).long() # (B) + best_hyps_index = best_index + paddle.arange( + batch_size, dtype=paddle.long) * beam_size + best_hyps = paddle.index_select(hyps, index=best_hyps_index, axis=0) + best_hyps = best_hyps[:, 1:] + return best_hyps + + # @jit.to_static + def subsampling_rate(self) -> int: + """ Export interface for c++ call, return subsampling_rate of the + model + """ + return self.encoder.embed.subsampling_rate + + # @jit.to_static + def right_context(self) -> int: + """ Export interface for c++ call, return right_context of the model + """ + return self.encoder.embed.right_context + + # @jit.to_static + def sos_symbol(self) -> int: + """ Export interface for c++ call, return sos symbol id of the model + """ + return self.sos + + # @jit.to_static + def eos_symbol(self) -> int: + """ Export interface for c++ call, return eos symbol id of the model + """ + return self.eos + + @jit.to_static + def forward_encoder_chunk( + self, + xs: paddle.Tensor, + offset: int, + required_cache_size: int, + subsampling_cache: Optional[paddle.Tensor]=None, + elayers_output_cache: Optional[List[paddle.Tensor]]=None, + conformer_cnn_cache: Optional[List[paddle.Tensor]]=None, + ) -> Tuple[paddle.Tensor, paddle.Tensor, List[paddle.Tensor], List[ + paddle.Tensor]]: + """ Export interface for c++ call, give input chunk xs, and return + output from time 0 to current chunk. + Args: + xs (paddle.Tensor): chunk input + subsampling_cache (Optional[paddle.Tensor]): subsampling cache + elayers_output_cache (Optional[List[paddle.Tensor]]): + transformer/conformer encoder layers output cache + conformer_cnn_cache (Optional[List[paddle.Tensor]]): conformer + cnn cache + Returns: + paddle.Tensor: output, it ranges from time 0 to current chunk. 
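A worked toy example of the beam-index bookkeeping in translate() above, with B=2 utterances and N=3 beams; it uses reshape/tile where the model code uses view/repeat, and the offset indices are hypothetical.

# scores are viewed as (B, N*N); topk yields per-utterance offsets, and
# base_k_index maps them back into the flattened (B*N*N) candidate view.
import paddle

B, N = 2, 3
offset_k_index = paddle.to_tensor([[4, 0, 7], [2, 8, 1]])                 # (B, N)
base_k_index = paddle.arange(B).reshape([-1, 1]).tile([1, N]) * N * N     # [[0,0,0],[9,9,9]]
best_k_index = base_k_index.reshape([-1]) + offset_k_index.reshape([-1])
print(best_k_index.tolist())        # [4, 0, 7, 11, 17, 10]
best_hyps_index = best_k_index // N # which (B*N) beam each survivor extends
print(best_hyps_index.tolist())     # [1, 0, 2, 3, 5, 3]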
+ paddle.Tensor: subsampling cache + List[paddle.Tensor]: attention cache + List[paddle.Tensor]: conformer cnn cache + """ + return self.encoder.forward_chunk( + xs, offset, required_cache_size, subsampling_cache, + elayers_output_cache, conformer_cnn_cache) + + # @jit.to_static + def ctc_activation(self, xs: paddle.Tensor) -> paddle.Tensor: + """ Export interface for c++ call, apply linear transform and log + softmax before ctc + Args: + xs (paddle.Tensor): encoder output + Returns: + paddle.Tensor: activation before ctc + """ + return self.ctc.log_softmax(xs) + + @jit.to_static + def forward_attention_decoder( + self, + hyps: paddle.Tensor, + hyps_lens: paddle.Tensor, + encoder_out: paddle.Tensor, ) -> paddle.Tensor: + """ Export interface for c++ call, forward decoder with multiple + hypothesis from ctc prefix beam search and one encoder output + Args: + hyps (paddle.Tensor): hyps from ctc prefix beam search, already + pad sos at the begining, (B, T) + hyps_lens (paddle.Tensor): length of each hyp in hyps, (B) + encoder_out (paddle.Tensor): corresponding encoder output, (B=1, T, D) + Returns: + paddle.Tensor: decoder output, (B, L) + """ + assert encoder_out.size(0) == 1 + num_hyps = hyps.size(0) + assert hyps_lens.size(0) == num_hyps + encoder_out = encoder_out.repeat(num_hyps, 1, 1) + # (B, 1, T) + encoder_mask = paddle.ones( + [num_hyps, 1, encoder_out.size(1)], dtype=paddle.bool) + # (num_hyps, max_hyps_len, vocab_size) + decoder_out, _ = self.decoder(encoder_out, encoder_mask, hyps, + hyps_lens) + decoder_out = paddle.nn.functional.log_softmax(decoder_out, dim=-1) + return decoder_out + + @paddle.no_grad() + def decode(self, + feats: paddle.Tensor, + feats_lengths: paddle.Tensor, + text_feature: Dict[str, int], + decoding_method: str, + lang_model_path: str, + beam_alpha: float, + beam_beta: float, + beam_size: int, + cutoff_prob: float, + cutoff_top_n: int, + num_processes: int, + ctc_weight: float=0.0, + decoding_chunk_size: int=-1, + num_decoding_left_chunks: int=-1, + simulate_streaming: bool=False): + """u2 decoding. + + Args: + feats (Tenosr): audio features, (B, T, D) + feats_lengths (Tenosr): (B) + text_feature (TextFeaturizer): text feature object. + decoding_method (str): decoding mode, e.g. + 'fullsentence', + 'simultaneous' + lang_model_path (str): lm path. + beam_alpha (float): lm weight. + beam_beta (float): length penalty. + beam_size (int): beam size for search + cutoff_prob (float): for prune. + cutoff_top_n (int): for prune. + num_processes (int): + ctc_weight (float, optional): ctc weight for attention rescoring decode mode. Defaults to 0.0. + decoding_chunk_size (int, optional): decoding chunk size. Defaults to -1. + <0: for decoding, use full chunk. + >0: for decoding, use fixed chunk size as set. + 0: used for training, it's prohibited here. + num_decoding_left_chunks (int, optional): + number of left chunks for decoding. Defaults to -1. + simulate_streaming (bool, optional): simulate streaming inference. Defaults to False. + + Raises: + ValueError: when not support decoding_method. + + Returns: + List[List[int]]: transcripts. 
+ """ + batch_size = feats.size(0) + + if decoding_method == 'fullsentence': + hyps = self.translate( + feats, + feats_lengths, + beam_size=beam_size, + decoding_chunk_size=decoding_chunk_size, + num_decoding_left_chunks=num_decoding_left_chunks, + simulate_streaming=simulate_streaming) + hyps = [hyp.tolist() for hyp in hyps] + else: + raise ValueError(f"Not support decoding method: {decoding_method}") + + res = [text_feature.defeaturize(hyp) for hyp in hyps] + return res + + +class U2STModel(U2STBaseModel): + def __init__(self, configs: dict): + vocab_size, encoder, decoder = U2STModel._init_from_config(configs) + + if isinstance(decoder, Tuple): + st_decoder, asr_decoder, ctc = decoder + super().__init__( + vocab_size=vocab_size, + encoder=encoder, + st_decoder=st_decoder, + decoder=asr_decoder, + ctc=ctc, + **configs['model_conf']) + else: + super().__init__( + vocab_size=vocab_size, + encoder=encoder, + st_decoder=decoder, + **configs['model_conf']) + + @classmethod + def _init_from_config(cls, configs: dict): + """init sub module for model. + + Args: + configs (dict): config dict. + + Raises: + ValueError: raise when using not support encoder type. + + Returns: + int, nn.Layer, nn.Layer, nn.Layer: vocab size, encoder, decoder, ctc + """ + if configs['cmvn_file'] is not None: + mean, istd = load_cmvn(configs['cmvn_file'], + configs['cmvn_file_type']) + global_cmvn = GlobalCMVN( + paddle.to_tensor(mean, dtype=paddle.float), + paddle.to_tensor(istd, dtype=paddle.float)) + else: + global_cmvn = None + + input_dim = configs['input_dim'] + vocab_size = configs['output_dim'] + assert input_dim != 0, input_dim + assert vocab_size != 0, vocab_size + + encoder_type = configs.get('encoder', 'transformer') + logger.info(f"U2 Encoder type: {encoder_type}") + if encoder_type == 'transformer': + encoder = TransformerEncoder( + input_dim, global_cmvn=global_cmvn, **configs['encoder_conf']) + elif encoder_type == 'conformer': + encoder = ConformerEncoder( + input_dim, global_cmvn=global_cmvn, **configs['encoder_conf']) + else: + raise ValueError(f"not support encoder type:{encoder_type}") + + st_decoder = TransformerDecoder(vocab_size, + encoder.output_size(), + **configs['decoder_conf']) + + asr_weight = configs['model_conf']['asr_weight'] + logger.info(f"ASR Joint Training Weight: {asr_weight}") + + if asr_weight > 0.: + decoder = TransformerDecoder(vocab_size, + encoder.output_size(), + **configs['decoder_conf']) + # ctc decoder and ctc loss + model_conf = configs['model_conf'] + ctc = CTCDecoder( + odim=vocab_size, + enc_n_units=encoder.output_size(), + blank_id=0, + dropout_rate=model_conf['ctc_dropout_rate'], + reduction=True, # sum + batch_average=True, # sum / batch_size + grad_norm_type=model_conf['ctc_grad_norm_type']) + + return vocab_size, encoder, (st_decoder, decoder, ctc) + else: + return vocab_size, encoder, st_decoder + + @classmethod + def from_config(cls, configs: dict): + """init model. + + Args: + configs (dict): config dict. + + Raises: + ValueError: raise when using not support encoder type. + + Returns: + nn.Layer: U2STModel + """ + model = cls(configs) + return model + + @classmethod + def from_pretrained(cls, dataloader, config, checkpoint_path): + """Build a DeepSpeech2Model model from a pretrained model. + + Args: + dataloader (paddle.io.DataLoader): not used. + config (yacs.config.CfgNode): model configs + checkpoint_path (Path or str): the path of pretrained model checkpoint, without extension name + + Returns: + DeepSpeech2Model: The model built from pretrained result. 
+ """ + with UpdateConfig(config): + config.input_dim = dataloader.collate_fn.feature_size + config.output_dim = dataloader.collate_fn.vocab_size + + model = cls.from_config(config) + + if checkpoint_path: + infos = checkpoint.load_parameters( + model, checkpoint_path=checkpoint_path) + logger.info(f"checkpoint info: {infos}") + layer_tools.summary(model) + return model + + +class U2STInferModel(U2STModel): + def __init__(self, configs: dict): + super().__init__(configs) + + def forward(self, + feats, + feats_lengths, + decoding_chunk_size=-1, + num_decoding_left_chunks=-1, + simulate_streaming=False): + """export model function + + Args: + feats (Tensor): [B, T, D] + feats_lengths (Tensor): [B] + + Returns: + List[List[int]]: best path result + """ + return self.translate( + feats, + feats_lengths, + decoding_chunk_size=decoding_chunk_size, + num_decoding_left_chunks=num_decoding_left_chunks, + simulate_streaming=simulate_streaming) diff --git a/deepspeech/modules/activation.py b/deepspeech/modules/activation.py index 0fe66b739..3cb8729e1 100644 --- a/deepspeech/modules/activation.py +++ b/deepspeech/modules/activation.py @@ -15,12 +15,13 @@ from collections import OrderedDict import paddle from paddle import nn +from paddle.nn import functional as F from deepspeech.utils.log import Log logger = Log(__name__).getlog() -__all__ = ["get_activation", "brelu", "LinearGLUBlock", "ConvGLUBlock"] +__all__ = ["get_activation", "brelu", "LinearGLUBlock", "ConvGLUBlock", "GLU"] def brelu(x, t_min=0.0, t_max=24.0, name=None): @@ -30,6 +31,17 @@ def brelu(x, t_min=0.0, t_max=24.0, name=None): return x.maximum(t_min).minimum(t_max) +class GLU(nn.Layer): + """Gated Linear Units (GLU) Layer""" + + def __init__(self, dim: int=-1): + super().__init__() + self.dim = dim + + def forward(self, xs): + return F.glu(xs, axis=self.dim) + + class LinearGLUBlock(nn.Layer): """A linear Gated Linear Units (GLU) block.""" @@ -69,7 +81,7 @@ class ConvGLUBlock(nn.Layer): dim=0) self.dropout_residual = nn.Dropout(p=dropout) - self.pad_left = ConstantPad2d((0, 0, kernel_size - 1, 0), 0) + self.pad_left = nn.Pad2d((0, 0, kernel_size - 1, 0), 0) layers = OrderedDict() if bottlececk_dim == 0: @@ -133,13 +145,18 @@ def get_activation(act): """Return activation function.""" # Lazy load to avoid unused import activation_funcs = { + "hardshrink": paddle.nn.Hardshrink, + "hardswish": paddle.nn.Hardswish, "hardtanh": paddle.nn.Hardtanh, "tanh": paddle.nn.Tanh, "relu": paddle.nn.ReLU, + "relu6": paddle.nn.ReLU6, + "leakyrelu": paddle.nn.LeakyReLU, "selu": paddle.nn.SELU, "swish": paddle.nn.Swish, "gelu": paddle.nn.GELU, - "brelu": brelu, + "glu": GLU, + "elu": paddle.nn.ELU, } return activation_funcs[act]() diff --git a/deepspeech/modules/attention.py b/deepspeech/modules/attention.py index 4401a4a55..1a984dd45 100644 --- a/deepspeech/modules/attention.py +++ b/deepspeech/modules/attention.py @@ -109,8 +109,8 @@ class MultiHeadedAttention(nn.Layer): p_attn = self.dropout(attn) x = paddle.matmul(p_attn, value) # (batch, head, time1, d_k) - x = x.transpose([0, 2, 1, 3]).contiguous().view( - n_batch, -1, self.h * self.d_k) # (batch, time1, d_model) + x = x.transpose([0, 2, 1, 3]).view(n_batch, -1, self.h * + self.d_k) # (batch, time1, d_model) return self.linear_out(x) # (batch, time1, d_model) diff --git a/deepspeech/modules/conv.py b/deepspeech/modules/conv.py index 8bf48b2c8..22a168800 100644 --- a/deepspeech/modules/conv.py +++ b/deepspeech/modules/conv.py @@ -113,11 +113,9 @@ class ConvBn(nn.Layer): # reset padding part to 0 
masks = make_non_pad_mask(x_len) #[B, T] masks = masks.unsqueeze(1).unsqueeze(1) # [B, 1, 1, T] - # TODO(Hui Zhang): not support bool multiply - # masks = masks.type_as(x) - masks = masks.astype(x.dtype) - x = x.multiply(masks) - + # https://github.com/PaddlePaddle/Paddle/pull/29265 + # rhs will type promote to lhs + x = x * masks return x, x_len diff --git a/deepspeech/modules/ctc.py b/deepspeech/modules/ctc.py index 31e489a3d..b3ca28279 100644 --- a/deepspeech/modules/ctc.py +++ b/deepspeech/modules/ctc.py @@ -16,15 +16,19 @@ from paddle import nn from paddle.nn import functional as F from typeguard import check_argument_types -from deepspeech.decoders.swig_wrapper import ctc_beam_search_decoder_batch -from deepspeech.decoders.swig_wrapper import ctc_greedy_decoder -from deepspeech.decoders.swig_wrapper import Scorer from deepspeech.modules.loss import CTCLoss from deepspeech.utils import ctc_utils from deepspeech.utils.log import Log logger = Log(__name__).getlog() +try: + from deepspeech.decoders.swig_wrapper import ctc_beam_search_decoder_batch # noqa: F401 + from deepspeech.decoders.swig_wrapper import ctc_greedy_decoder # noqa: F401 + from deepspeech.decoders.swig_wrapper import Scorer # noqa: F401 +except Exception as e: + logger.info("ctcdecoder not installed!") + __all__ = ['CTCDecoder'] @@ -35,7 +39,8 @@ class CTCDecoder(nn.Layer): blank_id=0, dropout_rate: float=0.0, reduction: bool=True, - batch_average: bool=True): + batch_average: bool=True, + grad_norm_type: str="instance"): """CTC decoder Args: @@ -44,6 +49,7 @@ class CTCDecoder(nn.Layer): dropout_rate (float): dropout rate (0.0 ~ 1.0) reduction (bool): reduce the CTC loss into a scalar, True for 'sum' or 'none' batch_average (bool): do batch dim wise average. + grad_norm_type (str): one of 'instance', 'batchsize', 'frame', None. """ assert check_argument_types() super().__init__() @@ -56,7 +62,8 @@ class CTCDecoder(nn.Layer): self.criterion = CTCLoss( blank=self.blank_id, reduction=reduction_type, - batch_average=batch_average) + batch_average=batch_average, + grad_norm_type=grad_norm_type) # CTCDecoder LM Score handle self._ext_scorer = None @@ -132,7 +139,7 @@ class CTCDecoder(nn.Layer): results = [] for i, probs in enumerate(probs_split): output_transcription = ctc_greedy_decoder( - probs_seq=probs, vocabulary=vocab_list) + probs_seq=probs, vocabulary=vocab_list, blank_id=self.blank_id) results.append(output_transcription) return results @@ -212,13 +219,15 @@ class CTCDecoder(nn.Layer): num_processes=num_processes, ext_scoring_func=self._ext_scorer, cutoff_prob=cutoff_prob, - cutoff_top_n=cutoff_top_n) + cutoff_top_n=cutoff_top_n, + blank_id=self.blank_id) results = [result[0][1] for result in beam_search_results] return results def init_decode(self, beam_alpha, beam_beta, lang_model_path, vocab_list, decoding_method): + if decoding_method == "ctc_beam_search": self._init_ext_scorer(beam_alpha, beam_beta, lang_model_path, vocab_list) @@ -229,7 +238,7 @@ class CTCDecoder(nn.Layer): """ctc decoding with probs. 
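The masking change above relies on the Paddle behavior referenced in the linked PR (a bool right-hand operand is promoted to the dtype of the left-hand side), so the explicit `astype` is no longer needed. A small sketch of the idea, assuming a (B, T, 1) boolean non-pad mask over (B, T, D) features:

import paddle

x = paddle.randn([2, 4, 3])                      # (B, T, D) features
x_len = paddle.to_tensor([4, 2])                 # valid frames per utterance
t = paddle.arange(x.shape[1]).unsqueeze(0)       # (1, T) frame indices
masks = (t < x_len.unsqueeze(1)).unsqueeze(-1)   # (B, T, 1) bool non-pad mask
x = x * masks                                    # bool rhs promotes to float; padded frames become 0
print(x[1, 2:])                                  # zeros beyond the second utterance's length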
Args: - probs (Tenosr): activation after softmax + probs (Tenosr): activation after softmax logits_lens (Tenosr): audio output lens vocab_list ([type]): [description] decoding_method ([type]): [description] diff --git a/deepspeech/modules/decoder.py b/deepspeech/modules/decoder.py index 696a6315b..143f6cc57 100644 --- a/deepspeech/modules/decoder.py +++ b/deepspeech/modules/decoder.py @@ -33,7 +33,7 @@ logger = Log(__name__).getlog() __all__ = ["TransformerDecoder"] -class TransformerDecoder(nn.Module): +class TransformerDecoder(nn.Layer): """Base class of Transfomer decoder module. Args: vocab_size: output dim @@ -86,7 +86,7 @@ class TransformerDecoder(nn.Module): self.use_output_layer = use_output_layer self.output_layer = nn.Linear(attention_dim, vocab_size) - self.decoders = nn.ModuleList([ + self.decoders = nn.LayerList([ DecoderLayer( size=attention_dim, self_attn=MultiHeadedAttention(attention_heads, attention_dim, @@ -124,9 +124,7 @@ class TransformerDecoder(nn.Module): # m: (1, L, L) m = subsequent_mask(tgt_mask.size(-1)).unsqueeze(0) # tgt_mask: (B, L, L) - # TODO(Hui Zhang): not support & for tensor - # tgt_mask = tgt_mask & m - tgt_mask = tgt_mask.logical_and(m) + tgt_mask = tgt_mask & m x, _ = self.embed(tgt) for layer in self.decoders: @@ -137,9 +135,7 @@ class TransformerDecoder(nn.Module): if self.use_output_layer: x = self.output_layer(x) - # TODO(Hui Zhang): reduce_sum not support bool type - # olens = tgt_mask.sum(1) - olens = tgt_mask.astype(paddle.int).sum(1) + olens = tgt_mask.sum(1) return x, olens def forward_one_step( diff --git a/deepspeech/modules/decoder_layer.py b/deepspeech/modules/decoder_layer.py index c6fac5412..47c42615e 100644 --- a/deepspeech/modules/decoder_layer.py +++ b/deepspeech/modules/decoder_layer.py @@ -25,15 +25,15 @@ logger = Log(__name__).getlog() __all__ = ["DecoderLayer"] -class DecoderLayer(nn.Module): +class DecoderLayer(nn.Layer): """Single decoder layer module. Args: size (int): Input dimension. - self_attn (nn.Module): Self-attention module instance. + self_attn (nn.Layer): Self-attention module instance. `MultiHeadedAttention` instance can be used as the argument. - src_attn (nn.Module): Self-attention module instance. + src_attn (nn.Layer): Self-attention module instance. `MultiHeadedAttention` instance can be used as the argument. - feed_forward (nn.Module): Feed-forward module instance. + feed_forward (nn.Layer): Feed-forward module instance. `PositionwiseFeedForward` instance can be used as the argument. dropout_rate (float): Dropout rate. 
normalize_before (bool): @@ -48,9 +48,9 @@ class DecoderLayer(nn.Module): def __init__( self, size: int, - self_attn: nn.Module, - src_attn: nn.Module, - feed_forward: nn.Module, + self_attn: nn.Layer, + src_attn: nn.Layer, + feed_forward: nn.Layer, dropout_rate: float, normalize_before: bool=True, concat_after: bool=False, ): diff --git a/deepspeech/modules/encoder.py b/deepspeech/modules/encoder.py index e326db8f9..fb44fe295 100644 --- a/deepspeech/modules/encoder.py +++ b/deepspeech/modules/encoder.py @@ -162,8 +162,7 @@ class BaseEncoder(nn.Layer): xs, pos_emb, masks = self.embed(xs, masks.type_as(xs), offset=0) #TODO(Hui Zhang): remove mask.astype, stride_slice not support bool tensor masks = masks.astype(paddle.bool) - #TODO(Hui Zhang): mask_pad = ~masks - mask_pad = masks.logical_not() + mask_pad = ~masks chunk_masks = add_optional_chunk_mask( xs, masks, self.use_dynamic_chunk, self.use_dynamic_left_chunk, decoding_chunk_size, self.static_chunk_size, @@ -219,11 +218,14 @@ class BaseEncoder(nn.Layer): xs, pos_emb, _ = self.embed( xs, tmp_masks, offset=offset) #xs=(B, T, D), pos_emb=(B=1, T, D) + if subsampling_cache is not None: cache_size = subsampling_cache.size(1) #T xs = paddle.cat((subsampling_cache, xs), dim=1) else: cache_size = 0 + + # only used when using `RelPositionMultiHeadedAttention` pos_emb = self.embed.position_encoding( offset=offset - cache_size, size=xs.size(1)) @@ -237,7 +239,7 @@ class BaseEncoder(nn.Layer): # Real mask for transformer/conformer layers masks = paddle.ones([1, xs.size(1)], dtype=paddle.bool) - masks = masks.unsqueeze(1) #[B=1, C=1, T] + masks = masks.unsqueeze(1) #[B=1, L'=1, T] r_elayers_output_cache = [] r_conformer_cnn_cache = [] for i, layer in enumerate(self.encoders): @@ -355,7 +357,7 @@ class TransformerEncoder(BaseEncoder): pos_enc_layer_type, normalize_before, concat_after, static_chunk_size, use_dynamic_chunk, global_cmvn, use_dynamic_left_chunk) - self.encoders = nn.ModuleList([ + self.encoders = nn.LayerList([ TransformerEncoderLayer( size=output_size, self_attn=MultiHeadedAttention(attention_heads, output_size, @@ -435,7 +437,7 @@ class ConformerEncoder(BaseEncoder): convolution_layer_args = (output_size, cnn_module_kernel, activation, cnn_module_norm, causal) - self.encoders = nn.ModuleList([ + self.encoders = nn.LayerList([ ConformerEncoderLayer( size=output_size, self_attn=encoder_selfattn_layer(*encoder_selfattn_layer_args), diff --git a/deepspeech/modules/loss.py b/deepspeech/modules/loss.py index 3e441bbbc..2c58be7e3 100644 --- a/deepspeech/modules/loss.py +++ b/deepspeech/modules/loss.py @@ -23,11 +23,32 @@ __all__ = ['CTCLoss', "LabelSmoothingLoss"] class CTCLoss(nn.Layer): - def __init__(self, blank=0, reduction='sum', batch_average=False): + def __init__(self, + blank=0, + reduction='sum', + batch_average=False, + grad_norm_type=None): super().__init__() # last token id as blank id self.loss = nn.CTCLoss(blank=blank, reduction=reduction) self.batch_average = batch_average + logger.info( + f"CTCLoss Loss reduction: {reduction}, div-bs: {batch_average}") + + # instance for norm_by_times + # batch for norm_by_batchsize + # frame for norm_by_total_logits_len + assert grad_norm_type in ('instance', 'batch', 'frame', None) + self.norm_by_times = False + self.norm_by_batchsize = False + self.norm_by_total_logits_len = False + logger.info(f"CTCLoss Grad Norm Type: {grad_norm_type}") + if grad_norm_type == 'instance': + self.norm_by_times = True + if grad_norm_type == 'batch': + self.norm_by_batchsize = True + if grad_norm_type == 
'frame': + self.norm_by_total_logits_len = True def forward(self, logits, ys_pad, hlens, ys_lens): """Compute CTC loss. @@ -46,9 +67,15 @@ class CTCLoss(nn.Layer): # warp-ctc need activation with shape [T, B, V + 1] # logits: (B, L, D) -> (L, B, D) logits = logits.transpose([1, 0, 2]) - # (TODO:Hui Zhang) ctc loss does not support int64 labels ys_pad = ys_pad.astype(paddle.int32) - loss = self.loss(logits, ys_pad, hlens, ys_lens) + loss = self.loss( + logits, + ys_pad, + hlens, + ys_lens, + norm_by_times=self.norm_by_times, + norm_by_batchsize=self.norm_by_batchsize, + norm_by_total_logits_len=self.norm_by_total_logits_len) if self.batch_average: # Batch-size average loss = loss / B @@ -123,9 +150,9 @@ class LabelSmoothingLoss(nn.Layer): # use zeros_like instead of torch.no_grad() for true_dist, # since no_grad() can not be exported by JIT true_dist = paddle.full_like(x, self.smoothing / (self.size - 1)) - ignore = target == self.padding_idx # (B,) + ignore = (target == self.padding_idx) # (B,) - # target = target * (1 - ignore) # avoid -1 index + #TODO(Hui Zhang): target = target * (1 - ignore) # avoid -1 index target = target.masked_fill(ignore, 0) # avoid -1 index # true_dist.scatter_(1, target.unsqueeze(1), self.confidence) target_mask = F.one_hot(target, self.size) @@ -134,10 +161,8 @@ class LabelSmoothingLoss(nn.Layer): kl = self.criterion(F.log_softmax(x, axis=1), true_dist) - #TODO(Hui Zhang): sum not support bool type - #total = len(target) - int(ignore.sum()) - total = len(target) - int(ignore.type_as(target).sum()) + total = len(target) - int(ignore.sum()) denom = total if self.normalize_length else B - #numer = (kl * (1 - ignore)).sum() + #TODO(Hui Zhang): numer = (kl * (1 - ignore)).sum() numer = kl.masked_fill(ignore.unsqueeze(1), 0).sum() return numer / denom diff --git a/deepspeech/modules/mask.py b/deepspeech/modules/mask.py index 05e86eb33..6d46f5ba0 100644 --- a/deepspeech/modules/mask.py +++ b/deepspeech/modules/mask.py @@ -69,8 +69,7 @@ def make_non_pad_mask(lengths: paddle.Tensor) -> paddle.Tensor: [1, 1, 1, 0, 0], [1, 1, 0, 0, 0]] """ - #TODO(Hui Zhang): return ~make_pad_mask(lengths), not support ~ - return make_pad_mask(lengths).logical_not() + return ~make_pad_mask(lengths) def subsequent_mask(size: int) -> paddle.Tensor: @@ -92,12 +91,7 @@ def subsequent_mask(size: int) -> paddle.Tensor: [1, 1, 1]] """ ret = paddle.ones([size, size], dtype=paddle.bool) - #TODO(Hui Zhang): tril not support bool - #return paddle.tril(ret) - ret = ret.astype(paddle.float) - ret = paddle.tril(ret) - ret = ret.astype(paddle.bool) - return ret + return paddle.tril(ret) def subsequent_chunk_mask( @@ -186,15 +180,13 @@ def add_optional_chunk_mask(xs: paddle.Tensor, chunk_masks = subsequent_chunk_mask(xs.shape[1], chunk_size, num_left_chunks) # (L, L) chunk_masks = chunk_masks.unsqueeze(0) # (1, L, L) - # chunk_masks = masks & chunk_masks # (B, L, L) - chunk_masks = masks.logical_and(chunk_masks) # (B, L, L) + chunk_masks = masks & chunk_masks # (B, L, L) elif static_chunk_size > 0: num_left_chunks = num_decoding_left_chunks chunk_masks = subsequent_chunk_mask(xs.shape[1], static_chunk_size, num_left_chunks) # (L, L) chunk_masks = chunk_masks.unsqueeze(0) # (1, L, L) - # chunk_masks = masks & chunk_masks # (B, L, L) - chunk_masks = masks.logical_and(chunk_masks) # (B, L, L) + chunk_masks = masks & chunk_masks # (B, L, L) else: chunk_masks = masks return chunk_masks diff --git a/deepspeech/modules/rnn.py b/deepspeech/modules/rnn.py index 01b55c4a2..8f8b2a18d 100644 --- 
a/deepspeech/modules/rnn.py +++ b/deepspeech/modules/rnn.py @@ -297,7 +297,7 @@ class RNNStack(nn.Layer): share_weights=share_rnn_weights)) i_size = h_size * 2 - self.rnn_stacks = nn.ModuleList(rnn_stacks) + self.rnn_stacks = nn.LayerList(rnn_stacks) def forward(self, x: paddle.Tensor, x_len: paddle.Tensor): """ @@ -308,7 +308,7 @@ class RNNStack(nn.Layer): x, x_len = rnn(x, x_len) masks = make_non_pad_mask(x_len) #[B, T] masks = masks.unsqueeze(-1) # [B, T, 1] - # TODO(Hui Zhang): not support bool multiply - masks = masks.astype(x.dtype) - x = x.multiply(masks) + # https://github.com/PaddlePaddle/Paddle/pull/29265 + # rhs will type promote to lhs + x = x * masks return x, x_len diff --git a/deepspeech/modules/subsampling.py b/deepspeech/modules/subsampling.py index 5aa2fd8ea..3bed62f3c 100644 --- a/deepspeech/modules/subsampling.py +++ b/deepspeech/modules/subsampling.py @@ -92,7 +92,7 @@ class Conv2dSubsampling4(BaseSubsampling): dropout_rate: float, pos_enc_class: nn.Layer=PositionalEncoding): """Construct an Conv2dSubsampling4 object. - + Args: idim (int): Input dimension. odim (int): Output dimension. @@ -108,8 +108,8 @@ class Conv2dSubsampling4(BaseSubsampling): nn.Linear(odim * (((idim - 1) // 2 - 1) // 2), odim)) self.subsampling_rate = 4 # The right context for every conv layer is computed by: - # (kernel_size - 1) / 2 * stride * frame_rate_of_this_layer - # 6 = (3 - 1) / 2 * 2 * 1 + (3 - 1) / 2 * 2 * 2 + # (kernel_size - 1) * frame_rate_of_this_layer + # 6 = (3 - 1) * 1 + (3 - 1) * 2 self.right_context = 6 def forward(self, x: paddle.Tensor, x_mask: paddle.Tensor, offset: int=0 @@ -143,7 +143,7 @@ class Conv2dSubsampling6(BaseSubsampling): dropout_rate: float, pos_enc_class: nn.Layer=PositionalEncoding): """Construct an Conv2dSubsampling6 object. - + Args: idim (int): Input dimension. odim (int): Output dimension. @@ -160,10 +160,10 @@ class Conv2dSubsampling6(BaseSubsampling): # when Padding == 0, O = (I - F - S) // S self.linear = nn.Linear(odim * (((idim - 1) // 2 - 2) // 3), odim) # The right context for every conv layer is computed by: - # (kernel_size - 1) / 2 * stride * frame_rate_of_this_layer - # 14 = (3 - 1) / 2 * 2 * 1 + (5 - 1) / 2 * 3 * 2 + # (kernel_size - 1) * frame_rate_of_this_layer + # 10 = (3 - 1) * 1 + (5 - 1) * 2 self.subsampling_rate = 6 - self.right_context = 14 + self.right_context = 10 def forward(self, x: paddle.Tensor, x_mask: paddle.Tensor, offset: int=0 ) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor]: @@ -196,7 +196,7 @@ class Conv2dSubsampling8(BaseSubsampling): dropout_rate: float, pos_enc_class: nn.Layer=PositionalEncoding): """Construct an Conv2dSubsampling8 object. - + Args: idim (int): Input dimension. odim (int): Output dimension. @@ -214,8 +214,8 @@ class Conv2dSubsampling8(BaseSubsampling): odim) self.subsampling_rate = 8 # The right context for every conv layer is computed by: - # (kernel_size - 1) / 2 * stride * frame_rate_of_this_layer - # 14 = (3 - 1) / 2 * 2 * 1 + (3 - 1) / 2 * 2 * 2 + (3 - 1) / 2 * 2 * 4 + # (kernel_size - 1) * frame_rate_of_this_layer + # 14 = (3 - 1) * 1 + (3 - 1) * 2 + (3 - 1) * 4 self.right_context = 14 def forward(self, x: paddle.Tensor, x_mask: paddle.Tensor, offset: int=0 diff --git a/deepspeech/training/cli.py b/deepspeech/training/cli.py index b83d989d6..07c213dbc 100644 --- a/deepspeech/training/cli.py +++ b/deepspeech/training/cli.py @@ -16,23 +16,23 @@ import argparse def default_argument_parser(): r"""A simple yet genral argument parser for experiments with parakeet. 
- - This is used in examples with parakeet. And it is intended to be used by - other experiments with parakeet. It requires a minimal set of command line + + This is used in examples with parakeet. And it is intended to be used by + other experiments with parakeet. It requires a minimal set of command line arguments to start a training script. - - The ``--config`` and ``--opts`` are used for overwrite the deault + + The ``--config`` and ``--opts`` are used for overwrite the deault configuration. - - The ``--data`` and ``--output`` specifies the data path and output path. - Resuming training from existing progress at the output directory is the + + The ``--data`` and ``--output`` specifies the data path and output path. + Resuming training from existing progress at the output directory is the intended default behavior. - + The ``--checkpoint_path`` specifies the checkpoint to load from. - + The ``--device`` and ``--nprocs`` specifies how to run the training. - - + + See Also -------- parakeet.training.experiment @@ -43,32 +43,57 @@ def default_argument_parser(): """ parser = argparse.ArgumentParser() - # yapf: disable - # data and output - parser.add_argument("--config", metavar="FILE", help="path of the config file to overwrite to default config with.") - parser.add_argument("--dump-config", metavar="FILE", help="dump config to yaml file.") - # parser.add_argument("--data", metavar="DATA_DIR", help="path to the datatset.") - parser.add_argument("--output", metavar="OUTPUT_DIR", help="path to save checkpoint and logs.") - - # load from saved checkpoint - parser.add_argument("--checkpoint_path", type=str, help="path of the checkpoint to load") - - # save jit model to - parser.add_argument("--export_path", type=str, help="path of the jit model to save") - - # save asr result to - parser.add_argument("--result_file", type=str, help="path of save the asr result") - - # running - parser.add_argument("--device", type=str, default='gpu', choices=["cpu", "gpu"], - help="device type to use, cpu and gpu are supported.") - parser.add_argument("--nprocs", type=int, default=1, help="number of parallel processes to use.") + train_group = parser.add_argument_group( + title='Train Options', description=None) + train_group.add_argument( + "--seed", + type=int, + default=None, + help="seed to use for paddle, np and random. None or 0 for random, else set seed." + ) + train_group.add_argument( + "--device", + type=str, + default='gpu', + choices=["cpu", "gpu"], + help="device cpu and gpu are supported.") + train_group.add_argument( + "--nprocs", + type=int, + default=1, + help="number of parallel processes. 
0 for cpu.") + train_group.add_argument( + "--config", metavar="CONFIG_FILE", help="config file.") + train_group.add_argument( + "--output", metavar="CKPT_DIR", help="path to save checkpoint.") + train_group.add_argument( + "--checkpoint_path", type=str, help="path to load checkpoint") + train_group.add_argument( + "--opts", + type=str, + default=[], + nargs='+', + help="overwrite --config file, passing in LIST[KEY VALUE] pairs") + train_group.add_argument( + "--dump-config", metavar="FILE", help="dump config to `this` file.") - # overwrite extra config and default config - # parser.add_argument("--opts", nargs=argparse.REMAINDER, - # help="options to overwrite --config file and the default config, passing in KEY VALUE pairs") - parser.add_argument("--opts", type=str, default=[], nargs='+', - help="options to overwrite --config file and the default config, passing in KEY VALUE pairs") - # yapd: enable + profile_group = parser.add_argument_group( + title='Benchmark Options', description=None) + profile_group.add_argument( + '--profiler-options', + type=str, + default=None, + help='The option of profiler, which should be in format \"key1=value1;key2=value2;key3=value3\".' + ) + profile_group.add_argument( + '--benchmark-batch-size', + type=int, + default=None, + help='batch size for benchmark.') + profile_group.add_argument( + '--benchmark-max-step', + type=int, + default=None, + help='max iteration for benchmark.') return parser diff --git a/deepspeech/training/extensions/__init__.py b/deepspeech/training/extensions/__init__.py new file mode 100644 index 000000000..6ad041559 --- /dev/null +++ b/deepspeech/training/extensions/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Callable + +from .extension import Extension + + +def make_extension(trigger: Callable=None, + default_name: str=None, + priority: int=None, + finalizer: Callable=None, + initializer: Callable=None, + on_error: Callable=None): + """Make an Extension-like object by injecting required attributes to it. + """ + if trigger is None: + trigger = Extension.trigger + if priority is None: + priority = Extension.priority + + def decorator(ext): + ext.trigger = trigger + ext.default_name = default_name or ext.__name__ + ext.priority = priority + ext.finalize = finalizer + ext.on_error = on_error + ext.initialize = initializer + return ext + + return decorator diff --git a/deepspeech/training/extensions/evaluator.py b/deepspeech/training/extensions/evaluator.py new file mode 100644 index 000000000..1026a4ec3 --- /dev/null +++ b/deepspeech/training/extensions/evaluator.py @@ -0,0 +1,101 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Dict + +import paddle +from paddle import distributed as dist +from paddle.io import DataLoader +from paddle.nn import Layer + +from . import extension +from ..reporter import DictSummary +from ..reporter import ObsScope +from ..reporter import report +from ..timer import Timer +from deepspeech.utils.log import Log +logger = Log(__name__).getlog() + + +class StandardEvaluator(extension.Extension): + + trigger = (1, 'epoch') + default_name = 'validation' + priority = extension.PRIORITY_WRITER + + name = None + + def __init__(self, model: Layer, dataloader: DataLoader): + # it is designed to hold multiple models + models = {"main": model} + self.models: Dict[str, Layer] = models + self.model = model + + # dataloaders + self.dataloader = dataloader + + def evaluate_core(self, batch): + # compute + self.model(batch) # you may report here + return + + def evaluate_sync(self, data): + # dist sync `evaluate_core` outputs + if data is None: + return + + numerator, denominator = data + if dist.get_world_size() > 1: + numerator = paddle.to_tensor(numerator) + denominator = paddle.to_tensor(denominator) + # the default operator in all_reduce function is sum. + dist.all_reduce(numerator) + dist.all_reduce(denominator) + value = numerator / denominator + value = float(value) + else: + value = numerator / denominator + # used for `snapshort` to do kbest save. + report("VALID/LOSS", value) + logger.info(f"Valid: all-reduce loss {value}") + + def evaluate(self): + # switch to eval mode + for model in self.models.values(): + model.eval() + + # to average evaluation metrics + summary = DictSummary() + for batch in self.dataloader: + observation = {} + with ObsScope(observation): + # main evaluation computation here. + with paddle.no_grad(): + self.evaluate_sync(self.evaluate_core(batch)) + summary.add(observation) + summary = summary.compute_mean() + + # switch to train mode + for model in self.models.values(): + model.train() + return summary + + def __call__(self, trainer=None): + # evaluate and report the averaged metric to current observation + # if it is used to extend a trainer, the metrics is reported to + # to observation of the trainer + # or otherwise, you can use your own observation + with Timer("Eval Time Cost: {}"): + summary = self.evaluate() + for k, v in summary.items(): + report(k, v) diff --git a/deepspeech/training/extensions/extension.py b/deepspeech/training/extensions/extension.py new file mode 100644 index 000000000..02f924951 --- /dev/null +++ b/deepspeech/training/extensions/extension.py @@ -0,0 +1,52 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
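The `evaluate_sync` method above averages a ratio metric by all-reducing its numerator and denominator separately before dividing; a standalone sketch of that reduction (assuming the process group is already initialized when the world size is greater than one):

import paddle
from paddle import distributed as dist

def all_reduce_mean(numerator: float, denominator: float) -> float:
    """Average a ratio metric across workers: sum both parts, then divide."""
    if dist.get_world_size() > 1:
        num = paddle.to_tensor(numerator)
        den = paddle.to_tensor(denominator)
        dist.all_reduce(num)   # the default reduce op is sum
        dist.all_reduce(den)
        return float(num / den)
    return numerator / denominator

print(all_reduce_mean(12.0, 4.0))   # 3.0 in the single-process case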
+# See the License for the specific language governing permissions and +# limitations under the License. +PRIORITY_WRITER = 300 +PRIORITY_EDITOR = 200 +PRIORITY_READER = 100 + + +class Extension(): + """Extension to customize the behavior of Trainer.""" + trigger = (1, 'iteration') + priority = PRIORITY_READER + name = None + + @property + def default_name(self): + """Default name of the extension, class name by default.""" + return type(self).__name__ + + def __call__(self, trainer): + """Main action of the extention. After each update, it is executed + when the trigger fires.""" + raise NotImplementedError( + 'Extension implementation must override __call__.') + + def initialize(self, trainer): + """Action that is executed once to get the corect trainer state. + It is called before training normally, but if the trainer restores + states with an Snapshot extension, this method should also be called. + """ + pass + + def on_error(self, trainer, exc, tb): + """Handles the error raised during training before finalization. + """ + pass + + def finalize(self, trainer): + """Action that is executed when training is done. + For example, visualizers would need to be closed. + """ + pass diff --git a/deepspeech/training/extensions/snapshot.py b/deepspeech/training/extensions/snapshot.py new file mode 100644 index 000000000..e81eb97fc --- /dev/null +++ b/deepspeech/training/extensions/snapshot.py @@ -0,0 +1,133 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os +from datetime import datetime +from pathlib import Path +from typing import Any +from typing import Dict +from typing import List + +import jsonlines + +from . import extension +from ..reporter import get_observations +from ..updaters.trainer import Trainer +from deepspeech.utils.log import Log +from deepspeech.utils.mp_tools import rank_zero_only + +logger = Log(__name__).getlog() + + +def load_records(records_fp): + """Load record files (json lines.)""" + with jsonlines.open(records_fp, 'r') as reader: + records = list(reader) + return records + + +class Snapshot(extension.Extension): + """An extension to make snapshot of the updater object inside + the trainer. It is done by calling the updater's `save` method. + An Updater save its state_dict by default, which contains the + updater state, (i.e. epoch and iteration) and all the model + parameters and optimizer states. If the updater inside the trainer + subclasses StandardUpdater, everything is good to go. + Parameters + ---------- + checkpoint_dir : Union[str, Path] + The directory to save checkpoints into. 
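A minimal subclass sketch of the `Extension` interface above (a hypothetical extension; only `__call__` must be overridden, the other hooks default to no-ops):

from deepspeech.training.extensions.extension import Extension
from deepspeech.training.extensions.extension import PRIORITY_READER


class LogIteration(Extension):
    # run once per iteration, after writer-priority extensions have reported
    trigger = (1, 'iteration')
    priority = PRIORITY_READER

    def __call__(self, trainer):
        print("iteration:", trainer.updater.state.iteration)

    def finalize(self, trainer):
        print("training finished")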
+ """ + + trigger = (1, 'epoch') + priority = -100 + default_name = "snapshot" + + def __init__(self, + mode='latest', + max_size: int=5, + indicator=None, + less_better=True, + snapshot_on_error: bool=False): + self.records: List[Dict[str, Any]] = [] + assert mode in ('latest', 'kbest'), mode + if mode == 'kbest': + assert indicator is not None + self.mode = mode + self.indicator = indicator + self.less_is_better = less_better + self.max_size = max_size + self._snapshot_on_error = snapshot_on_error + self._save_all = (max_size == -1) + self.checkpoint_dir = None + + def initialize(self, trainer: Trainer): + """Setting up this extention.""" + self.checkpoint_dir = trainer.out / "checkpoints" + + # load existing records + record_path: Path = self.checkpoint_dir / "records.jsonl" + if record_path.exists(): + self.records = load_records(record_path) + ckpt_path = self.records[-1]['path'] + logger.info(f"Loading from an existing checkpoint {ckpt_path}") + trainer.updater.load(ckpt_path) + + def on_error(self, trainer, exc, tb): + if self._snapshot_on_error: + self.save_checkpoint_and_update(trainer, 'latest') + + def __call__(self, trainer: Trainer): + self.save_checkpoint_and_update(trainer, self.mode) + + def full(self): + """Whether the number of snapshots it keeps track of is greater + than the max_size.""" + return (not self._save_all) and len(self.records) > self.max_size + + @rank_zero_only + def save_checkpoint_and_update(self, trainer: Trainer, mode: str): + """Saving new snapshot and remove the oldest snapshot if needed.""" + iteration = trainer.updater.state.iteration + epoch = trainer.updater.state.epoch + num = epoch if self.trigger[1] == 'epoch' else iteration + path = self.checkpoint_dir / f"{num}.np" + + # add the new one + trainer.updater.save(path) + record = { + "time": str(datetime.now()), + 'path': str(path.resolve()), # use absolute path + 'iteration': iteration, + 'epoch': epoch, + 'indicator': get_observations()[self.indicator] + } + self.records.append(record) + + # remove the earist + if self.full(): + if mode == 'kbest': + self.records = sorted( + self.records, + key=lambda record: record['indicator'], + reverse=not self.less_is_better) + eariest_record = self.records[0] + os.remove(eariest_record["path"]) + self.records.pop(0) + + # update the record file + record_path = self.checkpoint_dir / "records.jsonl" + with jsonlines.open(record_path, 'w') as writer: + for record in self.records: + # jsonlines.open may return a Writer or a Reader + writer.write(record) # pylint: disable=no-member diff --git a/deepspeech/training/extensions/visualizer.py b/deepspeech/training/extensions/visualizer.py new file mode 100644 index 000000000..e5f456cac --- /dev/null +++ b/deepspeech/training/extensions/visualizer.py @@ -0,0 +1,39 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from visualdl import LogWriter + +from . 
import extension +from ..updaters.trainer import Trainer + + +class VisualDL(extension.Extension): + """A wrapper of visualdl log writer. It assumes that the metrics to be visualized + are all scalars which are recorded into the `.observation` dictionary of the + trainer object. The dictionary is created for each step, thus the visualdl log + writer uses the iteration from the updater's `iteration` as the global step to + add records. + """ + trigger = (1, 'iteration') + default_name = 'visualdl' + priority = extension.PRIORITY_READER + + def __init__(self, output_dir): + self.writer = LogWriter(str(output_dir)) + + def __call__(self, trainer: Trainer): + for k, v in trainer.observation.items(): + self.writer.add_scalar(k, v, step=trainer.updater.state.iteration) + + def finalize(self, trainer): + self.writer.close() diff --git a/deepspeech/training/gradclip.py b/deepspeech/training/gradclip.py index d0f9803d2..87b36acae 100644 --- a/deepspeech/training/gradclip.py +++ b/deepspeech/training/gradclip.py @@ -27,6 +27,9 @@ class ClipGradByGlobalNormWithLog(paddle.nn.ClipGradByGlobalNorm): def __init__(self, clip_norm): super().__init__(clip_norm) + def __repr__(self): + return f"{self.__class__.__name__}(global_clip_norm={self.clip_norm})" + @imperative_base.no_grad def _dygraph_clip(self, params_grads): params_and_grads = [] @@ -44,7 +47,7 @@ class ClipGradByGlobalNormWithLog(paddle.nn.ClipGradByGlobalNorm): sum_square = layers.reduce_sum(square) sum_square_list.append(sum_square) - # debug log + # debug log, not dump all since slow down train process if i < 10: logger.debug( f"Grad Before Clip: {p.name}: {float(sum_square.sqrt()) }") @@ -73,7 +76,7 @@ class ClipGradByGlobalNormWithLog(paddle.nn.ClipGradByGlobalNorm): new_grad = layers.elementwise_mul(x=g, y=clip_var) params_and_grads.append((p, new_grad)) - # debug log + # debug log, not dump all since slow down train process if i < 10: logger.debug( f"Grad After Clip: {p.name}: {float(new_grad.square().sum().sqrt())}" diff --git a/deepspeech/training/optimizer.py b/deepspeech/training/optimizer.py new file mode 100644 index 000000000..db7069c98 --- /dev/null +++ b/deepspeech/training/optimizer.py @@ -0,0 +1,121 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
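The snapshot records kept by the `Snapshot` extension above are plain JSON lines; a sketch of writing and reading them back with `jsonlines` (paths and values below are made up):

import jsonlines

# write a couple of hypothetical records the way Snapshot does
with jsonlines.open("records.jsonl", "w") as writer:
    writer.write({"time": "2021-07-01 10:00:00", "path": "/ckpt/1.np",
                  "iteration": 1000, "epoch": 1, "indicator": 0.42})
    writer.write({"time": "2021-07-01 11:00:00", "path": "/ckpt/2.np",
                  "iteration": 2000, "epoch": 2, "indicator": 0.37})

with jsonlines.open("records.jsonl", "r") as reader:
    records = list(reader)
print(records[-1]["path"])   # the latest checkpoint path, used when resuming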
+from typing import Any +from typing import Dict +from typing import Text + +import paddle +from paddle.optimizer import Optimizer +from paddle.regularizer import L2Decay + +from deepspeech.training.gradclip import ClipGradByGlobalNormWithLog +from deepspeech.utils.dynamic_import import dynamic_import +from deepspeech.utils.dynamic_import import instance_class +from deepspeech.utils.log import Log + +__all__ = ["OptimizerFactory"] + +logger = Log(__name__).getlog() + +OPTIMIZER_DICT = { + "sgd": "paddle.optimizer:SGD", + "momentum": "paddle.optimizer:Momentum", + "adadelta": "paddle.optimizer:Adadelta", + "adam": "paddle.optimizer:Adam", + "adamw": "paddle.optimizer:AdamW", +} + + +def register_optimizer(cls): + """Register optimizer.""" + alias = cls.__name__.lower() + OPTIMIZER_DICT[cls.__name__.lower()] = cls.__module__ + ":" + cls.__name__ + return cls + + +@register_optimizer +class Noam(paddle.optimizer.Adam): + """Seem to: espnet/nets/pytorch_backend/transformer/optimizer.py """ + + def __init__(self, + learning_rate=0, + beta1=0.9, + beta2=0.98, + epsilon=1e-9, + parameters=None, + weight_decay=None, + grad_clip=None, + lazy_mode=False, + multi_precision=False, + name=None): + super().__init__( + learning_rate=learning_rate, + beta1=beta1, + beta2=beta2, + epsilon=epsilon, + parameters=parameters, + weight_decay=weight_decay, + grad_clip=grad_clip, + lazy_mode=lazy_mode, + multi_precision=multi_precision, + name=name) + + def __repr__(self): + echo = f"<{self.__class__.__module__}.{self.__class__.__name__} object at {hex(id(self))}> " + echo += f"learning_rate: {self._learning_rate}, " + echo += f"(beta1: {self._beta1} beta2: {self._beta2}), " + echo += f"epsilon: {self._epsilon}" + + +def dynamic_import_optimizer(module): + """Import Optimizer class dynamically. + + Args: + module (str): module_name:class_name or alias in `OPTIMIZER_DICT` + + Returns: + type: Optimizer class + + """ + module_class = dynamic_import(module, OPTIMIZER_DICT) + assert issubclass(module_class, + Optimizer), f"{module} does not implement Optimizer" + return module_class + + +class OptimizerFactory(): + @classmethod + def from_args(cls, name: str, args: Dict[Text, Any]): + assert "parameters" in args, "parameters not in args." + assert "learning_rate" in args, "learning_rate not in args." + + grad_clip = ClipGradByGlobalNormWithLog( + args['grad_clip']) if "grad_clip" in args else None + weight_decay = L2Decay( + args['weight_decay']) if "weight_decay" in args else None + if weight_decay: + logger.info(f'') + if grad_clip: + logger.info(f'') + + module_class = dynamic_import_optimizer(name.lower()) + args.update({"grad_clip": grad_clip, "weight_decay": weight_decay}) + opt = instance_class(module_class, args) + if "__repr__" in vars(opt): + logger.info(f"{opt}") + else: + logger.info( + f" LR: {args['learning_rate']}" + ) + return opt diff --git a/deepspeech/training/reporter.py b/deepspeech/training/reporter.py new file mode 100644 index 000000000..7afc33f38 --- /dev/null +++ b/deepspeech/training/reporter.py @@ -0,0 +1,144 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
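A usage sketch for the new `OptimizerFactory` (values are hypothetical; `grad_clip` and `weight_decay` are passed as plain floats and wrapped into `ClipGradByGlobalNormWithLog` / `L2Decay` inside `from_args`, assuming `instance_class` forwards the dict as keyword arguments):

import paddle
from deepspeech.training.optimizer import OptimizerFactory

model = paddle.nn.Linear(256, 256)
args = {
    "parameters": model.parameters(),
    "learning_rate": 1e-3,
    "weight_decay": 1e-6,
    "grad_clip": 5.0,
}
optimizer = OptimizerFactory.from_args("adam", args)   # alias resolved via OPTIMIZER_DICT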
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import contextlib +import math +from collections import defaultdict + +OBSERVATIONS = None + + +@contextlib.contextmanager +def ObsScope(observations): + # make `observation` the target to report to. + # it is basically a dictionary that stores temporary observations + global OBSERVATIONS + old = OBSERVATIONS + OBSERVATIONS = observations + + try: + yield + finally: + OBSERVATIONS = old + + +def get_observations(): + global OBSERVATIONS + return OBSERVATIONS + + +def report(name, value): + # a simple function to report named value + # you can use it everywhere, it will get the default target and writ to it + # you can think of it as std.out + observations = get_observations() + if observations is None: + return + else: + observations[name] = value + + +class Summary(): + """Online summarization of a sequence of scalars. + Summary computes the statistics of given scalars online. + """ + + def __init__(self): + self._x = 0.0 + self._x2 = 0.0 + self._n = 0 + + def add(self, value, weight=1): + """Adds a scalar value. + Args: + value: Scalar value to accumulate. It is either a NumPy scalar or + a zero-dimensional array (on CPU or GPU). + weight: An optional weight for the value. It is a NumPy scalar or + a zero-dimensional array (on CPU or GPU). + Default is 1 (integer). + """ + self._x += weight * value + self._x2 += weight * value * value + self._n += weight + + def compute_mean(self): + """Computes the mean.""" + x, n = self._x, self._n + return x / n + + def make_statistics(self): + """Computes and returns the mean and standard deviation values. + Returns: + tuple: Mean and standard deviation values. + """ + x, n = self._x, self._n + mean = x / n + var = self._x2 / n - mean * mean + std = math.sqrt(var) + return mean, std + + +class DictSummary(): + """Online summarization of a sequence of dictionaries. + ``DictSummary`` computes the statistics of a given set of scalars online. + It only computes the statistics for scalar values and variables of scalar + values in the dictionaries. + """ + + def __init__(self): + self._summaries = defaultdict(Summary) + + def add(self, d): + """Adds a dictionary of scalars. + Args: + d (dict): Dictionary of scalars to accumulate. Only elements of + scalars, zero-dimensional arrays, and variables of + zero-dimensional arrays are accumulated. When the value + is a tuple, the second element is interpreted as a weight. + """ + summaries = self._summaries + for k, v in d.items(): + w = 1 + if isinstance(v, tuple): + v = v[0] + w = v[1] + summaries[k].add(v, weight=w) + + def compute_mean(self): + """Creates a dictionary of mean values. + It returns a single dictionary that holds a mean value for each entry + added to the summary. + Returns: + dict: Dictionary of mean values. + """ + return { + name: summary.compute_mean() + for name, summary in self._summaries.items() + } + + def make_statistics(self): + """Creates a dictionary of statistics. + It returns a single dictionary that holds mean and standard deviation + values for every entry added to the summary. 
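How `report` and `ObsScope` above are meant to be combined, as a minimal sketch (code running inside the scope can publish named scalars without knowing where they end up):

from collections import OrderedDict

from deepspeech.training.reporter import ObsScope
from deepspeech.training.reporter import report


def train_step():
    report("loss", 1.23)
    report("lr", 5e-4)


observation = OrderedDict()
with ObsScope(observation):
    train_step()

print(observation)    # OrderedDict([('loss', 1.23), ('lr', 0.0005)])
report("ignored", 0)  # outside any scope, report() is a no-op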
For an entry of name + ``'key'``, these values are added to the dictionary by names ``'key'`` + and ``'key.std'``, respectively. + Returns: + dict: Dictionary of statistics of all entries. + """ + stats = {} + for name, summary in self._summaries.items(): + mean, std = summary.make_statistics() + stats[name] = mean + stats[name + '.std'] = std + + return stats diff --git a/deepspeech/training/scheduler.py b/deepspeech/training/scheduler.py index d36130284..bb53281a8 100644 --- a/deepspeech/training/scheduler.py +++ b/deepspeech/training/scheduler.py @@ -11,18 +11,37 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from typing import Any +from typing import Dict +from typing import Text from typing import Union from paddle.optimizer.lr import LRScheduler from typeguard import check_argument_types +from deepspeech.utils.dynamic_import import dynamic_import +from deepspeech.utils.dynamic_import import instance_class from deepspeech.utils.log import Log -__all__ = ["WarmupLR"] +__all__ = ["WarmupLR", "LRSchedulerFactory"] logger = Log(__name__).getlog() +SCHEDULER_DICT = { + "noam": "paddle.optimizer.lr:NoamDecay", + "expdecaylr": "paddle.optimizer.lr:ExponentialDecay", + "piecewisedecay": "paddle.optimizer.lr:PiecewiseDecay", +} + +def register_scheduler(cls): + """Register scheduler.""" + alias = cls.__name__.lower() + SCHEDULER_DICT[cls.__name__.lower()] = cls.__module__ + ":" + cls.__name__ + return cls + + +@register_scheduler class WarmupLR(LRScheduler): """The WarmupLR scheduler This scheduler is almost same as NoamLR Scheduler except for following @@ -40,7 +59,8 @@ class WarmupLR(LRScheduler): warmup_steps: Union[int, float]=25000, learning_rate=1.0, last_epoch=-1, - verbose=False): + verbose=False, + **kwargs): assert check_argument_types() self.warmup_steps = warmup_steps super().__init__(learning_rate, last_epoch, verbose) @@ -64,3 +84,45 @@ class WarmupLR(LRScheduler): None ''' self.step(epoch=step) + + +@register_scheduler +class ConstantLR(LRScheduler): + """ + Args: + learning_rate (float): The initial learning rate. It is a python float number. + last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate. + verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False`` . + + Returns: + ``ConstantLR`` instance to schedule learning rate. + """ + + def __init__(self, learning_rate, last_epoch=-1, verbose=False): + super().__init__(learning_rate, last_epoch, verbose) + + def get_lr(self): + return self.base_lr + + +def dynamic_import_scheduler(module): + """Import Scheduler class dynamically. + + Args: + module (str): module_name:class_name or alias in `SCHEDULER_DICT` + + Returns: + type: Scheduler class + + """ + module_class = dynamic_import(module, SCHEDULER_DICT) + assert issubclass(module_class, + LRScheduler), f"{module} does not implement LRScheduler" + return module_class + + +class LRSchedulerFactory(): + @classmethod + def from_args(cls, name: str, args: Dict[Text, Any]): + module_class = dynamic_import_scheduler(name.lower()) + return instance_class(module_class, args) diff --git a/deepspeech/training/timer.py b/deepspeech/training/timer.py new file mode 100644 index 000000000..2ca9d6386 --- /dev/null +++ b/deepspeech/training/timer.py @@ -0,0 +1,50 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
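A usage sketch for `LRSchedulerFactory` and the registered aliases above (argument values are hypothetical; aliases resolve through `SCHEDULER_DICT`, classes decorated with `@register_scheduler` register themselves under their lowercased name, and `instance_class` is assumed to forward the dict as keyword arguments):

from deepspeech.training.scheduler import LRSchedulerFactory

# WarmupLR registers itself as "warmuplr", ConstantLR as "constantlr"
warmup = LRSchedulerFactory.from_args(
    "warmuplr", {"learning_rate": 0.002, "warmup_steps": 25000})
const = LRSchedulerFactory.from_args("constantlr", {"learning_rate": 1e-3})
print(const.get_lr())   # 0.001, constant over training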
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import datetime +import time + +from deepspeech.utils.log import Log + +__all__ = ["Timer"] + +logger = Log(__name__).getlog() + + +class Timer(): + """To be used like this: + with Timer("Message") as value: + do some thing + """ + + def __init__(self, message=None): + self.message = message + + def duration(self) -> str: + elapsed_time = time.time() - self.start + time_str = str(datetime.timedelta(seconds=elapsed_time)) + return time_str + + def __enter__(self): + self.start = time.time() + return self + + def __exit__(self, type, value, traceback): + if self.message: + logger.info(self.message.format(self.duration())) + + def __call__(self) -> float: + return time.time() - self.start + + def __str__(self): + return self.duration() diff --git a/deepspeech/training/trainer.py b/deepspeech/training/trainer.py index 56de32617..a5efdd541 100644 --- a/deepspeech/training/trainer.py +++ b/deepspeech/training/trainer.py @@ -11,16 +11,24 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import sys import time +from collections import OrderedDict from pathlib import Path import paddle from paddle import distributed as dist from tensorboardX import SummaryWriter -from deepspeech.utils import checkpoint +from deepspeech.training.reporter import ObsScope +from deepspeech.training.reporter import report +from deepspeech.training.timer import Timer from deepspeech.utils import mp_tools +from deepspeech.utils import profiler +from deepspeech.utils.checkpoint import Checkpoint from deepspeech.utils.log import Log +from deepspeech.utils.utility import seed_all +from deepspeech.utils.utility import UpdateConfig __all__ = ["Trainer"] @@ -29,37 +37,37 @@ logger = Log(__name__).getlog() class Trainer(): """ - An experiment template in order to structure the training code and take - care of saving, loading, logging, visualization stuffs. It's intended to - be flexible and simple. - - So it only handles output directory (create directory for the output, - create a checkpoint directory, dump the config in use and create + An experiment template in order to structure the training code and take + care of saving, loading, logging, visualization stuffs. It's intended to + be flexible and simple. + + So it only handles output directory (create directory for the output, + create a checkpoint directory, dump the config in use and create visualizer and logger) in a standard way without enforcing any - input-output protocols to the model and dataloader. It leaves the main - part for the user to implement their own (setup the model, criterion, - optimizer, define a training step, define a validation function and + input-output protocols to the model and dataloader. 
It leaves the main + part for the user to implement their own (setup the model, criterion, + optimizer, define a training step, define a validation function and customize all the text and visual logs). - It does not save too much boilerplate code. The users still have to write - the forward/backward/update mannually, but they are free to add + It does not save too much boilerplate code. The users still have to write + the forward/backward/update mannually, but they are free to add non-standard behaviors if needed. We have some conventions to follow. - 1. Experiment should have ``model``, ``optimizer``, ``train_loader`` and + 1. Experiment should have ``model``, ``optimizer``, ``train_loader`` and ``valid_loader``, ``config`` and ``args`` attributes. - 2. The config should have a ``training`` field, which has - ``valid_interval``, ``save_interval`` and ``max_iteration`` keys. It is - used as the trigger to invoke validation, checkpointing and stop of the + 2. The config should have a ``training`` field, which has + ``valid_interval``, ``save_interval`` and ``max_iteration`` keys. It is + used as the trigger to invoke validation, checkpointing and stop of the experiment. - 3. There are four methods, namely ``train_batch``, ``valid``, + 3. There are four methods, namely ``train_batch``, ``valid``, ``setup_model`` and ``setup_dataloader`` that should be implemented. - Feel free to add/overwrite other methods and standalone functions if you + Feel free to add/overwrite other methods and standalone functions if you need. - + Parameters ---------- config: yacs.config.CfgNode The configuration used for the experiment. - + args: argparse.Namespace The parsed command line arguments. Examples @@ -68,16 +76,16 @@ class Trainer(): >>> exp = Trainer(config, args) >>> exp.setup() >>> exp.run() - >>> + >>> >>> config = get_cfg_defaults() >>> parser = default_argument_parser() >>> args = parser.parse_args() - >>> if args.config: + >>> if args.config: >>> config.merge_from_file(args.config) >>> if args.opts: >>> config.merge_from_list(args.opts) >>> config.freeze() - >>> + >>> >>> if args.nprocs > 1 and args.device == "gpu": >>> dist.spawn(main_sp, args=(config, args), nprocs=args.nprocs) >>> else: @@ -93,6 +101,20 @@ class Trainer(): self.checkpoint_dir = None self.iteration = 0 self.epoch = 0 + self.rank = dist.get_rank() + + logger.info(f"Rank: {self.rank}/{dist.get_world_size()}") + + if args.seed: + seed_all(args.seed) + logger.info(f"Set seed {args.seed}") + + if self.args.benchmark_batch_size: + with UpdateConfig(self.config): + self.config.collator.batch_size = self.args.benchmark_batch_size + self.config.training.log_interval = 1 + logger.info( + f"Benchmark reset batch-size: {self.args.benchmark_batch_size}") def setup(self): """Setup the experiment. @@ -114,7 +136,7 @@ class Trainer(): @property def parallel(self): - """A flag indicating whether the experiment should run with + """A flag indicating whether the experiment should run with multiprocessing. 
""" return self.args.device == "gpu" and self.args.nprocs > 1 @@ -139,19 +161,19 @@ class Trainer(): "epoch": self.epoch, "lr": self.optimizer.get_lr() }) - checkpoint.save_parameters(self.checkpoint_dir, self.iteration - if tag is None else tag, self.model, - self.optimizer, infos) + self.checkpoint.add_checkpoint(self.checkpoint_dir, self.iteration + if tag is None else tag, self.model, + self.optimizer, infos) def resume_or_scratch(self): - """Resume from latest checkpoint at checkpoints in the output + """Resume from latest checkpoint at checkpoints in the output directory or load a specified checkpoint. - + If ``args.checkpoint_path`` is not None, load the checkpoint, else resume training. """ scratch = None - infos = checkpoint.load_parameters( + infos = self.checkpoint.load_latest_parameters( self.model, self.optimizer, checkpoint_dir=self.checkpoint_dir, @@ -165,58 +187,88 @@ class Trainer(): self.iteration = 0 self.epoch = 0 scratch = True - + logger.info("Restore/Init checkpoint!") return scratch def new_epoch(self): """Reset the train loader seed and increment `epoch`. """ self.epoch += 1 - if self.parallel: - self.train_loader.batch_sampler.set_epoch(self.epoch) + if self.parallel and hasattr(self.train_loader, "batch_sampler"): + batch_sampler = self.train_loader.batch_sampler + if isinstance(batch_sampler, paddle.io.DistributedBatchSampler): + batch_sampler.set_epoch(self.epoch) + + def after_train_batch(self): + if self.args.profiler_options: + profiler.add_profiler_step(self.args.profiler_options) + + if self.args.benchmark_max_step and self.iteration > self.args.benchmark_max_step: + logger.info( + f"Reach benchmark-max-step: {self.args.benchmark_max_step}") + sys.exit( + f"Reach benchmark-max-step: {self.args.benchmark_max_step}") def train(self): """The training process control by epoch.""" from_scratch = self.resume_or_scratch() if from_scratch: # save init model, i.e. 0 epoch - self.save(tag='init') + self.save(tag='init', infos=None) - self.lr_scheduler.step(self.iteration) - if self.parallel: + # lr will resotre from optimizer ckpt + # self.lr_scheduler.step(self.epoch) + if self.parallel and hasattr(self.train_loader, "batch_sampler"): self.train_loader.batch_sampler.set_epoch(self.epoch) logger.info(f"Train Total Examples: {len(self.train_loader.dataset)}") while self.epoch < self.config.training.n_epoch: - self.model.train() - try: - data_start_time = time.time() - for batch_index, batch in enumerate(self.train_loader): - dataload_time = time.time() - data_start_time - msg = "Train: Rank: {}, ".format(dist.get_rank()) - msg += "epoch: {}, ".format(self.epoch) - msg += "step: {}, ".format(self.iteration) - msg += "batch : {}/{}, ".format(batch_index + 1, - len(self.train_loader)) - msg += "lr: {:>.8f}, ".format(self.lr_scheduler()) - msg += "data time: {:>.3f}s, ".format(dataload_time) - self.train_batch(batch_index, batch, msg) + with Timer("Epoch-Train Time Cost: {}"): + self.model.train() + try: data_start_time = time.time() - except Exception as e: - logger.error(e) - raise e - - total_loss, num_seen_utts = self.valid() - if dist.get_world_size() > 1: - num_seen_utts = paddle.to_tensor(num_seen_utts) - # the default operator in all_reduce function is sum. 
- dist.all_reduce(num_seen_utts) - total_loss = paddle.to_tensor(total_loss) - dist.all_reduce(total_loss) - cv_loss = total_loss / num_seen_utts - cv_loss = float(cv_loss) - else: - cv_loss = total_loss / num_seen_utts + for batch_index, batch in enumerate(self.train_loader): + dataload_time = time.time() - data_start_time + msg = "Train:" + observation = OrderedDict() + with ObsScope(observation): + report("Rank", dist.get_rank()) + report("epoch", self.epoch) + report('step', self.iteration) + report('step/total', + (batch_index + 1) / len(self.train_loader)) + report("lr", self.lr_scheduler()) + self.train_batch(batch_index, batch, msg) + self.after_train_batch() + report('reader_cost', dataload_time) + observation['batch_cost'] = observation[ + 'reader_cost'] + observation['step_cost'] + observation['samples'] = observation['batch_size'] + observation['ips[sent./sec]'] = observation[ + 'batch_size'] / observation['batch_cost'] + for k, v in observation.items(): + msg += f" {k}: " + msg += f"{v:>.8f}" if isinstance(v, + float) else f"{v}" + msg += "," + logger.info(msg) + data_start_time = time.time() + except Exception as e: + logger.error(e) + raise e + + with Timer("Eval Time Cost: {}"): + total_loss, num_seen_utts = self.valid() + if dist.get_world_size() > 1: + num_seen_utts = paddle.to_tensor(num_seen_utts) + # the default operator in all_reduce function is sum. + dist.all_reduce(num_seen_utts) + total_loss = paddle.to_tensor(total_loss) + dist.all_reduce(total_loss) + cv_loss = total_loss / num_seen_utts + cv_loss = float(cv_loss) + else: + cv_loss = total_loss / num_seen_utts logger.info( 'Epoch {} Val info val_loss {}'.format(self.epoch, cv_loss)) @@ -234,14 +286,14 @@ class Trainer(): """The routine of the experiment after setup. This method is intended to be used by the user. """ - try: - self.train() - except KeyboardInterrupt: - self.save() - exit(-1) - finally: - self.destory() - logger.info("Training Done.") + with Timer("Training Done: {}"): + try: + self.train() + except KeyboardInterrupt: + self.save() + exit(-1) + finally: + self.destory() def setup_output_dir(self): """Create a directory used for output. @@ -254,7 +306,7 @@ class Trainer(): def setup_checkpointer(self): """Create a directory used to save checkpoints into. - + It is "checkpoints" inside the output directory. """ # checkpoint dir @@ -263,6 +315,10 @@ class Trainer(): self.checkpoint_dir = checkpoint_dir + self.checkpoint = Checkpoint( + kbest_n=self.config.training.checkpoint.kbest_n, + latest_n=self.config.training.checkpoint.latest_n) + @mp_tools.rank_zero_only def destory(self): """Close visualizer to avoid hanging after training""" @@ -273,13 +329,13 @@ class Trainer(): @mp_tools.rank_zero_only def setup_visualizer(self): """Initialize a visualizer to log the experiment. - + The visual log is saved in the output directory. - + Notes ------ - Only the main process has a visualizer with it. Use multiple - visualizers in multiprocess to write to a same log file may cause + Only the main process has a visualizer with it. Use multiple + visualizers in multiprocess to write to a same log file may cause unexpected behaviors. """ # visualizer @@ -288,9 +344,9 @@ class Trainer(): @mp_tools.rank_zero_only def dump_config(self): - """Save the configuration used for this experiment. - - It is saved in to ``config.yaml`` in the output directory at the + """Save the configuration used for this experiment. + + It is saved in to ``config.yaml`` in the output directory at the beginning of the experiment. 
""" with open(self.output_dir / "config.yaml", 'wt') as f: @@ -308,13 +364,13 @@ class Trainer(): raise NotImplementedError("valid should be implemented.") def setup_model(self): - """Setup model, criterion and optimizer, etc. A subclass should + """Setup model, criterion and optimizer, etc. A subclass should implement this method. """ raise NotImplementedError("setup_model should be implemented.") def setup_dataloader(self): - """Setup training dataloader and validation dataloader. A subclass + """Setup training dataloader and validation dataloader. A subclass should implement this method. """ raise NotImplementedError("setup_dataloader should be implemented.") diff --git a/deepspeech/training/triggers/__init__.py b/deepspeech/training/triggers/__init__.py new file mode 100644 index 000000000..1a7c4292e --- /dev/null +++ b/deepspeech/training/triggers/__init__.py @@ -0,0 +1,28 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from .interval_trigger import IntervalTrigger + + +def never_fail_trigger(trainer): + return False + + +def get_trigger(trigger): + if trigger is None: + return never_fail_trigger + if callable(trigger): + return trigger + else: + trigger = IntervalTrigger(*trigger) + return trigger diff --git a/deepspeech/training/triggers/interval_trigger.py b/deepspeech/training/triggers/interval_trigger.py new file mode 100644 index 000000000..1e04afad8 --- /dev/null +++ b/deepspeech/training/triggers/interval_trigger.py @@ -0,0 +1,38 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +class IntervalTrigger(): + """A Predicate to do something every N cycle.""" + + def __init__(self, period: int, unit: str): + if unit not in ("iteration", "epoch"): + raise ValueError("unit should be 'iteration' or 'epoch'") + if period <= 0: + raise ValueError("period should be a positive integer.") + self.period = period + self.unit = unit + self.last_index = None + + def __call__(self, trainer): + if self.last_index is None: + last_index = getattr(trainer.updater.state, self.unit) + self.last_index = last_index + + last_index = self.last_index + index = getattr(trainer.updater.state, self.unit) + fire = index // self.period != last_index // self.period + + self.last_index = index + return fire diff --git a/deepspeech/training/triggers/limit_trigger.py b/deepspeech/training/triggers/limit_trigger.py new file mode 100644 index 000000000..ecd527ac5 --- /dev/null +++ b/deepspeech/training/triggers/limit_trigger.py @@ -0,0 +1,31 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +class LimitTrigger(): + """A Predicate to decide whether to stop.""" + + def __init__(self, limit: int, unit: str): + if unit not in ("iteration", "epoch"): + raise ValueError("unit should be 'iteration' or 'epoch'") + if limit <= 0: + raise ValueError("limit should be a positive integer.") + self.limit = limit + self.unit = unit + + def __call__(self, trainer): + state = trainer.updater.state + index = getattr(state, self.unit) + fire = index >= self.limit + return fire diff --git a/deepspeech/training/triggers/time_trigger.py b/deepspeech/training/triggers/time_trigger.py new file mode 100644 index 000000000..ea8fe562c --- /dev/null +++ b/deepspeech/training/triggers/time_trigger.py @@ -0,0 +1,32 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +class TimeTrigger(): + """Trigger based on a fixed time interval. + This trigger accepts iterations with a given interval time. + Args: + period (float): Interval time. It is given in seconds. 
+ """ + + def __init__(self, period): + self._period = period + self._next_time = self._period + + def __call__(self, trainer): + if self._next_time < trainer.elapsed_time: + self._next_time += self._period + return True + else: + return False diff --git a/deepspeech/training/updaters/__init__.py b/deepspeech/training/updaters/__init__.py new file mode 100644 index 000000000..185a92b8d --- /dev/null +++ b/deepspeech/training/updaters/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/deepspeech/training/updaters/standard_updater.py b/deepspeech/training/updaters/standard_updater.py new file mode 100644 index 000000000..10c99e7fc --- /dev/null +++ b/deepspeech/training/updaters/standard_updater.py @@ -0,0 +1,195 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Dict +from typing import Optional + +import paddle +from paddle.io import DataLoader +from paddle.io import DistributedBatchSampler +from paddle.nn import Layer +from paddle.optimizer import Optimizer +from paddle.optimizer.lr import LRScheduler + +from deepspeech.training.reporter import report +from deepspeech.training.updaters.updater import UpdaterBase +from deepspeech.training.updaters.updater import UpdaterState +from deepspeech.utils.log import Log + +__all__ = ["StandardUpdater"] + +logger = Log(__name__).getlog() + + +class StandardUpdater(UpdaterBase): + """An example of over-simplification. Things may not be that simple, but + you can subclass it to fit your need. + """ + + def __init__(self, + model: Layer, + optimizer: Optimizer, + scheduler: LRScheduler, + dataloader: DataLoader, + init_state: Optional[UpdaterState]=None): + super().__init__(init_state) + # it is designed to hold multiple models + models = {"main": model} + self.models: Dict[str, Layer] = models + self.model = model + + # it is designed to hold multiple optimizers + optimizers = {"main": optimizer} + self.optimizer = optimizer + self.optimizers: Dict[str, Optimizer] = optimizers + + # it is designed to hold multiple scheduler + schedulers = {"main": scheduler} + self.scheduler = scheduler + self.schedulers: Dict[str, LRScheduler] = schedulers + + # dataloaders + self.dataloader = dataloader + + self.train_iterator = iter(dataloader) + + def update(self): + # We increase the iteration index after updating and before extension. + # Here are the reasons. + + # 0. 
Snapshotting (as well as other extensions, like the visualizer) is + # executed after a step of updating; + # 1. We decide to increase the iteration index after updating and + # before any extension is executed. + # 2. We do not increase the iteration after the extensions because we + # prefer a consistent resume behavior: when loading from + # `snapshot_iter_100.pdz`, the next step to train is `101`, + # naturally. But if the iteration were increased after the + # extensions (including snapshotting), then a `snapshot_iter_99` would be + # loaded. You would need an extra increment of the iteration index + # before training to avoid repeating iteration `99`, which has already + # been done before snapshotting. + # 3. Thus the iteration index represents "how many steps have + # been done so far." + # NOTE: use report to capture the correct value. If you want to + # report the learning rate used for a step, you must report it before + # the learning rate scheduler's step() has been called. In paddle's + # convention, we do not use an extension to change the learning rate, + # so if you want to report it, do it in the updater. + + # Then here comes the next question: when is the proper time to + # increase the epoch index? Since all extensions are executed after + # updating, the proper time to increase the epoch index is also + # right after updating. + # 1. If we increase the epoch index before updating, then an extension + # based on the epoch would miss the correct timing. It could only be + # triggered after an extra update. + # 2. Theoretically, when an epoch is done, the epoch index should be + # increased. So it is increased after updating. + # 3. Thus, the epoch index represents "how many epochs have been + # done so far." So it starts from 0. + + # switch to training mode + for model in self.models.values(): + model.train() + + # training for a step is implemented here + with Timer("data time cost:{}"): + batch = self.read_batch() + with Timer("step time cost:{}"): + self.update_core(batch) + + self.state.iteration += 1 + if self.updates_per_epoch is not None: + if self.state.iteration % self.updates_per_epoch == 0: + self.state.epoch += 1 + + def update_core(self, batch): + """A simple case for a training step. Basic assumptions are: + Single model; + Single optimizer; + Single scheduler, and the learning rate is updated each step; + A batch from the dataloader is just the input of the model; + The model returns a single loss, or a dict containing several losses. + Parameters are updated at every batch, no gradient accumulation.
+ """ + loss = self.model(*batch) + + if isinstance(loss, paddle.Tensor): + loss_dict = {"main": loss} + else: + # Dict[str, Tensor] + loss_dict = loss + if "main" not in loss_dict: + main_loss = 0 + for loss_item in loss.values(): + main_loss += loss_item + loss_dict["main"] = main_loss + + for name, loss_item in loss_dict.items(): + report(name, float(loss_item)) + + self.optimizer.clear_grad() + loss_dict["main"].backward() + self.optimizer.step() + self.scheduler.step() + + @property + def updates_per_epoch(self): + """Number of steps per epoch, + determined by the length of the dataloader.""" + length_of_dataloader = None + try: + length_of_dataloader = len(self.dataloader) + except TypeError: + logger.debug("This dataloader has no __len__.") + finally: + return length_of_dataloader + + def new_epoch(self): + """Start a new epoch.""" + # NOTE: all batch sampler for distributed training should + # subclass DistributedBatchSampler and implement `set_epoch` method + if hasattr(self.dataloader, "batch_sampler"): + batch_sampler = self.dataloader.batch_sampler + if isinstance(batch_sampler, DistributedBatchSampler): + batch_sampler.set_epoch(self.state.epoch) + self.train_iterator = iter(self.dataloader) + + def read_batch(self): + """Read a batch from the data loader, auto renew when data is exhausted.""" + try: + batch = next(self.train_iterator) + except StopIteration: + self.new_epoch() + batch = next(self.train_iterator) + return batch + + def state_dict(self): + """State dict of a Updater, model, optimizers/schedulers + and updater state are included.""" + state_dict = super().state_dict() + for name, model in self.models.items(): + state_dict[f"{name}_params"] = model.state_dict() + for name, optim in self.optimizers.items(): + state_dict[f"{name}_optimizer"] = optim.state_dict() + return state_dict + + def set_state_dict(self, state_dict): + """Set state dict for a Updater. Parameters of models, states for + optimizers/schedulers and UpdaterState are restored.""" + for name, model in self.models.items(): + model.set_state_dict(state_dict[f"{name}_params"]) + for name, optim in self.optimizers.items(): + optim.set_state_dict(state_dict[f"{name}_optimizer"]) + super().set_state_dict(state_dict) diff --git a/deepspeech/training/updaters/trainer.py b/deepspeech/training/updaters/trainer.py new file mode 100644 index 000000000..077694659 --- /dev/null +++ b/deepspeech/training/updaters/trainer.py @@ -0,0 +1,184 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import sys +import traceback +from collections import OrderedDict +from pathlib import Path +from typing import Callable +from typing import List +from typing import Union + +import six +import tqdm + +from deepspeech.training.extensions.extension import Extension +from deepspeech.training.extensions.extension import PRIORITY_READER +from deepspeech.training.reporter import ObsScope +from deepspeech.training.triggers import get_trigger +from deepspeech.training.triggers.limit_trigger import LimitTrigger +from deepspeech.training.updaters.updater import UpdaterBase + + +class _ExtensionEntry(): + def __init__(self, extension, trigger, priority): + self.extension = extension + self.trigger = trigger + self.priority = priority + + +class Trainer(): + def __init__(self, + updater: UpdaterBase, + stop_trigger: Callable=None, + out: Union[str, Path]='result', + extensions: List[Extension]=None): + self.updater = updater + self.extensions = OrderedDict() + self.stop_trigger = LimitTrigger(*stop_trigger) + self.out = Path(out) + self.observation = None + + self._done = False + if extensions: + for ext in extensions: + self.extend(ext) + + @property + def is_before_training(self): + return self.updater.state.iteration == 0 + + def extend(self, extension, name=None, trigger=None, priority=None): + # get name for the extension + # argument \ + # -> extention's name \ + # -> default_name (class name, when it is an object) \ + # -> function name when it is a function \ + # -> error + + if name is None: + name = getattr(extension, 'name', None) + if name is None: + name = getattr(extension, 'default_name', None) + if name is None: + name = getattr(extension, '__name__', None) + if name is None: + raise ValueError("Name is not given for the extension.") + if name == 'training': + raise ValueError("training is a reserved name.") + + if trigger is None: + trigger = getattr(extension, 'trigger', (1, 'iteration')) + trigger = get_trigger(trigger) + + if priority is None: + priority = getattr(extension, 'priority', PRIORITY_READER) + + # add suffix to avoid nameing conflict + ordinal = 0 + modified_name = name + while modified_name in self.extensions: + ordinal += 1 + modified_name = f"{name}_{ordinal}" + extension.name = modified_name + + self.extensions[modified_name] = _ExtensionEntry(extension, trigger, + priority) + + def get_extension(self, name): + """get extension by name.""" + extensions = self.extensions + if name in extensions: + return extensions[name].extension + else: + raise ValueError(f'extension {name} not found') + + def run(self): + if self._done: + raise RuntimeError("Training is already done!.") + + self.out.mkdir(parents=True, exist_ok=True) + + # sort extensions by priorities once + extension_order = sorted( + self.extensions.keys(), + key=lambda name: self.extensions[name].priority, + reverse=True) + extensions = [(name, self.extensions[name]) for name in extension_order] + + # initializing all extensions + for name, entry in extensions: + if hasattr(entry.extension, "initialize"): + entry.extension.initialize(self) + + update = self.updater.update # training step + stop_trigger = self.stop_trigger + + # display only one progress bar + max_iteration = None + if isinstance(stop_trigger, LimitTrigger): + if stop_trigger.unit == 'epoch': + max_epoch = self.stop_trigger.limit + updates_per_epoch = getattr(self.updater, "updates_per_epoch", + None) + max_iteration = max_epoch * updates_per_epoch if updates_per_epoch else None + else: + max_iteration = self.stop_trigger.limit + + p = 
tqdm.tqdm(initial=self.updater.state.iteration, total=max_iteration) + + try: + while not stop_trigger(self): + self.observation = {} + # set observation as the `report` target + # you can use `report` freely in Updater.update() + + # updating parameters and state + with ObsScope(self.observation): + update() + p.update() + + # execute extension when necessary + for name, entry in extensions: + if entry.trigger(self): + entry.extension(self) + + # print("###", self.observation) + except Exception as e: + f = sys.stderr + f.write(f"Exception in main training loop: {e}\n") + f.write("Traceback (most recent call last):\n") + traceback.print_tb(sys.exc_info()[2]) + f.write( + "Trainer extensions will try to handle the extension. Then all extensions will finalize." + ) + + # capture the exception in the mian training loop + exc_info = sys.exc_info() + + # try to handle it + for name, entry in extensions: + if hasattr(entry.extension, "on_error"): + try: + entry.extension.on_error(self, e, sys.exc_info()[2]) + except Exception as ee: + f.write(f"Exception in error handler: {ee}\n") + f.write('Traceback (most recent call last):\n') + traceback.print_tb(sys.exc_info()[2]) + + # raise exception in main training loop + six.reraise(*exc_info) + finally: + for name, entry in extensions: + if hasattr(entry.extension, "finalize"): + entry.extension.finalize(self) diff --git a/deepspeech/training/updaters/updater.py b/deepspeech/training/updaters/updater.py new file mode 100644 index 000000000..e5dd65563 --- /dev/null +++ b/deepspeech/training/updaters/updater.py @@ -0,0 +1,84 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from dataclasses import dataclass + +import paddle + +from deepspeech.utils.log import Log + +__all__ = ["UpdaterBase", "UpdaterState"] + +logger = Log(__name__).getlog() + + +@dataclass +class UpdaterState: + iteration: int = 0 + epoch: int = 0 + + +class UpdaterBase(): + """An updater is the abstraction of how a model is trained given the + dataloader and the optimizer. + The `update_core` method is a step in the training loop with only necessary + operations (get a batch, forward and backward, update the parameters). + Other stuffs are made extensions. Visualization, saving, loading and + periodical validation and evaluation are not considered here. + But even in such simplist case, things are not that simple. There is an + attempt to standardize this process and requires only the model and + dataset and do all the stuffs automatically. But this may hurt flexibility. + If we assume a batch yield from the dataloader is just the input to the + model, we will find that some model requires more arguments, or just some + keyword arguments. But this prevents us from over-simplifying it. + From another perspective, the batch may includes not just the input, but + also the target. But the model's forward method may just need the input. 
+ We can pass a dict or a super-long tuple to the model and let it pick what + it really needs. But this is an abuse of lazy interface. + After all, we care about how a model is trained. But just how the model is + used for inference. We want to control how a model is trained. We just + don't want to be messed up with other auxiliary code. + So the best practice is to define a model and define a updater for it. + """ + + def __init__(self, init_state=None): + # init state + if init_state is None: + self.state = UpdaterState() + else: + self.state = init_state + + def update(self, batch): + raise NotImplementedError( + "Implement your own `update` method for training a step.") + + def state_dict(self): + state_dict = { + "epoch": self.state.epoch, + "iteration": self.state.iteration, + } + return state_dict + + def set_state_dict(self, state_dict): + self.state.epoch = state_dict["epoch"] + self.state.iteration = state_dict["iteration"] + + def save(self, path): + logger.debug(f"Saving to {path}.") + archive = self.state_dict() + paddle.save(archive, str(path)) + + def load(self, path): + logger.debug(f"Loading from {path}.") + archive = paddle.load(str(path)) + self.set_state_dict(archive) diff --git a/deepspeech/utils/bleu_score.py b/deepspeech/utils/bleu_score.py new file mode 100644 index 000000000..09646133a --- /dev/null +++ b/deepspeech/utils/bleu_score.py @@ -0,0 +1,54 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""This module provides functions to calculate bleu score in different level. +e.g. wer for word-level, cer for char-level. +""" +import sacrebleu + +__all__ = ['bleu', 'char_bleu'] + + +def bleu(hypothesis, reference): + """Calculate BLEU. BLEU compares reference text and + hypothesis text in word-level using scarebleu. + + + + :param reference: The reference sentences. + :type reference: list[list[str]] + :param hypothesis: The hypothesis sentence. + :type hypothesis: list[str] + :raises ValueError: If the reference length is zero. + """ + + return sacrebleu.corpus_bleu(hypothesis, reference) + + +def char_bleu(hypothesis, reference): + """Calculate BLEU. BLEU compares reference text and + hypothesis text in char-level using scarebleu. + + + + :param reference: The reference sentences. + :type reference: list[list[str]] + :param hypothesis: The hypothesis sentence. + :type hypothesis: list[str] + :raises ValueError: If the reference number is zero. + """ + hypothesis = [' '.join(list(hyp.replace(' ', ''))) for hyp in hypothesis] + reference = [[' '.join(list(ref_i.replace(' ', ''))) for ref_i in ref] + for ref in reference] + + return sacrebleu.corpus_bleu(hypothesis, reference) diff --git a/deepspeech/utils/checkpoint.py b/deepspeech/utils/checkpoint.py index 8ede6b8fd..8e31edfae 100644 --- a/deepspeech/utils/checkpoint.py +++ b/deepspeech/utils/checkpoint.py @@ -11,9 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. +import glob import json import os import re +from pathlib import Path +from typing import Text from typing import Union import paddle @@ -25,128 +28,271 @@ from deepspeech.utils.log import Log logger = Log(__name__).getlog() -__all__ = ["load_parameters", "save_parameters"] - - -def _load_latest_checkpoint(checkpoint_dir: str) -> int: - """Get the iteration number corresponding to the latest saved checkpoint. - Args: - checkpoint_dir (str): the directory where checkpoint is saved. - Returns: - int: the latest iteration number. -1 for no checkpoint to load. - """ - checkpoint_record = os.path.join(checkpoint_dir, "checkpoint") - if not os.path.isfile(checkpoint_record): - return -1 - - # Fetch the latest checkpoint index. - with open(checkpoint_record, "rt") as handle: - latest_checkpoint = handle.readlines()[-1].strip() - iteration = int(latest_checkpoint.split(":")[-1]) - return iteration - - -def _save_record(checkpoint_dir: str, iteration: int): - """Save the iteration number of the latest model to be checkpoint record. - Args: - checkpoint_dir (str): the directory where checkpoint is saved. - iteration (int): the latest iteration number. - Returns: - None - """ - checkpoint_record = os.path.join(checkpoint_dir, "checkpoint") - # Update the latest checkpoint index. - with open(checkpoint_record, "a+") as handle: - handle.write("model_checkpoint_path:{}\n".format(iteration)) - - -def load_parameters(model, - optimizer=None, - checkpoint_dir=None, - checkpoint_path=None): - """Load a specific model checkpoint from disk. - Args: - model (Layer): model to load parameters. - optimizer (Optimizer, optional): optimizer to load states if needed. - Defaults to None. - checkpoint_dir (str, optional): the directory where checkpoint is saved. - checkpoint_path (str, optional): if specified, load the checkpoint - stored in the checkpoint_path(prefix) and the argument 'checkpoint_dir' will - be ignored. Defaults to None. - Returns: - configs (dict): epoch or step, lr and other meta info should be saved. - """ - configs = {} - - if checkpoint_path is not None: - tag = os.path.basename(checkpoint_path).split(":")[-1] - elif checkpoint_dir is not None: - iteration = _load_latest_checkpoint(checkpoint_dir) - if iteration == -1: - return configs - checkpoint_path = os.path.join(checkpoint_dir, "{}".format(iteration)) - else: - raise ValueError( - "At least one of 'checkpoint_dir' and 'checkpoint_path' should be specified!" - ) - - rank = dist.get_rank() - - params_path = checkpoint_path + ".pdparams" - model_dict = paddle.load(params_path) - model.set_state_dict(model_dict) - logger.info("Rank {}: loaded model from {}".format(rank, params_path)) - - optimizer_path = checkpoint_path + ".pdopt" - if optimizer and os.path.isfile(optimizer_path): - optimizer_dict = paddle.load(optimizer_path) - optimizer.set_state_dict(optimizer_dict) - logger.info("Rank {}: loaded optimizer state from {}".format( - rank, optimizer_path)) - - info_path = re.sub('.pdparams$', '.json', params_path) - if os.path.exists(info_path): - with open(info_path, 'r') as fin: - configs = json.load(fin) - return configs - - -@mp_tools.rank_zero_only -def save_parameters(checkpoint_dir: str, - tag_or_iteration: Union[int, str], - model: paddle.nn.Layer, - optimizer: Optimizer=None, - infos: dict=None): - """Checkpoint the latest trained model parameters. - Args: - checkpoint_dir (str): the directory where checkpoint is saved. 
- tag_or_iteration (int or str): the latest iteration(step or epoch) number. - model (Layer): model to be checkpointed. - optimizer (Optimizer, optional): optimizer to be checkpointed. - Defaults to None. - infos (dict or None): any info you want to save. - Returns: - None - """ - checkpoint_path = os.path.join(checkpoint_dir, - "{}".format(tag_or_iteration)) - - model_dict = model.state_dict() - params_path = checkpoint_path + ".pdparams" - paddle.save(model_dict, params_path) - logger.info("Saved model to {}".format(params_path)) - - if optimizer: - opt_dict = optimizer.state_dict() +__all__ = ["Checkpoint"] + + +class Checkpoint(): + def __init__(self, kbest_n: int=5, latest_n: int=1): + self.best_records: Mapping[Path, float] = {} + self.latest_records = [] + self.kbest_n = kbest_n + self.latest_n = latest_n + self._save_all = (kbest_n == -1) + + def add_checkpoint(self, + checkpoint_dir, + tag_or_iteration: Union[int, Text], + model: paddle.nn.Layer, + optimizer: Optimizer=None, + infos: dict=None, + metric_type="val_loss"): + """Save checkpoint in best_n and latest_n. + + Args: + checkpoint_dir (str): the directory where checkpoint is saved. + tag_or_iteration (int or str): the latest iteration(step or epoch) number or tag. + model (Layer): model to be checkpointed. + optimizer (Optimizer, optional): optimizer to be checkpointed. + infos (dict or None)): any info you want to save. + metric_type (str, optional): metric type. Defaults to "val_loss". + """ + if (metric_type not in infos.keys()): + self._save_parameters(checkpoint_dir, tag_or_iteration, model, + optimizer, infos) + return + + #save best + if self._should_save_best(infos[metric_type]): + self._save_best_checkpoint_and_update( + infos[metric_type], checkpoint_dir, tag_or_iteration, model, + optimizer, infos) + #save latest + self._save_latest_checkpoint_and_update( + checkpoint_dir, tag_or_iteration, model, optimizer, infos) + + if isinstance(tag_or_iteration, int): + self._save_checkpoint_record(checkpoint_dir, tag_or_iteration) + + def load_parameters(self, + model, + optimizer=None, + checkpoint_dir=None, + checkpoint_path=None, + record_file="checkpoint_latest"): + """Load a last model checkpoint from disk. + Args: + model (Layer): model to load parameters. + optimizer (Optimizer, optional): optimizer to load states if needed. + Defaults to None. + checkpoint_dir (str, optional): the directory where checkpoint is saved. + checkpoint_path (str, optional): if specified, load the checkpoint + stored in the checkpoint_path(prefix) and the argument 'checkpoint_dir' will + be ignored. Defaults to None. + record_file "checkpoint_latest" or "checkpoint_best" + Returns: + configs (dict): epoch or step, lr and other meta info should be saved. + """ + configs = {} + + if checkpoint_path is not None: + pass + elif checkpoint_dir is not None and record_file is not None: + # load checkpint from record file + checkpoint_record = os.path.join(checkpoint_dir, record_file) + iteration = self._load_checkpoint_idx(checkpoint_record) + if iteration == -1: + return configs + checkpoint_path = os.path.join(checkpoint_dir, + "{}".format(iteration)) + else: + raise ValueError( + "At least one of 'checkpoint_path' or 'checkpoint_dir' should be specified!" 
+ ) + + rank = dist.get_rank() + + params_path = checkpoint_path + ".pdparams" + model_dict = paddle.load(params_path) + model.set_state_dict(model_dict) + logger.info("Rank {}: Restore model from {}".format(rank, params_path)) + optimizer_path = checkpoint_path + ".pdopt" - paddle.save(opt_dict, optimizer_path) - logger.info("Saved optimzier state to {}".format(optimizer_path)) + if optimizer and os.path.isfile(optimizer_path): + optimizer_dict = paddle.load(optimizer_path) + optimizer.set_state_dict(optimizer_dict) + logger.info("Rank {}: Restore optimizer state from {}".format( + rank, optimizer_path)) + + info_path = re.sub('.pdparams$', '.json', params_path) + if os.path.exists(info_path): + with open(info_path, 'r') as fin: + configs = json.load(fin) + return configs + + def load_latest_parameters(self, + model, + optimizer=None, + checkpoint_dir=None, + checkpoint_path=None): + """Load a last model checkpoint from disk. + Args: + model (Layer): model to load parameters. + optimizer (Optimizer, optional): optimizer to load states if needed. + Defaults to None. + checkpoint_dir (str, optional): the directory where checkpoint is saved. + checkpoint_path (str, optional): if specified, load the checkpoint + stored in the checkpoint_path(prefix) and the argument 'checkpoint_dir' will + be ignored. Defaults to None. + Returns: + configs (dict): epoch or step, lr and other meta info should be saved. + """ + return self.load_parameters(model, optimizer, checkpoint_dir, + checkpoint_path, "checkpoint_latest") + + def load_best_parameters(self, + model, + optimizer=None, + checkpoint_dir=None, + checkpoint_path=None): + """Load a last model checkpoint from disk. + Args: + model (Layer): model to load parameters. + optimizer (Optimizer, optional): optimizer to load states if needed. + Defaults to None. + checkpoint_dir (str, optional): the directory where checkpoint is saved. + checkpoint_path (str, optional): if specified, load the checkpoint + stored in the checkpoint_path(prefix) and the argument 'checkpoint_dir' will + be ignored. Defaults to None. + Returns: + configs (dict): epoch or step, lr and other meta info should be saved. 
+ """ + return self.load_parameters(model, optimizer, checkpoint_dir, + checkpoint_path, "checkpoint_best") + + def _should_save_best(self, metric: float) -> bool: + if not self._best_full(): + return True + + # already full + worst_record_path = max(self.best_records, key=self.best_records.get) + # worst_record_path = max(self.best_records.iteritems(), key=operator.itemgetter(1))[0] + worst_metric = self.best_records[worst_record_path] + return metric < worst_metric + + def _best_full(self): + return (not self._save_all) and len(self.best_records) == self.kbest_n + + def _latest_full(self): + return len(self.latest_records) == self.latest_n + + def _save_best_checkpoint_and_update(self, metric, checkpoint_dir, + tag_or_iteration, model, optimizer, + infos): + # remove the worst + if self._best_full(): + worst_record_path = max(self.best_records, + key=self.best_records.get) + self.best_records.pop(worst_record_path) + if (worst_record_path not in self.latest_records): + logger.info( + "remove the worst checkpoint: {}".format(worst_record_path)) + self._del_checkpoint(checkpoint_dir, worst_record_path) + + # add the new one + self._save_parameters(checkpoint_dir, tag_or_iteration, model, + optimizer, infos) + self.best_records[tag_or_iteration] = metric + + def _save_latest_checkpoint_and_update( + self, checkpoint_dir, tag_or_iteration, model, optimizer, infos): + # remove the old + if self._latest_full(): + to_del_fn = self.latest_records.pop(0) + if (to_del_fn not in self.best_records.keys()): + logger.info( + "remove the latest checkpoint: {}".format(to_del_fn)) + self._del_checkpoint(checkpoint_dir, to_del_fn) + self.latest_records.append(tag_or_iteration) + + self._save_parameters(checkpoint_dir, tag_or_iteration, model, + optimizer, infos) + + def _del_checkpoint(self, checkpoint_dir, tag_or_iteration): + checkpoint_path = os.path.join(checkpoint_dir, + "{}".format(tag_or_iteration)) + for filename in glob.glob(checkpoint_path + ".*"): + os.remove(filename) + logger.info("delete file: {}".format(filename)) + + def _load_checkpoint_idx(self, checkpoint_record: str) -> int: + """Get the iteration number corresponding to the latest saved checkpoint. + Args: + checkpoint_path (str): the saved path of checkpoint. + Returns: + int: the latest iteration number. -1 for no checkpoint to load. + """ + if not os.path.isfile(checkpoint_record): + return -1 + + # Fetch the latest checkpoint index. + with open(checkpoint_record, "rt") as handle: + latest_checkpoint = handle.readlines()[-1].strip() + iteration = int(latest_checkpoint.split(":")[-1]) + return iteration + + def _save_checkpoint_record(self, checkpoint_dir: str, iteration: int): + """Save the iteration number of the latest model to be checkpoint record. + Args: + checkpoint_dir (str): the directory where checkpoint is saved. + iteration (int): the latest iteration number. 
+ Returns: + None + """ + checkpoint_record_latest = os.path.join(checkpoint_dir, + "checkpoint_latest") + checkpoint_record_best = os.path.join(checkpoint_dir, "checkpoint_best") + + with open(checkpoint_record_best, "w") as handle: + for i in self.best_records.keys(): + handle.write("model_checkpoint_path:{}\n".format(i)) + with open(checkpoint_record_latest, "w") as handle: + for i in self.latest_records: + handle.write("model_checkpoint_path:{}\n".format(i)) + + @mp_tools.rank_zero_only + def _save_parameters(self, + checkpoint_dir: str, + tag_or_iteration: Union[int, str], + model: paddle.nn.Layer, + optimizer: Optimizer=None, + infos: dict=None): + """Checkpoint the latest trained model parameters. + Args: + checkpoint_dir (str): the directory where checkpoint is saved. + tag_or_iteration (int or str): the latest iteration(step or epoch) number. + model (Layer): model to be checkpointed. + optimizer (Optimizer, optional): optimizer to be checkpointed. + Defaults to None. + infos (dict or None): any info you want to save. + Returns: + None + """ + checkpoint_path = os.path.join(checkpoint_dir, + "{}".format(tag_or_iteration)) + + model_dict = model.state_dict() + params_path = checkpoint_path + ".pdparams" + paddle.save(model_dict, params_path) + logger.info("Saved model to {}".format(params_path)) - info_path = re.sub('.pdparams$', '.json', params_path) - infos = {} if infos is None else infos - with open(info_path, 'w') as fout: - data = json.dumps(infos) - fout.write(data) + if optimizer: + opt_dict = optimizer.state_dict() + optimizer_path = checkpoint_path + ".pdopt" + paddle.save(opt_dict, optimizer_path) + logger.info("Saved optimzier state to {}".format(optimizer_path)) - if isinstance(tag_or_iteration, int): - _save_record(checkpoint_dir, tag_or_iteration) + info_path = re.sub('.pdparams$', '.json', params_path) + infos = {} if infos is None else infos + with open(info_path, 'w') as fout: + data = json.dumps(infos) + fout.write(data) diff --git a/deepspeech/utils/ctc_utils.py b/deepspeech/utils/ctc_utils.py index 73669fea6..09543d48d 100644 --- a/deepspeech/utils/ctc_utils.py +++ b/deepspeech/utils/ctc_utils.py @@ -38,21 +38,23 @@ def remove_duplicates_and_blank(hyp: List[int], blank_id=0) -> List[int]: new_hyp: List[int] = [] cur = 0 while cur < len(hyp): + # add non-blank into new_hyp if hyp[cur] != blank_id: new_hyp.append(hyp[cur]) + # skip repeat label prev = cur while cur < len(hyp) and hyp[cur] == hyp[prev]: cur += 1 return new_hyp -def insert_blank(label: np.ndarray, blank_id: int=0): +def insert_blank(label: np.ndarray, blank_id: int=0) -> np.ndarray: """Insert blank token between every two label token. "abcdefg" -> "-a-b-c-d-e-f-g-" Args: - label ([np.ndarray]): label ids, (L). + label ([np.ndarray]): label ids, List[int], (L). blank_id (int, optional): blank id. Defaults to 0. Returns: @@ -61,13 +63,13 @@ def insert_blank(label: np.ndarray, blank_id: int=0): label = np.expand_dims(label, 1) #[L, 1] blanks = np.zeros((label.shape[0], 1), dtype=np.int64) + blank_id label = np.concatenate([blanks, label], axis=1) #[L, 2] - label = label.reshape(-1) #[2L] - label = np.append(label, label[0]) #[2L + 1] + label = label.reshape(-1) #[2L], -l-l-l + label = np.append(label, label[0]) #[2L + 1], -l-l-l- return label def forced_align(ctc_probs: paddle.Tensor, y: paddle.Tensor, - blank_id=0) -> list: + blank_id=0) -> List[int]: """ctc forced alignment. 
https://distill.pub/2017/ctc/ @@ -77,23 +79,25 @@ def forced_align(ctc_probs: paddle.Tensor, y: paddle.Tensor, y (paddle.Tensor): label id sequence tensor, 1d tensor (L) blank_id (int): blank symbol index Returns: - paddle.Tensor: best alignment result, (T). + List[int]: best alignment result, (T). """ - y_insert_blank = insert_blank(y, blank_id) + y_insert_blank = insert_blank(y, blank_id) #(2L+1) log_alpha = paddle.zeros( (ctc_probs.size(0), len(y_insert_blank))) #(T, 2L+1) log_alpha = log_alpha - float('inf') # log of zero + # TODO(Hui Zhang): zeros not support paddle.int16 state_path = (paddle.zeros( - (ctc_probs.size(0), len(y_insert_blank)), dtype=paddle.int16) - 1 - ) # state path + (ctc_probs.size(0), len(y_insert_blank)), dtype=paddle.int32) - 1 + ) # state path, Tuple((T, 2L+1)) # init start state - log_alpha[0, 0] = ctc_probs[0][y_insert_blank[0]] # Sb - log_alpha[0, 1] = ctc_probs[0][y_insert_blank[1]] # Snb + # TODO(Hui Zhang): VarBase.__getitem__() not support np.int64 + log_alpha[0, 0] = ctc_probs[0][int(y_insert_blank[0])] # State-b, Sb + log_alpha[0, 1] = ctc_probs[0][int(y_insert_blank[1])] # State-nb, Snb - for t in range(1, ctc_probs.size(0)): - for s in range(len(y_insert_blank)): + for t in range(1, ctc_probs.size(0)): # T + for s in range(len(y_insert_blank)): # 2L+1 if y_insert_blank[s] == blank_id or s < 2 or y_insert_blank[ s] == y_insert_blank[s - 2]: candidates = paddle.to_tensor( @@ -106,11 +110,13 @@ def forced_align(ctc_probs: paddle.Tensor, y: paddle.Tensor, log_alpha[t - 1, s - 2], ]) prev_state = [s, s - 1, s - 2] - log_alpha[t, s] = paddle.max(candidates) + ctc_probs[t][ - y_insert_blank[s]] + # TODO(Hui Zhang): VarBase.__getitem__() not support np.int64 + log_alpha[t, s] = paddle.max(candidates) + ctc_probs[t][int( + y_insert_blank[s])] state_path[t, s] = prev_state[paddle.argmax(candidates)] - state_seq = -1 * paddle.ones((ctc_probs.size(0), 1), dtype=paddle.int16) + # TODO(Hui Zhang): zeros not support paddle.int16 + state_seq = -1 * paddle.ones((ctc_probs.size(0), 1), dtype=paddle.int32) candidates = paddle.to_tensor([ log_alpha[-1, len(y_insert_blank) - 1], # Sb diff --git a/deepspeech/utils/dynamic_import.py b/deepspeech/utils/dynamic_import.py new file mode 100644 index 000000000..533f15eee --- /dev/null +++ b/deepspeech/utils/dynamic_import.py @@ -0,0 +1,67 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
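Before the dynamic_import helper itself, a quick illustration of the blank-insertion convention that the ctc_utils.py changes above rely on (a minimal sketch using the patched insert_blank; the token ids are hypothetical):

    import numpy as np
    from deepspeech.utils.ctc_utils import insert_blank

    label = np.array([7, 3, 5])            # hypothetical token ids for "a b c"
    print(insert_blank(label, blank_id=0))
    # [0 7 0 3 0 5 0]  i.e. "-a-b-c-", length 2L + 1, as noted in the docstring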
+import importlib +import inspect +from typing import Any +from typing import Dict +from typing import List +from typing import Text + +from deepspeech.utils.log import Log +from deepspeech.utils.tensor_utils import has_tensor + +logger = Log(__name__).getlog() + +__all__ = ["dynamic_import", "instance_class"] + + +def dynamic_import(import_path, alias=dict()): + """dynamic import module and class + + :param str import_path: syntax 'module_name:class_name' + e.g., 'deepspeech.models.u2:U2Model' + :param dict alias: shortcut for registered class + :return: imported class + """ + if import_path not in alias and ":" not in import_path: + raise ValueError("import_path should be one of {} or " + 'include ":", e.g. "deepspeech.models.u2:U2Model" : ' + "{}".format(set(alias), import_path)) + if ":" not in import_path: + import_path = alias[import_path] + + module_name, objname = import_path.split(":") + m = importlib.import_module(module_name) + return getattr(m, objname) + + +def filter_valid_args(args: Dict[Text, Any], valid_keys: List[Text]): + # filter by `valid_keys` and filter `val` is not None + new_args = { + key: val + for key, val in args.items() if (key in valid_keys and val is not None) + } + return new_args + + +def filter_out_tenosr(args: Dict[Text, Any]): + return {key: val for key, val in args.items() if not has_tensor(val)} + + +def instance_class(module_class, args: Dict[Text, Any]): + valid_keys = inspect.signature(module_class).parameters.keys() + new_args = filter_valid_args(args, valid_keys) + logger.info( + f"Instance: {module_class.__name__} {filter_out_tenosr(new_args)}.") + return module_class(**new_args) diff --git a/deepspeech/utils/log.py b/deepspeech/utils/log.py index 499b1872f..7e8de600a 100644 --- a/deepspeech/utils/log.py +++ b/deepspeech/utils/log.py @@ -12,16 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. import getpass -import logging import os import socket import sys -FORMAT_STR = '[%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s' -DATE_FMT_STR = '%Y/%m/%d %H:%M:%S' - -logging.basicConfig( - level=logging.DEBUG, format=FORMAT_STR, datefmt=DATE_FMT_STR) +from loguru import logger +from paddle import inference def find_log_dir(log_dir=None): @@ -96,53 +92,54 @@ def find_log_dir_and_names(program_name=None, log_dir=None): class Log(): + """Default Logger for all.""" + logger.remove() + logger.add( + sys.stdout, + level='INFO', + enqueue=True, + filter=lambda record: record['level'].no >= 20) + _, file_prefix, _ = find_log_dir_and_names() + sink_prefix = os.path.join("exp/log", file_prefix) + sink_path = sink_prefix[:-3] + "{time}.log" + logger.add(sink_path, level='DEBUG', enqueue=True, rotation="500 MB") + + def __init__(self, name=None): + pass - log_name = None - - def __init__(self, logger=None): - self.logger = logging.getLogger(logger) - self.logger.setLevel(logging.DEBUG) - - file_dir = os.getcwd() + '/log' - if not os.path.exists(file_dir): - os.mkdir(file_dir) - self.log_dir = file_dir - - actual_log_dir, file_prefix, symlink_prefix = find_log_dir_and_names( - program_name=None, log_dir=self.log_dir) - - basename = '%s.DEBUG.%d' % (file_prefix, os.getpid()) - filename = os.path.join(actual_log_dir, basename) - if Log.log_name is None: - Log.log_name = filename - - # Create a symlink to the log file with a canonical name. 
- symlink = os.path.join(actual_log_dir, symlink_prefix + '.DEBUG') - try: - if os.path.islink(symlink): - os.unlink(symlink) - os.symlink(os.path.basename(Log.log_name), symlink) - except EnvironmentError: - # If it fails, we're sad but it's no error. Commonly, this - # fails because the symlink was created by another user and so - # we can't modify it - pass - - if not self.logger.hasHandlers(): - formatter = logging.Formatter(fmt=FORMAT_STR, datefmt=DATE_FMT_STR) - fh = logging.FileHandler(Log.log_name) - fh.setLevel(logging.DEBUG) - fh.setFormatter(formatter) - self.logger.addHandler(fh) - - ch = logging.StreamHandler() - ch.setLevel(logging.INFO) - ch.setFormatter(formatter) - self.logger.addHandler(ch) - - # stop propagate for propagating may print - # log multiple times - self.logger.propagate = False + def getlog(self): + return logger + + +class Autolog: + """Just used by fullchain project""" + + def __init__(self, + batch_size, + model_name="DeepSpeech", + model_precision="fp32"): + import auto_log + pid = os.getpid() + if (os.environ['CUDA_VISIBLE_DEVICES'].strip() != ''): + gpu_id = int(os.environ['CUDA_VISIBLE_DEVICES'].split(',')[0]) + infer_config = inference.Config() + infer_config.enable_use_gpu(100, gpu_id) + else: + gpu_id = None + infer_config = inference.Config() + autolog = auto_log.AutoLogger( + model_name=model_name, + model_precision=model_precision, + batch_size=batch_size, + data_shape="dynamic", + save_path="./output/auto_log.lpg", + inference_config=infer_config, + pids=pid, + process_name=None, + gpu_ids=gpu_id, + time_keys=['preprocess_time', 'inference_time', 'postprocess_time'], + warmup=0) + self.autolog = autolog def getlog(self): - return self.logger + return self.autolog diff --git a/deepspeech/utils/profiler.py b/deepspeech/utils/profiler.py new file mode 100644 index 000000000..83b003cad --- /dev/null +++ b/deepspeech/utils/profiler.py @@ -0,0 +1,119 @@ +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import sys + +import paddle + +from deepspeech.utils.log import Log + +logger = Log(__name__).getlog() + +# A global variable to record the number of calling times for profiler +# functions. It is used to specify the tracing range of training steps. +_profiler_step_id = 0 + +# A global variable to avoid parsing from string every time. +_profiler_options = None + + +class ProfilerOptions(object): + ''' + Use a string to initialize a ProfilerOptions. + The string should be in the format: "key1=value1;key2=value;key3=value3". + For example: + "profile_path=model.profile" + "batch_range=[50, 60]; profile_path=model.profile" + "batch_range=[50, 60]; tracer_option=OpDetail; profile_path=model.profile" + ProfilerOptions supports following key-value pair: + batch_range - a integer list, e.g. [100, 110]. + state - a string, the optional values are 'CPU', 'GPU' or 'All'. + sorted_key - a string, the optional values are 'calls', 'total', + 'max', 'min' or 'ave. 
+ tracer_option - a string, the optional values are 'Default', 'OpDetail', + 'AllOpDetail'. + profile_path - a string, the path to save the serialized profile data, + which can be used to generate a timeline. + exit_on_finished - a boolean. + ''' + + def __init__(self, options_str): + assert isinstance(options_str, str) + + self._options = { + 'batch_range': [10, 20], + 'state': 'All', + 'sorted_key': 'total', + 'tracer_option': 'Default', + 'profile_path': '/tmp/profile', + 'exit_on_finished': True + } + self._parse_from_string(options_str) + + def _parse_from_string(self, options_str): + if not options_str: + return + + for kv in options_str.replace(' ', '').split(';'): + key, value = kv.split('=') + if key == 'batch_range': + value_list = value.replace('[', '').replace(']', '').split(',') + value_list = list(map(int, value_list)) + if len(value_list) >= 2 and value_list[0] >= 0 and value_list[ + 1] > value_list[0]: + self._options[key] = value_list + elif key == 'exit_on_finished': + self._options[key] = value.lower() in ("yes", "true", "t", "1") + elif key in [ + 'state', 'sorted_key', 'tracer_option', 'profile_path' + ]: + self._options[key] = value + + def __getitem__(self, name): + if self._options.get(name, None) is None: + raise ValueError( + "ProfilerOptions does not have an option named %s." % name) + return self._options[name] + + +def add_profiler_step(options_str=None): + ''' + Enable the operator-level timing using PaddlePaddle's profiler. + The profiler uses a independent variable to count the profiler steps. + One call of this function is treated as a profiler step. + + Args: + profiler_options - a string to initialize the ProfilerOptions. + Default is None, and the profiler is disabled. + ''' + if options_str is None: + return + + global _profiler_step_id + global _profiler_options + + if _profiler_options is None: + _profiler_options = ProfilerOptions(options_str) + logger.info(f"{options_str}") + logger.info(f"{_profiler_options._options}") + + if _profiler_step_id == _profiler_options['batch_range'][0]: + paddle.utils.profiler.start_profiler(_profiler_options['state'], + _profiler_options['tracer_option']) + elif _profiler_step_id == _profiler_options['batch_range'][1]: + paddle.utils.profiler.stop_profiler(_profiler_options['sorted_key'], + _profiler_options['profile_path']) + if _profiler_options['exit_on_finished']: + sys.exit(0) + + _profiler_step_id += 1 diff --git a/deepspeech/utils/socket_server.py b/deepspeech/utils/socket_server.py index adcbf3bb2..45c659f60 100644 --- a/deepspeech/utils/socket_server.py +++ b/deepspeech/utils/socket_server.py @@ -48,9 +48,9 @@ def warm_up_test(audio_process_handler, rng = random.Random(random_seed) samples = rng.sample(manifest, num_test_cases) for idx, sample in enumerate(samples): - print("Warm-up Test Case %d: %s", idx, sample['audio_filepath']) + print("Warm-up Test Case %d: %s" % (idx, sample['feat'])) start_time = time.time() - transcript = audio_process_handler(sample['audio_filepath']) + transcript = audio_process_handler(sample['feat']) finish_time = time.time() print("Response Time: %f, Transcript: %s" % (finish_time - start_time, transcript)) diff --git a/deepspeech/utils/tensor_utils.py b/deepspeech/utils/tensor_utils.py index 7679d9e1c..3519f4fa5 100644 --- a/deepspeech/utils/tensor_utils.py +++ b/deepspeech/utils/tensor_utils.py @@ -19,11 +19,25 @@ import paddle from deepspeech.utils.log import Log -__all__ = ["pad_sequence", "add_sos_eos", "th_accuracy"] +__all__ = ["pad_sequence", "add_sos_eos", "th_accuracy", 
"has_tensor"] logger = Log(__name__).getlog() +def has_tensor(val): + if isinstance(val, (list, tuple)): + for item in val: + if has_tensor(item): + return True + elif isinstance(val, dict): + for k, v in val.items(): + print(k) + if has_tensor(v): + return True + else: + return paddle.is_tensor(val) + + def pad_sequence(sequences: List[paddle.Tensor], batch_first: bool=False, padding_value: float=0.0) -> paddle.Tensor: @@ -154,13 +168,7 @@ def th_accuracy(pad_outputs: paddle.Tensor, pad_pred = pad_outputs.view( pad_targets.size(0), pad_targets.size(1), pad_outputs.size(1)).argmax(2) mask = pad_targets != ignore_label - #TODO(Hui Zhang): sum not support bool type - # numerator = paddle.sum( - # pad_pred.masked_select(mask) == pad_targets.masked_select(mask)) - numerator = ( + numerator = paddle.sum( pad_pred.masked_select(mask) == pad_targets.masked_select(mask)) - numerator = paddle.sum(numerator.type_as(pad_targets)) - #TODO(Hui Zhang): sum not support bool type - # denominator = paddle.sum(mask) - denominator = paddle.sum(mask.type_as(pad_targets)) + denominator = paddle.sum(mask) return float(numerator) / float(denominator) diff --git a/deepspeech/utils/text_grid.py b/deepspeech/utils/text_grid.py new file mode 100644 index 000000000..3af58c9ba --- /dev/null +++ b/deepspeech/utils/text_grid.py @@ -0,0 +1,127 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Dict +from typing import List +from typing import Text + +import textgrid + + +def segment_alignment(alignment: List[int], blank_id=0) -> List[List[int]]: + """segment ctc alignment ids by continuous blank and repeat label. + + Args: + alignment (List[int]): ctc alignment id sequence. + e.g. [0, 0, 0, 1, 1, 1, 2, 0, 0, 3] + blank_id (int, optional): blank id. Defaults to 0. + + Returns: + List[List[int]]: token align, segment aligment id sequence. + e.g. [[0, 0, 0, 1, 1, 1], [2], [0, 0, 3]] + """ + # convert alignment to a praat format, which is a doing phonetics + # by computer and helps analyzing alignment + align_segs = [] + # get frames level duration for each token + start = 0 + end = 0 + while end < len(alignment): + while end < len(alignment) and alignment[end] == blank_id: # blank + end += 1 + if end == len(alignment): + align_segs[-1].extend(alignment[start:]) + break + end += 1 + while end < len(alignment) and alignment[end - 1] == alignment[ + end]: # repeat label + end += 1 + align_segs.append(alignment[start:end]) + start = end + return align_segs + + +def align_to_tierformat(align_segs: List[List[int]], + subsample: int, + token_dict: Dict[int, Text], + blank_id=0) -> List[Text]: + """Generate textgrid.Interval format from alignment segmentations. + + Args: + align_segs (List[List[int]]): segmented ctc alignment ids. + subsample (int): 25ms frame_length, 10ms hop_length, 1/subsample + token_dict (Dict[int, Text]): int -> str map. + + Returns: + List[Text]: list of textgrid.Interval text, str(start, end, text). 
+ + +def align_to_tierformat(align_segs: List[List[int]], + subsample: int, + token_dict: Dict[int, Text], + blank_id=0) -> List[Text]: + """Generate textgrid.Interval format from alignment segmentations. + + Args: + align_segs (List[List[int]]): segmented ctc alignment ids. + subsample (int): subsample rate of the encoder; with 25 ms frames and a 10 ms hop, each alignment step covers subsample * 10 ms. + token_dict (Dict[int, Text]): int -> str map. + blank_id (int, optional): blank id. Defaults to 0. + + Returns: + List[Text]: list of textgrid.Interval text, str(start, end, text). + """ + hop_length = 10 # ms + second_ms = 1000 # ms + frame_per_second = second_ms / hop_length # 25ms frame_length, 10ms hop_length + second_per_frame = 1.0 / frame_per_second + + begin = 0 + duration = 0 + tierformat = [] +
+ for idx, tokens in enumerate(align_segs): + token_len = len(tokens) + token = tokens[-1] + # time duration in second + duration = token_len * subsample * second_per_frame + if idx < len(align_segs) - 1: + print(f"{begin:.2f} {begin + duration:.2f} {token_dict[token]}") + tierformat.append( + f"{begin:.2f} {begin + duration:.2f} {token_dict[token]}\n") + else: + for i in tokens: + if i != blank_id: + token = i + break + print(f"{begin:.2f} {begin + duration:.2f} {token_dict[token]}") + tierformat.append( + f"{begin:.2f} {begin + duration:.2f} {token_dict[token]}\n") + begin = begin + duration + + return tierformat + +
+def generate_textgrid(maxtime: float, + intervals: List[Text], + output: Text, + name: Text='ali') -> None: + """Create alignment textgrid file. + + Args: + maxtime (float): audio duration in seconds. + intervals (List[Text]): ctc output alignment. e.g. "start-time end-time word" per item. + output (Text): textgrid filepath. + name (Text, optional): tier or layer name. Defaults to 'ali'. + """ + # Download Praat: https://www.fon.hum.uva.nl/praat/ + avg_interval = maxtime / (len(intervals) + 1) + print(f"average second/token: {avg_interval}") + margin = 0.0001 + + tg = textgrid.TextGrid(maxTime=maxtime) + tier = textgrid.IntervalTier(name=name, maxTime=maxtime) + + for dur in intervals: + s, e, text = dur.split() + tier.add(minTime=float(s) + margin, maxTime=float(e), mark=text) + + tg.append(tier) + + tg.write(output) + print("successfully generated textgrid {}.".format(output))
diff --git a/deepspeech/utils/utility.py b/deepspeech/utils/utility.py index 64570026b..6f84c41be 100644 --- a/deepspeech/utils/utility.py +++ b/deepspeech/utils/utility.py @@ -15,9 +15,31 @@ import distutils.util import math import os +import random +from contextlib import contextmanager from typing import List
-__all__ = ['print_arguments', 'add_arguments', "log_add"] +import numpy as np +import paddle + +__all__ = [ + "UpdateConfig", "seed_all", 'print_arguments', 'add_arguments', "log_add" +] + +
+@contextmanager +def UpdateConfig(config): + """Update yacs config""" + config.defrost() + yield + config.freeze() + +
+def seed_all(seed: int=210329): + """freeze random generator seed.""" + np.random.seed(seed) + random.seed(seed) + paddle.seed(seed)
def print_arguments(args, info=None): @@ -79,3 +101,22 @@ def log_add(args: List[int]) -> float: a_max = max(args) lsp = math.log(sum(math.exp(a - a_max) for a in args)) return a_max + lsp + +
+def get_subsample(config): + """Subsample rate from config. + + Args: + config (yacs.config.CfgNode): yaml config + + Returns: + int: subsample rate.
+ """ + input_layer = config["model"]["encoder_conf"]["input_layer"] + assert input_layer in ["conv2d", "conv2d6", "conv2d8"] + if input_layer == "conv2d": + return 4 + elif input_layer == "conv2d6": + return 6 + elif input_layer == "conv2d8": + return 8 diff --git a/doc/images/multi_gpu_speedup.png b/doc/images/multi_gpu_speedup.png deleted file mode 100755 index 286de5151..000000000 Binary files a/doc/images/multi_gpu_speedup.png and /dev/null differ diff --git a/doc/images/tuning_error_surface.png b/doc/images/tuning_error_surface.png deleted file mode 100644 index 2204cee2f..000000000 Binary files a/doc/images/tuning_error_surface.png and /dev/null differ diff --git a/doc/src/benchmark.md b/doc/src/benchmark.md deleted file mode 100644 index 9c1c86fd7..000000000 --- a/doc/src/benchmark.md +++ /dev/null @@ -1,16 +0,0 @@ -# Benchmarks - -## Acceleration with Multi-GPUs - -We compare the training time with 1, 2, 4, 8 Tesla V100 GPUs (with a subset of LibriSpeech samples whose audio durations are between 6.0 and 7.0 seconds). And it shows that a **near-linear** acceleration with multiple GPUs has been achieved. In the following figure, the time (in seconds) cost for training is printed on the blue bars. - - - -| # of GPU | Acceleration Rate | -| -------- | --------------: | -| 1 | 1.00 X | -| 2 | 1.98 X | -| 4 | 3.73 X | -| 8 | 6.95 X | - -`utils/profile.sh` provides such a demo profiling tool, you can change it as need. diff --git a/doc/src/faq.md b/doc/src/faq.md deleted file mode 100644 index e29428176..000000000 --- a/doc/src/faq.md +++ /dev/null @@ -1,37 +0,0 @@ -# FAQ - -1. 音频变速快慢到达什么晨读会影响识别率? - - 变速会提升识别效果,一般用0.9, 1.0, 1.1 的变速。 - -2. 音量大小到什么程度会影响识别率? - - 一般训练会固定音量到一个范围内,波动过大会影响训练,估计在10dB ~ 20dB吧。 - -3. 语音模型训练数据的最小时长要求时多少? - - Aishell-1大约178h的数据,数据越多越好。 - -4. 那些噪声或背景生会影响识别率? - - 主要是人生干扰和低信噪比会影响识别率。 - -5. 单条语音数据的长度限制是多少? - - 一般训练的语音长度会限制在1s~6s之间,和训练配置有关。 - -6. 背景声在识别前是否需要分离出来,或做降噪处理? - - 需要分离的,需要结合具体场景考虑。 - -7. 模型是否带有VAD人生激活识别能力? - - VAD是单独的模型或模块,模型不包含此能力。 - -8. 是否支持长语音识别? - - 一般过VAD后识别。 - -9. Mandarin LM Large语言模型需要的硬件配置时怎样的? - - 内存能放得下LM即可。 diff --git a/doc/src/reference.md b/doc/src/reference.md deleted file mode 100644 index 69ff6ab88..000000000 --- a/doc/src/reference.md +++ /dev/null @@ -1,3 +0,0 @@ -# Reference - -* [wenet](https://github.com/mobvoi/wenet) diff --git a/doc/src/server.md b/doc/src/server.md deleted file mode 100644 index 4918d5ebe..000000000 --- a/doc/src/server.md +++ /dev/null @@ -1,34 +0,0 @@ - -# Trying Live Demo with Your Own Voice - -Until now, an ASR model is trained and tested qualitatively (`infer`) and quantitatively (`test`) with existing audio files. But it is not yet tested with your own speech. We build up a real-time demo ASR engine with the trained model, enabling you to test and play around with the demo, with your own voice. - -First, change your directory to `examples/aishell` and `source path.sh`. - -To start the demo's server, please run this in one console: - -```bash -CUDA_VISIBLE_DEVICES=0 bash local/server.sh -``` - -For the machine (might not be the same machine) to run the demo's client, please do the following installation before moving on. - -For example, on MAC OS X: - -```bash -brew install portaudio -pip install pyaudio -pip install keyboard -``` - -Then to start the client, please run this in another console: - -```bash -CUDA_VISIBLE_DEVICES=0 bash local/client.sh -``` - -Now, in the client console, press the `whitespace` key, hold, and start speaking. 
Until finishing your utterance, release the key to let the speech-to-text results shown in the console. To quit the client, just press `ESC` key. - -Notice that `deepspeech/exps/deepspeech2/deploy/client.py` must be run on a machine with a microphone device, while `deepspeech/exps/deepspeech2/deploy/server.py` could be run on one without any audio recording hardware, e.g. any remote server machine. Just be careful to set the `host_ip` and `host_port` argument with the actual accessible IP address and port, if the server and client are running with two separate machines. Nothing should be done if they are running on one single machine. - -Please also refer to `examples/aishell/local/server.sh`, which will first download a pre-trained Chinese model (trained with AISHELL1) and then start the demo server with the model. With running `examples/aishell/local/client.sh`, you can speak Chinese to test it. If you would like to try some other models, just update `--checkpoint_path` argument in the script.
diff --git a/docs/images/ds2offlineModel.png b/docs/images/ds2offlineModel.png new file mode 100644 index 000000000..0d8722ab0 Binary files /dev/null and b/docs/images/ds2offlineModel.png differ
diff --git a/docs/images/ds2onlineModel.png b/docs/images/ds2onlineModel.png new file mode 100644 index 000000000..97a0e5619 Binary files /dev/null and b/docs/images/ds2onlineModel.png differ
diff --git a/doc/src/augmentation.md b/docs/src/augmentation.md similarity index 100% rename from doc/src/augmentation.md rename to docs/src/augmentation.md
diff --git a/doc/src/data_preparation.md b/docs/src/data_preparation.md similarity index 100% rename from doc/src/data_preparation.md rename to docs/src/data_preparation.md
diff --git a/docs/src/deepspeech_architecture.md b/docs/src/deepspeech_architecture.md new file mode 100644 index 000000000..580b13882 --- /dev/null +++ b/docs/src/deepspeech_architecture.md @@ -0,0 +1,190 @@
+# Deepspeech2
+## Streaming
+
+The implemented architecture of the Deepspeech2 online model is based on the [Deepspeech2 model](https://arxiv.org/pdf/1512.02595.pdf) with some changes.
+The model is mainly composed of a 2D convolution subsampling layer and stacked single-direction rnn layers.
+
+To illustrate the model implementation clearly, three parts are described in detail.
+- Data Preparation
+- Encoder
+- Decoder
+
+In addition, the training process and the testing process are also introduced.
+
+The architecture of the model is shown in Fig.1.

+Fig.1 The architecture of the DeepSpeech2 online model (figure: docs/images/ds2onlineModel.png)
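+The following is a minimal, illustrative PaddlePaddle sketch of this topology (layer sizes, the feature dimension and the vocabulary size are made-up assumptions; the actual implementation is in deepspeech/models/ds2_online/deepspeech2.py):
+```
+import paddle
+import paddle.nn as nn
+
+class TinyDS2Online(nn.Layer):
+    """Toy sketch: conv subsampling -> stacked forward RNN -> projection -> log-softmax."""
+
+    def __init__(self, feat_dim=161, rnn_size=1024, num_rnn_layers=5, vocab_size=4000):
+        super().__init__()
+        # two 2D conv layers subsample the time and frequency axes of the input feature
+        self.conv = nn.Sequential(
+            nn.Conv2D(1, 32, kernel_size=3, stride=2, padding=1), nn.ReLU(),
+            nn.Conv2D(32, 32, kernel_size=3, stride=2, padding=1), nn.ReLU())
+        # stacked single-direction RNN layers (the real model offers LSTM or GRU cells)
+        self.rnn = nn.GRU(input_size=32 * ((feat_dim + 3) // 4),  # flattened conv output size (approximate)
+                          hidden_size=rnn_size,
+                          num_layers=num_rnn_layers,
+                          direction='forward')
+        # projection layer: per-frame hidden state -> vocabulary-sized logits for CTC
+        self.proj = nn.Linear(rnn_size, vocab_size)
+
+    def forward(self, feat):            # feat: [B, T, feat_dim]
+        x = feat.unsqueeze(1)           # [B, 1, T, feat_dim]
+        x = self.conv(x)                # [B, 32, T', D']
+        b, c, t, d = x.shape
+        x = x.transpose([0, 2, 1, 3]).reshape([b, t, c * d])
+        x, _ = self.rnn(x)              # [B, T', rnn_size]
+        logits = self.proj(x)           # [B, T', vocab_size]
+        return paddle.nn.functional.log_softmax(logits, axis=-1)
+```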
+
+### Data Preparation
+#### Vocabulary
+For English data, the vocabulary dictionary is composed of the 26 English characters, " ' ", space, and the special tokens `<blank>`, `<unk>` and `<eos>`. The `<blank>` token represents the blank label in CTC, `<unk>` represents the unknown character and `<eos>` represents the start and the end of a sentence. For Mandarin, the vocabulary dictionary is composed of the Chinese characters collected from the training set, plus the same three special tokens `<blank>`, `<unk>` and `<eos>`. For both English and Mandarin data, the default indices are `<blank>`=0, `<unk>`=1 and `<eos>`=last index.
+```
+ # The code to build vocabulary
+ cd examples/aishell/s0
+ python3 ../../../utils/build_vocab.py \
+ --unit_type="char" \
+ --count_threshold=0 \
+ --vocab_path="data/vocab.txt" \
+ --manifest_paths "data/manifest.train.raw" "data/manifest.dev.raw"
+
+# vocabulary for aishell dataset (Mandarin)
+vi examples/aishell/s0/data/vocab.txt
+
+# vocabulary for librispeech dataset (English)
+vi examples/librispeech/s0/data/vocab.txt
+```
+
+#### CMVN
+For CMVN, a subset of the training set (or the full training set) is chosen and used to compute the feature mean and std.
+```
+ # The code to compute the feature mean and std
+cd examples/aishell/s0
+python3 ../../../utils/compute_mean_std.py \
+ --manifest_path="data/manifest.train.raw" \
+ --specgram_type="linear" \
+ --delta_delta=false \
+ --stride_ms=10.0 \
+ --window_ms=20.0 \
+ --sample_rate=16000 \
+ --use_dB_normalization=True \
+ --num_samples=2000 \
+ --num_workers=10 \
+ --output_path="data/mean_std.json"
+
+```
+
+#### Feature Extraction
+ For feature extraction, three methods are implemented: linear (FFT without using a filter bank), fbank and mfcc.
+ Currently, the released deepspeech2 online model uses the linear feature extraction method.
+ ```
+ # The code for feature extraction
+ vi deepspeech/frontend/featurizer/audio_featurizer.py
+ ```
+
+### Encoder
+The encoder is composed of two 2D convolution subsampling layers and a number of stacked single-direction rnn layers. The 2D convolution subsampling layers extract a feature representation from the raw audio feature and reduce the length of the audio feature at the same time. After passing through the convolution subsampling layers, the feature representation is fed into the stacked rnn layers. For the stacked rnn layers, both LSTM cells and GRU cells are provided. Adding one fully connected (fc) layer after the stacked rnn layers is optional; if the number of stacked rnn layers is less than 5, adding one fc layer after the stacked rnn layers is recommended.
+
+The code of the Encoder is in:
+```
+vi deepspeech/models/ds2_online/deepspeech2.py
+```
+
+### Decoder
+To get the character probabilities of each frame, the feature representation of each frame output by the encoder is fed into a projection layer, which is implemented as a dense layer that performs the feature projection. The output dimension of the projection layer is the same as the vocabulary size. After the projection layer, the softmax function transforms the frame-level feature representation into character probabilities. During inference, the character probabilities of each frame are passed to the CTC decoder to get the final speech recognition results.
+
+The code of the decoder is in:
+```
+# The code of constructing the decoder in model
+vi deepspeech/models/ds2_online/deepspeech2.py
+# The code of CTC Decoder
+vi deepspeech/modules/ctc.py
+```
+
+## Training Process
+Using the command below, you can train the deepspeech2 online model.
+```
+ cd examples/aishell/s0
+ bash run.sh --stage 0 --stop_stage 2 --model_type online --conf_path conf/deepspeech2_online.yaml
+```
+The detailed commands are:
+```
+# The code for training in run.sh
+set -e
+source path.sh
+
+gpus=2,3,5,7
+stage=0
+stop_stage=5
+conf_path=conf/deepspeech2_online.yaml # conf/deepspeech2.yaml | conf/deepspeech2_online.yaml
+avg_num=1
+model_type=online # online | offline
+
+source ${MAIN_ROOT}/utils/parse_options.sh || exit 1;
+
+avg_ckpt=avg_${avg_num}
+ckpt=$(basename ${conf_path} | awk -F'.' '{print $1}')
+echo "checkpoint name ${ckpt}"
+
+if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
+ # prepare data
+ bash ./local/data.sh || exit -1
+fi
+
+if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
+ # train model, all `ckpt` under `exp` dir
+ CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${ckpt} ${model_type}
+fi
+
+if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
+ # avg n best model
+ avg.sh exp/${ckpt}/checkpoints ${avg_num}
+fi
+```
+
+By using the command above, the training process can be started. There are 5 stages in "run.sh", and the first three are used for training. Stage 0 is used for data preparation: the dataset is downloaded, and the manifest files of the datasets, the vocabulary dictionary and the CMVN file are generated in "./data/". Stage 1 is used for training the model; the log files and model checkpoints are saved in "exp/deepspeech2_online/". Stage 2 generates the final model for prediction by averaging the top-k checkpoints ranked by validation loss.
+
+## Testing Process
+Using the command below, you can test the deepspeech2 online model.
+ ```
+ bash run.sh --stage 3 --stop_stage 5 --model_type online --conf_path conf/deepspeech2_online.yaml
+```
+The detailed commands are:
+```
+conf_path=conf/deepspeech2_online.yaml
+avg_num=1
+model_type=online
+avg_ckpt=avg_${avg_num}
+
+ if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
+ # test ckpt avg_n
+ CUDA_VISIBLE_DEVICES=2 ./local/test.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} ${model_type}|| exit -1
+fi
+
+if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
+ # export ckpt avg_n
+ CUDA_VISIBLE_DEVICES=5 ./local/export.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} exp/${ckpt}/checkpoints/${avg_ckpt}.jit ${model_type}
+fi
+
+if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
+ # test export ckpt avg_n
+ CUDA_VISIBLE_DEVICES=0 ./local/test_export.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt}.jit ${model_type}|| exit -1
+fi
+ ```
+After training, stages 3, 4 and 5 are used for testing. Stage 3 tests the model generated in stage 2 and reports the CER on the test set. Stage 4 transforms the model from a dynamic graph to a static graph by using the "paddle.jit" library. Stage 5 tests the exported static-graph model.
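+The dynamic-to-static conversion in stage 4 is conceptually just `paddle.jit` saving with an input spec. The sketch below is illustrative only (the toy layer and the output path are assumptions, not the repo's actual export code, which is invoked through local/export.sh):
+```
+import paddle
+from paddle.static import InputSpec
+
+# toy stand-in for a trained dynamic-graph model
+layer = paddle.nn.Linear(161, 4000)
+# trace the layer with a symbolic input and save a static-graph program + parameters
+paddle.jit.save(
+    layer,
+    path='exp/deepspeech2_online/checkpoints/avg_1.jit',
+    input_spec=[InputSpec(shape=[None, 161], dtype='float32')])
+```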

+
+## Non-Streaming
+The deepspeech2 offline model is similar to the deepspeech2 online model. The main difference between them is that the offline model uses stacked bi-directional rnn layers, while the online model uses single-direction rnn layers and no fc layer. For the stacked bi-directional rnn layers in the offline model, both the rnn cell and the gru cell are provided.
+
+The architecture of the model is shown in Fig.2.
+
+Fig.2 The architecture of the DeepSpeech2 offline model (figure: docs/images/ds2offlineModel.png)
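+As an illustrative sketch (the input size here is a made-up assumption; the hidden size and layer counts follow the released configs), the difference essentially boils down to the direction and cell type of the Paddle RNN stack:
+```
+import paddle.nn as nn
+
+# online (streaming) encoder: forward-only LSTM stack
+online_rnn = nn.LSTM(input_size=1312, hidden_size=1024, num_layers=5, direction='forward')
+# offline encoder: bi-directional GRU stack
+offline_rnn = nn.GRU(input_size=1312, hidden_size=1024, num_layers=3, direction='bidirect')
+```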
+ +For data preparation and the decoder, the deepspeech2 offline model is the same as the deepspeech2 online model.
+
+The code of the encoder and decoder for the deepspeech2 offline model is in:
+```
+vi deepspeech/models/ds2/deepspeech2.py
+```
+
+The training and testing processes of the deepspeech2 offline model are very similar to those of the deepspeech2 online model.
+Only a few changes should be noticed.
+
+For training and testing, the "model_type" and the "conf_path" must be set.
+ ```
+# Training offline
+cd examples/aishell/s0
+bash run.sh --stage 0 --stop_stage 2 --model_type offline --conf_path conf/deepspeech2.yaml
+```
+```
+# Testing offline
+cd examples/aishell/s0
+bash run.sh --stage 3 --stop_stage 5 --model_type offline --conf_path conf/deepspeech2.yaml
+```
diff --git a/doc/src/feature_list.md b/docs/src/feature_list.md similarity index 78% rename from doc/src/feature_list.md rename to docs/src/feature_list.md index 573669fa2..4639ddd6f 100644 --- a/doc/src/feature_list.md +++ b/docs/src/feature_list.md @@ -1,13 +1,20 @@ -# Featrues +# Features + +### Dataset +* Aishell +* Librispeech +* THCHS30 +* TIMIT ### Speech Recognition -* Offline +* Non-Streaming * [Baidu's DeepSpeech2](http://proceedings.mlr.press/v48/amodei16.pdf) * [Transformer](https://arxiv.org/abs/1706.03762) * [Conformer](https://arxiv.org/abs/2005.08100) -* Online +* Streaming + * [Baidu's DeepSpeech2](http://proceedings.mlr.press/v48/amodei16.pdf) * [U2](https://arxiv.org/pdf/2012.05481.pdf) ### Language Model @@ -22,6 +29,15 @@ * beam search * attention rescore +### Deployment + +* Paddle Inference + +### Alignment + +* MFA +* CTC Alignment + ### Speech Frontend * Audio
diff --git a/doc/src/getting_started.md b/docs/src/getting_started.md similarity index 100% rename from doc/src/getting_started.md rename to docs/src/getting_started.md
diff --git a/doc/src/install.md b/docs/src/install.md similarity index 95% rename from doc/src/install.md rename to docs/src/install.md index 01049a2fc..8cecba125 100644 --- a/doc/src/install.md +++ b/docs/src/install.md @@ -4,15 +4,16 @@ To avoid the trouble of environment setup, [running in Docker container](#runnin ## Prerequisites - Python >= 3.7 -- PaddlePaddle 2.0.0 or later (please refer to the [Installation Guide](https://www.paddlepaddle.org.cn/documentation/docs/en/beginners_guide/index_en.html)) +- PaddlePaddle latest version (please refer to the [Installation Guide](https://www.paddlepaddle.org.cn/documentation/docs/en/beginners_guide/index_en.html)) -## Setup +## Setup (Important) - Make sure these libraries or tools installed: `pkg-config`, `flac`, `ogg`, `vorbis`, `boost`, `sox, and `swig`, e.g. installing them via `apt-get`: ```bash sudo apt-get install -y sox pkg-config libflac-dev libogg-dev libvorbis-dev libboost-dev swig python3-dev ``` +The version of `swig` should be >= 3.0 or, installing them via `yum`:
diff --git a/doc/src/ngram_lm.md b/docs/src/ngram_lm.md similarity index 64% rename from doc/src/ngram_lm.md rename to docs/src/ngram_lm.md index 119a3b21c..7872df22d 100644 --- a/doc/src/ngram_lm.md +++ b/docs/src/ngram_lm.md @@ -35,52 +35,3 @@ Different from the English language model, Mandarin language model is character- * A whitespace character between two tokens is inserted. Please notice that the released language models only contain Chinese simplified characters. After preprocessing done we can begin to train the language model. The key training arguments for small LM is '-o 5 --prune 0 1 2 4 4' and '-o 5' for large LM.
Please refer above section for the meaning of each argument. We also convert the arpa file to binary file using default settings. - - - -## [KenLM](http://kheafield.com/code/kenlm/) - -统计语言模型工具有比较多的选择,目前使用比较好的有srilm及kenlm,其中kenlm比srilm晚出来,训练速度也更快,而且支持单机大数据的训练。现在介绍一下kenlm的使用方法。 - -1. 工具包的下载地址:http://kheafield.com/code/kenlm.tar.gz - -2. 使用。该工具在linux环境下使用方便。 先确保linux环境已经按照1.36.0的Boost和zlib - - ``` - boost: - yum install boost - yum install boost-devel - - zlib: - yum install zlib - yum install zlib-devel - ``` - - 然后gcc版本需要是4.8.2及以上。 - - ``` - wget -O - https://kheafield.com/code/kenlm.tar.gz |tar xz - mkdir kenlm/build - cd kenlm/build - cmake .. - make -j2 - ``` - -3. 训练。使用如下命令进行训练: - - ``` - build/bin/lmplz -o 3 --verbose_header --text people2014corpus_words.txt --arpa result/people2014corpus_words.arps - ``` - - 其中, - 1)people2014corpus_words.txt文件必须是分词以后的文件。 - - 训练语料<人民日报2014版熟语料>,包括: 1)标准人工切词及词性数据people2014.tar.gz, 2)未切词文本数据people2014_words.txt, 3)kenlm训练字粒度语言模型文件及其二进制文件people2014corpus_chars.arps/klm, 4)kenlm词粒度语言模型文件及其二进制文件people2014corpus_words.arps/klm。 - - 2)-o后面的5表示的是5-gram,一般取到3即可,但可以结合自己实际情况判断。 - -4. 压缩。压缩模型为二进制,方便模型快速加载: - - ``` - build/bin/build_binary ./result/people2014corpus_words.arps ./result/people2014corpus_words.klm - ``` diff --git a/docs/src/reference.md b/docs/src/reference.md new file mode 100644 index 000000000..d3676fff2 --- /dev/null +++ b/docs/src/reference.md @@ -0,0 +1,8 @@ +# Reference + +We refer these repos to build `model` and `engine`: + +* [delta](https://github.com/Delta-ML/delta.git) +* [espnet](https://github.com/espnet/espnet.git) +* [kaldi](https://github.com/kaldi-asr/kaldi.git) +* [wenet](https://github.com/mobvoi/wenet) diff --git a/doc/src/released_model.md b/docs/src/released_model.md similarity index 59% rename from doc/src/released_model.md rename to docs/src/released_model.md index 0919bba58..581cff45f 100644 --- a/doc/src/released_model.md +++ b/docs/src/released_model.md @@ -1,5 +1,12 @@ # Released Models +## Acoustic Model Released +Acoustic Model | Training Data | Token-based | Size | Descriptions | CER | Hours of speech +:-------------:| :------------:| :-----: | -----: | :----------------- | :---------- | :--------- +[Ds2 Online Aishell Model](https://deepspeech.bj.bcebos.com/release2.1/aishell/s0/aishell.s0.ds_online.5rnn.debug.tar.gz) | Aishell Dataset | Char-based | 345 MB | 2 Conv + 5 LSTM layers with only forward direction | 0.0824 | 151 h +[Ds2 Offline Aishell Model](https://deepspeech.bj.bcebos.com/release2.1/aishell/s0/aishell.s0.ds2.offline.cer6p65.release.tar.gz)| Aishell Dataset | Char-based | 306 MB | 2 Conv + 3 bidirectional gru layers| 0.065 | 151 h + + ## Language Model Released Language Model | Training Data | Token-based | Size | Descriptions diff --git a/env.sh b/env.sh index c5acd0112..461586e7d 100644 --- a/env.sh +++ b/env.sh @@ -1,10 +1,10 @@ export MAIN_ROOT=${PWD} -export PATH=${MAIN_ROOT}:${MAIN_ROOT}/utils:${PATH} +export PATH=${MAIN_ROOT}:${MAIN_ROOT}/utils:/usr/local/bin:${PATH} export LC_ALL=C # Use UTF-8 in Python to avoid UnicodeDecodeError when LC_ALL=C -export PYTHONIOENCODING=UTF-8 +export PYTHONIOENCODING=UTF-8 export PYTHONPATH=${MAIN_ROOT}:${PYTHONPATH} export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib/ diff --git a/examples/aishell/README.md b/examples/aishell/README.md index 8bb28a26c..c2534b9e3 100644 --- a/examples/aishell/README.md +++ b/examples/aishell/README.md @@ -1,3 +1,4 @@ # ASR -* s0 for deepspeech2 + +* s0 for deepspeech2 offline * s1 for u2 diff --git 
a/examples/aishell/s0/README.md b/examples/aishell/s0/README.md index 8c1a51b62..ee0f1405e 100644 --- a/examples/aishell/s0/README.md +++ b/examples/aishell/s0/README.md @@ -1,10 +1,20 @@ # Aishell-1 +## Data +| Data Subset | Duration in Seconds | +| data/manifest.train | 1.23 ~ 14.53125 | +| data/manifest.dev | 1.645 ~ 12.533 | +| data/manifest.test | 1.859125 ~ 14.6999375 | + ## Deepspeech2 -| Model | release | Config | Test set | Loss | CER | -| --- | --- | --- | --- | --- | --- | -| DeepSpeech2 | 2.1.0 | conf/deepspeech2.yaml + spec aug | test | 7.483316898345947 | 0.077860 | -| DeepSpeech2 | 2.1.0 | conf/deepspeech2.yaml | test | 7.299022197723389 | 0.078671 | -| DeepSpeech2 | 2.0.0 | conf/deepspeech2.yaml | test | - | 0.078977 | -| DeepSpeech2 | 1.8.5 | - | test | - | 0.080447 | +| Model | Params | Release | Config | Test set | Loss | CER | +| --- | --- | --- | --- | --- | --- | --- | +| DeepSpeech2 | 58.4M | 2.2.0 | conf/deepspeech2.yaml + spec aug | test | 6.016139030456543 | 0.066549 | +| --- | --- | --- | --- | --- | --- | --- | +| DeepSpeech2 | 58.4M | 7181e427 | conf/deepspeech2.yaml + spec aug | test | 5.71956205368042 | 0.064287 | +| DeepSpeech2 | 58.4M | 2.1.0 | conf/deepspeech2.yaml + spec aug | test | 7.483316898345947 | 0.077860 | +| DeepSpeech2 | 58.4M | 2.1.0 | conf/deepspeech2.yaml | test | 7.299022197723389 | 0.078671 | +| DeepSpeech2 | 58.4M | 2.0.0 | conf/deepspeech2.yaml | test | - | 0.078977 | +| --- | --- | --- | --- | --- | --- | --- | +| DeepSpeech2 | 58.4M | 1.8.5 | - | test | - | 0.080447 | diff --git a/examples/aishell/s0/conf/augmentation.json b/examples/aishell/s0/conf/augmentation.json index 1987ad424..31c481c8d 100644 --- a/examples/aishell/s0/conf/augmentation.json +++ b/examples/aishell/s0/conf/augmentation.json @@ -19,15 +19,17 @@ { "type": "specaug", "params": { + "W": 0, + "warp_mode": "PIL", "F": 10, - "T": 50, "n_freq_masks": 2, + "T": 50, "n_time_masks": 2, "p": 1.0, - "W": 80, "adaptive_number_ratio": 0, "adaptive_size_ratio": 0, - "max_n_time_masks": 20 + "max_n_time_masks": 20, + "replace_with_zero": true }, "prob": 1.0 } diff --git a/examples/aishell/s0/conf/deepspeech2.yaml b/examples/aishell/s0/conf/deepspeech2.yaml index 8b08ee308..9560930ac 100644 --- a/examples/aishell/s0/conf/deepspeech2.yaml +++ b/examples/aishell/s0/conf/deepspeech2.yaml @@ -3,31 +3,36 @@ data: train_manifest: data/manifest.train dev_manifest: data/manifest.dev test_manifest: data/manifest.test - mean_std_filepath: data/mean_std.json - vocab_filepath: data/vocab.txt - augmentation_config: conf/augmentation.json - batch_size: 64 # one gpu min_input_len: 0.0 max_input_len: 27.0 # second min_output_len: 0.0 max_output_len: .inf min_output_input_ratio: 0.00 max_output_input_ratio: .inf + +collator: + batch_size: 64 # one gpu + mean_std_filepath: data/mean_std.json + unit_type: char + vocab_filepath: data/vocab.txt + augmentation_config: conf/augmentation.json + random_seed: 0 + spm_model_prefix: specgram_type: linear - target_sample_rate: 16000 - max_freq: None - n_fft: None + feat_dim: + delta_delta: False stride_ms: 10.0 window_ms: 20.0 - delta_delta: False - dither: 1.0 + n_fft: None + max_freq: None + target_sample_rate: 16000 use_dB_normalization: True target_dB: -20 - random_seed: 0 + dither: 1.0 keep_transcription_text: False sortagrad: True shuffle_method: batch_shuffle - num_workers: 0 + num_workers: 2 model: num_conv_layers: 2 @@ -35,14 +40,20 @@ model: rnn_layer_size: 1024 use_gru: True share_rnn_weights: False + blank_id: 0 + ctc_grad_norm_type: instance 
training: - n_epoch: 50 + n_epoch: 80 + accum_grad: 1 lr: 2e-3 lr_decay: 0.83 weight_decay: 1e-06 global_grad_clip: 3.0 log_interval: 100 + checkpoint: + kbest_n: 50 + latest_n: 5 decoding: batch_size: 128 diff --git a/examples/aishell/s0/conf/deepspeech2_online.yaml b/examples/aishell/s0/conf/deepspeech2_online.yaml new file mode 100644 index 000000000..7e87594cc --- /dev/null +++ b/examples/aishell/s0/conf/deepspeech2_online.yaml @@ -0,0 +1,70 @@ +# https://yaml.org/type/float.html +data: + train_manifest: data/manifest.train + dev_manifest: data/manifest.dev + test_manifest: data/manifest.test + min_input_len: 0.0 + max_input_len: 27.0 # second + min_output_len: 0.0 + max_output_len: .inf + min_output_input_ratio: 0.00 + max_output_input_ratio: .inf + +collator: + batch_size: 64 # one gpu + mean_std_filepath: data/mean_std.json + unit_type: char + vocab_filepath: data/vocab.txt + augmentation_config: conf/augmentation.json + random_seed: 0 + spm_model_prefix: + specgram_type: linear #linear, mfcc, fbank + feat_dim: + delta_delta: False + stride_ms: 10.0 + window_ms: 20.0 + n_fft: None + max_freq: None + target_sample_rate: 16000 + use_dB_normalization: True + target_dB: -20 + dither: 1.0 + keep_transcription_text: False + sortagrad: True + shuffle_method: batch_shuffle + num_workers: 0 + +model: + num_conv_layers: 2 + num_rnn_layers: 5 + rnn_layer_size: 1024 + rnn_direction: forward # [forward, bidirect] + num_fc_layers: 0 + fc_layers_size_list: -1, + use_gru: False + blank_id: 0 + ctc_grad_norm_type: instance + +training: + n_epoch: 50 + accum_grad: 1 + lr: 2e-3 + lr_decay: 0.9 # 0.83 + weight_decay: 1e-06 + global_grad_clip: 3.0 + log_interval: 100 + checkpoint: + kbest_n: 50 + latest_n: 5 + +decoding: + batch_size: 32 + error_rate_type: cer + decoding_method: ctc_beam_search + lang_model_path: data/lm/zh_giga.no_cna_cmn.prune01244.klm + alpha: 2.2 #1.9 + beta: 5.0 + beam_size: 300 + cutoff_prob: 0.99 + cutoff_top_n: 40 + num_proc_bsearch: 10 diff --git a/examples/aishell/s0/local/client.sh b/examples/aishell/s0/local/client.sh deleted file mode 100755 index d626ecc75..000000000 --- a/examples/aishell/s0/local/client.sh +++ /dev/null @@ -1,20 +0,0 @@ -#! /usr/bin/env bash - -source path.sh - -# run on MacOS -# brew install portaudio -# pip install pyaudio -# pip install keyboard - -# start demo client -python3 -u ${BIN_DIR}/deploy/client.py \ ---host_ip="localhost" \ ---host_port=8086 \ - -if [ $? -ne 0 ]; then - echo "Failed in starting demo client!" - exit 1 -fi - -exit 0 diff --git a/examples/aishell/s0/local/data.sh b/examples/aishell/s0/local/data.sh index 2f09b14ad..b106f3f28 100755 --- a/examples/aishell/s0/local/data.sh +++ b/examples/aishell/s0/local/data.sh @@ -1,4 +1,4 @@ -#! /usr/bin/env bash +#!/bin/bash stage=-1 stop_stage=100 diff --git a/examples/aishell/s0/local/download_lm_ch.sh b/examples/aishell/s0/local/download_lm_ch.sh index f9e2261fd..ac27a9076 100755 --- a/examples/aishell/s0/local/download_lm_ch.sh +++ b/examples/aishell/s0/local/download_lm_ch.sh @@ -1,4 +1,4 @@ -#! /usr/bin/env bash +#!/bin/bash . ${MAIN_ROOT}/utils/utility.sh diff --git a/examples/aishell/s0/local/export.sh b/examples/aishell/s0/local/export.sh index 1b19d5720..2e09e5f5e 100755 --- a/examples/aishell/s0/local/export.sh +++ b/examples/aishell/s0/local/export.sh @@ -1,7 +1,7 @@ -#! 
/usr/bin/env bash +#!/bin/bash -if [ $# != 3 ];then - echo "usage: $0 config_path ckpt_prefix jit_model_path" +if [ $# != 4 ];then + echo "usage: $0 config_path ckpt_prefix jit_model_path model_type" exit -1 fi @@ -11,9 +11,10 @@ echo "using $ngpu gpus..." config_path=$1 ckpt_path_prefix=$2 jit_model_export_path=$3 +model_type=$4 device=gpu -if [ ngpu == 0 ];then +if [ ${ngpu} == 0 ];then device=cpu fi @@ -22,8 +23,8 @@ python3 -u ${BIN_DIR}/export.py \ --nproc ${ngpu} \ --config ${config_path} \ --checkpoint_path ${ckpt_path_prefix} \ ---export_path ${jit_model_export_path} - +--export_path ${jit_model_export_path} \ +--model_type ${model_type} if [ $? -ne 0 ]; then echo "Failed in export!" diff --git a/examples/aishell/s0/local/server.sh b/examples/aishell/s0/local/server.sh deleted file mode 100755 index 1cf069ebd..000000000 --- a/examples/aishell/s0/local/server.sh +++ /dev/null @@ -1,40 +0,0 @@ -#! /usr/bin/env bash -# TODO: replace the model with a mandarin model - -if [[ $# != 1 ]];then - echo "usage: $1 checkpoint_path" - exit -1 -fi - -source path.sh - -# download language model -bash local/download_lm_ch.sh -if [ $? -ne 0 ]; then - exit 1 -fi - -# download well-trained model -bash local/download_model.sh -if [ $? -ne 0 ]; then - exit 1 -fi - -# start demo server -CUDA_VISIBLE_DEVICES=0 \ -python3 -u ${BIN_DIR}/deploy/server.py \ ---device 'gpu' \ ---nproc 1 \ ---config conf/deepspeech2.yaml \ ---host_ip="localhost" \ ---host_port=8086 \ ---speech_save_dir="demo_cache" \ ---checkpoint_path ${1} - -if [ $? -ne 0 ]; then - echo "Failed in starting demo server!" - exit 1 -fi - - -exit 0 diff --git a/examples/aishell/s0/local/test.sh b/examples/aishell/s0/local/test.sh index 6fd298202..9fd0bc8d5 100755 --- a/examples/aishell/s0/local/test.sh +++ b/examples/aishell/s0/local/test.sh @@ -1,7 +1,7 @@ -#! /usr/bin/env bash +#!/bin/bash -if [ $# != 2 ];then - echo "usage: ${0} config_path ckpt_path_prefix" +if [ $# != 3 ];then + echo "usage: ${0} config_path ckpt_path_prefix model_type" exit -1 fi @@ -9,11 +9,12 @@ ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}') echo "using $ngpu gpus..." device=gpu -if [ ngpu == 0 ];then +if [ ${ngpu} == 0 ];then device=cpu fi config_path=$1 ckpt_prefix=$2 +model_type=$3 # download language model bash local/download_lm_ch.sh @@ -26,7 +27,8 @@ python3 -u ${BIN_DIR}/test.py \ --nproc 1 \ --config ${config_path} \ --result_file ${ckpt_prefix}.rsl \ ---checkpoint_path ${ckpt_prefix} +--checkpoint_path ${ckpt_prefix} \ +--model_type ${model_type} if [ $? -ne 0 ]; then echo "Failed in evaluation!" diff --git a/examples/aishell/s0/local/test_export.sh b/examples/aishell/s0/local/test_export.sh new file mode 100755 index 000000000..b6d580979 --- /dev/null +++ b/examples/aishell/s0/local/test_export.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +if [ $# != 3 ];then + echo "usage: ${0} config_path ckpt_path_prefix model_type" + exit -1 +fi + +ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}') +echo "using $ngpu gpus..." + +device=gpu +if [ ${ngpu} == 0 ];then + device=cpu +fi +config_path=$1 +jit_model_export_path=$2 +model_type=$3 + +# download language model +bash local/download_lm_ch.sh +if [ $? -ne 0 ]; then + exit 1 +fi + +python3 -u ${BIN_DIR}/test_export.py \ +--device ${device} \ +--nproc 1 \ +--config ${config_path} \ +--result_file ${jit_model_export_path}.rsl \ +--export_path ${jit_model_export_path} \ +--model_type ${model_type} + +if [ $? -ne 0 ]; then + echo "Failed in evaluation!" 
+ exit 1 +fi + + +exit 0 diff --git a/examples/aishell/s0/local/train.sh b/examples/aishell/s0/local/train.sh index f8c9dbc0b..668ad0ead 100755 --- a/examples/aishell/s0/local/train.sh +++ b/examples/aishell/s0/local/train.sh @@ -1,7 +1,7 @@ -#! /usr/bin/env bash +#!/bin/bash -if [ $# != 2 ];then - echo "usage: CUDA_VISIBLE_DEVICES=0 ${0} config_path ckpt_name" +if [ $# != 3 ];then + echo "usage: CUDA_VISIBLE_DEVICES=0 ${0} config_path ckpt_name model_type" exit -1 fi @@ -10,19 +10,32 @@ echo "using $ngpu gpus..." config_path=$1 ckpt_name=$2 +model_type=$3 device=gpu -if [ ngpu == 0 ];then +if [ ${ngpu} == 0 ];then device=cpu fi mkdir -p exp +# seed may break model convergence +seed=10086 +if [ ${seed} != 0 ]; then + export FLAGS_cudnn_deterministic=True +fi + python3 -u ${BIN_DIR}/train.py \ --device ${device} \ --nproc ${ngpu} \ --config ${config_path} \ ---output exp/${ckpt_name} +--output exp/${ckpt_name} \ +--model_type ${model_type} \ +--seed ${seed} + +if [ ${seed} != 0 ]; then + unset FLAGS_cudnn_deterministic +fi if [ $? -ne 0 ]; then echo "Failed in training!" diff --git a/examples/aishell/s0/local/tune.sh b/examples/aishell/s0/local/tune.sh deleted file mode 100755 index 9ff5e8b99..000000000 --- a/examples/aishell/s0/local/tune.sh +++ /dev/null @@ -1,28 +0,0 @@ -#! /usr/bin/env bash - -# grid-search for hyper-parameters in language model -python3 -u ${BIN_DIR}/tune.py \ ---device 'gpu' \ ---nproc 1 \ ---config conf/deepspeech2.yaml \ ---num_batches=10 \ ---batch_size=128 \ ---beam_size=300 \ ---num_proc_bsearch=8 \ ---num_alphas=10 \ ---num_betas=10 \ ---alpha_from=0.0 \ ---alpha_to=5.0 \ ---beta_from=-6 \ ---beta_to=6 \ ---cutoff_prob=1.0 \ ---cutoff_top_n=40 \ ---checkpoint_path ${1} - -if [ $? -ne 0 ]; then - echo "Failed in tuning!" - exit 1 -fi - - -exit 0 diff --git a/examples/aishell/s0/path.sh b/examples/aishell/s0/path.sh index 552b96783..e6d3a655b 100644 --- a/examples/aishell/s0/path.sh +++ b/examples/aishell/s0/path.sh @@ -1,4 +1,4 @@ -export MAIN_ROOT=${PWD}/../../../ +export MAIN_ROOT=`realpath ${PWD}/../../../` export PATH=${MAIN_ROOT}:${MAIN_ROOT}/utils:${PATH} export LC_ALL=C diff --git a/examples/aishell/s0/run.sh b/examples/aishell/s0/run.sh index 4073c81b9..71191c3ac 100755 --- a/examples/aishell/s0/run.sh +++ b/examples/aishell/s0/run.sh @@ -2,11 +2,12 @@ set -e source path.sh -gpus=0 +gpus=0,1,2,3 stage=0 stop_stage=100 conf_path=conf/deepspeech2.yaml avg_num=1 +model_type=offline source ${MAIN_ROOT}/utils/parse_options.sh || exit 1; @@ -21,20 +22,25 @@ fi if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then # train model, all `ckpt` under `exp` dir - CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${ckpt} + CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${ckpt} ${model_type} fi if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then # avg n best model - avg.sh exp/${ckpt}/checkpoints ${avg_num} + avg.sh best exp/${ckpt}/checkpoints ${avg_num} fi if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then # test ckpt avg_n - CUDA_VISIBLE_DEVICES=${gpus} ./local/test.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} || exit -1 + CUDA_VISIBLE_DEVICES=0 ./local/test.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} ${model_type}|| exit -1 fi if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then # export ckpt avg_n - CUDA_VISIBLE_DEVICES=${gpus} ./local/export.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} exp/${ckpt}/checkpoints/${avg_ckpt}.jit + CUDA_VISIBLE_DEVICES=0 ./local/export.sh ${conf_path} 
exp/${ckpt}/checkpoints/${avg_ckpt} exp/${ckpt}/checkpoints/${avg_ckpt}.jit ${model_type} +fi + +if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then + # test export ckpt avg_n + CUDA_VISIBLE_DEVICES=0 ./local/test_export.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt}.jit ${model_type}|| exit -1 fi diff --git a/examples/aishell/s1/README.md b/examples/aishell/s1/README.md index 2048c4d58..07cc569ed 100644 --- a/examples/aishell/s1/README.md +++ b/examples/aishell/s1/README.md @@ -2,15 +2,26 @@ ## Conformer -| Model | Config | Augmentation| Test set | Decode method | Loss | WER | -| --- | --- | --- | --- | --- | --- | --- | -| conformer | conf/conformer.yaml | spec_aug + shift | test | attention | - | 0.059858 | -| conformer | conf/conformer.yaml | spec_aug + shift | test | ctc_greedy_search | - | 0.062311 | -| conformer | conf/conformer.yaml | spec_aug + shift | test | ctc_prefix_beam_search | - | 0.062196 | -| conformer | conf/conformer.yaml | spec_aug + shift | test | attention_rescoring | - | 0.054694 | +| Model | Params | Config | Augmentation| Test set | Decode method | Loss | WER | +| --- | --- | --- | --- | --- | --- | --- | --- | +| conformer | 47.07M | conf/conformer.yaml | spec_aug + shift | test | attention | - | 0.059858 | +| conformer | 47.07M | conf/conformer.yaml | spec_aug + shift | test | ctc_greedy_search | - | 0.062311 | +| conformer | 47.07M | conf/conformer.yaml | spec_aug + shift | test | ctc_prefix_beam_search | - | 0.062196 | +| conformer | 47.07M | conf/conformer.yaml | spec_aug + shift | test | attention_rescoring | - | 0.054694 | + + +## Chunk Conformer + +| Model | Params | Config | Augmentation| Test set | Decode method | Chunk Size & Left Chunks | Loss | WER | +| --- | --- | --- | --- | --- | --- | --- | --- | --- | +| conformer | 47.06M | conf/chunk_conformer.yaml | spec_aug + shift | test | attention | 16, -1 | - | 0.061939 | +| conformer | 47.06M | conf/chunk_conformer.yaml | spec_aug + shift | test | ctc_greedy_search | 16, -1 | - | 0.070806 | +| conformer | 47.06M | conf/chunk_conformer.yaml | spec_aug + shift | test | ctc_prefix_beam_search | 16, -1 | - | 0.070739 | +| conformer | 47.06M | conf/chunk_conformer.yaml | spec_aug + shift | test | attention_rescoring | 16, -1 | - | 0.059400 | + ## Transformer -| Model | Config | Augmentation| Test set | Decode method | Loss | WER | -| --- | --- | --- | --- | --- | --- | ---| -| transformer | conf/transformer.yaml | spec_aug + shift | test | attention | - | - | +| Model | Params | Config | Augmentation| Test set | Decode method | Loss | WER | +| --- | --- | --- | --- | --- | --- | --- | ---| +| transformer | - | conf/transformer.yaml | spec_aug + shift | test | attention | - | - | diff --git a/examples/aishell/s1/conf/augmentation.json b/examples/aishell/s1/conf/augmentation.json index 1987ad424..d0409b142 100644 --- a/examples/aishell/s1/conf/augmentation.json +++ b/examples/aishell/s1/conf/augmentation.json @@ -27,7 +27,9 @@ "W": 80, "adaptive_number_ratio": 0, "adaptive_size_ratio": 0, - "max_n_time_masks": 20 + "max_n_time_masks": 20, + "replace_with_zero": true, + "warp_mode": "PIL" }, "prob": 1.0 } diff --git a/examples/aishell/s1/conf/chunk_conformer.yaml b/examples/aishell/s1/conf/chunk_conformer.yaml index 904624c3c..6f8ae135f 100644 --- a/examples/aishell/s1/conf/chunk_conformer.yaml +++ b/examples/aishell/s1/conf/chunk_conformer.yaml @@ -3,17 +3,20 @@ data: train_manifest: data/manifest.train dev_manifest: data/manifest.dev test_manifest: data/manifest.test - vocab_filepath: data/vocab.txt - 
unit_type: 'char' - spm_model_prefix: '' - augmentation_config: conf/augmentation.json - batch_size: 32 min_input_len: 0.5 max_input_len: 20.0 # second min_output_len: 0.0 max_output_len: 400.0 min_output_input_ratio: 0.05 max_output_input_ratio: 10.0 + + +collator: + vocab_filepath: data/vocab.txt + unit_type: 'char' + spm_model_prefix: '' + augmentation_config: conf/augmentation.json + batch_size: 32 raw_wav: True # use raw_wav or kaldi feature specgram_type: fbank #linear, mfcc, fbank feat_dim: 80 @@ -30,7 +33,7 @@ data: keep_transcription_text: False sortagrad: True shuffle_method: batch_shuffle - num_workers: 0 + num_workers: 2 # network architecture @@ -73,12 +76,14 @@ model: # hybrid CTC/attention model_conf: ctc_weight: 0.3 + ctc_dropoutrate: 0.0 + ctc_grad_norm_type: instance lsm_weight: 0.1 # label smoothing option length_normalized_loss: false training: - n_epoch: 180 + n_epoch: 240 accum_grad: 4 global_grad_clip: 5.0 optim: adam @@ -90,6 +95,9 @@ training: warmup_steps: 25000 lr_decay: 1.0 log_interval: 100 + checkpoint: + kbest_n: 50 + latest_n: 5 decoding: diff --git a/examples/aishell/s1/conf/conformer.yaml b/examples/aishell/s1/conf/conformer.yaml index b880f8587..a4248459c 100644 --- a/examples/aishell/s1/conf/conformer.yaml +++ b/examples/aishell/s1/conf/conformer.yaml @@ -3,17 +3,20 @@ data: train_manifest: data/manifest.train dev_manifest: data/manifest.dev test_manifest: data/manifest.test - vocab_filepath: data/vocab.txt - unit_type: 'char' - spm_model_prefix: '' - augmentation_config: conf/augmentation.json - batch_size: 64 min_input_len: 0.5 max_input_len: 20.0 # second min_output_len: 0.0 max_output_len: 400.0 min_output_input_ratio: 0.05 max_output_input_ratio: 10.0 + + +collator: + vocab_filepath: data/vocab.txt + unit_type: 'char' + spm_model_prefix: '' + augmentation_config: conf/augmentation.json + batch_size: 64 raw_wav: True # use raw_wav or kaldi feature specgram_type: fbank #linear, mfcc, fbank feat_dim: 80 @@ -32,7 +35,6 @@ data: shuffle_method: batch_shuffle num_workers: 2 - # network architecture model: cmvn_file: "data/mean_std.json" @@ -69,6 +71,8 @@ model: # hybrid CTC/attention model_conf: ctc_weight: 0.3 + ctc_dropoutrate: 0.0 + ctc_grad_norm_type: instance lsm_weight: 0.1 # label smoothing option length_normalized_loss: false @@ -86,6 +90,9 @@ training: warmup_steps: 25000 lr_decay: 1.0 log_interval: 100 + checkpoint: + kbest_n: 50 + latest_n: 5 decoding: diff --git a/examples/aishell/s1/local/aishell_train_lms.sh b/examples/aishell/s1/local/aishell_train_lms.sh new file mode 100755 index 000000000..7fb555b46 --- /dev/null +++ b/examples/aishell/s1/local/aishell_train_lms.sh @@ -0,0 +1,58 @@ +#!/bin/bash + +# To be run from one directory above this script. +. ./path.sh + +text=data/local/lm/text +lexicon=data/local/dict/lexicon.txt + +for f in "$text" "$lexicon"; do + [ ! -f $x ] && echo "$0: No such file $f" && exit 1; +done + +# Check SRILM tools +if ! which ngram-count > /dev/null; then + echo "srilm tools are not found, please download it and install it from: " + echo "http://www.speech.sri.com/projects/srilm/download.html" + echo "Then add the tools to your PATH" + exit 1 +fi + +# This script takes no arguments. It assumes you have already run +# aishell_data_prep.sh. 
+# It takes as input the files
+# data/local/lm/text
+# data/local/dict/lexicon.txt
+dir=data/local/lm
+mkdir -p $dir
+
+
+cleantext=$dir/text.no_oov
+
+cat $text | awk -v lex=$lexicon 'BEGIN{while((getline<lex) >0){ seen[$1]=1; } }
+ {for(n=1; n<=NF;n++) { if (seen[$n]) { printf("%s ", $n); } else {printf("<SPOKEN_NOISE> ");} } printf("\n");}' \
+ > $cleantext || exit 1;
+
+cat $cleantext | awk '{for(n=2;n<=NF;n++) print $n; }' | sort | uniq -c | \
+ sort -nr > $dir/word.counts || exit 1;
+
+# Get counts from acoustic training transcripts, and add one-count
+# for each word in the lexicon (but not silence, we don't want it
+# in the LM-- we'll add it optionally later).
+cat $cleantext | awk '{for(n=2;n<=NF;n++) print $n; }' | \
+ cat - <(grep -w -v '!SIL' $lexicon | awk '{print $1}') | \
+ sort | uniq -c | sort -nr > $dir/unigram.counts || exit 1;
+
+cat $dir/unigram.counts | awk '{print $2}' | cat - <(echo "<s>"; echo "</s>" ) > $dir/wordlist
+
+heldout_sent=10000 # Don't change this if you want result to be comparable with
+ # kaldi_lm results
+mkdir -p $dir
+cat $cleantext | awk '{for(n=2;n<=NF;n++){ printf $n; if(n<NF) printf " "; else print ""; }}' | head -$heldout_sent > $dir/heldout
+cat $cleantext | awk '{for(n=2;n<=NF;n++){ printf $n; if(n<NF) printf " "; else print ""; }}' | tail -n +$heldout_sent > $dir/train
+
+ngram-count -text $dir/train -order 3 -limit-vocab -vocab $dir/wordlist -unk \
+ -map-unk "<UNK>" -kndiscount -interpolate -lm $dir/lm.arpa
+ngram -lm $dir/lm.arpa -ppl $dir/heldout \ No newline at end of file
diff --git a/examples/aishell/s1/local/align.sh b/examples/aishell/s1/local/align.sh new file mode 100755 index 000000000..ad6c84bc8 --- /dev/null +++ b/examples/aishell/s1/local/align.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +if [ $# != 2 ];then + echo "usage: ${0} config_path ckpt_path_prefix" + exit -1 +fi + +ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}') +echo "using $ngpu gpus..." + +device=gpu +if [ ${ngpu} == 0 ];then + device=cpu +fi +config_path=$1 +ckpt_prefix=$2 + +batch_size=1 +output_dir=${ckpt_prefix} +mkdir -p ${output_dir} + +# align dump in `result_file` +# .tier, .TextGrid dump in `dir of result_file` +python3 -u ${BIN_DIR}/alignment.py \ +--device ${device} \ +--nproc 1 \ +--config ${config_path} \ +--result_file ${output_dir}/${type}.align \ +--checkpoint_path ${ckpt_prefix} \ +--opts decoding.batch_size ${batch_size} + +if [ $? -ne 0 ]; then + echo "Failed in ctc alignment!" + exit 1 +fi + +exit 0
diff --git a/examples/aishell/s1/local/data.sh b/examples/aishell/s1/local/data.sh index c6abce3b4..8d5ac4d59 100755 --- a/examples/aishell/s1/local/data.sh +++ b/examples/aishell/s1/local/data.sh @@ -1,4 +1,4 @@ -#! /usr/bin/env bash +#!/bin/bash stage=-1 stop_stage=100
diff --git a/examples/aishell/s1/local/download_lm_ch.sh b/examples/aishell/s1/local/download_lm_ch.sh deleted file mode 120000 index 6541d91c5..000000000 --- a/examples/aishell/s1/local/download_lm_ch.sh +++ /dev/null @@ -1 +0,0 @@ -../../s0/local/download_lm_ch.sh \ No newline at end of file
diff --git a/examples/aishell/s1/local/export.sh b/examples/aishell/s1/local/export.sh index 1b19d5720..f99a15bad 100755 --- a/examples/aishell/s1/local/export.sh +++ b/examples/aishell/s1/local/export.sh @@ -1,4 +1,4 @@ -#!
/usr/bin/env bash +#!/bin/bash if [ $# != 3 ];then echo "usage: $0 config_path ckpt_prefix jit_model_path" @@ -13,7 +13,7 @@ ckpt_path_prefix=$2 jit_model_export_path=$3 device=gpu -if [ ngpu == 0 ];then +if [ ${ngpu} == 0 ];then device=cpu fi diff --git a/examples/aishell/s1/local/test.sh b/examples/aishell/s1/local/test.sh index 073aaffd4..f7e99ad7f 100755 --- a/examples/aishell/s1/local/test.sh +++ b/examples/aishell/s1/local/test.sh @@ -1,4 +1,4 @@ -#! /usr/bin/env bash +#!/bin/bash if [ $# != 2 ];then echo "usage: ${0} config_path ckpt_path_prefix" @@ -9,15 +9,17 @@ ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}') echo "using $ngpu gpus..." device=gpu -if [ ngpu == 0 ];then +if [ ${ngpu} == 0 ];then device=cpu fi + config_path=$1 ckpt_prefix=$2 -ckpt_name=$(basename ${ckpt_prefxi}) - -mkdir -p exp +chunk_mode=false +if [[ ${config_path} =~ ^.*chunk_.*yaml$ ]];then + chunk_mode=true +fi # download language model #bash local/download_lm_ch.sh @@ -28,7 +30,12 @@ mkdir -p exp for type in attention ctc_greedy_search; do echo "decoding ${type}" - batch_size=64 + if [ ${chunk_mode} == true ];then + # stream decoding only support batchsize=1 + batch_size=1 + else + batch_size=64 + fi output_dir=${ckpt_prefix} mkdir -p ${output_dir} python3 -u ${BIN_DIR}/test.py \ diff --git a/examples/aishell/s1/local/tlg.sh b/examples/aishell/s1/local/tlg.sh new file mode 100755 index 000000000..f5287f794 --- /dev/null +++ b/examples/aishell/s1/local/tlg.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +set -eo pipefail + +stage=-1 +stop_stage=100 +corpus=aishell +lmtype=srilm + +source utils/parse_options.sh + +data=${MAIN_ROOT}/examples/dataset/${corpus} +lexicon=$data/resource_aishell/lexicon.txt +text=$data/data_aishell/transcript/aishell_transcript_v0.8.txt + +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + # 7.1 Prepare dict + unit_file=data/vocab.txt + mkdir -p data/local/dict + cp $unit_file data/local/dict/units.txt + utils/fst/prepare_dict.py \ + --unit_file $unit_file \ + --in_lexicon ${lexicon} \ + --out_lexicon data/local/dict/lexicon.txt +fi + +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + # 7.2 Train lm + lm=data/local/lm + mkdir -p data/train + mkdir -p $lm + utils/manifest_key_value.py \ + --manifest_path data/manifest.train \ + --output_path data/train + utils/filter_scp.pl data/train/text \ + $text > $lm/text + if [ $lmtype == 'srilm' ];then + local/aishell_train_lms.sh + else + utils/ngram_train.sh --order 3 $lm/text $lm/lm.arpa + fi +fi + +if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then + # 7.3 Build decoding TLG + utils/fst/compile_lexicon_token_fst.sh \ + data/local/dict data/local/tmp data/local/lang + utils/fst/make_tlg.sh data/local/lm data/local/lang data/lang_test || exit 1; +fi + +echo "Aishell build TLG done." +exit 0 diff --git a/examples/aishell/s1/local/train.sh b/examples/aishell/s1/local/train.sh index a4218aa86..5097d4d03 100755 --- a/examples/aishell/s1/local/train.sh +++ b/examples/aishell/s1/local/train.sh @@ -1,29 +1,51 @@ -#! /usr/bin/env bash +#!/bin/bash -if [ $# != 2 ];then - echo "usage: CUDA_VISIBLE_DEVICES=0 ${0} config_path ckpt_name" - exit -1 -fi +profiler_options= +benchmark_batch_size=0 +benchmark_max_step=0 + +# seed may break model convergence +seed=0 + +source ${MAIN_ROOT}/utils/parse_options.sh || exit 1; ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}') echo "using $ngpu gpus..." -config_path=$1 -ckpt_name=$2 - device=gpu -if [ ngpu == 0 ];then +if [ ${ngpu} == 0 ];then device=cpu fi -echo "using ${device}..." 
+ +if [ ${seed} != 0 ]; then + export FLAGS_cudnn_deterministic=True + echo "using seed $seed & FLAGS_cudnn_deterministic=True ..." +fi + +if [ $# != 2 ];then + echo "usage: CUDA_VISIBLE_DEVICES=0 ${0} config_path ckpt_name" + exit -1 +fi + +config_path=$1 +ckpt_name=$2 mkdir -p exp python3 -u ${BIN_DIR}/train.py \ +--seed ${seed} \ --device ${device} \ --nproc ${ngpu} \ --config ${config_path} \ ---output exp/${ckpt_name} +--output exp/${ckpt_name} \ +--profiler-options "${profiler_options}" \ +--benchmark-batch-size ${benchmark_batch_size} \ +--benchmark-max-step ${benchmark_max_step} + + +if [ ${seed} != 0 ]; then + unset FLAGS_cudnn_deterministic +fi if [ $? -ne 0 ]; then echo "Failed in training!" diff --git a/examples/aishell/s1/path.sh b/examples/aishell/s1/path.sh index 30adb6ca0..6807a9505 100644 --- a/examples/aishell/s1/path.sh +++ b/examples/aishell/s1/path.sh @@ -1,14 +1,28 @@ -export MAIN_ROOT=${PWD}/../../../ +export MAIN_ROOT=`realpath ${PWD}/../../../` export PATH=${MAIN_ROOT}:${MAIN_ROOT}/utils:${PATH} export LC_ALL=C # Use UTF-8 in Python to avoid UnicodeDecodeError when LC_ALL=C -export PYTHONIOENCODING=UTF-8 +export PYTHONIOENCODING=UTF-8 export PYTHONPATH=${MAIN_ROOT}:${PYTHONPATH} export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib/ - +# model exp MODEL=u2 export BIN_DIR=${MAIN_ROOT}/deepspeech/exps/${MODEL}/bin + + +# srilm +export LIBLBFGS=${MAIN_ROOT}/tools/liblbfgs-1.10 +export LD_LIBRARY_PATH=${LD_LIBRARY_PATH:-}:${LIBLBFGS}/lib/.libs +export SRILM=${MAIN_ROOT}/tools/srilm +export PATH=${PATH}:${SRILM}/bin:${SRILM}/bin/i686-m64 + +# Kaldi +export KALDI_ROOT=${MAIN_ROOT}/tools/kaldi +[ -f $KALDI_ROOT/tools/env.sh ] && . $KALDI_ROOT/tools/env.sh +export PATH=$PWD/utils/:$KALDI_ROOT/tools/openfst/bin:$PWD:$PATH +[ ! -f $KALDI_ROOT/tools/config/common_path.sh ] && echo >&2 "The standard file $KALDI_ROOT/tools/config/common_path.sh is not present -> Exit!" +. $KALDI_ROOT/tools/config/common_path.sh || true diff --git a/examples/aishell/s1/run.sh b/examples/aishell/s1/run.sh index 4cf09553b..e3c008234 100644 --- a/examples/aishell/s1/run.sh +++ b/examples/aishell/s1/run.sh @@ -25,15 +25,26 @@ fi if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then # avg n best model - avg.sh exp/${ckpt}/checkpoints ${avg_num} + avg.sh best exp/${ckpt}/checkpoints ${avg_num} fi if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then # test ckpt avg_n - CUDA_VISIBLE_DEVICES=4 ./local/test.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} || exit -1 + CUDA_VISIBLE_DEVICES=0 ./local/test.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} || exit -1 fi if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then + # ctc alignment of test data + CUDA_VISIBLE_DEVICES=0 ./local/align.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} || exit -1 +fi + +if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then # export ckpt avg_n - CUDA_VISIBLE_DEVICES= ./local/export.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} exp/${ckpt}/checkpoints/${avg_ckpt}.jit + CUDA_VISIBLE_DEVICES=0 ./local/export.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} exp/${ckpt}/checkpoints/${avg_ckpt}.jit fi + + # Optionally, you can add LM and test it with runtime. 
+ if [ ${stage} -le 6 ] && [ ${stop_stage} -ge 6 ]; then + # train lm and build TLG + ./local/tlg.sh --corpus aishell --lmtype srilm + fi diff --git a/examples/aishell/s1/utils b/examples/aishell/s1/utils new file mode 120000 index 000000000..973afe674 --- /dev/null +++ b/examples/aishell/s1/utils @@ -0,0 +1 @@ +../../../utils \ No newline at end of file diff --git a/examples/aug_conf/augmentation.json b/examples/aug_conf/augmentation.json deleted file mode 100644 index a1a759e67..000000000 --- a/examples/aug_conf/augmentation.json +++ /dev/null @@ -1,10 +0,0 @@ -[ - { - "type": "shift", - "params": { - "min_shift_ms": -5, - "max_shift_ms": 5 - }, - "prob": 1.0 - } -] diff --git a/examples/aug_conf/augmentation.example.json b/examples/augmentation/augmentation.json similarity index 91% rename from examples/aug_conf/augmentation.example.json rename to examples/augmentation/augmentation.json index efae2e5e3..c99299d6c 100644 --- a/examples/aug_conf/augmentation.example.json +++ b/examples/augmentation/augmentation.json @@ -52,16 +52,18 @@ { "type": "specaug", "params": { + "W": 80, + "warp_mode": "PIL", "F": 10, - "T": 50, "n_freq_masks": 2, + "T": 50, "n_time_masks": 2, "p": 1.0, - "W": 80, "adaptive_number_ratio": 0, "adaptive_size_ratio": 0, - "max_n_time_masks": 20 + "max_n_time_masks": 20, + "replace_with_zero": false }, - "prob": 0.0 + "prob": 1.0 } ] diff --git a/examples/callcenter/s1/.gitignore b/examples/callcenter/s1/.gitignore new file mode 100644 index 000000000..02a229225 --- /dev/null +++ b/examples/callcenter/s1/.gitignore @@ -0,0 +1,3 @@ +data +exp +*.profile diff --git a/examples/callcenter/s1/README.md b/examples/callcenter/s1/README.md new file mode 100644 index 000000000..b9fa1472e --- /dev/null +++ b/examples/callcenter/s1/README.md @@ -0,0 +1,20 @@ +# MandarinK8 + +## Conformer + +| Model | Params | Config | Augmentation| Test set | Decode method | Loss | CER | +| --- | --- | --- | --- | --- | --- | --- | --- | +| conformer | 45.73 M | conf/conformer.yaml | spec_aug + shift | test | attention | 2.1794936656951904 | 0.102304 | +| conformer | 45.73 M | conf/conformer.yaml | spec_aug + shift | test | ctc_greedy_search | 2.1794936656951904 | 0.084295 | +| conformer | 45.73 M | conf/conformer.yaml | spec_aug + shift | test | ctc_prefix_beam_search | 2.1794936656951904 | 0.084340 | +| conformer | 45.73 M | conf/conformer.yaml | spec_aug + shift | test | attention_rescoring | 2.1794936656951904 | 0.081675 | + + +## Chunk Conformer + +| Model | Params | Config | Augmentation| Test set | Decode method | Chunk Size & Left Chunks | Loss | CER | +| --- | --- | --- | --- | --- | --- | --- | --- | --- | +| conformer | 45.73 M | conf/chunk_conformer.yaml | spec_aug + shift | test | attention | 16, -1 | 2.23287845 | 0.087982 | +| conformer | 45.73 M | conf/chunk_conformer.yaml | spec_aug + shift | test | ctc_greedy_search | 16, -1 | 2.23287845 | 0.086962 | +| conformer | 45.73 M | conf/chunk_conformer.yaml | spec_aug + shift | test | ctc_prefix_beam_search | 16, -1 | 2.23287845 | 0.086741 | +| conformer | 45.73 M | conf/chunk_conformer.yaml | spec_aug + shift | test | attention_rescoring | 16, -1 | 2.23287845 | 0.083495 | diff --git a/examples/callcenter/s1/conf/augmentation.json b/examples/callcenter/s1/conf/augmentation.json new file mode 100644 index 000000000..81d110b0b --- /dev/null +++ b/examples/callcenter/s1/conf/augmentation.json @@ -0,0 +1,35 @@ +[ + { + "type": "speed", + "params": { + "min_speed_rate": 0.9, + "max_speed_rate": 1.1, + "num_rates": 3 + }, + "prob": 0.0 + }, 
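The `specaug` entry enables SpecAugment during training: `n_freq_masks` bands of width up to `F` are masked on the frequency axis, `n_time_masks` spans of length up to `T` on the time axis, `W` controls time warping, and `replace_with_zero` chooses between zero and the feature mean as the fill value. A rough, self-contained sketch of the two masking steps on a `(time, freq)` spectrogram, assuming zero fill (not the toolkit's implementation, which also supports adaptive masking and PIL-based warping):

```python
import numpy as np

def spec_mask(spec, F=10, T=50, n_freq_masks=2, n_time_masks=2, seed=0):
    """spec: (time, freq) feature matrix; returns a masked copy."""
    rng = np.random.default_rng(seed)
    out = spec.copy()
    t_len, f_len = out.shape
    for _ in range(n_freq_masks):        # zero random frequency bands of width <= F
        f = rng.integers(0, F + 1)
        f0 = rng.integers(0, max(1, f_len - f))
        out[:, f0:f0 + f] = 0.0
    for _ in range(n_time_masks):        # zero random time spans of length <= T
        t = rng.integers(0, T + 1)
        t0 = rng.integers(0, max(1, t_len - t))
        out[t0:t0 + t, :] = 0.0
    return out

masked = spec_mask(np.random.randn(200, 80))  # e.g. 200 frames of 80-dim fbank
```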
+ { + "type": "shift", + "params": { + "min_shift_ms": -5, + "max_shift_ms": 5 + }, + "prob": 1.0 + }, + { + "type": "specaug", + "params": { + "F": 10, + "T": 50, + "n_freq_masks": 2, + "n_time_masks": 2, + "p": 1.0, + "W": 80, + "adaptive_number_ratio": 0, + "adaptive_size_ratio": 0, + "max_n_time_masks": 20, + "replace_with_zero": true + }, + "prob": 1.0 + } +] diff --git a/examples/callcenter/s1/conf/chunk_conformer.yaml b/examples/callcenter/s1/conf/chunk_conformer.yaml new file mode 100644 index 000000000..f79b8eaa0 --- /dev/null +++ b/examples/callcenter/s1/conf/chunk_conformer.yaml @@ -0,0 +1,120 @@ +# https://yaml.org/type/float.html +data: + train_manifest: data/manifest.train + dev_manifest: data/manifest.dev + test_manifest: data/manifest.test + min_input_len: 0.5 + max_input_len: 20.0 # second + min_output_len: 0.0 + max_output_len: 400.0 + min_output_input_ratio: 0.05 + max_output_input_ratio: 10.0 + + +collator: + vocab_filepath: data/vocab.txt + unit_type: 'char' + spm_model_prefix: '' + augmentation_config: conf/augmentation.json + batch_size: 32 + raw_wav: True # use raw_wav or kaldi feature + specgram_type: fbank #linear, mfcc, fbank + feat_dim: 80 + delta_delta: False + dither: 1.0 + target_sample_rate: 8000 + max_freq: None + n_fft: None + stride_ms: 10.0 + window_ms: 25.0 + use_dB_normalization: True + target_dB: -20 + random_seed: 0 + keep_transcription_text: False + sortagrad: True + shuffle_method: batch_shuffle + num_workers: 2 + + +# network architecture +model: + cmvn_file: "data/mean_std.json" + cmvn_file_type: "json" + # encoder related + encoder: conformer + encoder_conf: + output_size: 256 # dimension of attention + attention_heads: 4 + linear_units: 2048 # the number of units of position-wise feed forward + num_blocks: 12 # the number of encoder blocks + dropout_rate: 0.1 + positional_dropout_rate: 0.1 + attention_dropout_rate: 0.0 + input_layer: conv2d # encoder input type, you can chose conv2d, conv2d6 and conv2d8 + normalize_before: True + use_cnn_module: True + cnn_module_kernel: 15 + activation_type: 'swish' + pos_enc_layer_type: 'rel_pos' + selfattention_layer_type: 'rel_selfattn' + causal: true + use_dynamic_chunk: true + cnn_module_norm: 'layer_norm' # using nn.LayerNorm makes model converge faster + use_dynamic_left_chunk: false + + # decoder related + decoder: transformer + decoder_conf: + attention_heads: 4 + linear_units: 2048 + num_blocks: 6 + dropout_rate: 0.1 + positional_dropout_rate: 0.1 + self_attention_dropout_rate: 0.0 + src_attention_dropout_rate: 0.0 + + # hybrid CTC/attention + model_conf: + ctc_weight: 0.3 + lsm_weight: 0.1 # label smoothing option + length_normalized_loss: false + + +training: + n_epoch: 240 + accum_grad: 4 + global_grad_clip: 5.0 + optim: adam + optim_conf: + lr: 0.001 + weight_decay: 1e-6 + scheduler: warmuplr # pytorch v1.1.0+ required + scheduler_conf: + warmup_steps: 25000 + lr_decay: 1.0 + log_interval: 100 + checkpoint: + kbest_n: 50 + latest_n: 5 + + +decoding: + batch_size: 128 + error_rate_type: cer + decoding_method: attention # 'attention', 'ctc_greedy_search', 'ctc_prefix_beam_search', 'attention_rescoring' + lang_model_path: data/lm/common_crawl_00.prune01111.trie.klm + alpha: 2.5 + beta: 0.3 + beam_size: 10 + cutoff_prob: 1.0 + cutoff_top_n: 0 + num_proc_bsearch: 8 + ctc_weight: 0.5 # ctc weight for attention rescoring decode mode. + decoding_chunk_size: -1 # decoding chunk size. Defaults to -1. + # <0: for decoding, use full chunk. + # >0: for decoding, use fixed chunk size as set. 
+ # 0: used for training, it's prohibited here. + num_decoding_left_chunks: -1 # number of left chunks for decoding. Defaults to -1. + simulate_streaming: true # simulate streaming inference. Defaults to False. + + diff --git a/examples/callcenter/s1/conf/conformer.yaml b/examples/callcenter/s1/conf/conformer.yaml new file mode 100644 index 000000000..3b08cc7a1 --- /dev/null +++ b/examples/callcenter/s1/conf/conformer.yaml @@ -0,0 +1,117 @@ +# https://yaml.org/type/float.html +data: + train_manifest: data/manifest.train + dev_manifest: data/manifest.dev + test_manifest: data/manifest.test + min_input_len: 0.5 + max_input_len: 20.0 # second + min_output_len: 0.0 + max_output_len: 400.0 + min_output_input_ratio: 0.0 + max_output_input_ratio: .inf + + +collator: + vocab_filepath: data/vocab.txt + unit_type: 'char' + spm_model_prefix: '' + augmentation_config: conf/augmentation.json + batch_size: 32 + raw_wav: True # use raw_wav or kaldi feature + specgram_type: fbank #linear, mfcc, fbank + feat_dim: 80 + delta_delta: False + dither: 1.0 + target_sample_rate: 8000 + max_freq: None + n_fft: None + stride_ms: 10.0 + window_ms: 25.0 + use_dB_normalization: True + target_dB: -20 + random_seed: 0 + keep_transcription_text: False + sortagrad: True + shuffle_method: batch_shuffle + num_workers: 2 + +# network architecture +model: + cmvn_file: "data/mean_std.json" + cmvn_file_type: "json" + # encoder related + encoder: conformer + encoder_conf: + output_size: 256 # dimension of attention + attention_heads: 4 + linear_units: 2048 # the number of units of position-wise feed forward + num_blocks: 12 # the number of encoder blocks + dropout_rate: 0.1 + positional_dropout_rate: 0.1 + attention_dropout_rate: 0.0 + input_layer: conv2d # encoder input type, you can chose conv2d, conv2d6 and conv2d8 + normalize_before: True + use_cnn_module: True + cnn_module_kernel: 15 + activation_type: 'swish' + pos_enc_layer_type: 'rel_pos' + selfattention_layer_type: 'rel_selfattn' + + # decoder related + decoder: transformer + decoder_conf: + attention_heads: 4 + linear_units: 2048 + num_blocks: 6 + dropout_rate: 0.1 + positional_dropout_rate: 0.1 + self_attention_dropout_rate: 0.0 + src_attention_dropout_rate: 0.0 + + # hybrid CTC/attention + model_conf: + ctc_weight: 0.3 + lsm_weight: 0.1 # label smoothing option + length_normalized_loss: false + + +training: + n_epoch: 100 # 50 will be lowest + accum_grad: 4 + global_grad_clip: 5.0 + optim: adam + optim_conf: + lr: 0.002 + weight_decay: 1e-6 + scheduler: warmuplr # pytorch v1.1.0+ required + scheduler_conf: + warmup_steps: 25000 + lr_decay: 1.0 + log_interval: 100 + checkpoint: + kbest_n: 50 + latest_n: 5 + + + + +decoding: + batch_size: 128 + error_rate_type: cer + decoding_method: attention # 'attention', 'ctc_greedy_search', 'ctc_prefix_beam_search', 'attention_rescoring' + lang_model_path: data/lm/common_crawl_00.prune01111.trie.klm + alpha: 2.5 + beta: 0.3 + beam_size: 10 + cutoff_prob: 1.0 + cutoff_top_n: 0 + num_proc_bsearch: 8 + ctc_weight: 0.5 # ctc weight for attention rescoring decode mode. + decoding_chunk_size: -1 # decoding chunk size. Defaults to -1. + # <0: for decoding, use full chunk. + # >0: for decoding, use fixed chunk size as set. + # 0: used for training, it's prohibited here. + num_decoding_left_chunks: -1 # number of left chunks for decoding. Defaults to -1. + simulate_streaming: False # simulate streaming inference. Defaults to False. 
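The `decoding_chunk_size` and `num_decoding_left_chunks` options above control how much of the utterance the encoder may attend to at decode time. The sketch below is illustrative only (it is not the U2 implementation) and shows which frame windows a chunk-wise decoder would see for a given setting:

```python
def chunk_windows(num_frames, decoding_chunk_size, num_decoding_left_chunks):
    """Return (context_start, chunk_start, chunk_end) per decoding step.
    decoding_chunk_size < 0      -> use the whole utterance (offline decoding).
    num_decoding_left_chunks < 0 -> unlimited left context."""
    if decoding_chunk_size < 0:
        return [(0, 0, num_frames)]
    windows = []
    for start in range(0, num_frames, decoding_chunk_size):
        end = min(start + decoding_chunk_size, num_frames)
        if num_decoding_left_chunks < 0:
            ctx = 0
        else:
            ctx = max(0, start - num_decoding_left_chunks * decoding_chunk_size)
        windows.append((ctx, start, end))
    return windows

# chunk_conformer.yaml-style streaming decode: chunk size 16, unlimited left context
print(chunk_windows(50, 16, -1))  # [(0, 0, 16), (0, 16, 32), (0, 32, 48), (0, 48, 50)]
```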
+ + diff --git a/examples/callcenter/s1/local/align.sh b/examples/callcenter/s1/local/align.sh new file mode 100755 index 000000000..f2c878c20 --- /dev/null +++ b/examples/callcenter/s1/local/align.sh @@ -0,0 +1,43 @@ +#! /usr/bin/env bash + +if [ $# != 2 ];then + echo "usage: ${0} config_path ckpt_path_prefix" + exit -1 +fi + +ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}') +echo "using $ngpu gpus..." + +device=gpu +if [ ${ngpu} == 0 ];then + device=cpu +fi +config_path=$1 +ckpt_prefix=$2 + +ckpt_name=$(basename ${ckpt_prefxi}) + +mkdir -p exp + + + +batch_size=1 +output_dir=${ckpt_prefix} +mkdir -p ${output_dir} + +# align dump in `result_file` +# .tier, .TextGrid dump in `dir of result_file` +python3 -u ${BIN_DIR}/alignment.py \ +--device ${device} \ +--nproc 1 \ +--config ${config_path} \ +--result_file ${output_dir}/${type}.align \ +--checkpoint_path ${ckpt_prefix} \ +--opts decoding.batch_size ${batch_size} + +if [ $? -ne 0 ]; then + echo "Failed in ctc alignment!" + exit 1 +fi + +exit 0 diff --git a/examples/callcenter/s1/local/data.sh b/examples/callcenter/s1/local/data.sh new file mode 100755 index 000000000..e2640ead7 --- /dev/null +++ b/examples/callcenter/s1/local/data.sh @@ -0,0 +1,77 @@ +#! /usr/bin/env bash + +stage=-1 +stop_stage=100 + +source ${MAIN_ROOT}/utils/parse_options.sh + +mkdir -p data + +if [ ${stage} -le -1 ] && [ ${stop_stage} -ge -1 ]; then + for dataset in train dev test; do + mv data/manifest.${dataset} data/manifest.${dataset}.raw + done +fi + +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + # download data, generate manifests + # build vocabulary + python3 ${MAIN_ROOT}/utils/build_vocab.py \ + --unit_type="char" \ + --count_threshold=0 \ + --vocab_path="data/vocab.txt" \ + --manifest_paths "data/manifest.train.raw" + + if [ $? -ne 0 ]; then + echo "Build vocabulary failed. Terminated." + exit 1 + fi +fi + + +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + # compute mean and stddev for normalizer + num_workers=$(nproc) + python3 ${MAIN_ROOT}/utils/compute_mean_std.py \ + --manifest_path="data/manifest.train.raw" \ + --specgram_type="fbank" \ + --feat_dim=80 \ + --delta_delta=false \ + --stride_ms=10.0 \ + --window_ms=25.0 \ + --sample_rate=8000 \ + --use_dB_normalization=False \ + --num_samples=-1 \ + --num_workers=${num_workers} \ + --output_path="data/mean_std.json" + + if [ $? -ne 0 ]; then + echo "Compute mean and stddev failed. Terminated." + exit 1 + fi +fi + + +if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then + # format manifest with tokenids, vocab size + for dataset in train dev test; do + { + python3 ${MAIN_ROOT}/utils/format_data.py \ + --feat_type "raw" \ + --cmvn_path "data/mean_std.json" \ + --unit_type "char" \ + --vocab_path="data/vocab.txt" \ + --manifest_path="data/manifest.${dataset}.raw" \ + --output_path="data/manifest.${dataset}" + + if [ $? -ne 0 ]; then + echo "Formt mnaifest failed. Terminated." + exit 1 + fi + } & + done + wait +fi + +echo "data preparation done." +exit 0 diff --git a/examples/callcenter/s1/local/download_lm_ch.sh b/examples/callcenter/s1/local/download_lm_ch.sh new file mode 100755 index 000000000..ac27a9076 --- /dev/null +++ b/examples/callcenter/s1/local/download_lm_ch.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +. ${MAIN_ROOT}/utils/utility.sh + +DIR=data/lm +mkdir -p ${DIR} + +URL='https://deepspeech.bj.bcebos.com/zh_lm/zh_giga.no_cna_cmn.prune01244.klm' +MD5="29e02312deb2e59b3c8686c7966d4fe3" +TARGET=${DIR}/zh_giga.no_cna_cmn.prune01244.klm + + +echo "Download language model ..." 
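The `download` call below comes from `utils/utility.sh`: it fetches the language model only if it is missing and checks it against the expected MD5 before use. A minimal Python equivalent of that idea, a sketch rather than the helper itself:

```python
import hashlib
import os
import urllib.request

def download_with_md5(url: str, md5sum: str, target: str) -> bool:
    """Fetch `target` from `url` if absent, then verify its MD5 checksum."""
    if not os.path.exists(target):
        os.makedirs(os.path.dirname(target) or ".", exist_ok=True)
        urllib.request.urlretrieve(url, target)
    h = hashlib.md5()
    with open(target, "rb") as f:
        for block in iter(lambda: f.read(1 << 20), b""):
            h.update(block)
    return h.hexdigest() == md5sum
```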
+download $URL $MD5 $TARGET +if [ $? -ne 0 ]; then + echo "Fail to download the language model!" + exit 1 +fi + + +exit 0 diff --git a/examples/callcenter/s1/local/export.sh b/examples/callcenter/s1/local/export.sh new file mode 100755 index 000000000..d171899cd --- /dev/null +++ b/examples/callcenter/s1/local/export.sh @@ -0,0 +1,34 @@ +#! /usr/bin/env bash + +if [ $# != 3 ];then + echo "usage: $0 config_path ckpt_prefix jit_model_path" + exit -1 +fi + +ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}') +echo "using $ngpu gpus..." + +config_path=$1 +ckpt_path_prefix=$2 +jit_model_export_path=$3 + +device=gpu +if [ ${ngpu} == 0 ];then + device=cpu +fi + +python3 -u ${BIN_DIR}/export.py \ +--device ${device} \ +--nproc ${ngpu} \ +--config ${config_path} \ +--checkpoint_path ${ckpt_path_prefix} \ +--export_path ${jit_model_export_path} + + +if [ $? -ne 0 ]; then + echo "Failed in export!" + exit 1 +fi + + +exit 0 diff --git a/examples/callcenter/s1/local/test.sh b/examples/callcenter/s1/local/test.sh new file mode 100755 index 000000000..7a5b1cdb1 --- /dev/null +++ b/examples/callcenter/s1/local/test.sh @@ -0,0 +1,67 @@ +#! /usr/bin/env bash + +if [ $# != 2 ];then + echo "usage: ${0} config_path ckpt_path_prefix" + exit -1 +fi + +ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}') +echo "using $ngpu gpus..." + +device=gpu +if [ ${ngpu} == 0 ];then + device=cpu +fi +config_path=$1 +ckpt_prefix=$2 + +ckpt_name=$(basename ${ckpt_prefxi}) + +mkdir -p exp + +# download language model +#bash local/download_lm_ch.sh +#if [ $? -ne 0 ]; then +# exit 1 +#fi + + +for type in attention ctc_greedy_search; do + echo "decoding ${type}" + batch_size=1 + output_dir=${ckpt_prefix} + mkdir -p ${output_dir} + python3 -u ${BIN_DIR}/test.py \ + --device ${device} \ + --nproc 1 \ + --config ${config_path} \ + --result_file ${output_dir}/${type}.rsl \ + --checkpoint_path ${ckpt_prefix} \ + --opts decoding.decoding_method ${type} decoding.batch_size ${batch_size} + + if [ $? -ne 0 ]; then + echo "Failed in evaluation!" + exit 1 + fi +done + +for type in ctc_prefix_beam_search attention_rescoring; do + echo "decoding ${type}" + batch_size=1 + output_dir=${ckpt_prefix} + mkdir -p ${output_dir} + python3 -u ${BIN_DIR}/test.py \ + --device ${device} \ + --nproc 1 \ + --config ${config_path} \ + --result_file ${output_dir}/${type}.rsl \ + --checkpoint_path ${ckpt_prefix} \ + --opts decoding.decoding_method ${type} decoding.batch_size ${batch_size} + + if [ $? -ne 0 ]; then + echo "Failed in evaluation!" + exit 1 + fi +done + +exit 0 diff --git a/examples/callcenter/s1/local/train.sh b/examples/callcenter/s1/local/train.sh new file mode 100755 index 000000000..d5dc15b03 --- /dev/null +++ b/examples/callcenter/s1/local/train.sh @@ -0,0 +1,44 @@ +#! /usr/bin/env bash + +if [ $# != 2 ];then + echo "usage: CUDA_VISIBLE_DEVICES=0 ${0} config_path ckpt_name" + exit -1 +fi + +ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}') +echo "using $ngpu gpus..." + +config_path=$1 +ckpt_name=$2 + +device=gpu +if [ ${ngpu} == 0 ];then + device=cpu +fi +echo "using ${device}..." + +mkdir -p exp + +# seed may break model convergence +seed=0 +if [ ${seed} != 0 ]; then + export FLAGS_cudnn_deterministic=True +fi + +python3 -u ${BIN_DIR}/train.py \ +--device ${device} \ +--nproc ${ngpu} \ +--config ${config_path} \ +--output exp/${ckpt_name} \ +--seed ${seed} + +if [ ${seed} != 0 ]; then + unset FLAGS_cudnn_deterministic +fi + +if [ $? -ne 0 ]; then + echo "Failed in training!" 
+ exit 1 +fi + +exit 0 diff --git a/examples/callcenter/s1/path.sh b/examples/callcenter/s1/path.sh new file mode 100644 index 000000000..29841bc10 --- /dev/null +++ b/examples/callcenter/s1/path.sh @@ -0,0 +1,14 @@ +export MAIN_ROOT=`realpath ${PWD}/../../../` + +export PATH=${MAIN_ROOT}:${MAIN_ROOT}/utils:${PATH} +export LC_ALL=C + +# Use UTF-8 in Python to avoid UnicodeDecodeError when LC_ALL=C +export PYTHONIOENCODING=UTF-8 +export PYTHONPATH=${MAIN_ROOT}:${PYTHONPATH} + +export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib/ + + +MODEL=u2 +export BIN_DIR=${MAIN_ROOT}/deepspeech/exps/${MODEL}/bin diff --git a/examples/callcenter/s1/run.sh b/examples/callcenter/s1/run.sh new file mode 100644 index 000000000..305021f19 --- /dev/null +++ b/examples/callcenter/s1/run.sh @@ -0,0 +1,44 @@ +#!/bin/bash +set -e +source path.sh + +stage=0 +stop_stage=100 +conf_path=conf/conformer.yaml +avg_num=20 + +source ${MAIN_ROOT}/utils/parse_options.sh || exit 1; + +avg_ckpt=avg_${avg_num} +ckpt=$(basename ${conf_path} | awk -F'.' '{print $1}') +echo "checkpoint name ${ckpt}" + +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + # prepare data + bash ./local/data.sh || exit -1 +fi + +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + # train model, all `ckpt` under `exp` dir + CUDA_VISIBLE_DEVICES=0,1,2,3 ./local/train.sh ${conf_path} ${ckpt} +fi + +if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then + # avg n best model + avg.sh best exp/${ckpt}/checkpoints ${avg_num} +fi + +if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then + # test ckpt avg_n + CUDA_VISIBLE_DEVICES=4 ./local/test.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} || exit -1 +fi + +if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then + # ctc alignment of test data + CUDA_VISIBLE_DEVICES=0 ./local/align.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} || exit -1 +fi + +if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then + # export ckpt avg_n + CUDA_VISIBLE_DEVICES= ./local/export.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} exp/${ckpt}/checkpoints/${avg_ckpt}.jit +fi diff --git a/examples/cc-cedict/README.md b/examples/cc-cedict/README.md index e69de29bb..513fca533 100644 --- a/examples/cc-cedict/README.md +++ b/examples/cc-cedict/README.md @@ -0,0 +1,58 @@ +# [CC-CEDICT](https://cc-cedict.org/wiki/) + +What is CC-CEDICT? +CC-CEDICT is a continuation of the CEDICT project. +The objective of the CEDICT project was to create an online, downloadable (as opposed to searchable-only) public-domain Chinese-English dictionary. +CEDICT was started by Paul Andrew Denisowski in October 1997. +For the most part, the project is modeled on Jim Breen's highly successful EDICT (Japanese-English dictionary) project and is intended to be a collaborative effort, +with users providing entries and corrections to the main file. + + +## Parse CC-CEDICT to Json format + +1. Parse to Json + +``` +run.sh +``` + +2. Result + +``` +exp/ +|-- cedict +`-- cedict.json + +0 directories, 2 files +``` + +``` +4c4bffc84e24467fe1b2ea9ba37ed6b6 exp/cedict +3adf504dacd13886f88cc9fe3b37c75d exp/cedict.json +``` + +``` +==> exp/cedict <== +# CC-CEDICT +# Community maintained free Chinese-English dictionary. 
+# +# Published by MDBG +# +# License: +# Creative Commons Attribution-ShareAlike 4.0 International License +# https://creativecommons.org/licenses/by-sa/4.0/ +# +# Referenced works: + +==> exp/cedict.json <== +{"traditional": "2019\u51a0\u72c0\u75c5\u6bd2\u75c5", "simplified": "2019\u51a0\u72b6\u75c5\u6bd2\u75c5", "pinyin": "er4 ling2 yi1 jiu3 guan1 zhuang4 bing4 du2 bing4", "english": "COVID-19, the coronavirus disease identified in 2019"} +{"traditional": "21\u4e09\u9ad4\u7d9c\u5408\u75c7", "simplified": "21\u4e09\u4f53\u7efc\u5408\u75c7", "pinyin": "er4 shi2 yi1 san1 ti3 zong1 he2 zheng4", "english": "trisomy"} +{"traditional": "3C", "simplified": "3C", "pinyin": "san1 C", "english": "abbr. for computers, communications, and consumer electronics"} +{"traditional": "3P", "simplified": "3P", "pinyin": "san1 P", "english": "(slang) threesome"} +{"traditional": "3Q", "simplified": "3Q", "pinyin": "san1 Q", "english": "(Internet slang) thank you (loanword)"} +{"traditional": "421", "simplified": "421", "pinyin": "si4 er4 yi1", "english": "four grandparents, two parents and an only child"} +{"traditional": "502\u81a0", "simplified": "502\u80f6", "pinyin": "wu3 ling2 er4 jiao1", "english": "cyanoacrylate glue"} +{"traditional": "88", "simplified": "88", "pinyin": "ba1 ba1", "english": "(Internet slang) bye-bye (alternative for \u62dc\u62dc[bai2 bai2])"} +{"traditional": "996", "simplified": "996", "pinyin": "jiu3 jiu3 liu4", "english": "9am-9pm, six days a week (work schedule)"} +{"traditional": "A", "simplified": "A", "pinyin": "A", "english": "(slang) (Tw) to steal"} +``` diff --git a/examples/cc-cedict/path.sh b/examples/cc-cedict/path.sh index 84e2de7d0..f8fdd82d7 100644 --- a/examples/cc-cedict/path.sh +++ b/examples/cc-cedict/path.sh @@ -1,4 +1,4 @@ -export MAIN_ROOT=${PWD}/../../ +export MAIN_ROOT=`realpath ${PWD}/../../` export PATH=${MAIN_ROOT}:${MAIN_ROOT}/utils:${PATH} export LC_ALL=C diff --git a/examples/chinese_g2p/README.md b/examples/chinese_g2p/README.md deleted file mode 100644 index e3fdfe684..000000000 --- a/examples/chinese_g2p/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Download Baker dataset - -Baker dataset has to be downloaded mannually and moved to 'data/', because you will have to pass the CATTCHA from a browswe to download the dataset. - -Download URL https://test.data-baker.com/#/data/index/source. diff --git a/examples/dataset/aidatatang_200zh/.gitignore b/examples/dataset/aidatatang_200zh/.gitignore new file mode 100644 index 000000000..fc56525e6 --- /dev/null +++ b/examples/dataset/aidatatang_200zh/.gitignore @@ -0,0 +1,4 @@ +*.tgz +manifest.* +*.meta +aidatatang_200zh/ \ No newline at end of file diff --git a/examples/dataset/aidatatang_200zh/README.md b/examples/dataset/aidatatang_200zh/README.md new file mode 100644 index 000000000..e6f1eefbd --- /dev/null +++ b/examples/dataset/aidatatang_200zh/README.md @@ -0,0 +1,14 @@ +# [Aidatatang_200zh](http://www.openslr.org/62/) + +Aidatatang_200zh is a free Chinese Mandarin speech corpus provided by Beijing DataTang Technology Co., Ltd under Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International Public License. +The contents and the corresponding descriptions of the corpus include: + +* The corpus contains 200 hours of acoustic data, which is mostly mobile recorded data. +* 600 speakers from different accent areas in China are invited to participate in the recording. +* The transcription accuracy for each sentence is larger than 98%. +* Recordings are conducted in a quiet indoor environment. 
+* The database is divided into training set, validation set, and testing set in a ratio of 7: 1: 2. +* Detail information such as speech data coding and speaker information is preserved in the metadata file. +* Segmented transcripts are also provided. + +The corpus aims to support researchers in speech recognition, machine translation, voiceprint recognition, and other speech-related fields. Therefore, the corpus is totally free for academic use. diff --git a/examples/dataset/aidatatang_200zh/aidatatang_200zh.py b/examples/dataset/aidatatang_200zh/aidatatang_200zh.py new file mode 100644 index 000000000..e32f619e9 --- /dev/null +++ b/examples/dataset/aidatatang_200zh/aidatatang_200zh.py @@ -0,0 +1,153 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Prepare aidatatang_200zh mandarin dataset + +Download, unpack and create manifest files. +Manifest file is a json-format file with each line containing the +meta data (i.e. audio filepath, transcript and audio duration) +of each audio file in the data set. +""" +import argparse +import codecs +import json +import os + +import soundfile + +from utils.utility import download +from utils.utility import unpack + +DATA_HOME = os.path.expanduser('~/.cache/paddle/dataset/speech') + +URL_ROOT = 'http://www.openslr.org/resources/62' +# URL_ROOT = 'https://openslr.magicdatatech.com/resources/62' +DATA_URL = URL_ROOT + '/aidatatang_200zh.tgz' +MD5_DATA = '6e0f4f39cd5f667a7ee53c397c8d0949' + +parser = argparse.ArgumentParser(description=__doc__) +parser.add_argument( + "--target_dir", + default=DATA_HOME + "/aidatatang_200zh", + type=str, + help="Directory to save the dataset. (default: %(default)s)") +parser.add_argument( + "--manifest_prefix", + default="manifest", + type=str, + help="Filepath prefix for output manifests. (default: %(default)s)") +args = parser.parse_args() + + +def create_manifest(data_dir, manifest_path_prefix): + print("Creating manifest %s ..." 
% manifest_path_prefix) + json_lines = [] + transcript_path = os.path.join(data_dir, 'transcript', + 'aidatatang_200_zh_transcript.txt') + transcript_dict = {} + for line in codecs.open(transcript_path, 'r', 'utf-8'): + line = line.strip() + if line == '': + continue + audio_id, text = line.split(' ', 1) + # remove withespace, charactor text + text = ''.join(text.split()) + transcript_dict[audio_id] = text + + data_types = ['train', 'dev', 'test'] + for dtype in data_types: + del json_lines[:] + total_sec = 0.0 + total_text = 0.0 + total_num = 0 + + audio_dir = os.path.join(data_dir, 'corpus/', dtype) + for subfolder, _, filelist in sorted(os.walk(audio_dir)): + for fname in filelist: + if not fname.endswith('.wav'): + continue + + audio_path = os.path.abspath(os.path.join(subfolder, fname)) + audio_id = os.path.basename(fname)[:-4] + + audio_data, samplerate = soundfile.read(audio_path) + duration = float(len(audio_data) / samplerate) + text = transcript_dict[audio_id] + json_lines.append( + json.dumps( + { + 'utt': audio_id, + 'feat': audio_path, + 'feat_shape': (duration, ), # second + 'text': text, + }, + ensure_ascii=False)) + + total_sec += duration + total_text += len(text) + total_num += 1 + + manifest_path = manifest_path_prefix + '.' + dtype + with codecs.open(manifest_path, 'w', 'utf-8') as fout: + for line in json_lines: + fout.write(line + '\n') + + manifest_dir = os.path.dirname(manifest_path_prefix) + meta_path = os.path.join(manifest_dir, dtype) + '.meta' + with open(meta_path, 'w') as f: + print(f"{dtype}:", file=f) + print(f"{total_num} utts", file=f) + print(f"{total_sec / (60*60)} h", file=f) + print(f"{total_text} text", file=f) + print(f"{total_text / total_sec} text/sec", file=f) + print(f"{total_sec / total_num} sec/utt", file=f) + + +def prepare_dataset(url, md5sum, target_dir, manifest_path, subset): + """Download, unpack and create manifest file.""" + data_dir = os.path.join(target_dir, subset) + if not os.path.exists(data_dir): + filepath = download(url, md5sum, target_dir) + unpack(filepath, target_dir) + # unpack all audio tar files + audio_dir = os.path.join(data_dir, 'corpus') + for subfolder, dirlist, filelist in sorted(os.walk(audio_dir)): + for sub in dirlist: + print(f"unpack dir {sub}...") + for folder, _, filelist in sorted( + os.walk(os.path.join(subfolder, sub))): + for ftar in filelist: + unpack(os.path.join(folder, ftar), folder, True) + else: + print("Skip downloading and unpacking. Data already exists in %s." % + target_dir) + + create_manifest(data_dir, manifest_path) + + +def main(): + if args.target_dir.startswith('~'): + args.target_dir = os.path.expanduser(args.target_dir) + + prepare_dataset( + url=DATA_URL, + md5sum=MD5_DATA, + target_dir=args.target_dir, + manifest_path=args.manifest_prefix, + subset='aidatatang_200zh') + + print("Data download and manifest prepare done!") + + +if __name__ == '__main__': + main() diff --git a/examples/dataset/aishell/.gitignore b/examples/dataset/aishell/.gitignore index 9c6e517e5..27194aab8 100644 --- a/examples/dataset/aishell/.gitignore +++ b/examples/dataset/aishell/.gitignore @@ -1 +1,5 @@ data_aishell* +*.meta +manifest.* +*.tgz +resource_aishell diff --git a/examples/dataset/aishell/README.md b/examples/dataset/aishell/README.md new file mode 100644 index 000000000..6770cd207 --- /dev/null +++ b/examples/dataset/aishell/README.md @@ -0,0 +1,3 @@ +# [Aishell1](http://www.openslr.org/33/) + +This Open Source Mandarin Speech Corpus, AISHELL-ASR0009-OS1, is 178 hours long. 
It is a part of AISHELL-ASR0009, of which utterance contains 11 domains, including smart home, autonomous driving, and industrial production. The whole recording was put in quiet indoor environment, using 3 different devices at the same time: high fidelity microphone (44.1kHz, 16-bit,); Android-system mobile phone (16kHz, 16-bit), iOS-system mobile phone (16kHz, 16-bit). Audios in high fidelity were re-sampled to 16kHz to build AISHELL- ASR0009-OS1. 400 speakers from different accent areas in China were invited to participate in the recording. The manual transcription accuracy rate is above 95%, through professional speech annotation and strict quality inspection. The corpus is divided into training, development and testing sets. ( This database is free for academic research, not in the commerce, if without permission. ) diff --git a/examples/dataset/aishell/aishell.py b/examples/dataset/aishell/aishell.py index a0cabe352..66e069013 100644 --- a/examples/dataset/aishell/aishell.py +++ b/examples/dataset/aishell/aishell.py @@ -31,9 +31,11 @@ from utils.utility import unpack DATA_HOME = os.path.expanduser('~/.cache/paddle/dataset/speech') URL_ROOT = 'http://www.openslr.org/resources/33' -URL_ROOT = 'https://openslr.magicdatatech.com/resources/33' +# URL_ROOT = 'https://openslr.magicdatatech.com/resources/33' DATA_URL = URL_ROOT + '/data_aishell.tgz' MD5_DATA = '2f494334227864a8a8fec932999db9d8' +RESOURCE_URL = URL_ROOT + '/resource_aishell.tgz' +MD5_RESOURCE = '957d480a0fcac85fc18e550756f624e5' parser = argparse.ArgumentParser(description=__doc__) parser.add_argument( @@ -60,18 +62,22 @@ def create_manifest(data_dir, manifest_path_prefix): if line == '': continue audio_id, text = line.split(' ', 1) - # remove withespace + # remove withespace, charactor text text = ''.join(text.split()) transcript_dict[audio_id] = text data_types = ['train', 'dev', 'test'] for dtype in data_types: del json_lines[:] + total_sec = 0.0 + total_text = 0.0 + total_num = 0 + audio_dir = os.path.join(data_dir, 'wav', dtype) for subfolder, _, filelist in sorted(os.walk(audio_dir)): for fname in filelist: - audio_path = os.path.join(subfolder, fname) - audio_id = fname[:-4] + audio_path = os.path.abspath(os.path.join(subfolder, fname)) + audio_id = os.path.basename(fname)[:-4] # if no transcription for audio then skipped if audio_id not in transcript_dict: continue @@ -81,22 +87,34 @@ def create_manifest(data_dir, manifest_path_prefix): json_lines.append( json.dumps( { - 'utt': - os.path.splitext(os.path.basename(audio_path))[0], - 'feat': - audio_path, + 'utt': audio_id, + 'feat': audio_path, 'feat_shape': (duration, ), # second - 'text': - text + 'text': text }, ensure_ascii=False)) + + total_sec += duration + total_text += len(text) + total_num += 1 + manifest_path = manifest_path_prefix + '.' 
+ dtype with codecs.open(manifest_path, 'w', 'utf-8') as fout: for line in json_lines: fout.write(line + '\n') + manifest_dir = os.path.dirname(manifest_path_prefix) + meta_path = os.path.join(manifest_dir, dtype) + '.meta' + with open(meta_path, 'w') as f: + print(f"{dtype}:", file=f) + print(f"{total_num} utts", file=f) + print(f"{total_sec / (60*60)} h", file=f) + print(f"{total_text} text", file=f) + print(f"{total_text / total_sec} text/sec", file=f) + print(f"{total_sec / total_num} sec/utt", file=f) -def prepare_dataset(url, md5sum, target_dir, manifest_path): + +def prepare_dataset(url, md5sum, target_dir, manifest_path=None): """Download, unpack and create manifest file.""" data_dir = os.path.join(target_dir, 'data_aishell') if not os.path.exists(data_dir): @@ -110,7 +128,9 @@ def prepare_dataset(url, md5sum, target_dir, manifest_path): else: print("Skip downloading and unpacking. Data already exists in %s." % target_dir) - create_manifest(data_dir, manifest_path) + + if manifest_path: + create_manifest(data_dir, manifest_path) def main(): @@ -123,6 +143,14 @@ def main(): target_dir=args.target_dir, manifest_path=args.manifest_prefix) + prepare_dataset( + url=RESOURCE_URL, + md5sum=MD5_RESOURCE, + target_dir=args.target_dir, + manifest_path=None) + + print("Data download and manifest prepare done!") + if __name__ == '__main__': main() diff --git a/examples/dataset/aishell3/README.md b/examples/dataset/aishell3/README.md new file mode 100644 index 000000000..8a29a6d0f --- /dev/null +++ b/examples/dataset/aishell3/README.md @@ -0,0 +1,3 @@ +# [Aishell3](http://www.openslr.org/93/) + +AISHELL-3 is a large-scale and high-fidelity multi-speaker Mandarin speech corpus which could be used to train multi-speaker Text-to-Speech (TTS) systems. The corpus contains roughly **85 hours** of emotion-neutral recordings spoken by 218 native Chinese mandarin speakers and total 88035 utterances. Their auxiliary attributes such as gender, age group and native accents are explicitly marked and provided in the corpus. Accordingly, transcripts in Chinese character-level and pinyin-level are provided along with the recordings. The word & tone transcription accuracy rate is above 98%, through professional speech annotation and strict quality inspection for tone and prosody. ( This database is free for academic research, not in the commerce, if without permission. ) diff --git a/examples/dataset/gigaspeech/.gitignore b/examples/dataset/gigaspeech/.gitignore new file mode 100644 index 000000000..7f78176b7 --- /dev/null +++ b/examples/dataset/gigaspeech/.gitignore @@ -0,0 +1 @@ +GigaSpeech/ diff --git a/examples/dataset/gigaspeech/README.md b/examples/dataset/gigaspeech/README.md new file mode 100644 index 000000000..4a1715cb8 --- /dev/null +++ b/examples/dataset/gigaspeech/README.md @@ -0,0 +1,10 @@ +# [GigaSpeech](https://github.com/SpeechColab/GigaSpeech) + +``` +git clone https://github.com/SpeechColab/GigaSpeech.git + +cd GigaSpeech +utils/gigaspeech_download.sh /disk1/audio_data/gigaspeech +toolkits/kaldi/gigaspeech_data_prep.sh --train-subset XL /disk1/audio_data/gigaspeech ../data +cd .. +``` diff --git a/examples/dataset/gigaspeech/gigaspeech.py b/examples/dataset/gigaspeech/gigaspeech.py new file mode 100644 index 000000000..185a92b8d --- /dev/null +++ b/examples/dataset/gigaspeech/gigaspeech.py @@ -0,0 +1,13 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/examples/dataset/gigaspeech/run.sh b/examples/dataset/gigaspeech/run.sh new file mode 100755 index 000000000..a1ad8610c --- /dev/null +++ b/examples/dataset/gigaspeech/run.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +set -e + +curdir=$PWD + +test -d GigaSpeech || git clone https://github.com/SpeechColab/GigaSpeech.git + + +pushd GigaSpeech +source env_vars.sh +./utils/download_gigaspeech.sh ${curdir}/ +#toolkits/kaldi/gigaspeech_data_prep.sh --train-subset XL /disk1/audio_data/gigaspeech ../data +popd diff --git a/examples/dataset/librispeech/.gitignore b/examples/dataset/librispeech/.gitignore index dfd5c67b5..465806def 100644 --- a/examples/dataset/librispeech/.gitignore +++ b/examples/dataset/librispeech/.gitignore @@ -5,3 +5,5 @@ test-other train-clean-100 train-clean-360 train-other-500 +*.meta +manifest.* diff --git a/examples/dataset/librispeech/librispeech.py b/examples/dataset/librispeech/librispeech.py index 55012f73c..e85bbb3aa 100644 --- a/examples/dataset/librispeech/librispeech.py +++ b/examples/dataset/librispeech/librispeech.py @@ -77,6 +77,10 @@ def create_manifest(data_dir, manifest_path): """ print("Creating manifest %s ..." % manifest_path) json_lines = [] + total_sec = 0.0 + total_text = 0.0 + total_num = 0 + for subfolder, _, filelist in sorted(os.walk(data_dir)): text_filelist = [ filename for filename in filelist if filename.endswith('trans.txt') @@ -86,7 +90,9 @@ def create_manifest(data_dir, manifest_path): for line in io.open(text_filepath, encoding="utf8"): segments = line.strip().split() text = ' '.join(segments[1:]).lower() - audio_filepath = os.path.join(subfolder, segments[0] + '.flac') + + audio_filepath = os.path.abspath( + os.path.join(subfolder, segments[0] + '.flac')) audio_data, samplerate = soundfile.read(audio_filepath) duration = float(len(audio_data)) / samplerate json_lines.append( @@ -99,10 +105,27 @@ def create_manifest(data_dir, manifest_path): 'text': text })) + + total_sec += duration + total_text += len(text) + total_num += 1 + with codecs.open(manifest_path, 'w', 'utf-8') as out_file: for line in json_lines: out_file.write(line + '\n') + subset = os.path.splitext(manifest_path)[1][1:] + manifest_dir = os.path.dirname(manifest_path) + data_dir_name = os.path.split(data_dir)[-1] + meta_path = os.path.join(manifest_dir, data_dir_name) + '.meta' + with open(meta_path, 'w') as f: + print(f"{subset}:", file=f) + print(f"{total_num} utts", file=f) + print(f"{total_sec / (60*60)} h", file=f) + print(f"{total_text} text", file=f) + print(f"{total_text / total_sec} text/sec", file=f) + print(f"{total_sec / total_num} sec/utt", file=f) + def prepare_dataset(url, md5sum, target_dir, manifest_path): """Download, unpack and create summmary manifest file. 
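All of the dataset-prepare scripts in this change emit the same two artifacts: a json-lines manifest (one utterance per line) and a small `.meta` summary with aggregate statistics. A hedged example of what one manifest line and the derived statistics look like; the utterance id, path, and text below are made up:

```python
import json

utt = {
    "utt": "BAC009S0002W0122",                       # hypothetical utterance id
    "feat": "/path/to/wav/train/S0002/BAC009S0002W0122.wav",
    "feat_shape": (4.89,),                           # duration in seconds
    "text": "example transcript",
}
print(json.dumps(utt, ensure_ascii=False))           # one line of manifest.train

# The *.meta file is just aggregate statistics over those lines:
total_num, total_sec, total_text = 1, utt["feat_shape"][0], len(utt["text"])
print(f"{total_num} utts")
print(f"{total_sec / (60 * 60)} h")
print(f"{total_text / total_sec} text/sec")
print(f"{total_sec / total_num} sec/utt")
```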
diff --git a/examples/dataset/magicdata/README.md b/examples/dataset/magicdata/README.md new file mode 100644 index 000000000..083aee97b --- /dev/null +++ b/examples/dataset/magicdata/README.md @@ -0,0 +1,15 @@ +# [MagicData](http://www.openslr.org/68/) + +MAGICDATA Mandarin Chinese Read Speech Corpus was developed by MAGIC DATA Technology Co., Ltd. and freely published for non-commercial use. +The contents and the corresponding descriptions of the corpus include: + +* The corpus contains 755 hours of speech data, which is mostly mobile recorded data. +* 1080 speakers from different accent areas in China are invited to participate in the recording. +* The sentence transcription accuracy is higher than 98%. +* Recordings are conducted in a quiet indoor environment. +* The database is divided into training set, validation set, and testing set in a ratio of 51: 1: 2. +* Detail information such as speech data coding and speaker information is preserved in the metadata file. +* The domain of recording texts is diversified, including interactive Q&A, music search, SNS messages, home command and control, etc. +* Segmented transcripts are also provided. + +The corpus aims to support researchers in speech recognition, machine translation, speaker recognition, and other speech-related fields. Therefore, the corpus is totally free for academic use. diff --git a/examples/dataset/mini_librispeech/.gitignore b/examples/dataset/mini_librispeech/.gitignore index 61f54c966..7fbcfd65d 100644 --- a/examples/dataset/mini_librispeech/.gitignore +++ b/examples/dataset/mini_librispeech/.gitignore @@ -2,3 +2,4 @@ dev-clean/ manifest.dev-clean manifest.train-clean train-clean/ +*.meta diff --git a/examples/dataset/mini_librispeech/mini_librispeech.py b/examples/dataset/mini_librispeech/mini_librispeech.py index f5bc13933..65fee81a7 100644 --- a/examples/dataset/mini_librispeech/mini_librispeech.py +++ b/examples/dataset/mini_librispeech/mini_librispeech.py @@ -58,6 +58,10 @@ def create_manifest(data_dir, manifest_path): """ print("Creating manifest %s ..." % manifest_path) json_lines = [] + total_sec = 0.0 + total_text = 0.0 + total_num = 0 + for subfolder, _, filelist in sorted(os.walk(data_dir)): text_filelist = [ filename for filename in filelist if filename.endswith('trans.txt') @@ -80,10 +84,27 @@ def create_manifest(data_dir, manifest_path): 'text': text })) + + total_sec += duration + total_text += len(text) + total_num += 1 + with codecs.open(manifest_path, 'w', 'utf-8') as out_file: for line in json_lines: out_file.write(line + '\n') + subset = os.path.splitext(manifest_path)[1][1:] + manifest_dir = os.path.dirname(manifest_path) + data_dir_name = os.path.split(data_dir)[-1] + meta_path = os.path.join(manifest_dir, data_dir_name) + '.meta' + with open(meta_path, 'w') as f: + print(f"{subset}:", file=f) + print(f"{total_num} utts", file=f) + print(f"{total_sec / (60*60)} h", file=f) + print(f"{total_text} text", file=f) + print(f"{total_text / total_sec} text/sec", file=f) + print(f"{total_sec / total_num} sec/utt", file=f) + def prepare_dataset(url, md5sum, target_dir, manifest_path): """Download, unpack and create summmary manifest file. 
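In the librispeech and mini_librispeech scripts above, the subset label is taken from the manifest filename extension and the `.meta` file is named after the data directory. Restated in a couple of lines with example paths only:

```python
import os

manifest_path = "data/manifest.dev-clean"                          # example path
data_dir = "/home/user/.cache/paddle/dataset/speech/libri/dev-clean"

subset = os.path.splitext(manifest_path)[1][1:]                    # "dev-clean"
meta_path = os.path.join(os.path.dirname(manifest_path),
                         os.path.split(data_dir)[-1]) + ".meta"    # "data/dev-clean.meta"
print(subset, meta_path)
```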
diff --git a/examples/dataset/multi_cn/README.md b/examples/dataset/multi_cn/README.md new file mode 100644 index 000000000..d59b11b6d --- /dev/null +++ b/examples/dataset/multi_cn/README.md @@ -0,0 +1,11 @@ +# multi-cn + +This is a Chinese speech recognition recipe that trains on all Chinese corpora on OpenSLR, including: + +* Aidatatang (140 hours) +* Aishell (151 hours) +* MagicData (712 hours) +* Primewords (99 hours) +* ST-CMDS (110 hours) +* THCHS-30 (26 hours) +* optional AISHELL2 (~1000 hours) if available diff --git a/examples/dataset/primewords/README.md b/examples/dataset/primewords/README.md new file mode 100644 index 000000000..a4f1ed65d --- /dev/null +++ b/examples/dataset/primewords/README.md @@ -0,0 +1,6 @@ +# [Primewords](http://www.openslr.org/47/) + +This free Chinese Mandarin speech corpus set is released by Shanghai Primewords Information Technology Co., Ltd. +The corpus is recorded by smart mobile phones from 296 native Chinese speakers. The transcription accuracy is larger than 98%, at the confidence level of 95%. It is free for academic use. + +The mapping between the transcript and utterance is given in JSON format. diff --git a/examples/dataset/st-cmds/README.md b/examples/dataset/st-cmds/README.md new file mode 100644 index 000000000..c7ae50e59 --- /dev/null +++ b/examples/dataset/st-cmds/README.md @@ -0,0 +1 @@ +# [FreeST](http://www.openslr.org/38/) diff --git a/examples/dataset/ted_en_zh/.gitignore b/examples/dataset/ted_en_zh/.gitignore new file mode 100644 index 000000000..ad6ab64af --- /dev/null +++ b/examples/dataset/ted_en_zh/.gitignore @@ -0,0 +1,6 @@ +*.tar.gz.* +manifest.* +*.md +EN-ZH/ +train-split/ +test-segment/ \ No newline at end of file diff --git a/examples/dataset/ted_en_zh/ted_en_zh.py b/examples/dataset/ted_en_zh/ted_en_zh.py new file mode 100644 index 000000000..14bef01d2 --- /dev/null +++ b/examples/dataset/ted_en_zh/ted_en_zh.py @@ -0,0 +1,116 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Prepare Ted-En-Zh speech translation dataset + +Create manifest files from splited datased. +dev set: tst2010, test set: tst2015 +Manifest file is a json-format file with each line containing the +meta data (i.e. audio filepath, transcript and audio duration) +of each audio file in the data set. +""" +import argparse +import codecs +import json +import os + +import soundfile + +parser = argparse.ArgumentParser(description=__doc__) +parser.add_argument( + "--src_dir", + default="", + type=str, + help="Directory to kaldi splited data. (default: %(default)s)") +parser.add_argument( + "--manifest_prefix", + default="manifest", + type=str, + help="Filepath prefix for output manifests. (default: %(default)s)") +args = parser.parse_args() + + +def create_manifest(data_dir, manifest_path_prefix): + print("Creating manifest %s ..." 
% manifest_path_prefix) + json_lines = [] + + data_types_infos = [ + ('train', 'train-split/train-segment', 'En-Zh/train.en-zh'), + ('dev', 'test-segment/tst2010', 'En-Zh/tst2010.en-zh'), + ('test', 'test-segment/tst2015', 'En-Zh/tst2015.en-zh') + ] + for data_info in data_types_infos: + dtype, audio_relative_dir, text_relative_path = data_info + del json_lines[:] + total_sec = 0.0 + total_text = 0.0 + total_num = 0 + + text_path = os.path.join(data_dir, text_relative_path) + audio_dir = os.path.join(data_dir, audio_relative_dir) + + for line in codecs.open(text_path, 'r', 'utf-8', errors='ignore'): + line = line.strip() + if len(line) < 1: + continue + audio_id, trancription, translation = line.split('\t') + utt = audio_id.split('.')[0] + + audio_path = os.path.join(audio_dir, audio_id) + if os.path.exists(audio_path): + if os.path.getsize(audio_path) < 30000: + continue + audio_data, samplerate = soundfile.read(audio_path) + duration = float(len(audio_data) / samplerate) + json_lines.append( + json.dumps( + { + 'utt': utt, + 'feat': audio_path, + 'feat_shape': (duration, ), # second + 'text': " ".join(translation.split()), + 'text1': " ".join(trancription.split()) + }, + ensure_ascii=False)) + + total_sec += duration + total_text += len(translation.split()) + total_num += 1 + if not total_num % 1000: + print(dtype, 'Processed:', total_num) + + manifest_path = manifest_path_prefix + '.' + dtype + '.raw' + with codecs.open(manifest_path, 'w', 'utf-8') as fout: + for line in json_lines: + fout.write(line + '\n') + + +def prepare_dataset(src_dir, manifest_path=None): + """create manifest file.""" + if os.path.isdir(manifest_path): + manifest_path = os.path.join(manifest_path, 'manifest') + if manifest_path: + create_manifest(src_dir, manifest_path) + + +def main(): + if args.src_dir.startswith('~'): + args.src_dir = os.path.expanduser(args.src_dir) + + prepare_dataset(src_dir=args.src_dir, manifest_path=args.manifest_prefix) + + print("manifest prepare done!") + + +if __name__ == '__main__': + main() diff --git a/examples/dataset/thchs30/.gitignore b/examples/dataset/thchs30/.gitignore new file mode 100644 index 000000000..b94cd7e40 --- /dev/null +++ b/examples/dataset/thchs30/.gitignore @@ -0,0 +1,6 @@ +*.tgz +manifest.* +data_thchs30 +resource +test-noise +*.meta diff --git a/examples/dataset/thchs30/README.md b/examples/dataset/thchs30/README.md new file mode 100644 index 000000000..6b59d663a --- /dev/null +++ b/examples/dataset/thchs30/README.md @@ -0,0 +1,55 @@ +# [THCHS30](http://www.openslr.org/18/) + +This is the *data part* of the `THCHS30 2015` acoustic data +& scripts dataset. + +The dataset is described in more detail in the paper ``THCHS-30 : A Free +Chinese Speech Corpus`` by Dong Wang, Xuewei Zhang. + +A paper (if it can be called a paper) 13 years ago regarding the database: + +Dong Wang, Dalei Wu, Xiaoyan Zhu, ``TCMSD: A new Chinese Continuous Speech Database``, +International Conference on Chinese Computing (ICCC'01), 2001, Singapore. + +The layout of this data pack is the following: + + ``data`` + ``*.wav`` + audio data + + ``*.wav.trn`` + transcriptions + + ``{train,dev,test}`` + contain symlinks into the ``data`` directory for both audio and + transcription files. Contents of these directories define the + train/dev/test split of the data. 
+ + ``{lm_word}`` + ``word.3gram.lm`` + trigram LM based on word + ``lexicon.txt`` + lexicon based on word + + ``{lm_phone}`` + ``phone.3gram.lm`` + trigram LM based on phone + ``lexicon.txt`` + lexicon based on phone + + ``README.TXT`` + this file + + +Data statistics +=============== + +Statistics for the data are as follows: + + =========== ========== ========== =========== + **dataset** **audio** **#sents** **#words** + =========== ========== ========== =========== + train 25 10,000 198,252 + dev 2:14 893 17,743 + test 6:15 2,495 49,085 + =========== ========== ========== =========== diff --git a/examples/dataset/thchs30/thchs30.py b/examples/dataset/thchs30/thchs30.py new file mode 100644 index 000000000..77a264cbb --- /dev/null +++ b/examples/dataset/thchs30/thchs30.py @@ -0,0 +1,186 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Prepare THCHS-30 mandarin dataset + +Download, unpack and create manifest files. +Manifest file is a json-format file with each line containing the +meta data (i.e. audio filepath, transcript and audio duration) +of each audio file in the data set. +""" +import argparse +import codecs +import json +import os +from multiprocessing.pool import Pool +from pathlib import Path + +import soundfile + +from utils.utility import download +from utils.utility import unpack + +DATA_HOME = os.path.expanduser('~/.cache/paddle/dataset/speech') + +URL_ROOT = 'http://www.openslr.org/resources/18' +# URL_ROOT = 'https://openslr.magicdatatech.com/resources/18' +DATA_URL = URL_ROOT + '/data_thchs30.tgz' +TEST_NOISE_URL = URL_ROOT + '/test-noise.tgz' +RESOURCE_URL = URL_ROOT + '/resource.tgz' +MD5_DATA = '2d2252bde5c8429929e1841d4cb95e90' +MD5_TEST_NOISE = '7e8a985fb965b84141b68c68556c2030' +MD5_RESOURCE = 'c0b2a565b4970a0c4fe89fefbf2d97e1' + +parser = argparse.ArgumentParser(description=__doc__) +parser.add_argument( + "--target_dir", + default=DATA_HOME + "/THCHS30", + type=str, + help="Directory to save the dataset. (default: %(default)s)") +parser.add_argument( + "--manifest_prefix", + default="manifest", + type=str, + help="Filepath prefix for output manifests. (default: %(default)s)") +args = parser.parse_args() + + +def read_trn(filepath): + """read trn file. + word text in first line. + syllable text in second line. + phoneme text in third line. + + Args: + filepath (str): trn path. + + Returns: + list(str): (word, syllable, phone) + """ + texts = [] + with open(filepath, 'r') as f: + lines = f.read().strip().split('\n') + assert len(lines) == 3, lines + # charactor text, remove withespace + texts.append(''.join(lines[0].split())) + texts.extend(lines[1:]) + return texts + + +def resolve_symlink(filepath): + """resolve symlink which content is norm file. + + Args: + filepath (str): norm file symlink. 
+ """ + sym_path = Path(filepath) + relative_link = sym_path.read_text().strip() + relative = Path(relative_link) + relpath = sym_path.parent / relative + return relpath.resolve() + + +def create_manifest(data_dir, manifest_path_prefix): + print("Creating manifest %s ..." % manifest_path_prefix) + json_lines = [] + data_types = ['train', 'dev', 'test'] + for dtype in data_types: + del json_lines[:] + total_sec = 0.0 + total_text = 0.0 + total_num = 0 + + audio_dir = os.path.join(data_dir, dtype) + for subfolder, _, filelist in sorted(os.walk(audio_dir)): + for fname in filelist: + file_path = os.path.join(subfolder, fname) + if file_path.endswith('.wav'): + audio_path = os.path.abspath(file_path) + text_path = resolve_symlink(audio_path + '.trn') + else: + continue + + assert os.path.exists(audio_path) and os.path.exists(text_path) + + audio_id = os.path.basename(audio_path)[:-4] + word_text, syllable_text, phone_text = read_trn(text_path) + audio_data, samplerate = soundfile.read(audio_path) + duration = float(len(audio_data) / samplerate) + + # not dump alignment infos + json_lines.append( + json.dumps( + { + 'utt': audio_id, + 'feat': audio_path, + 'feat_shape': (duration, ), # second + 'text': word_text, # charactor + 'syllable': syllable_text, + 'phone': phone_text, + }, + ensure_ascii=False)) + + total_sec += duration + total_text += len(word_text) + total_num += 1 + + manifest_path = manifest_path_prefix + '.' + dtype + with codecs.open(manifest_path, 'w', 'utf-8') as fout: + for line in json_lines: + fout.write(line + '\n') + + manifest_dir = os.path.dirname(manifest_path_prefix) + meta_path = os.path.join(manifest_dir, dtype) + '.meta' + with open(meta_path, 'w') as f: + print(f"{dtype}:", file=f) + print(f"{total_num} utts", file=f) + print(f"{total_sec / (60*60)} h", file=f) + print(f"{total_text} text", file=f) + print(f"{total_text / total_sec} text/sec", file=f) + print(f"{total_sec / total_num} sec/utt", file=f) + + +def prepare_dataset(url, md5sum, target_dir, manifest_path, subset): + """Download, unpack and create manifest file.""" + datadir = os.path.join(target_dir, subset) + if not os.path.exists(datadir): + filepath = download(url, md5sum, target_dir) + unpack(filepath, target_dir) + else: + print("Skip downloading and unpacking. Data already exists in %s." % + target_dir) + + if subset == 'data_thchs30': + create_manifest(datadir, manifest_path) + + +def main(): + if args.target_dir.startswith('~'): + args.target_dir = os.path.expanduser(args.target_dir) + + tasks = [ + (DATA_URL, MD5_DATA, args.target_dir, args.manifest_prefix, + "data_thchs30"), + (TEST_NOISE_URL, MD5_TEST_NOISE, args.target_dir, args.manifest_prefix, + "test-noise"), + (RESOURCE_URL, MD5_RESOURCE, args.target_dir, args.manifest_prefix, + "resource"), + ] + with Pool(7) as pool: + pool.starmap(prepare_dataset, tasks) + + print("Data download and manifest prepare done!") + + +if __name__ == '__main__': + main() diff --git a/examples/dataset/timit/.gitignore b/examples/dataset/timit/.gitignore new file mode 100644 index 000000000..9a3f42281 --- /dev/null +++ b/examples/dataset/timit/.gitignore @@ -0,0 +1,4 @@ +TIMIT.* +TIMIT +manifest.* +*.meta diff --git a/examples/dataset/timit/timit.py b/examples/dataset/timit/timit.py new file mode 100644 index 000000000..311d445cb --- /dev/null +++ b/examples/dataset/timit/timit.py @@ -0,0 +1,241 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Prepare Librispeech ASR datasets. + +Download, unpack and create manifest files. +Manifest file is a json-format file with each line containing the +meta data (i.e. audio filepath, transcript and audio duration) +of each audio file in the data set. +""" +import argparse +import codecs +import json +import os +import re +import string +from pathlib import Path + +import soundfile + +from utils.utility import unzip + +URL_ROOT = "" +MD5_DATA = "45c68037c7fdfe063a43c851f181fb2d" + +parser = argparse.ArgumentParser(description=__doc__) +parser.add_argument( + "--target_dir", + default='~/.cache/paddle/dataset/speech/timit', + type=str, + help="Directory to save the dataset. (default: %(default)s)") +parser.add_argument( + "--manifest_prefix", + default="manifest", + type=str, + help="Filepath prefix for output manifests. (default: %(default)s)") +args = parser.parse_args() + +#: A string containing Chinese punctuation marks (non-stops). +non_stops = ( + # Fullwidth ASCII variants + '\uFF02\uFF03\uFF04\uFF05\uFF06\uFF07\uFF08\uFF09\uFF0A\uFF0B\uFF0C\uFF0D' + '\uFF0F\uFF1A\uFF1B\uFF1C\uFF1D\uFF1E\uFF20\uFF3B\uFF3C\uFF3D\uFF3E\uFF3F' + '\uFF40\uFF5B\uFF5C\uFF5D\uFF5E\uFF5F\uFF60' + + # Halfwidth CJK punctuation + '\uFF62\uFF63\uFF64' + + # CJK symbols and punctuation + '\u3000\u3001\u3003' + + # CJK angle and corner brackets + '\u3008\u3009\u300A\u300B\u300C\u300D\u300E\u300F\u3010\u3011' + + # CJK brackets and symbols/punctuation + '\u3014\u3015\u3016\u3017\u3018\u3019\u301A\u301B\u301C\u301D\u301E\u301F' + + # Other CJK symbols + '\u3030' + + # Special CJK indicators + '\u303E\u303F' + + # Dashes + '\u2013\u2014' + + # Quotation marks and apostrophe + '\u2018\u2019\u201B\u201C\u201D\u201E\u201F' + + # General punctuation + '\u2026\u2027' + + # Overscores and underscores + '\uFE4F' + + # Small form variants + '\uFE51\uFE54' + + # Latin punctuation + '\u00B7') + +#: A string of Chinese stops. +stops = ( + '\uFF01' # Fullwidth exclamation mark + '\uFF1F' # Fullwidth question mark + '\uFF61' # Halfwidth ideographic full stop + '\u3002' # Ideographic full stop +) + +#: A string containing all Chinese punctuation. +punctuation = non_stops + stops + + +def tn(text): + # lower text + text = text.lower() + # remove punc + text = re.sub(f'[{punctuation}{string.punctuation}]', "", text) + return text + + +def read_txt(filepath: str) -> str: + with open(filepath, 'r') as f: + line = f.read().strip().split(maxsplit=2)[2] + return tn(line) + + +def read_algin(filepath: str) -> str: + """read word or phone alignment file. 
+ + + Args: + filepath (str): [description] + + Returns: + str: token sepearte by + """ + aligns = [] # (start, end, token) + with open(filepath, 'r') as f: + for line in f: + items = line.strip().split() + # for phone: (Note: beginning and ending silence regions are marked with h#) + if items[2].strip() == 'h#': + continue + aligns.append(items) + return ' '.join([item[2] for item in aligns]) + + +def create_manifest(data_dir, manifest_path_prefix): + """Create a manifest json file summarizing the data set, with each line + containing the meta data (i.e. audio filepath, transcription text, audio + duration) of each audio file within the data set. + """ + print("Creating manifest %s ..." % manifest_path_prefix) + json_lines = [] + utts = set() + + data_types = ['TRAIN', 'TEST'] + for dtype in data_types: + del json_lines[:] + total_sec = 0.0 + total_text = 0.0 + total_num = 0 + + audio_dir = Path(os.path.join(data_dir, dtype)) + for fname in sorted(audio_dir.rglob('*.WAV')): + audio_path = fname.resolve() # .WAV + audio_id = audio_path.stem + # if uttid exits, then skipped + if audio_id in utts: + continue + + utts.add(audio_id) + text_path = audio_path.with_suffix('.TXT') + phone_path = audio_path.with_suffix('.PHN') + word_path = audio_path.with_suffix('.WRD') + + audio_data, samplerate = soundfile.read( + str(audio_path), dtype='int16') + duration = float(len(audio_data) / samplerate) + word_text = read_txt(text_path) + phone_text = read_algin(phone_path) + + gender_spk = str(audio_path.parent.stem) + spk = gender_spk[1:] + gender = gender_spk[0] + utt_id = '_'.join([spk, gender, audio_id]) + # not dump alignment infos + json_lines.append( + json.dumps( + { + 'utt': utt_id, + 'feat': str(audio_path), + 'feat_shape': (duration, ), # second + 'text': word_text, # word + 'phone': phone_text, + 'spk': spk, + 'gender': gender, + }, + ensure_ascii=False)) + + total_sec += duration + total_text += len(word_text.split()) + total_num += 1 + + manifest_path = manifest_path_prefix + '.' + dtype.lower() + with codecs.open(manifest_path, 'w', 'utf-8') as fout: + for line in json_lines: + fout.write(line + '\n') + + manifest_dir = os.path.dirname(manifest_path_prefix) + meta_path = os.path.join(manifest_dir, dtype.lower()) + '.meta' + with open(meta_path, 'w') as f: + print(f"{dtype}:", file=f) + print(f"{total_num} utts", file=f) + print(f"{total_sec / (60*60)} h", file=f) + print(f"{total_text} text", file=f) + print(f"{total_text / total_sec} text/sec", file=f) + print(f"{total_sec / total_num} sec/utt", file=f) + + +def prepare_dataset(url, md5sum, target_dir, manifest_path): + """Download, unpack and create summmary manifest file. + """ + filepath = os.path.join(target_dir, "TIMIT.zip") + if not os.path.exists(filepath): + print(f"Please download TIMIT.zip into {target_dir}.") + raise FileNotFoundError + + if not os.path.exists(os.path.join(target_dir, "TIMIT")): + # check md5sum + assert check_md5sum(filepath, md5sum) + # unpack + unzip(filepath, target_dir) + else: + print("Skip downloading and unpacking. Data already exists in %s." 
% + target_dir) + # create manifest json file + create_manifest(os.path.join(target_dir, "TIMIT"), manifest_path) + + +def main(): + if args.target_dir.startswith('~'): + args.target_dir = os.path.expanduser(args.target_dir) + + prepare_dataset(URL_ROOT, MD5_DATA, args.target_dir, args.manifest_prefix) + print("Data download and manifest prepare done!") + + +if __name__ == '__main__': + main() diff --git a/examples/dataset/timit/timit_kaldi_standard_split.py b/examples/dataset/timit/timit_kaldi_standard_split.py new file mode 100644 index 000000000..2b494c06d --- /dev/null +++ b/examples/dataset/timit/timit_kaldi_standard_split.py @@ -0,0 +1,108 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Prepare TIMIT dataset (Standard split from Kaldi) + +Create manifest files from splited datased. +Manifest file is a json-format file with each line containing the +meta data (i.e. audio filepath, transcript and audio duration) +of each audio file in the data set. +""" +import argparse +import codecs +import json +import os + +import soundfile + +parser = argparse.ArgumentParser(description=__doc__) +parser.add_argument( + "--src_dir", + default="", + type=str, + help="Directory to kaldi splited data. (default: %(default)s)") +parser.add_argument( + "--manifest_prefix", + default="manifest", + type=str, + help="Filepath prefix for output manifests. (default: %(default)s)") +args = parser.parse_args() + + +def create_manifest(data_dir, manifest_path_prefix): + print("Creating manifest %s ..." % manifest_path_prefix) + json_lines = [] + + data_types = ['train', 'dev', 'test'] + for dtype in data_types: + del json_lines[:] + total_sec = 0.0 + total_text = 0.0 + total_num = 0 + + phn_path = os.path.join(data_dir, dtype + '.text') + phn_dict = {} + for line in codecs.open(phn_path, 'r', 'utf-8'): + line = line.strip() + if line == '': + continue + audio_id, text = line.split(' ', 1) + phn_dict[audio_id] = text + + audio_dir = os.path.join(data_dir, dtype + '_sph.scp') + for line in codecs.open(audio_dir, 'r', 'utf-8'): + audio_id, audio_path = line.strip().split() + # if no transcription for audio then raise error + assert audio_id in phn_dict + audio_data, samplerate = soundfile.read(audio_path) + duration = float(len(audio_data) / samplerate) + text = phn_dict[audio_id] + json_lines.append( + json.dumps( + { + 'utt': audio_id, + 'feat': audio_path, + 'feat_shape': (duration, ), # second + 'text': text + }, + ensure_ascii=False)) + + total_sec += duration + total_text += len(text) + total_num += 1 + + manifest_path = manifest_path_prefix + '.' 
+ dtype + '.raw' + with codecs.open(manifest_path, 'w', 'utf-8') as fout: + for line in json_lines: + fout.write(line + '\n') + + +def prepare_dataset(src_dir, manifest_path=None): + """create manifest file.""" + if os.path.isdir(manifest_path): + manifest_path = os.path.join(manifest_path, 'manifest') + if manifest_path: + create_manifest(src_dir, manifest_path) + + +def main(): + if args.src_dir.startswith('~'): + args.src_dir = os.path.expanduser(args.src_dir) + + prepare_dataset(src_dir=args.src_dir, manifest_path=args.manifest_prefix) + + print("manifest prepare done!") + + +if __name__ == '__main__': + main() diff --git a/examples/chinese_g2p/.gitignore b/examples/g2p/.gitignore similarity index 100% rename from examples/chinese_g2p/.gitignore rename to examples/g2p/.gitignore diff --git a/examples/g2p/README.md b/examples/g2p/README.md new file mode 100644 index 000000000..4ec5922b3 --- /dev/null +++ b/examples/g2p/README.md @@ -0,0 +1,3 @@ +# G2P + +* zh - Chinese G2P diff --git a/examples/g2p/zh/README.md b/examples/g2p/zh/README.md new file mode 100644 index 000000000..de5573565 --- /dev/null +++ b/examples/g2p/zh/README.md @@ -0,0 +1,93 @@ +# G2P + +* WS +jieba +* G2P +pypinyin +* Tone sandhi +simple + +We recommend using [Paraket](https://github.com/PaddlePaddle/Parakeet] [TextFrontEnd](https://github.com/PaddlePaddle/Parakeet/blob/develop/parakeet/frontend/__init__.py) to do G2P. +The phoneme set should be changed, you can reference `examples/thchs30/a0/data/dict/syllable.lexicon`. + +## Download Baker dataset + +[Baker](https://test.data-baker.com/#/data/index/source) dataset has to be downloaded mannually and moved to './data', +because you will have to pass the `CATTCHA` from a browswe to download the dataset. + + +## RUN + +``` +. path.sh +./run.sh +``` + +## Result + +``` +exp/ +|-- 000001-010000.txt +|-- ref.pinyin +|-- trans.jieba.pinyin +`-- trans.pinyin + +0 directories, 4 files +``` + +``` +4f5a368441eb16aaf43dc1972f8b63dd exp/000001-010000.txt +01707896391c2de9b6fc4a39654be942 exp/ref.pinyin +43380ef160f65a23a3a0544700aa49b8 exp/trans.jieba.pinyin +8e6ff1fc22d8e8584082e804e8bcdeb7 exp/trans.pinyin +``` + +``` +==> exp/000001-010000.txt <== +000001 卡尔普#2陪外孙#1玩滑梯#4。 + ka2 er2 pu3 pei2 wai4 sun1 wan2 hua2 ti1 +000002 假语村言#2别再#1拥抱我#4。 + jia2 yu3 cun1 yan2 bie2 zai4 yong1 bao4 wo3 +000003 宝马#1配挂#1跛骡鞍#3,貂蝉#1怨枕#2董翁榻#4。 + bao2 ma3 pei4 gua4 bo3 luo2 an1 diao1 chan2 yuan4 zhen3 dong3 weng1 ta4 +000004 邓小平#2与#1撒切尔#2会晤#4。 + deng4 xiao3 ping2 yu3 sa4 qie4 er3 hui4 wu4 +000005 老虎#1幼崽#2与#1宠物犬#1玩耍#4。 + lao2 hu3 you4 zai3 yu2 chong3 wu4 quan3 wan2 shua3 + +==> exp/ref.pinyin <== +000001 ka2 er2 pu3 pei2 wai4 sun1 wan2 hua2 ti1 +000002 jia2 yu3 cun1 yan2 bie2 zai4 yong1 bao4 wo3 +000003 bao2 ma3 pei4 gua4 bo3 luo2 an1 diao1 chan2 yuan4 zhen3 dong3 weng1 ta4 +000004 deng4 xiao3 ping2 yu3 sa4 qie4 er3 hui4 wu4 +000005 lao2 hu3 you4 zai3 yu2 chong3 wu4 quan3 wan2 shua3 +000006 shen1 chang2 yue1 wu2 chi3 er4 cun4 wu3 fen1 huo4 yi3 shang4 +000007 zhao4 di2 yue1 cao2 yun2 teng2 qu4 gui3 wu1 +000008 zhan2 pin3 sui1 you3 zhan3 yuan2 que4 tui2 +000009 yi2 san3 ju1 er2 tong2 he2 you4 tuo1 er2 tong2 wei2 zhu3 +000010 ke1 te4 ni1 shen1 chuan1 bao4 wen2 da4 yi1 + +==> exp/trans.jieba.pinyin <== +000001 ka3 er3 pu3 pei2 wai4 sun1 wan2 hua2 ti1 +000002 jia3 yu3 cun1 yan2 bie2 zai4 yong1 bao4 wo3 +000003 bao3 ma3 pei4 gua4 bo3 luo2 an1 diao1 chan2 yuan4 zhen3 dong3 weng1 ta4 +000004 deng4 xiao3 ping2 yu3 sa1 qie4 er3 hui4 wu4 +000005 lao3 hu3 you4 zai3 yu3 chong3 wu4 quan3 wan2 shua3 +000006 shen1 
chang2 yue1 wu3 chi3 er4 cun4 wu3 fen1 huo4 yi3 shang4 +000007 zhao4 di2 yue1 cao2 yun2 teng2 qu4 gui3 wu1 +000008 zhan3 pin3 sui1 you3 zhan3 yuan2 que4 tui2 +000009 yi3 san3 ju1 er2 tong2 he2 you4 tuo1 er2 tong2 wei2 zhu3 +000010 ke1 te4 ni1 shen1 chuan1 bao4 wen2 da4 yi1 + +==> exp/trans.pinyin <== +000001 ka3 er3 pu3 pei2 wai4 sun1 wan2 hua2 ti1 +000002 jia3 yu3 cun1 yan2 bie2 zai4 yong1 bao4 wo3 +000003 bao3 ma3 pei4 gua4 bo3 luo2 an1 diao1 chan2 yuan4 zhen3 dong3 weng1 ta4 +000004 deng4 xiao3 ping2 yu3 sa1 qie4 er3 hui4 wu4 +000005 lao3 hu3 you4 zai3 yu3 chong3 wu4 quan3 wan2 shua3 +000006 shen1 chang2 yue1 wu3 chi3 er4 cun4 wu3 fen1 huo4 yi3 shang4 +000007 zhao4 di2 yue1 cao2 yun2 teng2 qu4 gui3 wu1 +000008 zhan3 pin3 sui1 you3 zhan3 yuan2 que4 tui2 +000009 yi3 san3 ju1 er2 tong2 he2 you4 tuo1 er2 tong2 wei2 zhu3 +000010 ke1 te4 ni1 shen1 chuan1 bao4 wen2 da4 yi1 +``` diff --git a/examples/chinese_g2p/local/convert_transcription.py b/examples/g2p/zh/local/convert_transcription.py similarity index 100% rename from examples/chinese_g2p/local/convert_transcription.py rename to examples/g2p/zh/local/convert_transcription.py diff --git a/examples/chinese_g2p/local/extract_pinyin_label.py b/examples/g2p/zh/local/extract_pinyin_label.py similarity index 100% rename from examples/chinese_g2p/local/extract_pinyin_label.py rename to examples/g2p/zh/local/extract_pinyin_label.py diff --git a/examples/chinese_g2p/local/ignore_sandhi.py b/examples/g2p/zh/local/ignore_sandhi.py similarity index 100% rename from examples/chinese_g2p/local/ignore_sandhi.py rename to examples/g2p/zh/local/ignore_sandhi.py diff --git a/examples/chinese_g2p/local/prepare_dataset.sh b/examples/g2p/zh/local/prepare_dataset.sh similarity index 100% rename from examples/chinese_g2p/local/prepare_dataset.sh rename to examples/g2p/zh/local/prepare_dataset.sh diff --git a/examples/chinese_g2p/path.sh b/examples/g2p/zh/path.sh similarity index 82% rename from examples/chinese_g2p/path.sh rename to examples/g2p/zh/path.sh index b4c625f95..f475ed833 100644 --- a/examples/chinese_g2p/path.sh +++ b/examples/g2p/zh/path.sh @@ -1,4 +1,4 @@ -export MAIN_ROOT=${PWD}/../../ +export MAIN_ROOT=`realpath ${PWD}/../../../` export PATH=${MAIN_ROOT}:${MAIN_ROOT}/utils:${PATH} export LC_ALL=C diff --git a/examples/chinese_g2p/requirements.txt b/examples/g2p/zh/requirements.txt similarity index 100% rename from examples/chinese_g2p/requirements.txt rename to examples/g2p/zh/requirements.txt diff --git a/examples/chinese_g2p/run.sh b/examples/g2p/zh/run.sh similarity index 82% rename from examples/chinese_g2p/run.sh rename to examples/g2p/zh/run.sh index 8197dce4b..25b713110 100755 --- a/examples/chinese_g2p/run.sh +++ b/examples/g2p/zh/run.sh @@ -6,16 +6,19 @@ stage=-1 stop_stage=100 exp_dir=exp -data_dir=data +data=data source ${MAIN_ROOT}/utils/parse_options.sh || exit -1 mkdir -p ${exp_dir} +if [ $stage -le -1 ] && [ $stop_stage -ge -1 ];then + test -e ${data}/BZNSYP.rar || { echo "Please download BZNSYP.rar and put it in ${data}; exit -1; } +fi if [ $stage -le 0 ] && [ $stop_stage -ge 0 ];then echo "stage 0: Extracting Prosody Labeling" - bash local/prepare_dataset.sh --exp-dir ${exp_dir} --data-dir ${data_dir} + bash local/prepare_dataset.sh --exp-dir ${exp_dir} --data-dir ${data} fi # convert transcription in chinese into pinyin with pypinyin or jieba+pypinyin diff --git a/examples/librispeech/README.md b/examples/librispeech/README.md index c351c1f65..2718988f8 100644 --- a/examples/librispeech/README.md +++ 
b/examples/librispeech/README.md @@ -1,3 +1,6 @@ # ASR -* s0 is for deepspeech2 + +* s0 is for deepspeech2 offline * s1 is for transformer/conformer/U2 +* s2 is for transformer/conformer/U2 w/ kaldi feat +need install Kaldi diff --git a/examples/librispeech/s0/README.md b/examples/librispeech/s0/README.md index 393dd4579..11bcf5f65 100644 --- a/examples/librispeech/s0/README.md +++ b/examples/librispeech/s0/README.md @@ -1,9 +1,17 @@ # LibriSpeech +## Data +| Data Subset | Duration in Seconds | +| --- | --- | +| data/manifest.train | 0.83s ~ 29.735s | +| data/manifest.dev | 1.065 ~ 35.155s | +| data/manifest.test-clean | 1.285s ~ 34.955s | + ## Deepspeech2 -| Model | release | Config | Test set | Loss | WER | -| --- | --- | --- | --- | --- | --- | -| DeepSpeech2 | 2.1.0 | conf/deepspeech2.yaml | 15.184467315673828 | test-clean | 0.072154 | -| DeepSpeech2 | 2.0.0 | conf/deepspeech2.yaml | - | test-clean | 0.073973 | -| DeepSpeech2 | 1.8.5 | - | test-clean | - | 0.074939 | +| Model | Params | release | Config | Test set | Loss | WER | +| --- | --- | --- | --- | --- | --- | --- | +| DeepSpeech2 | 42.96M | 2.2.0 | conf/deepspeech2.yaml + spec_aug | test-clean | 14.49190807 | 0.067283 | +| DeepSpeech2 | 42.96M | 2.1.0 | conf/deepspeech2.yaml | test-clean | 15.184467315673828 | 0.072154 | +| DeepSpeech2 | 42.96M | 2.0.0 | conf/deepspeech2.yaml | test-clean | - | 0.073973 | +| DeepSpeech2 | 42.96M | 1.8.5 | - | test-clean | - | 0.074939 | diff --git a/examples/librispeech/s0/conf/augmentation.json b/examples/librispeech/s0/conf/augmentation.json index 5635d9c84..31c481c8d 100644 --- a/examples/librispeech/s0/conf/augmentation.json +++ b/examples/librispeech/s0/conf/augmentation.json @@ -15,5 +15,22 @@ "max_shift_ms": 5 }, "prob": 1.0 + }, + { + "type": "specaug", + "params": { + "W": 0, + "warp_mode": "PIL", + "F": 10, + "n_freq_masks": 2, + "T": 50, + "n_time_masks": 2, + "p": 1.0, + "adaptive_number_ratio": 0, + "adaptive_size_ratio": 0, + "max_n_time_masks": 20, + "replace_with_zero": true + }, + "prob": 1.0 } ] diff --git a/examples/librispeech/s0/conf/deepspeech2.yaml b/examples/librispeech/s0/conf/deepspeech2.yaml index d1746bff3..3f1a376f1 100644 --- a/examples/librispeech/s0/conf/deepspeech2.yaml +++ b/examples/librispeech/s0/conf/deepspeech2.yaml @@ -3,16 +3,21 @@ data: train_manifest: data/manifest.train dev_manifest: data/manifest.dev-clean test_manifest: data/manifest.test-clean - mean_std_filepath: data/mean_std.json - vocab_filepath: data/vocab.txt - augmentation_config: conf/augmentation.json - batch_size: 20 min_input_len: 0.0 - max_input_len: 27.0 # second + max_input_len: 30.0 # second min_output_len: 0.0 max_output_len: .inf min_output_input_ratio: 0.00 max_output_input_ratio: .inf + +collator: + batch_size: 20 + mean_std_filepath: data/mean_std.json + unit_type: char + vocab_filepath: data/vocab.txt + augmentation_config: conf/augmentation.json + random_seed: 0 + spm_model_prefix: specgram_type: linear target_sample_rate: 16000 max_freq: None @@ -27,7 +32,7 @@ data: keep_transcription_text: False sortagrad: True shuffle_method: batch_shuffle - num_workers: 0 + num_workers: 2 model: num_conv_layers: 2 @@ -35,14 +40,20 @@ model: rnn_layer_size: 2048 use_gru: False share_rnn_weights: True + blank_id: 0 + ctc_grad_norm_type: instance training: n_epoch: 50 + accum_grad: 1 lr: 1e-3 lr_decay: 0.83 weight_decay: 1e-06 global_grad_clip: 5.0 log_interval: 100 + checkpoint: + kbest_n: 50 + latest_n: 5 decoding: batch_size: 128 diff --git 
a/examples/librispeech/s0/conf/deepspeech2_online.yaml b/examples/librispeech/s0/conf/deepspeech2_online.yaml new file mode 100644 index 000000000..180a6205f --- /dev/null +++ b/examples/librispeech/s0/conf/deepspeech2_online.yaml @@ -0,0 +1,70 @@ +# https://yaml.org/type/float.html +data: + train_manifest: data/manifest.train + dev_manifest: data/manifest.dev-clean + test_manifest: data/manifest.test-clean + min_input_len: 0.0 + max_input_len: 30.0 # second + min_output_len: 0.0 + max_output_len: .inf + min_output_input_ratio: 0.00 + max_output_input_ratio: .inf + +collator: + batch_size: 15 + mean_std_filepath: data/mean_std.json + unit_type: char + vocab_filepath: data/vocab.txt + augmentation_config: conf/augmentation.json + random_seed: 0 + spm_model_prefix: + specgram_type: linear + target_sample_rate: 16000 + max_freq: None + n_fft: None + stride_ms: 10.0 + window_ms: 20.0 + delta_delta: False + dither: 1.0 + use_dB_normalization: True + target_dB: -20 + random_seed: 0 + keep_transcription_text: False + sortagrad: True + shuffle_method: batch_shuffle + num_workers: 0 + +model: + num_conv_layers: 2 + num_rnn_layers: 3 + rnn_layer_size: 2048 + rnn_direction: forward + num_fc_layers: 2 + fc_layers_size_list: 512, 256 + use_gru: False + blank_id: 0 + ctc_grad_norm_type: instance + +training: + n_epoch: 50 + accum_grad: 4 + lr: 1e-3 + lr_decay: 0.83 + weight_decay: 1e-06 + global_grad_clip: 5.0 + log_interval: 100 + checkpoint: + kbest_n: 50 + latest_n: 5 + +decoding: + batch_size: 128 + error_rate_type: wer + decoding_method: ctc_beam_search + lang_model_path: data/lm/common_crawl_00.prune01111.trie.klm + alpha: 1.9 + beta: 0.3 + beam_size: 500 + cutoff_prob: 1.0 + cutoff_top_n: 40 + num_proc_bsearch: 8 diff --git a/examples/librispeech/s0/local/data.sh b/examples/librispeech/s0/local/data.sh index 921f1f49a..b71809869 100755 --- a/examples/librispeech/s0/local/data.sh +++ b/examples/librispeech/s0/local/data.sh @@ -1,4 +1,4 @@ -#! /usr/bin/env bash +#!/bin/bash stage=-1 stop_stage=100 diff --git a/examples/librispeech/s0/local/download_lm_en.sh b/examples/librispeech/s0/local/download_lm_en.sh index 05ea793fb..dc1bdf665 100755 --- a/examples/librispeech/s0/local/download_lm_en.sh +++ b/examples/librispeech/s0/local/download_lm_en.sh @@ -1,4 +1,4 @@ -#! /usr/bin/env bash +#!/bin/bash . ${MAIN_ROOT}/utils/utility.sh diff --git a/examples/librispeech/s0/local/export.sh b/examples/librispeech/s0/local/export.sh index 1b19d5720..2e09e5f5e 100755 --- a/examples/librispeech/s0/local/export.sh +++ b/examples/librispeech/s0/local/export.sh @@ -1,7 +1,7 @@ -#! /usr/bin/env bash +#!/bin/bash -if [ $# != 3 ];then - echo "usage: $0 config_path ckpt_prefix jit_model_path" +if [ $# != 4 ];then + echo "usage: $0 config_path ckpt_prefix jit_model_path model_type" exit -1 fi @@ -11,9 +11,10 @@ echo "using $ngpu gpus..." config_path=$1 ckpt_path_prefix=$2 jit_model_export_path=$3 +model_type=$4 device=gpu -if [ ngpu == 0 ];then +if [ ${ngpu} == 0 ];then device=cpu fi @@ -22,8 +23,8 @@ python3 -u ${BIN_DIR}/export.py \ --nproc ${ngpu} \ --config ${config_path} \ --checkpoint_path ${ckpt_path_prefix} \ ---export_path ${jit_model_export_path} - +--export_path ${jit_model_export_path} \ +--model_type ${model_type} if [ $? -ne 0 ]; then echo "Failed in export!" diff --git a/examples/librispeech/s0/local/test.sh b/examples/librispeech/s0/local/test.sh index 79e05838c..b5b68c599 100755 --- a/examples/librispeech/s0/local/test.sh +++ b/examples/librispeech/s0/local/test.sh @@ -1,7 +1,7 @@ -#! 
/usr/bin/env bash +#!/bin/bash -if [ $# != 2 ];then - echo "usage: ${0} config_path ckpt_path_prefix" +if [ $# != 3 ];then + echo "usage: ${0} config_path ckpt_path_prefix model_type" exit -1 fi @@ -9,11 +9,12 @@ ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}') echo "using $ngpu gpus..." device=gpu -if [ ngpu == 0 ];then +if [ ${ngpu} == 0 ];then device=cpu fi config_path=$1 ckpt_prefix=$2 +model_type=$3 # download language model bash local/download_lm_en.sh @@ -26,7 +27,8 @@ python3 -u ${BIN_DIR}/test.py \ --nproc 1 \ --config ${config_path} \ --result_file ${ckpt_prefix}.rsl \ ---checkpoint_path ${ckpt_prefix} +--checkpoint_path ${ckpt_prefix} \ +--model_type ${model_type} if [ $? -ne 0 ]; then echo "Failed in evaluation!" diff --git a/examples/librispeech/s0/local/train.sh b/examples/librispeech/s0/local/train.sh index a4218aa86..6aee372a4 100755 --- a/examples/librispeech/s0/local/train.sh +++ b/examples/librispeech/s0/local/train.sh @@ -1,7 +1,7 @@ -#! /usr/bin/env bash +#!/bin/bash -if [ $# != 2 ];then - echo "usage: CUDA_VISIBLE_DEVICES=0 ${0} config_path ckpt_name" +if [ $# != 3 ];then + echo "usage: CUDA_VISIBLE_DEVICES=0 ${0} config_path ckpt_name model_type" exit -1 fi @@ -10,20 +10,33 @@ echo "using $ngpu gpus..." config_path=$1 ckpt_name=$2 +model_type=$3 device=gpu -if [ ngpu == 0 ];then +if [ ${ngpu} == 0 ];then device=cpu fi echo "using ${device}..." mkdir -p exp +# seed may break model convergence +seed=0 +if [ ${seed} != 0 ]; then + export FLAGS_cudnn_deterministic=True +fi + python3 -u ${BIN_DIR}/train.py \ --device ${device} \ --nproc ${ngpu} \ --config ${config_path} \ ---output exp/${ckpt_name} +--output exp/${ckpt_name} \ +--model_type ${model_type} \ +--seed ${seed} + +if [ ${seed} != 0 ]; then + unset FLAGS_cudnn_deterministic +fi if [ $? -ne 0 ]; then echo "Failed in training!" diff --git a/examples/librispeech/s0/local/tune.sh b/examples/librispeech/s0/local/tune.sh deleted file mode 100755 index 4bb81d29b..000000000 --- a/examples/librispeech/s0/local/tune.sh +++ /dev/null @@ -1,33 +0,0 @@ -#! /usr/bin/env bash - -if [ $# != 1 ];then - echo "usage: tune ckpt_path" - exit 1 -fi - -# grid-search for hyper-parameters in language model -python3 -u ${BIN_DIR}/tune.py \ ---device 'gpu' \ ---nproc 1 \ ---config conf/deepspeech2.yaml \ ---num_batches=-1 \ ---batch_size=128 \ ---beam_size=500 \ ---num_proc_bsearch=12 \ ---num_alphas=45 \ ---num_betas=8 \ ---alpha_from=1.0 \ ---alpha_to=3.2 \ ---beta_from=0.1 \ ---beta_to=0.45 \ ---cutoff_prob=1.0 \ ---cutoff_top_n=40 \ ---checkpoint_path ${1} - -if [ $? -ne 0 ]; then - echo "Failed in tuning!" 
- exit 1 -fi - - -exit 0 diff --git a/examples/librispeech/s0/path.sh b/examples/librispeech/s0/path.sh index 777da29ef..8a9345f2e 100644 --- a/examples/librispeech/s0/path.sh +++ b/examples/librispeech/s0/path.sh @@ -1,4 +1,4 @@ -export MAIN_ROOT=${PWD}/../../../ +export MAIN_ROOT=`realpath ${PWD}/../../../` export PATH=${MAIN_ROOT}:${MAIN_ROOT}/utils:${PATH} export LC_ALL=C diff --git a/examples/librispeech/s0/run.sh b/examples/librispeech/s0/run.sh index 6553e073d..af47fb9b8 100755 --- a/examples/librispeech/s0/run.sh +++ b/examples/librispeech/s0/run.sh @@ -6,6 +6,7 @@ stage=0 stop_stage=100 conf_path=conf/deepspeech2.yaml avg_num=30 +model_type=offline source ${MAIN_ROOT}/utils/parse_options.sh || exit 1; avg_ckpt=avg_${avg_num} @@ -19,20 +20,20 @@ fi if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then # train model, all `ckpt` under `exp` dir - CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 ./local/train.sh ${conf_path} ${ckpt} + CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 ./local/train.sh ${conf_path} ${ckpt} ${model_type} fi if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then # avg n best model - avg.sh exp/${ckpt}/checkpoints ${avg_num} + avg.sh best exp/${ckpt}/checkpoints ${avg_num} fi if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then # test ckpt avg_n - CUDA_VISIBLE_DEVICES=7 ./local/test.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} || exit -1 + CUDA_VISIBLE_DEVICES=7 ./local/test.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} ${model_type} || exit -1 fi if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then # export ckpt avg_n - CUDA_VISIBLE_DEVICES= ./local/export.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} exp/${ckpt}/checkpoints/${avg_ckpt}.jit + CUDA_VISIBLE_DEVICES= ./local/export.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} exp/${ckpt}/checkpoints/${avg_ckpt}.jit ${model_type} fi diff --git a/examples/librispeech/s1/README.md b/examples/librispeech/s1/README.md index 73f6156d9..4cb3629de 100644 --- a/examples/librispeech/s1/README.md +++ b/examples/librispeech/s1/README.md @@ -1,18 +1,44 @@ # LibriSpeech +## Data +| Data Subset | Duration in Seconds | +| --- | --- | +| data/manifest.train | 0.83s ~ 29.735s | +| data/manifest.dev | 1.065 ~ 35.155s | +| data/manifest.test-clean | 1.285s ~ 34.955s | + + ## Conformer +| Model | Params | Config | Augmentation| Test set | Decode method | Loss | WER | +| --- | --- | --- | --- | --- | --- | --- | --- | +| conformer | 47.63 M | conf/conformer.yaml | spec_aug + shift | test-clean | attention | 6.35 | 0.030162 | +| conformer | 47.63 M | conf/conformer.yaml | spec_aug + shift | test-clean | ctc_greedy_search | 6.35 | 0.037910 | +| conformer | 47.63 M | conf/conformer.yaml | spec_aug + shift | test-clean | ctc_prefix_beam_search | 6.35 | 0.037761 | +| conformer | 47.63 M | conf/conformer.yaml | spec_aug + shift | test-clean | attention_rescoring | 6.35 | 0.032115 | + +### Test w/o length filter +| Model | Params | Config | Augmentation| Test set | Decode method | Loss | WER | +| --- | --- | --- | --- | --- | --- | --- | --- | +| conformer | 47.63 M | conf/conformer.yaml | spec_aug + shift | test-clean-all | attention | 6.35 | 0.057117 | + +## Chunk Conformer +| Model | Params | Config | Augmentation| Test set | Decode method | Chunk Size & Left Chunks | Loss | WER | +| --- | --- | --- | --- | --- | --- | --- | --- | --- | +| conformer | 47.63 M | conf/chunk_conformer.yaml | spec_aug + shift | test-clean | attention | 16, -1 | 7.11 | 0.063193 | +| conformer | 47.63 M | conf/chunk_conformer.yaml | spec_aug + shift 
| test-clean | ctc_greedy_search | 16, -1 | 7.11 | 0.082394 | +| conformer | 47.63 M | conf/chunk_conformer.yaml | spec_aug + shift | test-clean | ctc_prefix_beam_search | 16, -1 | 7.11 | 0.082156 | +| conformer | 47.63 M | conf/chunk_conformer.yaml | spec_aug + shift | test-clean | attention_rescoring | 16, -1 | 7.11 | 0.071000 | -| Model | Config | Augmentation| Test set | Decode method | Loss | WER | -| --- | --- | --- | --- | --- | --- | --- | -| conformer | conf/conformer.yaml | spec_aug + shift | test-all | attention | test-all 6.35 | 0.057117 | -| conformer | conf/conformer.yaml | spec_aug + shift | test-clean | attention | test-all 6.35 | 0.030162 | -| conformer | conf/conformer.yaml | spec_aug + shift | test-clean | ctc_greedy_search | test-all 6.35 | 0.037910 | -| conformer | conf/conformer.yaml | spec_aug + shift | test-clean | ctc_prefix_beam_search | test-all 6.35 | 0.037761 | -| conformer | conf/conformer.yaml | spec_aug + shift | test-clean | attention_rescoring | test-all 6.35 | 0.032115 | ## Transformer +| Model | Params | Config | Augmentation| Test set | Decode method | Loss | WER | +| --- | --- | --- | --- | --- | --- | --- | --- | +| transformer | 32.52 M | conf/transformer.yaml | spec_aug + shift | test-clean | attention | 6.98 | 0.036 | -| Model | Config | Augmentation| Test set | Decode method | Loss | WER | -| --- | --- | --- | --- | --- | --- | --- | -| transformer | conf/transformer.yaml | spec_aug + shift | test-all | attention | test-all 6.98 | 0.066500 | -| transformer | conf/transformer.yaml | spec_aug + shift | test-clean | attention | test-all 6.98 | 0.036 | +### Test w/o length filter +| Model | Params | Config | Augmentation| Test set | Decode method | Loss | WER | +| --- | --- | --- | --- | --- | --- | --- | --- | +| transformer | 32.52 M | conf/transformer.yaml | spec_aug + shift | test-clean-all | attention | 7.63 | 0.056832 | +| transformer | 32.52 M | conf/transformer.yaml | spec_aug + shift | test-clean-all | ctc_greedy_search | 7.63 | 0.059742 | +| transformer | 32.52 M | conf/transformer.yaml | spec_aug + shift | test-clean-all | ctc_prefix_beam_search | 7.63 | 0.059057 | +| transformer | 32.52 M | conf/transformer.yaml | spec_aug + shift | test-clean-all | attention_rescoring | 7.63 | 0.047417 | diff --git a/examples/librispeech/s1/conf/augmentation.json b/examples/librispeech/s1/conf/augmentation.json index c1078393d..8e6e97040 100644 --- a/examples/librispeech/s1/conf/augmentation.json +++ b/examples/librispeech/s1/conf/augmentation.json @@ -27,7 +27,9 @@ "W": 80, "adaptive_number_ratio": 0, "adaptive_size_ratio": 0, - "max_n_time_masks": 20 + "max_n_time_masks": 20, + "replace_with_zero": true, + "warp_mode": "PIL" }, "prob": 1.0 } diff --git a/examples/librispeech/s1/conf/chunk_confermer.yaml b/examples/librispeech/s1/conf/chunk_conformer.yaml similarity index 95% rename from examples/librispeech/s1/conf/chunk_confermer.yaml rename to examples/librispeech/s1/conf/chunk_conformer.yaml index ec945a188..92db20f66 100644 --- a/examples/librispeech/s1/conf/chunk_confermer.yaml +++ b/examples/librispeech/s1/conf/chunk_conformer.yaml @@ -3,18 +3,20 @@ data: train_manifest: data/manifest.train dev_manifest: data/manifest.dev test_manifest: data/manifest.test - vocab_filepath: data/vocab.txt - unit_type: 'spm' - spm_model_prefix: 'data/bpe_unigram_5000' - mean_std_filepath: "" - augmentation_config: conf/augmentation.json - batch_size: 4 min_input_len: 0.5 max_input_len: 20.0 min_output_len: 0.0 max_output_len: 400.0 min_output_input_ratio: 0.05 
max_output_input_ratio: 10.0 + +collator: + vocab_filepath: data/vocab.txt + unit_type: 'spm' + spm_model_prefix: 'data/bpe_unigram_5000' + mean_std_filepath: "" + augmentation_config: conf/augmentation.json + batch_size: 16 raw_wav: True # use raw_wav or kaldi feature specgram_type: fbank #linear, mfcc, fbank feat_dim: 80 @@ -74,13 +76,15 @@ model: # hybrid CTC/attention model_conf: ctc_weight: 0.3 + ctc_dropoutrate: 0.0 + ctc_grad_norm_type: instance lsm_weight: 0.1 # label smoothing option length_normalized_loss: false training: - n_epoch: 120 - accum_grad: 1 + n_epoch: 240 + accum_grad: 8 global_grad_clip: 5.0 optim: adam optim_conf: @@ -91,6 +95,9 @@ training: warmup_steps: 25000 lr_decay: 1.0 log_interval: 100 + checkpoint: + kbest_n: 50 + latest_n: 5 decoding: diff --git a/examples/librispeech/s1/conf/chunk_transformer.yaml b/examples/librispeech/s1/conf/chunk_transformer.yaml index 3939ffc68..e0bc3135e 100644 --- a/examples/librispeech/s1/conf/chunk_transformer.yaml +++ b/examples/librispeech/s1/conf/chunk_transformer.yaml @@ -3,18 +3,20 @@ data: train_manifest: data/manifest.train dev_manifest: data/manifest.dev test_manifest: data/manifest.test - vocab_filepath: data/vocab.txt - unit_type: 'spm' - spm_model_prefix: 'data/bpe_unigram_5000' - mean_std_filepath: "" - augmentation_config: conf/augmentation.json - batch_size: 64 min_input_len: 0.5 # second max_input_len: 20.0 # second min_output_len: 0.0 # tokens max_output_len: 400.0 # tokens min_output_input_ratio: 0.05 max_output_input_ratio: 10.0 + +collator: + vocab_filepath: data/vocab.txt + unit_type: 'spm' + spm_model_prefix: 'data/bpe_unigram_5000' + mean_std_filepath: "" + augmentation_config: conf/augmentation.json + batch_size: 64 raw_wav: True # use raw_wav or kaldi feature specgram_type: fbank #linear, mfcc, fbank feat_dim: 80 @@ -67,6 +69,8 @@ model: # hybrid CTC/attention model_conf: ctc_weight: 0.3 + ctc_dropoutrate: 0.0 + ctc_grad_norm_type: instance lsm_weight: 0.1 # label smoothing option length_normalized_loss: false @@ -84,6 +88,9 @@ training: warmup_steps: 25000 lr_decay: 1.0 log_interval: 100 + checkpoint: + kbest_n: 50 + latest_n: 5 decoding: @@ -103,6 +110,6 @@ decoding: # >0: for decoding, use fixed chunk size as set. # 0: used for training, it's prohibited here. num_decoding_left_chunks: -1 # number of left chunks for decoding. Defaults to -1. - simulate_streaming: False # simulate streaming inference. Defaults to False. + simulate_streaming: true # simulate streaming inference. Defaults to False. 
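A note on the chunked decoding options above (`decoding_chunk_size`, `num_decoding_left_chunks`, `simulate_streaming`): with a positive chunk size the encoder processes the utterance in fixed-size chunks, each allowed to attend to at most `num_decoding_left_chunks` earlier chunks (-1 meaning all history). The sketch below only illustrates that windowing with hypothetical helper names; it is not the code used by `test.py`.

```python
# Illustrative sketch (not the repo's implementation): how a fixed
# decoding_chunk_size together with num_decoding_left_chunks windows a
# sequence of encoder frames for chunk-based streaming decoding.
def chunk_windows(num_frames, chunk_size, num_left_chunks=-1):
    """Yield (context_start, chunk_start, chunk_end) frame indices.

    chunk_size > 0: fixed chunk size, as in the streaming configs above.
    num_left_chunks < 0: attend to all history; >= 0: limit left context.
    """
    for chunk_start in range(0, num_frames, chunk_size):
        chunk_end = min(chunk_start + chunk_size, num_frames)
        if num_left_chunks < 0:
            context_start = 0
        else:
            context_start = max(0, chunk_start - num_left_chunks * chunk_size)
        yield context_start, chunk_start, chunk_end


if __name__ == "__main__":
    # e.g. chunk size 16 with unlimited left context ("16, -1" in the result tables)
    for win in chunk_windows(num_frames=50, chunk_size=16, num_left_chunks=-1):
        print(win)
```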
diff --git a/examples/librispeech/s1/conf/conformer.yaml b/examples/librispeech/s1/conf/conformer.yaml index 8f8bf4539..78be249cb 100644 --- a/examples/librispeech/s1/conf/conformer.yaml +++ b/examples/librispeech/s1/conf/conformer.yaml @@ -3,18 +3,20 @@ data: train_manifest: data/manifest.train dev_manifest: data/manifest.dev test_manifest: data/manifest.test-clean - vocab_filepath: data/vocab.txt - unit_type: 'spm' - spm_model_prefix: 'data/bpe_unigram_5000' - mean_std_filepath: "" - augmentation_config: conf/augmentation.json - batch_size: 16 min_input_len: 0.5 # seconds max_input_len: 20.0 # seconds min_output_len: 0.0 # tokens max_output_len: 400.0 # tokens min_output_input_ratio: 0.05 max_output_input_ratio: 10.0 + +collator: + vocab_filepath: data/vocab.txt + unit_type: 'spm' + spm_model_prefix: 'data/bpe_unigram_5000' + mean_std_filepath: "" + augmentation_config: conf/augmentation.json + batch_size: 32 raw_wav: True # use raw_wav or kaldi feature specgram_type: fbank #linear, mfcc, fbank feat_dim: 80 @@ -70,13 +72,15 @@ model: # hybrid CTC/attention model_conf: ctc_weight: 0.3 + ctc_dropoutrate: 0.0 + ctc_grad_norm_type: instance lsm_weight: 0.1 # label smoothing option length_normalized_loss: false training: n_epoch: 120 - accum_grad: 8 + accum_grad: 4 global_grad_clip: 3.0 optim: adam optim_conf: @@ -87,6 +91,9 @@ training: warmup_steps: 25000 lr_decay: 1.0 log_interval: 100 + checkpoint: + kbest_n: 50 + latest_n: 5 decoding: diff --git a/examples/librispeech/s1/conf/transformer.yaml b/examples/librispeech/s1/conf/transformer.yaml index a094b0fba..4aa7b9158 100644 --- a/examples/librispeech/s1/conf/transformer.yaml +++ b/examples/librispeech/s1/conf/transformer.yaml @@ -3,18 +3,20 @@ data: train_manifest: data/manifest.train dev_manifest: data/manifest.dev test_manifest: data/manifest.test-clean + min_input_len: 0.5 # second + max_input_len: 30.0 # second + min_output_len: 0.0 # tokens + max_output_len: 400.0 # tokens + min_output_input_ratio: 0.05 + max_output_input_ratio: 10.0 + +collator: vocab_filepath: data/vocab.txt unit_type: 'spm' spm_model_prefix: 'data/bpe_unigram_5000' mean_std_filepath: "" augmentation_config: conf/augmentation.json batch_size: 64 - min_input_len: 0.5 # second - max_input_len: 20.0 # second - min_output_len: 0.0 # tokens - max_output_len: 400.0 # tokens - min_output_input_ratio: 0.05 - max_output_input_ratio: 10.0 raw_wav: True # use raw_wav or kaldi feature specgram_type: fbank #linear, mfcc, fbank feat_dim: 80 @@ -31,7 +33,7 @@ data: keep_transcription_text: False sortagrad: True shuffle_method: batch_shuffle - num_workers: 2 + num_workers: 0 # network architecture @@ -65,6 +67,8 @@ model: # hybrid CTC/attention model_conf: ctc_weight: 0.3 + ctc_dropoutrate: 0.0 + ctc_grad_norm_type: instance lsm_weight: 0.1 # label smoothing option length_normalized_loss: false @@ -82,6 +86,9 @@ training: warmup_steps: 25000 lr_decay: 1.0 log_interval: 100 + checkpoint: + kbest_n: 50 + latest_n: 5 decoding: diff --git a/examples/librispeech/s1/local/align.sh b/examples/librispeech/s1/local/align.sh new file mode 100755 index 000000000..ad6c84bc8 --- /dev/null +++ b/examples/librispeech/s1/local/align.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +if [ $# != 2 ];then + echo "usage: ${0} config_path ckpt_path_prefix" + exit -1 +fi + +ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}') +echo "using $ngpu gpus..." 
+ +device=gpu +if [ ${ngpu} == 0 ];then + device=cpu +fi +config_path=$1 +ckpt_prefix=$2 + +batch_size=1 +output_dir=${ckpt_prefix} +mkdir -p ${output_dir} + +# align dump in `result_file` +# .tier, .TextGrid dump in `dir of result_file` +python3 -u ${BIN_DIR}/alignment.py \ +--device ${device} \ +--nproc 1 \ +--config ${config_path} \ +--result_file ${output_dir}/${type}.align \ +--checkpoint_path ${ckpt_prefix} \ +--opts decoding.batch_size ${batch_size} + +if [ $? -ne 0 ]; then + echo "Failed in ctc alignment!" + exit 1 +fi + +exit 0 diff --git a/examples/librispeech/s1/local/data.sh b/examples/librispeech/s1/local/data.sh index fbdd17d58..4ad476d37 100755 --- a/examples/librispeech/s1/local/data.sh +++ b/examples/librispeech/s1/local/data.sh @@ -1,4 +1,4 @@ -#! /usr/bin/env bash +#!/bin/bash stage=-1 stop_stage=100 diff --git a/examples/librispeech/s1/local/download_lm_en.sh b/examples/librispeech/s1/local/download_lm_en.sh index 05ea793fb..dc1bdf665 100755 --- a/examples/librispeech/s1/local/download_lm_en.sh +++ b/examples/librispeech/s1/local/download_lm_en.sh @@ -1,4 +1,4 @@ -#! /usr/bin/env bash +#!/bin/bash . ${MAIN_ROOT}/utils/utility.sh diff --git a/examples/librispeech/s1/local/export.sh b/examples/librispeech/s1/local/export.sh index 1b19d5720..f99a15bad 100755 --- a/examples/librispeech/s1/local/export.sh +++ b/examples/librispeech/s1/local/export.sh @@ -1,4 +1,4 @@ -#! /usr/bin/env bash +#!/bin/bash if [ $# != 3 ];then echo "usage: $0 config_path ckpt_prefix jit_model_path" @@ -13,7 +13,7 @@ ckpt_path_prefix=$2 jit_model_export_path=$3 device=gpu -if [ ngpu == 0 ];then +if [ ${ngpu} == 0 ];then device=cpu fi diff --git a/examples/librispeech/s1/local/test.sh b/examples/librispeech/s1/local/test.sh index 8c323e002..3bd3f0bba 100755 --- a/examples/librispeech/s1/local/test.sh +++ b/examples/librispeech/s1/local/test.sh @@ -1,4 +1,4 @@ -#! /usr/bin/env bash +#!/bin/bash if [ $# != 2 ];then echo "usage: ${0} config_path ckpt_path_prefix" @@ -9,12 +9,20 @@ ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}') echo "using $ngpu gpus..." device=gpu -if [ ngpu == 0 ];then +if [ ${ngpu} == 0 ];then device=cpu fi + config_path=$1 ckpt_prefix=$2 +chunk_mode=false +if [[ ${config_path} =~ ^.*chunk_.*yaml$ ]];then + chunk_mode=true +fi +echo "chunk mode ${chunk_mode}" + + # download language model #bash local/download_lm_en.sh #if [ $? -ne 0 ]; then @@ -23,7 +31,12 @@ ckpt_prefix=$2 for type in attention ctc_greedy_search; do echo "decoding ${type}" - batch_size=64 + if [ ${chunk_mode} == true ];then + # stream decoding only support batchsize=1 + batch_size=1 + else + batch_size=64 + fi python3 -u ${BIN_DIR}/test.py \ --device ${device} \ --nproc 1 \ diff --git a/examples/librispeech/s1/local/train.sh b/examples/librispeech/s1/local/train.sh index a4218aa86..f905b766e 100755 --- a/examples/librispeech/s1/local/train.sh +++ b/examples/librispeech/s1/local/train.sh @@ -1,4 +1,4 @@ -#! /usr/bin/env bash +#!/bin/bash if [ $# != 2 ];then echo "usage: CUDA_VISIBLE_DEVICES=0 ${0} config_path ckpt_name" @@ -12,18 +12,29 @@ config_path=$1 ckpt_name=$2 device=gpu -if [ ngpu == 0 ];then +if [ ${ngpu} == 0 ];then device=cpu fi echo "using ${device}..." 
mkdir -p exp +# seed may break model convergence +seed=0 +if [ ${seed} != 0 ]; then + export FLAGS_cudnn_deterministic=True +fi + python3 -u ${BIN_DIR}/train.py \ --device ${device} \ --nproc ${ngpu} \ --config ${config_path} \ ---output exp/${ckpt_name} +--output exp/${ckpt_name} \ +--seed ${seed} + +if [ ${seed} != 0 ]; then + unset FLAGS_cudnn_deterministic +fi if [ $? -ne 0 ]; then echo "Failed in training!" diff --git a/examples/librispeech/s1/path.sh b/examples/librispeech/s1/path.sh index 30adb6ca0..457f7e548 100644 --- a/examples/librispeech/s1/path.sh +++ b/examples/librispeech/s1/path.sh @@ -1,10 +1,10 @@ -export MAIN_ROOT=${PWD}/../../../ +export MAIN_ROOT=`realpath ${PWD}/../../../` -export PATH=${MAIN_ROOT}:${MAIN_ROOT}/utils:${PATH} +export PATH=${MAIN_ROOT}:${PWD}/utils:${PATH} export LC_ALL=C # Use UTF-8 in Python to avoid UnicodeDecodeError when LC_ALL=C -export PYTHONIOENCODING=UTF-8 +export PYTHONIOENCODING=UTF-8 export PYTHONPATH=${MAIN_ROOT}:${PYTHONPATH} export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib/ diff --git a/examples/librispeech/s1/run.sh b/examples/librispeech/s1/run.sh index 65194d902..aecd3f617 100755 --- a/examples/librispeech/s1/run.sh +++ b/examples/librispeech/s1/run.sh @@ -5,7 +5,7 @@ source path.sh stage=0 stop_stage=100 conf_path=conf/transformer.yaml -avg_num=30 +avg_num=5 source ${MAIN_ROOT}/utils/parse_options.sh || exit 1; avg_ckpt=avg_${avg_num} @@ -19,20 +19,25 @@ fi if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then # train model, all `ckpt` under `exp` dir - CUDA_VISIBLE_DEVICES=4,5,6,7 ./local/train.sh ${conf_path} ${ckpt} + CUDA_VISIBLE_DEVICES=0,1,2,3 ./local/train.sh ${conf_path} ${ckpt} fi if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then # avg n best model - avg.sh exp/${ckpt}/checkpoints ${avg_num} + avg.sh best exp/${ckpt}/checkpoints ${avg_num} fi if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then # test ckpt avg_n - CUDA_VISIBLE_DEVICES=7 ./local/test.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} || exit -1 + CUDA_VISIBLE_DEVICES=0 ./local/test.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} || exit -1 fi if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then + # ctc alignment of test data + CUDA_VISIBLE_DEVICES=0 ./local/align.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} || exit -1 +fi + +if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then # export ckpt avg_n CUDA_VISIBLE_DEVICES= ./local/export.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} exp/${ckpt}/checkpoints/${avg_ckpt}.jit fi diff --git a/examples/librispeech/s1/utils b/examples/librispeech/s1/utils new file mode 120000 index 000000000..973afe674 --- /dev/null +++ b/examples/librispeech/s1/utils @@ -0,0 +1 @@ +../../../utils \ No newline at end of file diff --git a/examples/librispeech/s2/README.md b/examples/librispeech/s2/README.md new file mode 100644 index 000000000..e4022f014 --- /dev/null +++ b/examples/librispeech/s2/README.md @@ -0,0 +1,41 @@ +# LibriSpeech + +## Data +| Data Subset | Duration in Seconds | +| data/manifest.train | 0.83s ~ 29.735s | +| data/manifest.dev | 1.065 ~ 35.155s | +| data/manifest.test-clean | 1.285s ~ 34.955s | + +## Conformer +| Model | Params | Config | Augmentation| Test set | Decode method | Loss | WER | +| --- | --- | --- | --- | --- | --- | --- | --- | +| conformer | 47.63 M | conf/conformer.yaml | spec_aug + shift | test-clean | attention | - | - | +| conformer | 47.63 M | conf/conformer.yaml | spec_aug + shift | test-clean | ctc_greedy_search | | | +| conformer | 47.63 M | 
conf/conformer.yaml | spec_aug + shift | test-clean | ctc_prefix_beam_search | | | +| conformer | 47.63 M | conf/conformer.yaml | spec_aug + shift | test-clean | attention_rescoring | | | + +### Test w/o length filter +| Model | Params | Config | Augmentation| Test set | Decode method | Loss | WER | +| --- | --- | --- | --- | --- | --- | --- | --- | +| conformer | 47.63 M | conf/conformer.yaml | spec_aug + shift | test-clean-all | attention | | | + + +## Chunk Conformer + +| Model | Params | Config | Augmentation| Test set | Decode method | Chunk Size & Left Chunks | Loss | WER | +| --- | --- | --- | --- | --- | --- | --- | --- | --- | +| conformer | 47.63 M | conf/chunk_conformer.yaml | spec_aug + shift | test-clean | attention | 16, -1 | | | +| conformer | 47.63 M | conf/chunk_conformer.yaml | spec_aug + shift | test-clean | ctc_greedy_search | 16, -1 | | | +| conformer | 47.63 M | conf/chunk_conformer.yaml | spec_aug + shift | test-clean | ctc_prefix_beam_search | 16, -1 | | - | +| conformer | 47.63 M | conf/chunk_conformer.yaml | spec_aug + shift | test-clean | attention_rescoring | 16, -1 | | - | + + +## Transformer +| Model | Params | Config | Augmentation| Test set | Decode method | Loss | WER | +| --- | --- | --- | --- | --- | --- | --- | --- | +| transformer | 32.52 M | conf/transformer.yaml | spec_aug + shift | test-clean | attention | | | + +### Test w/o length filter +| Model | Params | Config | Augmentation| Test set | Decode method | Loss | WER | +| --- | --- | --- | --- | --- | --- | --- | --- | +| transformer | 32.52 M | conf/transformer.yaml | spec_aug + shift | test-clean-all | attention | | | diff --git a/examples/librispeech/s2/conf/augmentation.json b/examples/librispeech/s2/conf/augmentation.json new file mode 100644 index 000000000..3b14b9d0c --- /dev/null +++ b/examples/librispeech/s2/conf/augmentation.json @@ -0,0 +1,19 @@ +[ + { + "type": "specaug", + "params": { + "W": 5, + "warp_mode": "PIL", + "F": 30, + "n_freq_masks": 2, + "T": 40, + "n_time_masks": 2, + "p": 1.0, + "adaptive_number_ratio": 0, + "adaptive_size_ratio": 0, + "max_n_time_masks": 20, + "replace_with_zero": false + }, + "prob": 1.0 + } +] diff --git a/examples/librispeech/s2/conf/chunk_conformer.yaml b/examples/librispeech/s2/conf/chunk_conformer.yaml new file mode 100644 index 000000000..92db20f66 --- /dev/null +++ b/examples/librispeech/s2/conf/chunk_conformer.yaml @@ -0,0 +1,122 @@ +# https://yaml.org/type/float.html +data: + train_manifest: data/manifest.train + dev_manifest: data/manifest.dev + test_manifest: data/manifest.test + min_input_len: 0.5 + max_input_len: 20.0 + min_output_len: 0.0 + max_output_len: 400.0 + min_output_input_ratio: 0.05 + max_output_input_ratio: 10.0 + +collator: + vocab_filepath: data/vocab.txt + unit_type: 'spm' + spm_model_prefix: 'data/bpe_unigram_5000' + mean_std_filepath: "" + augmentation_config: conf/augmentation.json + batch_size: 16 + raw_wav: True # use raw_wav or kaldi feature + specgram_type: fbank #linear, mfcc, fbank + feat_dim: 80 + delta_delta: False + dither: 1.0 + target_sample_rate: 16000 + max_freq: None + n_fft: None + stride_ms: 10.0 + window_ms: 25.0 + use_dB_normalization: True + target_dB: -20 + random_seed: 0 + keep_transcription_text: False + sortagrad: True + shuffle_method: batch_shuffle + num_workers: 2 + + +# network architecture +model: + cmvn_file: "data/mean_std.json" + cmvn_file_type: "json" + # encoder related + encoder: conformer + encoder_conf: + output_size: 256 # dimension of attention + attention_heads: 4 + linear_units: 2048 
# the number of units of position-wise feed forward + num_blocks: 12 # the number of encoder blocks + dropout_rate: 0.1 + positional_dropout_rate: 0.1 + attention_dropout_rate: 0.0 + input_layer: conv2d # encoder input type, you can chose conv2d, conv2d6 and conv2d8 + normalize_before: True + use_cnn_module: True + cnn_module_kernel: 15 + activation_type: 'swish' + pos_enc_layer_type: 'rel_pos' + selfattention_layer_type: 'rel_selfattn' + causal: True + use_dynamic_chunk: true + cnn_module_norm: 'layer_norm' # using nn.LayerNorm makes model converge faster + use_dynamic_left_chunk: false + + # decoder related + decoder: transformer + decoder_conf: + attention_heads: 4 + linear_units: 2048 + num_blocks: 6 + dropout_rate: 0.1 + positional_dropout_rate: 0.1 + self_attention_dropout_rate: 0.0 + src_attention_dropout_rate: 0.0 + + # hybrid CTC/attention + model_conf: + ctc_weight: 0.3 + ctc_dropoutrate: 0.0 + ctc_grad_norm_type: instance + lsm_weight: 0.1 # label smoothing option + length_normalized_loss: false + + +training: + n_epoch: 240 + accum_grad: 8 + global_grad_clip: 5.0 + optim: adam + optim_conf: + lr: 0.001 + weight_decay: 1e-06 + scheduler: warmuplr # pytorch v1.1.0+ required + scheduler_conf: + warmup_steps: 25000 + lr_decay: 1.0 + log_interval: 100 + checkpoint: + kbest_n: 50 + latest_n: 5 + + +decoding: + batch_size: 128 + error_rate_type: wer + decoding_method: attention # 'attention', 'ctc_greedy_search', 'ctc_prefix_beam_search', 'attention_rescoring' + lang_model_path: data/lm/common_crawl_00.prune01111.trie.klm + alpha: 2.5 + beta: 0.3 + beam_size: 10 + cutoff_prob: 1.0 + cutoff_top_n: 0 + num_proc_bsearch: 8 + ctc_weight: 0.5 # ctc weight for attention rescoring decode mode. + decoding_chunk_size: -1 # decoding chunk size. Defaults to -1. + # <0: for decoding, use full chunk. + # >0: for decoding, use fixed chunk size as set. + # 0: used for training, it's prohibited here. + num_decoding_left_chunks: -1 # number of left chunks for decoding. Defaults to -1. + simulate_streaming: true # simulate streaming inference. Defaults to False. 
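The `model_conf` block above sets `ctc_weight: 0.3` for the hybrid CTC/attention model. As a rough sketch (the standard interpolation of the two objectives, not a quote of this repo's loss code), the combined training loss looks like this:

```python
# Hedged sketch: the usual way a hybrid CTC/attention model combines its two
# losses with model_conf.ctc_weight (0.3 in the configs above). The function
# is illustrative only; the repo's own loss classes are not shown here.
def hybrid_loss(loss_ctc, loss_att, ctc_weight=0.3):
    """loss = ctc_weight * CTC loss + (1 - ctc_weight) * attention cross-entropy."""
    if ctc_weight == 0.0:
        return loss_att
    if ctc_weight == 1.0:
        return loss_ctc
    return ctc_weight * loss_ctc + (1.0 - ctc_weight) * loss_att


print(hybrid_loss(loss_ctc=120.0, loss_att=80.0, ctc_weight=0.3))  # 92.0
```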
+ + diff --git a/examples/librispeech/s2/conf/chunk_transformer.yaml b/examples/librispeech/s2/conf/chunk_transformer.yaml new file mode 100644 index 000000000..e0bc3135e --- /dev/null +++ b/examples/librispeech/s2/conf/chunk_transformer.yaml @@ -0,0 +1,115 @@ +# https://yaml.org/type/float.html +data: + train_manifest: data/manifest.train + dev_manifest: data/manifest.dev + test_manifest: data/manifest.test + min_input_len: 0.5 # second + max_input_len: 20.0 # second + min_output_len: 0.0 # tokens + max_output_len: 400.0 # tokens + min_output_input_ratio: 0.05 + max_output_input_ratio: 10.0 + +collator: + vocab_filepath: data/vocab.txt + unit_type: 'spm' + spm_model_prefix: 'data/bpe_unigram_5000' + mean_std_filepath: "" + augmentation_config: conf/augmentation.json + batch_size: 64 + raw_wav: True # use raw_wav or kaldi feature + specgram_type: fbank #linear, mfcc, fbank + feat_dim: 80 + delta_delta: False + dither: 1.0 + target_sample_rate: 16000 + max_freq: None + n_fft: None + stride_ms: 10.0 + window_ms: 25.0 + use_dB_normalization: True + target_dB: -20 + random_seed: 0 + keep_transcription_text: False + sortagrad: True + shuffle_method: batch_shuffle + num_workers: 2 + + +# network architecture +model: + cmvn_file: "data/mean_std.json" + cmvn_file_type: "json" + # encoder related + encoder: transformer + encoder_conf: + output_size: 256 # dimension of attention + attention_heads: 4 + linear_units: 2048 # the number of units of position-wise feed forward + num_blocks: 12 # the number of encoder blocks + dropout_rate: 0.1 + positional_dropout_rate: 0.1 + attention_dropout_rate: 0.0 + input_layer: conv2d # encoder input type, you can chose conv2d, conv2d6 and conv2d8 + normalize_before: true + use_dynamic_chunk: true + use_dynamic_left_chunk: false + + # decoder related + decoder: transformer + decoder_conf: + attention_heads: 4 + linear_units: 2048 + num_blocks: 6 + dropout_rate: 0.1 + positional_dropout_rate: 0.1 + self_attention_dropout_rate: 0.0 + src_attention_dropout_rate: 0.0 + + # hybrid CTC/attention + model_conf: + ctc_weight: 0.3 + ctc_dropoutrate: 0.0 + ctc_grad_norm_type: instance + lsm_weight: 0.1 # label smoothing option + length_normalized_loss: false + + +training: + n_epoch: 120 + accum_grad: 1 + global_grad_clip: 5.0 + optim: adam + optim_conf: + lr: 0.001 + weight_decay: 1e-06 + scheduler: warmuplr # pytorch v1.1.0+ required + scheduler_conf: + warmup_steps: 25000 + lr_decay: 1.0 + log_interval: 100 + checkpoint: + kbest_n: 50 + latest_n: 5 + + +decoding: + batch_size: 64 + error_rate_type: wer + decoding_method: attention # 'attention', 'ctc_greedy_search', 'ctc_prefix_beam_search', 'attention_rescoring' + lang_model_path: data/lm/common_crawl_00.prune01111.trie.klm + alpha: 2.5 + beta: 0.3 + beam_size: 10 + cutoff_prob: 1.0 + cutoff_top_n: 0 + num_proc_bsearch: 8 + ctc_weight: 0.5 # ctc weight for attention rescoring decode mode. + decoding_chunk_size: -1 # decoding chunk size. Defaults to -1. + # <0: for decoding, use full chunk. + # >0: for decoding, use fixed chunk size as set. + # 0: used for training, it's prohibited here. + num_decoding_left_chunks: -1 # number of left chunks for decoding. Defaults to -1. + simulate_streaming: true # simulate streaming inference. Defaults to False. 
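`scheduler: warmuplr` with `warmup_steps: 25000` usually denotes an inverse-square-root schedule with linear warmup. The following sketch assumes that standard formula and is only illustrative, not the scheduler class used in training:

```python
# Hedged sketch of a warmup learning-rate schedule of the kind usually meant
# by `scheduler: warmuplr`: the rate rises linearly for warmup_steps, peaks at
# base_lr, then decays with the inverse square root of the step count.
def warmup_lr(step, base_lr=0.001, warmup_steps=25000):
    step = max(step, 1)
    scale = warmup_steps ** 0.5 * min(step ** -0.5, step * warmup_steps ** -1.5)
    return base_lr * scale


for s in (1, 12500, 25000, 100000):
    print(s, warmup_lr(s))
```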
+ + diff --git a/examples/librispeech/s2/conf/conformer.yaml b/examples/librispeech/s2/conf/conformer.yaml new file mode 100644 index 000000000..9a7274135 --- /dev/null +++ b/examples/librispeech/s2/conf/conformer.yaml @@ -0,0 +1,118 @@ +# https://yaml.org/type/float.html +data: + train_manifest: data/manifest.train + dev_manifest: data/manifest.dev + test_manifest: data/manifest.test-clean + min_input_len: 0.5 # seconds + max_input_len: 20.0 # seconds + min_output_len: 0.0 # tokens + max_output_len: 400.0 # tokens + min_output_input_ratio: 0.05 + max_output_input_ratio: 10.0 + +collator: + vocab_filepath: data/vocab.txt + unit_type: 'spm' + spm_model_prefix: 'data/bpe_unigram_5000' + mean_std_filepath: "" + augmentation_config: conf/augmentation.json + batch_size: 16 + raw_wav: True # use raw_wav or kaldi feature + specgram_type: fbank #linear, mfcc, fbank + feat_dim: 80 + delta_delta: False + dither: 1.0 + target_sample_rate: 16000 + max_freq: None + n_fft: None + stride_ms: 10.0 + window_ms: 25.0 + use_dB_normalization: True + target_dB: -20 + random_seed: 0 + keep_transcription_text: False + sortagrad: True + shuffle_method: batch_shuffle + num_workers: 2 + + +# network architecture +model: + cmvn_file: "data/mean_std.json" + cmvn_file_type: "json" + # encoder related + encoder: conformer + encoder_conf: + output_size: 256 # dimension of attention + attention_heads: 4 + linear_units: 2048 # the number of units of position-wise feed forward + num_blocks: 12 # the number of encoder blocks + dropout_rate: 0.1 + positional_dropout_rate: 0.1 + attention_dropout_rate: 0.0 + input_layer: conv2d # encoder input type, you can chose conv2d, conv2d6 and conv2d8 + normalize_before: True + use_cnn_module: True + cnn_module_kernel: 15 + activation_type: 'swish' + pos_enc_layer_type: 'rel_pos' + selfattention_layer_type: 'rel_selfattn' + + # decoder related + decoder: transformer + decoder_conf: + attention_heads: 4 + linear_units: 2048 + num_blocks: 6 + dropout_rate: 0.1 + positional_dropout_rate: 0.1 + self_attention_dropout_rate: 0.0 + src_attention_dropout_rate: 0.0 + + # hybrid CTC/attention + model_conf: + ctc_weight: 0.3 + ctc_dropoutrate: 0.0 + ctc_grad_norm_type: instance + lsm_weight: 0.1 # label smoothing option + length_normalized_loss: false + + +training: + n_epoch: 120 + accum_grad: 8 + global_grad_clip: 3.0 + optim: adam + optim_conf: + lr: 0.004 + weight_decay: 1e-06 + scheduler: warmuplr # pytorch v1.1.0+ required + scheduler_conf: + warmup_steps: 25000 + lr_decay: 1.0 + log_interval: 100 + checkpoint: + kbest_n: 50 + latest_n: 5 + + +decoding: + batch_size: 64 + error_rate_type: wer + decoding_method: attention # 'attention', 'ctc_greedy_search', 'ctc_prefix_beam_search', 'attention_rescoring' + lang_model_path: data/lm/common_crawl_00.prune01111.trie.klm + alpha: 2.5 + beta: 0.3 + beam_size: 10 + cutoff_prob: 1.0 + cutoff_top_n: 0 + num_proc_bsearch: 8 + ctc_weight: 0.5 # ctc weight for attention rescoring decode mode. + decoding_chunk_size: -1 # decoding chunk size. Defaults to -1. + # <0: for decoding, use full chunk. + # >0: for decoding, use fixed chunk size as set. + # 0: used for training, it's prohibited here. + num_decoding_left_chunks: -1 # number of left chunks for decoding. Defaults to -1. + simulate_streaming: False # simulate streaming inference. Defaults to False. 
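The `specaug` entry in `conf/augmentation.json` masks random frequency and time bands of each feature matrix. Below is a hedged numpy illustration of the masking step using the same parameter names (`F`, `n_freq_masks`, `T`, `n_time_masks`, `replace_with_zero`); time warping (`W`, `warp_mode`) is omitted, and this is not the collator's actual augmentation code.

```python
# Hedged numpy sketch of SpecAugment-style frequency/time masking.
import numpy as np


def spec_augment(spec, F=30, n_freq_masks=2, T=40, n_time_masks=2,
                 replace_with_zero=False, rng=np.random.default_rng()):
    """spec: (time, freq) feature matrix; returns a masked copy."""
    out = spec.copy()
    fill = 0.0 if replace_with_zero else out.mean()
    n_frames, n_freq = out.shape
    for _ in range(n_freq_masks):
        f = rng.integers(0, F + 1)                 # mask width in freq bins
        f0 = rng.integers(0, max(n_freq - f, 1))   # mask start
        out[:, f0:f0 + f] = fill
    for _ in range(n_time_masks):
        t = rng.integers(0, T + 1)                 # mask width in frames
        t0 = rng.integers(0, max(n_frames - t, 1))
        out[t0:t0 + t, :] = fill
    return out


masked = spec_augment(np.random.rand(200, 80))
```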
+ + diff --git a/examples/librispeech/s2/conf/transformer.yaml b/examples/librispeech/s2/conf/transformer.yaml new file mode 100644 index 000000000..edf5b81dc --- /dev/null +++ b/examples/librispeech/s2/conf/transformer.yaml @@ -0,0 +1,104 @@ +# https://yaml.org/type/float.html +data: + train_manifest: data/manifest.train + dev_manifest: data/manifest.dev + test_manifest: data/manifest.test-clean + +collator: + vocab_filepath: data/train_960_unigram5000_units.txt + unit_type: 'spm' + spm_model_prefix: 'data/train_960_unigram5000' + feat_dim: 83 + stride_ms: 10.0 + window_ms: 25.0 + sortagrad: 0 # Feed samples from shortest to longest ; -1: enabled for all epochs, 0: disabled, other: enabled for 'other' epochs + batch_size: 32 + maxlen_in: 512 # if input length > maxlen-in, batchsize is automatically reduced + maxlen_out: 150 # if output length > maxlen-out, batchsize is automatically reduced + minibatches: 0 # for debug + batch_count: auto + batch_bins: 0 + batch_frames_in: 0 + batch_frames_out: 0 + batch_frames_inout: 0 + augmentation_config: conf/augmentation.json + num_workers: 0 + subsampling_factor: 1 + num_encs: 1 + + +# network architecture +model: + cmvn_file: + cmvn_file_type: "json" + # encoder related + encoder: transformer + encoder_conf: + output_size: 256 # dimension of attention + attention_heads: 4 + linear_units: 2048 # the number of units of position-wise feed forward + num_blocks: 12 # the number of encoder blocks + dropout_rate: 0.1 + positional_dropout_rate: 0.1 + attention_dropout_rate: 0.0 + input_layer: conv2d # encoder input type, you can chose conv2d, conv2d6 and conv2d8 + normalize_before: true + + # decoder related + decoder: transformer + decoder_conf: + attention_heads: 4 + linear_units: 2048 + num_blocks: 6 + dropout_rate: 0.1 + positional_dropout_rate: 0.1 + self_attention_dropout_rate: 0.0 + src_attention_dropout_rate: 0.0 + + # hybrid CTC/attention + model_conf: + ctc_weight: 0.3 + ctc_dropoutrate: 0.0 + ctc_grad_norm_type: instance + lsm_weight: 0.1 # label smoothing option + length_normalized_loss: false + + +training: + n_epoch: 120 + accum_grad: 2 + log_interval: 100 + checkpoint: + kbest_n: 50 + latest_n: 5 + +optim: adam +optim_conf: + global_grad_clip: 5.0 + weight_decay: 1.0e-06 +scheduler: warmuplr # pytorch v1.1.0+ required +scheduler_conf: + lr: 0.004 + warmup_steps: 25000 + lr_decay: 1.0 + +decoding: + batch_size: 64 + error_rate_type: wer + decoding_method: attention # 'attention', 'ctc_greedy_search', 'ctc_prefix_beam_search', 'attention_rescoring' + lang_model_path: data/lm/common_crawl_00.prune01111.trie.klm + alpha: 2.5 + beta: 0.3 + beam_size: 10 + cutoff_prob: 1.0 + cutoff_top_n: 0 + num_proc_bsearch: 8 + ctc_weight: 0.5 # ctc weight for attention rescoring decode mode. + decoding_chunk_size: -1 # decoding chunk size. Defaults to -1. + # <0: for decoding, use full chunk. + # >0: for decoding, use fixed chunk size as set. + # 0: used for training, it's prohibited here. + num_decoding_left_chunks: -1 # number of left chunks for decoding. Defaults to -1. + simulate_streaming: False # simulate streaming inference. Defaults to False. 
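`unit_type: 'spm'` in the collator sections means transcripts are segmented with a trained SentencePiece model named by `spm_model_prefix`. A minimal usage sketch, assuming the model file produced by `local/data.sh` exists at the configured prefix:

```python
# Minimal sketch of subword segmentation with a SentencePiece model.
# The path is the prefix named in the configs and is assumed to exist
# after data preparation; the example output is illustrative.
import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load("data/bpe_unigram_5000.model")

pieces = sp.EncodeAsPieces("SPEECH RECOGNITION IS FUN")
ids = sp.EncodeAsIds("SPEECH RECOGNITION IS FUN")
print(pieces)            # e.g. ['▁SPEECH', '▁RE', 'COG', 'NITION', '▁IS', '▁FUN']
print(sp.DecodeIds(ids))  # round-trips back to the original text
```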
+ + diff --git a/examples/librispeech/s2/local/align.sh b/examples/librispeech/s2/local/align.sh new file mode 100755 index 000000000..b3d8fa5f5 --- /dev/null +++ b/examples/librispeech/s2/local/align.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +if [ $# != 3 ];then + echo "usage: ${0} config_path dict_path ckpt_path_prefix" + exit -1 +fi + +ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}') +echo "using $ngpu gpus..." + +device=gpu +if [ ${ngpu} == 0 ];then + device=cpu +fi +config_path=$1 +dict_path=$2 +ckpt_prefix=$3 + +batch_size=1 +output_dir=${ckpt_prefix} +mkdir -p ${output_dir} + +# align dump in `result_file` +# .tier, .TextGrid dump in `dir of result_file` +python3 -u ${BIN_DIR}/test.py \ +--model-name 'u2_kaldi' \ +--run-mode 'align' \ +--dict-path ${dict_path} \ +--device ${device} \ +--nproc 1 \ +--config ${config_path} \ +--result-file ${output_dir}/${type}.align \ +--checkpoint_path ${ckpt_prefix} \ +--opts decoding.batch_size ${batch_size} + +if [ $? -ne 0 ]; then + echo "Failed in ctc alignment!" + exit 1 +fi + +exit 0 diff --git a/examples/librispeech/s2/local/data.sh b/examples/librispeech/s2/local/data.sh new file mode 100755 index 000000000..4ad476d37 --- /dev/null +++ b/examples/librispeech/s2/local/data.sh @@ -0,0 +1,111 @@ +#!/bin/bash + +stage=-1 +stop_stage=100 + +# bpemode (unigram or bpe) +nbpe=5000 +bpemode=unigram +bpeprefix="data/bpe_${bpemode}_${nbpe}" + +source ${MAIN_ROOT}/utils/parse_options.sh + + +mkdir -p data +TARGET_DIR=${MAIN_ROOT}/examples/dataset +mkdir -p ${TARGET_DIR} + +if [ ${stage} -le -1 ] && [ ${stop_stage} -ge -1 ]; then + # download data, generate manifests + python3 ${TARGET_DIR}/librispeech/librispeech.py \ + --manifest_prefix="data/manifest" \ + --target_dir="${TARGET_DIR}/librispeech" \ + --full_download="True" + + if [ $? -ne 0 ]; then + echo "Prepare LibriSpeech failed. Terminated." + exit 1 + fi + + for set in train-clean-100 train-clean-360 train-other-500 dev-clean dev-other test-clean test-other; do + mv data/manifest.${set} data/manifest.${set}.raw + done + + rm -rf data/manifest.train.raw data/manifest.dev.raw data/manifest.test.raw + for set in train-clean-100 train-clean-360 train-other-500; do + cat data/manifest.${set}.raw >> data/manifest.train.raw + done + + for set in dev-clean dev-other; do + cat data/manifest.${set}.raw >> data/manifest.dev.raw + done + + for set in test-clean test-other; do + cat data/manifest.${set}.raw >> data/manifest.test.raw + done +fi + +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + # build vocabulary + python3 ${MAIN_ROOT}/utils/build_vocab.py \ + --unit_type "spm" \ + --spm_vocab_size=${nbpe} \ + --spm_mode ${bpemode} \ + --spm_model_prefix ${bpeprefix} \ + --vocab_path="data/vocab.txt" \ + --manifest_paths="data/manifest.train.raw" + + if [ $? -ne 0 ]; then + echo "Build vocabulary failed. Terminated." + exit 1 + fi +fi + + +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + # compute mean and stddev for normalizer + num_workers=$(nproc) + python3 ${MAIN_ROOT}/utils/compute_mean_std.py \ + --manifest_path="data/manifest.train.raw" \ + --num_samples=-1 \ + --specgram_type="fbank" \ + --feat_dim=80 \ + --delta_delta=false \ + --sample_rate=16000 \ + --stride_ms=10.0 \ + --window_ms=25.0 \ + --use_dB_normalization=False \ + --num_workers=${num_workers} \ + --output_path="data/mean_std.json" + + if [ $? -ne 0 ]; then + echo "Compute mean and stddev failed. Terminated." 
+ exit 1 + fi +fi + + +if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then + # format manifest with tokenids, vocab size + for set in train dev test dev-clean dev-other test-clean test-other; do + { + python3 ${MAIN_ROOT}/utils/format_data.py \ + --feat_type "raw" \ + --cmvn_path "data/mean_std.json" \ + --unit_type "spm" \ + --spm_model_prefix ${bpeprefix} \ + --vocab_path="data/vocab.txt" \ + --manifest_path="data/manifest.${set}.raw" \ + --output_path="data/manifest.${set}" + + if [ $? -ne 0 ]; then + echo "Format manifest failed. Terminated." + exit 1 + fi + }& + done + wait +fi + +echo "LibriSpeech Data preparation done." +exit 0 diff --git a/examples/librispeech/s2/local/download_lm_en.sh b/examples/librispeech/s2/local/download_lm_en.sh new file mode 100755 index 000000000..dc1bdf665 --- /dev/null +++ b/examples/librispeech/s2/local/download_lm_en.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +. ${MAIN_ROOT}/utils/utility.sh + +DIR=data/lm +mkdir -p ${DIR} + +URL=https://deepspeech.bj.bcebos.com/en_lm/common_crawl_00.prune01111.trie.klm +MD5="099a601759d467cd0a8523ff939819c5" +TARGET=${DIR}/common_crawl_00.prune01111.trie.klm + +echo "Download language model ..." +download $URL $MD5 $TARGET +if [ $? -ne 0 ]; then + echo "Failed to download the language model!" + exit 1 +fi + + +exit 0 diff --git a/examples/librispeech/s2/local/espnet_json_to_manifest.py b/examples/librispeech/s2/local/espnet_json_to_manifest.py new file mode 100755 index 000000000..acfa46681 --- /dev/null +++ b/examples/librispeech/s2/local/espnet_json_to_manifest.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python +import argparse +import json + + +def main(args): + with open(args.json_file, 'r') as fin: + data_json = json.load(fin) + + # manifest format: + # {"input": [ + # {"feat": "dev/deltafalse/feats.1.ark:842920", "name": "input1", "shape": [349, 83]} + # ], + # "output": [ + # {"name": "target1", "shape": [12, 5002], "text": "NO APOLLO", "token": "▁NO ▁A PO LL O", "tokenid": "3144 482 352 269 317"} + # ], + # "utt2spk": "116-288045", + # "utt": "116-288045-0019"} + with open(args.manifest_file, 'w') as fout: + for key, value in data_json['utts'].items(): + value['utt'] = key + fout.write(json.dumps(value, ensure_ascii=False)) + fout.write("\n") + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument( + '--json-file', type=str, default=None, help="espnet data json file.") + parser.add_argument( + '--manifest-file', + type=str, + default='manifest.train', + help='manifest data json line file.') + args = parser.parse_args() + main(args) diff --git a/examples/librispeech/s2/local/export.sh b/examples/librispeech/s2/local/export.sh new file mode 100755 index 000000000..efa70a2b9 --- /dev/null +++ b/examples/librispeech/s2/local/export.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +if [ $# != 3 ];then + echo "usage: $0 config_path ckpt_prefix jit_model_path" + exit -1 +fi + +ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}') +echo "using $ngpu gpus..." + +config_path=$1 +ckpt_path_prefix=$2 +jit_model_export_path=$3 + +device=gpu +if [ ${ngpu} == 0 ];then + device=cpu +fi + +python3 -u ${BIN_DIR}/test.py \ +--model-name 'u2_kaldi' \ +--run-mode 'export' \ +--device ${device} \ +--nproc ${ngpu} \ +--config ${config_path} \ +--checkpoint_path ${ckpt_path_prefix} \ +--export_path ${jit_model_export_path} + + +if [ $? -ne 0 ]; then + echo "Failed in export!"
+ exit 1 +fi + + +exit 0 diff --git a/examples/librispeech/s2/local/test.sh b/examples/librispeech/s2/local/test.sh new file mode 100755 index 000000000..efd06f35e --- /dev/null +++ b/examples/librispeech/s2/local/test.sh @@ -0,0 +1,79 @@ +#!/bin/bash + +if [ $# != 3 ];then + echo "usage: ${0} config_path dict_path ckpt_path_prefix" + exit -1 +fi + +ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}') +echo "using $ngpu gpus..." + +device=gpu +if [ ${ngpu} == 0 ];then + device=cpu +fi + +config_path=$1 +dict_path=$2 +ckpt_prefix=$3 + +chunk_mode=false +if [[ ${config_path} =~ ^.*chunk_.*yaml$ ]];then + chunk_mode=true +fi +echo "chunk mode ${chunk_mode}" + + +# download language model +#bash local/download_lm_en.sh +#if [ $? -ne 0 ]; then +# exit 1 +#fi + +for type in attention ctc_greedy_search; do + echo "decoding ${type}" + if [ ${chunk_mode} == true ];then + # stream decoding only support batchsize=1 + batch_size=1 + else + batch_size=64 + fi + python3 -u ${BIN_DIR}/test.py \ + --model-name u2_kaldi \ + --run-mode test \ + --dict-path ${dict_path} \ + --device ${device} \ + --nproc 1 \ + --config ${config_path} \ + --result-file ${ckpt_prefix}.${type}.rsl \ + --checkpoint_path ${ckpt_prefix} \ + --opts decoding.decoding_method ${type} decoding.batch_size ${batch_size} + + if [ $? -ne 0 ]; then + echo "Failed in evaluation!" + exit 1 + fi +done + +for type in ctc_prefix_beam_search attention_rescoring; do + echo "decoding ${type}" + batch_size=1 + python3 -u ${BIN_DIR}/test.py \ + --model-name u2_kaldi \ + --run-mode test \ + --dict-path ${dict_path} \ + --device ${device} \ + --nproc 1 \ + --config ${config_path} \ + --result-file ${ckpt_prefix}.${type}.rsl \ + --checkpoint_path ${ckpt_prefix} \ + --opts decoding.decoding_method ${type} decoding.batch_size ${batch_size} + + if [ $? -ne 0 ]; then + echo "Failed in evaluation!" + exit 1 + fi +done + + +exit 0 diff --git a/examples/librispeech/s2/local/train.sh b/examples/librispeech/s2/local/train.sh new file mode 100755 index 000000000..66754201f --- /dev/null +++ b/examples/librispeech/s2/local/train.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +if [ $# != 2 ];then + echo "usage: CUDA_VISIBLE_DEVICES=0 ${0} config_path ckpt_name" + exit -1 +fi + +ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}') +echo "using $ngpu gpus..." + +config_path=$1 +ckpt_name=$2 + +device=gpu +if [ ${ngpu} == 0 ];then + device=cpu +fi +echo "using ${device}..." + +mkdir -p exp + +# seed may break model convergence +seed=0 +if [ ${seed} != 0 ]; then + export FLAGS_cudnn_deterministic=True +fi + +python3 -u ${BIN_DIR}/train.py \ +--model-name u2_kaldi \ +--device ${device} \ +--nproc ${ngpu} \ +--config ${config_path} \ +--output exp/${ckpt_name} \ +--seed ${seed} + +if [ ${seed} != 0 ]; then + unset FLAGS_cudnn_deterministic +fi + +if [ $? -ne 0 ]; then + echo "Failed in training!" 
+ exit 1 +fi + +exit 0 diff --git a/examples/librispeech/s2/path.sh b/examples/librispeech/s2/path.sh new file mode 100644 index 000000000..c90e27821 --- /dev/null +++ b/examples/librispeech/s2/path.sh @@ -0,0 +1,14 @@ +export MAIN_ROOT=`realpath ${PWD}/../../../` + +export PATH=${MAIN_ROOT}:${PWD}/utils:${PATH} +export LC_ALL=C + +# Use UTF-8 in Python to avoid UnicodeDecodeError when LC_ALL=C +export PYTHONIOENCODING=UTF-8 +export PYTHONPATH=${MAIN_ROOT}:${PYTHONPATH} + +export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib/ + + +MODEL=u2_kaldi +export BIN_DIR=${MAIN_ROOT}/deepspeech/exps/${MODEL}/bin diff --git a/examples/librispeech/s2/run.sh b/examples/librispeech/s2/run.sh new file mode 100755 index 000000000..46c8ea5d8 --- /dev/null +++ b/examples/librispeech/s2/run.sh @@ -0,0 +1,44 @@ +#!/bin/bash +set -e +source path.sh + +stage=0 +stop_stage=100 +conf_path=conf/transformer.yaml +dict_path=data/train_960_unigram5000_units.txt +avg_num=5 +source ${MAIN_ROOT}/utils/parse_options.sh || exit 1; + +avg_ckpt=avg_${avg_num} +ckpt=$(basename ${conf_path} | awk -F'.' '{print $1}') +echo "checkpoint name ${ckpt}" + +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + # prepare data + bash ./local/data.sh || exit -1 +fi + +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + # train model, all `ckpt` under `exp` dir + CUDA_VISIBLE_DEVICES=0,1,2,3 ./local/train.sh ${conf_path} ${ckpt} +fi + +if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then + # avg n best model + avg.sh best exp/${ckpt}/checkpoints ${avg_num} +fi + +if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then + # test ckpt avg_n + CUDA_VISIBLE_DEVICES=0 ./local/test.sh ${conf_path} ${dict_path} exp/${ckpt}/checkpoints/${avg_ckpt} || exit -1 +fi + +if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then + # ctc alignment of test data + CUDA_VISIBLE_DEVICES=0 ./local/align.sh ${conf_path} ${dict_path} exp/${ckpt}/checkpoints/${avg_ckpt} || exit -1 +fi + +if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then + # export ckpt avg_n + CUDA_VISIBLE_DEVICES= ./local/export.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} exp/${ckpt}/checkpoints/${avg_ckpt}.jit +fi diff --git a/examples/librispeech/s2/utils b/examples/librispeech/s2/utils new file mode 120000 index 000000000..256f914ab --- /dev/null +++ b/examples/librispeech/s2/utils @@ -0,0 +1 @@ +../../../utils/ \ No newline at end of file diff --git a/examples/ngram_lm/READEME.md b/examples/ngram_lm/READEME.md new file mode 100644 index 000000000..84e1380c3 --- /dev/null +++ b/examples/ngram_lm/READEME.md @@ -0,0 +1,3 @@ +# Ngram LM + +* s0 - kenlm ngram lm diff --git a/examples/ngram_lm/README.md b/examples/ngram_lm/README.md deleted file mode 100644 index 698d7c290..000000000 --- a/examples/ngram_lm/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# Ngram LM - -Train chinese chararctor ngram lm by [kenlm](https://github.com/kpu/kenlm). - -``` -bash run.sh -``` diff --git a/examples/ngram_lm/s0/.gitignore b/examples/ngram_lm/s0/.gitignore new file mode 100644 index 000000000..b20d93aa5 --- /dev/null +++ b/examples/ngram_lm/s0/.gitignore @@ -0,0 +1 @@ +data/lm diff --git a/examples/ngram_lm/s0/README.md b/examples/ngram_lm/s0/README.md new file mode 100644 index 000000000..65916ec54 --- /dev/null +++ b/examples/ngram_lm/s0/README.md @@ -0,0 +1,96 @@ +# Ngram LM + +Train chinese chararctor ngram lm by [kenlm](https://github.com/kpu/kenlm). + +## Run +``` +. 
path.sh +bash run.sh +``` + +## Results + +``` +exp/ +|-- text +|-- text.char.tn +|-- text.word.tn +|-- text_zh_char_o5_p0_1_2_4_4_a22_q8_b8.arpa +|-- text_zh_char_o5_p0_1_2_4_4_a22_q8_b8.arpa.klm.bin +|-- text_zh_word_o3_p0_0_0_a22_q8_b8.arpa +`-- text_zh_word_o3_p0_0_0_a22_q8_b8.arpa.klm.bin + +0 directories, 7 files +``` + +``` +3ae083627b9b6cef1a82d574d8483f97 exp/text +d97da252d2a63a662af22f98af30cb8c exp/text.char.tn +c18b03005bd094dbfd9b46442be361fd exp/text.word.tn +73dbf50097896eda33985e11e1ba9a3a exp/text_zh_char_o5_p0_1_2_4_4_a22_q8_b8.arpa +01334e2044c474b99c4f2ffbed790626 exp/text_zh_char_o5_p0_1_2_4_4_a22_q8_b8.arpa.klm.bin +36a42de548045b54662411ae7982c77f exp/text_zh_word_o3_p0_0_0_a22_q8_b8.arpa +332422803ffd73dd7ffd16cd2b0abcd5 exp/text_zh_word_o3_p0_0_0_a22_q8_b8.arpa.klm.bin +``` + +``` +==> exp/text <== +少先队员因该为老人让坐 +祛痘印可以吗?有效果吗? +不知这款牛奶口感怎样? 小孩子喝行吗! +是转基因油? +我家宝宝13斤用多大码的 +会起坨吗? +请问给送上楼吗? +亲是送赁上门吗 +送货时候有外包装没有还是直接发货过来 +会不会有坏的? + +==> exp/text.char.tn <== +少 先 队 员 因 该 为 老 人 让 坐 +祛 痘 印 可 以 吗 有 效 果 吗 +不 知 这 款 牛 奶 口 感 怎 样 小 孩 子 喝 行 吗 +是 转 基 因 油 +我 家 宝 宝 十 三 斤 用 多 大 码 的 +会 起 坨 吗 +请 问 给 送 上 楼 吗 +亲 是 送 赁 上 门 吗 +送 货 时 候 有 外 包 装 没 有 还 是 直 接 发 货 过 来 +会 不 会 有 坏 的 + +==> exp/text.word.tn <== +少先队员 因该 为 老人 让 坐 +祛痘 印 可以 吗 有 效果 吗 +不知 这 款 牛奶 口感 怎样 小孩子 喝行 吗 +是 转基因 油 +我家 宝宝 十三斤 用多大码 的 +会起 坨 吗 +请问 给 送 上楼 吗 +亲是 送赁 上门 吗 +送货 时候 有 外包装 没有 还是 直接 发货 过来 +会 不会 有坏 的 + +==> exp/text_zh_char_o5_p0_1_2_4_4_a22_q8_b8.arpa <== +\data\ +ngram 1=587 +ngram 2=395 +ngram 3=100 +ngram 4=2 +ngram 5=0 + +\1-grams: +-3.272324 0 +0 -0.36706257 + +==> exp/text_zh_word_o3_p0_0_0_a22_q8_b8.arpa <== +\data\ +ngram 1=689 +ngram 2=1398 +ngram 3=1506 + +\1-grams: +-3.1755018 0 +0 -0.23069073 +-1.2318869 0 +-3.067262 少先队员 -0.051341705 +``` diff --git a/examples/ngram_lm/data/README.md b/examples/ngram_lm/s0/data/README.md similarity index 100% rename from examples/ngram_lm/data/README.md rename to examples/ngram_lm/s0/data/README.md diff --git a/examples/ngram_lm/data/custom_confusion.txt b/examples/ngram_lm/s0/data/custom_confusion.txt similarity index 100% rename from examples/ngram_lm/data/custom_confusion.txt rename to examples/ngram_lm/s0/data/custom_confusion.txt diff --git a/examples/ngram_lm/data/text_correct.txt b/examples/ngram_lm/s0/data/text_correct.txt similarity index 100% rename from examples/ngram_lm/data/text_correct.txt rename to examples/ngram_lm/s0/data/text_correct.txt diff --git a/examples/ngram_lm/local/build_zh_lm.sh b/examples/ngram_lm/s0/local/build_zh_lm.sh similarity index 100% rename from examples/ngram_lm/local/build_zh_lm.sh rename to examples/ngram_lm/s0/local/build_zh_lm.sh diff --git a/examples/ngram_lm/local/download_lm_zh.sh b/examples/ngram_lm/s0/local/download_lm_zh.sh similarity index 100% rename from examples/ngram_lm/local/download_lm_zh.sh rename to examples/ngram_lm/s0/local/download_lm_zh.sh diff --git a/examples/ngram_lm/local/kenlm_score_test.py b/examples/ngram_lm/s0/local/kenlm_score_test.py similarity index 100% rename from examples/ngram_lm/local/kenlm_score_test.py rename to examples/ngram_lm/s0/local/kenlm_score_test.py diff --git a/examples/ngram_lm/path.sh b/examples/ngram_lm/s0/path.sh similarity index 67% rename from examples/ngram_lm/path.sh rename to examples/ngram_lm/s0/path.sh index 84e2de7d0..cbd1d82c0 100644 --- a/examples/ngram_lm/path.sh +++ b/examples/ngram_lm/s0/path.sh @@ -1,4 +1,4 @@ -export MAIN_ROOT=${PWD}/../../ +export MAIN_ROOT=`realpath ${PWD}/../../../` export PATH=${MAIN_ROOT}:${MAIN_ROOT}/utils:${PATH} export LC_ALL=C @@ -7,4 +7,4 @@ export 
LC_ALL=C export PYTHONIOENCODING=UTF-8 export PYTHONPATH=${MAIN_ROOT}:${PYTHONPATH} -export LD_LIBRARY_PATH=/usr/local/lib/:${LD_LIBRARY_PATH} \ No newline at end of file +export LD_LIBRARY_PATH=/usr/local/lib/:${LD_LIBRARY_PATH} diff --git a/examples/ngram_lm/requirements.txt b/examples/ngram_lm/s0/requirements.txt similarity index 100% rename from examples/ngram_lm/requirements.txt rename to examples/ngram_lm/s0/requirements.txt diff --git a/examples/ngram_lm/run.sh b/examples/ngram_lm/s0/run.sh similarity index 100% rename from examples/ngram_lm/run.sh rename to examples/ngram_lm/s0/run.sh diff --git a/examples/punctuation_restoration/README.md b/examples/punctuation_restoration/README.md new file mode 100644 index 000000000..42ae0db3a --- /dev/null +++ b/examples/punctuation_restoration/README.md @@ -0,0 +1,3 @@ +# Punctation Restoration + +Please using [PaddleSpeechTask](https://github.com/745165806/PaddleSpeechTask] to do this task. diff --git a/examples/spm/README.md b/examples/spm/README.md index 3109d3ffb..fc4478ebb 100644 --- a/examples/spm/README.md +++ b/examples/spm/README.md @@ -1,7 +1,96 @@ # [SentencePiece Model](https://github.com/google/sentencepiece) +## Run Train a `spm` model for English tokenizer. ``` +. path.sh bash run.sh ``` + +## Results + +``` +data/ +└── lang_char + ├── input.bpe + ├── input.decode + ├── input.txt + ├── train_unigram100.model + ├── train_unigram100_units.txt + └── train_unigram100.vocab + +1 directory, 6 files +``` + +``` +b5a230c26c61db5c36f34e503102f936 data/lang_char/input.bpe +ec5a9b24acc35469229e41256ceaf77d data/lang_char/input.decode +ec5a9b24acc35469229e41256ceaf77d data/lang_char/input.txt +124bf3fe7ce3b73b1994234c15268577 data/lang_char/train_unigram100.model +0df2488cc8eaace95eb12713facb5cf0 data/lang_char/train_unigram100_units.txt +46360cac35c751310e8e8ffd3a034cb5 data/lang_char/train_unigram100.vocab +``` + +``` +==> data/lang_char/input.bpe <== +▁mi ster ▁quilter ▁ is ▁the ▁a p ost le ▁o f ▁the ▁mi d d le ▁c las s es ▁ and ▁we ▁ar e ▁g l a d ▁ to ▁we l c om e ▁h is ▁g o s pe l +▁ n or ▁ is ▁mi ster ▁quilter ' s ▁ma nne r ▁ l ess ▁in ter es t ing ▁tha n ▁h is ▁ma t ter +▁h e ▁ t e ll s ▁us ▁tha t ▁ at ▁ t h is ▁f es t ive ▁ s e ason ▁o f ▁the ▁ y e ar ▁w ith ▁ ch r is t m a s ▁ and ▁ro a s t ▁be e f ▁ l o om ing ▁be fore ▁us ▁ s i mile s ▁d r a w n ▁f r om ▁ e at ing ▁ and ▁it s ▁re s u l t s ▁o c c ur ▁m ost ▁re a di l y ▁ to ▁the ▁ mind +▁h e ▁ ha s ▁g r a v e ▁d o u b t s ▁w h e t h er ▁ s i r ▁f r e d er ic k ▁ l eig h to n ' s ▁w or k ▁ is ▁re all y ▁gre e k ▁a f ter ▁ all ▁ and ▁c a n ▁di s c o v er ▁in ▁it ▁b u t ▁li t t le ▁o f ▁ro ck y ▁it ha c a +▁li nne ll ' s ▁ p ic tur es ▁ar e ▁a ▁ s or t ▁o f ▁ u p ▁g u ar d s ▁ and ▁ at ▁ em ▁painting s ▁ and ▁m ason ' s ▁ e x q u is i t e ▁ i d y ll s ▁ar e ▁a s ▁ n at ion a l ▁a s ▁a ▁ j ing o ▁ p o em ▁mi ster ▁b i r k e t ▁f o ster ' s ▁ l and s c a pe s ▁ s mile ▁ at ▁on e ▁m u ch ▁in ▁the ▁ s a m e ▁w a y ▁tha t ▁mi ster ▁c ar k er ▁us e d ▁ to ▁f las h ▁h is ▁ t e e t h ▁ and ▁mi ster ▁ j o h n ▁c o ll i er ▁g ive s ▁h is ▁ s i t ter ▁a ▁ ch e er f u l ▁ s l a p ▁on ▁the ▁b a ck ▁be fore ▁h +e ▁ s a y s ▁li k e ▁a ▁ s ha m p o o er ▁in ▁a ▁ tur k is h ▁b at h ▁ n e x t ▁ma n +▁it ▁ is ▁o b v i o u s l y ▁ u nne c ess ar y ▁for ▁us ▁ to ▁ p o i n t ▁o u t ▁h o w ▁ l u m i n o u s ▁the s e ▁c rit ic is m s ▁ar e ▁h o w ▁d e l ic at e ▁in ▁ e x p r ess ion +▁on ▁the ▁g e n er a l ▁ p r i n c i p l es ▁o f ▁ar t ▁mi ster ▁quilter ▁w rit es ▁w ith ▁ e qual ▁ l u c i di t y 
+▁painting ▁h e ▁ t e ll s ▁us ▁ is ▁o f ▁a ▁di f f er e n t ▁ qual i t y ▁ to ▁ma t h em at ic s ▁ and ▁f i nish ▁in ▁ar t ▁ is ▁a d d ing ▁m or e ▁f a c t +▁a s ▁for ▁ e t ch ing s ▁the y ▁ar e ▁o f ▁ t w o ▁ k i n d s ▁b rit is h ▁ and ▁for eig n +▁h e ▁ l a ment s ▁m ost ▁b i t ter l y ▁the ▁di v or c e ▁tha t ▁ ha s ▁be e n ▁ma d e ▁be t w e e n ▁d e c or at ive ▁ar t ▁ and ▁w ha t ▁we ▁us u all y ▁c all ▁ p ic tur es ▁ma k es ▁the ▁c u s t om ar y ▁a p pe a l ▁ to ▁the ▁ las t ▁ j u d g ment ▁ and ▁re mind s ▁us ▁tha t ▁in ▁the ▁gre at ▁d a y s ▁o f ▁ar t ▁mi c ha e l ▁a n g e l o ▁w a s ▁the ▁f ur nish ing ▁ u p h o l ster er + +==> data/lang_char/input.decode <== +mister quilter is the apostle of the middle classes and we are glad to welcome his gospel +nor is mister quilter's manner less interesting than his matter +he tells us that at this festive season of the year with christmas and roast beef looming before us similes drawn from eating and its results occur most readily to the mind +he has grave doubts whether sir frederick leighton's work is really greek after all and can discover in it but little of rocky ithaca +linnell's pictures are a sort of up guards and at em paintings and mason's exquisite idylls are as national as a jingo poem mister birket foster's landscapes smile at one much in the same way that mister carker used to flash his teeth and mister john collier gives his sitter a cheerful slap on the back before he says like a shampooer in a turkish bath next man +it is obviously unnecessary for us to point out how luminous these criticisms are how delicate in expression +on the general principles of art mister quilter writes with equal lucidity +painting he tells us is of a different quality to mathematics and finish in art is adding more fact +as for etchings they are of two kinds british and foreign +he laments most bitterly the divorce that has been made between decorative art and what we usually call pictures makes the customary appeal to the last judgment and reminds us that in the great days of art michael angelo was the furnishing upholsterer + +==> data/lang_char/input.txt <== +mister quilter is the apostle of the middle classes and we are glad to welcome his gospel +nor is mister quilter's manner less interesting than his matter +he tells us that at this festive season of the year with christmas and roast beef looming before us similes drawn from eating and its results occur most readily to the mind +he has grave doubts whether sir frederick leighton's work is really greek after all and can discover in it but little of rocky ithaca +linnell's pictures are a sort of up guards and at em paintings and mason's exquisite idylls are as national as a jingo poem mister birket foster's landscapes smile at one much in the same way that mister carker used to flash his teeth and mister john collier gives his sitter a cheerful slap on the back before he says like a shampooer in a turkish bath next man +it is obviously unnecessary for us to point out how luminous these criticisms are how delicate in expression +on the general principles of art mister quilter writes with equal lucidity +painting he tells us is of a different quality to mathematics and finish in art is adding more fact +as for etchings they are of two kinds british and foreign +he laments most bitterly the divorce that has been made between decorative art and what we usually call pictures makes the customary appeal to the last judgment and reminds us that in the great days of art michael angelo was the 
furnishing upholsterer + +==> data/lang_char/train_unigram100_units.txt <== + 0 + 1 +' 2 +a 3 +all 4 +and 5 +ar 6 +ason 7 +at 8 +b 9 + +==> data/lang_char/train_unigram100.vocab <== + 0 + 0 + 0 +▁ -2.01742 +e -2.7203 +s -2.82989 +t -2.99689 +l -3.53267 +n -3.84935 +o -3.88229 +``` diff --git a/examples/spm/path.sh b/examples/spm/path.sh index 9da641e19..202378894 100644 --- a/examples/spm/path.sh +++ b/examples/spm/path.sh @@ -1,4 +1,4 @@ -export MAIN_ROOT=${PWD}/../../ +export MAIN_ROOT=`realpath ${PWD}/../../` export PATH=${MAIN_ROOT}:${MAIN_ROOT}/utils:${PATH} export LC_ALL=C diff --git a/examples/ted_en_zh/README.md b/examples/ted_en_zh/README.md new file mode 100644 index 000000000..5664b06b3 --- /dev/null +++ b/examples/ted_en_zh/README.md @@ -0,0 +1,3 @@ +# TED En -> Zh + +* t0 for u2 speech translation diff --git a/examples/ted_en_zh/t0/.gitignore b/examples/ted_en_zh/t0/.gitignore new file mode 100644 index 000000000..469c61715 --- /dev/null +++ b/examples/ted_en_zh/t0/.gitignore @@ -0,0 +1,3 @@ +TED-En-Zh +data +exp diff --git a/examples/ted_en_zh/t0/README.md b/examples/ted_en_zh/t0/README.md new file mode 100644 index 000000000..e2443d363 --- /dev/null +++ b/examples/ted_en_zh/t0/README.md @@ -0,0 +1,10 @@ + +# TED En-Zh + +## Dataset + +| Data Subset | Duration in Seconds | +| --- | --- | +| data/manifest.train | 0.942 ~ 60 | +| data/manifest.dev | 1.151 ~ 39 | +| data/manifest.test | 1.1 ~ 42.746 | diff --git a/examples/ted_en_zh/t0/conf/transformer.yaml b/examples/ted_en_zh/t0/conf/transformer.yaml new file mode 100644 index 000000000..1aad86d22 --- /dev/null +++ b/examples/ted_en_zh/t0/conf/transformer.yaml @@ -0,0 +1,111 @@ +# https://yaml.org/type/float.html +data: + train_manifest: data/manifest.train.tiny + dev_manifest: data/manifest.dev + test_manifest: data/manifest.test + min_input_len: 0.05 # second + max_input_len: 30.0 # second + min_output_len: 0.0 # tokens + max_output_len: 400.0 # tokens + min_output_input_ratio: 0.01 + max_output_input_ratio: 20.0 + +collator: + vocab_filepath: data/vocab.txt + unit_type: 'spm' + spm_model_prefix: data/bpe_unigram_8000 + mean_std_filepath: "" + # augmentation_config: conf/augmentation.json + batch_size: 10 + raw_wav: True # use raw_wav or kaldi feature + specgram_type: fbank #linear, mfcc, fbank + feat_dim: 80 + delta_delta: False + dither: 1.0 + target_sample_rate: 16000 + max_freq: None + n_fft: None + stride_ms: 10.0 + window_ms: 25.0 + use_dB_normalization: True + target_dB: -20 + random_seed: 0 + keep_transcription_text: False + sortagrad: True + shuffle_method: batch_shuffle + num_workers: 2 + + +# network architecture +model: + cmvn_file: "data/mean_std.json" + cmvn_file_type: "json" + # encoder related + encoder: transformer + encoder_conf: + output_size: 256 # dimension of attention + attention_heads: 4 + linear_units: 2048 # the number of units of position-wise feed forward + num_blocks: 12 # the number of encoder blocks + dropout_rate: 0.1 + positional_dropout_rate: 0.1 + attention_dropout_rate: 0.0 + input_layer: conv2d # encoder input type, you can chose conv2d, conv2d6 and conv2d8 + normalize_before: true + + # decoder related + decoder: transformer + decoder_conf: + attention_heads: 4 + linear_units: 2048 + num_blocks: 6 + dropout_rate: 0.1 + positional_dropout_rate: 0.1 + self_attention_dropout_rate: 0.0 + src_attention_dropout_rate: 0.0 + + # hybrid CTC/attention + model_conf: + asr_weight: 0.0 + ctc_weight: 0.0 + ctc_dropoutrate: 0.0 + ctc_grad_norm_type: instance + lsm_weight: 0.1 # label smoothing option + 
length_normalized_loss: false + + +training: + n_epoch: 120 + accum_grad: 2 + global_grad_clip: 5.0 + optim: adam + optim_conf: + lr: 0.004 + weight_decay: 1e-06 + scheduler: warmuplr # pytorch v1.1.0+ required + scheduler_conf: + warmup_steps: 25000 + lr_decay: 1.0 + log_interval: 5 + checkpoint: + kbest_n: 50 + latest_n: 5 + + +decoding: + batch_size: 5 + error_rate_type: char-bleu + decoding_method: fullsentence # 'fullsentence', 'simultaneous' + alpha: 2.5 + beta: 0.3 + beam_size: 10 + cutoff_prob: 1.0 + cutoff_top_n: 0 + num_proc_bsearch: 8 + ctc_weight: 0.5 # ctc weight for attention rescoring decode mode. + decoding_chunk_size: -1 # decoding chunk size. Defaults to -1. + # <0: for decoding, use full chunk. + # >0: for decoding, use fixed chunk size as set. + # 0: used for training, it's prohibited here. + num_decoding_left_chunks: -1 # number of left chunks for decoding. Defaults to -1. + simulate_streaming: False # simulate streaming inference. Defaults to False. diff --git a/examples/ted_en_zh/t0/conf/transformer_joint_noam.yaml b/examples/ted_en_zh/t0/conf/transformer_joint_noam.yaml new file mode 100644 index 000000000..0144c40d4 --- /dev/null +++ b/examples/ted_en_zh/t0/conf/transformer_joint_noam.yaml @@ -0,0 +1,113 @@ +# https://yaml.org/type/float.html +data: + train_manifest: data/manifest.train + dev_manifest: data/manifest.dev + test_manifest: data/manifest.test + min_input_len: 0.05 # second + max_input_len: 30.0 # second + min_output_len: 0.0 # tokens + max_output_len: 400.0 # tokens + min_output_input_ratio: 0.01 + max_output_input_ratio: 20.0 + +collator: + vocab_filepath: data/vocab.txt + unit_type: 'spm' + spm_model_prefix: data/bpe_unigram_8000 + mean_std_filepath: "" + # augmentation_config: conf/augmentation.json + batch_size: 10 + raw_wav: True # use raw_wav or kaldi feature + specgram_type: fbank #linear, mfcc, fbank + feat_dim: 80 + delta_delta: False + dither: 1.0 + target_sample_rate: 16000 + max_freq: None + n_fft: None + stride_ms: 10.0 + window_ms: 25.0 + use_dB_normalization: True + target_dB: -20 + random_seed: 0 + keep_transcription_text: False + sortagrad: True + shuffle_method: batch_shuffle + num_workers: 2 + + +# network architecture +model: + cmvn_file: "data/mean_std.json" + cmvn_file_type: "json" + # encoder related + encoder: transformer + encoder_conf: + output_size: 256 # dimension of attention + attention_heads: 4 + linear_units: 2048 # the number of units of position-wise feed forward + num_blocks: 12 # the number of encoder blocks + dropout_rate: 0.1 + positional_dropout_rate: 0.1 + attention_dropout_rate: 0.0 + input_layer: conv2d # encoder input type, you can chose conv2d, conv2d6 and conv2d8 + normalize_before: true + + # decoder related + decoder: transformer + decoder_conf: + attention_heads: 4 + linear_units: 2048 + num_blocks: 6 + dropout_rate: 0.1 + positional_dropout_rate: 0.1 + self_attention_dropout_rate: 0.0 + src_attention_dropout_rate: 0.0 + + # hybrid CTC/attention + model_conf: + asr_weight: 0.5 + ctc_weight: 0.3 + ctc_dropoutrate: 0.0 + ctc_grad_norm_type: instance + lsm_weight: 0.1 # label smoothing option + length_normalized_loss: false + + +training: + n_epoch: 120 + accum_grad: 2 + global_grad_clip: 5.0 + optim: adam + optim_conf: + lr: 2.5 + weight_decay: 1e-06 + scheduler: noam + scheduler_conf: + warmup_steps: 25000 + lr_decay: 1.0 + log_interval: 5 + checkpoint: + kbest_n: 50 + latest_n: 5 + + +decoding: + batch_size: 5 + error_rate_type: char-bleu + decoding_method: fullsentence # 'fullsentence', 'simultaneous' + 
alpha: 2.5 + beta: 0.3 + beam_size: 10 + cutoff_prob: 1.0 + cutoff_top_n: 0 + num_proc_bsearch: 8 + ctc_weight: 0.5 # ctc weight for attention rescoring decode mode. + decoding_chunk_size: -1 # decoding chunk size. Defaults to -1. + # <0: for decoding, use full chunk. + # >0: for decoding, use fixed chunk size as set. + # 0: used for training, it's prohibited here. + num_decoding_left_chunks: -1 # number of left chunks for decoding. Defaults to -1. + simulate_streaming: False # simulate streaming inference. Defaults to False. + + diff --git a/examples/ted_en_zh/t0/local/data.sh b/examples/ted_en_zh/t0/local/data.sh new file mode 100755 index 000000000..32cfd9d7a --- /dev/null +++ b/examples/ted_en_zh/t0/local/data.sh @@ -0,0 +1,111 @@ +#!/bin/bash + +stage=-1 +stop_stage=100 + +# bpemode (unigram or bpe) +nbpe=8000 +bpemode=unigram +bpeprefix="data/bpe_${bpemode}_${nbpe}" +data_dir=/mnt/dataset/TED_EnZh + + +source ${MAIN_ROOT}/utils/parse_options.sh + +TARGET_DIR=${MAIN_ROOT}/examples/dataset +mkdir -p ${TARGET_DIR} +mkdir -p data + + +if [ ${stage} -le -1 ] && [ ${stop_stage} -ge -1 ]; then + if [ ! -e ${data_dir} ]; then + echo "Error: Dataset is not available. Please download and unzip the dataset" + echo "Download Link: https://pan.baidu.com/s/18L-59wgeS96WkObISrytQQ Passwd: bva0" + echo "The tree of the directory should be:" + echo "." + echo "|-- En-Zh" + echo "|-- test-segment" + echo " |-- tst2010" + echo " |-- ..." + echo "|-- train-split" + echo " |-- train-segment" + echo "|-- README.md" + + exit 1 + fi + + # generate manifests + python3 ${TARGET_DIR}/ted_en_zh/ted_en_zh.py \ + --manifest_prefix="data/manifest" \ + --src_dir="${data_dir}" + + echo "Raw data pre-processing complete." +fi + + +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + # build vocabulary + python3 ${MAIN_ROOT}/utils/build_vocab.py \ + --unit_type "spm" \ + --spm_vocab_size=${nbpe} \ + --spm_mode ${bpemode} \ + --spm_model_prefix ${bpeprefix} \ + --vocab_path="data/vocab.txt" \ + --text_keys 'text' 'text1' \ + --manifest_paths="data/manifest.train.raw" + + + if [ $? -ne 0 ]; then + echo "Build vocabulary failed. Terminated." + exit 1 + fi +fi + + +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + # compute mean and stddev for normalizer + num_workers=$(nproc) + python3 ${MAIN_ROOT}/utils/compute_mean_std.py \ + --manifest_path="data/manifest.train.raw" \ + --num_samples=-1 \ + --specgram_type="fbank" \ + --feat_dim=80 \ + --delta_delta=false \ + --sample_rate=16000 \ + --stride_ms=10.0 \ + --window_ms=25.0 \ + --use_dB_normalization=False \ + --num_workers=${num_workers} \ + --output_path="data/mean_std.json" + + if [ $? -ne 0 ]; then + echo "Compute mean and stddev failed. Terminated." + exit 1 + fi +fi + + +if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then + # format manifest with tokenids, vocab size + for set in train dev test; do + { + python3 ${MAIN_ROOT}/utils/format_triplet_data.py \ + --feat_type "raw" \ + --cmvn_path "data/mean_std.json" \ + --unit_type "spm" \ + --spm_model_prefix ${bpeprefix} \ + --vocab_path="data/vocab.txt" \ + --manifest_path="data/manifest.${set}.raw" \ + --output_path="data/manifest.${set}" + + if [ $? -ne 0 ]; then + echo "Format manifest failed. Terminated." + exit 1 + fi + }& + done + wait +fi + +echo "Ted En-Zh Data preparation done." +exit 0 diff --git a/examples/ted_en_zh/t0/local/test.sh b/examples/ted_en_zh/t0/local/test.sh new file mode 100755 index 000000000..642328e88 --- /dev/null +++ b/examples/ted_en_zh/t0/local/test.sh @@ -0,0 +1,35 @@ +#!
/usr/bin/env bash + +if [ $# != 2 ];then + echo "usage: ${0} config_path ckpt_path_prefix" + exit -1 +fi + +ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}') +echo "using $ngpu gpus..." + +device=gpu +if [ ${ngpu} == 0 ];then + device=cpu +fi +config_path=$1 +ckpt_prefix=$2 + +for type in fullsentence; do + echo "decoding ${type}" + batch_size=32 + python3 -u ${BIN_DIR}/test.py \ + --device ${device} \ + --nproc 1 \ + --config ${config_path} \ + --result_file ${ckpt_prefix}.${type}.rsl \ + --checkpoint_path ${ckpt_prefix} \ + --opts decoding.decoding_method ${type} decoding.batch_size ${batch_size} + + if [ $? -ne 0 ]; then + echo "Failed in evaluation!" + exit 1 + fi +done + +exit 0 diff --git a/examples/ted_en_zh/t0/local/train.sh b/examples/ted_en_zh/t0/local/train.sh new file mode 100755 index 000000000..f905b766e --- /dev/null +++ b/examples/ted_en_zh/t0/local/train.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +if [ $# != 2 ];then + echo "usage: CUDA_VISIBLE_DEVICES=0 ${0} config_path ckpt_name" + exit -1 +fi + +ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}') +echo "using $ngpu gpus..." + +config_path=$1 +ckpt_name=$2 + +device=gpu +if [ ${ngpu} == 0 ];then + device=cpu +fi +echo "using ${device}..." + +mkdir -p exp + +# seed may break model convergence +seed=0 +if [ ${seed} != 0 ]; then + export FLAGS_cudnn_deterministic=True +fi + +python3 -u ${BIN_DIR}/train.py \ +--device ${device} \ +--nproc ${ngpu} \ +--config ${config_path} \ +--output exp/${ckpt_name} \ +--seed ${seed} + +if [ ${seed} != 0 ]; then + unset FLAGS_cudnn_deterministic +fi + +if [ $? -ne 0 ]; then + echo "Failed in training!" + exit 1 +fi + +exit 0 diff --git a/examples/ted_en_zh/t0/path.sh b/examples/ted_en_zh/t0/path.sh new file mode 100644 index 000000000..a7f60425f --- /dev/null +++ b/examples/ted_en_zh/t0/path.sh @@ -0,0 +1,14 @@ +export MAIN_ROOT=`realpath ${PWD}/../../../` + +export PATH=${MAIN_ROOT}:${MAIN_ROOT}/utils:${PATH} +export LC_ALL=C + +# Use UTF-8 in Python to avoid UnicodeDecodeError when LC_ALL=C +export PYTHONIOENCODING=UTF-8 +export PYTHONPATH=${MAIN_ROOT}:${PYTHONPATH} + +export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib/ + + +MODEL=u2_st +export BIN_DIR=${MAIN_ROOT}/deepspeech/exps/${MODEL}/bin diff --git a/examples/ted_en_zh/t0/run.sh b/examples/ted_en_zh/t0/run.sh new file mode 100755 index 000000000..7508f0e8a --- /dev/null +++ b/examples/ted_en_zh/t0/run.sh @@ -0,0 +1,40 @@ +#!/bin/bash +set -e +source path.sh + +stage=0 +stop_stage=100 +conf_path=conf/transformer_joint_noam.yaml +avg_num=5 +data_path=./TED-En-Zh # path to unzipped data +source ${MAIN_ROOT}/utils/parse_options.sh || exit 1; + +avg_ckpt=avg_${avg_num} +ckpt=$(basename ${conf_path} | awk -F'.' 
'{print $1}') +echo "checkpoint name ${ckpt}" + + +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + # prepare data + bash ./local/data.sh --data_dir ${data_path} || exit -1 +fi + +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + # train model, all `ckpt` under `exp` dir + CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 ./local/train.sh ${conf_path} ${ckpt} +fi + +if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then + # avg n best model + avg.sh best exp/${ckpt}/checkpoints ${avg_num} +fi + +if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then + # test ckpt avg_n + CUDA_VISIBLE_DEVICES=0 ./local/test.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} || exit -1 +fi + +if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then + # export ckpt avg_n + CUDA_VISIBLE_DEVICES= ./local/export.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} exp/${ckpt}/checkpoints/${avg_ckpt}.jit +fi diff --git a/examples/thchs30/README.md b/examples/thchs30/README.md new file mode 100644 index 000000000..7b3cc3d95 --- /dev/null +++ b/examples/thchs30/README.md @@ -0,0 +1,3 @@ +# thchs30 + +* a0 for mfa alignment diff --git a/examples/thchs30/a0/README.md b/examples/thchs30/a0/README.md new file mode 100644 index 000000000..da56fffc8 --- /dev/null +++ b/examples/thchs30/a0/README.md @@ -0,0 +1,42 @@ +# THCHS-30 数据集强制对齐实验 +----- +本实验对 THCHS-30 中文数据集用 [Montreal-Forced-Aligner](https://montreal-forced-aligner.readthedocs.io/en/latest/index.html) 进行强制对齐。 +THCHS-30 的文本标注数据分为: + 1. 汉字级别(word),该数据集用空格对词进行了划分,我们在使用时按照将不同字之间按空格划分 + 2. 音节级别(syllable),即汉语中的一个拼音 + 3. 音素级别(phone),一个拼音有多个音素组成,汉语的声母韵母可以理解为音素,不同的数据集有各自的音素标准,THCHS-30 数据集与标贝 BZNSYP 数据集的音素标准略有不同 + + 数据 A11_0 文本示例如下: +``` +绿 是 阳春 烟 景 大块 文章 的 底色 四月 的 林 峦 更是 绿 得 鲜活 秀媚 诗意 盎然↩ +lv4 shi4 yang2 chun1 yan1 jing3 da4 kuai4 wen2 zhang1 de5 di3 se4 si4 yue4 de5 lin2 luan2 geng4 shi4 lv4 de5 xian1 huo2 xiu4 mei4 shi1 yi4 ang4 ran2↩ +l v4 sh ix4 ii iang2 ch un1 ii ian1 j ing3 d a4 k uai4 uu un2 zh ang1 d e5 d i3 s e4 s iy4 vv ve4 d e5 l in2 l uan2 g eng4 sh ix4 l v4 d e5 x ian1 h uo2 x iu4 m ei4 sh ix1 ii i4 aa ang4 r an2 +``` +## 开始实验 +--- +在本项目的 根目录/tools 执行 +``` +make +``` +下载 MFA 的可执行包(也会同时下载本项目所需的其他工具) +执行如下命令: +``` +cd a0 +./run.sh +``` +应用程序会自动下载 THCHS-30数据集,处理成 MFA 所需的文件格式并开始训练,您可以修改 `run.sh` 中的参数 `LEXICON_NAME` 来决定您需要强制对齐的级别(word、syllable 和 phone) +## MFA 所使用的字典 +--- +MFA 字典的格式请参考: [MFA 官方文档 Dictionary format ](https://montreal-forced-aligner.readthedocs.io/en/latest/dictionary.html) +phone.lexicon 直接使用的是 `THCHS-30/data_thchs30/lm_phone/lexicon.txt` +word.lexicon 考虑到了中文的多音字,使用**带概率的字典**, 生成规则请参考 `local/gen_word2phone.py` +`syllable.lexicon` 获取自 [DNSun/thchs30-pinyin2tone](https://github.com/DNSun/thchs30-pinyin2tone) +## 对齐结果 +--- +我们提供了三种级别 MFA 训练好的对齐结果、模型和字典(`syllable.lexicon` 在 `data/dict` 中,`phone.lexicon` 和` word.lexicon` 运行数据预处理代码后会自动从原始数据集复制或生成) + +**phone 级别:** [phone.lexicon](https://paddlespeech.bj.bcebos.com/MFA/THCHS30/phone/phone.lexicon)、 [对齐结果](https://paddlespeech.bj.bcebos.com/MFA/THCHS30/phone/thchs30_alignment.tar.gz)、[模型](https://paddlespeech.bj.bcebos.com/MFA/THCHS30/phone/thchs30_model.zip) +**syllabel 级别:** [syllable.lexicon](https://paddlespeech.bj.bcebos.com/MFA/THCHS30/syllable/syllable.lexicon)、[对齐结果](https://paddlespeech.bj.bcebos.com/MFA/THCHS30/syllable/thchs30_alignment.tar.gz)、[模型](https://paddlespeech.bj.bcebos.com/MFA/THCHS30/syllable/thchs30_model.zip) +**word 级别:** 
[word.lexicon](https://paddlespeech.bj.bcebos.com/MFA/THCHS30/word/word.lexicon)、[对齐结果](https://paddlespeech.bj.bcebos.com/MFA/THCHS30/word/thchs30_alignment.tar.gz)、[模型](https://paddlespeech.bj.bcebos.com/MFA/THCHS30/word/thchs30_model.zip) + +随后,您可以参考 [MFA 官方文档 Align using pretrained models](https://montreal-forced-aligner.readthedocs.io/en/stable/aligning.html#align-using-pretrained-models) 使用我们给您提供好的模型直接对自己的数据集进行强制对齐,注意,您需要使用和模型对应的 lexicon 文件,当文本是汉字时,您需要用空格把不同的**汉字**(而不是词语)分开 diff --git a/examples/thchs30/a0/data/dict/syllable.lexicon b/examples/thchs30/a0/data/dict/syllable.lexicon new file mode 100644 index 000000000..e1da4e04c --- /dev/null +++ b/examples/thchs30/a0/data/dict/syllable.lexicon @@ -0,0 +1,2490 @@ +A0 aa a0 +A1 aa a1 +A2 aa a2 +A3 aa a3 +A4 aa a4 +AI0 aa ai0 +AI1 aa ai1 +AI2 aa ai2 +AI3 aa ai3 +AI4 aa ai4 +AN0 aa an0 +AN1 aa an1 +AN2 aa an2 +AN3 aa an3 +AN4 aa an4 +ANG0 aa ang0 +ANG1 aa ang1 +ANG2 aa ang2 +ANG3 aa ang3 +ANG4 aa ang4 +AO0 aa ao0 +AO1 aa ao1 +AO2 aa ao2 +AO3 aa ao3 +AO4 aa ao4 +BA0 b a0 +BA1 b a1 +BA2 b a2 +BA3 b a3 +BA4 b a4 +BAI0 b ai0 +BAI1 b ai1 +BAI2 b ai2 +BAI3 b ai3 +BAI4 b ai4 +BAN0 b an0 +BAN1 b an1 +BAN2 b an2 +BAN3 b an3 +BAN4 b an4 +BANG0 b ang0 +BANG1 b ang1 +BANG2 b ang2 +BANG3 b ang3 +BANG4 b ang4 +BAO0 b ao0 +BAO1 b ao1 +BAO2 b ao2 +BAO3 b ao3 +BAO4 b ao4 +BEI0 b ei0 +BEI1 b ei1 +BEI2 b ei2 +BEI3 b ei3 +BEI4 b ei4 +BEN0 b en0 +BEN1 b en1 +BEN2 b en2 +BEN3 b en3 +BEN4 b en4 +BENG0 b eng0 +BENG1 b eng1 +BENG2 b eng2 +BENG3 b eng3 +BENG4 b eng4 +BI0 b i0 +BI1 b i1 +BI2 b i2 +BI3 b i3 +BI4 b i4 +BIAN0 b ian0 +BIAN1 b ian1 +BIAN2 b ian2 +BIAN3 b ian3 +BIAN4 b ian4 +BIAO0 b iao0 +BIAO1 b iao1 +BIAO2 b iao2 +BIAO3 b iao3 +BIAO4 b iao4 +BIE0 b ie0 +BIE1 b ie1 +BIE2 b ie2 +BIE3 b ie3 +BIE4 b ie4 +BIN0 b in0 +BIN1 b in1 +BIN2 b in2 +BIN3 b in3 +BIN4 b in4 +BING0 b ing0 +BING1 b ing1 +BING2 b ing2 +BING3 b ing3 +BING4 b ing4 +BO0 b o0 +BO1 b o1 +BO2 b o2 +BO3 b o3 +BO4 b o4 +BU0 b u0 +BU1 b u1 +BU2 b u2 +BU3 b u3 +BU4 b u4 +CA0 c a0 +CA1 c a1 +CA2 c a2 +CA3 c a3 +CA4 c a4 +CAI0 c ai0 +CAI1 c ai1 +CAI2 c ai2 +CAI3 c ai3 +CAI4 c ai4 +CAN0 c an0 +CAN1 c an1 +CAN2 c an2 +CAN3 c an3 +CAN4 c an4 +CANG0 c ang0 +CANG1 c ang1 +CANG2 c ang2 +CANG3 c ang3 +CANG4 c ang4 +CAO0 c ao0 +CAO1 c ao1 +CAO2 c ao2 +CAO3 c ao3 +CAO4 c ao4 +CE0 c e0 +CE1 c e1 +CE2 c e2 +CE3 c e3 +CE4 c e4 +CEN0 c en0 +CEN1 c en1 +CEN2 c en2 +CEN3 c en3 +CEN4 c en4 +CENG0 c eng0 +CENG1 c eng1 +CENG2 c eng2 +CENG3 c eng3 +CENG4 c eng4 +CHA0 ch a0 +CHA1 ch a1 +CHA2 ch a2 +CHA3 ch a3 +CHA4 ch a4 +CHAI0 ch ai0 +CHAI1 ch ai1 +CHAI2 ch ai2 +CHAI3 ch ai3 +CHAI4 ch ai4 +CHAN0 ch an0 +CHAN1 ch an1 +CHAN2 ch an2 +CHAN3 ch an3 +CHAN4 ch an4 +CHANG0 ch ang0 +CHANG1 ch ang1 +CHANG2 ch ang2 +CHANG3 ch ang3 +CHANG4 ch ang4 +CHAO0 ch ao0 +CHAO1 ch ao1 +CHAO2 ch ao2 +CHAO3 ch ao3 +CHAO4 ch ao4 +CHE0 ch e0 +CHE1 ch e1 +CHE2 ch e2 +CHE3 ch e3 +CHE4 ch e4 +CHEN0 ch en0 +CHEN1 ch en1 +CHEN2 ch en2 +CHEN3 ch en3 +CHEN4 ch en4 +CHENG0 ch eng0 +CHENG1 ch eng1 +CHENG2 ch eng2 +CHENG3 ch eng3 +CHENG4 ch eng4 +CHI0 ch ix0 +CHI1 ch ix1 +CHI2 ch ix2 +CHI3 ch ix3 +CHI4 ch ix4 +CHONG0 ch ong0 +CHONG1 ch ong1 +CHONG2 ch ong2 +CHONG3 ch ong3 +CHONG4 ch ong4 +CHOU0 ch ou0 +CHOU1 ch ou1 +CHOU2 ch ou2 +CHOU3 ch ou3 +CHOU4 ch ou4 +CHU0 ch u0 +CHU1 ch u1 +CHU2 ch u2 +CHU3 ch u3 +CHU4 ch u4 +CHUAI0 ch uai0 +CHUAI1 ch uai1 +CHUAI2 ch uai2 +CHUAI3 ch uai3 +CHUAI4 ch uai4 +CHUAN0 ch uan0 +CHUAN1 ch uan1 +CHUAN2 ch uan2 +CHUAN3 ch uan3 +CHUAN4 ch uan4 +CHUANG0 ch uang0 +CHUANG1 ch uang1 +CHUANG2 ch uang2 +CHUANG3 ch uang3 +CHUANG4 
ch uang4 +CHUI0 ch ui0 +CHUI1 ch ui1 +CHUI2 ch ui2 +CHUI3 ch ui3 +CHUI4 ch ui4 +CHUN0 ch un0 +CHUN1 ch un1 +CHUN2 ch un2 +CHUN3 ch un3 +CHUN4 ch un4 +CHUO0 ch uo0 +CHUO1 ch uo1 +CHUO2 ch uo2 +CHUO3 ch uo3 +CHUO4 ch uo4 +CI0 c iy0 +CI1 c iy1 +CI2 c iy2 +CI3 c iy3 +CI4 c iy4 +CONG0 c ong0 +CONG1 c ong1 +CONG2 c ong2 +CONG3 c ong3 +CONG4 c ong4 +COU0 c ou0 +COU1 c ou1 +COU2 c ou2 +COU3 c ou3 +COU4 c ou4 +CU0 c u0 +CU1 c u1 +CU2 c u2 +CU3 c u3 +CU4 c u4 +CUAN0 c uan0 +CUAN1 c uan1 +CUAN2 c uan2 +CUAN3 c uan3 +CUAN4 c uan4 +CUI0 c ui0 +CUI1 c ui1 +CUI2 c ui2 +CUI3 c ui3 +CUI4 c ui4 +CUN0 c un0 +CUN1 c un1 +CUN2 c un2 +CUN3 c un3 +CUN4 c un4 +CUO0 c uo0 +CUO1 c uo1 +CUO2 c uo2 +CUO3 c uo3 +CUO4 c uo4 +DA0 d a0 +DA1 d a1 +DA2 d a2 +DA3 d a3 +DA4 d a4 +DAI0 d ai0 +DAI1 d ai1 +DAI2 d ai2 +DAI3 d ai3 +DAI4 d ai4 +DAN0 d an0 +DAN1 d an1 +DAN2 d an2 +DAN3 d an3 +DAN4 d an4 +DANG0 d ang0 +DANG1 d ang1 +DANG2 d ang2 +DANG3 d ang3 +DANG4 d ang4 +DAO0 d ao0 +DAO1 d ao1 +DAO2 d ao2 +DAO3 d ao3 +DAO4 d ao4 +DE0 d e0 +DE1 d e1 +DE2 d e2 +DE3 d e3 +DE4 d e4 +DEI0 d ei0 +DEI1 d ei1 +DEI2 d ei2 +DEI3 d ei3 +DEI4 d ei4 +DEN0 d en0 +DEN1 d en1 +DEN2 d en2 +DEN3 d en3 +DEN4 d en4 +DENG0 d eng0 +DENG1 d eng1 +DENG2 d eng2 +DENG3 d eng3 +DENG4 d eng4 +DI0 d i0 +DI1 d i1 +DI2 d i2 +DI3 d i3 +DI4 d i4 +DIA0 d ia0 +DIA1 d ia1 +DIA2 d ia2 +DIA3 d ia3 +DIA4 d ia4 +DIAN0 d ian0 +DIAN1 d ian1 +DIAN2 d ian2 +DIAN3 d ian3 +DIAN4 d ian4 +DIAO0 d iao0 +DIAO1 d iao1 +DIAO2 d iao2 +DIAO3 d iao3 +DIAO4 d iao4 +DIE0 d ie0 +DIE1 d ie1 +DIE2 d ie2 +DIE3 d ie3 +DIE4 d ie4 +DING0 d ing0 +DING1 d ing1 +DING2 d ing2 +DING3 d ing3 +DING4 d ing4 +DIU0 d iu0 +DIU1 d iu1 +DIU2 d iu2 +DIU3 d iu3 +DIU4 d iu4 +DONG0 d ong0 +DONG1 d ong1 +DONG2 d ong2 +DONG3 d ong3 +DONG4 d ong4 +DOU0 d ou0 +DOU1 d ou1 +DOU2 d ou2 +DOU3 d ou3 +DOU4 d ou4 +DU0 d u0 +DU1 d u1 +DU2 d u2 +DU3 d u3 +DU4 d u4 +DUAN0 d uan0 +DUAN1 d uan1 +DUAN2 d uan2 +DUAN3 d uan3 +DUAN4 d uan4 +DUI0 d ui0 +DUI1 d ui1 +DUI2 d ui2 +DUI3 d ui3 +DUI4 d ui4 +DUN0 d un0 +DUN1 d un1 +DUN2 d un2 +DUN3 d un3 +DUN4 d un4 +DUO0 d uo0 +DUO1 d uo1 +DUO2 d uo2 +DUO3 d uo3 +DUO4 d uo4 +E0 ee e0 +E1 ee e1 +E2 ee e2 +E3 ee e3 +E4 ee e4 +EN0 ee en0 +EN1 ee en1 +EN2 ee en2 +EN3 ee en3 +EN4 ee en4 +ER0 ee er0 +ER1 ee er1 +ER2 ee er2 +ER3 ee er3 +ER4 ee er4 +FA0 f a0 +FA1 f a1 +FA2 f a2 +FA3 f a3 +FA4 f a4 +FAN0 f an0 +FAN1 f an1 +FAN2 f an2 +FAN3 f an3 +FAN4 f an4 +FANG0 f ang0 +FANG1 f ang1 +FANG2 f ang2 +FANG3 f ang3 +FANG4 f ang4 +FEI0 f ei0 +FEI1 f ei1 +FEI2 f ei2 +FEI3 f ei3 +FEI4 f ei4 +FEN0 f en0 +FEN1 f en1 +FEN2 f en2 +FEN3 f en3 +FEN4 f en4 +FENG0 f eng0 +FENG1 f eng1 +FENG2 f eng2 +FENG3 f eng3 +FENG4 f eng4 +FO0 f o0 +FO1 f o1 +FO2 f o2 +FO3 f o3 +FO4 f o4 +FOU0 f ou0 +FOU1 f ou1 +FOU2 f ou2 +FOU3 f ou3 +FOU4 f ou4 +FU0 f u0 +FU1 f u1 +FU2 f u2 +FU3 f u3 +FU4 f u4 +GA0 g a0 +GA1 g a1 +GA2 g a2 +GA3 g a3 +GA4 g a4 +GAI0 g ai0 +GAI1 g ai1 +GAI2 g ai2 +GAI3 g ai3 +GAI4 g ai4 +GAN0 g an0 +GAN1 g an1 +GAN2 g an2 +GAN3 g an3 +GAN4 g an4 +GANG0 g ang0 +GANG1 g ang1 +GANG2 g ang2 +GANG3 g ang3 +GANG4 g ang4 +GAO0 g ao0 +GAO1 g ao1 +GAO2 g ao2 +GAO3 g ao3 +GAO4 g ao4 +GE0 g e0 +GE1 g e1 +GE2 g e2 +GE3 g e3 +GE4 g e4 +GEI0 g ei0 +GEI1 g ei1 +GEI2 g ei2 +GEI3 g ei3 +GEI4 g ei4 +GEN0 g en0 +GEN1 g en1 +GEN2 g en2 +GEN3 g en3 +GEN4 g en4 +GENG0 g eng0 +GENG1 g eng1 +GENG2 g eng2 +GENG3 g eng3 +GENG4 g eng4 +GONG0 g ong0 +GONG1 g ong1 +GONG2 g ong2 +GONG3 g ong3 +GONG4 g ong4 +GOU0 g ou0 +GOU1 g ou1 +GOU2 g ou2 +GOU3 g ou3 +GOU4 g ou4 +GU0 g u0 +GU1 g u1 +GU2 g u2 +GU3 g u3 +GU4 g u4 +GUA0 g ua0 +GUA1 g 
ua1 +GUA2 g ua2 +GUA3 g ua3 +GUA4 g ua4 +GUAI0 g uai0 +GUAI1 g uai1 +GUAI2 g uai2 +GUAI3 g uai3 +GUAI4 g uai4 +GUAN0 g uan0 +GUAN1 g uan1 +GUAN2 g uan2 +GUAN3 g uan3 +GUAN4 g uan4 +GUANG0 g uang0 +GUANG1 g uang1 +GUANG2 g uang2 +GUANG3 g uang3 +GUANG4 g uang4 +GUI0 g ui0 +GUI1 g ui1 +GUI2 g ui2 +GUI3 g ui3 +GUI4 g ui4 +GUN0 g un0 +GUN1 g un1 +GUN2 g un2 +GUN3 g un3 +GUN4 g un4 +GUO0 g uo0 +GUO1 g uo1 +GUO2 g uo2 +GUO3 g uo3 +GUO4 g uo4 +HA0 h a0 +HA1 h a1 +HA2 h a2 +HA3 h a3 +HA4 h a4 +HAI0 h ai0 +HAI1 h ai1 +HAI2 h ai2 +HAI3 h ai3 +HAI4 h ai4 +HAN0 h an0 +HAN1 h an1 +HAN2 h an2 +HAN3 h an3 +HAN4 h an4 +HANG0 h ang0 +HANG1 h ang1 +HANG2 h ang2 +HANG3 h ang3 +HANG4 h ang4 +HAO0 h ao0 +HAO1 h ao1 +HAO2 h ao2 +HAO3 h ao3 +HAO4 h ao4 +HE0 h e0 +HE1 h e1 +HE2 h e2 +HE3 h e3 +HE4 h e4 +HEI0 h ei0 +HEI1 h ei1 +HEI2 h ei2 +HEI3 h ei3 +HEI4 h ei4 +HEN0 h en0 +HEN1 h en1 +HEN2 h en2 +HEN3 h en3 +HEN4 h en4 +HENG0 h eng0 +HENG1 h eng1 +HENG2 h eng2 +HENG3 h eng3 +HENG4 h eng4 +HONG0 h ong0 +HONG1 h ong1 +HONG2 h ong2 +HONG3 h ong3 +HONG4 h ong4 +HOU0 h ou0 +HOU1 h ou1 +HOU2 h ou2 +HOU3 h ou3 +HOU4 h ou4 +HU0 h u0 +HU1 h u1 +HU2 h u2 +HU3 h u3 +HU4 h u4 +HUA0 h ua0 +HUA1 h ua1 +HUA2 h ua2 +HUA3 h ua3 +HUA4 h ua4 +HUAI0 h uai0 +HUAI1 h uai1 +HUAI2 h uai2 +HUAI3 h uai3 +HUAI4 h uai4 +HUAN0 h uan0 +HUAN1 h uan1 +HUAN2 h uan2 +HUAN3 h uan3 +HUAN4 h uan4 +HUANG0 h uang0 +HUANG1 h uang1 +HUANG2 h uang2 +HUANG3 h uang3 +HUANG4 h uang4 +HUI0 h ui0 +HUI1 h ui1 +HUI2 h ui2 +HUI3 h ui3 +HUI4 h ui4 +HUN0 h un0 +HUN1 h un1 +HUN2 h un2 +HUN3 h un3 +HUN4 h un4 +HUO0 h uo0 +HUO1 h uo1 +HUO2 h uo2 +HUO3 h uo3 +HUO4 h uo4 +JI0 j i0 +JI1 j i1 +JI2 j i2 +JI3 j i3 +JI4 j i4 +JIA0 j ia0 +JIA1 j ia1 +JIA2 j ia2 +JIA3 j ia3 +JIA4 j ia4 +JIAN0 j ian0 +JIAN1 j ian1 +JIAN2 j ian2 +JIAN3 j ian3 +JIAN4 j ian4 +JIANG0 j iang0 +JIANG1 j iang1 +JIANG2 j iang2 +JIANG3 j iang3 +JIANG4 j iang4 +JIAO0 j iao0 +JIAO1 j iao1 +JIAO2 j iao2 +JIAO3 j iao3 +JIAO4 j iao4 +JIE0 j ie0 +JIE1 j ie1 +JIE2 j ie2 +JIE3 j ie3 +JIE4 j ie4 +JIN0 j in0 +JIN1 j in1 +JIN2 j in2 +JIN3 j in3 +JIN4 j in4 +JING0 j ing0 +JING1 j ing1 +JING2 j ing2 +JING3 j ing3 +JING4 j ing4 +JIONG0 j iong0 +JIONG1 j iong1 +JIONG2 j iong2 +JIONG3 j iong3 +JIONG4 j iong4 +JIU0 j iu0 +JIU1 j iu1 +JIU2 j iu2 +JIU3 j iu3 +JIU4 j iu4 +JU0 j v0 +JU1 j v1 +JU2 j v2 +JU3 j v3 +JU4 j v4 +JUAN0 j van0 +JUAN1 j van1 +JUAN2 j van2 +JUAN3 j van3 +JUAN4 j van4 +JUE0 j ve0 +JUE1 j ve1 +JUE2 j ve2 +JUE3 j ve3 +JUE4 j ve4 +JUN0 j vn0 +JUN1 j vn1 +JUN2 j vn2 +JUN3 j vn3 +JUN4 j vn4 +KA0 k a0 +KA1 k a1 +KA2 k a2 +KA3 k a3 +KA4 k a4 +KAI0 k ai0 +KAI1 k ai1 +KAI2 k ai2 +KAI3 k ai3 +KAI4 k ai4 +KAN0 k an0 +KAN1 k an1 +KAN2 k an2 +KAN3 k an3 +KAN4 k an4 +KANG0 k ang0 +KANG1 k ang1 +KANG2 k ang2 +KANG3 k ang3 +KANG4 k ang4 +KAO0 k ao0 +KAO1 k ao1 +KAO2 k ao2 +KAO3 k ao3 +KAO4 k ao4 +KE0 k e0 +KE1 k e1 +KE2 k e2 +KE3 k e3 +KE4 k e4 +KEI0 k ei0 +KEI1 k ei1 +KEI2 k ei2 +KEI3 k ei3 +KEI4 k ei4 +KEN0 k en0 +KEN1 k en1 +KEN2 k en2 +KEN3 k en3 +KEN4 k en4 +KENG0 k eng0 +KENG1 k eng1 +KENG2 k eng2 +KENG3 k eng3 +KENG4 k eng4 +KONG0 k ong0 +KONG1 k ong1 +KONG2 k ong2 +KONG3 k ong3 +KONG4 k ong4 +KOU0 k ou0 +KOU1 k ou1 +KOU2 k ou2 +KOU3 k ou3 +KOU4 k ou4 +KU0 k u0 +KU1 k u1 +KU2 k u2 +KU3 k u3 +KU4 k u4 +KUA0 k ua0 +KUA1 k ua1 +KUA2 k ua2 +KUA3 k ua3 +KUA4 k ua4 +KUAI0 k uai0 +KUAI1 k uai1 +KUAI2 k uai2 +KUAI3 k uai3 +KUAI4 k uai4 +KUAN0 k uan0 +KUAN1 k uan1 +KUAN2 k uan2 +KUAN3 k uan3 +KUAN4 k uan4 +KUANG0 k uang0 +KUANG1 k uang1 +KUANG2 k uang2 +KUANG3 k uang3 +KUANG4 k uang4 +KUI0 k ui0 +KUI1 k ui1 +KUI2 k 
ui2 +KUI3 k ui3 +KUI4 k ui4 +KUN0 k un0 +KUN1 k un1 +KUN2 k un2 +KUN3 k un3 +KUN4 k un4 +KUO0 k uo0 +KUO1 k uo1 +KUO2 k uo2 +KUO3 k uo3 +KUO4 k uo4 +LA0 l a0 +LA1 l a1 +LA2 l a2 +LA3 l a3 +LA4 l a4 +LAI0 l ai0 +LAI1 l ai1 +LAI2 l ai2 +LAI3 l ai3 +LAI4 l ai4 +LAN0 l an0 +LAN1 l an1 +LAN2 l an2 +LAN3 l an3 +LAN4 l an4 +LANG0 l ang0 +LANG1 l ang1 +LANG2 l ang2 +LANG3 l ang3 +LANG4 l ang4 +LAO0 l ao0 +LAO1 l ao1 +LAO2 l ao2 +LAO3 l ao3 +LAO4 l ao4 +LE0 l e0 +LE1 l e1 +LE2 l e2 +LE3 l e3 +LE4 l e4 +LEI0 l ei0 +LEI1 l ei1 +LEI2 l ei2 +LEI3 l ei3 +LEI4 l ei4 +LENG0 l eng0 +LENG1 l eng1 +LENG2 l eng2 +LENG3 l eng3 +LENG4 l eng4 +LI0 l i0 +LI1 l i1 +LI2 l i2 +LI3 l i3 +LI4 l i4 +LIA0 l ia0 +LIA1 l ia1 +LIA2 l ia2 +LIA3 l ia3 +LIA4 l ia4 +LIAN0 l ian0 +LIAN1 l ian1 +LIAN2 l ian2 +LIAN3 l ian3 +LIAN4 l ian4 +LIANG0 l iang0 +LIANG1 l iang1 +LIANG2 l iang2 +LIANG3 l iang3 +LIANG4 l iang4 +LIAO0 l iao0 +LIAO1 l iao1 +LIAO2 l iao2 +LIAO3 l iao3 +LIAO4 l iao4 +LIE0 l ie0 +LIE1 l ie1 +LIE2 l ie2 +LIE3 l ie3 +LIE4 l ie4 +LIN0 l in0 +LIN1 l in1 +LIN2 l in2 +LIN3 l in3 +LIN4 l in4 +LING0 l ing0 +LING1 l ing1 +LING2 l ing2 +LING3 l ing3 +LING4 l ing4 +LIU0 l iu0 +LIU1 l iu1 +LIU2 l iu2 +LIU3 l iu3 +LIU4 l iu4 +LONG0 l ong0 +LONG1 l ong1 +LONG2 l ong2 +LONG3 l ong3 +LONG4 l ong4 +LOU0 l ou0 +LOU1 l ou1 +LOU2 l ou2 +LOU3 l ou3 +LOU4 l ou4 +LU0 l u0 +LU1 l u1 +LU2 l u2 +LU3 l u3 +LU4 l u4 +LUAN0 l uan0 +LUAN1 l uan1 +LUAN2 l uan2 +LUAN3 l uan3 +LUAN4 l uan4 +LUE0 l ve0 +LUE1 l ve1 +LUE2 l ve2 +LUE3 l ve3 +LUE4 l ve4 +LVE0 l ve0 +LVE1 l ve1 +LVE2 l ve2 +LVE3 l ve3 +LVE4 l ve4 +LUN0 l un0 +LUN1 l un1 +LUN2 l un2 +LUN3 l un3 +LUN4 l un4 +LUO0 l uo0 +LUO1 l uo1 +LUO2 l uo2 +LUO3 l uo3 +LUO4 l uo4 +LV0 l v0 +LV1 l v1 +LV2 l v2 +LV3 l v3 +LV4 l v4 +MA0 m a0 +MA1 m a1 +MA2 m a2 +MA3 m a3 +MA4 m a4 +MAI0 m ai0 +MAI1 m ai1 +MAI2 m ai2 +MAI3 m ai3 +MAI4 m ai4 +MAN0 m an0 +MAN1 m an1 +MAN2 m an2 +MAN3 m an3 +MAN4 m an4 +MANG0 m ang0 +MANG1 m ang1 +MANG2 m ang2 +MANG3 m ang3 +MANG4 m ang4 +MAO0 m ao0 +MAO1 m ao1 +MAO2 m ao2 +MAO3 m ao3 +MAO4 m ao4 +ME0 m e0 +ME1 m e1 +ME2 m e2 +ME3 m e3 +ME4 m e4 +MEI0 m ei0 +MEI1 m ei1 +MEI2 m ei2 +MEI3 m ei3 +MEI4 m ei4 +MEN0 m en0 +MEN1 m en1 +MEN2 m en2 +MEN3 m en3 +MEN4 m en4 +MENG0 m eng0 +MENG1 m eng1 +MENG2 m eng2 +MENG3 m eng3 +MENG4 m eng4 +MI0 m i0 +MI1 m i1 +MI2 m i2 +MI3 m i3 +MI4 m i4 +MIAN0 m ian0 +MIAN1 m ian1 +MIAN2 m ian2 +MIAN3 m ian3 +MIAN4 m ian4 +MIAO0 m iao0 +MIAO1 m iao1 +MIAO2 m iao2 +MIAO3 m iao3 +MIAO4 m iao4 +MIE0 m ie0 +MIE1 m ie1 +MIE2 m ie2 +MIE3 m ie3 +MIE4 m ie4 +MIN0 m in0 +MIN1 m in1 +MIN2 m in2 +MIN3 m in3 +MIN4 m in4 +MING0 m ing0 +MING1 m ing1 +MING2 m ing2 +MING3 m ing3 +MING4 m ing4 +MIU0 m iu0 +MIU1 m iu1 +MIU2 m iu2 +MIU3 m iu3 +MIU4 m iu4 +MO0 m o0 +MO1 m o1 +MO2 m o2 +MO3 m o3 +MO4 m o4 +MOU0 m ou0 +MOU1 m ou1 +MOU2 m ou2 +MOU3 m ou3 +MOU4 m ou4 +MU0 m u0 +MU1 m u1 +MU2 m u2 +MU3 m u3 +MU4 m u4 +NA0 n a0 +NA1 n a1 +NA2 n a2 +NA3 n a3 +NA4 n a4 +NAI0 n ai0 +NAI1 n ai1 +NAI2 n ai2 +NAI3 n ai3 +NAI4 n ai4 +NAN0 n an0 +NAN1 n an1 +NAN2 n an2 +NAN3 n an3 +NAN4 n an4 +NANG0 n ang0 +NANG1 n ang1 +NANG2 n ang2 +NANG3 n ang3 +NANG4 n ang4 +NAO0 n ao0 +NAO1 n ao1 +NAO2 n ao2 +NAO3 n ao3 +NAO4 n ao4 +NE0 n e0 +NE1 n e1 +NE2 n e2 +NE3 n e3 +NE4 n e4 +NEI0 n ei0 +NEI1 n ei1 +NEI2 n ei2 +NEI3 n ei3 +NEI4 n ei4 +NEN0 n en0 +NEN1 n en1 +NEN2 n en2 +NEN3 n en3 +NEN4 n en4 +NENG0 n eng0 +NENG1 n eng1 +NENG2 n eng2 +NENG3 n eng3 +NENG4 n eng4 +NI0 n i0 +NI1 n i1 +NI2 n i2 +NI3 n i3 +NI4 n i4 +NIAN0 n ian0 +NIAN1 n ian1 +NIAN2 n ian2 +NIAN3 n ian3 +NIAN4 n ian4 
+NIANG0 n iang0 +NIANG1 n iang1 +NIANG2 n iang2 +NIANG3 n iang3 +NIANG4 n iang4 +NIAO0 n iao0 +NIAO1 n iao1 +NIAO2 n iao2 +NIAO3 n iao3 +NIAO4 n iao4 +NIE0 n ie0 +NIE1 n ie1 +NIE2 n ie2 +NIE3 n ie3 +NIE4 n ie4 +NIN0 n in0 +NIN1 n in1 +NIN2 n in2 +NIN3 n in3 +NIN4 n in4 +NING0 n ing0 +NING1 n ing1 +NING2 n ing2 +NING3 n ing3 +NING4 n ing4 +NIU0 n iu0 +NIU1 n iu1 +NIU2 n iu2 +NIU3 n iu3 +NIU4 n iu4 +NONG0 n ong0 +NONG1 n ong1 +NONG2 n ong2 +NONG3 n ong3 +NONG4 n ong4 +NU0 n u0 +NU1 n u1 +NU2 n u2 +NU3 n u3 +NU4 n u4 +NUAN0 n uan0 +NUAN1 n uan1 +NUAN2 n uan2 +NUAN3 n uan3 +NUAN4 n uan4 +NUE0 n ve0 +NUE1 n ve1 +NUE2 n ve2 +NUE3 n ve3 +NUE4 n ve4 +NVE0 n ve0 +NVE1 n ve1 +NVE2 n ve2 +NVE3 n ve3 +NVE4 n ve4 +NUO0 n uo0 +NUO1 n uo1 +NUO2 n uo2 +NUO3 n uo3 +NUO4 n uo4 +NV0 n v0 +NV1 n v1 +NV2 n v2 +NV3 n v3 +NV4 n v4 +O0 oo o0 +O1 oo o1 +O2 oo o2 +O3 oo o3 +O4 oo o4 +OU0 oo ou0 +OU1 oo ou1 +OU2 oo ou2 +OU3 oo ou3 +OU4 oo ou4 +PA0 p a0 +PA1 p a1 +PA2 p a2 +PA3 p a3 +PA4 p a4 +PAI0 p ai0 +PAI1 p ai1 +PAI2 p ai2 +PAI3 p ai3 +PAI4 p ai4 +PAN0 p an0 +PAN1 p an1 +PAN2 p an2 +PAN3 p an3 +PAN4 p an4 +PANG0 p ang0 +PANG1 p ang1 +PANG2 p ang2 +PANG3 p ang3 +PANG4 p ang4 +PAO0 p ao0 +PAO1 p ao1 +PAO2 p ao2 +PAO3 p ao3 +PAO4 p ao4 +PEI0 p ei0 +PEI1 p ei1 +PEI2 p ei2 +PEI3 p ei3 +PEI4 p ei4 +PEN0 p en0 +PEN1 p en1 +PEN2 p en2 +PEN3 p en3 +PEN4 p en4 +PENG0 p eng0 +PENG1 p eng1 +PENG2 p eng2 +PENG3 p eng3 +PENG4 p eng4 +PI0 p i0 +PI1 p i1 +PI2 p i2 +PI3 p i3 +PI4 p i4 +PIAN0 p ian0 +PIAN1 p ian1 +PIAN2 p ian2 +PIAN3 p ian3 +PIAN4 p ian4 +PIAO0 p iao0 +PIAO1 p iao1 +PIAO2 p iao2 +PIAO3 p iao3 +PIAO4 p iao4 +PIE0 p ie0 +PIE1 p ie1 +PIE2 p ie2 +PIE3 p ie3 +PIE4 p ie4 +PIN0 p in0 +PIN1 p in1 +PIN2 p in2 +PIN3 p in3 +PIN4 p in4 +PING0 p ing0 +PING1 p ing1 +PING2 p ing2 +PING3 p ing3 +PING4 p ing4 +PO0 p o0 +PO1 p o1 +PO2 p o2 +PO3 p o3 +PO4 p o4 +POU0 p ou0 +POU1 p ou1 +POU2 p ou2 +POU3 p ou3 +POU4 p ou4 +PU0 p u0 +PU1 p u1 +PU2 p u2 +PU3 p u3 +PU4 p u4 +QI0 q i0 +QI1 q i1 +QI2 q i2 +QI3 q i3 +QI4 q i4 +QIA0 q ia0 +QIA1 q ia1 +QIA2 q ia2 +QIA3 q ia3 +QIA4 q ia4 +QIAN0 q ian0 +QIAN1 q ian1 +QIAN2 q ian2 +QIAN3 q ian3 +QIAN4 q ian4 +QIANG0 q iang0 +QIANG1 q iang1 +QIANG2 q iang2 +QIANG3 q iang3 +QIANG4 q iang4 +QIAO0 q iao0 +QIAO1 q iao1 +QIAO2 q iao2 +QIAO3 q iao3 +QIAO4 q iao4 +QIE0 q ie0 +QIE1 q ie1 +QIE2 q ie2 +QIE3 q ie3 +QIE4 q ie4 +QIN0 q in0 +QIN1 q in1 +QIN2 q in2 +QIN3 q in3 +QIN4 q in4 +QING0 q ing0 +QING1 q ing1 +QING2 q ing2 +QING3 q ing3 +QING4 q ing4 +QIONG0 q iong0 +QIONG1 q iong1 +QIONG2 q iong2 +QIONG3 q iong3 +QIONG4 q iong4 +QIU0 q iu0 +QIU1 q iu1 +QIU2 q iu2 +QIU3 q iu3 +QIU4 q iu4 +QU0 q v0 +QU1 q v1 +QU2 q v2 +QU3 q v3 +QU4 q v4 +QUAN0 q van0 +QUAN1 q van1 +QUAN2 q van2 +QUAN3 q van3 +QUAN4 q van4 +QUE0 q ve0 +QUE1 q ve1 +QUE2 q ve2 +QUE3 q ve3 +QUE4 q ve4 +QUN0 q vn0 +QUN1 q vn1 +QUN2 q vn2 +QUN3 q vn3 +QUN4 q vn4 +RAN0 r an0 +RAN1 r an1 +RAN2 r an2 +RAN3 r an3 +RAN4 r an4 +RANG0 r ang0 +RANG1 r ang1 +RANG2 r ang2 +RANG3 r ang3 +RANG4 r ang4 +RAO0 r ao0 +RAO1 r ao1 +RAO2 r ao2 +RAO3 r ao3 +RAO4 r ao4 +RE0 r e0 +RE1 r e1 +RE2 r e2 +RE3 r e3 +RE4 r e4 +REN0 r en0 +REN1 r en1 +REN2 r en2 +REN3 r en3 +REN4 r en4 +RENG0 r eng0 +RENG1 r eng1 +RENG2 r eng2 +RENG3 r eng3 +RENG4 r eng4 +RI0 r iz0 +RI1 r iz1 +RI2 r iz2 +RI3 r iz3 +RI4 r iz4 +RONG0 r ong0 +RONG1 r ong1 +RONG2 r ong2 +RONG3 r ong3 +RONG4 r ong4 +ROU0 r ou0 +ROU1 r ou1 +ROU2 r ou2 +ROU3 r ou3 +ROU4 r ou4 +RU0 r u0 +RU1 r u1 +RU2 r u2 +RU3 r u3 +RU4 r u4 +RUAN0 r uan0 +RUAN1 r uan1 +RUAN2 r uan2 +RUAN3 r uan3 +RUAN4 r uan4 +RUI0 r ui0 +RUI1 r 
ui1 +RUI2 r ui2 +RUI3 r ui3 +RUI4 r ui4 +RUN0 r un0 +RUN1 r un1 +RUN2 r un2 +RUN3 r un3 +RUN4 r un4 +RUO0 r uo0 +RUO1 r uo1 +RUO2 r uo2 +RUO3 r uo3 +RUO4 r uo4 +SA0 s a0 +SA1 s a1 +SA2 s a2 +SA3 s a3 +SA4 s a4 +SAI0 s ai0 +SAI1 s ai1 +SAI2 s ai2 +SAI3 s ai3 +SAI4 s ai4 +SAN0 s an0 +SAN1 s an1 +SAN2 s an2 +SAN3 s an3 +SAN4 s an4 +SANG0 s ang0 +SANG1 s ang1 +SANG2 s ang2 +SANG3 s ang3 +SANG4 s ang4 +SAO0 s ao0 +SAO1 s ao1 +SAO2 s ao2 +SAO3 s ao3 +SAO4 s ao4 +SE0 s e0 +SE1 s e1 +SE2 s e2 +SE3 s e3 +SE4 s e4 +SEN0 s en0 +SEN1 s en1 +SEN2 s en2 +SEN3 s en3 +SEN4 s en4 +SENG0 s eng0 +SENG1 s eng1 +SENG2 s eng2 +SENG3 s eng3 +SENG4 s eng4 +SHA0 sh a0 +SHA1 sh a1 +SHA2 sh a2 +SHA3 sh a3 +SHA4 sh a4 +SHAI0 sh ai0 +SHAI1 sh ai1 +SHAI2 sh ai2 +SHAI3 sh ai3 +SHAI4 sh ai4 +SHAN0 sh an0 +SHAN1 sh an1 +SHAN2 sh an2 +SHAN3 sh an3 +SHAN4 sh an4 +SHANG0 sh ang0 +SHANG1 sh ang1 +SHANG2 sh ang2 +SHANG3 sh ang3 +SHANG4 sh ang4 +SHAO0 sh ao0 +SHAO1 sh ao1 +SHAO2 sh ao2 +SHAO3 sh ao3 +SHAO4 sh ao4 +SHE0 sh e0 +SHE1 sh e1 +SHE2 sh e2 +SHE3 sh e3 +SHE4 sh e4 +SHEI0 sh ei0 +SHEI1 sh ei1 +SHEI2 sh ei2 +SHEI3 sh ei3 +SHEI4 sh ei4 +SHEN0 sh en0 +SHEN1 sh en1 +SHEN2 sh en2 +SHEN3 sh en3 +SHEN4 sh en4 +SHENG0 sh eng0 +SHENG1 sh eng1 +SHENG2 sh eng2 +SHENG3 sh eng3 +SHENG4 sh eng4 +SHI0 sh ix0 +SHI1 sh ix1 +SHI2 sh ix2 +SHI3 sh ix3 +SHI4 sh ix4 +SHOU0 sh ou0 +SHOU1 sh ou1 +SHOU2 sh ou2 +SHOU3 sh ou3 +SHOU4 sh ou4 +SHU0 sh u0 +SHU1 sh u1 +SHU2 sh u2 +SHU3 sh u3 +SHU4 sh u4 +SHUA0 sh ua0 +SHUA1 sh ua1 +SHUA2 sh ua2 +SHUA3 sh ua3 +SHUA4 sh ua4 +SHUAI0 sh uai0 +SHUAI1 sh uai1 +SHUAI2 sh uai2 +SHUAI3 sh uai3 +SHUAI4 sh uai4 +SHUAN0 sh uan0 +SHUAN1 sh uan1 +SHUAN2 sh uan2 +SHUAN3 sh uan3 +SHUAN4 sh uan4 +SHUANG0 sh uang0 +SHUANG1 sh uang1 +SHUANG2 sh uang2 +SHUANG3 sh uang3 +SHUANG4 sh uang4 +SHUI0 sh ui0 +SHUI1 sh ui1 +SHUI2 sh ui2 +SHUI3 sh ui3 +SHUI4 sh ui4 +SHUN0 sh un0 +SHUN1 sh un1 +SHUN2 sh un2 +SHUN3 sh un3 +SHUN4 sh un4 +SHUO0 sh uo0 +SHUO1 sh uo1 +SHUO2 sh uo2 +SHUO3 sh uo3 +SHUO4 sh uo4 +SI0 s iy0 +SI1 s iy1 +SI2 s iy2 +SI3 s iy3 +SI4 s iy4 +SONG0 s ong0 +SONG1 s ong1 +SONG2 s ong2 +SONG3 s ong3 +SONG4 s ong4 +SOU0 s ou0 +SOU1 s ou1 +SOU2 s ou2 +SOU3 s ou3 +SOU4 s ou4 +SU0 s u0 +SU1 s u1 +SU2 s u2 +SU3 s u3 +SU4 s u4 +SUAN0 s uan0 +SUAN1 s uan1 +SUAN2 s uan2 +SUAN3 s uan3 +SUAN4 s uan4 +SUI0 s ui0 +SUI1 s ui1 +SUI2 s ui2 +SUI3 s ui3 +SUI4 s ui4 +SUN0 s un0 +SUN1 s un1 +SUN2 s un2 +SUN3 s un3 +SUN4 s un4 +SUO0 s uo0 +SUO1 s uo1 +SUO2 s uo2 +SUO3 s uo3 +SUO4 s uo4 +TA0 t a0 +TA1 t a1 +TA2 t a2 +TA3 t a3 +TA4 t a4 +TAI0 t ai0 +TAI1 t ai1 +TAI2 t ai2 +TAI3 t ai3 +TAI4 t ai4 +TAN0 t an0 +TAN1 t an1 +TAN2 t an2 +TAN3 t an3 +TAN4 t an4 +TANG0 t ang0 +TANG1 t ang1 +TANG2 t ang2 +TANG3 t ang3 +TANG4 t ang4 +TAO0 t ao0 +TAO1 t ao1 +TAO2 t ao2 +TAO3 t ao3 +TAO4 t ao4 +TE0 t e0 +TE1 t e1 +TE2 t e2 +TE3 t e3 +TE4 t e4 +TENG0 t eng0 +TENG1 t eng1 +TENG2 t eng2 +TENG3 t eng3 +TENG4 t eng4 +TI0 t i0 +TI1 t i1 +TI2 t i2 +TI3 t i3 +TI4 t i4 +TIAN0 t ian0 +TIAN1 t ian1 +TIAN2 t ian2 +TIAN3 t ian3 +TIAN4 t ian4 +TIAO0 t iao0 +TIAO1 t iao1 +TIAO2 t iao2 +TIAO3 t iao3 +TIAO4 t iao4 +TIE0 t ie0 +TIE1 t ie1 +TIE2 t ie2 +TIE3 t ie3 +TIE4 t ie4 +TING0 t ing0 +TING1 t ing1 +TING2 t ing2 +TING3 t ing3 +TING4 t ing4 +TONG0 t ong0 +TONG1 t ong1 +TONG2 t ong2 +TONG3 t ong3 +TONG4 t ong4 +TOU0 t ou0 +TOU1 t ou1 +TOU2 t ou2 +TOU3 t ou3 +TOU4 t ou4 +TU0 t u0 +TU1 t u1 +TU2 t u2 +TU3 t u3 +TU4 t u4 +TUAN0 t uan0 +TUAN1 t uan1 +TUAN2 t uan2 +TUAN3 t uan3 +TUAN4 t uan4 +TUI0 t ui0 +TUI1 t ui1 +TUI2 t ui2 +TUI3 t ui3 +TUI4 t ui4 +TUN0 t un0 +TUN1 t un1 
+TUN2 t un2 +TUN3 t un3 +TUN4 t un4 +TUO0 t uo0 +TUO1 t uo1 +TUO2 t uo2 +TUO3 t uo3 +TUO4 t uo4 +WA0 uu ua0 +WA1 uu ua1 +WA2 uu ua2 +WA3 uu ua3 +WA4 uu ua4 +WAI0 uu uai0 +WAI1 uu uai1 +WAI2 uu uai2 +WAI3 uu uai3 +WAI4 uu uai4 +WAN0 uu uan0 +WAN1 uu uan1 +WAN2 uu uan2 +WAN3 uu uan3 +WAN4 uu uan4 +WANG0 uu uang0 +WANG1 uu uang1 +WANG2 uu uang2 +WANG3 uu uang3 +WANG4 uu uang4 +WEI0 uu ui0 +WEI1 uu ui1 +WEI2 uu ui2 +WEI3 uu ui3 +WEI4 uu ui4 +WEN0 uu un0 +WEN1 uu un1 +WEN2 uu un2 +WEN3 uu un3 +WEN4 uu un4 +WENG0 uu ueng0 +WENG1 uu ueng1 +WENG2 uu ueng2 +WENG3 uu ueng3 +WENG4 uu ueng4 +WO0 uu uo0 +WO1 uu uo1 +WO2 uu uo2 +WO3 uu uo3 +WO4 uu uo4 +WU0 uu u0 +WU1 uu u1 +WU2 uu u2 +WU3 uu u3 +WU4 uu u4 +XI0 x i0 +XI1 x i1 +XI2 x i2 +XI3 x i3 +XI4 x i4 +XIA0 x ia0 +XIA1 x ia1 +XIA2 x ia2 +XIA3 x ia3 +XIA4 x ia4 +XIAN0 x ian0 +XIAN1 x ian1 +XIAN2 x ian2 +XIAN3 x ian3 +XIAN4 x ian4 +XIANG0 x iang0 +XIANG1 x iang1 +XIANG2 x iang2 +XIANG3 x iang3 +XIANG4 x iang4 +XIAO0 x iao0 +XIAO1 x iao1 +XIAO2 x iao2 +XIAO3 x iao3 +XIAO4 x iao4 +XIE0 x ie0 +XIE1 x ie1 +XIE2 x ie2 +XIE3 x ie3 +XIE4 x ie4 +XIN0 x in0 +XIN1 x in1 +XIN2 x in2 +XIN3 x in3 +XIN4 x in4 +XING0 x ing0 +XING1 x ing1 +XING2 x ing2 +XING3 x ing3 +XING4 x ing4 +XIONG0 x iong0 +XIONG1 x iong1 +XIONG2 x iong2 +XIONG3 x iong3 +XIONG4 x iong4 +XIU0 x iu0 +XIU1 x iu1 +XIU2 x iu2 +XIU3 x iu3 +XIU4 x iu4 +XU0 x v0 +XU1 x v1 +XU2 x v2 +XU3 x v3 +XU4 x v4 +XUAN0 x van0 +XUAN1 x van1 +XUAN2 x van2 +XUAN3 x van3 +XUAN4 x van4 +XUE0 x ve0 +XUE1 x ve1 +XUE2 x ve2 +XUE3 x ve3 +XUE4 x ve4 +XUN0 x vn0 +XUN1 x vn1 +XUN2 x vn2 +XUN3 x vn3 +XUN4 x vn4 +YA0 ii ia0 +YA1 ii ia1 +YA2 ii ia2 +YA3 ii ia3 +YA4 ii ia4 +YAN0 ii ian0 +YAN1 ii ian1 +YAN2 ii ian2 +YAN3 ii ian3 +YAN4 ii ian4 +YANG0 ii iang0 +YANG1 ii iang1 +YANG2 ii iang2 +YANG3 ii iang3 +YANG4 ii iang4 +YAO0 ii iao0 +YAO1 ii iao1 +YAO2 ii iao2 +YAO3 ii iao3 +YAO4 ii iao4 +YE0 ii ie0 +YE1 ii ie1 +YE2 ii ie2 +YE3 ii ie3 +YE4 ii ie4 +YI0 ii i0 +YI1 ii i1 +YI2 ii i2 +YI3 ii i3 +YI4 ii i4 +YIN0 ii in0 +YIN1 ii in1 +YIN2 ii in2 +YIN3 ii in3 +YIN4 ii in4 +YING0 ii ing0 +YING1 ii ing1 +YING2 ii ing2 +YING3 ii ing3 +YING4 ii ing4 +YO0 ii ou0 +YO1 ii ou1 +YO2 ii ou2 +YO3 ii ou3 +YO4 ii ou4 +YONG0 ii iong0 +YONG1 ii iong1 +YONG2 ii iong2 +YONG3 ii iong3 +YONG4 ii iong4 +YOU0 ii iu0 +YOU1 ii iu1 +YOU2 ii iu2 +YOU3 ii iu3 +YOU4 ii iu4 +YU0 vv v0 +YU1 vv v1 +YU2 vv v2 +YU3 vv v3 +YU4 vv v4 +YUAN0 vv van0 +YUAN1 vv van1 +YUAN2 vv van2 +YUAN3 vv van3 +YUAN4 vv van4 +YUE0 vv ve0 +YUE1 vv ve1 +YUE2 vv ve2 +YUE3 vv ve3 +YUE4 vv ve4 +YUN0 vv vn0 +YUN1 vv vn1 +YUN2 vv vn2 +YUN3 vv vn3 +YUN4 vv vn4 +YUO0 ii ou0 +YUO1 ii ou1 +YUO2 ii ou2 +YUO3 ii ou3 +YUO4 ii ou4 +ZA0 z a0 +ZA1 z a1 +ZA2 z a2 +ZA3 z a3 +ZA4 z a4 +ZAI0 z ai0 +ZAI1 z ai1 +ZAI2 z ai2 +ZAI3 z ai3 +ZAI4 z ai4 +ZAN0 z an0 +ZAN1 z an1 +ZAN2 z an2 +ZAN3 z an3 +ZAN4 z an4 +ZANG0 z ang0 +ZANG1 z ang1 +ZANG2 z ang2 +ZANG3 z ang3 +ZANG4 z ang4 +ZAO0 z ao0 +ZAO1 z ao1 +ZAO2 z ao2 +ZAO3 z ao3 +ZAO4 z ao4 +ZE0 z e0 +ZE1 z e1 +ZE2 z e2 +ZE3 z e3 +ZE4 z e4 +ZEI0 z ei0 +ZEI1 z ei1 +ZEI2 z ei2 +ZEI3 z ei3 +ZEI4 z ei4 +ZEN0 z en0 +ZEN1 z en1 +ZEN2 z en2 +ZEN3 z en3 +ZEN4 z en4 +ZENG0 z eng0 +ZENG1 z eng1 +ZENG2 z eng2 +ZENG3 z eng3 +ZENG4 z eng4 +ZHA0 zh a0 +ZHA1 zh a1 +ZHA2 zh a2 +ZHA3 zh a3 +ZHA4 zh a4 +ZHAI0 zh ai0 +ZHAI1 zh ai1 +ZHAI2 zh ai2 +ZHAI3 zh ai3 +ZHAI4 zh ai4 +ZHAN0 zh an0 +ZHAN1 zh an1 +ZHAN2 zh an2 +ZHAN3 zh an3 +ZHAN4 zh an4 +ZHANG0 zh ang0 +ZHANG1 zh ang1 +ZHANG2 zh ang2 +ZHANG3 zh ang3 +ZHANG4 zh ang4 +ZHAO0 zh ao0 +ZHAO1 zh ao1 +ZHAO2 zh ao2 +ZHAO3 zh ao3 +ZHAO4 zh 
ao4 +ZHE0 zh e0 +ZHE1 zh e1 +ZHE2 zh e2 +ZHE3 zh e3 +ZHE4 zh e4 +ZHEI0 zh ei0 +ZHEI1 zh ei1 +ZHEI2 zh ei2 +ZHEI3 zh ei3 +ZHEI4 zh ei4 +ZHEN0 zh en0 +ZHEN1 zh en1 +ZHEN2 zh en2 +ZHEN3 zh en3 +ZHEN4 zh en4 +ZHENG0 zh eng0 +ZHENG1 zh eng1 +ZHENG2 zh eng2 +ZHENG3 zh eng3 +ZHENG4 zh eng4 +ZHI0 zh ix0 +ZHI1 zh ix1 +ZHI2 zh ix2 +ZHI3 zh ix3 +ZHI4 zh ix4 +ZHONG0 zh ong0 +ZHONG1 zh ong1 +ZHONG2 zh ong2 +ZHONG3 zh ong3 +ZHONG4 zh ong4 +ZHOU0 zh ou0 +ZHOU1 zh ou1 +ZHOU2 zh ou2 +ZHOU3 zh ou3 +ZHOU4 zh ou4 +ZHU0 zh u0 +ZHU1 zh u1 +ZHU2 zh u2 +ZHU3 zh u3 +ZHU4 zh u4 +ZHUA0 zh ua0 +ZHUA1 zh ua1 +ZHUA2 zh ua2 +ZHUA3 zh ua3 +ZHUA4 zh ua4 +ZHUAI0 zh uai0 +ZHUAI1 zh uai1 +ZHUAI2 zh uai2 +ZHUAI3 zh uai3 +ZHUAI4 zh uai4 +ZHUAN0 zh uan0 +ZHUAN1 zh uan1 +ZHUAN2 zh uan2 +ZHUAN3 zh uan3 +ZHUAN4 zh uan4 +ZHUANG0 zh uang0 +ZHUANG1 zh uang1 +ZHUANG2 zh uang2 +ZHUANG3 zh uang3 +ZHUANG4 zh uang4 +ZHUI0 zh ui0 +ZHUI1 zh ui1 +ZHUI2 zh ui2 +ZHUI3 zh ui3 +ZHUI4 zh ui4 +ZHUN0 zh un0 +ZHUN1 zh un1 +ZHUN2 zh un2 +ZHUN3 zh un3 +ZHUN4 zh un4 +ZHUO0 zh uo0 +ZHUO1 zh uo1 +ZHUO2 zh uo2 +ZHUO3 zh uo3 +ZHUO4 zh uo4 +ZI0 z iy0 +ZI1 z iy1 +ZI2 z iy2 +ZI3 z iy3 +ZI4 z iy4 +ZONG0 z ong0 +ZONG1 z ong1 +ZONG2 z ong2 +ZONG3 z ong3 +ZONG4 z ong4 +ZOU0 z ou0 +ZOU1 z ou1 +ZOU2 z ou2 +ZOU3 z ou3 +ZOU4 z ou4 +ZU0 z u0 +ZU1 z u1 +ZU2 z u2 +ZU3 z u3 +ZU4 z u4 +ZUAN0 z uan0 +ZUAN1 z uan1 +ZUAN2 z uan2 +ZUAN3 z uan3 +ZUAN4 z uan4 +ZUI0 z ui0 +ZUI1 z ui1 +ZUI2 z ui2 +ZUI3 z ui3 +ZUI4 z ui4 +ZUN0 z un0 +ZUN1 z un1 +ZUN2 z un2 +ZUN3 z un3 +ZUN4 z un4 +ZUO0 z uo0 +ZUO1 z uo1 +ZUO2 z uo2 +ZUO3 z uo3 +ZUO4 z uo4 +EI0 ee ei0 +EI1 ee ei1 +EI2 ee ei2 +EI3 ee ei3 +EI4 ee ei4 +TEI0 t ei0 +TEI1 t ei1 +TEI2 t ei2 +TEI3 t ei3 +TEI4 t ei4 +HNG0 ee eng0 +HNG1 ee eng1 +HNG2 ee eng2 +HNG3 ee eng3 +HNG4 ee eng4 +LO0 l o0 +LO1 l o1 +LO2 l o2 +LO3 l o3 +LO4 l o4 +N0 ee en0 +N1 ee en1 +N2 ee en2 +N3 ee en3 +N4 ee en4 +NG0 ee eng0 +NG1 ee eng1 +NG2 ee eng2 +NG3 ee eng3 +NG4 ee eng4 +NOU0 n ao0 +NOU1 n ao1 +NOU2 n ao2 +NOU3 n ao3 +NOU4 n ao4 +SEI0 s ei0 +SEI1 s ei1 +SEI2 s ei2 +SEI3 s ei3 +SEI4 s ei4 +A5 aa a5 +AI5 aa ai5 +AN5 aa an5 +ANG5 aa ang5 +AO5 aa ao5 +BA5 b a5 +BAI5 b ai5 +BAN5 b an5 +BANG5 b ang5 +BAO5 b ao5 +BEI5 b ei5 +BEN5 b en5 +BENG5 b eng5 +BI5 b i5 +BIAN5 b ian5 +BIAO5 b iao5 +BIE5 b ie5 +BIN5 b in5 +BING5 b ing5 +BO5 b o5 +BU5 b u5 +CA5 c a5 +CAI5 c ai5 +CAN5 c an5 +CANG5 c ang5 +CAO5 c ao5 +CE5 c e5 +CEN5 c en5 +CENG5 c eng5 +CHA5 ch a5 +CHAI5 ch ai5 +CHAN5 ch an5 +CHANG5 ch ang5 +CHAO5 ch ao5 +CHE5 ch e5 +CHEN5 ch en5 +CHENG5 ch eng5 +CHI5 ch ix5 +CHONG5 ch ong5 +CHOU5 ch ou5 +CHU5 ch u5 +CHUAI5 ch uai5 +CHUAN5 ch uan5 +CHUANG5 ch uang5 +CHUI5 ch ui5 +CHUN5 ch un5 +CHUO5 ch uo5 +CI5 c iy5 +CONG5 c ong5 +COU5 c ou5 +CU5 c u5 +CUAN5 c uan5 +CUI5 c ui5 +CUN5 c un5 +CUO5 c uo5 +DA5 d a5 +DAI5 d ai5 +DAN5 d an5 +DANG5 d ang5 +DAO5 d ao5 +DE5 d e5 +DEI5 d ei5 +DEN5 d en5 +DENG5 d eng5 +DI5 d i5 +DIA5 d ia5 +DIAN5 d ian5 +DIAO5 d iao5 +DIE5 d ie5 +DING5 d ing5 +DIU5 d iu5 +DONG5 d ong5 +DOU5 d ou5 +DU5 d u5 +DUAN5 d uan5 +DUI5 d ui5 +DUN5 d un5 +DUO5 d uo5 +E5 ee e5 +EN5 ee en5 +ER5 ee er5 +FA5 f a5 +FAN5 f an5 +FANG5 f ang5 +FEI5 f ei5 +FEN5 f en5 +FENG5 f eng5 +FO5 f o5 +FOU5 f ou5 +FU5 f u5 +GA5 g a5 +GAI5 g ai5 +GAN5 g an5 +GANG5 g ang5 +GAO5 g ao5 +GE5 g e5 +GEI5 g ei5 +GEN5 g en5 +GENG5 g eng5 +GONG5 g ong5 +GOU5 g ou5 +GU5 g u5 +GUA5 g ua5 +GUAI5 g uai5 +GUAN5 g uan5 +GUANG5 g uang5 +GUI5 g ui5 +GUN5 g un5 +GUO5 g uo5 +HA5 h a5 +HAI5 h ai5 +HAN5 h an5 +HANG5 h ang5 +HAO5 h ao5 +HE5 h e5 +HEI5 h ei5 +HEN5 h en5 +HENG5 h eng5 +HONG5 h ong5 +HOU5 h ou5 
+HU5 h u5 +HUA5 h ua5 +HUAI5 h uai5 +HUAN5 h uan5 +HUANG5 h uang5 +HUI5 h ui5 +HUN5 h un5 +HUO5 h uo5 +JI5 j i5 +JIA5 j ia5 +JIAN5 j ian5 +JIANG5 j iang5 +JIAO5 j iao5 +JIE5 j ie5 +JIN5 j in5 +JING5 j ing5 +JIONG5 j iong5 +JIU5 j iu5 +JU5 j v5 +JUAN5 j van5 +JUE5 j ve5 +JUN5 j vn5 +KA5 k a5 +KAI5 k ai5 +KAN5 k an5 +KANG5 k ang5 +KAO5 k ao5 +KE5 k e5 +KEI5 k ei5 +KEN5 k en5 +KENG5 k eng5 +KONG5 k ong5 +KOU5 k ou5 +KU5 k u5 +KUA5 k ua5 +KUAI5 k uai5 +KUAN5 k uan5 +KUANG5 k uang5 +KUI5 k ui5 +KUN5 k un5 +KUO5 k uo5 +LA5 l a5 +LAI5 l ai5 +LAN5 l an5 +LANG5 l ang5 +LAO5 l ao5 +LE5 l e5 +LEI5 l ei5 +LENG5 l eng5 +LI5 l i5 +LIA5 l ia5 +LIAN5 l ian5 +LIANG5 l iang5 +LIAO5 l iao5 +LIE5 l ie5 +LIN5 l in5 +LING5 l ing5 +LIU5 l iu5 +LONG5 l ong5 +LOU5 l ou5 +LU5 l u5 +LUAN5 l uan5 +LUE5 l ve5 +LVE5 l ve5 +LUN5 l un5 +LUO5 l uo5 +LV5 l v5 +MA5 m a5 +MAI5 m ai5 +MAN5 m an5 +MANG5 m ang5 +MAO5 m ao5 +ME5 m e5 +MEI5 m ei5 +MEN5 m en5 +MENG5 m eng5 +MI5 m i5 +MIAN5 m ian5 +MIAO5 m iao5 +MIE5 m ie5 +MIN5 m in5 +MING5 m ing5 +MIU5 m iu5 +MO5 m o5 +MOU5 m ou5 +MU5 m u5 +NA5 n a5 +NAI5 n ai5 +NAN5 n an5 +NANG5 n ang5 +NAO5 n ao5 +NE5 n e5 +NEI5 n ei5 +NEN5 n en5 +NENG5 n eng5 +NI5 n i5 +NIAN5 n ian5 +NIANG5 n iang5 +NIAO5 n iao5 +NIE5 n ie5 +NIN5 n in5 +NING5 n ing5 +NIU5 n iu5 +NONG5 n ong5 +NU5 n u5 +NUAN5 n uan5 +NUE5 n ve5 +NVE5 n ve5 +NUO5 n uo5 +NV5 n v5 +O5 oo o5 +OU5 oo ou5 +PA5 p a5 +PAI5 p ai5 +PAN5 p an5 +PANG5 p ang5 +PAO5 p ao5 +PEI5 p ei5 +PEN5 p en5 +PENG5 p eng5 +PI5 p i5 +PIAN5 p ian5 +PIAO5 p iao5 +PIE5 p ie5 +PIN5 p in5 +PING5 p ing5 +PO5 p o5 +POU5 p ou5 +PU5 p u5 +QI5 q i5 +QIA5 q ia5 +QIAN5 q ian5 +QIANG5 q iang5 +QIAO5 q iao5 +QIE5 q ie5 +QIN5 q in5 +QING5 q ing5 +QIONG5 q iong5 +QIU5 q iu5 +QU5 q v5 +QUAN5 q van5 +QUE5 q ve5 +QUN5 q vn5 +RAN5 r an5 +RANG5 r ang5 +RAO5 r ao5 +RE5 r e5 +REN5 r en5 +RENG5 r eng5 +RI5 r iz5 +RONG5 r ong5 +ROU5 r ou5 +RU5 r u5 +RUAN5 r uan5 +RUI5 r ui5 +RUN5 r un5 +RUO5 r uo5 +SA5 s a5 +SAI5 s ai5 +SAN5 s an5 +SANG5 s ang5 +SAO5 s ao5 +SE5 s e5 +SEN5 s en5 +SENG5 s eng5 +SHA5 sh a5 +SHAI5 sh ai5 +SHAN5 sh an5 +SHANG5 sh ang5 +SHAO5 sh ao5 +SHE5 sh e5 +SHEI5 sh ei5 +SHEN5 sh en5 +SHENG5 sh eng5 +SHI5 sh ix5 +SHOU5 sh ou5 +SHU5 sh u5 +SHUA5 sh ua5 +SHUAI5 sh uai5 +SHUAN5 sh uan5 +SHUANG5 sh uang5 +SHUI5 sh ui5 +SHUN5 sh un5 +SHUO5 sh uo5 +SI5 s iy5 +SONG5 s ong5 +SOU5 s ou5 +SU5 s u5 +SUAN5 s uan5 +SUI5 s ui5 +SUN5 s un5 +SUO5 s uo5 +TA5 t a5 +TAI5 t ai5 +TAN5 t an5 +TANG5 t ang5 +TAO5 t ao5 +TE5 t e5 +TENG5 t eng5 +TI5 t i5 +TIAN5 t ian5 +TIAO5 t iao5 +TIE5 t ie5 +TING5 t ing5 +TONG5 t ong5 +TOU5 t ou5 +TU5 t u5 +TUAN5 t uan5 +TUI5 t ui5 +TUN5 t un5 +TUO5 t uo5 +WA5 uu ua5 +WAI5 uu uai5 +WAN5 uu uan5 +WANG5 uu uang5 +WEI5 uu ui5 +WEN5 uu un5 +WENG5 uu ueng5 +WO5 uu uo5 +WU5 uu u5 +XI5 x i5 +XIA5 x ia5 +XIAN5 x ian5 +XIANG5 x iang5 +XIAO5 x iao5 +XIE5 x ie5 +XIN5 x in5 +XING5 x ing5 +XIONG5 x iong5 +XIU5 x iu5 +XU5 x v5 +XUAN5 x van5 +XUE5 x ve5 +XUN5 x vn5 +YA5 ii ia5 +YAN5 ii ian5 +YANG5 ii iang5 +YAO5 ii iao5 +YE5 ii ie5 +YI5 ii i5 +YIN5 ii in5 +YING5 ii ing5 +YO5 ii ou5 +YONG5 ii iong5 +YOU5 ii iu5 +YU5 vv v5 +YUAN5 vv van5 +YUE5 vv ve5 +YUN5 vv vn5 +YUO5 ii ou5 +ZA5 z a5 +ZAI5 z ai5 +ZAN5 z an5 +ZANG5 z ang5 +ZAO5 z ao5 +ZE5 z e5 +ZEI5 z ei5 +ZEN5 z en5 +ZENG5 z eng5 +ZHA5 zh a5 +ZHAI5 zh ai5 +ZHAN5 zh an5 +ZHANG5 zh ang5 +ZHAO5 zh ao5 +ZHE5 zh e5 +ZHEI5 zh ei5 +ZHEN5 zh en5 +ZHENG5 zh eng5 +ZHI5 zh ix5 +ZHONG5 zh ong5 +ZHOU5 zh ou5 +ZHU5 zh u5 +ZHUA5 zh ua5 +ZHUAI5 zh uai5 +ZHUAN5 zh uan5 +ZHUANG5 zh uang5 +ZHUI5 zh ui5 +ZHUN5 zh un5 +ZHUO5 zh uo5 +ZI5 z 
iy5 +ZONG5 z ong5 +ZOU5 z ou5 +ZU5 z u5 +ZUAN5 z uan5 +ZUI5 z ui5 +ZUN5 z un5 +ZUO5 z uo5 +EI5 ee ei5 +TEI5 t ei5 +HNG5 ee eng5 +LO5 l o5 +N5 ee en5 +NG5 ee eng5 +NOU5 n ao5 +SEI5 s ei5 \ No newline at end of file diff --git a/examples/thchs30/a0/local/data.sh b/examples/thchs30/a0/local/data.sh new file mode 100644 index 000000000..8614a0415 --- /dev/null +++ b/examples/thchs30/a0/local/data.sh @@ -0,0 +1,53 @@ +#! /usr/bin/env bash + +stage=-1 +stop_stage=100 + +source ${MAIN_ROOT}/utils/parse_options.sh + +mkdir -p data +TARGET_DIR=${MAIN_ROOT}/examples/dataset +mkdir -p ${TARGET_DIR} +LEXICON_NAME=$1 + +# download data, generate manifests +if [ ${stage} -le -1 ] && [ ${stop_stage} -ge -1 ]; then + python3 ${TARGET_DIR}/thchs30/thchs30.py \ + --manifest_prefix="data/manifest" \ + --target_dir="${TARGET_DIR}/thchs30" + + if [ $? -ne 0 ]; then + echo "Prepare THCHS-30 failed. Terminated." + exit 1 + fi +fi + +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + # dump manifest to data/ + python3 ${MAIN_ROOT}/utils/dump_manifest.py --manifest-path=data/manifest.train --output-dir=data +fi + +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + # copy files to data/dict to gen word.lexicon + cp ${TARGET_DIR}/thchs30/data_thchs30/lm_word/lexicon.txt data/dict/lm_word_lexicon_1 + cp ${TARGET_DIR}/thchs30/resource/dict/lexicon.txt data/dict/lm_word_lexicon_2 + # copy phone.lexicon to data/dict + cp ${TARGET_DIR}/thchs30/data_thchs30/lm_phone/lexicon.txt data/dict/phone.lexicon +fi + +if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then + # gen word.lexicon + python local/gen_word2phone.py --lexicon-files="data/dict/lm_word_lexicon_1 data/dict/lm_word_lexicon_2" --output-path=data/dict/word.lexicon +fi + +if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then + # reorganize dataset for MFA + if [ ! -d $EXP_DIR/thchs30_corpus ]; then + echo "reorganizing thchs30 corpus..." + python local/reorganize_thchs30.py --root-dir=data --output-dir=data/thchs30_corpus --script-type=$LEXICON_NAME + echo "reorganization done." + fi +fi + +echo "THCHS-30 data preparation done." +exit 0 diff --git a/examples/thchs30/a0/local/gen_word2phone.py b/examples/thchs30/a0/local/gen_word2phone.py new file mode 100644 index 000000000..9bc0249bf --- /dev/null +++ b/examples/thchs30/a0/local/gen_word2phone.py @@ -0,0 +1,114 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
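
The data.sh stages above tie the lexicon listed before it into the MFA pipeline: each entry maps a toned pinyin syllable to an initial plus a toned final, which is the unit MFA aligns against. As a rough illustration of that format, a loader might look like the sketch below; the file path and function name are hypothetical, only the line format comes from the patch.

from collections import defaultdict

# Each line looks like "NIANG2 n iang2": a toned syllable followed by its phones.
def load_syllable_lexicon(path="data/dict/syllable.lexicon"):   # path is illustrative
    lexicon = defaultdict(list)
    with open(path, encoding="utf-8") as f:
        for line in f:
            parts = line.strip().split()
            if len(parts) < 2:
                continue
            syllable, phones = parts[0], parts[1:]
            lexicon[syllable].append(phones)
    return lexicon

# lex = load_syllable_lexicon()
# lex["NIANG2"]  ->  [["n", "iang2"]]
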
+"""Gen Chinese characters to THCHS30-30 phone lexicon using THCHS30-30's lexicon +file1: THCHS-30/data_thchs30/lm_word/lexicon.txt +file2: THCHS-30/resource/dict/lexicon.txt +""" +import argparse +from collections import defaultdict +from pathlib import Path +from typing import List +from typing import Union + +# key: (cn, ('ee', 'er4')),value: count +cn_phones_counter = defaultdict(int) +# key: cn, value: list of (phones, num) +cn_counter = defaultdict(list) +# key: cn, value: list of (phones, probabilities) +cn_counter_p = defaultdict(list) + + +def is_Chinese(ch): + if '\u4e00' <= ch <= '\u9fff': + return True + return False + + +def proc_line(line: str): + line = line.strip() + if is_Chinese(line[0]): + line_list = line.split() + cn_list = line_list[0] + phone_list = line_list[1:] + if len(cn_list) == len(phone_list) / 2: + new_phone_list = [(phone_list[i], phone_list[i + 1]) + for i in range(0, len(phone_list), 2)] + assert len(cn_list) == len(new_phone_list) + for idx, cn in enumerate(cn_list): + phones = new_phone_list[idx] + cn_phones_counter[(cn, phones)] += 1 + + +""" +example lines of output +the first column is a Chinese character +the second is the probability of this pronunciation +and the rest are the phones of this pronunciation +一 0.22 ii i1↩ +一 0.45 ii i4↩ +一 0.32 ii i2↩ +一 0.01 ii i5 +""" + + +def gen_lexicon(lexicon_files: List[Union[str, Path]], + output_path: Union[str, Path]): + for file_path in lexicon_files: + with open(file_path, "r") as f1: + for line in f1: + proc_line(line) + + for key in cn_phones_counter: + cn = key[0] + cn_counter[cn].append((key[1], cn_phones_counter[key])) + + for key in cn_counter: + phone_count_list = cn_counter[key] + count_sum = sum([x[1] for x in phone_count_list]) + for item in phone_count_list: + p = item[1] / count_sum + p = round(p, 2) + if p > 0: + cn_counter_p[key].append((item[0], p)) + + with open(output_path, "w") as wf: + for key in cn_counter_p: + phone_p_list = cn_counter_p[key] + for item in phone_p_list: + phones, p = item + wf.write(key + " " + str(p) + " " + " ".join(phones) + "\n") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Gen Chinese characters to phone lexicon for THCHS-30 dataset" + ) + # A line of word_lexicon: + # 一丁点 ii i4 d ing1 d ian3 + # the first is word, and the rest are the phones of the word, and the len of phones is twice of the word's len + parser.add_argument( + "--lexicon-files", + type=str, + default="data/dict/lm_word_lexicon_1 data/dict/lm_word_lexicon_2", + help="lm_word_lexicon files") + parser.add_argument( + "--output-path", + type=str, + default="data/dict/word.lexicon", + help="path to save output word2phone lexicon") + args = parser.parse_args() + lexicon_files = args.lexicon_files.split(" ") + output_path = Path(args.output_path).expanduser() + + gen_lexicon(lexicon_files, output_path) diff --git a/examples/thchs30/a0/local/reorganize_thchs30.py b/examples/thchs30/a0/local/reorganize_thchs30.py new file mode 100644 index 000000000..c7c6248bc --- /dev/null +++ b/examples/thchs30/a0/local/reorganize_thchs30.py @@ -0,0 +1,84 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Recorganize THCHS-30 for MFA +read manifest.train from root-dir +Link *.wav to output-dir +dump *.lab from manifest.train, such as: text、syllable and phone +Manifest file is a json-format file with each line containing the +meta data (i.e. audio filepath, transcript and audio duration) +""" +import argparse +import os +from pathlib import Path +from typing import Union + + +def link_wav(root_dir: Union[str, Path], output_dir: Union[str, Path]): + wav_scp_path = root_dir / 'wav.scp' + with open(wav_scp_path, 'r') as rf: + for line in rf: + utt, feat = line.strip().split() + wav_path = feat + wav_name = wav_path.split("/")[-1] + new_wav_path = output_dir / wav_name + os.symlink(wav_path, new_wav_path) + + +def write_lab(root_dir: Union[str, Path], + output_dir: Union[str, Path], + script_type='phone'): + # script_type can in {'word', 'syllable', 'phone'} + json_name = 'text.' + script_type + json_path = root_dir / json_name + with open(json_path, 'r') as rf: + for line in rf: + line = line.strip().split() + utt_id = line[0] + context = ' '.join(line[1:]) + transcript_name = utt_id + '.lab' + transcript_path = output_dir / transcript_name + with open(transcript_path, 'wt') as wf: + if script_type == 'word': + # add space between chinese char + context = ''.join([f + ' ' for f in context])[:-1] + wf.write(context + "\n") + + +def reorganize_thchs30(root_dir: Union[str, Path], + output_dir: Union[str, Path]=None, + script_type='phone'): + output_dir.mkdir(parents=True, exist_ok=True) + link_wav(root_dir, output_dir) + write_lab(root_dir, output_dir, script_type) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Reorganize THCHS-30 dataset for MFA") + parser.add_argument("--root-dir", type=str, help="path to thchs30 dataset.") + parser.add_argument( + "--output-dir", + type=str, + help="path to save outputs (audio and transcriptions)") + + parser.add_argument( + "--script-type", + type=str, + default="phone", + help="type of lab ('word'/'syllable'/'phone')") + + args = parser.parse_args() + root_dir = Path(args.root_dir).expanduser() + output_dir = Path(args.output_dir).expanduser() + reorganize_thchs30(root_dir, output_dir, args.script_type) diff --git a/examples/thchs30/a0/path.sh b/examples/thchs30/a0/path.sh new file mode 100644 index 000000000..fc953bebf --- /dev/null +++ b/examples/thchs30/a0/path.sh @@ -0,0 +1,13 @@ +export MAIN_ROOT=`realpath ${PWD}/../../../` + +export PATH=${MAIN_ROOT}:${MAIN_ROOT}/utils:${PATH} +export LC_ALL=C + +# Use UTF-8 in Python to avoid UnicodeDecodeError when LC_ALL=C +export PYTHONIOENCODING=UTF-8 +export PYTHONPATH=${MAIN_ROOT}:${PYTHONPATH} + +export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib/ + +# MFA is in tools +export PATH=${MAIN_ROOT}/tools/montreal-forced-aligner/bin:$PATH \ No newline at end of file diff --git a/examples/thchs30/a0/run.sh b/examples/thchs30/a0/run.sh new file mode 100755 index 000000000..5081b612a --- /dev/null +++ b/examples/thchs30/a0/run.sh @@ -0,0 +1,35 @@ +#!/bin/bash +set -e +source path.sh +stage=0 +stop_stage=100 +EXP_DIR=exp +# LEXICON_NAME in {'phone', 'syllable', 'word'} 
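
reorganize_thchs30.py above only symlinks the audio and writes the .lab transcripts; it does not verify the result. MFA expects every utterance in the corpus directory to come as a matching <utt>.wav / <utt>.lab pair, so a purely illustrative sanity check (the directory name is taken from data.sh; the check itself is not part of this patch) could be:

from pathlib import Path

corpus = Path("data/thchs30_corpus")
wavs = {p.stem for p in corpus.glob("*.wav")}
labs = {p.stem for p in corpus.glob("*.lab")}
missing = sorted(wavs - labs)
if missing:
    print(f"{len(missing)} utterances have audio but no transcript, e.g. {missing[:5]}")
else:
    print(f"{len(wavs)} wav/lab pairs ready for mfa_train_and_align")
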
+LEXICON_NAME='phone' +# set MFA num_jobs as half of machine's cpu core number +NUM_JOBS=$((`nproc`/2)) +source ${MAIN_ROOT}/utils/parse_options.sh || exit 1; + +# download dataset、unzip and generate manifest +# gen lexicon relink gen dump +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + # prepare data + echo "Start prepare thchs30 data for MFA ..." + bash ./local/data.sh $LEXICON_NAME || exit -1 +fi + +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + # run MFA + if [ ! -d "$EXP_DIR/thchs30_alignment" ]; then + echo "Start MFA training ..." + mfa_train_and_align data/thchs30_corpus data/dict/$LEXICON_NAME.lexicon $EXP_DIR/thchs30_alignment -o $EXP_DIR/thchs30_model --clean --verbose --temp_directory exp/.mfa_train_and_align --num_jobs $NUM_JOBS + echo "MFA training done! \nresults: $EXP_DIR/thchs30_alignment \nmodel: $EXP_DIR/thchs30_model\n" + fi +fi + + + + + + + diff --git a/examples/timit/README.md b/examples/timit/README.md new file mode 100644 index 000000000..b7c8b7545 --- /dev/null +++ b/examples/timit/README.md @@ -0,0 +1,3 @@ +# TIMIT + +* s1 u2 model with phone unit diff --git a/examples/timit/s1/README.md b/examples/timit/s1/README.md new file mode 100644 index 000000000..4d9b146af --- /dev/null +++ b/examples/timit/s1/README.md @@ -0,0 +1,3 @@ +# TIMIT + +Results will be organized and updated soon. diff --git a/examples/timit/s1/conf/augmentation.json b/examples/timit/s1/conf/augmentation.json new file mode 100644 index 000000000..8e6e97040 --- /dev/null +++ b/examples/timit/s1/conf/augmentation.json @@ -0,0 +1,36 @@ +[ + { + "type": "shift", + "params": { + "min_shift_ms": -5, + "max_shift_ms": 5 + }, + "prob": 1.0 + }, + { + "type": "speed", + "params": { + "min_speed_rate": 0.9, + "max_speed_rate": 1.1, + "num_rates": 3 + }, + "prob": 0.0 + }, + { + "type": "specaug", + "params": { + "F": 10, + "T": 50, + "n_freq_masks": 2, + "n_time_masks": 2, + "p": 1.0, + "W": 80, + "adaptive_number_ratio": 0, + "adaptive_size_ratio": 0, + "max_n_time_masks": 20, + "replace_with_zero": true, + "warp_mode": "PIL" + }, + "prob": 1.0 + } +] diff --git a/examples/timit/s1/conf/dev_spk.list b/examples/timit/s1/conf/dev_spk.list new file mode 100644 index 000000000..edcb3ef77 --- /dev/null +++ b/examples/timit/s1/conf/dev_spk.list @@ -0,0 +1,50 @@ +faks0 +fdac1 +fjem0 +mgwt0 +mjar0 +mmdb1 +mmdm2 +mpdf0 +fcmh0 +fkms0 +mbdg0 +mbwm0 +mcsh0 +fadg0 +fdms0 +fedw0 +mgjf0 +mglb0 +mrtk0 +mtaa0 +mtdt0 +mthc0 +mwjg0 +fnmr0 +frew0 +fsem0 +mbns0 +mmjr0 +mdls0 +mdlf0 +mdvc0 +mers0 +fmah0 +fdrw0 +mrcs0 +mrjm4 +fcal1 +mmwh0 +fjsj0 +majc0 +mjsw0 +mreb0 +fgjd0 +fjmg0 +mroa0 +mteb0 +mjfc0 +mrjr0 +fmml0 +mrws1 \ No newline at end of file diff --git a/examples/timit/s1/conf/phones.60-48-39.map b/examples/timit/s1/conf/phones.60-48-39.map new file mode 100644 index 000000000..946f3befa --- /dev/null +++ b/examples/timit/s1/conf/phones.60-48-39.map @@ -0,0 +1,61 @@ +aa aa aa +ae ae ae +ah ah ah +ao ao aa +aw aw aw +ax ax ah +ax-h ax ah +axr er er +ay ay ay +b b b +bcl vcl sil +ch ch ch +d d d +dcl vcl sil +dh dh dh +dx dx dx +eh eh eh +el el l +em m m +en en n +eng ng ng +epi epi sil +er er er +ey ey ey +f f f +g g g +gcl vcl sil +h# sil sil +hh hh hh +hv hh hh +ih ih ih +ix ix ih +iy iy iy +jh jh jh +k k k +kcl cl sil +l l l +m m m +n n n +ng ng ng +nx n n +ow ow ow +oy oy oy +p p p +pau sil sil +pcl cl sil +q +r r r +s s s +sh sh sh +t t t +tcl cl sil +th th th +uh uh uh +uw uw uw +ux uw uw +v v v +w w w +y y y +z z z +zh zh sh \ No newline at end of file diff --git 
a/examples/timit/s1/conf/test_spk.list b/examples/timit/s1/conf/test_spk.list new file mode 100644 index 000000000..3cfa8f5de --- /dev/null +++ b/examples/timit/s1/conf/test_spk.list @@ -0,0 +1,24 @@ +mdab0 +mwbt0 +felc0 +mtas1 +mwew0 +fpas0 +mjmp0 +mlnt0 +fpkt0 +mlll0 +mtls0 +fjlm0 +mbpm0 +mklt0 +fnlp0 +mcmj0 +mjdh0 +fmgd0 +mgrt0 +mnjm0 +fdhc0 +mjln0 +mpam0 +fmld0 \ No newline at end of file diff --git a/examples/timit/s1/conf/transformer.yaml b/examples/timit/s1/conf/transformer.yaml new file mode 100644 index 000000000..c3b519968 --- /dev/null +++ b/examples/timit/s1/conf/transformer.yaml @@ -0,0 +1,112 @@ +# https://yaml.org/type/float.html +data: + train_manifest: data/manifest.train + dev_manifest: data/manifest.dev + test_manifest: data/manifest.test + min_input_len: 0.5 # second + max_input_len: 30.0 # second + min_output_len: 0.0 # tokens + max_output_len: 400.0 # tokens + min_output_input_ratio: 0.05 + max_output_input_ratio: 100.0 + +collator: + vocab_filepath: data/vocab.txt + unit_type: "word" + mean_std_filepath: "" + augmentation_config: "" + batch_size: 64 + raw_wav: True # use raw_wav or kaldi feature + specgram_type: fbank #linear, mfcc, fbank + feat_dim: 80 + delta_delta: False + dither: 1.0 + target_sample_rate: 16000 + max_freq: None + n_fft: None + stride_ms: 10.0 + window_ms: 25.0 + use_dB_normalization: True + target_dB: -20 + random_seed: 0 + keep_transcription_text: False + sortagrad: True + shuffle_method: batch_shuffle + num_workers: 2 + + +# network architecture +model: + cmvn_file: "data/mean_std.json" + cmvn_file_type: "json" + # encoder related + encoder: transformer + encoder_conf: + output_size: 256 # dimension of attention + attention_heads: 4 + linear_units: 2048 # the number of units of position-wise feed forward + num_blocks: 12 # the number of encoder blocks + dropout_rate: 0.1 + positional_dropout_rate: 0.1 + attention_dropout_rate: 0.0 + input_layer: conv2d # encoder input type, you can chose conv2d, conv2d6 and conv2d8 + normalize_before: true + + # decoder related + decoder: transformer + decoder_conf: + attention_heads: 4 + linear_units: 2048 + num_blocks: 6 + dropout_rate: 0.1 + positional_dropout_rate: 0.1 + self_attention_dropout_rate: 0.0 + src_attention_dropout_rate: 0.0 + + # hybrid CTC/attention + model_conf: + ctc_weight: 0.3 + ctc_dropoutrate: 0.0 + ctc_grad_norm_type: instance + lsm_weight: 0.1 # label smoothing option + length_normalized_loss: false + + +training: + n_epoch: 120 + accum_grad: 2 + global_grad_clip: 5.0 + optim: adam + optim_conf: + lr: 0.002 + weight_decay: 1e-06 + scheduler: warmuplr # pytorch v1.1.0+ required + scheduler_conf: + warmup_steps: 400 + lr_decay: 1.0 + log_interval: 100 + checkpoint: + kbest_n: 50 + latest_n: 5 + + +decoding: + batch_size: 64 + error_rate_type: wer + decoding_method: attention # 'attention', 'ctc_greedy_search', 'ctc_prefix_beam_search', 'attention_rescoring' + lang_model_path: data/lm/common_crawl_00.prune01111.trie.klm + alpha: 2.5 + beta: 0.3 + beam_size: 10 + cutoff_prob: 1.0 + cutoff_top_n: 0 + num_proc_bsearch: 8 + ctc_weight: 0.5 # ctc weight for attention rescoring decode mode. + decoding_chunk_size: -1 # decoding chunk size. Defaults to -1. + # <0: for decoding, use full chunk. + # >0: for decoding, use fixed chunk size as set. + # 0: used for training, it's prohibited here. + num_decoding_left_chunks: -1 # number of left chunks for decoding. Defaults to -1. + simulate_streaming: False # simulate streaming inference. Defaults to False. 
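
The transformer.yaml above is a flat YAML file split into data, collator, model, training and decoding sections. The sketch below reads it with plain PyYAML just to show that layout; the recipe itself loads it through its own config wrapper, so this is not the actual loading code.

import yaml

with open("conf/transformer.yaml") as f:
    cfg = yaml.safe_load(f)

print(cfg["collator"]["unit_type"])                 # "word": TIMIT phones are treated as word units
print(cfg["model"]["encoder_conf"]["output_size"])  # 256, the attention dimension
print(cfg["decoding"]["decoding_method"])           # "attention"
print(cfg["training"]["n_epoch"])                   # 120
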
+ + diff --git a/examples/timit/s1/local/align.sh b/examples/timit/s1/local/align.sh new file mode 100755 index 000000000..ad6c84bc8 --- /dev/null +++ b/examples/timit/s1/local/align.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +if [ $# != 2 ];then + echo "usage: ${0} config_path ckpt_path_prefix" + exit -1 +fi + +ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}') +echo "using $ngpu gpus..." + +device=gpu +if [ ${ngpu} == 0 ];then + device=cpu +fi +config_path=$1 +ckpt_prefix=$2 + +batch_size=1 +output_dir=${ckpt_prefix} +mkdir -p ${output_dir} + +# align dump in `result_file` +# .tier, .TextGrid dump in `dir of result_file` +python3 -u ${BIN_DIR}/alignment.py \ +--device ${device} \ +--nproc 1 \ +--config ${config_path} \ +--result_file ${output_dir}/${type}.align \ +--checkpoint_path ${ckpt_prefix} \ +--opts decoding.batch_size ${batch_size} + +if [ $? -ne 0 ]; then + echo "Failed in ctc alignment!" + exit 1 +fi + +exit 0 diff --git a/examples/timit/s1/local/data.sh b/examples/timit/s1/local/data.sh new file mode 100755 index 000000000..1d16f454a --- /dev/null +++ b/examples/timit/s1/local/data.sh @@ -0,0 +1,87 @@ +#!/bin/bash + +stage=-1 +stop_stage=100 + +unit_type=word +TIMIT_path= + +source ${MAIN_ROOT}/utils/parse_options.sh + +mkdir -p data +TARGET_DIR=${MAIN_ROOT}/examples/dataset +mkdir -p ${TARGET_DIR} + +if [ ${stage} -le -1 ] && [ ${stop_stage} -ge -1 ]; then + # download data, generate manifests + python3 ${TARGET_DIR}/timit/timit_kaldi_standard_split.py \ + --manifest_prefix="data/manifest" \ + --src="data/local" \ + + if [ $? -ne 0 ]; then + echo "Prepare TIMIT failed. Terminated." + exit 1 + fi +fi + +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + # build vocabulary + python3 ${MAIN_ROOT}/utils/build_vocab.py \ + --unit_type ${unit_type} \ + --count_threshold=0 \ + --vocab_path="data/vocab.txt" \ + --manifest_paths="data/manifest.train.raw" + + if [ $? -ne 0 ]; then + echo "Build vocabulary failed. Terminated." + exit 1 + fi +fi + + +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + # compute mean and stddev for normalizer + num_workers=$(nproc) + python3 ${MAIN_ROOT}/utils/compute_mean_std.py \ + --manifest_path="data/manifest.train.raw" \ + --num_samples=-1 \ + --specgram_type="fbank" \ + --feat_dim=80 \ + --delta_delta=false \ + --sample_rate=16000 \ + --stride_ms=10.0 \ + --window_ms=25.0 \ + --use_dB_normalization=False \ + --num_workers=${num_workers} \ + --output_path="data/mean_std.json" + + if [ $? -ne 0 ]; then + echo "Compute mean and stddev failed. Terminated." + exit 1 + fi +fi + + +if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then + # format manifest with tokenids, vocab size + for set in train dev test; do + { + python3 ${MAIN_ROOT}/utils/format_data.py \ + --feat_type "raw" \ + --cmvn_path "data/mean_std.json" \ + --unit_type ${unit_type} \ + --vocab_path="data/vocab.txt" \ + --manifest_path="data/manifest.${set}.raw" \ + --output_path="data/manifest.${set}" + + if [ $? -ne 0 ]; then + echo "Formt mnaifest.${set} failed. Terminated." + exit 1 + fi + }& + done + wait +fi + +echo "TIMIT Data preparation done." +exit 0 diff --git a/examples/timit/s1/local/export.sh b/examples/timit/s1/local/export.sh new file mode 100755 index 000000000..f99a15bad --- /dev/null +++ b/examples/timit/s1/local/export.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +if [ $# != 3 ];then + echo "usage: $0 config_path ckpt_prefix jit_model_path" + exit -1 +fi + +ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}') +echo "using $ngpu gpus..." 
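
The TIMIT data.sh above ends by formatting one JSON record per utterance into data/manifest.{train,dev,test}. The exact schema is not shown in this patch, so the record below is an assumption (field names inferred from the docstrings in these scripts) meant only to illustrate why the length filters in transformer.yaml apply line by line:

import json

# Hypothetical manifest line; the key names are assumed, not confirmed by this patch.
record = {
    "audio_filepath": "/path/to/TIMIT/TRAIN/DR1/FAKS0/SI1573.WAV",
    "duration": 2.34,                        # seconds, filtered by min/max_input_len
    "text": "sil hh ah dx ih f ay n ih sil"  # 39-phone transcript after normalization
}
print(json.dumps(record))
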
+ +config_path=$1 +ckpt_path_prefix=$2 +jit_model_export_path=$3 + +device=gpu +if [ ${ngpu} == 0 ];then + device=cpu +fi + +python3 -u ${BIN_DIR}/export.py \ +--device ${device} \ +--nproc ${ngpu} \ +--config ${config_path} \ +--checkpoint_path ${ckpt_path_prefix} \ +--export_path ${jit_model_export_path} + + +if [ $? -ne 0 ]; then + echo "Failed in export!" + exit 1 +fi + + +exit 0 diff --git a/examples/timit/s1/local/test.sh b/examples/timit/s1/local/test.sh new file mode 100755 index 000000000..a137924e2 --- /dev/null +++ b/examples/timit/s1/local/test.sh @@ -0,0 +1,71 @@ +#!/bin/bash + +if [ $# != 2 ];then + echo "usage: ${0} config_path ckpt_path_prefix" + exit -1 +fi + +ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}') +echo "using $ngpu gpus..." + +device=gpu +if [ ${ngpu} == 0 ];then + device=cpu +fi + +config_path=$1 +ckpt_prefix=$2 + +chunk_mode=false +if [[ ${config_path} =~ ^.*chunk_.*yaml$ ]];then + chunk_mode=true +fi + + +# download language model +#bash local/download_lm_en.sh +#if [ $? -ne 0 ]; then +# exit 1 +#fi + +for type in attention ctc_greedy_search; do + echo "decoding ${type}" + if [ ${chunk_mode} == true ];then + # stream decoding only support batchsize=1 + batch_size=1 + else + batch_size=64 + fi + python3 -u ${BIN_DIR}/test.py \ + --device ${device} \ + --nproc 1 \ + --config ${config_path} \ + --result_file ${ckpt_prefix}.${type}.rsl \ + --checkpoint_path ${ckpt_prefix} \ + --opts decoding.decoding_method ${type} decoding.batch_size ${batch_size} + + if [ $? -ne 0 ]; then + echo "Failed in evaluation!" + exit 1 + fi +done + +for type in ctc_prefix_beam_search attention_rescoring; do + echo "decoding ${type}" + batch_size=1 + python3 -u ${BIN_DIR}/test.py \ + --device ${device} \ + --nproc 1 \ + --config ${config_path} \ + --result_file ${ckpt_prefix}.${type}.rsl \ + --checkpoint_path ${ckpt_prefix} \ + --opts decoding.decoding_method ${type} decoding.batch_size ${batch_size} + + if [ $? -ne 0 ]; then + echo "Failed in evaluation!" + exit 1 + fi +done + + +exit 0 diff --git a/examples/timit/s1/local/timit_data_prep.sh b/examples/timit/s1/local/timit_data_prep.sh new file mode 100644 index 000000000..4bea057d0 --- /dev/null +++ b/examples/timit/s1/local/timit_data_prep.sh @@ -0,0 +1,95 @@ +#!/usr/bin/env bash + +# Copyright 2013 (Authors: Bagher BabaAli, Daniel Povey, Arnab Ghoshal) +# 2014 Brno University of Technology (Author: Karel Vesely) +# Apache 2.0. + +if [ $# -ne 1 ]; then + echo "Argument should be the Timit directory, see ../run.sh for example." + exit 1; +fi + +dir=`pwd`/data/local +mkdir -p $dir +local=`pwd`/local +utils=`pwd`/utils +conf=`pwd`/conf + +function error_exit () { + echo -e "$@" >&2; exit 1; +} +PROG=$(basename $0) + +[ -f $conf/test_spk.list ] || error_exit "$PROG line $LINENO: Eval-set speaker list not found."; +[ -f $conf/dev_spk.list ] || error_exit "$PROG line $LINENO: dev-set speaker list not found."; + +# First check if the train & test directories exist (these can either be upper- +# or lower-cased +if [ ! -d $*/TRAIN -o ! -d $*/TEST ] && [ ! -d $*/train -o ! 
-d $*/test ]; then + echo "timit_data_prep.sh: Spot check of command line argument failed" + echo "Command line argument must be absolute pathname to TIMIT directory" + echo "with name like /export/corpora5/LDC/LDC93S1/timit/TIMIT" + exit 1; +fi + +# Now check what case the directory structure is +uppercased=false +train_dir=train +test_dir=test +if [ -d $*/TRAIN ]; then + uppercased=true + train_dir=TRAIN + test_dir=TEST +fi + +tmpdir=$(mktemp -d /tmp/kaldi.XXXX); +trap 'rm -rf "$tmpdir"' EXIT + +# Get the list of speakers. The list of speakers in the 24-speaker core test +# set and the 50-speaker development set must be supplied to the script. All +# speakers in the 'train' directory are used for training. +if $uppercased; then + tr '[:lower:]' '[:upper:]' < $conf/dev_spk.list > $tmpdir/dev_spk + tr '[:lower:]' '[:upper:]' < $conf/test_spk.list > $tmpdir/test_spk + ls -d "$*"/TRAIN/DR*/* | sed -e "s:^.*/::" > $tmpdir/train_spk +else + tr '[:upper:]' '[:lower:]' < $conf/dev_spk.list > $tmpdir/dev_spk + tr '[:upper:]' '[:lower:]' < $conf/test_spk.list > $tmpdir/test_spk + ls -d "$*"/train/dr*/* | sed -e "s:^.*/::" > $tmpdir/train_spk +fi + +cd $dir +for x in train dev test; do + # First, find the list of audio files (use only si & sx utterances). + # Note: train & test sets are under different directories, but doing find on + # both and grepping for the speakers will work correctly. + find $*/{$train_dir,$test_dir} -not \( -iname 'SA*' \) -iname '*.WAV' \ + | grep -f $tmpdir/${x}_spk > ${x}_sph.flist + + sed -e 's:.*/\(.*\)/\(.*\).\(WAV\|wav\)$:\1_\2:' ${x}_sph.flist \ + > $tmpdir/${x}_sph.uttids + paste $tmpdir/${x}_sph.uttids ${x}_sph.flist \ + | sort -k1,1 > ${x}_sph.scp + + cat ${x}_sph.scp | awk '{print $1}' > ${x}.uttids + + # Now, Convert the transcripts into our format (no normalization yet) + # Get the transcripts: each line of the output contains an utterance + # ID followed by the transcript. + find $*/{$train_dir,$test_dir} -not \( -iname 'SA*' \) -iname '*.PHN' \ + | grep -f $tmpdir/${x}_spk > $tmpdir/${x}_phn.flist + sed -e 's:.*/\(.*\)/\(.*\).\(PHN\|phn\)$:\1_\2:' $tmpdir/${x}_phn.flist \ + > $tmpdir/${x}_phn.uttids + while read line; do + [ -f $line ] || error_exit "Cannot find transcription file '$line'"; + cut -f3 -d' ' "$line" | tr '\n' ' ' | perl -ape 's: *$:\n:;' + done < $tmpdir/${x}_phn.flist > $tmpdir/${x}_phn.trans + paste $tmpdir/${x}_phn.uttids $tmpdir/${x}_phn.trans \ + | sort -k1,1 > ${x}.trans + + # Do normalization steps. + cat ${x}.trans | $local/timit_norm_trans.pl -i - -m $conf/phones.60-48-39.map -to 39 | sort > $x.text || exit 1; + +done + +echo "Data preparation succeeded" \ No newline at end of file diff --git a/examples/timit/s1/local/timit_norm_trans.pl b/examples/timit/s1/local/timit_norm_trans.pl new file mode 100644 index 000000000..702d9b152 --- /dev/null +++ b/examples/timit/s1/local/timit_norm_trans.pl @@ -0,0 +1,91 @@ +#!/usr/bin/env perl +use warnings; #sed replacement for -w perl parameter + +# Copyright 2012 Arnab Ghoshal + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED +# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, +# MERCHANTABLITY OR NON-INFRINGEMENT. 
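
timit_data_prep.sh above derives utterance IDs with a sed expression that joins the speaker directory and the file stem. A small Python equivalent, only to make the naming convention explicit (the TIMIT root path is the example given in run.sh; the specific utterance is illustrative):

from pathlib import Path

def utt_id(wav_path: str) -> str:
    p = Path(wav_path)
    return f"{p.parent.name}_{p.stem}"   # <speaker>_<utterance>

print(utt_id("/export/corpora5/LDC/LDC93S1/timit/TIMIT/TRAIN/DR1/FAKS0/SI1573.WAV"))
# -> FAKS0_SI1573
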
+# See the Apache 2 License for the specific language governing permissions and +# limitations under the License. + + +# This script normalizes the TIMIT phonetic transcripts that have been +# extracted in a format where each line contains an utterance ID followed by +# the transcript, e.g.: +# fcke0_si1111 h# hh ah dx ux w iy dcl d ix f ay n ih q h# + +my $usage = "Usage: timit_norm_trans.pl -i transcript -m phone_map -from [60|48] -to [48|39] > normalized\n +Normalizes phonetic transcriptions for TIMIT, by mapping the phones to a +smaller set defined by the -m option. This script assumes that the mapping is +done in the \"standard\" fashion, i.e. to 48 or 39 phones. The input is +assumed to have 60 phones (+1 for glottal stop, which is deleted), but that can +be changed using the -from option. The input format is assumed to be utterance +ID followed by transcript on the same line.\n"; + +use strict; +use Getopt::Long; +die "$usage" unless(@ARGV >= 1); +my ($in_trans, $phone_map, $num_phones_out); +my $num_phones_in = 60; +GetOptions ("i=s" => \$in_trans, # Input transcription + "m=s" => \$phone_map, # File containing phone mappings + "from=i" => \$num_phones_in, # Input #phones: must be 60 or 48 + "to=i" => \$num_phones_out ); # Output #phones: must be 48 or 39 + +die $usage unless(defined($in_trans) && defined($phone_map) && + defined($num_phones_out)); +if ($num_phones_in != 60 && $num_phones_in != 48) { + die "Can only used 60 or 48 for -from (used $num_phones_in)." +} +if ($num_phones_out != 48 && $num_phones_out != 39) { + die "Can only used 48 or 39 for -to (used $num_phones_out)." +} +unless ($num_phones_out < $num_phones_in) { + die "Argument to -from ($num_phones_in) must be greater than that to -to ($num_phones_out)." +} + + +open(M, "<$phone_map") or die "Cannot open mappings file '$phone_map': $!"; +my (%phonemap, %seen_phones); +my $num_seen_phones = 0; +while () { + chomp; + next if ($_ =~ /^q\s*.*$/); # Ignore glottal stops. + m:^(\S+)\s+(\S+)\s+(\S+)$: or die "Bad line: $_"; + my $mapped_from = ($num_phones_in == 60)? $1 : $2; + my $mapped_to = ($num_phones_out == 48)? $2 : $3; + if (!defined($seen_phones{$mapped_to})) { + $seen_phones{$mapped_to} = 1; + $num_seen_phones += 1; + } + $phonemap{$mapped_from} = $mapped_to; +} +if ($num_seen_phones != $num_phones_out) { + die "Trying to map to $num_phones_out phones, but seen only $num_seen_phones"; +} + +open(T, "<$in_trans") or die "Cannot open transcription file '$in_trans': $!"; +while () { + chomp; + $_ =~ m:^(\S+)\s+(.+): or die "Bad line: $_"; + my $utt_id = $1; + my $trans = $2; + + $trans =~ s/q//g; # Remove glottal stops. + $trans =~ s/^\s*//; $trans =~ s/\s*$//; # Normalize spaces + + print $utt_id; + for my $phone (split(/\s+/, $trans)) { + if(exists $phonemap{$phone}) { print " $phonemap{$phone}"; } + if(not exists $phonemap{$phone}) { print " $phone"; } + } + print "\n"; +} \ No newline at end of file diff --git a/examples/timit/s1/local/train.sh b/examples/timit/s1/local/train.sh new file mode 100755 index 000000000..180d8b5a7 --- /dev/null +++ b/examples/timit/s1/local/train.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +if [ $# != 2 ];then + echo "usage: CUDA_VISIBLE_DEVICES=0 ${0} config_path ckpt_name" + exit -1 +fi + +ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}') +echo "using $ngpu gpus..." + +config_path=$1 +ckpt_name=$2 + +device=gpu +if [ ${ngpu} == 0 ];then + device=cpu +fi +echo "using ${device}..." 
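
timit_norm_trans.pl above folds the TIMIT 60-phone set (plus the glottal stop, which is deleted) down to 39 phones using conf/phones.60-48-39.map. A compact Python sketch of the same mapping, from the 60-phone column to the 39-phone column, meant only to mirror the Perl logic rather than replace it:

def load_map(path="conf/phones.60-48-39.map", to=39):
    mapping = {}
    with open(path) as f:
        for line in f:
            cols = line.split()
            if len(cols) < 3:                 # the bare "q" line: glottal stop is dropped entirely
                continue
            mapping[cols[0]] = cols[2] if to == 39 else cols[1]
    return mapping

def normalize(trans: str, mapping: dict) -> str:
    phones = [p for p in trans.split() if p != "q"]
    return " ".join(mapping.get(p, p) for p in phones)

m = load_map()
print(normalize("h# hh ah dx ux w iy dcl d ix f ay n ih q h#", m))
# -> sil hh ah dx uw w iy sil d ih f ay n ih sil
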
+ +mkdir -p exp + +# seed may break model convergence +seed=0 +if [ ${seed} != 0 ]; then + export FLAGS_cudnn_deterministic=True +fi + +python3 -u ${BIN_DIR}/train.py \ +--device ${device} \ +--nproc ${ngpu} \ +--config ${config_path} \ +--output exp/${ckpt_name} \ +--seed ${seed} + +if [ ${seed} != 0 ]; then + unset FLAGS_cudnn_deterministic +fi + +if [ $? -ne 0 ]; then + echo "Failed in training!" + exit 1 +fi + +exit 0 diff --git a/examples/timit/s1/path.sh b/examples/timit/s1/path.sh new file mode 100644 index 000000000..29841bc10 --- /dev/null +++ b/examples/timit/s1/path.sh @@ -0,0 +1,14 @@ +export MAIN_ROOT=`realpath ${PWD}/../../../` + +export PATH=${MAIN_ROOT}:${MAIN_ROOT}/utils:${PATH} +export LC_ALL=C + +# Use UTF-8 in Python to avoid UnicodeDecodeError when LC_ALL=C +export PYTHONIOENCODING=UTF-8 +export PYTHONPATH=${MAIN_ROOT}:${PYTHONPATH} + +export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib/ + + +MODEL=u2 +export BIN_DIR=${MAIN_ROOT}/deepspeech/exps/${MODEL}/bin diff --git a/examples/timit/s1/run.sh b/examples/timit/s1/run.sh new file mode 100755 index 000000000..75a2e0c52 --- /dev/null +++ b/examples/timit/s1/run.sh @@ -0,0 +1,45 @@ +#!/bin/bash +set -e +source path.sh + +stage=0 +stop_stage=50 +conf_path=conf/transformer.yaml +avg_num=10 +TIMIT_path= #path of TIMIT (Required, e.g. /export/corpora5/LDC/LDC93S1/timit/TIMIT) +source ${MAIN_ROOT}/utils/parse_options.sh || exit 1; + +avg_ckpt=avg_${avg_num} +ckpt=$(basename ${conf_path} | awk -F'.' '{print $1}') +echo "checkpoint name ${ckpt}" + +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + # prepare data + bash ./local/timit_data_prep.sh ${TIMIT_path} + bash ./local/data.sh || exit -1 +fi + +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + # train model, all `ckpt` under `exp` dir + CUDA_VISIBLE_DEVICES=0,1,2,3 ./local/train.sh ${conf_path} ${ckpt} +fi + +if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then + # avg n best model + avg.sh best exp/${ckpt}/checkpoints ${avg_num} +fi + +if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then + # test ckpt avg_n + CUDA_VISIBLE_DEVICES=7 ./local/test.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} || exit -1 +fi + +if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then + # ctc alignment of test data + CUDA_VISIBLE_DEVICES=0 ./local/align.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} || exit -1 +fi + +if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then + # export ckpt avg_n + CUDA_VISIBLE_DEVICES= ./local/export.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} exp/${ckpt}/checkpoints/${avg_ckpt}.jit +fi diff --git a/examples/tiny/s0/conf/augmentation.json b/examples/tiny/s0/conf/augmentation.json index a1a759e67..4480307b9 100644 --- a/examples/tiny/s0/conf/augmentation.json +++ b/examples/tiny/s0/conf/augmentation.json @@ -1,4 +1,13 @@ [ + { + "type": "speed", + "params": { + "min_speed_rate": 0.9, + "max_speed_rate": 1.1, + "num_rates": 3 + }, + "prob": 0.0 + }, { "type": "shift", "params": { @@ -6,5 +15,22 @@ "max_shift_ms": 5 }, "prob": 1.0 + }, + { + "type": "specaug", + "params": { + "W": 5, + "warp_mode": "PIL", + "F": 30, + "n_freq_masks": 2, + "T": 40, + "n_time_masks": 2, + "p": 1.0, + "adaptive_number_ratio": 0, + "adaptive_size_ratio": 0, + "max_n_time_masks": 20, + "replace_with_zero": true + }, + "prob": 1.0 } ] diff --git a/examples/tiny/s0/conf/deepspeech2.yaml b/examples/tiny/s0/conf/deepspeech2.yaml index dd9ce51f0..408996557 100644 --- a/examples/tiny/s0/conf/deepspeech2.yaml +++ b/examples/tiny/s0/conf/deepspeech2.yaml 
@@ -2,32 +2,38 @@ data: train_manifest: data/manifest.tiny dev_manifest: data/manifest.tiny - test_manifest: data/manifest.tiny - mean_std_filepath: data/mean_std.json - vocab_filepath: data/vocab.txt - augmentation_config: conf/augmentation.json - batch_size: 4 + test_manifest: data/manifest.tiny min_input_len: 0.0 - max_input_len: 27.0 + max_input_len: 30.0 min_output_len: 0.0 max_output_len: 400.0 min_output_input_ratio: 0.05 max_output_input_ratio: 10.0 + + +collator: + mean_std_filepath: data/mean_std.json + unit_type: char + vocab_filepath: data/vocab.txt + augmentation_config: conf/augmentation.json + random_seed: 0 + spm_model_prefix: specgram_type: linear - target_sample_rate: 16000 - max_freq: None - n_fft: None + feat_dim: + delta_delta: False stride_ms: 10.0 window_ms: 20.0 - delta_delta: False - dither: 1.0 + n_fft: None + max_freq: None + target_sample_rate: 16000 use_dB_normalization: True target_dB: -20 - random_seed: 0 + dither: 1.0 keep_transcription_text: False sortagrad: True shuffle_method: batch_shuffle - num_workers: 0 + num_workers: 2 + batch_size: 4 model: num_conv_layers: 2 @@ -35,14 +41,21 @@ model: rnn_layer_size: 2048 use_gru: False share_rnn_weights: True + blank_id: 0 + ctc_grad_norm_type: instance training: - n_epoch: 20 + n_epoch: 10 + accum_grad: 1 lr: 1e-5 - lr_decay: 1.0 + lr_decay: 0.8 weight_decay: 1e-06 global_grad_clip: 5.0 log_interval: 1 + checkpoint: + kbest_n: 3 + latest_n: 2 + decoding: batch_size: 128 diff --git a/examples/tiny/s0/conf/deepspeech2_online.yaml b/examples/tiny/s0/conf/deepspeech2_online.yaml new file mode 100644 index 000000000..0098a226c --- /dev/null +++ b/examples/tiny/s0/conf/deepspeech2_online.yaml @@ -0,0 +1,72 @@ +# https://yaml.org/type/float.html +data: + train_manifest: data/manifest.tiny + dev_manifest: data/manifest.tiny + test_manifest: data/manifest.tiny + min_input_len: 0.0 + max_input_len: 30.0 + min_output_len: 0.0 + max_output_len: 400.0 + min_output_input_ratio: 0.05 + max_output_input_ratio: 10.0 + + +collator: + mean_std_filepath: data/mean_std.json + unit_type: char + vocab_filepath: data/vocab.txt + augmentation_config: conf/augmentation.json + random_seed: 0 + spm_model_prefix: + specgram_type: linear + feat_dim: + delta_delta: False + stride_ms: 10.0 + window_ms: 20.0 + n_fft: None + max_freq: None + target_sample_rate: 16000 + use_dB_normalization: True + target_dB: -20 + dither: 1.0 + keep_transcription_text: False + sortagrad: True + shuffle_method: batch_shuffle + num_workers: 0 + batch_size: 4 + +model: + num_conv_layers: 2 + num_rnn_layers: 4 + rnn_layer_size: 2048 + rnn_direction: forward + num_fc_layers: 2 + fc_layers_size_list: 512, 256 + use_gru: True + blank_id: 0 + ctc_grad_norm_type: instance + +training: + n_epoch: 10 + accum_grad: 1 + lr: 1e-5 + lr_decay: 1.0 + weight_decay: 1e-06 + global_grad_clip: 5.0 + log_interval: 1 + checkpoint: + kbest_n: 3 + latest_n: 2 + + +decoding: + batch_size: 128 + error_rate_type: wer + decoding_method: ctc_beam_search + lang_model_path: data/lm/common_crawl_00.prune01111.trie.klm + alpha: 2.5 + beta: 0.3 + beam_size: 500 + cutoff_prob: 1.0 + cutoff_top_n: 40 + num_proc_bsearch: 8 diff --git a/examples/tiny/s0/local/data.sh b/examples/tiny/s0/local/data.sh index 727a3da95..02fdb7067 100755 --- a/examples/tiny/s0/local/data.sh +++ b/examples/tiny/s0/local/data.sh @@ -1,4 +1,4 @@ -#! 
/usr/bin/env bash +#!/bin/bash stage=-1 stop_stage=100 diff --git a/examples/tiny/s0/local/download_lm_en.sh b/examples/tiny/s0/local/download_lm_en.sh index 05ea793fb..dc1bdf665 100755 --- a/examples/tiny/s0/local/download_lm_en.sh +++ b/examples/tiny/s0/local/download_lm_en.sh @@ -1,4 +1,4 @@ -#! /usr/bin/env bash +#!/bin/bash . ${MAIN_ROOT}/utils/utility.sh diff --git a/examples/tiny/s0/local/export.sh b/examples/tiny/s0/local/export.sh index 1b19d5720..2e09e5f5e 100755 --- a/examples/tiny/s0/local/export.sh +++ b/examples/tiny/s0/local/export.sh @@ -1,7 +1,7 @@ -#! /usr/bin/env bash +#!/bin/bash -if [ $# != 3 ];then - echo "usage: $0 config_path ckpt_prefix jit_model_path" +if [ $# != 4 ];then + echo "usage: $0 config_path ckpt_prefix jit_model_path model_type" exit -1 fi @@ -11,9 +11,10 @@ echo "using $ngpu gpus..." config_path=$1 ckpt_path_prefix=$2 jit_model_export_path=$3 +model_type=$4 device=gpu -if [ ngpu == 0 ];then +if [ ${ngpu} == 0 ];then device=cpu fi @@ -22,8 +23,8 @@ python3 -u ${BIN_DIR}/export.py \ --nproc ${ngpu} \ --config ${config_path} \ --checkpoint_path ${ckpt_path_prefix} \ ---export_path ${jit_model_export_path} - +--export_path ${jit_model_export_path} \ +--model_type ${model_type} if [ $? -ne 0 ]; then echo "Failed in export!" diff --git a/examples/tiny/s0/local/test.sh b/examples/tiny/s0/local/test.sh index 79e05838c..b5b68c599 100755 --- a/examples/tiny/s0/local/test.sh +++ b/examples/tiny/s0/local/test.sh @@ -1,7 +1,7 @@ -#! /usr/bin/env bash +#!/bin/bash -if [ $# != 2 ];then - echo "usage: ${0} config_path ckpt_path_prefix" +if [ $# != 3 ];then + echo "usage: ${0} config_path ckpt_path_prefix model_type" exit -1 fi @@ -9,11 +9,12 @@ ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}') echo "using $ngpu gpus..." device=gpu -if [ ngpu == 0 ];then +if [ ${ngpu} == 0 ];then device=cpu fi config_path=$1 ckpt_prefix=$2 +model_type=$3 # download language model bash local/download_lm_en.sh @@ -26,7 +27,8 @@ python3 -u ${BIN_DIR}/test.py \ --nproc 1 \ --config ${config_path} \ --result_file ${ckpt_prefix}.rsl \ ---checkpoint_path ${ckpt_prefix} +--checkpoint_path ${ckpt_prefix} \ +--model_type ${model_type} if [ $? -ne 0 ]; then echo "Failed in evaluation!" diff --git a/examples/tiny/s0/local/train.sh b/examples/tiny/s0/local/train.sh index f8c9dbc0b..9a76c7ade 100755 --- a/examples/tiny/s0/local/train.sh +++ b/examples/tiny/s0/local/train.sh @@ -1,28 +1,49 @@ -#! /usr/bin/env bash +#!/bin/bash -if [ $# != 2 ];then - echo "usage: CUDA_VISIBLE_DEVICES=0 ${0} config_path ckpt_name" - exit -1 -fi +profiler_options= + +# seed may break model convergence +seed=0 + +source ${MAIN_ROOT}/utils/parse_options.sh || exit 1; ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}') echo "using $ngpu gpus..." -config_path=$1 -ckpt_name=$2 - device=gpu -if [ ngpu == 0 ];then +if [ ${ngpu} == 0 ];then device=cpu fi +if [ ${seed} != 0 ]; then + export FLAGS_cudnn_deterministic=True + echo "using seed $seed & FLAGS_cudnn_deterministic=True ..." +fi + + +if [ $# != 3 ];then + echo "usage: CUDA_VISIBLE_DEVICES=0 ${0} config_path ckpt_name model_type" + exit -1 +fi + +config_path=$1 +ckpt_name=$2 +model_type=$3 + mkdir -p exp python3 -u ${BIN_DIR}/train.py \ --device ${device} \ --nproc ${ngpu} \ --config ${config_path} \ ---output exp/${ckpt_name} +--output exp/${ckpt_name} \ +--model_type ${model_type} \ +--profiler-options "${profiler_options}" \ +--seed ${seed} + +if [ ${seed} != 0 ]; then + unset FLAGS_cudnn_deterministic +fi if [ $? 
-ne 0 ]; then echo "Failed in training!" diff --git a/examples/tiny/s0/local/tune.sh b/examples/tiny/s0/local/tune.sh deleted file mode 100755 index 4bb81d29b..000000000 --- a/examples/tiny/s0/local/tune.sh +++ /dev/null @@ -1,33 +0,0 @@ -#! /usr/bin/env bash - -if [ $# != 1 ];then - echo "usage: tune ckpt_path" - exit 1 -fi - -# grid-search for hyper-parameters in language model -python3 -u ${BIN_DIR}/tune.py \ ---device 'gpu' \ ---nproc 1 \ ---config conf/deepspeech2.yaml \ ---num_batches=-1 \ ---batch_size=128 \ ---beam_size=500 \ ---num_proc_bsearch=12 \ ---num_alphas=45 \ ---num_betas=8 \ ---alpha_from=1.0 \ ---alpha_to=3.2 \ ---beta_from=0.1 \ ---beta_to=0.45 \ ---cutoff_prob=1.0 \ ---cutoff_top_n=40 \ ---checkpoint_path ${1} - -if [ $? -ne 0 ]; then - echo "Failed in tuning!" - exit 1 -fi - - -exit 0 diff --git a/examples/tiny/s0/path.sh b/examples/tiny/s0/path.sh index 777da29ef..8a9345f2e 100644 --- a/examples/tiny/s0/path.sh +++ b/examples/tiny/s0/path.sh @@ -1,4 +1,4 @@ -export MAIN_ROOT=${PWD}/../../../ +export MAIN_ROOT=`realpath ${PWD}/../../../` export PATH=${MAIN_ROOT}:${MAIN_ROOT}/utils:${PATH} export LC_ALL=C diff --git a/examples/tiny/s0/run.sh b/examples/tiny/s0/run.sh index d7e153e8d..f39fb3fa0 100755 --- a/examples/tiny/s0/run.sh +++ b/examples/tiny/s0/run.sh @@ -7,6 +7,7 @@ stage=0 stop_stage=100 conf_path=conf/deepspeech2.yaml avg_num=1 +model_type=offline source ${MAIN_ROOT}/utils/parse_options.sh || exit 1; @@ -21,20 +22,20 @@ fi if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then # train model, all `ckpt` under `exp` dir - CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${ckpt} + CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${ckpt} ${model_type} fi if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then # avg n best model - avg.sh exp/${ckpt}/checkpoints ${avg_num} + avg.sh best exp/${ckpt}/checkpoints ${avg_num} fi if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then # test ckpt avg_n - CUDA_VISIBLE_DEVICES=${gpus} ./local/test.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} || exit -1 + CUDA_VISIBLE_DEVICES=${gpus} ./local/test.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} ${model_type} || exit -1 fi if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then # export ckpt avg_n - CUDA_VISIBLE_DEVICES=${gpus} ./local/export.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} exp/${ckpt}/checkpoints/${avg_ckpt}.jit + CUDA_VISIBLE_DEVICES=${gpus} ./local/export.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} exp/${ckpt}/checkpoints/${avg_ckpt}.jit ${model_type} fi diff --git a/examples/tiny/s1/conf/augmentation.json b/examples/tiny/s1/conf/augmentation.json index f26c282e7..6010c2e47 100644 --- a/examples/tiny/s1/conf/augmentation.json +++ b/examples/tiny/s1/conf/augmentation.json @@ -27,7 +27,9 @@ "W": 80, "adaptive_number_ratio": 0, "adaptive_size_ratio": 0, - "max_n_time_masks": 20 + "max_n_time_masks": 20, + "replace_with_zero": true, + "warp_mode": "PIL" }, "prob": 1.0 } diff --git a/examples/tiny/s1/conf/chunk_confermer.yaml b/examples/tiny/s1/conf/chunk_confermer.yaml index 790066264..be2e82f9e 100644 --- a/examples/tiny/s1/conf/chunk_confermer.yaml +++ b/examples/tiny/s1/conf/chunk_confermer.yaml @@ -3,18 +3,20 @@ data: train_manifest: data/manifest.tiny dev_manifest: data/manifest.tiny test_manifest: data/manifest.tiny + min_input_len: 0.5 # second + max_input_len: 30.0 # second + min_output_len: 0.0 # tokens + max_output_len: 400.0 # tokens + min_output_input_ratio: 0.05 + max_output_input_ratio: 10.0 + +collator: + 
mean_std_filepath: "" vocab_filepath: data/vocab.txt unit_type: 'spm' spm_model_prefix: 'data/bpe_unigram_200' - mean_std_filepath: "" augmentation_config: conf/augmentation.json batch_size: 4 - min_input_len: 0.5 - max_input_len: 20.0 - min_output_len: 0.0 - max_output_len: 400.0 - min_output_input_ratio: 0.05 - max_output_input_ratio: 10.0 raw_wav: True # use raw_wav or kaldi feature specgram_type: fbank #linear, mfcc, fbank feat_dim: 80 @@ -74,6 +76,8 @@ model: # hybrid CTC/attention model_conf: ctc_weight: 0.3 + ctc_dropoutrate: 0.0 + ctc_grad_norm_type: instance lsm_weight: 0.1 # label smoothing option length_normalized_loss: false @@ -91,6 +95,9 @@ training: warmup_steps: 25000 lr_decay: 1.0 log_interval: 1 + checkpoint: + kbest_n: 10 + latest_n: 1 decoding: diff --git a/examples/tiny/s1/conf/chunk_transformer.yaml b/examples/tiny/s1/conf/chunk_transformer.yaml index aa2b145a6..93439a857 100644 --- a/examples/tiny/s1/conf/chunk_transformer.yaml +++ b/examples/tiny/s1/conf/chunk_transformer.yaml @@ -3,18 +3,20 @@ data: train_manifest: data/manifest.tiny dev_manifest: data/manifest.tiny test_manifest: data/manifest.tiny - vocab_filepath: data/vocab.txt - unit_type: 'spm' - spm_model_prefix: 'data/bpe_unigram_200' - mean_std_filepath: "" - augmentation_config: conf/augmentation.json - batch_size: 4 min_input_len: 0.5 # second max_input_len: 20.0 # second min_output_len: 0.0 # tokens max_output_len: 400.0 # tokens min_output_input_ratio: 0.05 max_output_input_ratio: 10.0 + +collator: + mean_std_filepath: "" + vocab_filepath: data/vocab.txt + unit_type: 'spm' + spm_model_prefix: 'data/bpe_unigram_200' + augmentation_config: conf/augmentation.json + batch_size: 4 raw_wav: True # use raw_wav or kaldi feature specgram_type: fbank #linear, mfcc, fbank feat_dim: 80 @@ -67,6 +69,8 @@ model: # hybrid CTC/attention model_conf: ctc_weight: 0.3 + ctc_dropoutrate: 0.0 + ctc_grad_norm_type: instance lsm_weight: 0.1 # label smoothing option length_normalized_loss: false @@ -84,6 +88,9 @@ training: warmup_steps: 25000 lr_decay: 1.0 log_interval: 1 + checkpoint: + kbest_n: 10 + latest_n: 1 decoding: diff --git a/examples/tiny/s1/conf/conformer.yaml b/examples/tiny/s1/conf/conformer.yaml index 3813daa04..9bb67c44e 100644 --- a/examples/tiny/s1/conf/conformer.yaml +++ b/examples/tiny/s1/conf/conformer.yaml @@ -3,18 +3,20 @@ data: train_manifest: data/manifest.tiny dev_manifest: data/manifest.tiny test_manifest: data/manifest.tiny + min_input_len: 0.5 # second + max_input_len: 20.0 # second + min_output_len: 0.0 # tokens + max_output_len: 400.0 # tokens + min_output_input_ratio: 0.05 + max_output_input_ratio: 10.0 + +collator: + mean_std_filepath: "" vocab_filepath: data/vocab.txt unit_type: 'spm' spm_model_prefix: 'data/bpe_unigram_200' - mean_std_filepath: "" augmentation_config: conf/augmentation.json batch_size: 4 - min_input_len: 0.5 - max_input_len: 20.0 - min_output_len: 0.0 - max_output_len: 400.0 - min_output_input_ratio: 0.05 - max_output_input_ratio: 10.0 raw_wav: True # use raw_wav or kaldi feature specgram_type: fbank #linear, mfcc, fbank feat_dim: 80 @@ -70,6 +72,8 @@ model: # hybrid CTC/attention model_conf: ctc_weight: 0.3 + ctc_dropoutrate: 0.0 + ctc_grad_norm_type: instance lsm_weight: 0.1 # label smoothing option length_normalized_loss: false @@ -87,6 +91,9 @@ training: warmup_steps: 25000 lr_decay: 1.0 log_interval: 1 + checkpoint: + kbest_n: 10 + latest_n: 1 decoding: diff --git a/examples/tiny/s1/conf/transformer.yaml b/examples/tiny/s1/conf/transformer.yaml index 0a7cf3be8..fcbe1da4a 
100644 --- a/examples/tiny/s1/conf/transformer.yaml +++ b/examples/tiny/s1/conf/transformer.yaml @@ -3,18 +3,20 @@ data: train_manifest: data/manifest.tiny dev_manifest: data/manifest.tiny test_manifest: data/manifest.tiny - vocab_filepath: data/vocab.txt - unit_type: 'spm' - spm_model_prefix: 'data/bpe_unigram_200' - mean_std_filepath: "" - augmentation_config: conf/augmentation.json - batch_size: 4 min_input_len: 0.5 # second max_input_len: 20.0 # second min_output_len: 0.0 # tokens max_output_len: 400.0 # tokens min_output_input_ratio: 0.05 max_output_input_ratio: 10.0 + +collator: + mean_std_filepath: "" + vocab_filepath: data/vocab.txt + unit_type: 'spm' + spm_model_prefix: 'data/bpe_unigram_200' + augmentation_config: conf/augmentation.json + batch_size: 4 raw_wav: True # use raw_wav or kaldi feature specgram_type: fbank #linear, mfcc, fbank feat_dim: 80 @@ -33,7 +35,6 @@ data: shuffle_method: batch_shuffle num_workers: 2 - # network architecture model: cmvn_file: "data/mean_std.json" @@ -65,12 +66,14 @@ model: # hybrid CTC/attention model_conf: ctc_weight: 0.3 + ctc_dropoutrate: 0.0 + ctc_grad_norm_type: instance lsm_weight: 0.1 # label smoothing option length_normalized_loss: false training: - n_epoch: 2 + n_epoch: 21 accum_grad: 1 global_grad_clip: 5.0 optim: adam @@ -82,6 +85,9 @@ training: warmup_steps: 25000 lr_decay: 1.0 log_interval: 1 + checkpoint: + kbest_n: 2 + latest_n: 1 decoding: diff --git a/examples/tiny/s1/local/align.sh b/examples/tiny/s1/local/align.sh new file mode 100755 index 000000000..ad6c84bc8 --- /dev/null +++ b/examples/tiny/s1/local/align.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +if [ $# != 2 ];then + echo "usage: ${0} config_path ckpt_path_prefix" + exit -1 +fi + +ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}') +echo "using $ngpu gpus..." + +device=gpu +if [ ${ngpu} == 0 ];then + device=cpu +fi +config_path=$1 +ckpt_prefix=$2 + +batch_size=1 +output_dir=${ckpt_prefix} +mkdir -p ${output_dir} + +# align dump in `result_file` +# .tier, .TextGrid dump in `dir of result_file` +python3 -u ${BIN_DIR}/alignment.py \ +--device ${device} \ +--nproc 1 \ +--config ${config_path} \ +--result_file ${output_dir}/${type}.align \ +--checkpoint_path ${ckpt_prefix} \ +--opts decoding.batch_size ${batch_size} + +if [ $? -ne 0 ]; then + echo "Failed in ctc alignment!" + exit 1 +fi + +exit 0 diff --git a/examples/tiny/s1/local/data.sh b/examples/tiny/s1/local/data.sh index deff91e03..2aea250b5 100755 --- a/examples/tiny/s1/local/data.sh +++ b/examples/tiny/s1/local/data.sh @@ -1,4 +1,4 @@ -#! /usr/bin/env bash +#!/bin/bash stage=-1 stop_stage=100 diff --git a/examples/tiny/s1/local/download_lm_en.sh b/examples/tiny/s1/local/download_lm_en.sh deleted file mode 120000 index 831f3c31c..000000000 --- a/examples/tiny/s1/local/download_lm_en.sh +++ /dev/null @@ -1 +0,0 @@ -../../s0/local/download_lm_en.sh \ No newline at end of file diff --git a/examples/tiny/s1/local/export.sh b/examples/tiny/s1/local/export.sh index 1b19d5720..f99a15bad 100755 --- a/examples/tiny/s1/local/export.sh +++ b/examples/tiny/s1/local/export.sh @@ -1,4 +1,4 @@ -#! 
/usr/bin/env bash +#!/bin/bash if [ $# != 3 ];then echo "usage: $0 config_path ckpt_prefix jit_model_path" @@ -13,7 +13,7 @@ ckpt_path_prefix=$2 jit_model_export_path=$3 device=gpu -if [ ngpu == 0 ];then +if [ ${ngpu} == 0 ];then device=cpu fi diff --git a/examples/tiny/s1/local/test.sh b/examples/tiny/s1/local/test.sh index 240a63b06..4d3ed081a 100755 --- a/examples/tiny/s1/local/test.sh +++ b/examples/tiny/s1/local/test.sh @@ -1,4 +1,4 @@ -#! /usr/bin/env bash +#!/bin/bash if [ $# != 2 ];then echo "usage: ${0} config_path ckpt_path_prefix" @@ -9,29 +9,60 @@ ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}') echo "using $ngpu gpus..." device=gpu -if [ ngpu == 0 ];then +if [ ${ngpu} == 0 ];then device=cpu fi config_path=$1 ckpt_prefix=$2 +chunk_mode=false +if [[ ${config_path} =~ ^.*chunk_.*yaml$ ]];then + chunk_mode=true +fi + # download language model #bash local/download_lm_en.sh #if [ $? -ne 0 ]; then # exit 1 #fi -python3 -u ${BIN_DIR}/test.py \ ---device ${device} \ ---nproc 1 \ ---config ${config_path} \ ---result_file ${ckpt_prefix}.rsl \ ---checkpoint_path ${ckpt_prefix} +for type in attention ctc_greedy_search; do + echo "decoding ${type}" + if [ ${chunk_mode} == true ];then + # stream decoding only support batchsize=1 + batch_size=1 + else + batch_size=64 + fi + python3 -u ${BIN_DIR}/test.py \ + --device ${device} \ + --nproc 1 \ + --config ${config_path} \ + --result_file ${ckpt_prefix}.${type}.rsl \ + --checkpoint_path ${ckpt_prefix} \ + --opts decoding.decoding_method ${type} decoding.batch_size ${batch_size} -if [ $? -ne 0 ]; then - echo "Failed in evaluation!" - exit 1 -fi + if [ $? -ne 0 ]; then + echo "Failed in evaluation!" + exit 1 + fi +done + +for type in ctc_prefix_beam_search attention_rescoring; do + echo "decoding ${type}" + batch_size=1 + python3 -u ${BIN_DIR}/test.py \ + --device ${device} \ + --nproc 1 \ + --config ${config_path} \ + --result_file ${ckpt_prefix}.${type}.rsl \ + --checkpoint_path ${ckpt_prefix} \ + --opts decoding.decoding_method ${type} decoding.batch_size ${batch_size} + if [ $? -ne 0 ]; then + echo "Failed in evaluation!" + exit 1 + fi +done exit 0 diff --git a/examples/tiny/s1/local/train.sh b/examples/tiny/s1/local/train.sh index f8c9dbc0b..5097d4d03 100755 --- a/examples/tiny/s1/local/train.sh +++ b/examples/tiny/s1/local/train.sh @@ -1,28 +1,51 @@ -#! /usr/bin/env bash +#!/bin/bash -if [ $# != 2 ];then - echo "usage: CUDA_VISIBLE_DEVICES=0 ${0} config_path ckpt_name" - exit -1 -fi +profiler_options= +benchmark_batch_size=0 +benchmark_max_step=0 + +# seed may break model convergence +seed=0 + +source ${MAIN_ROOT}/utils/parse_options.sh || exit 1; ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}') echo "using $ngpu gpus..." -config_path=$1 -ckpt_name=$2 - device=gpu -if [ ngpu == 0 ];then +if [ ${ngpu} == 0 ];then device=cpu fi +if [ ${seed} != 0 ]; then + export FLAGS_cudnn_deterministic=True + echo "using seed $seed & FLAGS_cudnn_deterministic=True ..." +fi + +if [ $# != 2 ];then + echo "usage: CUDA_VISIBLE_DEVICES=0 ${0} config_path ckpt_name" + exit -1 +fi + +config_path=$1 +ckpt_name=$2 + mkdir -p exp python3 -u ${BIN_DIR}/train.py \ +--seed ${seed} \ --device ${device} \ --nproc ${ngpu} \ --config ${config_path} \ ---output exp/${ckpt_name} +--output exp/${ckpt_name} \ +--profiler-options "${profiler_options}" \ +--benchmark-batch-size ${benchmark_batch_size} \ +--benchmark-max-step ${benchmark_max_step} + + +if [ ${seed} != 0 ]; then + unset FLAGS_cudnn_deterministic +fi if [ $? 
-ne 0 ]; then echo "Failed in training!" diff --git a/examples/tiny/s1/path.sh b/examples/tiny/s1/path.sh index 30adb6ca0..29841bc10 100644 --- a/examples/tiny/s1/path.sh +++ b/examples/tiny/s1/path.sh @@ -1,4 +1,4 @@ -export MAIN_ROOT=${PWD}/../../../ +export MAIN_ROOT=`realpath ${PWD}/../../../` export PATH=${MAIN_ROOT}:${MAIN_ROOT}/utils:${PATH} export LC_ALL=C diff --git a/examples/tiny/s1/run.sh b/examples/tiny/s1/run.sh index b148869b7..d288e31a4 100755 --- a/examples/tiny/s1/run.sh +++ b/examples/tiny/s1/run.sh @@ -25,7 +25,7 @@ fi if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then # avg n best model - avg.sh exp/${ckpt}/checkpoints ${avg_num} + avg.sh best exp/${ckpt}/checkpoints ${avg_num} fi if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then @@ -34,6 +34,12 @@ if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then fi if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then + # ctc alignment of test data + CUDA_VISIBLE_DEVICES=0 ./local/align.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} || exit -1 +fi + +if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then # export ckpt avg_n - CUDA_VISIBLE_DEVICES= ./local/export.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} exp/${ckpt}/checkpoints/${avg_ckpt}.jit + CUDA_VISIBLE_DEVICES= ./local/export.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} exp/${ckpt}/checkpoints/${avg_ckpt}.jit fi + diff --git a/examples/tn/.gitignore b/examples/tn/.gitignore new file mode 100644 index 000000000..0f2503386 --- /dev/null +++ b/examples/tn/.gitignore @@ -0,0 +1 @@ +exp diff --git a/examples/tn/README.md b/examples/tn/README.md new file mode 100644 index 000000000..ff7be2934 --- /dev/null +++ b/examples/tn/README.md @@ -0,0 +1,36 @@ +# Regular expression based text normalization for Chinese + +For simplicity and ease of implementation, text normalization is basically done by rules and dictionaries. Here's an example. + +## Run + +``` +. path.sh +bash run.sh +``` + +## Results + +``` +exp/ +`-- normalized.txt + +0 directories, 1 file +``` + +``` +aff31f8aa08e2a7360228c9ce5886b98 exp/normalized.txt +``` + +``` +今天的最低气温达到零下十度. +只要有四分之三十三的人同意,就可以通过决议。 +一九四五年五月二日,苏联士兵在德国国会大厦上升起了胜利旗,象征着攻占柏林并战胜了纳粹德国。 +四月十六日,清晨的战斗以炮击揭幕,数以千计的大炮和喀秋莎火箭炮开始炮轰德军阵地,炮击持续了数天之久。 +如果剩下的百分之三十点六是过去,那么还有百分之六十九点四. +事情发生在二零二零年三月三十一日的上午八点. +警方正在找一支点二二口径的手枪。 +欢迎致电中国联通,北京二零二二年冬奥会官方合作伙伴为您服务 +充值缴费请按一,查询话费及余量请按二,跳过本次提醒请按井号键。 +快速解除流量封顶请按星号键,腾讯王卡产品介绍、使用说明、特权及活动请按九,查询话费、套餐余量、积分及活动返款请按一,手机上网流量开通及取消请按二,查询本机号码及本号所使用套餐请按四,密码修改及重置请按五,紧急开机请按六,挂失请按七,查询充值记录请按八,其它自助服务及人工服务请按零 +``` diff --git a/examples/tn/data/sentences.txt b/examples/tn/data/sentences.txt new file mode 100644 index 000000000..d15bfe46b --- /dev/null +++ b/examples/tn/data/sentences.txt @@ -0,0 +1,26 @@ +今天的最低气温达到-10°C. +只要有33/4的人同意,就可以通过决议。 +1945年5月2日,苏联士兵在德国国会大厦上升起了胜利旗,象征着攻占柏林并战胜了纳粹德国。 +4月16日,清晨的战斗以炮击揭幕,数以千计的大炮和喀秋莎火箭炮开始炮轰德军阵地,炮击持续了数天之久。 +如果剩下的30.6%是过去,那么还有69.4%. +事情发生在2020/03/31的上午8:00. +警方正在找一支.22口径的手枪。 +欢迎致电中国联通,北京2022年冬奥会官方合作伙伴为您服务 +充值缴费请按1,查询话费及余量请按2,跳过本次提醒请按井号键。 +快速解除流量封顶请按星号键,腾讯王卡产品介绍、使用说明、特权及活动请按9,查询话费、套餐余量、积分及活动返款请按1,手机上网流量开通及取消请按2,查询本机号码及本号所使用套餐请按4,密码修改及重置请按5,紧急开机请按6,挂失请按7,查询充值记录请按8,其它自助服务及人工服务请按0 +智能客服助理快速查话费、查流量请按9,了解北京联通业务请按1,宽带IPTV新装、查询请按2,障碍报修请按3,充值缴费请按4,投诉建议请按5,政企业务请按7,人工服务请按0,for english severice press star key +您的帐户当前可用余额为63.89元,本月消费为2.17元。您的消费、套餐余量和其它信息将以短信形式下发,请您注意查收。谢谢使用,再见!。 +您的帐户当前可用余额为负15.5元,本月消费为59.6元。您的消费、套餐余量和其它信息将以短信形式下发,请您注意查收。谢谢使用,再见!。 +尊敬的客户,您目前的话费余额为负14.60元,已低于10元,为保证您的通信畅通,请及时缴纳费用。 +您的流量已用完,为避免您产生额外费用,建议您根据需求开通一个流量包以作补充。 +您可以直接说,查询话费及余量、开通流量包、缴费,您也可以说出其它需求,请问有什么可以帮您?
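The `examples/tn` recipe above converts written forms (dates, percentages, phone menus) into spoken-style Chinese purely with rules and dictionaries; the actual rules live in `text_processing.normalization`, which is not part of this diff. The sketch below is only a hypothetical single-rule illustration of that approach, not the project's implementation, and it reads digits one by one rather than as proper numerals.

```
# Hypothetical illustration of one rule-based normalization step, in the spirit of
# examples/tn/README.md. Not the project's text_processing.normalization module.
import re

# Naive digit-to-character table; real rules read "30.6" as 三十点六, this sketch reads 三零点六.
DIGITS = dict(zip("0123456789.", "零一二三四五六七八九点"))

def read_digits(num: str) -> str:
    return "".join(DIGITS[ch] for ch in num)

def normalize_percent(sentence: str) -> str:
    # "30.6%" -> "百分之三零点六"
    return re.sub(r"(\d+(?:\.\d+)?)%",
                  lambda m: "百分之" + read_digits(m.group(1)),
                  sentence)

print(normalize_percent("如果剩下的30.6%是过去,那么还有69.4%."))
```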
+您的账户当前可用余额为负36.00元,本月消费36.00元。 +请问你是电话13985608526的机主吗? +如您对处理结果不满意,可拨打中国联通集团投诉电话10015进行投诉,按本地通话费收费,返回自助服务请按井号键 +“26314”号VIP客服代表为您服务。 +尊敬的5G用户,欢迎您致电中国联通 +首先是应用了M1芯片的iPad Pro,新款的iPad Pro支持5G,这也是苹果的第二款5G产品线。 +除此之外,摄像头方面再次升级,增加了前摄全新超广角摄像头,支持人物居中功能,搭配超广角可实现视频中始终让人物居中效果。 +屏幕方面,iPad Pro 12.9版本支持XDR体验的Mini-LEDS显示屏,支持HDR10、杜比视界,还支持杜比全景声。 +iPad Pro的秒控键盘这次也推出白色版本。 +售价方面,11英寸版本售价799美元起,12.9英寸售价1099美元起。 diff --git a/examples/tn/local/test_normalization.py b/examples/tn/local/test_normalization.py new file mode 100644 index 000000000..bcf7ee0da --- /dev/null +++ b/examples/tn/local/test_normalization.py @@ -0,0 +1,29 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse + +from text_processing import normalization + +parser = argparse.ArgumentParser( + description="Normalize text in Chinese with some rules.") +parser.add_argument("input", type=str, help="the input sentences") +parser.add_argument("output", type=str, help="path to save the output file.") +args = parser.parse_args() + +with open(args.input, 'rt') as fin: + with open(args.output, 'wt') as fout: + for sent in fin: + sent = normalization.normalize_sentence(sent.strip()) + fout.write(sent) + fout.write('\n') diff --git a/examples/tn/path.sh b/examples/tn/path.sh new file mode 100644 index 000000000..30689eee7 --- /dev/null +++ b/examples/tn/path.sh @@ -0,0 +1,8 @@ +export MAIN_ROOT=`realpath ${PWD}/../../` + +export PATH=${MAIN_ROOT}:${MAIN_ROOT}/utils:${PATH} +export LC_ALL=C + +# Use UTF-8 in Python to avoid UnicodeDecodeError when LC_ALL=C +export PYTHONIOENCODING=UTF-8 +export PYTHONPATH=${MAIN_ROOT}:${MAIN_ROOT}/third_party:${PYTHONPATH}# diff --git a/examples/tn/run.sh b/examples/tn/run.sh new file mode 100755 index 000000000..c4043a319 --- /dev/null +++ b/examples/tn/run.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash +source path.sh + +stage=-1 +stop_stage=100 + +exp_dir=exp +data_dir=data +filename="sentences.txt" + +source ${MAIN_ROOT}/utils/parse_options.sh || exit -1 + +mkdir -p ${exp_dir} + + +if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then + echo "stage 1: Processing " + python3 local/test_normalization.py ${data_dir}/${filename} ${exp_dir}/normalized.txt + if [ -f "${exp_dir}/normalized.txt" ]; then + echo "Normalized text save at ${exp_dir}/normalized.txt" + fi + # TODO(chenfeiyu): compute edit distance against ground-truth +fi + +echo "done" +exit 0 diff --git a/requirements.txt b/requirements.txt index 57a951bbd..ebf879b51 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,14 @@ coverage +gpustat +jsonlines +jsonlines +kaldiio +loguru +Pillow pre-commit pybind11 resampy==0.2.2 +sacrebleu scipy==1.2.1 sentencepiece snakeviz @@ -9,5 +16,7 @@ SoundFile==0.9.0.post1 sox tensorboardX textgrid +tqdm typeguard +visualdl==2.2.0 yacs diff --git a/setup.sh b/setup.sh index 11daa102a..6e472c47d 100644 --- a/setup.sh +++ b/setup.sh @@ -9,14 +9,21 @@ if [ $(id -u) -eq 0 ]; then fi if [ -e /etc/lsb-release ];then - #${SUDO} apt-get update - ${SUDO} 
apt-get install -y vim tig tree sox pkg-config libflac-dev libogg-dev libvorbis-dev libboost-dev swig python3-dev + ${SUDO} apt-get update -y + ${SUDO} apt-get install -y jq vim tig tree sox pkg-config libflac-dev libogg-dev libvorbis-dev libboost-dev swig python3-dev if [ $? != 0 ]; then error_msg "Please using Ubuntu or install pkg-config libflac-dev libogg-dev libvorbis-dev libboost-dev swig python3-dev by user." exit -1 fi fi + +# tools/make +rm tools/*.done +pushd tools && make && popd + +source tools/venv/bin/activate + # install python dependencies if [ -f "requirements.txt" ]; then pip3 install -r requirements.txt @@ -43,6 +50,22 @@ if [ $? != 0 ]; then rm libsndfile-1.0.28.tar.gz fi +#install auto-log +python -c "import auto_log" +if [ $? != 0 ]; then + info_msg "Install auto_log into default system path" + test -d AutoLog || git clone https://github.com/LDOUBLEV/AutoLog + if [ $? != 0 ]; then + error_msg "Download auto_log failed !!!" + exit 1 + fi + cd AutoLog + pip install -r requirements.txt + python setup.py install + cd .. + rm -rf AutoLog +fi + # install decoders python3 -c "import pkg_resources; pkg_resources.require(\"swig_decoders==1.1\")" if [ $? != 0 ]; then @@ -66,4 +89,5 @@ if [ $? != 0 ]; then fi popd + info_msg "Install all dependencies successfully." diff --git a/speechnn/.gitignore b/speechnn/.gitignore new file mode 100644 index 000000000..378eac25d --- /dev/null +++ b/speechnn/.gitignore @@ -0,0 +1 @@ +build diff --git a/speechnn/CMakeLists.txt b/speechnn/CMakeLists.txt new file mode 100644 index 000000000..88182eb4c --- /dev/null +++ b/speechnn/CMakeLists.txt @@ -0,0 +1,56 @@ +cmake_minimum_required(VERSION 3.14 FATAL_ERROR) + +project(speechnn VERSION 0.1) + +if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT) + set(CMAKE_INSTALL_PREFIX ${CMAKE_CURRENT_SOURCE_DIR}/src CACHE PATH "Install path prefix." 
FORCE) +endif(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT) +set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake;${CMAKE_MODULE_PATH}") + +# include file +include(cmake/third_party.cmake) + + +set(CMAKE_VERBOSE_MAKEFILE on) +# set std-14 +set(CMAKE_CXX_STANDARD 14) + + +# # fc_patch dir +# set(FETCHCONTENT_QUIET off) +# get_filename_component(fc_patch "fc_patch" REALPATH BASE_DIR "${CMAKE_SOURCE_DIR}") +# set(FETCHCONTENT_BASE_DIR ${fc_patch}) +# +# +# ############################################################################### +# # Option Configurations +# ############################################################################### +# # option configurations +# option(TEST_DEBUG "option for debug" OFF) +# +# +# ############################################################################### +# # Add local library +# ############################################################################### +# # system lib +# find_package() +# # if dir have CmakeLists.txt +# add_subdirectory() +# # if dir do not have CmakeLists.txt +# add_library(lib_name STATIC file.cc) +# target_link_libraries(lib_name item0 item1) +# add_dependencies(lib_name depend-target) +# +# +# ############################################################################### +# # Library installation +# ############################################################################### +# install() +# +# +# ############################################################################### +# # Build binary file +# ############################################################################### +# add_executable() +# target_link_libraries() +# diff --git a/speechnn/cmake/third_party.cmake b/speechnn/cmake/third_party.cmake new file mode 100644 index 000000000..fdd7b53c2 --- /dev/null +++ b/speechnn/cmake/third_party.cmake @@ -0,0 +1,197 @@ +include(ExternalProject) +# Creat a target named "third_party", which can compile external dependencies on all platform(windows/linux/mac) + +set(THIRD_PARTY_PATH "${CMAKE_BINARY_DIR}/third_party" CACHE STRING + "A path setting third party libraries download & build directories.") +set(THIRD_PARTY_CACHE_PATH "${CMAKE_SOURCE_DIR}" CACHE STRING + "A path cache third party source code to avoid repeated download.") + +set(THIRD_PARTY_BUILD_TYPE Release) +set(third_party_deps) + + +# cache funciton to avoid repeat download code of third_party. +# This function has 4 parameters, URL / REPOSITOR / TAG / DIR: +# 1. URL: specify download url of 3rd party +# 2. REPOSITORY: specify git REPOSITORY of 3rd party +# 3. TAG: specify git tag/branch/commitID of 3rd party +# 4. DIR: overwrite the original SOURCE_DIR when cache directory +# +# The function Return 1 PARENT_SCOPE variables: +# - ${TARGET}_DOWNLOAD_CMD: Simply place "${TARGET}_DOWNLOAD_CMD" in ExternalProject_Add, +# and you no longer need to set any donwnload steps in ExternalProject_Add. 
+# For example: +# Cache_third_party(${TARGET} +# REPOSITORY ${TARGET_REPOSITORY} +# TAG ${TARGET_TAG} +# DIR ${TARGET_SOURCE_DIR}) + +FUNCTION(cache_third_party TARGET) + SET(options "") + SET(oneValueArgs URL REPOSITORY TAG DIR) + SET(multiValueArgs "") + cmake_parse_arguments(cache_third_party "${optionps}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + + STRING(REPLACE "extern_" "" TARGET_NAME ${TARGET}) + STRING(REGEX REPLACE "[0-9]+" "" TARGET_NAME ${TARGET_NAME}) + STRING(TOUPPER ${TARGET_NAME} TARGET_NAME) + IF(cache_third_party_REPOSITORY) + SET(${TARGET_NAME}_DOWNLOAD_CMD + GIT_REPOSITORY ${cache_third_party_REPOSITORY}) + IF(cache_third_party_TAG) + LIST(APPEND ${TARGET_NAME}_DOWNLOAD_CMD + GIT_TAG ${cache_third_party_TAG}) + ENDIF() + ELSEIF(cache_third_party_URL) + SET(${TARGET_NAME}_DOWNLOAD_CMD + URL ${cache_third_party_URL}) + ELSE() + MESSAGE(FATAL_ERROR "Download link (Git repo or URL) must be specified for cache!") + ENDIF() + IF(WITH_TP_CACHE) + IF(NOT cache_third_party_DIR) + MESSAGE(FATAL_ERROR "Please input the ${TARGET_NAME}_SOURCE_DIR for overwriting when -DWITH_TP_CACHE=ON") + ENDIF() + # Generate and verify cache dir for third_party source code + SET(cache_third_party_REPOSITORY ${cache_third_party_REPOSITORY} ${cache_third_party_URL}) + IF(cache_third_party_REPOSITORY AND cache_third_party_TAG) + STRING(MD5 HASH_REPO ${cache_third_party_REPOSITORY}) + STRING(MD5 HASH_GIT ${cache_third_party_TAG}) + STRING(SUBSTRING ${HASH_REPO} 0 8 HASH_REPO) + STRING(SUBSTRING ${HASH_GIT} 0 8 HASH_GIT) + STRING(CONCAT HASH ${HASH_REPO} ${HASH_GIT}) + # overwrite the original SOURCE_DIR when cache directory + SET(${cache_third_party_DIR} ${THIRD_PARTY_CACHE_PATH}/third_party/${TARGET}_${HASH}) + ELSEIF(cache_third_party_REPOSITORY) + STRING(MD5 HASH_REPO ${cache_third_party_REPOSITORY}) + STRING(SUBSTRING ${HASH_REPO} 0 16 HASH) + # overwrite the original SOURCE_DIR when cache directory + SET(${cache_third_party_DIR} ${THIRD_PARTY_CACHE_PATH}/third_party/${TARGET}_${HASH}) + ENDIF() + + IF(EXISTS ${${cache_third_party_DIR}}) + # judge whether the cache dir is empty + FILE(GLOB files ${${cache_third_party_DIR}}/*) + LIST(LENGTH files files_len) + IF(files_len GREATER 0) + list(APPEND ${TARGET_NAME}_DOWNLOAD_CMD DOWNLOAD_COMMAND "") + ENDIF() + ENDIF() + SET(${cache_third_party_DIR} ${${cache_third_party_DIR}} PARENT_SCOPE) + ENDIF() + + # Pass ${TARGET_NAME}_DOWNLOAD_CMD to parent scope, the double quotation marks can't be removed + SET(${TARGET_NAME}_DOWNLOAD_CMD "${${TARGET_NAME}_DOWNLOAD_CMD}" PARENT_SCOPE) +ENDFUNCTION() + +MACRO(UNSET_VAR VAR_NAME) + UNSET(${VAR_NAME} CACHE) + UNSET(${VAR_NAME}) +ENDMACRO() + +# Funciton to Download the dependencies during compilation +# This function has 2 parameters, URL / DIRNAME: +# 1. URL: The download url of 3rd dependencies +# 2. 
NAME: The name of file, that determin the dirname +# +FUNCTION(file_download_and_uncompress URL NAME) + set(options "") + set(oneValueArgs MD5) + set(multiValueArgs "") + cmake_parse_arguments(URL "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + MESSAGE(STATUS "Download dependence[${NAME}] from ${URL}, MD5: ${URL_MD5}") + SET(${NAME}_INCLUDE_DIR ${THIRD_PARTY_PATH}/${NAME}/data PARENT_SCOPE) + ExternalProject_Add( + download_${NAME} + ${EXTERNAL_PROJECT_LOG_ARGS} + PREFIX ${THIRD_PARTY_PATH}/${NAME} + URL ${URL} + URL_MD5 ${URL_MD5} + TIMEOUT 120 + DOWNLOAD_DIR ${THIRD_PARTY_PATH}/${NAME}/data/ + SOURCE_DIR ${THIRD_PARTY_PATH}/${NAME}/data/ + DOWNLOAD_NO_PROGRESS 1 + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + UPDATE_COMMAND "" + INSTALL_COMMAND "" + ) + set(third_party_deps ${third_party_deps} download_${NAME} PARENT_SCOPE) +ENDFUNCTION() + + +# Correction of flags on different Platform(WIN/MAC) and Print Warning Message +if (APPLE) + if(WITH_MKL) + MESSAGE(WARNING + "Mac is not supported with MKL in Paddle yet. Force WITH_MKL=OFF.") + set(WITH_MKL OFF CACHE STRING "Disable MKL for building on mac" FORCE) + endif() +endif() + +if(WIN32 OR APPLE) + MESSAGE(STATUS "Disable XBYAK in Windows and MacOS") + SET(WITH_XBYAK OFF CACHE STRING "Disable XBYAK in Windows and MacOS" FORCE) + + if(WITH_LIBXSMM) + MESSAGE(WARNING + "Windows, Mac are not supported with libxsmm in Paddle yet." + "Force WITH_LIBXSMM=OFF") + SET(WITH_LIBXSMM OFF CACHE STRING "Disable LIBXSMM in Windows and MacOS" FORCE) + endif() + + if(WITH_BOX_PS) + MESSAGE(WARNING + "Windows or Mac is not supported with BOX_PS in Paddle yet." + "Force WITH_BOX_PS=OFF") + SET(WITH_BOX_PS OFF CACHE STRING "Disable BOX_PS package in Windows and MacOS" FORCE) + endif() + + if(WITH_PSLIB) + MESSAGE(WARNING + "Windows or Mac is not supported with PSLIB in Paddle yet." + "Force WITH_PSLIB=OFF") + SET(WITH_PSLIB OFF CACHE STRING "Disable PSLIB package in Windows and MacOS" FORCE) + endif() + + if(WITH_LIBMCT) + MESSAGE(WARNING + "Windows or Mac is not supported with LIBMCT in Paddle yet." + "Force WITH_LIBMCT=OFF") + SET(WITH_LIBMCT OFF CACHE STRING "Disable LIBMCT package in Windows and MacOS" FORCE) + endif() + + if(WITH_PSLIB_BRPC) + MESSAGE(WARNING + "Windows or Mac is not supported with PSLIB_BRPC in Paddle yet." 
+ "Force WITH_PSLIB_BRPC=OFF") + SET(WITH_PSLIB_BRPC OFF CACHE STRING "Disable PSLIB_BRPC package in Windows and MacOS" FORCE) + endif() +endif() + +set(WITH_MKLML ${WITH_MKL}) +if(NOT DEFINED WITH_MKLDNN) + if(WITH_MKL AND AVX2_FOUND) + set(WITH_MKLDNN ON) + else() + message(STATUS "Do not have AVX2 intrinsics and disabled MKL-DNN") + set(WITH_MKLDNN OFF) + endif() +endif() + +if(WIN32 OR APPLE OR NOT WITH_GPU OR ON_INFER) + set(WITH_DGC OFF) +endif() + +if(${CMAKE_VERSION} VERSION_GREATER "3.5.2") + set(SHALLOW_CLONE "GIT_SHALLOW TRUE") # adds --depth=1 arg to git clone of External_Projects +endif() + + +########################### include third_party according to flags ############################### +include(third_party/libsndfile) # download, build, install libsndfile +include(third_party/boost) # download boost +include(third_party/eigen) # download eigen3 +include(third_party/threadpool) # download threadpool + + diff --git a/speechnn/cmake/third_party/absl.cmake b/speechnn/cmake/third_party/absl.cmake new file mode 100644 index 000000000..c2a8eceb5 --- /dev/null +++ b/speechnn/cmake/third_party/absl.cmake @@ -0,0 +1,13 @@ +cmake_minimum_required(VERSION 3.14) +include(ExternalProject) +include(FetchContent) + +FetchContent_Declare( + absl + GIT_REPOSITORY "https://github.com/abseil/abseil-cpp.git" + GIT_TAG "20210324.1" +) + +FetchContent_MakeAvailable(absl) + + diff --git a/speechnn/cmake/third_party/boost.cmake b/speechnn/cmake/third_party/boost.cmake new file mode 100644 index 000000000..eb0b2c150 --- /dev/null +++ b/speechnn/cmake/third_party/boost.cmake @@ -0,0 +1,49 @@ +include(ExternalProject) + +set(BOOST_PROJECT "extern_boost") +# To release PaddlePaddle as a pip package, we have to follow the +# manylinux1 standard, which features as old Linux kernels and +# compilers as possible and recommends CentOS 5. Indeed, the earliest +# CentOS version that works with NVIDIA CUDA is CentOS 6. And a new +# version of boost, say, 1.66.0, doesn't build on CentOS 6. We +# checked that the devtools package of CentOS 6 installs boost 1.41.0. +# So we use 1.41.0 here. +set(BOOST_VER "1.41.0") +set(BOOST_TAR "boost_1_41_0" CACHE STRING "" FORCE) +set(BOOST_URL "http://paddlepaddledeps.bj.bcebos.com/${BOOST_TAR}.tar.gz" CACHE STRING "" FORCE) + +MESSAGE(STATUS "BOOST_VERSION: ${BOOST_VER}, BOOST_URL: ${BOOST_URL}") + +set(BOOST_PREFIX_DIR ${THIRD_PARTY_PATH}/boost) +set(BOOST_SOURCE_DIR ${THIRD_PARTY_PATH}/boost/src/extern_boost) +cache_third_party(${BOOST_PROJECT} + URL ${BOOST_URL} + DIR BOOST_SOURCE_DIR) + +set(BOOST_INCLUDE_DIR "${BOOST_SOURCE_DIR}" CACHE PATH "boost include directory." 
FORCE) +set_directory_properties(PROPERTIES CLEAN_NO_CUSTOM 1) +include_directories(${BOOST_INCLUDE_DIR}) + +if(WIN32 AND MSVC_VERSION GREATER_EQUAL 1600) + add_definitions(-DBOOST_HAS_STATIC_ASSERT) +endif() + +ExternalProject_Add( + ${BOOST_PROJECT} + ${EXTERNAL_PROJECT_LOG_ARGS} + "${BOOST_DOWNLOAD_CMD}" + URL_MD5 f891e8c2c9424f0565f0129ad9ab4aff + PREFIX ${BOOST_PREFIX_DIR} + DOWNLOAD_DIR ${BOOST_SOURCE_DIR} + SOURCE_DIR ${BOOST_SOURCE_DIR} + DOWNLOAD_NO_PROGRESS 1 + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + UPDATE_COMMAND "" + ) + +add_library(boost INTERFACE) + +add_dependencies(boost ${BOOST_PROJECT}) +set(Boost_INCLUDE_DIR ${BOOST_INCLUDE_DIR}) diff --git a/speechnn/cmake/third_party/eigen.cmake b/speechnn/cmake/third_party/eigen.cmake new file mode 100644 index 000000000..6a0323071 --- /dev/null +++ b/speechnn/cmake/third_party/eigen.cmake @@ -0,0 +1,53 @@ +include(ExternalProject) + +# update eigen to the commit id f612df27 on 03/16/2021 +set(EIGEN_PREFIX_DIR ${THIRD_PARTY_PATH}/eigen3) +set(EIGEN_SOURCE_DIR ${THIRD_PARTY_PATH}/eigen3/src/extern_eigen3) +set(EIGEN_REPOSITORY https://gitlab.com/libeigen/eigen.git) +set(EIGEN_TAG f612df273689a19d25b45ca4f8269463207c4fee) + +cache_third_party(extern_eigen3 + REPOSITORY ${EIGEN_REPOSITORY} + TAG ${EIGEN_TAG} + DIR EIGEN_SOURCE_DIR) + +if(WIN32) + add_definitions(-DEIGEN_STRONG_INLINE=inline) +elseif(LINUX) + if(WITH_ROCM) + # For HIPCC Eigen::internal::device::numeric_limits is not EIGEN_DEVICE_FUNC + # which will cause compiler error of using __host__ funciont in __host__ __device__ + file(TO_NATIVE_PATH ${PADDLE_SOURCE_DIR}/patches/eigen/Meta.h native_src) + file(TO_NATIVE_PATH ${EIGEN_SOURCE_DIR}/Eigen/src/Core/util/Meta.h native_dst) + file(TO_NATIVE_PATH ${PADDLE_SOURCE_DIR}/patches/eigen/TensorReductionGpu.h native_src1) + file(TO_NATIVE_PATH ${EIGEN_SOURCE_DIR}/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h native_dst1) + set(EIGEN_PATCH_COMMAND cp ${native_src} ${native_dst} && cp ${native_src1} ${native_dst1}) + endif() +endif() + +set(EIGEN_INCLUDE_DIR ${EIGEN_SOURCE_DIR}) +INCLUDE_DIRECTORIES(${EIGEN_INCLUDE_DIR}) + +ExternalProject_Add( + extern_eigen3 + ${EXTERNAL_PROJECT_LOG_ARGS} + ${SHALLOW_CLONE} + "${EIGEN_DOWNLOAD_CMD}" + PREFIX ${EIGEN_PREFIX_DIR} + SOURCE_DIR ${EIGEN_SOURCE_DIR} + UPDATE_COMMAND "" + PATCH_COMMAND ${EIGEN_PATCH_COMMAND} + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" +) + +add_library(eigen3 INTERFACE) + +add_dependencies(eigen3 extern_eigen3) + +# sw not support thread_local semantic +if(WITH_SW) + add_definitions(-DEIGEN_AVOID_THREAD_LOCAL) +endif() diff --git a/speechnn/cmake/third_party/libsndfile.cmake b/speechnn/cmake/third_party/libsndfile.cmake new file mode 100644 index 000000000..05d5c6ed4 --- /dev/null +++ b/speechnn/cmake/third_party/libsndfile.cmake @@ -0,0 +1,11 @@ +cmake_minimum_required(VERSION 3.14) +include(ExternalProject) +include(FetchContent) + +FetchContent_Declare( + libsndfile + GIT_REPOSITORY https://github.com/libsndfile/libsndfile.git + GIT_TAG v1.0.30 # tag v1.0.30 +) + +FetchContent_GetProperties(libsndfile) diff --git a/speechnn/cmake/third_party/openfst.cmake b/speechnn/cmake/third_party/openfst.cmake new file mode 100644 index 000000000..39f335a1c --- /dev/null +++ b/speechnn/cmake/third_party/openfst.cmake @@ -0,0 +1,26 @@ +cmake_minimum_required(VERSION 3.14) +include(ExternalProject) +include(FetchContent) + +FetchContent_Declare( + openfst + GIT_REPOSITORY https://github.com/kkm000/openfst + 
GIT_TAG 338225416178ac36b8002d70387f5556e44c8d05 # tag win/1.7.2.1 +) + +FetchContent_GetProperties(openfst) +if(NOT openfst_POPULATED) + FetchContent_Populate(openfst) + include_directories(${openfst_SOURCE_DIR}/src/include) + + add_subdirectory(${openfst_SOURCE_DIR} ${openfst_BINARY_DIR}) + + install(DIRECTORY ${openfst_SOURCE_DIR}/src/include/ DESTINATION include/ + FILES_MATCHING PATTERN "*.h") + + install(TARGETS fst + EXPORT kaldi-targets + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) +endif() diff --git a/speechnn/cmake/third_party/openfst_lib_target.cmake b/speechnn/cmake/third_party/openfst_lib_target.cmake new file mode 100644 index 000000000..dde5efc40 --- /dev/null +++ b/speechnn/cmake/third_party/openfst_lib_target.cmake @@ -0,0 +1,31 @@ +if(NOT OPENFST_ROOT_DIR) + message(FATAL_ERROR) +endif() + +set(fst_source_dir ${OPENFST_ROOT_DIR}/src/lib) +set(fst_include_dir ${OPENFST_ROOT_DIR}/src/include) + +include_directories(${fst_include_dir}) +file(GLOB fst_sources "${fst_source_dir}/*.cc") + +add_library(fst ${fst_sources}) +target_include_directories(fst PUBLIC + $ + $ +) + +install(TARGETS fst + EXPORT kaldi-targets + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} +) + +install(DIRECTORY ${fst_include_dir}/fst + DESTINATION include/openfst + PATTERN "test/*.h" EXCLUDE +) + +unset(fst_source_dir) +unset(fst_include_dir) +unset(fst_sources) diff --git a/speechnn/cmake/third_party/threadpool.cmake b/speechnn/cmake/third_party/threadpool.cmake new file mode 100644 index 000000000..d2c249e9b --- /dev/null +++ b/speechnn/cmake/third_party/threadpool.cmake @@ -0,0 +1,36 @@ +INCLUDE(ExternalProject) + +SET(THREADPOOL_PREFIX_DIR ${THIRD_PARTY_PATH}/threadpool) +SET(THREADPOOL_SOURCE_DIR ${THIRD_PARTY_PATH}/threadpool/src/extern_threadpool) +if(WITH_ASCEND OR WITH_ASCEND_CL) + SET(THREADPOOL_REPOSITORY https://gitee.com/tianjianhe/ThreadPool.git) +else() + SET(THREADPOOL_REPOSITORY ${GIT_URL}/progschj/ThreadPool.git) +endif() +SET(THREADPOOL_TAG 9a42ec1329f259a5f4881a291db1dcb8f2ad9040) + +cache_third_party(extern_threadpool + REPOSITORY ${THREADPOOL_REPOSITORY} + TAG ${THREADPOOL_TAG} + DIR THREADPOOL_SOURCE_DIR) + +SET(THREADPOOL_INCLUDE_DIR ${THREADPOOL_SOURCE_DIR}) +INCLUDE_DIRECTORIES(${THREADPOOL_INCLUDE_DIR}) + +ExternalProject_Add( + extern_threadpool + ${EXTERNAL_PROJECT_LOG_ARGS} + ${SHALLOW_CLONE} + "${THREADPOOL_DOWNLOAD_CMD}" + PREFIX ${THREADPOOL_PREFIX_DIR} + SOURCE_DIR ${THREADPOOL_SOURCE_DIR} + UPDATE_COMMAND "" + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" +) + +add_library(simple_threadpool INTERFACE) + +add_dependencies(simple_threadpool extern_threadpool) diff --git a/speechnn/cmake/third_party/version.cmake b/speechnn/cmake/third_party/version.cmake new file mode 100644 index 000000000..c3780ee69 --- /dev/null +++ b/speechnn/cmake/third_party/version.cmake @@ -0,0 +1,15 @@ +function(get_version) + file(READ ${CMAKE_CURRENT_SOURCE_DIR}/src/.version version) + string(STRIP ${version} version) + execute_process(COMMAND git log -n1 --format=%H src/.version + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + OUTPUT_VARIABLE version_commit + OUTPUT_STRIP_TRAILING_WHITESPACE) + execute_process(COMMAND git rev-list --count "${version_commit}..HEAD" + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + OUTPUT_VARIABLE patch_number) + string(STRIP ${patch_number} 
patch_number) + + set(KALDI_VERSION ${version} PARENT_SCOPE) + set(KALDI_PATCH_NUMBER ${patch_number} PARENT_SCOPE) +endfunction() diff --git a/speechnn/core/transformers/.gitkeep b/speechnn/core/transformers/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/speechnn/core/transformers/README.md b/speechnn/core/transformers/README.md new file mode 100644 index 000000000..edbcb9cc3 --- /dev/null +++ b/speechnn/core/transformers/README.md @@ -0,0 +1,9 @@ +# Fast Transformers for Speech + +- Conformer +- Transformer + +## Reference + +* https://github.com/NVIDIA/FasterTransformer.git +* https://github.com/idiap/fast-transformers diff --git a/speechnn/examples/.gitkeep b/speechnn/examples/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/speechnn/examples/CMakeLists.txt b/speechnn/examples/CMakeLists.txt new file mode 100644 index 000000000..e69de29bb diff --git a/speechnn/speechnn/CMakeLists.txt b/speechnn/speechnn/CMakeLists.txt new file mode 100644 index 000000000..e69de29bb diff --git a/speechnn/speechnn/decoder/CMakeLists.txt b/speechnn/speechnn/decoder/CMakeLists.txt new file mode 100644 index 000000000..259261bdf --- /dev/null +++ b/speechnn/speechnn/decoder/CMakeLists.txt @@ -0,0 +1,2 @@ +aux_source_directory(. DIR_LIB_SRCS) +add_library(decoder STATIC ${DIR_LIB_SRCS}) diff --git a/speechnn/speechnn/frontend/CMakeLists.txt b/speechnn/speechnn/frontend/CMakeLists.txt new file mode 100644 index 000000000..e69de29bb diff --git a/speechnn/speechnn/frontend/audio/CMakeLists.txt b/speechnn/speechnn/frontend/audio/CMakeLists.txt new file mode 100644 index 000000000..e69de29bb diff --git a/speechnn/speechnn/frontend/text/CMakeLists.txt b/speechnn/speechnn/frontend/text/CMakeLists.txt new file mode 100644 index 000000000..e69de29bb diff --git a/speechnn/speechnn/model/CMakeLists.txt b/speechnn/speechnn/model/CMakeLists.txt new file mode 100644 index 000000000..e69de29bb diff --git a/speechnn/speechnn/nn/CMakeLists.txt b/speechnn/speechnn/nn/CMakeLists.txt new file mode 100644 index 000000000..e69de29bb diff --git a/speechnn/speechnn/protocol/CMakeLists.txt b/speechnn/speechnn/protocol/CMakeLists.txt new file mode 100644 index 000000000..e69de29bb diff --git a/speechnn/speechnn/utils/CMakeLists.txt b/speechnn/speechnn/utils/CMakeLists.txt new file mode 100644 index 000000000..e69de29bb diff --git a/tests/benchmark/.gitignore b/tests/benchmark/.gitignore new file mode 100644 index 000000000..7d166b066 --- /dev/null +++ b/tests/benchmark/.gitignore @@ -0,0 +1,2 @@ +old-pd_env.txt +pd_env.txt diff --git a/tests/benchmark/README.md b/tests/benchmark/README.md new file mode 100644 index 000000000..d21999ab3 --- /dev/null +++ b/tests/benchmark/README.md @@ -0,0 +1,11 @@ +# Benchmark Test + +## Data + +* Aishell + +## Docker + +``` +registry.baidubce.com/paddlepaddle/paddle 2.1.1-gpu-cuda10.2-cudnn7 59d5ec1de486 +``` diff --git a/tests/benchmark/run_all.sh b/tests/benchmark/run_all.sh new file mode 100755 index 000000000..6f707cdcb --- /dev/null +++ b/tests/benchmark/run_all.sh @@ -0,0 +1,49 @@ +#!/bin/bash + +CUR_DIR=${PWD} +ROOT_DIR=../../ + +# 提供可稳定复现性能的脚本,默认在标准docker环境内py37执行: +# collect env info +bash ${ROOT_DIR}/utils/pd_env_collect.sh +#cat pd_env.txt + + +# 1 安装该模型需要的依赖 (如需开启优化策略请注明) +#pushd ${ROOT_DIR}/tools; make; popd +#source ${ROOT_DIR}/tools/venv/bin/activate +#pushd ${ROOT_DIR}; bash setup.sh; popd + + +# 2 拷贝该模型需要数据、预训练模型 + +# 执行目录:需说明 +#pushd ${ROOT_DIR}/examples/aishell/s1 +pushd ${ROOT_DIR}/examples/tiny/s1 + +mkdir -p exp/log +. 
path.sh +#bash local/data.sh &> exp/log/data.log + +# 3 批量运行(如不方便批量,1,2需放到单个模型中) + +model_mode_list=(conformer transformer) +fp_item_list=(fp32) +bs_item_list=(32 64 96) +for model_mode in ${model_mode_list[@]}; do + for fp_item in ${fp_item_list[@]}; do + for bs_item in ${bs_item_list[@]} + do + echo "index is speed, 1gpus, begin, ${model_name}" + run_mode=sp + CUDA_VISIBLE_DEVICES=0 bash ${CUR_DIR}/run_benchmark.sh ${run_mode} ${bs_item} ${fp_item} 500 ${model_mode} # (5min) + sleep 60 + echo "index is speed, 8gpus, run_mode is multi_process, begin, ${model_name}" + run_mode=mp + CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash ${CUR_DIR}/run_benchmark.sh ${run_mode} ${bs_item} ${fp_item} 500 ${model_mode} + sleep 60 + done + done +done + +popd # aishell/s1 diff --git a/tests/benchmark/run_benchmark.sh b/tests/benchmark/run_benchmark.sh new file mode 100755 index 000000000..eb1117936 --- /dev/null +++ b/tests/benchmark/run_benchmark.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +set -xe + +# 运行示例:CUDA_VISIBLE_DEVICES=0 bash run_benchmark.sh ${run_mode} ${bs_item} ${fp_item} 500 ${model_mode} +# 参数说明 +function _set_params(){ + run_mode=${1:-"sp"} # 单卡sp|多卡mp + batch_size=${2:-"64"} + fp_item=${3:-"fp32"} # fp32|fp16 + max_iter=${4:-"500"} # 可选,如果需要修改代码提前中断 + model_name=${5:-"model_name"} + run_log_path=${TRAIN_LOG_DIR:-$(pwd)} # TRAIN_LOG_DIR 后续QA设置该参数 + +# 以下不用修改 + device=${CUDA_VISIBLE_DEVICES//,/ } + arr=(${device}) + num_gpu_devices=${#arr[*]} + log_file=${run_log_path}/${model_name}_${run_mode}_bs${batch_size}_${fp_item}_${num_gpu_devices} +} + +function _train(){ + echo "Train on ${num_gpu_devices} GPUs" + echo "current CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES, gpus=$num_gpu_devices, batch_size=$batch_size" + + train_cmd="--benchmark-batch-size ${batch_size} + --benchmark-max-step ${max_iter} + conf/${model_name}.yaml ${model_name}" + + case ${run_mode} in + sp) train_cmd="bash local/train.sh "${train_cmd}"" ;; + mp) + train_cmd="bash local/train.sh "${train_cmd}"" ;; + *) echo "choose run_mode(sp or mp)"; exit 1; + esac + + # 以下不用修改 + CUDA_VISIBLE_DEVICES=${device} timeout 15m ${train_cmd} > ${log_file} 2>&1 + if [ $? 
-ne 0 ];then + echo -e "${model_name}, FAIL" + export job_fail_flag=1 + else + echo -e "${model_name}, SUCCESS" + export job_fail_flag=0 + fi + + trap 'for pid in $(jobs -pr); do kill -KILL $pid; done' INT QUIT TERM + + if [ $run_mode = "mp" -a -d mylog ]; then + rm ${log_file} + cp mylog/workerlog.0 ${log_file} + fi +} + +_set_params $@ +_train + diff --git a/tests/chains/ds2_params_lite_train_infer.txt b/tests/chains/ds2_params_lite_train_infer.txt new file mode 100644 index 000000000..82a9da9a9 --- /dev/null +++ b/tests/chains/ds2_params_lite_train_infer.txt @@ -0,0 +1,51 @@ +===========================train_params=========================== +model_name:deepspeech2 +python:python3.8 +gpu_list:0 +null:null +null:null +null:null +--output:null +null:null +--checkpoint_path: +train_model_name:checkpoints/9 +null:null +null:null +## +trainer:norm_train +norm_train: ../../../deepspeech/exps/deepspeech2/bin/train.py --nproc 1 --config conf/deepspeech2.yaml --model_type offline --device gpu +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval: ../../../deepspeech/exps/deepspeech2/bin/test.py --nproc 1 --config conf/deepspeech2.yaml --result_file tests/9.rsl --model_type offline --device gpu +null:null +## +===========================infer_params=========================== +--export_path:checkpoints/9.jit +--checkpoint_path:checkpoints/9 +norm_export: ../../../deepspeech/exps/deepspeech2/bin/export.py --nproc 1 --config conf/deepspeech2.yaml --model_type offline +quant_export:null +fpgm_export:null +distill_export:null +export1:null +export2:null +## +infer_model:null +infer_export:null +infer_quant:null +inference:null +--use_gpu:null +--enable_mkldnn:null +--cpu_threads:null +--rec_batch_num:null +--use_tensorrt:null +--precision:null +--det_model_dir:null +--image_dir:null +--save_log_path:null +--benchmark:null +null:null diff --git a/tests/chains/ds2_params_whole_train_infer.txt b/tests/chains/ds2_params_whole_train_infer.txt new file mode 100644 index 000000000..e97051c41 --- /dev/null +++ b/tests/chains/ds2_params_whole_train_infer.txt @@ -0,0 +1,51 @@ +===========================train_params=========================== +model_name:deepspeech2 +python:python3.8 +gpu_list:0 +null:null +null:null +null:null +--output:null +null:null +--checkpoint_path: +train_model_name:checkpoints/1 +null:null +null:null +## +trainer:norm_train +norm_train: ../../../deepspeech/exps/deepspeech2/bin/train.py --nproc 1 --config conf/deepspeech2.yaml --model_type offline --device gpu +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval: ../../../deepspeech/exps/deepspeech2/bin/test.py --nproc 1 --config conf/deepspeech2.yaml --result_file tests/1.rsl --model_type offline --device gpu +null:null +## +===========================infer_params=========================== +--export_path:checkpoints/1.jit +--checkpoint_path:checkpoints/1 +norm_export: ../../../deepspeech/exps/deepspeech2/bin/export.py --nproc 1 --config conf/deepspeech2.yaml --model_type offline +quant_export:null +fpgm_export:null +distill_export:null +export1:null +export2:null +## +infer_model:null +infer_export:null +infer_quant:null +inference:null +--use_gpu:null +--enable_mkldnn:null +--cpu_threads:null +--rec_batch_num:null +--use_tensorrt:null +--precision:null +--det_model_dir:null +--image_dir:null +--save_log_path:null +--benchmark:null 
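For orientation, the benchmark driver `tests/benchmark/run_all.sh` above sweeps model, precision, and batch size, and `run_benchmark.sh` derives one log file per combination. The sketch below only enumerates that matrix for single-GPU `sp` runs; the values are copied from the two scripts, and the name pattern mirrors `${model_name}_${run_mode}_bs${batch_size}_${fp_item}_${num_gpu_devices}`.

```
# Sketch of the benchmark matrix walked by tests/benchmark/run_all.sh and the
# log-file names tests/benchmark/run_benchmark.sh builds for it (sp mode, 1 GPU).
from itertools import product

models = ["conformer", "transformer"]
precisions = ["fp32"]
batch_sizes = [32, 64, 96]

for model, fp, bs in product(models, precisions, batch_sizes):
    # ${model_name}_${run_mode}_bs${batch_size}_${fp_item}_${num_gpu_devices}
    print(f"{model}_sp_bs{bs}_{fp}_1")
```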
+null:null diff --git a/tests/chains/lite_train_infer.sh b/tests/chains/lite_train_infer.sh new file mode 100644 index 000000000..76b22a38c --- /dev/null +++ b/tests/chains/lite_train_infer.sh @@ -0,0 +1,5 @@ +bash prepare.sh ds2_params_lite_train_infer.txt lite_train_infer +cd ../../examples/tiny/s0 +source path.sh +bash ../../../tests/chains/test.sh ../../../tests/chains/ds2_params_lite_train_infer.txt lite_train_infer +cd ../../../tests/chains diff --git a/tests/chains/prepare.sh b/tests/chains/prepare.sh new file mode 100644 index 000000000..73a302836 --- /dev/null +++ b/tests/chains/prepare.sh @@ -0,0 +1,84 @@ +#!/bin/bash +FILENAME=$1 +# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer'] +MODE=$2 + +dataline=$(cat ${FILENAME}) + +# parser params +IFS=$'\n' +lines=(${dataline}) +function func_parser_key(){ + strs=$1 + IFS=":" + array=(${strs}) + tmp=${array[0]} + echo ${tmp} +} +function func_parser_value(){ + strs=$1 + IFS=":" + array=(${strs}) + tmp=${array[1]} + echo ${tmp} +} +IFS=$'\n' +# The training params +model_name=$(func_parser_value "${lines[1]}") + +trainer_list=$(func_parser_value "${lines[14]}") + +# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer'] +MODE=$2 + +if [ ${MODE} = "lite_train_infer" ];then + # pretrain lite train data + curPath=$(readlink -f "$(dirname "$0")") + cd ${curPath}/../../examples/tiny/s0 + source path.sh + # download audio data + bash ./local/data.sh || exit -1 + # download language model + bash local/download_lm_en.sh + if [ $? -ne 0 ]; then + exit 1 + fi + cd ${curPath} + +elif [ ${MODE} = "whole_train_infer" ];then + curPath=$(readlink -f "$(dirname "$0")") + cd ${curPath}/../../examples/aishell/s0 + source path.sh + # download audio data + bash ./local/data.sh || exit -1 + # download language model + bash local/download_lm_ch.sh + if [ $? -ne 0 ]; then + exit 1 + fi + cd ${curPath} +elif [ ${MODE} = "whole_infer" ];then + curPath=$(readlink -f "$(dirname "$0")") + cd ${curPath}/../../examples/aishell/s0 + source path.sh + # download audio data + bash ./local/data.sh || exit -1 + # download language model + bash local/download_lm_ch.sh + if [ $? -ne 0 ]; then + exit 1 + fi + cd ${curPath} +else + curPath=$(readlink -f "$(dirname "$0")") + cd ${curPath}/../../examples/aishell/s0 + source path.sh + # download audio data + bash ./local/data.sh || exit -1 + # download language model + bash local/download_lm_ch.sh + if [ $? 
-ne 0 ]; then + exit 1 + fi + cd ${curPath} +fi diff --git a/tests/chains/test.sh b/tests/chains/test.sh new file mode 100644 index 000000000..6a48ba765 --- /dev/null +++ b/tests/chains/test.sh @@ -0,0 +1,365 @@ +#!/bin/bash +FILENAME=$1 +# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer'] +MODE=$2 + +dataline=$(cat ${FILENAME}) + +# parser params +IFS=$'\n' +lines=(${dataline}) + +function func_parser_key(){ + strs=$1 + IFS=":" + array=(${strs}) + tmp=${array[0]} + echo ${tmp} +} +function func_parser_value(){ + strs=$1 + IFS=":" + array=(${strs}) + tmp=${array[1]} + echo ${tmp} +} +function func_set_params(){ + key=$1 + value=$2 + if [ ${key} = "null" ];then + echo " " + elif [[ ${value} = "null" ]] || [[ ${value} = " " ]] || [ ${#value} -le 0 ];then + echo " " + else + echo "${key}=${value}" + fi +} +function func_parser_params(){ + strs=$1 + IFS=":" + array=(${strs}) + key=${array[0]} + tmp=${array[1]} + IFS="|" + res="" + for _params in ${tmp[*]}; do + IFS="=" + array=(${_params}) + mode=${array[0]} + value=${array[1]} + if [[ ${mode} = ${MODE} ]]; then + IFS="|" + #echo $(func_set_params "${mode}" "${value}") + echo $value + break + fi + IFS="|" + done + echo ${res} +} +function status_check(){ + last_status=$1 # the exit code + run_command=$2 + run_log=$3 + if [ $last_status -eq 0 ]; then + echo -e "\033[33m Run successfully with command - ${run_command}! \033[0m" | tee -a ${run_log} + else + echo -e "\033[33m Run failed with command - ${run_command}! \033[0m" | tee -a ${run_log} + fi +} + +IFS=$'\n' +# The training params +model_name=$(func_parser_value "${lines[1]}") +python=$(func_parser_value "${lines[2]}") +gpu_list=$(func_parser_value "${lines[3]}") +train_use_gpu_key=$(func_parser_key "${lines[4]}") +train_use_gpu_value=$(func_parser_value "${lines[4]}") +autocast_list=$(func_parser_value "${lines[5]}") +autocast_key=$(func_parser_key "${lines[5]}") +epoch_key=$(func_parser_key "${lines[6]}") +epoch_num=$(func_parser_params "${lines[6]}") +save_model_key=$(func_parser_key "${lines[7]}") +train_batch_key=$(func_parser_key "${lines[8]}") +train_batch_value=$(func_parser_params "${lines[8]}") +pretrain_model_key=$(func_parser_key "${lines[9]}") +pretrain_model_value=$(func_parser_value "${lines[9]}") +train_model_name=$(func_parser_value "${lines[10]}") +train_infer_img_dir=$(func_parser_value "${lines[11]}") +train_param_key1=$(func_parser_key "${lines[12]}") +train_param_value1=$(func_parser_value "${lines[12]}") + +trainer_list=$(func_parser_value "${lines[14]}") +trainer_norm=$(func_parser_key "${lines[15]}") +norm_trainer=$(func_parser_value "${lines[15]}") +pact_key=$(func_parser_key "${lines[16]}") +pact_trainer=$(func_parser_value "${lines[16]}") +fpgm_key=$(func_parser_key "${lines[17]}") +fpgm_trainer=$(func_parser_value "${lines[17]}") +distill_key=$(func_parser_key "${lines[18]}") +distill_trainer=$(func_parser_value "${lines[18]}") +trainer_key1=$(func_parser_key "${lines[19]}") +trainer_value1=$(func_parser_value "${lines[19]}") +trainer_key2=$(func_parser_key "${lines[20]}") +trainer_value2=$(func_parser_value "${lines[20]}") + +eval_py=$(func_parser_value "${lines[23]}") +eval_key1=$(func_parser_key "${lines[24]}") +eval_value1=$(func_parser_value "${lines[24]}") + +save_infer_key=$(func_parser_key "${lines[27]}") +export_weight=$(func_parser_key "${lines[28]}") +norm_export=$(func_parser_value "${lines[29]}") +pact_export=$(func_parser_value "${lines[30]}") +fpgm_export=$(func_parser_value "${lines[31]}") 
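The `ds2_params_*.txt` chain files above are consumed line by line by `tests/chains/test.sh`: each line is split on `:`, with `func_parser_key` returning the part before the colon and `func_parser_value` the part after. The helper below is a rough Python equivalent for inspecting such a file; it is an illustration only, since the real script addresses fields positionally by line index and repeated `null` keys collapse in this dictionary view.

```
# Hedged sketch of the key:value convention used by tests/chains/ds2_params_*.txt,
# mirroring func_parser_key / func_parser_value from test.sh for quick inspection.
def parse_chain_file(path):
    params = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            # skip section banners (====...) and '##' separator rows
            if not line or line.startswith(("=", "#")):
                continue
            key, _, value = line.partition(":")
            params[key] = value  # repeated keys such as 'null' overwrite each other
    return params

cfg = parse_chain_file("ds2_params_lite_train_infer.txt")
print(cfg["model_name"], cfg["gpu_list"], cfg["trainer"])
```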
+distill_export=$(func_parser_value "${lines[32]}") +export_key1=$(func_parser_key "${lines[33]}") +export_value1=$(func_parser_value "${lines[33]}") +export_key2=$(func_parser_key "${lines[34]}") +export_value2=$(func_parser_value "${lines[34]}") + +# parser inference model +infer_model_dir_list=$(func_parser_value "${lines[36]}") +infer_export_list=$(func_parser_value "${lines[37]}") +infer_is_quant=$(func_parser_value "${lines[38]}") +# parser inference +inference_py=$(func_parser_value "${lines[39]}") +use_gpu_key=$(func_parser_key "${lines[40]}") +use_gpu_list=$(func_parser_value "${lines[40]}") +use_mkldnn_key=$(func_parser_key "${lines[41]}") +use_mkldnn_list=$(func_parser_value "${lines[41]}") +cpu_threads_key=$(func_parser_key "${lines[42]}") +cpu_threads_list=$(func_parser_value "${lines[42]}") +batch_size_key=$(func_parser_key "${lines[43]}") +batch_size_list=$(func_parser_value "${lines[43]}") +use_trt_key=$(func_parser_key "${lines[44]}") +use_trt_list=$(func_parser_value "${lines[44]}") +precision_key=$(func_parser_key "${lines[45]}") +precision_list=$(func_parser_value "${lines[45]}") +infer_model_key=$(func_parser_key "${lines[46]}") +image_dir_key=$(func_parser_key "${lines[47]}") +infer_img_dir=$(func_parser_value "${lines[47]}") +save_log_key=$(func_parser_key "${lines[48]}") +benchmark_key=$(func_parser_key "${lines[49]}") +benchmark_value=$(func_parser_value "${lines[49]}") +infer_key1=$(func_parser_key "${lines[50]}") +infer_value1=$(func_parser_value "${lines[50]}") + +LOG_PATH="./tests/output" +mkdir -p ${LOG_PATH} +status_log="${LOG_PATH}/results.log" + + +function func_inference(){ + IFS='|' + _python=$1 + _script=$2 + _model_dir=$3 + _log_path=$4 + _img_dir=$5 + _flag_quant=$6 + # inference + for use_gpu in ${use_gpu_list[*]}; do + if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then + for use_mkldnn in ${use_mkldnn_list[*]}; do + if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then + continue + fi + for threads in ${cpu_threads_list[*]}; do + for batch_size in ${batch_size_list[*]}; do + _save_log_path="${_log_path}/infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_${batch_size}.log" + set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}") + set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}") + set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}") + set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}") + set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}") + set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}") + command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 " + eval $command + last_status=${PIPESTATUS[0]} + eval "cat ${_save_log_path}" + status_check $last_status "${command}" "${status_log}" + done + done + done + elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then + for use_trt in ${use_trt_list[*]}; do + for precision in ${precision_list[*]}; do + if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then + continue + fi + if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then + continue + fi + if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then + continue + fi + for batch_size in ${batch_size_list[*]}; do + 
_save_log_path="${_log_path}/infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log" + set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}") + set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}") + set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}") + set_tensorrt=$(func_set_params "${use_trt_key}" "${use_trt}") + set_precision=$(func_set_params "${precision_key}" "${precision}") + set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}") + set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}") + command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 " + eval $command + last_status=${PIPESTATUS[0]} + eval "cat ${_save_log_path}" + status_check $last_status "${command}" "${status_log}" + + done + done + done + else + echo "Does not support hardware other than CPU and GPU Currently!" + fi + done +} + +if [ ${MODE} = "infer" ]; then + GPUID=$3 + if [ ${#GPUID} -le 0 ];then + env=" " + else + env="export CUDA_VISIBLE_DEVICES=${GPUID}" + fi + # set CUDA_VISIBLE_DEVICES + eval $env + export Count=0 + IFS="|" + infer_run_exports=(${infer_export_list}) + infer_quant_flag=(${infer_is_quant}) + for infer_model in ${infer_model_dir_list[*]}; do + # run export + if [ ${infer_run_exports[Count]} != "null" ];then + save_infer_dir=$(dirname $infer_model) + set_export_weight=$(func_set_params "${export_weight}" "${infer_model}") + set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_dir}") + export_cmd="${python} ${norm_export} ${set_export_weight} ${set_save_infer_key}" + eval $export_cmd + status_export=$? 
+ if [ ${status_export} = 0 ];then + status_check $status_export "${export_cmd}" "${status_log}" + fi + else + save_infer_dir=${infer_model} + fi + #run inference + is_quant=${infer_quant_flag[Count]} + func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant} + Count=$(($Count + 1)) + done + +else + IFS="|" + export Count=0 + USE_GPU_KEY=(${train_use_gpu_value}) + for gpu in ${gpu_list[*]}; do + use_gpu=${USE_GPU_KEY[Count]} + Count=$(($Count + 1)) + if [ ${gpu} = "-1" ];then + env="" + elif [ ${#gpu} -le 1 ];then + env="export CUDA_VISIBLE_DEVICES=${gpu}" + eval ${env} + elif [ ${#gpu} -le 15 ];then + IFS="," + array=(${gpu}) + env="export CUDA_VISIBLE_DEVICES=${array[0]}" + IFS="|" + else + IFS=";" + array=(${gpu}) + ips=${array[0]} + gpu=${array[1]} + IFS="|" + env=" " + fi + for autocast in ${autocast_list[*]}; do + for trainer in ${trainer_list[*]}; do + flag_quant=False + if [ ${trainer} = ${pact_key} ]; then + run_train=${pact_trainer} + run_export=${pact_export} + flag_quant=True + elif [ ${trainer} = "${fpgm_key}" ]; then + run_train=${fpgm_trainer} + run_export=${fpgm_export} + elif [ ${trainer} = "${distill_key}" ]; then + run_train=${distill_trainer} + run_export=${distill_export} + elif [ ${trainer} = ${trainer_key1} ]; then + run_train=${trainer_value1} + run_export=${export_value1} + elif [[ ${trainer} = ${trainer_key2} ]]; then + run_train=${trainer_value2} + run_export=${export_value2} + else + run_train=${norm_trainer} + run_export=${norm_export} + fi + + if [ ${run_train} = "null" ]; then + continue + fi + + set_autocast=$(func_set_params "${autocast_key}" "${autocast}") + set_epoch=$(func_set_params "${epoch_key}" "${epoch_num}") + set_pretrain=$(func_set_params "${pretrain_model_key}" "${pretrain_model_value}") + set_batchsize=$(func_set_params "${train_batch_key}" "${train_batch_value}") + set_train_params1=$(func_set_params "${train_param_key1}" "${train_param_value1}") + set_use_gpu=$(func_set_params "${train_use_gpu_key}" "${use_gpu}") + save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}" + + # load pretrain from norm training if current trainer is pact or fpgm trainer + if [ ${trainer} = ${pact_key} ] || [ ${trainer} = ${fpgm_key} ]; then + set_pretrain="${load_norm_train_model}" + fi + + set_save_model=$(func_set_params "${save_model_key}" "${save_log}") + if [ ${#gpu} -le 2 ];then # train with cpu or single gpu + cmd="${python} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1} " + elif [ ${#gpu} -le 15 ];then # train with multi-gpu + cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1}" + else # train with multi-machine + cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} ${set_save_model} ${set_pretrain} ${set_epoch} ${set_autocast} ${set_batchsize} ${set_train_params1}" + fi + # run train + #eval "unset CUDA_VISIBLE_DEVICES" + eval $cmd + status_check $? 
"${cmd}" "${status_log}" + + set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/${train_model_name}") + # save norm trained models to set pretrain for pact training and fpgm training + if [ ${trainer} = ${trainer_norm} ]; then + load_norm_train_model=${set_eval_pretrain} + fi + # run eval + if [ ${eval_py} != "null" ]; then + set_eval_params1=$(func_set_params "${eval_key1}" "${eval_value1}") + eval_cmd="${python} ${eval_py} ${set_eval_pretrain} ${set_use_gpu} ${set_eval_params1}" + eval $eval_cmd + status_check $? "${eval_cmd}" "${status_log}" + fi + # run export model + if [ ${run_export} != "null" ]; then + # run export model + save_infer_path="${save_log}" + set_export_weight=$(func_set_params "${export_weight}" "${save_log}/${train_model_name}") + set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_path}") + export_cmd="${python} ${run_export} ${set_export_weight} ${set_save_infer_key}" + eval $export_cmd + status_check $? "${export_cmd}" "${status_log}" + + #run inference + eval $env + save_infer_path="${save_log}" + func_inference "${python}" "${inference_py}" "${save_infer_path}" "${LOG_PATH}" "${train_infer_img_dir}" "${flag_quant}" + eval "unset CUDA_VISIBLE_DEVICES" + fi + done # done with: for trainer in ${trainer_list[*]}; do + done # done with: for autocast in ${autocast_list[*]}; do + done # done with: for gpu in ${gpu_list[*]}; do +fi # end if [ ${MODE} = "infer" ]; then diff --git a/tests/chains/whole_train_infer.sh b/tests/chains/whole_train_infer.sh new file mode 100644 index 000000000..496041a7b --- /dev/null +++ b/tests/chains/whole_train_infer.sh @@ -0,0 +1,5 @@ +bash prepare.sh ds2_params_whole_train_infer.txt whole_train_infer +cd ../../examples/aishell/s0 +source path.sh +bash ../../../tests/chains/test.sh ../../../tests/chains/ds2_params_whole_train_infer.txt whole_train_infer +cd ../../../tests/chains diff --git a/tests/deepspeech2_model_test.py b/tests/deepspeech2_model_test.py index 1776736f5..00df8195b 100644 --- a/tests/deepspeech2_model_test.py +++ b/tests/deepspeech2_model_test.py @@ -16,7 +16,7 @@ import unittest import numpy as np import paddle -from deepspeech.models.deepspeech2 import DeepSpeech2Model +from deepspeech.models.ds2 import DeepSpeech2Model class TestDeepSpeech2Model(unittest.TestCase): diff --git a/tests/deepspeech2_online_model_test.py b/tests/deepspeech2_online_model_test.py new file mode 100644 index 000000000..6264070be --- /dev/null +++ b/tests/deepspeech2_online_model_test.py @@ -0,0 +1,186 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import unittest + +import numpy as np +import paddle + +from deepspeech.models.ds2_online import DeepSpeech2ModelOnline + + +class TestDeepSpeech2ModelOnline(unittest.TestCase): + def setUp(self): + paddle.set_device('cpu') + + self.batch_size = 2 + self.feat_dim = 161 + max_len = 210 + + # (B, T, D) + audio = np.random.randn(self.batch_size, max_len, self.feat_dim) + audio_len = np.random.randint(max_len, size=self.batch_size) + audio_len[-1] = max_len + # (B, U) + text = np.array([[1, 2], [1, 2]]) + text_len = np.array([2] * self.batch_size) + + self.audio = paddle.to_tensor(audio, dtype='float32') + self.audio_len = paddle.to_tensor(audio_len, dtype='int64') + self.text = paddle.to_tensor(text, dtype='int32') + self.text_len = paddle.to_tensor(text_len, dtype='int64') + + def test_ds2_1(self): + model = DeepSpeech2ModelOnline( + feat_size=self.feat_dim, + dict_size=10, + num_conv_layers=2, + num_rnn_layers=3, + rnn_size=1024, + num_fc_layers=2, + fc_layers_size_list=[512, 256], + use_gru=False) + loss = model(self.audio, self.audio_len, self.text, self.text_len) + self.assertEqual(loss.numel(), 1) + + def test_ds2_2(self): + model = DeepSpeech2ModelOnline( + feat_size=self.feat_dim, + dict_size=10, + num_conv_layers=2, + num_rnn_layers=3, + rnn_size=1024, + num_fc_layers=2, + fc_layers_size_list=[512, 256], + use_gru=True) + loss = model(self.audio, self.audio_len, self.text, self.text_len) + self.assertEqual(loss.numel(), 1) + + def test_ds2_3(self): + model = DeepSpeech2ModelOnline( + feat_size=self.feat_dim, + dict_size=10, + num_conv_layers=2, + num_rnn_layers=3, + rnn_size=1024, + num_fc_layers=2, + fc_layers_size_list=[512, 256], + use_gru=False) + loss = model(self.audio, self.audio_len, self.text, self.text_len) + self.assertEqual(loss.numel(), 1) + + def test_ds2_4(self): + model = DeepSpeech2ModelOnline( + feat_size=self.feat_dim, + dict_size=10, + num_conv_layers=2, + num_rnn_layers=3, + rnn_size=1024, + num_fc_layers=2, + fc_layers_size_list=[512, 256], + use_gru=True) + loss = model(self.audio, self.audio_len, self.text, self.text_len) + self.assertEqual(loss.numel(), 1) + + def test_ds2_5(self): + model = DeepSpeech2ModelOnline( + feat_size=self.feat_dim, + dict_size=10, + num_conv_layers=2, + num_rnn_layers=3, + rnn_size=1024, + num_fc_layers=2, + fc_layers_size_list=[512, 256], + use_gru=False) + loss = model(self.audio, self.audio_len, self.text, self.text_len) + self.assertEqual(loss.numel(), 1) + + def test_ds2_6(self): + model = DeepSpeech2ModelOnline( + feat_size=self.feat_dim, + dict_size=10, + num_conv_layers=2, + num_rnn_layers=3, + rnn_size=1024, + rnn_direction='bidirect', + num_fc_layers=2, + fc_layers_size_list=[512, 256], + use_gru=False) + loss = model(self.audio, self.audio_len, self.text, self.text_len) + self.assertEqual(loss.numel(), 1) + + def test_ds2_7(self): + use_gru = False + model = DeepSpeech2ModelOnline( + feat_size=self.feat_dim, + dict_size=10, + num_conv_layers=2, + num_rnn_layers=1, + rnn_size=1024, + rnn_direction='forward', + num_fc_layers=2, + fc_layers_size_list=[512, 256], + use_gru=use_gru) + model.eval() + paddle.device.set_device("cpu") + de_ch_size = 8 + + eouts, eouts_lens, final_state_h_box, final_state_c_box = model.encoder( + self.audio, self.audio_len) + eouts_by_chk_list, eouts_lens_by_chk_list, final_state_h_box_chk, final_state_c_box_chk = model.encoder.forward_chunk_by_chunk( + self.audio, self.audio_len, de_ch_size) + eouts_by_chk = paddle.concat(eouts_by_chk_list, axis=1) + eouts_lens_by_chk = 
paddle.add_n(eouts_lens_by_chk_list) + decode_max_len = eouts.shape[1] + eouts_by_chk = eouts_by_chk[:, :decode_max_len, :] + self.assertEqual(paddle.allclose(eouts_by_chk, eouts), True) + self.assertEqual( + paddle.allclose(final_state_h_box, final_state_h_box_chk), True) + if use_gru is False: + self.assertEqual( + paddle.allclose(final_state_c_box, final_state_c_box_chk), True) + + def test_ds2_8(self): + use_gru = True + model = DeepSpeech2ModelOnline( + feat_size=self.feat_dim, + dict_size=10, + num_conv_layers=2, + num_rnn_layers=1, + rnn_size=1024, + rnn_direction='forward', + num_fc_layers=2, + fc_layers_size_list=[512, 256], + use_gru=use_gru) + model.eval() + paddle.device.set_device("cpu") + de_ch_size = 8 + + eouts, eouts_lens, final_state_h_box, final_state_c_box = model.encoder( + self.audio, self.audio_len) + eouts_by_chk_list, eouts_lens_by_chk_list, final_state_h_box_chk, final_state_c_box_chk = model.encoder.forward_chunk_by_chunk( + self.audio, self.audio_len, de_ch_size) + eouts_by_chk = paddle.concat(eouts_by_chk_list, axis=1) + eouts_lens_by_chk = paddle.add_n(eouts_lens_by_chk_list) + decode_max_len = eouts.shape[1] + eouts_by_chk = eouts_by_chk[:, :decode_max_len, :] + self.assertEqual(paddle.allclose(eouts_by_chk, eouts), True) + self.assertEqual( + paddle.allclose(final_state_h_box, final_state_h_box_chk), True) + if use_gru is False: + self.assertEqual( + paddle.allclose(final_state_c_box, final_state_c_box_chk), True) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/mask_test.py b/tests/mask_test.py index f44aca8fc..dbe8c4b09 100644 --- a/tests/mask_test.py +++ b/tests/mask_test.py @@ -37,13 +37,13 @@ class TestU2Model(unittest.TestCase): def test_make_non_pad_mask(self): res = make_non_pad_mask(self.lengths) - res2 = make_pad_mask(self.lengths).logical_not() + res2 = ~make_pad_mask(self.lengths) self.assertSequenceEqual(res.numpy().tolist(), self.masks.tolist()) self.assertSequenceEqual(res.numpy().tolist(), res2.numpy().tolist()) def test_make_pad_mask(self): res = make_pad_mask(self.lengths) - res1 = make_non_pad_mask(self.lengths).logical_not() + res1 = ~make_non_pad_mask(self.lengths) self.assertSequenceEqual(res.numpy().tolist(), self.pad_masks.tolist()) self.assertSequenceEqual(res.numpy().tolist(), res1.tolist()) diff --git a/third_party/text_processing/__ini__.py b/third_party/text_processing/__ini__.py new file mode 100644 index 000000000..8d1c8b69c --- /dev/null +++ b/third_party/text_processing/__ini__.py @@ -0,0 +1 @@ + diff --git a/third_party/text_processing/__init__.py b/third_party/text_processing/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/third_party/text_processing/normalization/__init__.py b/third_party/text_processing/normalization/__init__.py new file mode 100644 index 000000000..0b4f0e7f8 --- /dev/null +++ b/third_party/text_processing/normalization/__init__.py @@ -0,0 +1,42 @@ +from .sentence_split import split +from .num import RE_NUMBER, RE_FRAC, RE_PERCENTAGE, RE_RANGE, RE_INTEGER, RE_DEFAULT_NUM +from .num import replace_number, replace_frac, replace_percentage, replace_range, replace_default_num + +from .chronology import RE_TIME, RE_DATE, RE_DATE2 +from .chronology import replace_time, replace_date, replace_date2 + +from .quantifier import RE_TEMPERATURE +from .quantifier import replace_temperature + +from .phone import RE_MOBILE_PHONE, RE_TELEPHONE, replace_phone + +from .char_convert import tranditional_to_simplified +from .constants import F2H_ASCII_LETTERS, F2H_DIGITS, F2H_SPACE 
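The online-model tests above cover GRU and simple-RNN configurations and, in test_ds2_7/test_ds2_8, check that chunk-by-chunk encoding reproduces the whole-utterance encoder outputs and final states. The file ends in unittest.main(), so it can be run directly once the deepspeech package is importable; the PYTHONPATH handling below is an assumption for a source checkout rather than an installed package.

# run the whole file (sketch; assumes the repository root is the working directory)
PYTHONPATH=. python tests/deepspeech2_online_model_test.py
# or a single case, e.g. the chunk-by-chunk equivalence check
PYTHONPATH=. python -m unittest tests.deepspeech2_online_model_test.TestDeepSpeech2ModelOnline.test_ds2_7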
+ + +def normalize_sentence(sentence): + # basic character conversions + sentence = tranditional_to_simplified(sentence) + sentence = sentence.translate(F2H_ASCII_LETTERS).translate( + F2H_DIGITS).translate(F2H_SPACE) + + # number related NSW verbalization + sentence = RE_DATE.sub(replace_date, sentence) + sentence = RE_DATE2.sub(replace_date2, sentence) + sentence = RE_TIME.sub(replace_time, sentence) + sentence = RE_TEMPERATURE.sub(replace_temperature, sentence) + sentence = RE_RANGE.sub(replace_range, sentence) + sentence = RE_FRAC.sub(replace_frac, sentence) + sentence = RE_PERCENTAGE.sub(replace_percentage, sentence) + sentence = RE_MOBILE_PHONE.sub(replace_phone, sentence) + sentence = RE_TELEPHONE.sub(replace_phone, sentence) + sentence = RE_DEFAULT_NUM.sub(replace_default_num, sentence) + sentence = RE_NUMBER.sub(replace_number, sentence) + + return sentence + + +def normalize(text): + sentences = split(text) + sentences = [normalize_sentence(sent) for sent in sentences] + return sentences diff --git a/third_party/text_processing/normalization/char_convert.py b/third_party/text_processing/normalization/char_convert.py new file mode 100644 index 000000000..bd328f695 --- /dev/null +++ b/third_party/text_processing/normalization/char_convert.py @@ -0,0 +1,15 @@ +"""Traditional and simplified Chinese conversion with +`opencc `_. +""" + + +import opencc + +_t2s_converter = opencc.OpenCC("t2s.json") +_s2t_converter = opencc.OpenCC('s2t.json') + +def tranditional_to_simplified(text: str) -> str: + return _t2s_converter.convert(text) + +def simplified_to_traditional(text: str) -> str: + return _s2t_converter.convert(text) diff --git a/third_party/text_processing/normalization/chronology.py b/third_party/text_processing/normalization/chronology.py new file mode 100644 index 000000000..7143eb58c --- /dev/null +++ b/third_party/text_processing/normalization/chronology.py @@ -0,0 +1,64 @@ +import re +from .num import verbalize_cardinal, verbalize_digit, num2str, DIGITS + + +def _time_num2str(num_string: str) -> str: + """A special case for verbalizing number in time.""" + result = num2str(num_string.lstrip('0')) + if num_string.startswith('0'): + result = DIGITS['0'] + result + return result + +# 时刻表达式 +RE_TIME = re.compile( + r'([0-1]?[0-9]|2[0-3])' + r':([0-5][0-9])' + r'(:([0-5][0-9]))?' +) +def replace_time(match: re.Match) -> str: + hour = match.group(1) + minute = match.group(2) + second = match.group(4) + + result = f"{num2str(hour)}点" + if minute.lstrip('0'): + result += f"{_time_num2str(minute)}分" + if second and second.lstrip('0'): + result += f"{_time_num2str(second)}秒" + return result + + +RE_DATE = re.compile( + r'(\d{4}|\d{2})年' + r'((0?[1-9]|1[0-2])月)?' + r'(((0?[1-9])|((1|2)[0-9])|30|31)([日号]))?' 
+) +def replace_date(match: re.Match) -> str: + year = match.group(1) + month = match.group(3) + day = match.group(5) + result = "" + if year: + result += f"{verbalize_digit(year)}年" + if month: + result += f"{verbalize_cardinal(month)}月" + if day: + result += f"{verbalize_cardinal(day)}{match.group(9)}" + return result + +# 用 / 或者 - 分隔的 YY/MM/DD 或者 YY-MM-DD 日期 +RE_DATE2 = re.compile( + r'(\d{4})([- /.])(0[1-9]|1[012])\2(0[1-9]|[12][0-9]|3[01])' +) +def replace_date2(match: re.Match) -> str: + year = match.group(1) + month = match.group(3) + day = match.group(4) + result = "" + if year: + result += f"{verbalize_digit(year)}年" + if month: + result += f"{verbalize_cardinal(month)}月" + if day: + result += f"{verbalize_cardinal(day)}日" + return result diff --git a/third_party/text_processing/normalization/constants.py b/third_party/text_processing/normalization/constants.py new file mode 100644 index 000000000..d5c04a761 --- /dev/null +++ b/third_party/text_processing/normalization/constants.py @@ -0,0 +1,58 @@ +import string +import re +from pypinyin.constants import SUPPORT_UCS4 + + +# 全角半角转换 +# 英文字符全角 -> 半角映射表 (num: 52) +F2H_ASCII_LETTERS = { + chr(ord(char) + 65248): char + for char in string.ascii_letters +} + +# 英文字符半角 -> 全角映射表 +H2F_ASCII_LETTERS = {value: key for key, value in F2H_ASCII_LETTERS.items()} + +# 数字字符全角 -> 半角映射表 (num: 10) +F2H_DIGITS = { + chr(ord(char) + 65248): char + for char in string.digits +} +# 数字字符半角 -> 全角映射表 +H2F_DIGITS = {value: key for key, value in F2H_DIGITS.items()} + +# 标点符号全角 -> 半角映射表 (num: 32) +F2H_PUNCTUATIONS = { + chr(ord(char) + 65248): char + for char in string.punctuation +} +# 标点符号半角 -> 全角映射表 +H2F_PUNCTUATIONS = {value: key for key, value in F2H_PUNCTUATIONS.items()} + +# 空格 (num: 1) +F2H_SPACE = {'\u3000': ' '} +H2F_SPACE = {' ': '\u3000'} + +# 非"有拼音的汉字"的字符串,可用于NSW提取 +if SUPPORT_UCS4: + RE_NSW = re.compile( + r'(?:[^' + r'\u3007' # 〇 + r'\u3400-\u4dbf' # CJK扩展A:[3400-4DBF] + r'\u4e00-\u9fff' # CJK基本:[4E00-9FFF] + r'\uf900-\ufaff' # CJK兼容:[F900-FAFF] + r'\U00020000-\U0002A6DF' # CJK扩展B:[20000-2A6DF] + r'\U0002A703-\U0002B73F' # CJK扩展C:[2A700-2B73F] + r'\U0002B740-\U0002B81D' # CJK扩展D:[2B740-2B81D] + r'\U0002F80A-\U0002FA1F' # CJK兼容扩展:[2F800-2FA1F] + r'])+' + ) +else: + RE_NSW = re.compile( # pragma: no cover + r'(?:[^' + r'\u3007' # 〇 + r'\u3400-\u4dbf' # CJK扩展A:[3400-4DBF] + r'\u4e00-\u9fff' # CJK基本:[4E00-9FFF] + r'\uf900-\ufaff' # CJK兼容:[F900-FAFF] + r'])+' + ) diff --git a/third_party/text_processing/normalization/num.py b/third_party/text_processing/normalization/num.py new file mode 100644 index 000000000..60fc1686d --- /dev/null +++ b/third_party/text_processing/normalization/num.py @@ -0,0 +1,155 @@ +""" +Rules to verbalize numbers into Chinese characters. 
+https://zh.wikipedia.org/wiki/中文数字#現代中文 +""" + +import re +from typing import List +from collections import OrderedDict + +DIGITS = {str(i): tran for i, tran in enumerate('零一二三四五六七八九')} +UNITS = OrderedDict({ + 1: '十', + 2: '百', + 3: '千', + 4: '万', + 8: '亿', +}) + +# 分数表达式 +RE_FRAC = re.compile(r'(-?)(\d+)/(\d+)') +def replace_frac(match: re.Match) -> str: + sign = match.group(1) + nominator = match.group(2) + denominator = match.group(3) + sign: str = "负" if sign else "" + nominator: str = num2str(nominator) + denominator: str = num2str(denominator) + result = f"{sign}{denominator}分之{nominator}" + return result + + +# 百分数表达式 +RE_PERCENTAGE = re.compile(r'(-?)(\d+(\.\d+)?)%') +def replace_percentage(match: re.Match) -> str: + sign = match.group(1) + percent = match.group(2) + sign: str = "负" if sign else "" + percent: str = num2str(percent) + result = f"{sign}百分之{percent}" + return result + +# 整数表达式 +# 带负号或者不带负号的整数 12, -10 +RE_INTEGER = re.compile( + r'(-?)' + r'(\d+)' +) + +# 编号-无符号整形 +# 00078 +RE_DEFAULT_NUM = re.compile(r'\d{4}\d*') +def replace_default_num(match: re.Match): + number = match.group(0) + return verbalize_digit(number) + +# 数字表达式 +# 1. 整数: -10, 10; +# 2. 浮点数: 10.2, -0.3 +# 3. 不带符号和整数部分的纯浮点数: .22, .38 +RE_NUMBER = re.compile( + r'(-?)((\d+)(\.\d+)?)' + r'|(\.(\d+))' +) +def replace_number(match: re.Match) -> str: + sign = match.group(1) + number = match.group(2) + pure_decimal = match.group(5) + if pure_decimal: + result = num2str(pure_decimal) + else: + sign: str = "负" if sign else "" + number: str = num2str(number) + result = f"{sign}{number}" + return result + +# 范围表达式 +# 12-23, 12~23 +RE_RANGE = re.compile( + r'(\d+)[-~](\d+)' +) +def replace_range(match: re.Match) -> str: + first, second = match.group(1), match.group(2) + first: str = num2str(first) + second: str = num2str(second) + result = f"{first}到{second}" + return result + + +def _get_value(value_string: str, use_zero: bool=True) -> List[str]: + stripped = value_string.lstrip('0') + if len(stripped) == 0: + return [] + elif len(stripped) == 1: + if use_zero and len(stripped) < len(value_string): + return [DIGITS['0'], DIGITS[stripped]] + else: + return [DIGITS[stripped]] + else: + largest_unit = next(power for power in reversed(UNITS.keys()) if power < len(stripped)) + first_part = value_string[:-largest_unit] + second_part = value_string[-largest_unit:] + return _get_value(first_part) + [UNITS[largest_unit]] + _get_value(second_part) + +def verbalize_cardinal(value_string: str) -> str: + if not value_string: + return '' + + # 000 -> '零' , 0 -> '零' + value_string = value_string.lstrip('0') + if len(value_string) == 0: + return DIGITS['0'] + + result_symbols = _get_value(value_string) + # verbalized number starting with '一十*' is abbreviated as `十*` + if len(result_symbols) >= 2 and result_symbols[0] == DIGITS['1'] and result_symbols[1] == UNITS[1]: + result_symbols = result_symbols[1:] + return ''.join(result_symbols) + +def verbalize_digit(value_string: str, alt_one=False) -> str: + result_symbols = [DIGITS[digit] for digit in value_string] + result = ''.join(result_symbols) + if alt_one: + result.replace("一", "幺") + return result + +def num2str(value_string: str) -> str: + integer_decimal = value_string.split('.') + if len(integer_decimal) == 1: + integer = integer_decimal[0] + decimal = '' + elif len(integer_decimal) == 2: + integer, decimal = integer_decimal + else: + raise ValueError(f"The value string: '${value_string}' has more than one point in it.") + + result = verbalize_cardinal(integer) + + decimal = 
decimal.rstrip('0') + if decimal: + # '.22' is verbalized as '点二二' + # '3.20' is verbalized as '三点二 + result += '点' + verbalize_digit(decimal) + return result + + + + + + + + + + + + diff --git a/third_party/text_processing/normalization/phone.py b/third_party/text_processing/normalization/phone.py new file mode 100644 index 000000000..1acc18365 --- /dev/null +++ b/third_party/text_processing/normalization/phone.py @@ -0,0 +1,31 @@ +import re +from .num import verbalize_digit + + +# 规范化固话/手机号码 +# 手机 +# http://www.jihaoba.com/news/show/13680 +# 移动:139、138、137、136、135、134、159、158、157、150、151、152、188、187、182、183、184、178、198 +# 联通:130、131、132、156、155、186、185、176 +# 电信:133、153、189、180、181、177 +RE_MOBILE_PHONE= re.compile( + r"(? str: + if mobile: + sp_parts = phone_string.strip('+').split() + result = ''.join( + [verbalize_digit(part, alt_one=True) for part in sp_parts]) + return result + else: + sil_parts = phone_string.split('-') + result = ''.join( + [verbalize_digit(part, alt_one=True) for part in sil_parts]) + return result + + +def replace_phone(match: re.Match) -> str: + return phone2str(match.group(0)) diff --git a/third_party/text_processing/normalization/quantifier.py b/third_party/text_processing/normalization/quantifier.py new file mode 100644 index 000000000..024eb6e01 --- /dev/null +++ b/third_party/text_processing/normalization/quantifier.py @@ -0,0 +1,18 @@ +import re +from .num import num2str + + +# 温度表达式,温度会影响负号的读法 +# -3°C 零下三度 +RE_TEMPERATURE = re.compile( + r'(-?)(\d+(\.\d+)?)(°C|℃|度|摄氏度)' +) +def replace_temperature(match: re.Match) -> str: + sign = match.group(1) + temperature = match.group(2) + unit = match.group(3) + sign: str = "零下" if sign else "" + temperature: str = num2str(temperature) + unit: str = "摄氏度" if unit == "摄氏度" else "度" + result = f"{sign}{temperature}{unit}" + return result diff --git a/third_party/text_processing/normalization/sentence_split.py b/third_party/text_processing/normalization/sentence_split.py new file mode 100644 index 000000000..5867342ba --- /dev/null +++ b/third_party/text_processing/normalization/sentence_split.py @@ -0,0 +1,23 @@ +import re +from typing import List + + +SENTENCE_SPLITOR = re.compile(r'([。!?][”’]?)') + +def split(text: str) -> List[str]: + """Split long text into sentences with sentence-splitting punctuations. + + Parameters + ---------- + text : str + The input text. + + Returns + ------- + List[str] + Sentences. + """ + text = SENTENCE_SPLITOR.sub(r'\1\n', text) + text = text.strip() + sentences = [sentence.strip() for sentence in re.split(r'\n+', text)] + return sentences diff --git a/tools/Makefile b/tools/Makefile index c129bf5a2..62cf990fa 100644 --- a/tools/Makefile +++ b/tools/Makefile @@ -1,7 +1,8 @@ +SHELL:= /bin/bash PYTHON:= python3.7 .PHONY: all clean -all: virtualenv kenlm.done sox.done soxbindings.done +all: virtualenv kenlm.done sox.done soxbindings.done mfa.done virtualenv: test -d venv || virtualenv -p $(PYTHON) venv @@ -18,8 +19,8 @@ kenlm.done: apt install -y build-essential cmake libboost-system-dev libboost-thread-dev libboost-program-options-dev libboost-test-dev libeigen3-dev zlib1g-dev libbz2-dev liblzma-dev apt-get install -y gcc-5 g++-5 && update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-5 50 && update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-5 50 test -d kenlm || wget -O - https://kheafield.com/code/kenlm.tar.gz | tar xz - mkdir -p kenlm/build && cd kenlm/build && cmake .. 
&& make -j4 && make install - cd kenlm && python setup.py install + rm -rf kenlm/build && mkdir -p kenlm/build && cd kenlm/build && cmake .. && make -j4 && make install + source venv/bin/activate; cd kenlm && python setup.py install touch kenlm.done sox.done: @@ -31,5 +32,10 @@ sox.done: soxbindings.done: test -d soxbindings || git clone https://github.com/pseeth/soxbindings.git - source venv/bin/activate; cd soxbindings && python3 setup.py install + source venv/bin/activate; cd soxbindings && python setup.py install touch soxbindings.done + +mfa.done: + test -d montreal-forced-aligner || wget https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner/releases/download/v1.0.1/montreal-forced-aligner_linux.tar.gz + tar xvf montreal-forced-aligner_linux.tar.gz + touch mfa.done diff --git a/tools/extras/README.md b/tools/extras/README.md new file mode 100644 index 000000000..19c06a134 --- /dev/null +++ b/tools/extras/README.md @@ -0,0 +1,11 @@ +1. kaldi + +deps gcc, mkl or openblas + +2. OpenFST/ngram/pynini + +deps gcc + +3. MFA + +deps kaldi diff --git a/tools/extras/install_gcc.sh b/tools/extras/install_gcc.sh new file mode 100755 index 000000000..eb4ea1f05 --- /dev/null +++ b/tools/extras/install_gcc.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +set -e +set -x + +# gcc +apt update -y +apt install build-essential -y +apt install software-properties-common -y +add-apt-repository ppa:ubuntu-toolchain-r/test +apt install gcc-8 g++-8 -y +update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-8 80 +update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-8 80 +update-alternatives --config gcc + +# gfortran +apt-get install gfortran-8 diff --git a/tools/extras/install_kaldi.sh b/tools/extras/install_kaldi.sh new file mode 100755 index 000000000..b87232b01 --- /dev/null +++ b/tools/extras/install_kaldi.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +# Installation script for Kaldi +# +set -e + +apt-get install subversion -y + +KALDI_GIT="--depth 1 -b master https://github.com/kaldi-asr/kaldi.git" + +KALDI_DIR="$PWD/kaldi" + +if [ ! -d "$KALDI_DIR" ]; then + git clone $KALDI_GIT $KALDI_DIR +else + echo "$KALDI_DIR already exists!" +fi + +cd "$KALDI_DIR/tools" +git pull + +# Prevent kaldi from switching default python version +mkdir -p "python" +touch "python/.use_default_python" + +./extras/check_dependencies.sh + +make -j4 + +pushd ../src +./configure --shared --use-cuda=no --static-math --mathlib=OPENBLAS --openblas-root=${KALDI_DIR}/../OpenBLAS/install +make clean -j && make depend -j && make -j4 +popd + +echo "Done installing Kaldi." diff --git a/tools/extras/install_kenlm.sh b/tools/extras/install_kenlm.sh new file mode 100755 index 000000000..100225bf9 --- /dev/null +++ b/tools/extras/install_kenlm.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +apt install -y build-essential cmake libboost-system-dev libboost-thread-dev libboost-program-options-dev libboost-test-dev libeigen3-dev zlib1g-dev libbz2-dev liblzma-dev + +apt-get install -y gcc-5 g++-5 && update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-5 50 && update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-5 50 + +test -d kenlm || wget -O - https://kheafield.com/code/kenlm.tar.gz | tar xz + +rm -rf kenlm/build && mkdir -p kenlm/build && cd kenlm/build && cmake .. 
&& make -j4 && make install diff --git a/tools/extras/install_liblbfgs.sh b/tools/extras/install_liblbfgs.sh new file mode 100755 index 000000000..8d6ae4ab7 --- /dev/null +++ b/tools/extras/install_liblbfgs.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash + +VER=1.10 + +WGET=${WGET:-wget} + +if [ ! -f liblbfgs-$VER.tar.gz ]; then + if [ -d "$DOWNLOAD_DIR" ]; then + cp -p "$DOWNLOAD_DIR/liblbfgs-$VER.tar.gz" . || exit 1 + else + $WGET https://github.com/downloads/chokkan/liblbfgs/liblbfgs-$VER.tar.gz || exit 1 + fi +fi + +tar -xzf liblbfgs-$VER.tar.gz +cd liblbfgs-$VER +./configure --prefix=`pwd` +make +# due to the liblbfgs project directory structure, we have to use -i +# but the erros are completely harmless +make -i install +cd .. + +( + [ ! -z "${LIBLBFGS}" ] && \ + echo >&2 "LIBLBFGS variable is aleady defined. Undefining..." && \ + unset LIBLBFGS + + [ -f ./env.sh ] && . ./env.sh + + [ ! -z "${LIBLBFGS}" ] && \ + echo >&2 "libLBFGS config is already in env.sh" && exit + + wd=`pwd` + wd=`readlink -f $wd || pwd` + + echo "export LIBLBFGS=$wd/liblbfgs-1.10" + echo export LD_LIBRARY_PATH='${LD_LIBRARY_PATH:-}':'${LIBLBFGS}'/lib/.libs +) >> env.sh + diff --git a/tools/extras/install_mfa.sh b/tools/extras/install_mfa.sh new file mode 100755 index 000000000..ae126fa62 --- /dev/null +++ b/tools/extras/install_mfa.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +# install openblas, kaldi before + +test -d Montreal-Forced-Aligner || git clone https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner.git + +pushd Montreal-Forced-Aligner && python setup.py install && popd + +test -d kaldi || { echo "need install kaldi first"; exit 1;} + +mfa thirdparty kaldi $PWD/kaldi + +mfa thirdparty validate + +echo "install mfa pass." diff --git a/tools/extras/install_miniconda.sh b/tools/extras/install_miniconda.sh new file mode 100755 index 000000000..3d1909af6 --- /dev/null +++ b/tools/extras/install_miniconda.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +WGET=${WGET:-wget} + +# The script automatically choose default settings of miniconda for installation +# Miniconda will be installed in the HOME directory. ($HOME/miniconda3). +# Also don't make miniconda's python as default. + +if [ -d "$DOWNLOAD_DIR" ]; then + cp -p "$DOWNLOAD_DIR/Miniconda3-latest-Linux-x86_64.sh" . || exit 1 +else + $WGET https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh || exit 1 +fi +bash Miniconda3-latest-Linux-x86_64.sh -b + +$HOME/miniconda3/bin/python -m pip install --user tqdm +$HOME/miniconda3/bin/python -m pip install --user scikit-learn +$HOME/miniconda3/bin/python -m pip install --user librosa +$HOME/miniconda3/bin/python -m pip install --user h5py diff --git a/tools/extras/install_mkl.sh b/tools/extras/install_mkl.sh new file mode 100755 index 000000000..8c1899bdf --- /dev/null +++ b/tools/extras/install_mkl.sh @@ -0,0 +1,277 @@ +#!/usr/bin/env bash + +# Intel MKL is now freely available even for commercial use. This script +# attempts to install the MKL package automatically from Intel's repository. 
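The extras scripts are standalone installers meant to be run from tools/, with an implicit ordering: the README above lists gcc and a BLAS as Kaldi prerequisites, and install_mfa.sh refuses to run until a kaldi directory exists. A possible sequence is sketched below; running with root privileges and a pre-existing OpenBLAS build (matching the --openblas-root path used by install_kaldi.sh) are assumptions.

# sketched ordering of the extras installers (run from tools/, typically as root)
cd tools
bash extras/install_gcc.sh      # gcc-8/g++-8 toolchain
bash extras/install_kaldi.sh    # clones and builds Kaldi (expects OpenBLAS already built)
bash extras/install_mfa.sh      # needs the kaldi/ directory created by the previous step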
+# +# For manual repository setup instructions, see: +# https://software.intel.com/articles/installing-intel-free-libs-and-python-yum-repo +# https://software.intel.com/articles/installing-intel-free-libs-and-python-apt-repo +# +# For other package managers, or non-Linux platforms, see: +# https://software.intel.com/mkl/choose-download + +set -o pipefail + +default_package=intel-mkl-64bit-2020.0-088 + +yum_repo='https://yum.repos.intel.com/mkl/setup/intel-mkl.repo' +apt_repo='https://apt.repos.intel.com/mkl' +intel_key_url='https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB' + +Usage () { + cat >&2 <] + +Checks if MKL is present on the system, and/or attempts to install it. + +If is not provided, ${default_package} will be installed. + +Intel packages are installed under the /opt/intel directory. You should be root +to install MKL into this directory; run this script using the sudo command. + +Options: + -s - Skip check for MKL being already present. + -p -- Force type of package management. Use only + if automatic detection fails, as instructed. + -h - Show this message. + +Environment: + CC The C compiler to use for MKL check. If not set, uses 'cc'. +EOF + exit 2 +} + +Fatal () { echo "$0: $@"; exit 1; } + +Have () { type -t "$1" >/dev/null; } + +# Option values. +skip_cc= +distro= + +while getopts ":hksp:" opt; do + case ${opt} in + h) Usage ;; + s) skip_cc=yes ;; + p) case $OPTARG in + suse|redhat|debian|fedora|arch) distro=$OPTARG ;; + *) Fatal "invalid value -p '${OPTARG}'. " \ + "Allowed: 'suse', 'redhat', 'debian', 'fedora', or 'arch'." + esac ;; + \?) echo >&2 "$0: invalid option -${OPTARG}."; Usage ;; + esac +done +shift $((OPTIND-1)) + +orig_arg_package=${1-''} +package=${1:-$default_package} + +# Check that we are actually on Linux, otherwise give a helpful reference. +[[ $(uname) == Linux ]] || Fatal "\ +This script can be used on Linux only, and your system is $(uname). + +Installer packages for Mac and Windows are available for download from Intel: +https://software.intel.com/mkl/choose-download" + +# Test if MKL is already installed on the system. +if [[ ! $skip_cc ]]; then + : ${CC:=cc} + Have "$CC" || Fatal "\ +C compiler $CC not found. + +You can skip the check for MKL presence by invoking this script with the '-s' +option to this script, but you will need a functional compiler anyway, so we +recommend that you install it first." + + mkl_version=$($CC -E -I /opt/intel/mkl/include - <<< \ + '#include + __INTEL_MKL__.__INTEL_MKL_MINOR__.__INTEL_MKL_UPDATE__' 2>/dev/null | + tail -n 1 ) || mkl_version= + mkl_version=${mkl_version// /} + + [[ $mkl_version ]] && Fatal "\ +MKL version $mkl_version is already installed. + +You can skip the check for MKL presence by invoking this script with the '-s' +option and proceed with automated installation, but we highly discourage +this. This script will register Intel repositories with your system, and it +seems that they have been already registered, or MKL has been installed some +other way. + +You should use your package manager to check which MKL package is already +installed. Note that Intel packages register the latest installed version of +the library as the default. If your installed version is older than +$package, it makes sense to upgrade." +fi + +# Try to determine which package manager the distro uses, unless overridden. +if [[ ! 
$distro ]]; then + dist_vars=$(cat /etc/os-release 2>/dev/null) + eval "$dist_vars" + for rune in $CPE_NAME $ID $ID_LIKE; do + case "$rune" in + cpe:/o:fedoraproject:fedora:2[01]) distro=redhat; break;; # Use yum. + rhel|centos) distro=redhat; break;; + redhat|suse|fedora|debian|arch) distro=$rune; break;; + esac + done + + # Certain old distributions do not have /etc/os-release. We are unlikely to + # encounter these in the wild, but just in case. + # NOTE: Do not try to guess Fedora specifically here! Fedora 20 and below + # detect as redhat, and this is good, because they use yum by default. + [[ ! $distro && -f /etc/redhat-release ]] && distro=redhat + [[ ! $distro && -f /etc/SuSE-release ]] && distro=suse + [[ ! $distro && -f /etc/debian_release ]] && distro=debian + [[ ! $distro && -f /etc/arch-release ]] && distro=arch + + [[ ! $distro ]] && Fatal "\ +Unable to determine package management style. + +Invoke this script with the option '-p