From 6e429f051316628f99ed5e68ccaa91f6d1a32cc0 Mon Sep 17 00:00:00 2001
From: tianhao zhang <15600919271@163.com>
Date: Mon, 10 Oct 2022 11:42:44 +0000
Subject: [PATCH 1/5] support wav2vec2ASR on librispeech
---
examples/librispeech/asr3/README.md | 191 +++
examples/librispeech/asr3/RESULTS.md | 8 +
examples/librispeech/asr3/cmd.sh | 89 ++
.../librispeech/asr3/conf/preprocess.yaml | 4 +
.../librispeech/asr3/conf/tuning/decode.yaml | 11 +
.../librispeech/asr3/conf/wav2vec2ASR.yaml | 120 ++
examples/librispeech/asr3/local/test.sh | 84 ++
examples/librispeech/asr3/local/test_wav.sh | 58 +
examples/librispeech/asr3/local/train.sh | 55 +
examples/librispeech/asr3/path.sh | 15 +
examples/librispeech/asr3/run.sh | 48 +
examples/librispeech/asr3/utils | 1 +
.../s2t/exps/wav2vec2/bin/__init__.py | 13 +
paddlespeech/s2t/exps/wav2vec2/bin/test.py | 66 +
.../s2t/exps/wav2vec2/bin/test_wav.py | 118 ++
paddlespeech/s2t/exps/wav2vec2/bin/train.py | 54 +
paddlespeech/s2t/exps/wav2vec2/model.py | 435 +++++++
paddlespeech/s2t/models/wav2vec2/__init__.py | 0
.../s2t/models/wav2vec2/modules/VanillaNN.py | 45 +
.../models/wav2vec2/modules/activations.py | 175 +++
.../s2t/models/wav2vec2/modules/containers.py | 131 ++
.../s2t/models/wav2vec2/modules/linear.py | 73 ++
.../wav2vec2/modules/modeling_outputs.py | 1129 ++++++++++++++++
.../wav2vec2/modules/modeling_wav2vec2.py | 1131 +++++++++++++++++
.../wav2vec2/processing/signal_processing.py | 242 ++++
.../processing/speech_augmentation.py | 727 +++++++++++
.../s2t/models/wav2vec2/wav2vec2_ASR.py | 247 ++++
27 files changed, 5270 insertions(+)
create mode 100644 examples/librispeech/asr3/README.md
create mode 100644 examples/librispeech/asr3/RESULTS.md
create mode 100644 examples/librispeech/asr3/cmd.sh
create mode 100644 examples/librispeech/asr3/conf/preprocess.yaml
create mode 100644 examples/librispeech/asr3/conf/tuning/decode.yaml
create mode 100644 examples/librispeech/asr3/conf/wav2vec2ASR.yaml
create mode 100644 examples/librispeech/asr3/local/test.sh
create mode 100644 examples/librispeech/asr3/local/test_wav.sh
create mode 100644 examples/librispeech/asr3/local/train.sh
create mode 100644 examples/librispeech/asr3/path.sh
create mode 100644 examples/librispeech/asr3/run.sh
create mode 120000 examples/librispeech/asr3/utils
create mode 100644 paddlespeech/s2t/exps/wav2vec2/bin/__init__.py
create mode 100644 paddlespeech/s2t/exps/wav2vec2/bin/test.py
create mode 100644 paddlespeech/s2t/exps/wav2vec2/bin/test_wav.py
create mode 100644 paddlespeech/s2t/exps/wav2vec2/bin/train.py
create mode 100644 paddlespeech/s2t/exps/wav2vec2/model.py
create mode 100644 paddlespeech/s2t/models/wav2vec2/__init__.py
create mode 100644 paddlespeech/s2t/models/wav2vec2/modules/VanillaNN.py
create mode 100644 paddlespeech/s2t/models/wav2vec2/modules/activations.py
create mode 100644 paddlespeech/s2t/models/wav2vec2/modules/containers.py
create mode 100644 paddlespeech/s2t/models/wav2vec2/modules/linear.py
create mode 100644 paddlespeech/s2t/models/wav2vec2/modules/modeling_outputs.py
create mode 100644 paddlespeech/s2t/models/wav2vec2/modules/modeling_wav2vec2.py
create mode 100644 paddlespeech/s2t/models/wav2vec2/processing/signal_processing.py
create mode 100644 paddlespeech/s2t/models/wav2vec2/processing/speech_augmentation.py
create mode 100644 paddlespeech/s2t/models/wav2vec2/wav2vec2_ASR.py
diff --git a/examples/librispeech/asr3/README.md b/examples/librispeech/asr3/README.md
new file mode 100644
index 000000000..bd96af86f
--- /dev/null
+++ b/examples/librispeech/asr3/README.md
@@ -0,0 +1,191 @@
+# Wav2vec2ASR with Librispeech
+This example contains code used to fine-tune the [wav2vec2.0](https://arxiv.org/pdf/2006.11477.pdf) model on the [LibriSpeech dataset](http://www.openslr.org/resources/12).
+## Overview
+All the scripts you need are in `run.sh`. There are several stages in `run.sh`, and each stage has its own function.
+| Stage | Function |
+|:---- |:----------------------------------------------------------- |
+| 0 | Process data. It includes: (1) Download the dataset (2) Calculate the CMVN of the train dataset (3) Get the vocabulary file (4) Get the manifest files of the train, development and test dataset (5) Download the pretrained wav2vec2 model |
+| 1 | Train the model |
+| 2 | Get the final model by averaging the top-k models; setting k = 1 means choosing the best model |
+| 3 | Test the final model performance |
+| 4 | Infer the single audio file |
+
+
+You can choose to run a range of stages by setting `stage` and `stop_stage`.
+
+For example, if you want to execute the code in stage 2 and stage 3, you can run this script:
+```bash
+bash run.sh --stage 2 --stop_stage 3
+```
+Or you can set `stage` equal to `stop_stage` to run only one stage.
+For example, if you only want to run `stage 0`, you can use the script below:
+```bash
+bash run.sh --stage 0 --stop_stage 0
+```
+The document below will describe the scripts in `run.sh` in detail.
+## The Environment Variables
+The path.sh contains the environment variables.
+```bash
+. ./path.sh
+. ./cmd.sh
+```
+This script needs to be run first. Another script is also needed:
+```bash
+source ${MAIN_ROOT}/utils/parse_options.sh
+```
+It enables the `--variable value` style of overriding variables in the shell scripts, so that, for example, `bash run.sh --stage 1` overrides the default value of `stage` defined in `run.sh`.
+## The Local Variables
+Some local variables are set in `run.sh`.
+`gpus` denotes the GPU numbers you want to use. If you set `gpus=`, it means you only use the CPU.
+`stage` denotes the number of the stage you want to start from in the experiments.
+`stop_stage` denotes the number of the stage you want to end at in the experiments.
+`conf_path` denotes the config path of the model.
+`avg_num` denotes the number K of top-K models you want to average to get the final model.
+`audio_file` denotes the file path of the single file you want to infer in stage 4.
+`ckpt` denotes the checkpoint prefix of the model, e.g. "wav2vec2ASR".
+
+You can set the local variables (except `ckpt`) when you use `run.sh`
+
+For example, you can set the `gpus` and `avg_num` when you use the command line:
+```bash
+bash run.sh --gpus 0,1 --avg_num 20
+```
+## Stage 0: Data Processing
+To use this example, you need to process the data first, and you can use stage 0 in `run.sh` to do this. The code is shown below:
+```bash
+ if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
+ # prepare data
+ bash ./local/data.sh || exit -1
+ fi
+```
+Stage 0 is for processing the data.
+
+If you only want to process the data, you can run:
+```bash
+bash run.sh --stage 0 --stop_stage 0
+```
+You can also just run these scripts in your command line.
+```bash
+. ./path.sh
+. ./cmd.sh
+bash ./local/data.sh
+```
+After processing the data, the `data` directory will look like this:
+```bash
+data/
+|-- dev.meta
+|-- lang_char
+|   |-- bpe_unigram_5000.model
+|   |-- bpe_unigram_5000.vocab
+|   `-- vocab.txt
+|-- manifest.dev
+|-- manifest.dev.raw
+|-- manifest.test
+|-- manifest.test.raw
+|-- manifest.train
+|-- manifest.train.raw
+|-- mean_std.json
+|-- test.meta
+`-- train.meta
+```
+## Stage 1: Model Training
+If you want to train the model, you can use stage 1 in `run.sh`. The code is shown below:
+```bash
+if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
+ # train model, all `ckpt` under `exp` dir
+ CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${ckpt}
+ fi
+```
+If you want to train the model, you can use the script below to execute stage 0 and stage 1:
+```bash
+bash run.sh --stage 0 --stop_stage 1
+```
+Or you can run these scripts in the command line (using the CPU only):
+```bash
+. ./path.sh
+. ./cmd.sh
+bash ./local/data.sh
+CUDA_VISIBLE_DEVICES= ./local/train.sh conf/wav2vec2ASR.yaml wav2vec2ASR
+```
+## Stage 2: Top-k Models Averaging
+After training the model, we need to get the final model for testing and inference. In every epoch, the model checkpoint is saved, so we can choose the best model based on the validation loss, or we can sort the checkpoints and average the parameters of the top-k models to get the final model. We can use stage 2 to do this, and the code is shown below. Note: we only train one epoch for wav2vec2ASR, thus `avg_num` is set to 1.
+```bash
+ if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
+ # avg n best model
+ avg.sh best exp/${ckpt}/checkpoints ${avg_num}
+ fi
+```
+The `avg.sh` script is in `../../../utils/`, which is added to the `PATH` by `path.sh`.
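+Conceptually, averaging the top-k checkpoints simply takes the element-wise mean of their parameter tensors. The snippet below is only a minimal sketch of that idea, assuming the selected checkpoints are plain `.pdparams` state dicts; the actual implementation is the `avg.sh` utility under `utils/`:
+```python
+import paddle
+
+def average_checkpoints(ckpt_paths):
+    """Element-wise average of the parameters stored in several .pdparams files."""
+    assert len(ckpt_paths) > 0
+    avg = paddle.load(ckpt_paths[0])
+    for path in ckpt_paths[1:]:
+        state = paddle.load(path)
+        for k in avg:
+            avg[k] += state[k]
+    return {k: v / len(ckpt_paths) for k, v in avg.items()}
+```
+`run.sh` then loads the averaged checkpoint as `exp/${ckpt}/checkpoints/avg_1` (for `avg_num=1`).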
+If you want to get the final model, you can use the script below to execute stage 0, stage 1, and stage 2:
+```bash
+bash run.sh --stage 0 --stop_stage 2
+```
+Or you can run these scripts in the command line (using the CPU only):
+
+```bash
+. ./path.sh
+. ./cmd.sh
+bash ./local/data.sh
+CUDA_VISIBLE_DEVICES= ./local/train.sh conf/wav2vec2ASR.yaml wav2vec2ASR
+avg.sh best exp/wav2vec2ASR/checkpoints 1
+```
+## Stage 3: Model Testing
+The test stage is to evaluate the model performance. The code of the test stage is shown below:
+```bash
+ if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
+ # test ckpt avg_n
+ CUDA_VISIBLE_DEVICES=0 ./local/test.sh ${conf_path} ${decode_conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} || exit -1
+ fi
+```
+If you want to train a model and test it, you can use the script below to execute stage 0, stage 1, stage 2, and stage 3:
+```bash
+bash run.sh --stage 0 --stop_stage 3
+```
+Or you can run these scripts in the command line (using the CPU only):
+```bash
+. ./path.sh
+. ./cmd.sh
+bash ./local/data.sh
+CUDA_VISIBLE_DEVICES= ./local/train.sh conf/wav2vec2ASR.yaml wav2vec2ASR
+avg.sh best exp/wav2vec2ASR/checkpoints 1
+CUDA_VISIBLE_DEVICES= ./local/test.sh conf/wav2vec2ASR.yaml conf/tuning/decode.yaml exp/wav2vec2ASR/checkpoints/avg_1
+```
+## Pretrained Model
+You can get the pretrained wav2vec2ASR model from [this page](../../../docs/source/released_model.md).
+
+Use the `tar` command to unpack the model, and then you can use the script below to test the model.
+
+For example:
+```bash
+wget https://paddlespeech.bj.bcebos.com/s2t/librispeech/asr3/wav2vec2ASR-large-960h-librispeech_ckpt_1.3.0.model.tar.gz
+tar xzvf wav2vec2ASR-large-960h-librispeech_ckpt_1.3.0.model.tar.gz
+source path.sh
+# If you have processed the data and generated the manifest files, you can skip the following 2 steps
+bash local/data.sh --stage -1 --stop_stage -1
+bash local/data.sh --stage 2 --stop_stage 2
+CUDA_VISIBLE_DEVICES= ./local/test.sh conf/wav2vec2ASR.yaml conf/tuning/decode.yaml exp/wav2vec2ASR/checkpoints/avg_1
+```
+The performance of the released models is shown [here](./RESULTS.md).
+
+
+## Stage 4: Single Audio File Inference
+In some situations, you may want to use the trained model to run inference on a single audio file. You can use stage 4. The code is shown below:
+```bash
+ if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
+ # test a single .wav file
+ CUDA_VISIBLE_DEVICES=0 ./local/test_wav.sh ${conf_path} ${decode_conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} ${audio_file} || exit -1
+ fi
+```
+You can train the model by yourself using `bash run.sh --stage 0 --stop_stage 3`, or you can download the pretrained model with the script below:
+```bash
+wget https://paddlespeech.bj.bcebos.com/s2t/librispeech/asr3/wav2vec2ASR-large-960h-librispeech_ckpt_1.3.0.model.tar.gz
+tar xzvf wav2vec2ASR-large-960h-librispeech_ckpt_1.3.0.model.tar.gz
+```
+You can download the audio demo:
+```bash
+wget -nc https://paddlespeech.bj.bcebos.com/datasets/single_wav/en/demo_002_en.wav -P data/
+```
+You need to prepare an audio file or use the audio demo above; please make sure the sample rate of the audio is 16 kHz.
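+If you are not sure about the sample rate, a quick way to check it is with the `soundfile` package (the same library `test_wav.py` uses to read audio); this is just a small illustrative check:
+```python
+import soundfile
+
+# test_wav.py asserts that the sample rate is 16000 Hz before decoding
+sig, sample_rate = soundfile.read("data/demo_002_en.wav")
+print(sample_rate)  # expected: 16000
+```
+You can get the result of the audio demo by running the script below.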
+```bash
+CUDA_VISIBLE_DEVICES= ./local/test_wav.sh conf/wav2vec2ASR.yaml conf/tuning/decode.yaml exp/wav2vec2ASR/checkpoints/avg_1 data/demo_002_en.wav
+```
diff --git a/examples/librispeech/asr3/RESULTS.md b/examples/librispeech/asr3/RESULTS.md
new file mode 100644
index 000000000..1c5626d9e
--- /dev/null
+++ b/examples/librispeech/asr3/RESULTS.md
@@ -0,0 +1,8 @@
+# LibriSpeech
+
+## Wav2VecASR
+train: Epoch 1, 1*V100-32G, batch size: 10
+
+| Model | Params | Config | Augmentation| Test set | Decode method | WER |
+| --- | --- | --- | --- | --- | --- | --- |
+| wav2vec2ASR | 302.86 M | conf/wav2vec2ASR.yaml | spec_aug | test-clean | greedy search | 0.018887 |
diff --git a/examples/librispeech/asr3/cmd.sh b/examples/librispeech/asr3/cmd.sh
new file mode 100644
index 000000000..7b70ef5e0
--- /dev/null
+++ b/examples/librispeech/asr3/cmd.sh
@@ -0,0 +1,89 @@
+# ====== About run.pl, queue.pl, slurm.pl, and ssh.pl ======
+# Usage: <cmd>.pl [options] JOB=1:<nj> <log> <command...>
+# e.g.
+# run.pl --mem 4G JOB=1:10 echo.JOB.log echo JOB
+#
+# Options:
+# --time : Limit the maximum time to execute.
+# --mem : Limit the maximum memory usage.
+# --max-jobs-run : Limit the number of parallel jobs. This is ignored for non-array jobs.
+# --num-threads : Specify the number of CPU cores.
+# --gpu : Specify the number of GPU devices.
+# --config: Change the configuration file from default.
+#
+# "JOB=1:10" is used for "array jobs" and it can control the number of parallel jobs.
+# The left string of "=", i.e. "JOB", is replaced by the job number in the command and the log file name,
+# e.g. "echo JOB" is changed to "echo 3" for the 3rd job and "echo 8" for the 8th job, respectively.
+# Note that the number must start with a positive number, so you can't use "JOB=0:10" for example.
+#
+# run.pl, queue.pl, slurm.pl, and ssh.pl have a unified interface that does not depend on the backend.
+# These options are mapped to backend-specific options, and the mapping is
+# configured by "conf/queue.conf" and "conf/slurm.conf" by default.
+# If jobs fail, your configuration might be wrong for your environment.
+#
+#
+# The official documentation for run.pl, queue.pl, slurm.pl, and ssh.pl:
+# "Parallelization in Kaldi": http://kaldi-asr.org/doc/queue.html
+# =========================================================
+
+
+# Select the backend used by run.sh from "local", "sge", "slurm", or "ssh"
+cmd_backend='local'
+
+# Local machine, without any Job scheduling system
+if [ "${cmd_backend}" = local ]; then
+
+ # The other usage
+ export train_cmd="run.pl"
+ # Used for "*_train.py": "--gpu" is appended optionally by run.sh
+ export cuda_cmd="run.pl"
+ # Used for "*_recog.py"
+ export decode_cmd="run.pl"
+
+# "qsub" (SGE, Torque, PBS, etc.)
+elif [ "${cmd_backend}" = sge ]; then
+ # The default setting is written in conf/queue.conf.
+ # You must change "-q g.q" for the "queue" for your environment.
+ # To know the "queue" names, type "qhost -q"
+ # Note that to use "--gpu *", you have to setup "complex_value" for the system scheduler.
+
+ export train_cmd="queue.pl"
+ export cuda_cmd="queue.pl"
+ export decode_cmd="queue.pl"
+
+# "sbatch" (Slurm)
+elif [ "${cmd_backend}" = slurm ]; then
+ # The default setting is written in conf/slurm.conf.
+ # You must change "-p cpu" and "-p gpu" for the "partion" for your environment.
+ # To know the "partion" names, type "sinfo".
+ # You can use "--gpu * " by default for slurm and it is interpreted as "--gres gpu:*"
+ # The devices are allocated exclusively using "${CUDA_VISIBLE_DEVICES}".
+
+ export train_cmd="slurm.pl"
+ export cuda_cmd="slurm.pl"
+ export decode_cmd="slurm.pl"
+
+elif [ "${cmd_backend}" = ssh ]; then
+ # You have to create ".queue/machines" to specify the host to execute jobs.
+ # e.g. .queue/machines
+ # host1
+ # host2
+ # host3
+    # Assuming you can log in to them without a password, i.e. you have to set up ssh keys.
+
+ export train_cmd="ssh.pl"
+ export cuda_cmd="ssh.pl"
+ export decode_cmd="ssh.pl"
+
+# This is an example of specifying several unique options in the JHU CLSP cluster setup.
+# Users can modify/add their own command options according to their cluster environments.
+elif [ "${cmd_backend}" = jhu ]; then
+
+ export train_cmd="queue.pl --mem 2G"
+ export cuda_cmd="queue-freegpu.pl --mem 2G --gpu 1 --config conf/gpu.conf"
+ export decode_cmd="queue.pl --mem 4G"
+
+else
+ echo "$0: Error: Unknown cmd_backend=${cmd_backend}" 1>&2
+ return 1
+fi
diff --git a/examples/librispeech/asr3/conf/preprocess.yaml b/examples/librispeech/asr3/conf/preprocess.yaml
new file mode 100644
index 000000000..3979d256b
--- /dev/null
+++ b/examples/librispeech/asr3/conf/preprocess.yaml
@@ -0,0 +1,4 @@
+process:
+ # extract kaldi fbank from PCM
+ - type: wav_process
+ dither: 0.1
diff --git a/examples/librispeech/asr3/conf/tuning/decode.yaml b/examples/librispeech/asr3/conf/tuning/decode.yaml
new file mode 100644
index 000000000..c2261fb28
--- /dev/null
+++ b/examples/librispeech/asr3/conf/tuning/decode.yaml
@@ -0,0 +1,11 @@
+decode_batch_size: 1
+error_rate_type: wer
+decoding_method: ctc_greedy_search # 'attention', 'ctc_greedy_search', 'ctc_prefix_beam_search', 'attention_rescoring'
+beam_size: 10
+ctc_weight: 0.5 # ctc weight for attention rescoring decode mode.
+decoding_chunk_size: -1 # decoding chunk size. Defaults to -1.
+ # <0: for decoding, use full chunk.
+ # >0: for decoding, use fixed chunk size as set.
+ # 0: used for training, it's prohibited here.
+num_decoding_left_chunks: -1 # number of left chunks for decoding. Defaults to -1.
+simulate_streaming: False # simulate streaming inference. Defaults to False.
diff --git a/examples/librispeech/asr3/conf/wav2vec2ASR.yaml b/examples/librispeech/asr3/conf/wav2vec2ASR.yaml
new file mode 100644
index 000000000..63f5d37cc
--- /dev/null
+++ b/examples/librispeech/asr3/conf/wav2vec2ASR.yaml
@@ -0,0 +1,120 @@
+############################################
+# Network Architecture #
+############################################
+freeze_wav2vec2: False
+normalize_wav: True
+output_norm: True
+dnn_blocks: 2
+dnn_neurons: 1024
+blank_id: 0
+ctc_dropout_rate: 0.0
+wav2vec2_params_path: "exp/wav2vec2/wav2vec2-large-960h-lv60-self.pdparams"
+
+############################################
+# Wav2Vec2.0 #
+############################################
+vocab_size: 32
+hidden_size: 1024
+num_hidden_layers: 24
+num_attention_heads: 16
+intermediate_size: 4096
+hidden_act: "gelu"
+hidden_dropout: 0.1
+activation_dropout: 0.1
+attention_dropout: 0.1
+feat_proj_dropout: 0.1
+feat_quantizer_dropout: 0.0
+final_dropout: 0.1
+layerdrop: 0.1
+initializer_range: 0.02
+layer_norm_eps: 1e-5
+feat_extract_norm: "layer"
+feat_extract_activation: "gelu"
+conv_dim: [512, 512, 512, 512, 512, 512, 512]
+conv_stride: [5, 2, 2, 2, 2, 2, 2]
+conv_kernel: [10, 3, 3, 3, 3, 2, 2]
+conv_bias: True
+num_conv_pos_embeddings: 128
+num_conv_pos_embedding_groups: 16
+do_stable_layer_norm: True
+apply_spec_augment: False
+mask_time_prob: 0.05
+mask_time_length: 10
+mask_time_min_masks: 2
+mask_feature_prob: 0.0
+mask_feature_length: 10
+mask_feature_min_masks: 0
+num_codevectors_per_group: 320
+num_codevector_groups: 2
+contrastive_logits_temperature: 0.1
+num_negatives: 100
+codevector_dim: 256
+proj_codevector_dim: 256
+diversity_loss_weight: 0.1
+ctc_loss_reduction: "sum"
+ctc_zero_infinity: False
+use_weighted_layer_sum: False
+pad_token_id: 0
+bos_token_id: 1
+eos_token_id: 2
+add_adapter: False
+adapter_kernel_size: 3
+adapter_stride: 2
+num_adapter_layers: 3
+output_hidden_size: None
+
+###########################################
+# Data #
+###########################################
+train_manifest: data/manifest.train
+dev_manifest: data/manifest.dev
+test_manifest: data/manifest.test-clean
+
+
+###########################################
+# Dataloader #
+###########################################
+vocab_filepath: data/lang_char/vocab.txt
+unit_type: 'char'
+mean_std_filepath: ""
+preprocess_config: conf/preprocess.yaml
+sortagrad: -1 # Feed samples from shortest to longest; -1: enabled for all epochs, 0: disabled, other: enabled for 'other' epochs
+batch_size: 10 # Different batch_size may cause large differences in results
+maxlen_in: 51200000000 # if input length > maxlen_in, the batch size is automatically reduced
+maxlen_out: 1500000 # if output length > maxlen_out, the batch size is automatically reduced
+minibatches: 0 # for debug
+batch_count: auto
+batch_bins: 0
+batch_frames_in: 0
+batch_frames_out: 0
+batch_frames_inout: 0
+num_workers: 0
+subsampling_factor: 1
+num_encs: 1
+dist_sampler: True
+shortest_first: True
+return_lens_rate: True
+
+
+###########################################
+# Training #
+###########################################
+n_epoch: 1
+accum_grad: 1
+global_grad_clip: 3.0
+model_optim: adadelta
+model_optim_conf:
+ lr: 0.9
+ epsilon: 1.0e-6
+ rho: 0.95
+scheduler: constantlr
+scheduler_conf:
+ warmup_steps: 25000
+ lr_decay: 1.0
+log_interval: 1
+checkpoint:
+ kbest_n: 50
+ latest_n: 5
+augment: True
+
+
diff --git a/examples/librispeech/asr3/local/test.sh b/examples/librispeech/asr3/local/test.sh
new file mode 100644
index 000000000..ccc0d84de
--- /dev/null
+++ b/examples/librispeech/asr3/local/test.sh
@@ -0,0 +1,84 @@
+#!/bin/bash
+
+set -e
+
+ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
+echo "using $ngpu gpus..."
+
+expdir=exp
+datadir=data
+
+train_set=train_960
+recog_set="test-clean test-other dev-clean dev-other"
+recog_set="test-clean"
+
+config_path=$1
+decode_config_path=$2
+ckpt_prefix=$3
+
+source ${MAIN_ROOT}/utils/parse_options.sh || exit 1;
+
+# download language model
+#bash local/download_lm_en.sh
+#if [ $? -ne 0 ]; then
+# exit 1
+#fi
+
+python3 utils/format_rsl.py \
+ --origin_ref data/manifest.test-clean.raw \
+ --trans_ref data/manifest.test-clean.text
+
+
+for type in ctc_greedy_search; do
+ echo "decoding ${type}"
+ batch_size=16
+ python3 -u ${BIN_DIR}/test.py \
+ --ngpu ${ngpu} \
+ --config ${config_path} \
+ --decode_cfg ${decode_config_path} \
+ --result_file ${ckpt_prefix}.${type}.rsl \
+ --checkpoint_path ${ckpt_prefix} \
+ --opts decode.decoding_method ${type} \
+ --opts decode.decode_batch_size ${batch_size}
+
+ if [ $? -ne 0 ]; then
+ echo "Failed in evaluation!"
+ exit 1
+ fi
+ python3 utils/format_rsl.py \
+ --origin_hyp ${ckpt_prefix}.${type}.rsl \
+ --trans_hyp ${ckpt_prefix}.${type}.rsl.text
+
+ python3 utils/compute-wer.py --char=1 --v=1 \
+ data/manifest.test-clean.text ${ckpt_prefix}.${type}.rsl.text > ${ckpt_prefix}.${type}.error
+ echo "decoding ${type} done."
+done
+
+for type in ctc_prefix_beam_search; do
+ echo "decoding ${type}"
+ batch_size=1
+ python3 -u ${BIN_DIR}/test.py \
+ --ngpu ${ngpu} \
+ --config ${config_path} \
+ --decode_cfg ${decode_config_path} \
+ --result_file ${ckpt_prefix}.${type}.rsl \
+ --checkpoint_path ${ckpt_prefix} \
+ --opts decode.decoding_method ${type} \
+ --opts decode.decode_batch_size ${batch_size}
+
+ if [ $? -ne 0 ]; then
+ echo "Failed in evaluation!"
+ exit 1
+ fi
+ python3 utils/format_rsl.py \
+ --origin_hyp ${ckpt_prefix}.${type}.rsl \
+ --trans_hyp ${ckpt_prefix}.${type}.rsl.text
+
+ python3 utils/compute-wer.py --char=1 --v=1 \
+ data/manifest.test-clean.text ${ckpt_prefix}.${type}.rsl.text > ${ckpt_prefix}.${type}.error
+ echo "decoding ${type} done."
+done
+
+echo "Finished"
+
+exit 0
diff --git a/examples/librispeech/asr3/local/test_wav.sh b/examples/librispeech/asr3/local/test_wav.sh
new file mode 100644
index 000000000..fdf3589f4
--- /dev/null
+++ b/examples/librispeech/asr3/local/test_wav.sh
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+if [ $# != 4 ];then
+ echo "usage: ${0} config_path decode_config_path ckpt_path_prefix audio_file"
+ exit -1
+fi
+
+ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
+echo "using $ngpu gpus..."
+
+config_path=$1
+decode_config_path=$2
+ckpt_prefix=$3
+audio_file=$4
+
+mkdir -p data
+wget -nc https://paddlespeech.bj.bcebos.com/datasets/single_wav/en/demo_002_en.wav -P data/
+if [ $? -ne 0 ]; then
+ exit 1
+fi
+
+if [ ! -f ${audio_file} ]; then
+ echo "Plase input the right audio_file path"
+ exit 1
+fi
+
+chunk_mode=false
+if [[ ${config_path} =~ ^.*chunk_.*yaml$ ]];then
+ chunk_mode=true
+fi
+
+# download language model
+#bash local/download_lm_ch.sh
+#if [ $? -ne 0 ]; then
+# exit 1
+#fi
+
+for type in ctc_greedy_search; do
+ echo "decoding ${type}"
+ batch_size=1
+ output_dir=${ckpt_prefix}
+ mkdir -p ${output_dir}
+ python3 -u ${BIN_DIR}/test_wav.py \
+ --ngpu ${ngpu} \
+ --config ${config_path} \
+ --decode_cfg ${decode_config_path} \
+ --result_file ${output_dir}/${type}.rsl \
+ --checkpoint_path ${ckpt_prefix} \
+ --opts decode.decoding_method ${type} \
+ --opts decode.decode_batch_size ${batch_size} \
+ --audio_file ${audio_file}
+
+ if [ $? -ne 0 ]; then
+ echo "Failed in evaluation!"
+ exit 1
+ fi
+done
+exit 0
diff --git a/examples/librispeech/asr3/local/train.sh b/examples/librispeech/asr3/local/train.sh
new file mode 100644
index 000000000..6913ed17e
--- /dev/null
+++ b/examples/librispeech/asr3/local/train.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+if [ $# -lt 2 ] || [ $# -gt 3 ];then
+ echo "usage: CUDA_VISIBLE_DEVICES=0 ${0} config_path ckpt_name ips(optional)"
+ exit -1
+fi
+
+ngpu=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
+echo "using $ngpu gpus..."
+
+config_path=$1
+ckpt_name=$2
+ips=$3
+
+if [ ! $ips ];then
+ ips_config=
+else
+ ips_config="--ips="${ips}
+fi
+
+mkdir -p exp
+
+# seed may break model convergence
+seed=1998
+if [ ${seed} != 0 ]; then
+ export FLAGS_cudnn_deterministic=True
+fi
+
+# export FLAGS_cudnn_exhaustive_search=true
+# export FLAGS_conv_workspace_size_limit=4000
+export FLAGS_allocator_strategy=naive_best_fit
+if [ ${ngpu} == 0 ]; then
+python3 -u ${BIN_DIR}/train.py \
+--ngpu ${ngpu} \
+--config ${config_path} \
+--output exp/${ckpt_name} \
+--seed ${seed}
+else
+python3 -m paddle.distributed.launch --gpus=${CUDA_VISIBLE_DEVICES} ${ips_config} ${BIN_DIR}/train.py \
+--ngpu ${ngpu} \
+--config ${config_path} \
+--output exp/${ckpt_name} \
+--seed ${seed}
+fi
+
+if [ ${seed} != 0 ]; then
+ unset FLAGS_cudnn_deterministic
+fi
+
+if [ $? -ne 0 ]; then
+ echo "Failed in training!"
+ exit 1
+fi
+
+exit 0
diff --git a/examples/librispeech/asr3/path.sh b/examples/librispeech/asr3/path.sh
new file mode 100644
index 000000000..f47178382
--- /dev/null
+++ b/examples/librispeech/asr3/path.sh
@@ -0,0 +1,15 @@
+export MAIN_ROOT=`realpath ${PWD}/../../../`
+
+export PATH=${MAIN_ROOT}:${MAIN_ROOT}/tools/sctk/bin:${PWD}/utils:${PATH}
+export LC_ALL=C
+
+export PYTHONDONTWRITEBYTECODE=1
+# Use UTF-8 in Python to avoid UnicodeDecodeError when LC_ALL=C
+export PYTHONIOENCODING=UTF-8
+export PYTHONPATH=${MAIN_ROOT}:${PYTHONPATH}
+
+export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib/
+
+
+MODEL=wav2vec2
+export BIN_DIR=${MAIN_ROOT}/paddlespeech/s2t/exps/${MODEL}/bin
diff --git a/examples/librispeech/asr3/run.sh b/examples/librispeech/asr3/run.sh
new file mode 100644
index 000000000..55b2ca86d
--- /dev/null
+++ b/examples/librispeech/asr3/run.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+set -e
+
+. ./path.sh || exit 1;
+. ./cmd.sh || exit 1;
+
+gpus=0
+stage=0
+stop_stage=0
+conf_path=conf/wav2vec2ASR.yaml
+ips= #xx.xx.xx.xx,xx.xx.xx.xx
+decode_conf_path=conf/tuning/decode.yaml
+avg_num=1
+dict_path=data/lang_char/vocab.txt
+
+. ${MAIN_ROOT}/utils/parse_options.sh || exit 1;
+
+audio_file=data/demo_002_en.wav
+
+avg_ckpt=avg_${avg_num}
+ckpt=$(basename ${conf_path} | awk -F'.' '{print $1}')
+echo "checkpoint name ${ckpt}"
+
+if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
+ # prepare data
+ bash ./local/data.sh || exit -1
+fi
+
+if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
+ # train model, all `ckpt` under `exp` dir
+ CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${ckpt} ${ips}
+fi
+
+if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
+ # avg n best model
+ avg.sh best exp/${ckpt}/checkpoints ${avg_num}
+fi
+
+
+if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
+    # test ckpt avg_n
+ CUDA_VISIBLE_DEVICES=${gpus} ./local/test.sh ${conf_path} ${decode_conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} || exit -1
+fi
+
+if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
+ # test a single .wav file
+ CUDA_VISIBLE_DEVICES=${gpus} ./local/test_wav.sh ${conf_path} ${decode_conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} ${audio_file} || exit -1
+fi
diff --git a/examples/librispeech/asr3/utils b/examples/librispeech/asr3/utils
new file mode 120000
index 000000000..973afe674
--- /dev/null
+++ b/examples/librispeech/asr3/utils
@@ -0,0 +1 @@
+../../../utils
\ No newline at end of file
diff --git a/paddlespeech/s2t/exps/wav2vec2/bin/__init__.py b/paddlespeech/s2t/exps/wav2vec2/bin/__init__.py
new file mode 100644
index 000000000..185a92b8d
--- /dev/null
+++ b/paddlespeech/s2t/exps/wav2vec2/bin/__init__.py
@@ -0,0 +1,13 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/paddlespeech/s2t/exps/wav2vec2/bin/test.py b/paddlespeech/s2t/exps/wav2vec2/bin/test.py
new file mode 100644
index 000000000..4fa224c33
--- /dev/null
+++ b/paddlespeech/s2t/exps/wav2vec2/bin/test.py
@@ -0,0 +1,66 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Evaluation for wav2vec2.0 model."""
+import cProfile
+
+from yacs.config import CfgNode
+
+from paddlespeech.s2t.exps.wav2vec2.model import Wav2Vec2ASRTester as Tester
+from paddlespeech.s2t.training.cli import default_argument_parser
+from paddlespeech.s2t.utils.utility import print_arguments
+
+# TODO(hui zhang): dynamic load
+
+
+def main_sp(config, args):
+ exp = Tester(config, args)
+ with exp.eval():
+ exp.setup()
+ exp.run_test()
+
+
+def main(config, args):
+ main_sp(config, args)
+
+
+if __name__ == "__main__":
+ parser = default_argument_parser()
+    # save asr result to the file given by --result_file
+    parser.add_argument(
+        '--dict-path', type=str, default=None, help='dict path.')
+    parser.add_argument(
+        "--result_file", type=str, help="path to save the asr result")
+ args = parser.parse_args()
+ print_arguments(args, globals())
+
+ # https://yaml.org/type/float.html
+ config = CfgNode(new_allowed=True)
+ if args.config:
+ config.merge_from_file(args.config)
+ if args.decode_cfg:
+ decode_confs = CfgNode(new_allowed=True)
+ decode_confs.merge_from_file(args.decode_cfg)
+ config.decode = decode_confs
+ if args.opts:
+ config.merge_from_list(args.opts)
+ config.freeze()
+ print(config)
+ if args.dump_config:
+ with open(args.dump_config, 'w') as f:
+ print(config, file=f)
+
+ # Setting for profiling
+ pr = cProfile.Profile()
+ pr.runcall(main, config, args)
+ pr.dump_stats('test.profile')
diff --git a/paddlespeech/s2t/exps/wav2vec2/bin/test_wav.py b/paddlespeech/s2t/exps/wav2vec2/bin/test_wav.py
new file mode 100644
index 000000000..5306d7f81
--- /dev/null
+++ b/paddlespeech/s2t/exps/wav2vec2/bin/test_wav.py
@@ -0,0 +1,118 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Evaluation for wav2vec2.0 model."""
+import os
+import sys
+from pathlib import Path
+
+import paddle
+import soundfile
+from yacs.config import CfgNode
+
+from paddlespeech.s2t.frontend.featurizer.text_featurizer import TextFeaturizer
+from paddlespeech.s2t.models.wav2vec2.wav2vec2_ASR import Wav2vec2ASR
+from paddlespeech.s2t.training.cli import default_argument_parser
+from paddlespeech.s2t.utils.log import Log
+from paddlespeech.s2t.utils.utility import UpdateConfig
+logger = Log(__name__).getlog()
+
+class Wav2vec2Infer():
+ def __init__(self, config, args):
+ self.args = args
+ self.config = config
+ self.audio_file = args.audio_file
+
+ self.text_feature = TextFeaturizer(
+ unit_type=config.unit_type,
+ vocab=config.vocab_filepath)
+ paddle.set_device('gpu' if self.args.ngpu > 0 else 'cpu')
+
+ # model
+ model_conf = config
+ with UpdateConfig(model_conf):
+ model_conf.output_dim = self.text_feature.vocab_size
+ model = Wav2vec2ASR.from_config(model_conf)
+ self.model = model
+ self.model.eval()
+
+ # load model
+ params_path = self.args.checkpoint_path + ".pdparams"
+ model_dict = paddle.load(params_path)
+ self.model.set_state_dict(model_dict)
+
+ def run(self):
+        check(self.audio_file)
+
+ with paddle.no_grad():
+ # read
+ audio, _ = soundfile.read(
+ self.audio_file, dtype="int16", always_2d=True)
+ logger.info(f"audio shape: {audio.shape}")
+
+ xs = paddle.to_tensor(audio, dtype='float32').unsqueeze(axis=0)
+ decode_config = self.config.decode
+ result_transcripts, result_tokenids = self.model.decode(
+ xs,
+ text_feature=self.text_feature,
+ decoding_method=decode_config.decoding_method,
+ beam_size=decode_config.beam_size)
+ rsl = result_transcripts[0]
+ utt = Path(self.audio_file).name
+ logger.info(f"hyp: {utt} {rsl}")
+ return rsl
+
+
+def check(audio_file):
+ if not os.path.isfile(audio_file):
+ print("Please input the right audio file path")
+ sys.exit(-1)
+
+ logger.info("checking the audio file format......")
+ try:
+ sig, sample_rate = soundfile.read(audio_file)
+ except Exception as e:
+ logger.error(str(e))
+ logger.error(
+ "can not open the wav file, please check the audio file format")
+ sys.exit(-1)
+ logger.info("The sample rate is %d" % sample_rate)
+ assert (sample_rate == 16000)
+ logger.info("The audio file format is right")
+
+
+def main(config, args):
+ Wav2vec2Infer(config, args).run()
+
+
+if __name__ == "__main__":
+ parser = default_argument_parser()
+    # save asr result to the file given by --result_file
+    parser.add_argument(
+        "--result_file", type=str, help="path to save the asr result")
+ parser.add_argument(
+ "--audio_file", type=str, help="path of the input audio file")
+ args = parser.parse_args()
+
+ config = CfgNode(new_allowed=True)
+
+ if args.config:
+ config.merge_from_file(args.config)
+ if args.decode_cfg:
+ decode_confs = CfgNode(new_allowed=True)
+ decode_confs.merge_from_file(args.decode_cfg)
+ config.decode = decode_confs
+ if args.opts:
+ config.merge_from_list(args.opts)
+ config.freeze()
+ main(config, args)
diff --git a/paddlespeech/s2t/exps/wav2vec2/bin/train.py b/paddlespeech/s2t/exps/wav2vec2/bin/train.py
new file mode 100644
index 000000000..b2edecca1
--- /dev/null
+++ b/paddlespeech/s2t/exps/wav2vec2/bin/train.py
@@ -0,0 +1,54 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Trainer for wav2vec2.0 model."""
+import cProfile
+import os
+
+from yacs.config import CfgNode
+
+from paddlespeech.s2t.exps.wav2vec2.model import Wav2Vec2ASRTrainer as Trainer
+from paddlespeech.s2t.training.cli import default_argument_parser
+from paddlespeech.s2t.utils.utility import print_arguments
+
+
+def main_sp(config, args):
+ exp = Trainer(config, args)
+ exp.setup()
+ exp.run()
+
+
+def main(config, args):
+ main_sp(config, args)
+
+
+if __name__ == "__main__":
+ parser = default_argument_parser()
+ args = parser.parse_args()
+ print_arguments(args, globals())
+
+ # https://yaml.org/type/float.html
+ config = CfgNode(new_allowed=True)
+ if args.config:
+ config.merge_from_file(args.config)
+ if args.opts:
+ config.merge_from_list(args.opts)
+ config.freeze()
+ if args.dump_config:
+ with open(args.dump_config, 'w') as f:
+ print(config, file=f)
+
+ # Setting for profiling
+ pr = cProfile.Profile()
+ pr.runcall(main, config, args)
+ pr.dump_stats(os.path.join(args.output, 'train.profile'))
diff --git a/paddlespeech/s2t/exps/wav2vec2/model.py b/paddlespeech/s2t/exps/wav2vec2/model.py
new file mode 100644
index 000000000..3d9c266e7
--- /dev/null
+++ b/paddlespeech/s2t/exps/wav2vec2/model.py
@@ -0,0 +1,435 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Contains wav2vec2 model."""
+import json
+import os
+import time
+from collections import defaultdict
+from collections import OrderedDict
+from contextlib import nullcontext
+from paddlespeech.s2t.utils import mp_tools
+
+import jsonlines
+import numpy as np
+import paddle
+from paddle import distributed as dist
+from paddlespeech.s2t.frontend.featurizer import TextFeaturizer
+from paddlespeech.s2t.io.dataloader import BatchDataLoader
+from paddlespeech.s2t.io.dataloader import StreamDataLoader
+from paddlespeech.s2t.io.dataloader import DataLoaderFactory
+from paddlespeech.s2t.models.wav2vec2.wav2vec2_ASR import Wav2vec2ASR
+from paddlespeech.s2t.models.wav2vec2.processing.speech_augmentation import TimeDomainSpecAugment
+from paddlespeech.s2t.utils import error_rate
+
+from paddlespeech.s2t.training.optimizer import OptimizerFactory
+from paddlespeech.s2t.training.reporter import ObsScope
+from paddlespeech.s2t.training.reporter import report
+from paddlespeech.s2t.training.scheduler import LRSchedulerFactory
+from paddlespeech.s2t.training.timer import Timer
+from paddlespeech.s2t.training.trainer import Trainer
+from paddlespeech.s2t.utils.utility import UpdateConfig
+from paddlespeech.s2t.utils import layer_tools
+from paddlespeech.s2t.utils.log import Log
+
+
+
+logger = Log(__name__).getlog()
+
+class Wav2Vec2ASRTrainer(Trainer):
+ def __init__(self, config, args):
+ super().__init__(config, args)
+ self.avg_train_loss = 0
+ def train_batch(self, batch_index, batch, msg):
+ train_conf = self.config
+ start = time.time()
+
+ # forward
+ utt, wav, wavs_lens, target, target_lens = batch
+ wavs_lens_rate = wavs_lens / wav.shape[1]
+ target_lens_rate = target_lens / target.shape[1]
+ wav = wav[:,:,0]
+ wav = self.speech_augmentation(wav, wavs_lens_rate)
+ loss = self.model(wav, wavs_lens_rate, target, target_lens_rate)
+        # scale the loss for gradient accumulation; gradients are accumulated over `accum_grad` batches
+        loss /= train_conf.accum_grad
+
+ losses_np = {'loss': float(loss) * train_conf.accum_grad}
+
+ # loss backward
+ if (batch_index + 1) % train_conf.accum_grad != 0:
+ # Disable gradient synchronizations across DDP processes.
+ # Within this context, gradients will be accumulated on module
+ # variables, which will later be synchronized.
+ # When using cpu w/o DDP, model does not have `no_sync`
+ context = self.model.no_sync if (hasattr(self.model, "no_sync") and
+ self.parallel) else nullcontext
+ else:
+ # Used for single gpu training and DDP gradient synchronization
+ # processes.
+ context = nullcontext
+ with context():
+ loss.backward()
+ layer_tools.print_grads(self.model, print_func=None)
+
+ # optimizer step old
+ if (batch_index + 1) % train_conf.accum_grad == 0:
+ self.optimizer.step()
+ self.optimizer.clear_grad()
+ self.lr_scheduler.step()
+ self.iteration += 1
+ iteration_time = time.time() - start
+ for k, v in losses_np.items():
+ report(k, v)
+ report("batch_size", self.config.batch_size)
+ report("accum", train_conf.accum_grad)
+ report("step_cost", iteration_time)
+
+ if (batch_index + 1) % train_conf.accum_grad == 0:
+ if dist.get_rank() == 0 and self.visualizer:
+ losses_np_v = losses_np.copy()
+ losses_np_v.update({"lr": self.lr_scheduler()})
+ for key, val in losses_np_v.items():
+ self.visualizer.add_scalar(
+ tag='train/' + key, value=val, step=self.iteration - 1)
+
+ @paddle.no_grad()
+ def valid(self):
+ self.model.eval()
+ if not self.use_streamdata:
+ logger.info(f"Valid Total Examples: {len(self.valid_loader.dataset)}")
+ valid_losses = defaultdict(list)
+ num_seen_utts = 1
+ total_loss = 0.0
+ for i, batch in enumerate(self.valid_loader):
+ utt, wav, wavs_lens, target, target_lens = batch
+ wavs_lens_rate = wavs_lens / wav.shape[1]
+ target_lens_rate = target_lens / target.shape[1]
+ wav = wav[:,:,0]
+ loss = self.model(wav, wavs_lens_rate, target, target_lens_rate)
+
+ if paddle.isfinite(loss):
+ num_utts = batch[1].shape[0]
+ num_seen_utts += num_utts
+ total_loss += float(loss) * num_utts
+ valid_losses['val_loss'].append(float(loss))
+
+ if (i + 1) % self.config.log_interval == 0:
+ valid_dump = {k: np.mean(v) for k, v in valid_losses.items()}
+ valid_dump['val_history_loss'] = total_loss / num_seen_utts
+
+ # logging
+ msg = f"Valid: Rank: {dist.get_rank()}, "
+ msg += "epoch: {}, ".format(self.epoch)
+ msg += "step: {}, ".format(self.iteration)
+ if not self.use_streamdata:
+ msg += "batch: {}/{}, ".format(i + 1, len(self.valid_loader))
+ msg += ', '.join('{}: {:>.6f}'.format(k, v)
+ for k, v in valid_dump.items())
+ logger.info(msg)
+
+ logger.info('Rank {} Val info val_loss {}'.format(
+ dist.get_rank(), total_loss / num_seen_utts))
+ return total_loss, num_seen_utts
+
+ def do_train(self):
+ """The training process control by step."""
+ # !!!IMPORTANT!!!
+ # Try to export the model by script, if fails, we should refine
+ # the code to satisfy the script export requirements
+ # script_model = paddle.jit.to_static(self.model)
+ # script_model_path = str(self.checkpoint_dir / 'init')
+ # paddle.jit.save(script_model, script_model_path)
+
+ self.before_train()
+
+ if not self.use_streamdata:
+ logger.info(f"Train Total Examples: {len(self.train_loader.dataset)}")
+ while self.epoch < self.config.n_epoch:
+ with Timer("Epoch-Train Time Cost: {}"):
+ self.model.train()
+ try:
+ data_start_time = time.time()
+ for batch_index, batch in enumerate(self.train_loader):
+ dataload_time = time.time() - data_start_time
+ msg = "Train:"
+ observation = OrderedDict()
+ with ObsScope(observation):
+ report("Rank", dist.get_rank())
+ report("epoch", self.epoch)
+ report('step', self.iteration)
+ report("lr", self.lr_scheduler())
+ self.train_batch(batch_index, batch, msg)
+ self.after_train_batch()
+ report('iter', batch_index + 1)
+ if not self.use_streamdata:
+ report('total', len(self.train_loader))
+ report('reader_cost', dataload_time)
+ observation['batch_cost'] = observation[
+ 'reader_cost'] + observation['step_cost']
+ observation['samples'] = observation['batch_size']
+ observation['ips,samples/s'] = observation[
+ 'batch_size'] / observation['batch_cost']
+ for k, v in observation.items():
+ msg += f" {k.split(',')[0]}: "
+ msg += f"{v:>.8f}" if isinstance(v,
+ float) else f"{v}"
+ msg += f" {k.split(',')[1]}" if len(
+ k.split(',')) == 2 else ""
+ msg += ","
+ msg = msg[:-1] # remove the last ","
+ if (batch_index + 1) % self.config.log_interval == 0:
+ logger.info(msg)
+ data_start_time = time.time()
+ except Exception as e:
+ logger.error(e)
+ raise e
+ with Timer("Eval Time Cost: {}"):
+ total_loss, num_seen_utts = self.valid()
+ if dist.get_world_size() > 1:
+ num_seen_utts = paddle.to_tensor(num_seen_utts)
+ # the default operator in all_reduce function is sum.
+ dist.all_reduce(num_seen_utts)
+ total_loss = paddle.to_tensor(total_loss)
+ dist.all_reduce(total_loss)
+ cv_loss = total_loss / num_seen_utts
+ cv_loss = float(cv_loss)
+ else:
+ cv_loss = total_loss / num_seen_utts
+
+ logger.info(
+ 'Epoch {} Val info val_loss {}'.format(self.epoch, cv_loss))
+ if self.visualizer:
+ self.visualizer.add_scalar(
+ tag='eval/cv_loss', value=cv_loss, step=self.epoch)
+ self.visualizer.add_scalar(
+ tag='eval/lr', value=self.lr_scheduler(), step=self.epoch)
+
+ self.save(tag=self.epoch, infos={'val_loss': cv_loss})
+ self.new_epoch()
+
+ def setup_dataloader(self):
+ config = self.config.clone()
+ self.use_streamdata = config.get("use_stream_data", False)
+ if self.train:
+ self.train_loader = DataLoaderFactory.get_dataloader('train', config, self.args)
+ self.valid_loader = DataLoaderFactory.get_dataloader('valid', config, self.args)
+ logger.info("Setup train/valid Dataloader!")
+ else:
+ decode_batch_size = config.get('decode', dict()).get(
+ 'decode_batch_size', 1)
+ self.test_loader = DataLoaderFactory.get_dataloader('test', config, self.args)
+ self.align_loader = DataLoaderFactory.get_dataloader('align', config, self.args)
+ logger.info("Setup test/align Dataloader!")
+
+ def setup_model(self):
+ config = self.config
+ model_conf = config
+
+ with UpdateConfig(model_conf):
+ if self.train:
+ model_conf.input_dim = self.train_loader.feat_dim
+ model_conf.output_dim = self.train_loader.vocab_size
+ else:
+ model_conf.input_dim = self.test_loader.feat_dim
+ model_conf.output_dim = self.test_loader.vocab_size
+
+ model = Wav2vec2ASR.from_config(model_conf)
+
+ if self.parallel:
+ model = paddle.DataParallel(model, find_unused_parameters=True)
+
+ logger.info(f"{model}")
+ layer_tools.print_params(model, logger.info)
+ self.model = model
+ logger.info("Setup model!")
+
+ # setup speech augmentation for wav2vec2
+ self.speech_augmentation = TimeDomainSpecAugment()
+
+ if not self.train:
+ return
+
+ train_config = config
+ optim_type = train_config.model_optim
+ optim_conf = train_config.model_optim_conf
+ scheduler_type = train_config.scheduler
+ scheduler_conf = train_config.scheduler_conf
+
+ scheduler_args = {
+ "learning_rate": optim_conf.lr,
+ "verbose": False,
+ "warmup_steps": scheduler_conf.warmup_steps,
+ "gamma": scheduler_conf.lr_decay,
+ "d_model": model_conf.dnn_neurons,
+ }
+ lr_scheduler = LRSchedulerFactory.from_args(scheduler_type,
+ scheduler_args)
+
+ def optimizer_args(
+ config,
+ parameters,
+ lr_scheduler=None, ):
+ train_config = config
+ optim_type = train_config.model_optim
+ optim_conf = train_config.model_optim_conf
+ scheduler_type = train_config.scheduler
+ scheduler_conf = train_config.scheduler_conf
+ return {
+ "grad_clip": train_config.global_grad_clip,
+ "learning_rate": lr_scheduler
+ if lr_scheduler else optim_conf.lr,
+ "epsilon": optim_conf.epsilon,
+ "rho": optim_conf.rho,
+ "parameters": parameters,
+ "epsilon": 1e-9 if optim_type == 'noam' else None,
+ "beta1": 0.9 if optim_type == 'noam' else None,
+ "beat2": 0.98 if optim_type == 'noam' else None,
+ }
+
+ optimzer_args = optimizer_args(config, model.parameters(), lr_scheduler)
+ optimizer = OptimizerFactory.from_args(optim_type, optimzer_args)
+
+ self.optimizer = optimizer
+ self.lr_scheduler = lr_scheduler
+ logger.info("Setup optimizer/lr_scheduler!")
+
+
+class Wav2Vec2ASRTester(Wav2Vec2ASRTrainer):
+ def __init__(self, config, args):
+ super().__init__(config, args)
+ self.text_featurizer = TextFeaturizer(
+ unit_type=config.unit_type, vocab=config.vocab_filepath)
+ self.vocab_list = self.text_featurizer.vocab_list
+ def id2token(self, texts, texts_len):
+ """ ord() id to chr() chr """
+ trans = []
+ for text, n in zip(texts, texts_len):
+ n = n.numpy().item()
+ ids = text[:n]
+ trans.append(
+ self.text_featurizer.defeaturize(ids.numpy().tolist()))
+ return trans
+
+ def compute_metrics(self,
+ utts,
+ audio,
+ audio_len,
+ texts,
+ texts_len,
+ fout=None):
+ decode_cfg = self.config.decode
+ errors_sum, len_refs, num_ins = 0.0, 0, 0
+ errors_func = error_rate.char_errors if decode_cfg.error_rate_type == 'cer' else error_rate.word_errors
+ error_rate_func = error_rate.cer if decode_cfg.error_rate_type == 'cer' else error_rate.wer
+
+ start_time = time.time()
+ target_transcripts = self.id2token(texts, texts_len)
+ result_transcripts, result_tokenids = self.model.decode(
+ audio,
+ text_feature=self.text_featurizer,
+ decoding_method=decode_cfg.decoding_method,
+ beam_size=decode_cfg.beam_size)
+ decode_time = time.time() - start_time
+
+ for utt, target, result, rec_tids in zip(
+ utts, target_transcripts, result_transcripts, result_tokenids):
+ errors, len_ref = errors_func(target, result)
+ errors_sum += errors
+ len_refs += len_ref
+ num_ins += 1
+ if fout:
+ fout.write({
+ "utt": utt,
+ "refs": [target],
+ "hyps": [result],
+ "hyps_tokenid": [rec_tids],
+ })
+ logger.info(f"Utt: {utt}")
+ logger.info(f"Ref: {target}")
+ logger.info(f"Hyp: {result}")
+ logger.info("One example error rate [%s] = %f" % (
+ decode_cfg.error_rate_type, error_rate_func(target, result)))
+
+ return dict(
+ errors_sum=errors_sum,
+ len_refs=len_refs,
+ num_ins=num_ins, # num examples
+ error_rate=errors_sum / len_refs,
+ error_rate_type=decode_cfg.error_rate_type,
+ num_frames=audio_len.sum().numpy().item(),
+ decode_time=decode_time)
+
+ @mp_tools.rank_zero_only
+ @paddle.no_grad()
+ def test(self):
+ logger.info(f"Test Total Examples: {len(self.test_loader.dataset)}")
+ self.model.eval()
+
+ error_rate_type = None
+ errors_sum, len_refs, num_ins = 0.0, 0, 0
+ num_frames = 0.0
+ num_time = 0.0
+ # Initialized the decoder in model
+ decode_cfg = self.config.decode
+ vocab_list = self.vocab_list
+ decode_batch_size = decode_cfg.decode_batch_size
+
+ with jsonlines.open(self.args.result_file, 'w') as fout:
+ for i, batch in enumerate(self.test_loader):
+ metrics = self.compute_metrics(*batch, fout=fout)
+ num_frames += metrics['num_frames']
+ num_time += metrics["decode_time"]
+ errors_sum += metrics['errors_sum']
+ len_refs += metrics['len_refs']
+ num_ins += metrics['num_ins']
+ error_rate_type = metrics['error_rate_type']
+ rtf = num_time / (num_frames)
+ logger.info(
+ "RTF: %f, Error rate [%s] (%d/?) = %f" %
+ (rtf, error_rate_type, num_ins, errors_sum / len_refs))
+
+ # logging
+ msg = "Test: "
+ msg += "epoch: {}, ".format(self.epoch)
+ msg += "step: {}, ".format(self.iteration)
+ msg += "Final error rate [%s] (%d/%d) = %f" % (
+ error_rate_type, num_ins, num_ins, errors_sum / len_refs)
+ logger.info(msg)
+
+ err_meta_path = os.path.splitext(self.args.result_file)[0] + '.err'
+ err_type_str = "{}".format(error_rate_type)
+ with open(err_meta_path, 'w') as f:
+ data = json.dumps({
+ "epoch":
+ self.epoch,
+ "step":
+ self.iteration,
+ "rtf":
+ rtf,
+ error_rate_type:
+ errors_sum / len_refs,
+ "dataset_hour": (num_frames) / 1000.0 / 3600.0,
+ "process_hour":
+ num_time / 1000.0 / 3600.0,
+ "num_examples":
+ num_ins,
+ "err_sum":
+ errors_sum,
+ "ref_len":
+ len_refs,
+ "decode_method":
+ self.config.decode.decoding_method,
+ })
+ f.write(data + '\n')
\ No newline at end of file
diff --git a/paddlespeech/s2t/models/wav2vec2/__init__.py b/paddlespeech/s2t/models/wav2vec2/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/paddlespeech/s2t/models/wav2vec2/modules/VanillaNN.py b/paddlespeech/s2t/models/wav2vec2/modules/VanillaNN.py
new file mode 100644
index 000000000..a8f5f5cb1
--- /dev/null
+++ b/paddlespeech/s2t/models/wav2vec2/modules/VanillaNN.py
@@ -0,0 +1,45 @@
+"""Vanilla Neural Network for simple tests.
+Authors
+* Elena Rastorgueva 2020
+"""
+import paddle
+from paddlespeech.s2t.models.wav2vec2.modules import containers
+from paddlespeech.s2t.models.wav2vec2.modules import linear
+
+
+class VanillaNN(containers.Sequential):
+ """A simple vanilla Deep Neural Network.
+ Arguments
+ ---------
+ activation : paddle class
+ A class used for constructing the activation layers.
+ dnn_blocks : int
+ The number of linear neural blocks to include.
+ dnn_neurons : int
+ The number of neurons in the linear layers.
+ Example
+ -------
+ >>> inputs = paddle.rand([10, 120, 60])
+ >>> model = VanillaNN(input_shape=inputs.shape)
+ >>> outputs = model(inputs)
+ >>> outputs.shape
+    [10, 120, 512]
+ """
+
+ def __init__(
+ self,
+ input_shape,
+ activation=paddle.nn.LeakyReLU,
+ dnn_blocks=2,
+ dnn_neurons=512,
+ ):
+ super().__init__(input_shape=input_shape)
+
+ for block_index in range(dnn_blocks):
+ self.append(
+ linear.Linear,
+ n_neurons=dnn_neurons,
+ bias=True,
+ layer_name="linear",
+ )
+ self.append(activation(), layer_name="act")
diff --git a/paddlespeech/s2t/models/wav2vec2/modules/activations.py b/paddlespeech/s2t/models/wav2vec2/modules/activations.py
new file mode 100644
index 000000000..9df652c23
--- /dev/null
+++ b/paddlespeech/s2t/models/wav2vec2/modules/activations.py
@@ -0,0 +1,175 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+
+import paddle
+from packaging import version
+from paddle import Tensor, nn
+
+
+from paddlespeech.s2t.utils.log import Log
+logger = Log(__name__).getlog()
+
+
+class NewGELUActivation(nn.Layer):
+ """
+ Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see
+ the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
+ """
+
+ def forward(self, input: Tensor) -> Tensor:
+ return 0.5 * input * (1.0 + paddle.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * paddle.pow(input, 3.0))))
+
+
+class GELUActivation(nn.Layer):
+ """
+ Original Implementation of the GELU activation function in Google BERT repo when initially created. For
+ information: OpenAI GPT's GELU is slightly different (and gives slightly different results): 0.5 * x * (1 +
+ paddle.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * paddle.pow(x, 3)))) This is now written in C in nn.functional
+ Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
+ """
+
+ def __init__(self, use_gelu_python: bool = False):
+ super().__init__()
+ self.act = nn.functional.gelu
+
+ def _gelu_python(self, input: Tensor) -> Tensor:
+ return input * 0.5 * (1.0 + paddle.erf(input / math.sqrt(2.0)))
+
+ def forward(self, input: Tensor) -> Tensor:
+ return self.act(input)
+
+
+class FastGELUActivation(nn.Layer):
+ """
+ Applies GELU approximation that is slower than QuickGELU but more accurate. See: https://github.com/hendrycks/GELUs
+ """
+
+ def forward(self, input: Tensor) -> Tensor:
+ return 0.5 * input * (1.0 + paddle.tanh(input * 0.7978845608 * (1.0 + 0.044715 * input * input)))
+
+
+class QuickGELUActivation(nn.Layer):
+ """
+ Applies GELU approximation that is fast but somewhat inaccurate. See: https://github.com/hendrycks/GELUs
+ """
+
+ def forward(self, input: Tensor) -> Tensor:
+ return input * paddle.sigmoid(1.702 * input)
+
+
+class ClippedGELUActivation(nn.Layer):
+ """
+ Clip the range of possible GeLU outputs between [min, max]. This is especially useful for quantization purpose, as
+ it allows mapping negatives values in the GeLU spectrum. For more information on this trick, please refer to
+ https://arxiv.org/abs/2004.09602.
+
+ Gaussian Error Linear Unit. Original Implementation of the gelu activation function in Google Bert repo when
+ initially created.
+
+ For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 +
+ paddle.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * paddle.pow(x, 3)))). See https://arxiv.org/abs/1606.08415
+ """
+
+ def __init__(self, min: float, max: float):
+ if min > max:
+ raise ValueError(f"min should be < max (got min: {min}, max: {max})")
+
+ super().__init__()
+ self.min = min
+ self.max = max
+
+ def forward(self, x: Tensor) -> Tensor:
+ return paddle.clip(gelu(x), self.min, self.max)
+
+
+class SiLUActivation(nn.Layer):
+ """
+ See Gaussian Error Linear Units (Hendrycks et al., https://arxiv.org/abs/1606.08415) where the SiLU (Sigmoid Linear
+ Unit) was originally introduced and coined, and see Sigmoid-Weighted Linear Units for Neural Network Function
+ Approximation in Reinforcement Learning (Elfwing et al., https://arxiv.org/abs/1702.03118) and Swish: a Self-Gated
+ Activation Function (Ramachandran et al., https://arxiv.org/abs/1710.05941v1) where the SiLU was experimented with
+ later.
+ """
+
+ def __init__(self):
+ super().__init__()
+ self.act = nn.functional.silu
+
+ def _silu_python(self, input: Tensor) -> Tensor:
+ return input * paddle.sigmoid(input)
+
+ def forward(self, input: Tensor) -> Tensor:
+ return self.act(input)
+
+
+class MishActivation(nn.Layer):
+ """
+ See Mish: A Self-Regularized Non-Monotonic Activation Function (Misra., https://arxiv.org/abs/1908.08681). Also
+ visit the official repository for the paper: https://github.com/digantamisra98/Mish
+ """
+
+ def __init__(self):
+ super().__init__()
+ self.act = nn.functional.mish
+
+ def _mish_python(self, input: Tensor) -> Tensor:
+ return input * paddle.tanh(nn.functional.softplus(input))
+
+ def forward(self, input: Tensor) -> Tensor:
+ return self.act(input)
+
+
+class LinearActivation(nn.Layer):
+ """
+ Applies the linear activation function, i.e. forwarding input directly to output.
+ """
+
+ def forward(self, input: Tensor) -> Tensor:
+ return input
+
+
+ACT2FN = {
+ "gelu": GELUActivation(),
+ "gelu_10": ClippedGELUActivation(-10, 10),
+ "gelu_fast": FastGELUActivation(),
+ "gelu_new": NewGELUActivation(),
+ "gelu_python": GELUActivation(use_gelu_python=True),
+ "linear": LinearActivation(),
+ "mish": MishActivation(),
+ "quick_gelu": QuickGELUActivation(),
+ "relu": nn.ReLU(),
+ "sigmoid": nn.Sigmoid(),
+ "silu": SiLUActivation(),
+ "swish": SiLUActivation(),
+ "tanh": nn.Tanh(),
+}
+
+
+def get_activation(activation_string):
+ if activation_string in ACT2FN:
+ return ACT2FN[activation_string]
+ else:
+ raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
+
+
+# For backwards compatibility with: from activations import gelu_python
+gelu_python = get_activation("gelu_python")
+gelu_new = get_activation("gelu_new")
+gelu = get_activation("gelu")
+gelu_fast = get_activation("gelu_fast")
+quick_gelu = get_activation("quick_gelu")
+silu = get_activation("silu")
+mish = get_activation("mish")
+linear_act = get_activation("linear")
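+
+
+# Usage sketch (illustrative): look up an activation by name and apply it, e.g.
+#   act = get_activation("gelu_new")
+#   y = act(paddle.randn([2, 8]))   # element-wise GELU approximation
+# Note that "silu" and "swish" both resolve to SiLUActivation.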
diff --git a/paddlespeech/s2t/models/wav2vec2/modules/containers.py b/paddlespeech/s2t/models/wav2vec2/modules/containers.py
new file mode 100644
index 000000000..2b961a59b
--- /dev/null
+++ b/paddlespeech/s2t/models/wav2vec2/modules/containers.py
@@ -0,0 +1,131 @@
+import inspect
+
+import paddle
+
+class Sequential(paddle.nn.LayerDict):
+ """A sequence of modules with potentially inferring shape on construction.
+ If layers are passed with names, these can be referenced with dot notation.
+ Arguments
+ ---------
+ input_shape : iterable
+ A list or tuple of ints or None, representing the expected shape of an
+ input tensor. None represents a variable-length dimension. If no
+ ``input_shape`` is passed, no shape inference will be performed.
+ *layers, **named_layers
+ The inputs are treated as a list of layers to be
+ applied in sequence. The output shape of each layer is used to
+ infer the shape of the following layer. If a tuple is returned,
+ only the shape of the first element is used to determine input
+ shape of the next layer (e.g. RNN returns output, hidden).
+ Example
+ -------
+    >>> inputs = paddle.rand([10, 40, 50])
+    >>> model = Sequential(input_shape=inputs.shape)
+    >>> model.append(Linear, n_neurons=100, layer_name="layer1")
+    >>> model.append(Linear, n_neurons=200, layer_name="layer2")
+    >>> outputs = model(inputs)
+    >>> outputs.shape
+    [10, 40, 200]
+    >>> outputs = model.layer1(inputs)
+    >>> outputs.shape
+    [10, 40, 100]
+ """
+
+ def __init__(self, *layers, input_shape=None, **named_layers):
+ super().__init__()
+
+ # Make sure either layers or input_shape is passed
+ if not layers and input_shape is None and not named_layers:
+ raise ValueError("Must pass either layers or input shape")
+
+ # Keep track of what layers need "lengths" passed
+ self.length_layers = []
+
+ # Replace None dimensions with arbitrary value
+ self.input_shape = input_shape
+ if input_shape and None in input_shape:
+ self.input_shape = list(input_shape)
+ for i, dim in enumerate(self.input_shape):
+
+ # To reduce size of dummy tensors, use 1 for batch dim
+ if i == 0 and dim is None:
+ dim = 1
+
+                # Use 256 as a nice round arbitrary value, big enough that
+                # halving this dimension a few times doesn't reach 1
+ self.input_shape[i] = dim or 256
+
+ # Append non-named layers
+ for layer in layers:
+ self.append(layer)
+
+ # Append named layers
+ for name, layer in named_layers.items():
+ self.append(layer, layer_name=name)
+
+ def append(self, layer, *args, layer_name=None, **kwargs):
+ """Add a layer to the list of layers, inferring shape if necessary.
+ Arguments
+ ---------
+        layer : A paddle.nn.Layer class or object
+ If the layer is a class, it should accept an argument called
+ ``input_shape`` which will be inferred and passed. If the layer
+ is a module object, it is added as-is.
+ layer_name : str
+ The name of the layer, for reference. If the name is in use,
+ ``_{count}`` will be appended.
+ *args, **kwargs
+ These are passed to the layer if it is constructed.
+ """
+
+ # Compute layer_name
+ if layer_name is None:
+ layer_name = str(len(self))
+ elif layer_name in self:
+ index = 0
+ while f"{layer_name}_{index}" in self:
+ index += 1
+ layer_name = f"{layer_name}_{index}"
+ # Check if it needs to be constructed with input shape
+ if self.input_shape:
+ argspec = inspect.getfullargspec(layer)
+ if "input_shape" in argspec.args + argspec.kwonlyargs:
+ input_shape = self.get_output_shape()
+ layer = layer(*args, input_shape=input_shape, **kwargs)
+
+ # Finally, append the layer.
+ try:
+ self[layer_name] = layer
+ except TypeError:
+ raise ValueError(
+ "Must pass `input_shape` at initialization and use "
+ "modules that take `input_shape` to infer shape when "
+ "using `append()`."
+ )
+
+ def get_output_shape(self):
+ """Returns expected shape of the output.
+ Computed by passing dummy input constructed with the
+ ``self.input_shape`` attribute.
+ """
+ with paddle.no_grad():
+ dummy_input = paddle.zeros(self.input_shape)
+ dummy_output = self(dummy_input)
+ return dummy_output.shape
+
+ def forward(self, x):
+ """Applies layers in sequence, passing only the first element of tuples.
+ Arguments
+ ---------
+ x : paddle.Tensor
+ The input tensor to run through the network.
+ """
+ for layer in self.values():
+ x = layer(x)
+ if isinstance(x, tuple):
+ x = x[0]
+
+ return x
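+
+
+# Usage sketch (illustrative): when ``input_shape`` is given, appending a layer
+# *class* that accepts ``input_shape`` lets Sequential infer its input size;
+# layer objects are added as-is, e.g.
+#   model = Sequential(input_shape=[None, 40, 50])
+#   model.append(paddle.nn.ReLU())       # module object, added unchanged
+#   model.get_output_shape()             # dummy forward pass -> [1, 40, 50]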
diff --git a/paddlespeech/s2t/models/wav2vec2/modules/linear.py b/paddlespeech/s2t/models/wav2vec2/modules/linear.py
new file mode 100644
index 000000000..26389d908
--- /dev/null
+++ b/paddlespeech/s2t/models/wav2vec2/modules/linear.py
@@ -0,0 +1,73 @@
+"""Library implementing linear transformation.
+Authors
+ * Mirco Ravanelli 2020
+ * Davide Borra 2021
+"""
+
+import logging
+import paddle
+import paddle.nn as nn
+from paddlespeech.s2t.modules import align
+
+logger = logging.getLogger(__name__)
+
+
+class Linear(paddle.nn.Layer):
+ """Computes a linear transformation y = wx + b.
+ Arguments
+ ---------
+ n_neurons : int
+        It is the number of output neurons (i.e., the dimensionality of the
+ output).
+ input_shape: tuple
+ It is the shape of the input tensor.
+ input_size: int
+ Size of the input tensor.
+ bias : bool
+ If True, the additive bias b is adopted.
+ combine_dims : bool
+ If True and the input is 4D, combine 3rd and 4th dimensions of input.
+ Example
+ -------
+    >>> inputs = paddle.rand([10, 50, 40])
+    >>> lin_t = Linear(input_shape=(10, 50, 40), n_neurons=100)
+    >>> output = lin_t(inputs)
+    >>> output.shape
+    [10, 50, 100]
+ """
+
+ def __init__(
+ self,
+ n_neurons,
+ input_shape=None,
+ input_size=None,
+ bias=True,
+ combine_dims=False,
+ ):
+ super().__init__()
+ self.combine_dims = combine_dims
+
+ if input_shape is None and input_size is None:
+ raise ValueError("Expected one of input_shape or input_size")
+
+ if input_size is None:
+ input_size = input_shape[-1]
+ if len(input_shape) == 4 and self.combine_dims:
+ input_size = input_shape[2] * input_shape[3]
+
+ # Weights are initialized following paddle approach
+ self.w = align.Linear(input_size, n_neurons, bias_attr=bias)
+
+ def forward(self, x):
+ """Returns the linear transformation of input tensor.
+ Arguments
+ ---------
+ x : paddle.Tensor
+ Input to transform linearly.
+ """
+        if x.ndim == 4 and self.combine_dims:
+            x = x.reshape([x.shape[0], x.shape[1], x.shape[2] * x.shape[3]])
+
+ wx = self.w(x)
+
+ return wx
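+
+
+# Usage sketch (illustrative): with ``combine_dims=True`` a 4-D input has its
+# last two axes flattened before the affine transform, e.g.
+#   lin = Linear(n_neurons=100, input_shape=(10, 50, 40, 2), combine_dims=True)
+#   y = lin(paddle.rand([10, 50, 40, 2]))   # -> shape [10, 50, 100]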
diff --git a/paddlespeech/s2t/models/wav2vec2/modules/modeling_outputs.py b/paddlespeech/s2t/models/wav2vec2/modules/modeling_outputs.py
new file mode 100644
index 000000000..a5b509b66
--- /dev/null
+++ b/paddlespeech/s2t/models/wav2vec2/modules/modeling_outputs.py
@@ -0,0 +1,1129 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from dataclasses import dataclass
+from typing import Optional, Tuple
+from collections import OrderedDict
+
+from dataclasses import fields
+import paddle
+
+
+class ModelOutput(OrderedDict):
+ """
+ Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like a
+ tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular
+ python dictionary.
+
+    You can't unpack a `ModelOutput` directly. Use the [`~utils.ModelOutput.to_tuple`] method to convert it to a tuple
+    before.
+ """
+
+ def __post_init__(self):
+ class_fields = fields(self)
+
+ # Safety and consistency checks
+ if not len(class_fields):
+ raise ValueError(f"{self.__class__.__name__} has no fields.")
+ if not all(field.default is None for field in class_fields[1:]):
+ raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")
+
+ first_field = getattr(self, class_fields[0].name)
+ other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])
+
+ if other_fields_are_none and not paddle.is_tensor(first_field):
+ if isinstance(first_field, dict):
+ iterator = first_field.items()
+ first_field_iterator = True
+ else:
+ try:
+ iterator = iter(first_field)
+ first_field_iterator = True
+ except TypeError:
+ first_field_iterator = False
+
+ # if we provided an iterator as first field and the iterator is a (key, value) iterator
+ # set the associated fields
+ if first_field_iterator:
+ for element in iterator:
+ if (
+ not isinstance(element, (list, tuple))
+ or not len(element) == 2
+ or not isinstance(element[0], str)
+ ):
+ break
+ setattr(self, element[0], element[1])
+ if element[1] is not None:
+ self[element[0]] = element[1]
+ elif first_field is not None:
+ self[class_fields[0].name] = first_field
+ else:
+ for field in class_fields:
+ v = getattr(self, field.name)
+ if v is not None:
+ self[field.name] = v
+
+ def __delitem__(self, *args, **kwargs):
+ raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")
+
+ def setdefault(self, *args, **kwargs):
+ raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")
+
+ def pop(self, *args, **kwargs):
+ raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")
+
+ def update(self, *args, **kwargs):
+ raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")
+
+ def __getitem__(self, k):
+ if isinstance(k, str):
+ inner_dict = {k: v for (k, v) in self.items()}
+ return inner_dict[k]
+ else:
+ return self.to_tuple()[k]
+
+ def __setattr__(self, name, value):
+ if name in self.keys() and value is not None:
+ # Don't call self.__setitem__ to avoid recursion errors
+ super().__setitem__(name, value)
+ super().__setattr__(name, value)
+
+ def __setitem__(self, key, value):
+        # Will raise a KeyError if needed
+ super().__setitem__(key, value)
+ # Don't call self.__setattr__ to avoid recursion errors
+ super().__setattr__(key, value)
+
+ def to_tuple(self) -> Tuple:
+ """
+ Convert self to a tuple containing all the attributes/keys that are not `None`.
+ """
+ return tuple(self[k] for k in self.keys())
+
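+
+# Usage sketch (illustrative, using the BaseModelOutput subclass defined below):
+#   out = BaseModelOutput(last_hidden_state=paddle.zeros([2, 5, 8]))
+#   out["last_hidden_state"]      # dict-style access, same tensor as out.last_hidden_state
+#   out[0]                        # integer indexing goes through to_tuple()
+#   out.to_tuple()                # tuple of the non-None fields only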
+
+@dataclass
+class BaseModelOutput(ModelOutput):
+ """
+ Base class for model's outputs, with potential hidden states and attentions.
+
+ Args:
+ last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ last_hidden_state: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class BaseModelOutputWithNoAttention(ModelOutput):
+ """
+ Base class for model's outputs, with potential hidden states.
+
+ Args:
+ last_hidden_state (`paddle.Tensor` of shape `(batch_size, num_channels, height, width)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, num_channels, height, width)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ """
+
+    last_hidden_state: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class BaseModelOutputWithPooling(ModelOutput):
+ """
+ Base class for model's outputs that also contains a pooling of the last hidden states.
+
+ Args:
+ last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ pooler_output (`paddle.Tensor` of shape `(batch_size, hidden_size)`):
+ Last layer hidden-state of the first token of the sequence (classification token) after further processing
+ through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns
+ the classification token after processing through a linear layer and a tanh activation function. The linear
+ layer weights are trained from the next sentence prediction (classification) objective during pretraining.
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ last_hidden_state: paddle.Tensor = None
+ pooler_output: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class BaseModelOutputWithPoolingAndNoAttention(ModelOutput):
+ """
+ Base class for model's outputs that also contains a pooling of the last hidden states.
+
+ Args:
+ last_hidden_state (`paddle.Tensor` of shape `(batch_size, num_channels, height, width)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ pooler_output (`paddle.Tensor` of shape `(batch_size, hidden_size)`):
+ Last layer hidden-state after a pooling operation on the spatial dimensions.
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, num_channels, height, width)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ """
+
+ last_hidden_state: paddle.Tensor = None
+ pooler_output: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class BaseModelOutputWithPast(ModelOutput):
+ """
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
+
+ Args:
+ last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+
+ If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
+ hidden_size)` is output.
+ past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(paddle.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
+ `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,
+ encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
+ `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
+ input) to speed up sequential decoding.
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ last_hidden_state: paddle.Tensor = None
+ past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class BaseModelOutputWithCrossAttentions(ModelOutput):
+ """
+ Base class for model's outputs, with potential hidden states and attentions.
+
+ Args:
+ last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ cross_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
+ weighted average in the cross-attention heads.
+ """
+
+ last_hidden_state: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+ cross_attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class BaseModelOutputWithPoolingAndCrossAttentions(ModelOutput):
+ """
+ Base class for model's outputs that also contains a pooling of the last hidden states.
+
+ Args:
+ last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ pooler_output (`paddle.Tensor` of shape `(batch_size, hidden_size)`):
+ Last layer hidden-state of the first token of the sequence (classification token) after further processing
+ through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns
+ the classification token after processing through a linear layer and a tanh activation function. The linear
+ layer weights are trained from the next sentence prediction (classification) objective during pretraining.
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ cross_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
+ weighted average in the cross-attention heads.
+ past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(paddle.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
+ `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,
+ encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
+ `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
+ input) to speed up sequential decoding.
+ """
+
+ last_hidden_state: paddle.Tensor = None
+ pooler_output: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+ cross_attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class BaseModelOutputWithPastAndCrossAttentions(ModelOutput):
+ """
+ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
+
+ Args:
+ last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+
+ If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
+ hidden_size)` is output.
+ past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(paddle.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
+ `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,
+ encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
+ `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
+ input) to speed up sequential decoding.
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ cross_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
+ weighted average in the cross-attention heads.
+ """
+
+ last_hidden_state: paddle.Tensor = None
+ past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+ cross_attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class Seq2SeqModelOutput(ModelOutput):
+ """
+    Base class for model encoder's outputs that also contains pre-computed hidden states that can speed up sequential
+    decoding.
+
+ Args:
+ last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the decoder of the model.
+
+ If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
+ hidden_size)` is output.
+ past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(paddle.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+ decoder_hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs.
+ decoder_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ cross_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
+ weighted average in the cross-attention heads.
+ encoder_last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs.
+ encoder_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ """
+
+ last_hidden_state: paddle.Tensor = None
+ past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None
+ decoder_hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ decoder_attentions: Optional[Tuple[paddle.Tensor]] = None
+ cross_attentions: Optional[Tuple[paddle.Tensor]] = None
+ encoder_last_hidden_state: Optional[paddle.Tensor] = None
+ encoder_hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ encoder_attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class CausalLMOutput(ModelOutput):
+ """
+ Base class for causal language model (or autoregressive) outputs.
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss (for next-token prediction).
+ logits (`paddle.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ logits: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class CausalLMOutputWithPast(ModelOutput):
+ """
+ Base class for causal language model (or autoregressive) outputs.
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss (for next-token prediction).
+ logits (`paddle.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(paddle.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`)
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
+ `past_key_values` input) to speed up sequential decoding.
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ logits: paddle.Tensor = None
+ past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class CausalLMOutputWithCrossAttentions(ModelOutput):
+ """
+ Base class for causal language model (or autoregressive) outputs.
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss (for next-token prediction).
+ logits (`paddle.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ cross_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Cross attentions weights after the attention softmax, used to compute the weighted average in the
+ cross-attention heads.
+ past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `paddle.Tensor` tuples of length `config.n_layers`, with each tuple containing the cached key,
+ value states of the self-attention and the cross-attention layers if model is used in encoder-decoder
+ setting. Only relevant if `config.is_decoder = True`.
+
+ Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
+ `past_key_values` input) to speed up sequential decoding.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ logits: paddle.Tensor = None
+ past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+ cross_attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class SequenceClassifierOutputWithPast(ModelOutput):
+ """
+ Base class for outputs of sentence classification models.
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`paddle.Tensor` of shape `(batch_size, config.num_labels)`):
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
+ past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(paddle.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`)
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
+ `past_key_values` input) to speed up sequential decoding.
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ logits: paddle.Tensor = None
+ past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class MaskedLMOutput(ModelOutput):
+ """
+ Base class for masked language models outputs.
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Masked language modeling (MLM) loss.
+ logits (`paddle.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ logits: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class Seq2SeqLMOutput(ModelOutput):
+ """
+ Base class for sequence-to-sequence language models outputs.
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss.
+ logits (`paddle.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(paddle.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+ decoder_hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
+ decoder_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ cross_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
+ weighted average in the cross-attention heads.
+ encoder_last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
+ encoder_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ logits: paddle.Tensor = None
+ past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None
+ decoder_hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ decoder_attentions: Optional[Tuple[paddle.Tensor]] = None
+ cross_attentions: Optional[Tuple[paddle.Tensor]] = None
+ encoder_last_hidden_state: Optional[paddle.Tensor] = None
+ encoder_hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ encoder_attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class NextSentencePredictorOutput(ModelOutput):
+ """
+ Base class for outputs of models predicting if two sentences are consecutive or not.
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `next_sentence_label` is provided):
+ Next sequence prediction (classification) loss.
+ logits (`paddle.Tensor` of shape `(batch_size, 2)`):
+ Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
+ before SoftMax).
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ logits: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class SequenceClassifierOutput(ModelOutput):
+ """
+ Base class for outputs of sentence classification models.
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`paddle.Tensor` of shape `(batch_size, config.num_labels)`):
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ logits: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class Seq2SeqSequenceClassifierOutput(ModelOutput):
+ """
+ Base class for outputs of sequence-to-sequence sentence classification models.
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `label` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`paddle.Tensor` of shape `(batch_size, config.num_labels)`):
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
+ past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(paddle.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+ decoder_hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
+ decoder_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ cross_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
+ weighted average in the cross-attention heads.
+ encoder_last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
+ encoder_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ logits: paddle.Tensor = None
+ past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None
+ decoder_hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ decoder_attentions: Optional[Tuple[paddle.Tensor]] = None
+ cross_attentions: Optional[Tuple[paddle.Tensor]] = None
+ encoder_last_hidden_state: Optional[paddle.Tensor] = None
+ encoder_hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ encoder_attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class MultipleChoiceModelOutput(ModelOutput):
+ """
+ Base class for outputs of multiple choice models.
+
+ Args:
+ loss (`paddle.Tensor` of shape *(1,)*, *optional*, returned when `labels` is provided):
+ Classification loss.
+ logits (`paddle.Tensor` of shape `(batch_size, num_choices)`):
+ *num_choices* is the second dimension of the input tensors. (see *input_ids* above).
+
+ Classification scores (before SoftMax).
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ logits: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class TokenClassifierOutput(ModelOutput):
+ """
+ Base class for outputs of token classification models.
+
+ Args:
+        loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification loss.
+ logits (`paddle.Tensor` of shape `(batch_size, sequence_length, config.num_labels)`):
+ Classification scores (before SoftMax).
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ logits: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class QuestionAnsweringModelOutput(ModelOutput):
+ """
+ Base class for outputs of question answering models.
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
+ start_logits (`paddle.Tensor` of shape `(batch_size, sequence_length)`):
+ Span-start scores (before SoftMax).
+ end_logits (`paddle.Tensor` of shape `(batch_size, sequence_length)`):
+ Span-end scores (before SoftMax).
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ start_logits: paddle.Tensor = None
+ end_logits: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class Seq2SeqQuestionAnsweringModelOutput(ModelOutput):
+ """
+ Base class for outputs of sequence-to-sequence question answering models.
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
+ start_logits (`paddle.Tensor` of shape `(batch_size, sequence_length)`):
+ Span-start scores (before SoftMax).
+ end_logits (`paddle.Tensor` of shape `(batch_size, sequence_length)`):
+ Span-end scores (before SoftMax).
+ past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(paddle.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+ decoder_hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, plus
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
+ decoder_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+            Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ cross_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+            Attention weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
+ weighted average in the cross-attention heads.
+ encoder_last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder of the model.
+ encoder_hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, plus
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
+ encoder_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+            Attention weights of the encoder, after the attention softmax, used to compute the weighted average in the
+ self-attention heads.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ start_logits: paddle.Tensor = None
+ end_logits: paddle.Tensor = None
+ past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None
+ decoder_hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ decoder_attentions: Optional[Tuple[paddle.Tensor]] = None
+ cross_attentions: Optional[Tuple[paddle.Tensor]] = None
+ encoder_last_hidden_state: Optional[paddle.Tensor] = None
+ encoder_hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ encoder_attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class SemanticSegmenterOutput(ModelOutput):
+ """
+ Base class for outputs of semantic segmentation models.
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`paddle.Tensor` of shape `(batch_size, config.num_labels, logits_height, logits_width)`):
+ Classification scores for each pixel.
+
+            The logits returned do not necessarily have the same size as the `pixel_values` passed as inputs. This is
+            to avoid doing two interpolations and losing some quality when a user needs to resize the logits to the
+            original image size as post-processing. You should always check your logits shape and resize as needed.
+
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, plus
+ one for the output of each layer) of shape `(batch_size, patch_size, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, patch_size,
+ sequence_length)`.
+
+            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ logits: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class ImageClassifierOutput(ModelOutput):
+ """
+ Base class for outputs of image classification models.
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`paddle.Tensor` of shape `(batch_size, config.num_labels)`):
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, plus
+ one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states
+ (also called feature maps) of the model at the output of each stage.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, patch_size,
+ sequence_length)`.
+
+            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ logits: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class ImageClassifierOutputWithNoAttention(ModelOutput):
+ """
+ Base class for outputs of image classification models.
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ logits (`paddle.Tensor` of shape `(batch_size, config.num_labels)`):
+ Classification (or regression if config.num_labels==1) scores (before SoftMax).
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, plus
+ one for the output of each stage) of shape `(batch_size, num_channels, height, width)`. Hidden-states (also
+ called feature maps) of the model at the output of each stage.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ logits: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class DepthEstimatorOutput(ModelOutput):
+ """
+ Base class for outputs of depth estimation models.
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification (or regression if config.num_labels==1) loss.
+ predicted_depth (`paddle.Tensor` of shape `(batch_size, height, width)`):
+ Predicted depth for each pixel.
+
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, plus
+ one for the output of each layer) of shape `(batch_size, num_channels, height, width)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, patch_size,
+ sequence_length)`.
+
+            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ predicted_depth: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class Wav2Vec2BaseModelOutput(ModelOutput):
+ """
+ Base class for models that have been trained with the Wav2Vec2 loss objective.
+
+ Args:
+ last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ extract_features (`paddle.Tensor` of shape `(batch_size, sequence_length, conv_dim[-1])`):
+ Sequence of extracted feature vectors of the last convolutional layer of the model.
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ last_hidden_state: paddle.Tensor = None
+ extract_features: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+
+
+@dataclass
+class XVectorOutput(ModelOutput):
+ """
+ Output type of [`Wav2Vec2ForXVector`].
+
+ Args:
+ loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Classification loss.
+ logits (`paddle.Tensor` of shape `(batch_size, config.xvector_output_dim)`):
+ Classification hidden states before AMSoftmax.
+ embeddings (`paddle.Tensor` of shape `(batch_size, config.xvector_output_dim)`):
+ Utterance embeddings used for vector similarity-based retrieval.
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ logits: paddle.Tensor = None
+ embeddings: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
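+
+# Editorial note: every dataclass above subclasses `ModelOutput`, so a forward pass
+# can be consumed by attribute access, e.g.
+#   out = model(input_values)
+#   out.last_hidden_state
+# (the upstream HuggingFace `ModelOutput` additionally supports tuple-style indexing).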
diff --git a/paddlespeech/s2t/models/wav2vec2/modules/modeling_wav2vec2.py b/paddlespeech/s2t/models/wav2vec2/modules/modeling_wav2vec2.py
new file mode 100644
index 000000000..6988aa6aa
--- /dev/null
+++ b/paddlespeech/s2t/models/wav2vec2/modules/modeling_wav2vec2.py
@@ -0,0 +1,1131 @@
+# coding=utf-8
+# Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Paddle Wav2Vec2 model."""
+
+import functools
+import math
+import operator
+import warnings
+import paddle
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import numpy as np
+from paddle import nn
+
+from paddlespeech.s2t.models.wav2vec2.modules.activations import ACT2FN
+from paddlespeech.s2t.models.wav2vec2.modules.modeling_outputs import (
+ BaseModelOutput,
+ Wav2Vec2BaseModelOutput,
+ ModelOutput
+)
+
+from paddlespeech.s2t.utils.log import Log
+logger = Log(__name__).getlog()
+
+
+@dataclass
+class Wav2Vec2ForPreTrainingOutput(ModelOutput):
+ """
+ Output type of [`Wav2Vec2ForPreTraining`], with potential hidden states and attentions.
+
+ Args:
+ loss (*optional*, returned when `sample_negative_indices` are passed, `paddle.Tensor` of shape `(1,)`):
+            Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the
+            [official paper](https://arxiv.org/pdf/2006.11477.pdf).
+ projected_states (`paddle.Tensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
+ Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked
+ projected quantized states.
+ projected_quantized_states (`paddle.Tensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
+ Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive
+ target vectors for contrastive loss.
+ hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `paddle.Tensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
+ attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ contrastive_loss (*optional*, returned when `sample_negative_indices` are passed, `paddle.Tensor` of shape `(1,)`):
+            The contrastive loss (L_m) as stated in the [official paper](https://arxiv.org/pdf/2006.11477.pdf).
+ diversity_loss (*optional*, returned when `sample_negative_indices` are passed, `paddle.Tensor` of shape `(1,)`):
+            The diversity loss (L_d) as stated in the [official paper](https://arxiv.org/pdf/2006.11477.pdf).
+ """
+
+ loss: Optional[paddle.Tensor] = None
+ projected_states: paddle.Tensor = None
+ projected_quantized_states: paddle.Tensor = None
+ codevector_perplexity: paddle.Tensor = None
+ hidden_states: Optional[Tuple[paddle.Tensor]] = None
+ attentions: Optional[Tuple[paddle.Tensor]] = None
+ contrastive_loss: Optional[paddle.Tensor] = None
+ diversity_loss: Optional[paddle.Tensor] = None
+
+
+def _compute_mask_indices(
+ shape: Tuple[int, int],
+ mask_prob: float,
+ mask_length: int,
+ attention_mask: Optional[paddle.Tensor] = None,
+ min_masks: int = 0,
+) -> np.ndarray:
+ """
+ Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
+ ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on
+ CPU as part of the preprocessing during training.
+
+ Args:
+ shape: The shape for which to compute masks. This should be of a tuple of size 2 where
+ the first element is the batch size and the second element is the length of the axis to span.
+ mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of
+ independently generated mask spans of length `mask_length` is computed by
+ `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
+ actual percentage will be smaller.
+ mask_length: size of the mask
+ min_masks: minimum number of masked spans
+ attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
+ each batch dimension.
+ """
+ batch_size, sequence_length = shape
+
+ if mask_length < 1:
+ raise ValueError("`mask_length` has to be bigger than 0.")
+
+ if mask_length > sequence_length:
+ raise ValueError(
+ f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
+            f" and `sequence_length`: {sequence_length}"
+ )
+
+ # epsilon is used for probabilistic rounding
+ epsilon = np.random.rand(1).item()
+
+ def compute_num_masked_span(input_length):
+ """Given input length, compute how many spans should be masked"""
+ num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
+ num_masked_span = max(num_masked_span, min_masks)
+
+ # make sure num masked span <= sequence_length
+ if num_masked_span * mask_length > sequence_length:
+ num_masked_span = sequence_length // mask_length
+
+ # make sure num_masked span is also <= input_length - (mask_length - 1)
+ if input_length - (mask_length - 1) < num_masked_span:
+ num_masked_span = max(input_length - (mask_length - 1), 0)
+
+ return num_masked_span
+
+ # compute number of masked spans in batch
+ input_lengths = (
+ attention_mask.sum(-1).detach().tolist()
+ if attention_mask is not None
+ else [sequence_length for _ in range(batch_size)]
+ )
+
+ # SpecAugment mask to fill
+    spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
+ spec_aug_mask_idxs = []
+
+ max_num_masked_span = compute_num_masked_span(sequence_length)
+
+ if max_num_masked_span == 0:
+ return spec_aug_mask
+
+ for input_length in input_lengths:
+ # compute num of masked spans for this input
+ num_masked_span = compute_num_masked_span(input_length)
+
+ # get random indices to mask
+ spec_aug_mask_idx = np.random.choice(
+ np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
+ )
+
+ # pick first sampled index that will serve as a dummy index to pad vector
+ # to ensure same dimension for all batches due to probabilistic rounding
+ # Picking first sample just pads those vectors twice.
+ if len(spec_aug_mask_idx) == 0:
+            # this case can only happen if `input_length` is strictly smaller than
+ # `sequence_length` in which case the last token has to be a padding
+ # token which we can use as a dummy mask id
+ dummy_mask_idx = sequence_length - 1
+ else:
+ dummy_mask_idx = spec_aug_mask_idx[0]
+
+ spec_aug_mask_idx = np.concatenate(
+ [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
+ )
+ spec_aug_mask_idxs.append(spec_aug_mask_idx)
+
+ spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)
+
+ # expand masked indices to masked spans
+ spec_aug_mask_idxs = np.broadcast_to(
+ spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
+ )
+ spec_aug_mask_idxs = spec_aug_mask_idxs.reshape((batch_size, max_num_masked_span * mask_length))
+
+ # add offset to the starting indexes so that indexes now create a span
+ offsets = np.arange(mask_length)[None, None, :]
+ offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
+ (batch_size, max_num_masked_span * mask_length)
+ )
+ spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
+
+ # ensure that we cannot have indices larger than sequence_length
+ if spec_aug_mask_idxs.max() > sequence_length - 1:
+ spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1
+
+ # scatter indices to mask
+ np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)
+
+ return spec_aug_mask
+
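+# Illustrative usage (editorial comment, not part of the original port): for a batch
+# of 2 sequences of length 10, `mask_prob=0.5` and `mask_length=2` request roughly
+# 0.5 * 10 / 2 = 2.5 spans of 2 frames per utterance (rounded probabilistically):
+#   mask = _compute_mask_indices((2, 10), mask_prob=0.5, mask_length=2)
+#   mask.shape, mask.dtype   # (2, 10), bool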
+
+def _sample_negative_indices(
+ features_shape: Tuple, num_negatives: int, mask_time_indices: Optional[np.ndarray] = None
+):
+ """
+ Sample `num_negatives` vectors from feature vectors.
+ """
+ batch_size, sequence_length = features_shape
+
+ # generate indices of the positive vectors themselves, repeat them `num_negatives` times
+ sequence_length_range = np.arange(sequence_length)
+
+ # get `num_negatives` random vector indices from the same utterance
+ sampled_negative_indices = np.zeros(shape=(batch_size, sequence_length, num_negatives), dtype=np.int32)
+
+    mask_time_indices = (
+        mask_time_indices.astype(bool) if mask_time_indices is not None else np.ones(features_shape, dtype=bool)
+    )
+
+ for batch_idx in range(batch_size):
+ high = mask_time_indices[batch_idx].sum() - 1
+ mapped_masked_indices = sequence_length_range[mask_time_indices[batch_idx]]
+
+ feature_indices = np.broadcast_to(np.arange(high + 1)[:, None], (high + 1, num_negatives))
+ sampled_indices = np.random.randint(0, high, size=(high + 1, num_negatives))
+ # avoid sampling the same positive vector, but keep the distribution uniform
+ sampled_indices[sampled_indices >= feature_indices] += 1
+
+ # remap to actual indices
+ sampled_negative_indices[batch_idx][mask_time_indices[batch_idx]] = mapped_masked_indices[sampled_indices]
+
+ # correct for batch size
+ sampled_negative_indices[batch_idx] += batch_idx * sequence_length
+
+ return sampled_negative_indices
+
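+# Illustrative usage (editorial comment): during contrastive pretraining every time
+# step receives `num_negatives` distractor indices drawn from the same utterance and
+# offset by `batch_idx * sequence_length`, so they index a flattened batch:
+#   neg_idx = _sample_negative_indices((2, 50), num_negatives=10)
+#   neg_idx.shape   # (2, 50, 10), int32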
+
+class Wav2Vec2NoLayerNormConvLayer(nn.Layer):
+ def __init__(self, config, layer_id=0):
+ super().__init__()
+ self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
+ self.out_conv_dim = config.conv_dim[layer_id]
+
+ self.conv = nn.Conv1D(
+ self.in_conv_dim,
+ self.out_conv_dim,
+ kernel_size=config.conv_kernel[layer_id],
+ stride=config.conv_stride[layer_id],
+ bias_attr=config.conv_bias,
+ )
+ self.activation = ACT2FN[config.feat_extract_activation]
+
+ def forward(self, hidden_states):
+ hidden_states = self.conv(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ return hidden_states
+
+
+class Wav2Vec2LayerNormConvLayer(nn.Layer):
+ def __init__(self, config, layer_id=0):
+ super().__init__()
+ self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
+ self.out_conv_dim = config.conv_dim[layer_id]
+
+ self.conv = nn.Conv1D(
+ self.in_conv_dim,
+ self.out_conv_dim,
+ kernel_size=config.conv_kernel[layer_id],
+ stride=config.conv_stride[layer_id],
+ bias_attr=config.conv_bias,
+ )
+ self.layer_norm = nn.LayerNorm(self.out_conv_dim)
+ self.activation = ACT2FN[config.feat_extract_activation]
+
+ def forward(self, hidden_states):
+ hidden_states = self.conv(hidden_states)
+ hidden_states = hidden_states.transpose([0, 2, 1])
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states = hidden_states.transpose([0, 2, 1])
+
+ hidden_states = self.activation(hidden_states)
+ return hidden_states
+
+
+class Wav2Vec2GroupNormConvLayer(nn.Layer):
+ def __init__(self, config, layer_id=0):
+ super().__init__()
+ self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
+ self.out_conv_dim = config.conv_dim[layer_id]
+
+ self.conv = nn.Conv1D(
+ self.in_conv_dim,
+ self.out_conv_dim,
+ kernel_size=config.conv_kernel[layer_id],
+ stride=config.conv_stride[layer_id],
+ bias_attr=config.conv_bias,
+ )
+ self.activation = ACT2FN[config.feat_extract_activation]
+
+ self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim)
+
+ def forward(self, hidden_states):
+ hidden_states = self.conv(hidden_states)
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states = self.activation(hidden_states)
+ return hidden_states
+
+
+class Wav2Vec2PositionalConvEmbedding(nn.Layer):
+ def __init__(self, config):
+ super().__init__()
+ self.conv = nn.Conv1D(
+ config.hidden_size,
+ config.hidden_size,
+ kernel_size=config.num_conv_pos_embeddings,
+ padding=config.num_conv_pos_embeddings // 2,
+ groups=config.num_conv_pos_embedding_groups,
+ )
+
+ self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
+
+ self.padding = Wav2Vec2SamePadLayer(config.num_conv_pos_embeddings)
+ self.activation = ACT2FN[config.feat_extract_activation]
+
+ def forward(self, hidden_states):
+ hidden_states = hidden_states.transpose([0, 2, 1])
+
+ hidden_states = self.conv(hidden_states)
+ hidden_states = self.padding(hidden_states)
+ hidden_states = self.activation(hidden_states)
+
+ hidden_states = hidden_states.transpose([0, 2, 1])
+ return hidden_states
+
+
+class Wav2Vec2SamePadLayer(nn.Layer):
+ def __init__(self, num_conv_pos_embeddings):
+ super().__init__()
+ self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0
+
+ def forward(self, hidden_states):
+ if self.num_pad_remove > 0:
+ hidden_states = hidden_states[:, :, : -self.num_pad_remove]
+ return hidden_states
+
+
+class Wav2Vec2FeatureEncoder(nn.Layer):
+ """Construct the features from raw audio waveform"""
+
+ def __init__(self, config):
+ super().__init__()
+
+ if config.feat_extract_norm == "group":
+ conv_layers = [Wav2Vec2GroupNormConvLayer(config, layer_id=0)] + [
+ Wav2Vec2NoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1)
+ ]
+ elif config.feat_extract_norm == "layer":
+ conv_layers = [
+ Wav2Vec2LayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)
+ ]
+ else:
+ raise ValueError(
+ f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
+ )
+ self.conv_layers = nn.LayerList(conv_layers)
+ self.gradient_checkpointing = False
+
+ def _freeze_parameters(self):
+ for param in self.parameters():
+ param.trainable = False
+
+ def forward(self, input_values):
+ hidden_states = input_values[:, None]
+ for conv_layer in self.conv_layers:
+ hidden_states = conv_layer(hidden_states)
+
+ return hidden_states
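+
+# Shape note (editorial, configuration-dependent): with the standard wav2vec 2.0
+# base feature-extractor strides (5, 2, 2, 2, 2, 2, 2), the conv stack above maps
+# raw 16 kHz audio of shape (batch, samples) to features of shape
+# (batch, conv_dim[-1], ~samples / 320), i.e. roughly one frame per 20 ms of audio.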
+
+class Wav2Vec2FeatureProjection(nn.Layer):
+ def __init__(self, config):
+ super().__init__()
+ self.layer_norm = nn.LayerNorm(config.conv_dim[-1], epsilon=config.layer_norm_eps)
+ self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
+ self.dropout = nn.Dropout(config.feat_proj_dropout)
+
+ def forward(self, hidden_states):
+ # non-projected hidden states are needed for quantization
+ norm_hidden_states = self.layer_norm(hidden_states)
+ hidden_states = self.projection(norm_hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ return hidden_states, norm_hidden_states
+
+
+# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Wav2Vec2
+class Wav2Vec2Attention(nn.Layer):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ bias: bool = True,
+ ):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.head_dim = embed_dim // num_heads
+
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias_attr=bias)
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias_attr=bias)
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias_attr=bias)
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias_attr=bias)
+
+ def _shape(self, tensor: paddle.Tensor, seq_len: int, bsz: int):
+ return paddle.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)).transpose([0, 2, 1, 3])
+
+ def forward(
+ self,
+ hidden_states: paddle.Tensor,
+ key_value_states: Optional[paddle.Tensor] = None,
+ past_key_value: Optional[Tuple[paddle.Tensor]] = None,
+ attention_mask: Optional[paddle.Tensor] = None,
+ layer_head_mask: Optional[paddle.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[paddle.Tensor, Optional[paddle.Tensor], Optional[Tuple[paddle.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+
+ bsz, tgt_len, _ = hidden_states.shape
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scaling
+ # get key, value proj
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = paddle.concat([past_key_value[0], key_states], axis=2)
+ value_states = paddle.concat([past_key_value[1], value_states], axis=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(paddle.Tensor, paddle.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(paddle.Tensor, paddle.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, bsz).reshape(proj_shape)
+ key_states = key_states.reshape(proj_shape)
+ value_states = value_states.reshape(proj_shape)
+
+ src_len = key_states.shape[1]
+ attn_weights = paddle.bmm(query_states, key_states.transpose([0, 2, 1]))
+
+ if attn_weights.shape != [bsz * self.num_heads, tgt_len, src_len]:
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {attn_weights.shape}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.shape != [bsz, 1, tgt_len, src_len]:
+ raise ValueError(
+ f"Attention mask should be of size {[bsz, 1, tgt_len, src_len]}, but is {attention_mask.shape}"
+ )
+            attn_weights = attn_weights.reshape((bsz, self.num_heads, tgt_len, src_len)) + attention_mask
+            attn_weights = attn_weights.reshape((bsz * self.num_heads, tgt_len, src_len))
+
+        attn_weights = nn.functional.softmax(attn_weights, axis=-1)
+
+ if layer_head_mask is not None:
+ if layer_head_mask.shape != [self.num_heads,]:
+ raise ValueError(
+ f"Head mask for a single layer should be of size {[self.num_heads,]}, but is"
+ f" {layer_head_mask.shape}"
+ )
+ attn_weights = layer_head_mask.reshape((1, -1, 1, 1)) * attn_weights.reshape((bsz, self.num_heads, tgt_len, src_len))
+ attn_weights = attn_weights.reshape((bsz * self.num_heads, tgt_len, src_len))
+
+ if output_attentions:
+ # this operation is a bit awkward, but it's required to
+ # make sure that attn_weights keeps its gradient.
+ # In order to do so, attn_weights have to be reshaped
+ # twice and have to be reused in the following
+ attn_weights_reshaped = attn_weights.reshape((bsz, self.num_heads, tgt_len, src_len))
+ attn_weights = attn_weights_reshaped.reshape((bsz * self.num_heads, tgt_len, src_len))
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ attn_output = paddle.bmm(attn_probs, value_states)
+
+ if attn_output.shape != [bsz * self.num_heads, tgt_len, self.head_dim]:
+ raise ValueError(
+                f"`attn_output` should be of size {[bsz * self.num_heads, tgt_len, self.head_dim]}, but is"
+ f" {attn_output.shape}"
+ )
+
+ attn_output = attn_output.reshape((bsz, self.num_heads, tgt_len, self.head_dim))
+ attn_output = attn_output.transpose([0, 2, 1, 3])
+
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
+        # partitioned across GPUs when using tensor-parallelism.
+ attn_output = attn_output.reshape((bsz, tgt_len, self.embed_dim))
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped, past_key_value
+
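+# Shape sketch for Wav2Vec2Attention.forward (editorial comment, added for clarity):
+#   hidden_states: (batch, time, embed_dim)
+#   q/k/v projections are reshaped to (batch * num_heads, time, head_dim)
+#   attn_weights = softmax(q @ k^T / sqrt(head_dim)): (batch * num_heads, time, time)
+#   attn_output = attn_weights @ v, merged back to (batch, time, embed_dim)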
+
+class Wav2Vec2FeedForward(nn.Layer):
+ def __init__(self, config):
+ super().__init__()
+ self.intermediate_dropout = nn.Dropout(config.activation_dropout)
+
+ self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.output_dropout = nn.Dropout(config.hidden_dropout)
+
+ def forward(self, hidden_states):
+ hidden_states = self.intermediate_dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ hidden_states = self.intermediate_dropout(hidden_states)
+
+ hidden_states = self.output_dense(hidden_states)
+ hidden_states = self.output_dropout(hidden_states)
+ return hidden_states
+
+
+class Wav2Vec2EncoderLayer(nn.Layer):
+ def __init__(self, config):
+ super().__init__()
+ self.attention = Wav2Vec2Attention(
+ embed_dim=config.hidden_size,
+ num_heads=config.num_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=False,
+ )
+ self.dropout = nn.Dropout(config.hidden_dropout)
+ self.layer_norm = nn.LayerNorm(config.hidden_size, epsilon=config.layer_norm_eps)
+ self.feed_forward = Wav2Vec2FeedForward(config)
+ self.final_layer_norm = nn.LayerNorm(config.hidden_size, epsilon=config.layer_norm_eps)
+
+ def forward(self, hidden_states, attention_mask=None, output_attentions=False):
+ attn_residual = hidden_states
+ hidden_states, attn_weights, _ = self.attention(
+ hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
+ )
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = attn_residual + hidden_states
+
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states = hidden_states + self.feed_forward(hidden_states)
+ hidden_states = self.final_layer_norm(hidden_states)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+class Wav2Vec2EncoderLayerStableLayerNorm(nn.Layer):
+ def __init__(self, config):
+ super().__init__()
+ self.attention = Wav2Vec2Attention(
+ embed_dim=config.hidden_size,
+ num_heads=config.num_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=False,
+ )
+ self.dropout = nn.Dropout(config.hidden_dropout)
+ self.layer_norm = nn.LayerNorm(config.hidden_size, epsilon=config.layer_norm_eps)
+ self.feed_forward = Wav2Vec2FeedForward(config)
+ self.final_layer_norm = nn.LayerNorm(config.hidden_size, epsilon=config.layer_norm_eps)
+
+ def forward(self, hidden_states, attention_mask=None, output_attentions=False):
+ attn_residual = hidden_states
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states, attn_weights, _ = self.attention(
+ hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
+ )
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = attn_residual + hidden_states
+ hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states))
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+class Wav2Vec2Encoder(nn.Layer):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.pos_conv_embed = Wav2Vec2PositionalConvEmbedding(config)
+ self.layer_norm = nn.LayerNorm(config.hidden_size, epsilon=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout)
+ self.layers = nn.LayerList([Wav2Vec2EncoderLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ output_attentions=False,
+ output_hidden_states=False,
+ return_dict=True,
+ ):
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ if attention_mask is not None:
+ # make sure padded tokens output 0
+            expand_attention_mask = attention_mask.unsqueeze(-1).repeat_interleave(hidden_states.shape[2], axis=2)
+ hidden_states[~expand_attention_mask] = 0
+
+ # extend attention_mask
+            attention_mask = 1.0 - attention_mask[:, None, None, :].astype(hidden_states.dtype)
+            attention_mask = attention_mask * np.finfo(np.float32).min
+            attention_mask = attention_mask.expand(
+                [attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]]
+            )
+
+ position_embeddings = self.pos_conv_embed(hidden_states)
+ hidden_states = hidden_states + position_embeddings
+ hidden_states = self.layer_norm(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+
+ #deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
+
+ for layer in self.layers:
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ dropout_probability = np.random.uniform(0, 1)
+
+            skip_the_layer = self.training and (dropout_probability < self.config.layerdrop)
+            if not skip_the_layer:  # or deepspeed_zero3_is_enabled
+                # NOTE: gradient checkpointing is not wired up in this Paddle port
+                # (`self.gradient_checkpointing` is always False), so the layer is
+                # called directly; under deepspeed zero3 all gpus must run in sync.
+                layer_outputs = layer(
+                    hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
+                )
+                hidden_states = layer_outputs[0]
+
+ if skip_the_layer:
+ layer_outputs = (None, None)
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+
+class Wav2Vec2EncoderStableLayerNorm(nn.Layer):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.pos_conv_embed = Wav2Vec2PositionalConvEmbedding(config)
+ self.layer_norm = nn.LayerNorm(config.hidden_size, epsilon=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout)
+ self.layers = nn.LayerList(
+ [Wav2Vec2EncoderLayerStableLayerNorm(config) for _ in range(config.num_hidden_layers)]
+ )
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ output_attentions=False,
+ output_hidden_states=False,
+ return_dict=True,
+ ):
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attentions = () if output_attentions else None
+
+ if attention_mask is not None:
+ # make sure padded tokens are not attended to
+ expand_attention_mask = attention_mask.unsqueeze(-1).repeat_interleave(hidden_states.shape[2], axis=2)
+ hidden_states[~expand_attention_mask] = 0
+
+ # extend attention_mask
+            attention_mask = 1.0 - attention_mask[:, None, None, :].astype(hidden_states.dtype)
+            attention_mask = attention_mask * np.finfo(np.float32).min
+            attention_mask = attention_mask.expand(
+                [attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]]
+            )
+
+ position_embeddings = self.pos_conv_embed(hidden_states)
+ hidden_states = hidden_states + position_embeddings
+ hidden_states = self.dropout(hidden_states)
+
+ for layer in self.layers:
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ dropout_probability = np.random.uniform(0, 1)
+
+            skip_the_layer = self.training and (dropout_probability < self.config.layerdrop)
+            if not skip_the_layer:  # or deepspeed_zero3_is_enabled
+                # NOTE: gradient checkpointing is not wired up in this Paddle port
+                # (`self.gradient_checkpointing` is always False), so the layer is
+                # called directly; under deepspeed zero3 all gpus must run in sync.
+                layer_outputs = layer(
+                    hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
+                )
+                hidden_states = layer_outputs[0]
+
+ if skip_the_layer:
+ layer_outputs = (None, None)
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
+
+ hidden_states = self.layer_norm(hidden_states)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+
+class Wav2Vec2GumbelVectorQuantizer(nn.Layer):
+ """
+ Vector quantization using gumbel softmax. See `[CATEGORICAL REPARAMETERIZATION WITH
+ GUMBEL-SOFTMAX](https://arxiv.org/pdf/1611.01144.pdf) for more information.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ self.num_groups = config.num_codevector_groups
+ self.num_vars = config.num_codevectors_per_group
+
+ if config.codevector_dim % self.num_groups != 0:
+ raise ValueError(
+ f"`config.codevector_dim {config.codevector_dim} must be divisible "
+ f"by `config.num_codevector_groups` {self.num_groups} for concatenation"
+ )
+
+ # storage for codebook variables (codewords)
+ self.codevectors = paddle.static.create_parameter(
+ shape=[1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups], dtype='float32'
+ )
+ self.weight_proj = nn.Linear(config.conv_dim[-1], self.num_groups * self.num_vars)
+
+ # can be decayed for training
+ self.temperature = 2
+
+ @staticmethod
+ def _compute_perplexity(probs, mask=None):
+ if mask is not None:
+ mask_extended = mask.flatten()[:, None, None].expand(probs.shape)
+ probs = paddle.where(mask_extended, probs, paddle.zeros_like(probs))
+            marginal_probs = probs.sum(axis=0) / mask.sum()
+        else:
+            marginal_probs = probs.mean(axis=0)
+
+        perplexity = paddle.exp(-paddle.sum(marginal_probs * paddle.log(marginal_probs + 1e-7), axis=-1)).sum()
+ return perplexity
+
+ def forward(self, hidden_states, mask_time_indices=None):
+ batch_size, sequence_length, hidden_size = hidden_states.shape
+
+ # project to codevector dim
+ hidden_states = self.weight_proj(hidden_states)
+ hidden_states = hidden_states.reshape((batch_size * sequence_length * self.num_groups, -1))
+
+        if self.training:
+            # sample code vector probs via gumbel in differentiable way
+            codevector_probs = nn.functional.gumbel_softmax(
+                hidden_states.astype('float32'), temperature=self.temperature, hard=True
+            ).astype(hidden_states.dtype)
+
+            # compute perplexity
+            codevector_soft_dist = nn.functional.softmax(
+                hidden_states.reshape((batch_size * sequence_length, self.num_groups, -1)).astype('float32'), axis=-1
+            )
+            perplexity = self._compute_perplexity(codevector_soft_dist, mask_time_indices)
+        else:
+            # take argmax in non-differentiable way
+            # compute hard codevector distribution (one hot)
+            codevector_idx = hidden_states.argmax(axis=-1)
+            codevector_probs = nn.functional.one_hot(
+                codevector_idx, num_classes=hidden_states.shape[-1]
+            ).astype(hidden_states.dtype)
+            codevector_probs = codevector_probs.reshape((batch_size * sequence_length, self.num_groups, -1))
+
+ perplexity = self._compute_perplexity(codevector_probs, mask_time_indices)
+
+ codevector_probs = codevector_probs.reshape((batch_size * sequence_length, -1))
+ # use probs to retrieve codevectors
+ codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors
+ codevectors = codevectors_per_group.reshape((batch_size * sequence_length, self.num_groups, self.num_vars, -1))
+ codevectors = codevectors.sum(-2).reshape((batch_size, sequence_length, -1))
+
+ return codevectors, perplexity
+
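+# Codebook shape sketch (editorial comment): with G = num_codevector_groups and
+# V = num_codevectors_per_group, `weight_proj` produces (batch * time * G, V) logits;
+# one codeword of size codevector_dim // G is selected per group and the G picks are
+# concatenated, so `codevectors` comes out as (batch, time, codevector_dim), while
+# `perplexity` measures codebook usage (at most G * V when every codeword is used evenly).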
+
+class Wav2Vec2Adapter(nn.Layer):
+ def __init__(self, config):
+ super().__init__()
+
+ # feature dim might need to be down-projected
+ if config.output_hidden_size != config.hidden_size:
+ self.proj = nn.Linear(config.hidden_size, config.output_hidden_size)
+ self.proj_layer_norm = nn.LayerNorm(config.output_hidden_size)
+ else:
+ self.proj = self.proj_layer_norm = None
+
+ self.layers = nn.LayerList(Wav2Vec2AdapterLayer(config) for _ in range(config.num_adapter_layers))
+ self.layerdrop = config.layerdrop
+
+ def forward(self, hidden_states):
+ # down project hidden_states if necessary
+ if self.proj is not None and self.proj_layer_norm is not None:
+ hidden_states = self.proj(hidden_states)
+ hidden_states = self.proj_layer_norm(hidden_states)
+
+ hidden_states = hidden_states.transpose([0, 2, 1])
+
+ for layer in self.layers:
+ layerdrop_prob = np.random.random()
+ if not self.training or (layerdrop_prob > self.layerdrop):
+ hidden_states = layer(hidden_states)
+
+ hidden_states = hidden_states.transpose([0, 2, 1])
+ return hidden_states
+
+
+class Wav2Vec2AdapterLayer(nn.Layer):
+ def __init__(self, config):
+ super().__init__()
+ self.conv = nn.Conv1D(
+ config.output_hidden_size,
+ 2 * config.output_hidden_size,
+ config.adapter_kernel_size,
+ stride=config.adapter_stride,
+ padding=1,
+ )
+
+ def forward(self, hidden_states):
+ hidden_states = self.conv(hidden_states)
+ hidden_states = nn.functional.glu(hidden_states, axis=1)
+
+ return hidden_states
+
+
+class Wav2Vec2Model(nn.Layer):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.feature_extractor = Wav2Vec2FeatureEncoder(config)
+ self.feature_projection = Wav2Vec2FeatureProjection(config)
+
+ # model only needs masking vector if mask prob is > 0.0
+ if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
+ # self.masked_spec_embed = nn.Parameter(paddle.Tensor(config.hidden_size).uniform_())
+ #self.masked_spec_embed = paddle.uniform([config.hidden_size])
+            self.masked_spec_embed = paddle.static.create_parameter(
+                shape=[config.hidden_size],
+                dtype='float32',
+                default_initializer=paddle.nn.initializer.Uniform(low=0, high=1.0))
+ if config.do_stable_layer_norm:
+ self.encoder = Wav2Vec2EncoderStableLayerNorm(config)
+ else:
+ self.encoder = Wav2Vec2Encoder(config)
+
+ self.adapter = Wav2Vec2Adapter(config) if config.add_adapter else None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def freeze_feature_encoder(self):
+ """
+ Calling this function will disable the gradient computation for the feature encoder so that its parameter will
+ not be updated during training.
+ """
+ self.feature_extractor._freeze_parameters()
+
+ def _mask_hidden_states(
+ self,
+ hidden_states: paddle.Tensor,
+ mask_time_indices: Optional[paddle.Tensor] = None,
+ attention_mask: Optional[paddle.Tensor] = None,
+ ):
+ """
+ Masks extracted features along time axis and/or along feature axis according to
+ [SpecAugment](https://arxiv.org/abs/1904.08779).
+ """
+ # `config.apply_spec_augment` can set masking to False
+ if not getattr(self.config, "apply_spec_augment", True):
+ return hidden_states
+
+ # generate indices & apply SpecAugment along time axis
+ batch_size, sequence_length, hidden_size = hidden_states.shape
+ if mask_time_indices is not None:
+ # apply SpecAugment along time axis with given mask_time_indices
+            hidden_states[mask_time_indices] = self.masked_spec_embed.astype(hidden_states.dtype)
+ elif self.config.mask_time_prob > 0 and self.training:
+ mask_time_indices = _compute_mask_indices(
+ (batch_size, sequence_length),
+ mask_prob=self.config.mask_time_prob,
+ mask_length=self.config.mask_time_length,
+ attention_mask=attention_mask,
+ min_masks=self.config.mask_time_min_masks,
+ )
+ mask_time_indices = paddle.to_tensor(mask_time_indices, dtype=paddle.bool)
+            hidden_states[mask_time_indices] = self.masked_spec_embed.astype(hidden_states.dtype)
+
+ if self.config.mask_feature_prob > 0 and self.training:
+ # generate indices & apply SpecAugment along feature axis
+ mask_feature_indices = _compute_mask_indices(
+ (batch_size, hidden_size),
+ mask_prob=self.config.mask_feature_prob,
+ mask_length=self.config.mask_feature_length,
+ min_masks=self.config.mask_feature_min_masks,
+ )
+ mask_feature_indices = paddle.to_tensor(mask_feature_indices, dtype=paddle.bool)
+            mask_feature_indices = mask_feature_indices[:, None].expand([-1, sequence_length, -1])
+ hidden_states[mask_feature_indices] = 0
+
+ return hidden_states
+
+ def forward(
+ self,
+ input_values: Optional[paddle.Tensor],
+ attention_mask: Optional[paddle.Tensor] = None,
+ mask_time_indices: Optional[paddle.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, Wav2Vec2BaseModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ extract_features = self.feature_extractor(input_values)
+ extract_features = extract_features.transpose([0, 2, 1])
+
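+        # NOTE (editorial): `_get_feature_vector_attention_mask` is not defined in this
+        # port, so this branch assumes a suitable helper is supplied; otherwise callers
+        # are expected to leave `attention_mask` as None.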
+ if attention_mask is not None:
+ # compute reduced attention_mask corresponding to feature vectors
+ attention_mask = self._get_feature_vector_attention_mask(
+ extract_features.shape[1], attention_mask, add_adapter=False
+ )
+ hidden_states, extract_features = self.feature_projection(extract_features)
+ hidden_states = self._mask_hidden_states(
+ hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask
+ )
+
+ encoder_outputs = self.encoder(
+ hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = encoder_outputs[0]
+
+ if self.adapter is not None:
+ hidden_states = self.adapter(hidden_states)
+
+ if not return_dict:
+ return (hidden_states, extract_features) + encoder_outputs[1:]
+
+ return Wav2Vec2BaseModelOutput(
+ last_hidden_state=hidden_states,
+ extract_features=extract_features,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+ def post_init(self):
+ """
+ A method executed at the end of each Transformer model initialization, to execute code that needs the model's
+ modules properly initialized (such as weight initialization).
+ """
+ # self.init_weights()
+ # self._backward_compatibility_gradient_checkpointing()
+ pass
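+
+# Usage sketch (editorial comment, not part of the original port): given a config
+# object `cfg` exposing the fields consumed by Wav2Vec2ConfigPure below, the encoder
+# maps raw waveforms to frame-level representations, e.g.
+#   model = Wav2Vec2Model(Wav2Vec2ConfigPure(cfg))
+#   out = model(paddle.randn([4, 16000]))   # 4 one-second utterances at 16 kHz
+#   out.last_hidden_state                   # roughly (4, 49, hidden_size)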
+
+class Wav2Vec2ConfigPure():
+ model_type = "wav2vec2"
+ def __init__(self, config):
+ self.output_attentions = False
+ self.output_hidden_states = False
+ self.use_return_dict = True
+
+ self.pad_token_id = config.pad_token_id
+ self.bos_token_id = config.bos_token_id
+ self.eos_token_id = config.eos_token_id
+ self.hidden_size = config.hidden_size
+ self.feat_extract_norm = config.feat_extract_norm
+ self.feat_extract_activation = config.feat_extract_activation
+ self.conv_dim = config.conv_dim
+ self.conv_stride = config.conv_stride
+ self.conv_kernel = config.conv_kernel
+ self.conv_bias = config.conv_bias
+ self.num_conv_pos_embeddings = config.num_conv_pos_embeddings
+ self.num_conv_pos_embedding_groups = config.num_conv_pos_embedding_groups
+ self.num_feat_extract_layers = len(self.conv_dim)
+ self.num_hidden_layers = config.num_hidden_layers
+ self.intermediate_size = config.intermediate_size
+ self.hidden_act = config.hidden_act
+ self.num_attention_heads = config.num_attention_heads
+ self.hidden_dropout = config.hidden_dropout
+ self.attention_dropout = config.attention_dropout
+ self.activation_dropout = config.activation_dropout
+ self.feat_proj_dropout = config.feat_proj_dropout
+ self.final_dropout = config.final_dropout
+ self.layerdrop = config.layerdrop
+ self.layer_norm_eps = config.layer_norm_eps
+ self.initializer_range = config.initializer_range
+ self.vocab_size = config.vocab_size
+ self.do_stable_layer_norm = config.do_stable_layer_norm
+ self.use_weighted_layer_sum = config.use_weighted_layer_sum
+
+ if (
+ (len(self.conv_stride) != self.num_feat_extract_layers)
+ or (len(self.conv_kernel) != self.num_feat_extract_layers)
+ or (len(self.conv_dim) != self.num_feat_extract_layers)
+ ):
+ raise ValueError(
+ "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
+ " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
+ f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
+ f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
+ )
+
+ # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
+ self.apply_spec_augment = config.apply_spec_augment
+ self.mask_time_prob = config.mask_time_prob
+ self.mask_time_length = config.mask_time_length
+ self.mask_time_min_masks = config.mask_time_min_masks
+ self.mask_feature_prob = config.mask_feature_prob
+ self.mask_feature_length = config.mask_feature_length
+ self.mask_feature_min_masks = config.mask_feature_min_masks
+
+ # parameters for pretraining with codevector quantized representations
+ self.num_codevectors_per_group = config.num_codevectors_per_group
+ self.num_codevector_groups = config.num_codevector_groups
+ self.contrastive_logits_temperature = config.contrastive_logits_temperature
+ self.feat_quantizer_dropout = config.feat_quantizer_dropout
+ self.num_negatives = config.num_negatives
+ self.codevector_dim = config.codevector_dim
+ self.proj_codevector_dim = config.proj_codevector_dim
+ self.diversity_loss_weight = config.diversity_loss_weight
+
+ # ctc loss
+ self.ctc_loss_reduction = config.ctc_loss_reduction
+ self.ctc_zero_infinity = config.ctc_zero_infinity
+
+ # adapter
+ self.add_adapter = config.add_adapter
+ self.adapter_kernel_size = config.adapter_kernel_size
+ self.adapter_stride = config.adapter_stride
+ self.num_adapter_layers = config.num_adapter_layers
+ self.output_hidden_size = config.output_hidden_size or config.hidden_size
+
+ @property
+ def inputs_to_logits_ratio(self):
+ return functools.reduce(operator.mul, self.conv_stride, 1)
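+
+# Worked example (editorial comment, assumes the standard wav2vec 2.0 conv_stride
+# of (5, 2, 2, 2, 2, 2, 2)): inputs_to_logits_ratio = 5 * 2**6 = 320, i.e. one
+# encoder frame per 320 input samples, or 20 ms of audio at 16 kHz.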
diff --git a/paddlespeech/s2t/models/wav2vec2/processing/signal_processing.py b/paddlespeech/s2t/models/wav2vec2/processing/signal_processing.py
new file mode 100644
index 000000000..8eb9b4adf
--- /dev/null
+++ b/paddlespeech/s2t/models/wav2vec2/processing/signal_processing.py
@@ -0,0 +1,242 @@
+"""
+Low level signal processing utilities
+Authors
+ * Peter Plantinga 2020
+ * Francois Grondin 2020
+ * William Aris 2020
+ * Samuele Cornell 2020
+ * Sarthak Yadav 2022
+"""
+import numpy as np
+import paddle
+
+
+def blackman_window(window_length, periodic=True):
+ """Blackman window function.
+ Arguments
+ ---------
+ window_length : int
+ Controlling the returned window size.
+ periodic : bool
+ Determines whether the returned window trims off the
+ last duplicate value from the symmetric window
+
+ Returns
+ -------
+ A 1-D tensor of size (window_length) containing the window
+ """
+ if window_length == 0:
+ return []
+ if window_length == 1:
+ return paddle.ones([1])
+ if periodic:
+ window_length += 1
+    window = paddle.arange(window_length, dtype='float32') * (np.pi / (window_length - 1))
+ window = 0.08 * paddle.cos(window * 4) - 0.5 * paddle.cos(window * 2) + 0.42
+ return window[:-1] if periodic else window
+
+
+def compute_amplitude(waveforms, lengths=None, amp_type="avg", scale="linear"):
+ """Compute amplitude of a batch of waveforms.
+ Arguments
+ ---------
+    waveforms : tensor
+ The waveforms used for computing amplitude.
+ Shape should be `[time]` or `[batch, time]` or
+ `[batch, time, channels]`.
+ lengths : tensor
+ The lengths of the waveforms excluding the padding.
+ Shape should be a single dimension, `[batch]`.
+ amp_type : str
+ Whether to compute "avg" average or "peak" amplitude.
+ Choose between ["avg", "peak"].
+ scale : str
+ Whether to compute amplitude in "dB" or "linear" scale.
+ Choose between ["linear", "dB"].
+ Returns
+ -------
+ The average amplitude of the waveforms.
+ Example
+ -------
+ >>> signal = paddle.sin(paddle.arange(16000.0)).unsqueeze(0)
+    >>> compute_amplitude(signal, signal.shape[1])
+ tensor([[0.6366]])
+ """
+ if len(waveforms.shape) == 1:
+ waveforms = waveforms.unsqueeze(0)
+
+ assert amp_type in ["avg", "peak"]
+ assert scale in ["linear", "dB"]
+
+ if amp_type == "avg":
+ if lengths is None:
+ out = paddle.mean(paddle.abs(waveforms), axis=1, keepdim=True)
+ else:
+ wav_sum = paddle.sum(paddle.abs(waveforms), axis=1, keepdim=True)
+ out = wav_sum / lengths
+ elif amp_type == "peak":
+        out = paddle.max(paddle.abs(waveforms), axis=1, keepdim=True)
+ else:
+ raise NotImplementedError
+
+ if scale == "linear":
+ return out
+ elif scale == "dB":
+ return paddle.clip(20 * paddle.log10(out), min=-80) # clamp zeros
+ else:
+ raise NotImplementedError
+
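+# Illustrative sketch only (the helper name is hypothetical): "avg" returns the
+# mean absolute value, "peak" the maximum, and scale="dB" converts to decibels.
+def _demo_compute_amplitude():
+    signal = paddle.sin(paddle.arange(16000, dtype='float32')).unsqueeze(0)
+    avg = compute_amplitude(signal, amp_type="avg")    # ~2 / pi for a sine
+    peak = compute_amplitude(signal, amp_type="peak")  # ~1.0
+    avg_db = compute_amplitude(signal, amp_type="avg", scale="dB")
+    return avg, peak, avg_db
+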
+
+def convolve1d(
+ waveform,
+ kernel,
+ padding=0,
+ pad_type="constant",
+ stride=1,
+ groups=1,
+ use_fft=False,
+ rotation_index=0,
+):
+ """Use paddle.nn.functional to perform 1d padding and conv.
+ Arguments
+ ---------
+ waveform : tensor
+ The tensor to perform operations on.
+ kernel : tensor
+ The filter to apply during convolution.
+ padding : int or tuple
+ The padding (pad_left, pad_right) to apply.
+ If an integer is passed instead, this is passed
+ to the conv1d function and pad_type is ignored.
+ pad_type : str
+ The type of padding to use. Passed directly to
+ `paddle.nn.functional.pad`, see Paddle documentation
+ for available options.
+ stride : int
+ The number of units to move each time convolution is applied.
+ Passed to conv1d. Has no effect if `use_fft` is True.
+ groups : int
+ This option is passed to `conv1d` to split the input into groups for
+ convolution. Input channels should be divisible by the number of groups.
+ use_fft : bool
+ When `use_fft` is passed `True`, then compute the convolution in the
+ spectral domain using complex multiply. This is more efficient on CPU
+ when the size of the kernel is large (e.g. reverberation). WARNING:
+ Without padding, circular convolution occurs. This makes little
+ difference in the case of reverberation, but may make more difference
+ with different kernels.
+ rotation_index : int
+ This option only applies if `use_fft` is true. If so, the kernel is
+ rolled by this amount before convolution to shift the output location.
+ Returns
+ -------
+ The convolved waveform.
+ Example
+ -------
+ >>> from speechbrain.dataio.dataio import read_audio
+ >>> signal = read_audio('tests/samples/single-mic/example1.wav')
+ >>> signal = signal.unsqueeze(0).unsqueeze(2)
+ >>> kernel = paddle.rand([1, 10, 1])
+ >>> signal = convolve1d(signal, kernel, padding=(9, 0))
+ """
+ if len(waveform.shape) != 3:
+ raise ValueError("Convolve1D expects a 3-dimensional tensor")
+
+ # Move time dimension last, which pad and fft and conv expect.
+ waveform = waveform.transpose([0, 2, 1])
+ kernel = kernel.transpose([0, 2, 1])
+ # Padding can be a tuple (left_pad, right_pad) or an int
+ if isinstance(padding, tuple):
+ waveform = paddle.nn.functional.pad(
+ x=waveform, pad=padding, mode=pad_type, data_format='NCL'
+ )
+
+ # This approach uses FFT, which is more efficient if the kernel is large
+ if use_fft:
+ # Pad kernel to same length as signal, ensuring correct alignment
+ zero_length = waveform.shape[-1] - kernel.shape[-1]
+
+ # Handle case where signal is shorter
+ if zero_length < 0:
+ kernel = kernel[..., :zero_length]
+ zero_length = 0
+
+ # Perform rotation to ensure alignment
+ zeros = paddle.zeros(
+ [kernel.shape[0], kernel.shape[1], zero_length],
+ dtype=kernel.dtype
+ )
+ after_index = kernel[..., rotation_index:]
+ before_index = kernel[..., :rotation_index]
+ kernel = paddle.concat((after_index, zeros, before_index), axis=-1)
+
+ # Multiply in frequency domain to convolve in time domain
+ import paddle.fft as fft
+
+ result = fft.rfft(waveform) * fft.rfft(kernel)
+ convolved = fft.irfft(result, n=waveform.shape[-1])
+
+ # Use the implementation given by paddle, which should be efficient on GPU
+ else:
+ convolved = paddle.nn.functional.conv1d(
+ x=waveform,
+ weight=kernel,
+ stride=stride,
+ groups=groups,
+ padding=padding if not isinstance(padding, tuple) else 0,
+ )
+
+ # Return time dimension to the second dimension.
+ return convolved.transpose([0, 2, 1])
+
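+# Hedged sketch (hypothetical helper): smooth a random signal with a 5-tap
+# moving-average kernel; left padding of kernel_size - 1 keeps the length.
+def _demo_convolve1d():
+    signal = paddle.randn([1, 100, 1])    # [batch, time, channels]
+    kernel = paddle.full([1, 5, 1], 0.2)  # 5-tap moving average
+    smoothed = convolve1d(signal, kernel, padding=(4, 0))
+    return smoothed.shape                 # [1, 100, 1], same as the input
+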
+def notch_filter(notch_freq, filter_width=101, notch_width=0.05):
+ """Returns a notch filter constructed from a high-pass and low-pass filter.
+ (from https://tomroelandts.com/articles/
+ how-to-create-simple-band-pass-and-band-reject-filters)
+ Arguments
+ ---------
+ notch_freq : float
+ frequency to put notch as a fraction of the
+ sampling rate / 2. The range of possible inputs is 0 to 1.
+ filter_width : int
+ Filter width in samples. Longer filters have
+ smaller transition bands, but are more inefficient.
+ notch_width : float
+ Width of the notch, as a fraction of the sampling_rate / 2.
+ """
+
+ # Check inputs
+ assert 0 < notch_freq <= 1
+ assert filter_width % 2 != 0
+ pad = filter_width // 2
+    inputs = paddle.arange(filter_width, dtype='float32') - pad
+
+ # Avoid frequencies that are too low
+ notch_freq += notch_width
+
+ # Define sinc function, avoiding division by zero
+ def sinc(x):
+ "Computes the sinc function."
+
+ def _sinc(x):
+ return paddle.sin(x) / x
+
+ # The zero is at the middle index
+ return paddle.concat([_sinc(x[:pad]), paddle.ones([1]), _sinc(x[pad + 1 :])])
+
+ # Compute a low-pass filter with cutoff frequency notch_freq.
+ hlpf = sinc(3 * (notch_freq - notch_width) * inputs)
+ hlpf *= blackman_window(filter_width)
+ hlpf /= paddle.sum(hlpf)
+
+ # Compute a high-pass filter with cutoff frequency notch_freq.
+ hhpf = sinc(3 * (notch_freq + notch_width) * inputs)
+ hhpf *= blackman_window(filter_width)
+ hhpf /= -paddle.sum(hhpf)
+ hhpf[pad] += 1
+
+ # Adding filters creates notch filter
+    return (hlpf + hhpf).reshape([1, -1, 1])
+
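+# A hedged, illustrative usage sketch (not part of the module API): build a
+# notch filter around 0.25 x Nyquist and apply it with convolve1d. Shapes
+# follow the docstrings above; the helper name below is hypothetical.
+def _demo_notch_filter():
+    signal = paddle.randn([1, 400, 1])             # [batch, time, channels]
+    kernel = notch_filter(0.25, filter_width=101)  # [1, 101, 1]
+    filtered = convolve1d(signal, kernel, padding=101 // 2)
+    return filtered.shape                          # [1, 400, 1]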
diff --git a/paddlespeech/s2t/models/wav2vec2/processing/speech_augmentation.py b/paddlespeech/s2t/models/wav2vec2/processing/speech_augmentation.py
new file mode 100644
index 000000000..f67121ede
--- /dev/null
+++ b/paddlespeech/s2t/models/wav2vec2/processing/speech_augmentation.py
@@ -0,0 +1,727 @@
+import math
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddlespeech.s2t.models.wav2vec2.processing.signal_processing import (
+ compute_amplitude,
+ convolve1d,
+ notch_filter)
+
+class SpeedPerturb(nn.Layer):
+ """Slightly speed up or slow down an audio signal.
+ Resample the audio signal at a rate that is similar to the original rate,
+ to achieve a slightly slower or slightly faster signal. This technique is
+ outlined in the paper: "Audio Augmentation for Speech Recognition"
+ Arguments
+ ---------
+ orig_freq : int
+ The frequency of the original signal.
+ speeds : list
+ The speeds that the signal should be changed to, as a percentage of the
+ original signal (i.e. `speeds` is divided by 100 to get a ratio).
+ perturb_prob : float
+ The chance that the batch will be speed-
+ perturbed. By default, every batch is perturbed.
+ Example
+ -------
+ >>> from speechbrain.dataio.dataio import read_audio
+ >>> signal = read_audio('tests/samples/single-mic/example1.wav')
+ >>> perturbator = SpeedPerturb(orig_freq=16000, speeds=[90])
+ >>> clean = signal.unsqueeze(0)
+ >>> perturbed = perturbator(clean)
+ >>> clean.shape
+ paddle.shape([1, 52173])
+ >>> perturbed.shape
+ paddle.shape([1, 46956])
+ """
+
+ def __init__(
+ self, orig_freq, speeds=[90, 100, 110], perturb_prob=1.0,
+ ):
+ super().__init__()
+ self.orig_freq = orig_freq
+ self.speeds = speeds
+ self.perturb_prob = perturb_prob
+
+ # Initialize index of perturbation
+ self.samp_index = 0
+
+ # Initialize resamplers
+ self.resamplers = []
+ for speed in self.speeds:
+ config = {
+ "orig_freq": self.orig_freq,
+ "new_freq": self.orig_freq * speed // 100,
+ }
+ self.resamplers.append(Resample(**config))
+
+ def forward(self, waveform):
+ """
+ Arguments
+ ---------
+ waveforms : tensor
+ Shape should be `[batch, time]` or `[batch, time, channels]`.
+ lengths : tensor
+ Shape should be a single dimension, `[batch]`.
+ Returns
+ -------
+ Tensor of shape `[batch, time]` or `[batch, time, channels]`.
+ """
+
+ # Don't perturb (return early) 1-`perturb_prob` portion of the batches
+        if paddle.rand([1]) > self.perturb_prob:
+            return waveform.clone()
+
+ # Perform a random perturbation
+ self.samp_index = paddle.randint(len(self.speeds), shape=(1,))[0]
+ perturbed_waveform = self.resamplers[self.samp_index](waveform)
+
+ return perturbed_waveform
+
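+# Worked example (illustrative numbers): with orig_freq=16000 and speed=90,
+# SpeedPerturb builds a resampler with new_freq = 16000 * 90 // 100 = 14400 Hz,
+# so a 52173-sample clip shrinks to roughly 52173 * 14400 / 16000 ~= 46956
+# samples, matching the shapes in the SpeedPerturb docstring above.
+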
+class Resample(nn.Layer):
+ """This class resamples an audio signal using sinc-based interpolation.
+
+ It is a modification of the `resample` function from torchaudio
+ (https://pytorch.org/audio/stable/tutorials/audio_resampling_tutorial.html)
+
+ Arguments
+ ---------
+ orig_freq : int
+ the sampling frequency of the input signal.
+ new_freq : int
+ the new sampling frequency after this operation is performed.
+ lowpass_filter_width : int
+ Controls the sharpness of the filter, larger numbers result in a
+ sharper filter, but they are less efficient. Values from 4 to 10 are allowed.
+ """
+ def __init__(
+ self, orig_freq=16000, new_freq=16000, lowpass_filter_width=6,
+ ):
+ super().__init__()
+ self.orig_freq = orig_freq
+ self.new_freq = new_freq
+ self.lowpass_filter_width = lowpass_filter_width
+
+ # Compute rate for striding
+ self._compute_strides()
+ assert self.orig_freq % self.conv_stride == 0
+ assert self.new_freq % self.conv_transpose_stride == 0
+
+ def _compute_strides(self):
+ """Compute the phases in polyphase filter.
+
+ (almost directly from torchaudio.compliance.kaldi)
+ """
+
+ # Compute new unit based on ratio of in/out frequencies
+ base_freq = math.gcd(self.orig_freq, self.new_freq)
+ input_samples_in_unit = self.orig_freq // base_freq
+ self.output_samples = self.new_freq // base_freq
+
+ # Store the appropriate stride based on the new units
+ self.conv_stride = input_samples_in_unit
+ self.conv_transpose_stride = self.output_samples
+
+ def forward(self, waveforms):
+ """
+ Arguments
+ ---------
+ waveforms : tensor
+ Shape should be `[batch, time]` or `[batch, time, channels]`.
+ lengths : tensor
+ Shape should be a single dimension, `[batch]`.
+
+ Returns
+ -------
+ Tensor of shape `[batch, time]` or `[batch, time, channels]`.
+ """
+
+ if not hasattr(self, "first_indices"):
+ self._indices_and_weights(waveforms)
+
+ # Don't do anything if the frequencies are the same
+ if self.orig_freq == self.new_freq:
+ return waveforms
+ unsqueezed = False
+ if len(waveforms.shape) == 2:
+ waveforms = waveforms.unsqueeze(1)
+ unsqueezed = True
+ elif len(waveforms.shape) == 3:
+ waveforms = waveforms.transpose([0, 2, 1])
+ else:
+ raise ValueError("Input must be 2 or 3 dimensions")
+
+ # Do resampling
+ resampled_waveform = self._perform_resample(waveforms)
+
+ if unsqueezed:
+ resampled_waveform = resampled_waveform.squeeze(1)
+ else:
+ resampled_waveform = resampled_waveform.transpose([0, 2, 1])
+
+ return resampled_waveform
+
+ def _perform_resample(self, waveforms):
+ """Resamples the waveform at the new frequency.
+
+ This matches Kaldi's OfflineFeatureTpl ResampleWaveform which uses a
+ LinearResample (resample a signal at linearly spaced intervals to
+ up/downsample a signal). LinearResample (LR) means that the output
+        signal is at linearly spaced intervals (i.e. the output signal has a
+ frequency of `new_freq`). It uses sinc/bandlimited interpolation to
+ upsample/downsample the signal.
+
+ (almost directly from torchaudio.compliance.kaldi)
+
+ https://ccrma.stanford.edu/~jos/resample/
+ Theory_Ideal_Bandlimited_Interpolation.html
+
+ https://github.com/kaldi-asr/kaldi/blob/master/src/feat/resample.h#L56
+
+ Arguments
+ ---------
+ waveforms : tensor
+ The batch of audio signals to resample.
+
+ Returns
+ -------
+ The waveforms at the new frequency.
+ """
+
+ # Compute output size and initialize
+ batch_size, num_channels, wave_len = waveforms.shape
+ window_size = self.weights.shape[1]
+ tot_output_samp = self._output_samples(wave_len)
+ resampled_waveform = paddle.zeros(
+ (batch_size, num_channels, tot_output_samp)
+ )
+
+ # eye size: (num_channels, num_channels, 1)
+ eye = paddle.eye(num_channels).unsqueeze(2)
+
+ # Iterate over the phases in the polyphase filter
+ for i in range(self.first_indices.shape[0]):
+ wave_to_conv = waveforms
+ first_index = int(self.first_indices[i].item())
+ if first_index >= 0:
+ # trim the signal as the filter will not be applied
+ # before the first_index
+ wave_to_conv = wave_to_conv[..., first_index:]
+
+ # pad the right of the signal to allow partial convolutions
+ # meaning compute values for partial windows (e.g. end of the
+ # window is outside the signal length)
+ max_index = (tot_output_samp - 1) // self.output_samples
+ end_index = max_index * self.conv_stride + window_size
+ current_wave_len = wave_len - first_index
+ right_padding = max(0, end_index + 1 - current_wave_len)
+ left_padding = max(0, -first_index)
+ wave_to_conv = paddle.nn.functional.pad(
+ wave_to_conv, (left_padding, right_padding), data_format='NCL'
+ )
+ conv_wave = paddle.nn.functional.conv1d(
+ x=wave_to_conv,
+                weight=paddle.tile(self.weights[i], [num_channels, 1, 1]),
+ stride=self.conv_stride,
+ groups=num_channels,
+ )
+
+ # we want conv_wave[:, i] to be at
+ # output[:, i + n*conv_transpose_stride]
+ dilated_conv_wave = paddle.nn.functional.conv1d_transpose(
+ conv_wave, eye, stride=self.conv_transpose_stride
+ )
+
+ # pad dilated_conv_wave so it reaches the output length if needed.
+ left_padding = i
+ previous_padding = left_padding + dilated_conv_wave.shape[-1]
+ right_padding = max(0, tot_output_samp - previous_padding)
+ dilated_conv_wave = paddle.nn.functional.pad(
+ dilated_conv_wave, (left_padding, right_padding), data_format='NCL'
+ )
+ dilated_conv_wave = dilated_conv_wave[..., :tot_output_samp]
+
+ resampled_waveform += dilated_conv_wave
+
+ return resampled_waveform
+
+ def _output_samples(self, input_num_samp):
+ """Based on LinearResample::GetNumOutputSamples.
+
+ LinearResample (LR) means that the output signal is at
+ linearly spaced intervals (i.e the output signal has a
+        linearly spaced intervals (i.e. the output signal has a
+ interpolation to upsample/downsample the signal.
+
+ (almost directly from torchaudio.compliance.kaldi)
+
+ Arguments
+ ---------
+ input_num_samp : int
+ The number of samples in each example in the batch.
+
+ Returns
+ -------
+ Number of samples in the output waveform.
+ """
+
+ # For exact computation, we measure time in "ticks" of 1.0 / tick_freq,
+ # where tick_freq is the least common multiple of samp_in and
+ # samp_out.
+ samp_in = int(self.orig_freq)
+ samp_out = int(self.new_freq)
+
+ tick_freq = abs(samp_in * samp_out) // math.gcd(samp_in, samp_out)
+ ticks_per_input_period = tick_freq // samp_in
+
+ # work out the number of ticks in the time interval
+ # [ 0, input_num_samp/samp_in ).
+ interval_length = input_num_samp * ticks_per_input_period
+ if interval_length <= 0:
+ return 0
+ ticks_per_output_period = tick_freq // samp_out
+
+ # Get the last output-sample in the closed interval,
+ # i.e. replacing [ ) with [ ]. Note: integer division rounds down.
+ # See http://en.wikipedia.org/wiki/Interval_(mathematics) for an
+ # explanation of the notation.
+ last_output_samp = interval_length // ticks_per_output_period
+
+ # We need the last output-sample in the open interval, so if it
+ # takes us to the end of the interval exactly, subtract one.
+ if last_output_samp * ticks_per_output_period == interval_length:
+ last_output_samp -= 1
+
+ # First output-sample index is zero, so the number of output samples
+ # is the last output-sample plus one.
+ num_output_samp = last_output_samp + 1
+
+ return num_output_samp
+
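+    # Worked example for _output_samples above (illustrative numbers): with
+    # orig_freq=16000, new_freq=14400 and input_num_samp=52173, tick_freq is
+    # lcm(16000, 14400) = 144000, so ticks_per_input_period = 9 and
+    # ticks_per_output_period = 10; the interval holds 52173 * 9 = 469557
+    # ticks, giving 469557 // 10 + 1 = 46956 output samples.
+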
+ def _indices_and_weights(self, waveforms):
+ """Based on LinearResample::SetIndexesAndWeights
+
+ Retrieves the weights for resampling as well as the indices in which
+ they are valid. LinearResample (LR) means that the output signal is at
+        linearly spaced intervals (i.e. the output signal has a frequency
+ of ``new_freq``). It uses sinc/bandlimited interpolation to
+ upsample/downsample the signal.
+
+ Returns
+ -------
+ - the place where each filter should start being applied
+ - the filters to be applied to the signal for resampling
+ """
+
+ # Lowpass filter frequency depends on smaller of two frequencies
+ min_freq = min(self.orig_freq, self.new_freq)
+ lowpass_cutoff = 0.99 * 0.5 * min_freq
+
+ assert lowpass_cutoff * 2 <= min_freq
+ window_width = self.lowpass_filter_width / (2.0 * lowpass_cutoff)
+
+ assert lowpass_cutoff < min(self.orig_freq, self.new_freq) / 2
+ output_t = paddle.arange(
+ start=0.0, end=self.output_samples
+ )
+ output_t /= self.new_freq
+ min_t = output_t - window_width
+ max_t = output_t + window_width
+
+ min_input_index = paddle.ceil(min_t * self.orig_freq)
+ max_input_index = paddle.floor(max_t * self.orig_freq)
+ num_indices = max_input_index - min_input_index + 1
+
+ max_weight_width = num_indices.max()
+ j = paddle.arange(max_weight_width)
+ input_index = min_input_index.unsqueeze(1) + j.unsqueeze(0)
+ delta_t = (input_index / self.orig_freq) - output_t.unsqueeze(1)
+
+ weights = paddle.zeros_like(delta_t)
+
+ inside_window_indices = delta_t.abs() < (window_width)
+ # raised-cosine (Hanning) window with width `window_width`
+ weights[inside_window_indices] = 0.5 * (
+ 1
+ + paddle.cos(
+ 2
+ * math.pi
+ * lowpass_cutoff
+ / self.lowpass_filter_width
+ * delta_t[inside_window_indices]
+ )
+ )
+ t_eq_zero_indices = delta_t == 0.0
+ t_not_eq_zero_indices = ~t_eq_zero_indices
+
+ # sinc filter function
+ weights[t_not_eq_zero_indices] *= paddle.sin(
+ 2 * math.pi * lowpass_cutoff * delta_t[t_not_eq_zero_indices]
+ ) / (math.pi * delta_t[t_not_eq_zero_indices])
+
+ # limit of the function at t = 0
+ weights[t_eq_zero_indices] *= 2 * lowpass_cutoff
+
+ # size (output_samples, max_weight_width)
+ weights /= self.orig_freq
+
+ self.first_indices = min_input_index
+ self.weights = weights
+
+
+class DropFreq(nn.Layer):
+ """This class drops a random frequency from the signal.
+ The purpose of this class is to teach models to learn to rely on all parts
+ of the signal, not just a few frequency bands.
+ Arguments
+ ---------
+ drop_freq_low : float
+ The low end of frequencies that can be dropped,
+ as a fraction of the sampling rate / 2.
+ drop_freq_high : float
+ The high end of frequencies that can be
+ dropped, as a fraction of the sampling rate / 2.
+ drop_count_low : int
+ The low end of number of frequencies that could be dropped.
+ drop_count_high : int
+ The high end of number of frequencies that could be dropped.
+ drop_width : float
+ The width of the frequency band to drop, as
+ a fraction of the sampling_rate / 2.
+ drop_prob : float
+ The probability that the batch of signals will have a frequency
+ dropped. By default, every batch has frequencies dropped.
+ Example
+ -------
+ >>> from speechbrain.dataio.dataio import read_audio
+ >>> dropper = DropFreq()
+ >>> signal = read_audio('tests/samples/single-mic/example1.wav')
+ >>> dropped_signal = dropper(signal.unsqueeze(0))
+ """
+
+ def __init__(
+ self,
+ drop_freq_low=1e-14,
+ drop_freq_high=1,
+ drop_count_low=1,
+ drop_count_high=2,
+ drop_width=0.05,
+ drop_prob=1,
+ ):
+ super().__init__()
+ self.drop_freq_low = drop_freq_low
+ self.drop_freq_high = drop_freq_high
+ self.drop_count_low = drop_count_low
+ self.drop_count_high = drop_count_high
+ self.drop_width = drop_width
+ self.drop_prob = drop_prob
+
+ def forward(self, waveforms):
+ """
+ Arguments
+ ---------
+ waveforms : tensor
+ Shape should be `[batch, time]` or `[batch, time, channels]`.
+ Returns
+ -------
+ Tensor of shape `[batch, time]` or `[batch, time, channels]`.
+ """
+
+ # Don't drop (return early) 1-`drop_prob` portion of the batches
+ dropped_waveform = waveforms.clone()
+ if paddle.rand([1]) > self.drop_prob:
+ return dropped_waveform
+
+ # Add channels dimension
+ if len(waveforms.shape) == 2:
+ dropped_waveform = dropped_waveform.unsqueeze(-1)
+
+ # Pick number of frequencies to drop
+ drop_count = paddle.randint(
+ low=self.drop_count_low, high=self.drop_count_high + 1, shape=(1,),
+ )
+
+ # Pick a frequency to drop
+ drop_range = self.drop_freq_high - self.drop_freq_low
+ drop_frequency = (
+ paddle.rand(drop_count) * drop_range + self.drop_freq_low
+ )
+ # Filter parameters
+ filter_length = 101
+ pad = filter_length // 2
+
+ # Start with delta function
+ drop_filter = paddle.zeros([1, filter_length, 1])
+ drop_filter[0, pad, 0] = 1
+ # Subtract each frequency
+ for frequency in drop_frequency:
+ notch_kernel = notch_filter(
+ frequency, filter_length, self.drop_width,
+ )
+ drop_filter = convolve1d(drop_filter, notch_kernel, pad)
+
+ # Apply filter
+ dropped_waveform = convolve1d(dropped_waveform, drop_filter, pad)
+
+ # Remove channels dimension if added
+ return dropped_waveform.squeeze(-1)
+
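+# Hedged usage sketch (hypothetical helper): drop one or two random narrow
+# frequency bands from a batch of waveforms; the output keeps the input shape.
+def _demo_drop_freq():
+    dropper = DropFreq(drop_count_low=1, drop_count_high=2)
+    signal = paddle.randn([4, 16000])   # [batch, time]
+    return dropper(signal).shape        # [4, 16000]
+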
+class DropChunk(nn.Layer):
+ """This class drops portions of the input signal.
+    Using `DropChunk` as an augmentation strategy helps a model learn to rely
+ on all parts of the signal, since it can't expect a given part to be
+ present.
+ Arguments
+ ---------
+ drop_length_low : int
+ The low end of lengths for which to set the
+ signal to zero, in samples.
+ drop_length_high : int
+ The high end of lengths for which to set the
+ signal to zero, in samples.
+ drop_count_low : int
+ The low end of number of times that the signal
+ can be dropped to zero.
+ drop_count_high : int
+ The high end of number of times that the signal
+ can be dropped to zero.
+ drop_start : int
+ The first index for which dropping will be allowed.
+ drop_end : int
+ The last index for which dropping will be allowed.
+ drop_prob : float
+ The probability that the batch of signals will
+ have a portion dropped. By default, every batch
+ has portions dropped.
+ noise_factor : float
+ The factor relative to average amplitude of an utterance
+ to use for scaling the white noise inserted. 1 keeps
+ the average amplitude the same, while 0 inserts all 0's.
+ Example
+ -------
+ >>> from speechbrain.dataio.dataio import read_audio
+ >>> dropper = DropChunk(drop_start=100, drop_end=200, noise_factor=0.)
+ >>> signal = read_audio('tests/samples/single-mic/example1.wav')
+ >>> signal = signal.unsqueeze(0) # [batch, time, channels]
+ >>> length = paddle.ones([1])
+ >>> dropped_signal = dropper(signal, length)
+ >>> float(dropped_signal[:, 150])
+ 0.0
+ """
+
+ def __init__(
+ self,
+ drop_length_low=100,
+ drop_length_high=1000,
+ drop_count_low=1,
+ drop_count_high=10,
+ drop_start=0,
+ drop_end=None,
+ drop_prob=1,
+ noise_factor=0.0,
+ ):
+ super().__init__()
+ self.drop_length_low = drop_length_low
+ self.drop_length_high = drop_length_high
+ self.drop_count_low = drop_count_low
+ self.drop_count_high = drop_count_high
+ self.drop_start = drop_start
+ self.drop_end = drop_end
+ self.drop_prob = drop_prob
+ self.noise_factor = noise_factor
+
+ # Validate low < high
+ if drop_length_low > drop_length_high:
+ raise ValueError("Low limit must not be more than high limit")
+ if drop_count_low > drop_count_high:
+ raise ValueError("Low limit must not be more than high limit")
+
+ # Make sure the length doesn't exceed end - start
+ if drop_end is not None and drop_end >= 0:
+ if drop_start > drop_end:
+ raise ValueError("Low limit must not be more than high limit")
+
+ drop_range = drop_end - drop_start
+ self.drop_length_low = min(drop_length_low, drop_range)
+ self.drop_length_high = min(drop_length_high, drop_range)
+
+ def forward(self, waveforms, lengths):
+ """
+ Arguments
+ ---------
+ waveforms : tensor
+ Shape should be `[batch, time]` or `[batch, time, channels]`.
+ lengths : tensor
+ Shape should be a single dimension, `[batch]`.
+ Returns
+ -------
+ Tensor of shape `[batch, time]` or
+ `[batch, time, channels]`
+ """
+
+ # Reading input list
+        lengths = (lengths * waveforms.shape[1]).astype('int64')
+ batch_size = waveforms.shape[0]
+ dropped_waveform = waveforms.clone()
+
+ # Don't drop (return early) 1-`drop_prob` portion of the batches
+ if paddle.rand([1]) > self.drop_prob:
+ return dropped_waveform
+
+ # Store original amplitude for computing white noise amplitude
+ clean_amplitude = compute_amplitude(waveforms, lengths.unsqueeze(1))
+
+ # Pick a number of times to drop
+ drop_times = paddle.randint(
+ low=self.drop_count_low,
+ high=self.drop_count_high + 1,
+ shape=(batch_size,),
+ )
+
+ # Iterate batch to set mask
+ for i in range(batch_size):
+ if drop_times[i] == 0:
+ continue
+
+ # Pick lengths
+ length = paddle.randint(
+ low=self.drop_length_low,
+ high=self.drop_length_high + 1,
+ shape=(drop_times[i],),
+ )
+
+ # Compute range of starting locations
+ start_min = self.drop_start
+ if start_min < 0:
+ start_min += lengths[i]
+ start_max = self.drop_end
+ if start_max is None:
+ start_max = lengths[i]
+ if start_max < 0:
+ start_max += lengths[i]
+ start_max = max(0, start_max - length.max())
+
+ # Pick starting locations
+ start = paddle.randint(
+ low=start_min, high=start_max + 1, shape=(drop_times[i],),
+ )
+
+ end = start + length
+
+ # Update waveform
+ if not self.noise_factor:
+ for j in range(drop_times[i]):
+ dropped_waveform[i, start[j] : end[j]] = 0.0
+ else:
+ # Uniform distribution of -2 to +2 * avg amplitude should
+ # preserve the average for normalization
+ noise_max = 2 * clean_amplitude[i] * self.noise_factor
+ for j in range(drop_times[i]):
+ # zero-center the noise distribution
+ noise_vec = paddle.rand([length[j]])
+ noise_vec = 2 * noise_max * noise_vec - noise_max
+ dropped_waveform[i, start[j] : end[j]] = noise_vec
+
+ return dropped_waveform
+
+
+class TimeDomainSpecAugment(nn.Layer):
+ """A time-domain approximation of the SpecAugment algorithm.
+
+ This augmentation module implements three augmentations in
+ the time-domain.
+
+ 1. Drop chunks of the audio (zero amplitude or white noise)
+ 2. Drop frequency bands (with band-drop filters)
+    3. Speed perturbation (via resampling to slightly different rate)
+
+ Arguments
+ ---------
+ perturb_prob : float from 0 to 1
+ The probability that a batch will have speed perturbation applied.
+ drop_freq_prob : float from 0 to 1
+ The probability that a batch will have frequencies dropped.
+ drop_chunk_prob : float from 0 to 1
+ The probability that a batch will have chunks dropped.
+ speeds : list of ints
+ A set of different speeds to use to perturb each batch.
+ See ``speechbrain.processing.speech_augmentation.SpeedPerturb``
+ sample_rate : int
+ Sampling rate of the input waveforms.
+ drop_freq_count_low : int
+ Lowest number of frequencies that could be dropped.
+ drop_freq_count_high : int
+ Highest number of frequencies that could be dropped.
+ drop_chunk_count_low : int
+ Lowest number of chunks that could be dropped.
+ drop_chunk_count_high : int
+ Highest number of chunks that could be dropped.
+ drop_chunk_length_low : int
+ Lowest length of chunks that could be dropped.
+ drop_chunk_length_high : int
+ Highest length of chunks that could be dropped.
+ drop_chunk_noise_factor : float
+ The noise factor used to scale the white noise inserted, relative to
+ the average amplitude of the utterance. Default 0 (no noise inserted).
+
+ Example
+ -------
+ >>> inputs = paddle.randn([10, 16000])
+ >>> feature_maker = TimeDomainSpecAugment(speeds=[80])
+    >>> feats = feature_maker(inputs, paddle.ones([10]))
+ >>> feats.shape
+ paddle.shape([10, 12800])
+ """
+
+ def __init__(
+ self,
+ perturb_prob=1.0,
+ drop_freq_prob=1.0,
+ drop_chunk_prob=1.0,
+ speeds=[95, 100, 105],
+ sample_rate=16000,
+ drop_freq_count_low=0,
+ drop_freq_count_high=3,
+ drop_chunk_count_low=0,
+ drop_chunk_count_high=5,
+ drop_chunk_length_low=1000,
+ drop_chunk_length_high=2000,
+ drop_chunk_noise_factor=0,
+ ):
+ super().__init__()
+ self.speed_perturb = SpeedPerturb(
+ perturb_prob=perturb_prob, orig_freq=sample_rate, speeds=speeds
+ )
+ self.drop_freq = DropFreq(
+ drop_prob=drop_freq_prob,
+ drop_count_low=drop_freq_count_low,
+ drop_count_high=drop_freq_count_high,
+ )
+ self.drop_chunk = DropChunk(
+ drop_prob=drop_chunk_prob,
+ drop_count_low=drop_chunk_count_low,
+ drop_count_high=drop_chunk_count_high,
+ drop_length_low=drop_chunk_length_low,
+ drop_length_high=drop_chunk_length_high,
+ noise_factor=drop_chunk_noise_factor,
+ )
+
+ def forward(self, waveforms, lengths):
+ """Returns the distorted waveforms.
+
+ Arguments
+ ---------
+ waveforms : tensor
+ The waveforms to distort
+ """
+ # Augmentation
+ with paddle.no_grad():
+ waveforms = self.speed_perturb(waveforms)
+ waveforms = self.drop_freq(waveforms)
+ waveforms = self.drop_chunk(waveforms, lengths)
+ return waveforms
\ No newline at end of file
diff --git a/paddlespeech/s2t/models/wav2vec2/wav2vec2_ASR.py b/paddlespeech/s2t/models/wav2vec2/wav2vec2_ASR.py
new file mode 100644
index 000000000..6c8b0ee4c
--- /dev/null
+++ b/paddlespeech/s2t/models/wav2vec2/wav2vec2_ASR.py
@@ -0,0 +1,247 @@
+import numpy as np
+import os
+import sys
+
+from typing import Dict
+from typing import List
+from typing import Optional
+from typing import Tuple
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddlespeech.s2t.models.wav2vec2.modules.modeling_wav2vec2 import Wav2Vec2ConfigPure
+from paddlespeech.s2t.models.wav2vec2.modules.modeling_wav2vec2 import Wav2Vec2Model
+from paddlespeech.s2t.modules.mask import make_pad_mask
+from paddlespeech.s2t.utils.utility import log_add
+
+from collections import defaultdict
+
+from paddlespeech.s2t.models.wav2vec2.modules.VanillaNN import VanillaNN
+from paddlespeech.s2t.modules.ctc import CTCDecoderBase as CTC
+from paddlespeech.s2t.utils.ctc_utils import remove_duplicates_and_blank
+from paddlespeech.s2t.utils.log import Log
+from yacs.config import CfgNode
+
+logger = Log(__name__).getlog()
+
+class Wav2vec2ASR(nn.Layer):
+ def __init__(self, config: dict):
+ super().__init__()
+
+ wav2vec2_config = Wav2Vec2ConfigPure(config)
+ wav2vec2 = Wav2Vec2Model(wav2vec2_config)
+ model_dict = paddle.load(config.wav2vec2_params_path)
+ wav2vec2.set_state_dict(model_dict)
+ self.normalize_wav = config.normalize_wav
+ self.output_norm = config.output_norm
+ if config.freeze_wav2vec2:
+ wav2vec2.eval()
+ for parm in wav2vec2.parameters():
+ parm.trainable = False
+ self.wav2vec2 = wav2vec2
+ self.enc = VanillaNN(input_shape=[None,None,wav2vec2_config.hidden_size], activation=nn.LeakyReLU, dnn_blocks=config.dnn_blocks, dnn_neurons=config.dnn_neurons)
+ self.ctc = CTC(odim=config.output_dim, enc_n_units=config.dnn_neurons, blank_id=config.blank_id, dropout_rate=config.ctc_dropout_rate, reduction=True)
+
+ def forward(self, wav, wavs_lens_rate, target, target_lens_rate):
+ if self.normalize_wav:
+ wav = F.layer_norm(wav, wav.shape[1:])
+ # Extract wav2vec output
+ out = self.wav2vec2(wav)[0]
+ # We normalize the output if required
+ if self.output_norm:
+ out = F.layer_norm(out, out.shape[1:])
+ feats = out
+
+ x = self.enc(feats)
+ x_lens = (wavs_lens_rate * x.shape[1]).round().astype(paddle.int64)
+ target_lens = (target_lens_rate * target.shape[1]).round().astype(paddle.int64)
+
+ ctc_loss = self.ctc(x, x_lens, target, target_lens)
+ return ctc_loss
+
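+    # Note on the forward pass above (illustrative numbers): wavs_lens_rate and
+    # target_lens_rate are fractions of the padded batch length (e.g. a 3.2 s
+    # utterance in a 4.0 s padded batch has rate 0.8), so multiplying them by
+    # x.shape[1] and target.shape[1] recovers per-utterance frame and token
+    # counts for the CTC loss.
+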
+ @paddle.no_grad()
+ def decode(self,
+ feats: paddle.Tensor,
+ text_feature: Dict[str, int],
+ decoding_method: str,
+ beam_size: int):
+ batch_size = feats.shape[0]
+        if decoding_method == 'ctc_prefix_beam_search' and batch_size > 1:
+ logger.error(
+ f'decoding mode {decoding_method} must be running with batch_size == 1'
+ )
+ logger.error(f"current batch_size is {batch_size}")
+ sys.exit(1)
+
+ if decoding_method == 'ctc_greedy_search':
+ hyps = self.ctc_greedy_search(feats)
+ res = [text_feature.defeaturize(hyp) for hyp in hyps]
+ res_tokenids = [hyp for hyp in hyps]
+ # ctc_prefix_beam_search and attention_rescoring only return one
+ # result in List[int], change it to List[List[int]] for compatible
+ # with other batch decoding mode
+ elif decoding_method == 'ctc_prefix_beam_search':
+ assert feats.shape[0] == 1
+ hyp = self.ctc_prefix_beam_search(
+ feats,
+ beam_size)
+ res = [text_feature.defeaturize(hyp)]
+ res_tokenids = [hyp]
+ else:
+ raise ValueError(f"wav2vec2 not support decoding method: {decoding_method}")
+
+ return res, res_tokenids
+
+ @classmethod
+ def from_config(cls, config):
+ model = cls(config)
+ return model
+
+ def ctc_greedy_search(
+ self, wav) -> List[List[int]]:
+        """ Apply CTC greedy search
+        Args:
+            wav (paddle.Tensor): audio batch of shape (batch, max_len, 1)
+        Returns:
+            List[List[int]]: best path result
+        """
+ batch_size = wav.shape[0]
+ wav = wav[:,:,0]
+ if self.normalize_wav:
+ wav = F.layer_norm(wav, wav.shape[1:])
+ # Extract wav2vec output
+ out = self.wav2vec2(wav)[0]
+ # We normalize the output if required
+ if self.output_norm:
+ out = F.layer_norm(out, out.shape[1:])
+ feats = out
+ x = self.enc(feats)
+ x_lens = x.shape[1]
+ ctc_probs = self.ctc.log_softmax(x) # (B, maxlen, vocab_size)
+ topk_prob, topk_index = ctc_probs.topk(1, axis=2) # (B, maxlen, 1)
+        topk_index = topk_index.reshape([batch_size, x_lens])  # (B, maxlen)
+
+ hyps = [hyp.tolist() for hyp in topk_index]
+ hyps = [remove_duplicates_and_blank(hyp) for hyp in hyps]
+ return hyps
+
+    def _ctc_prefix_beam_search(
+            self, wav, beam_size,
+            blank_id: int=0, ) -> List[Tuple[Tuple[int, ...], float]]:
+        """ CTC prefix beam search inner implementation
+        Args:
+            wav (paddle.Tensor): audio batch of shape (batch, max_len, 1),
+                batch size must be 1
+            beam_size (int): beam size for beam search
+            blank_id (int): id of the CTC blank token
+        Returns:
+            List[Tuple[Tuple[int, ...], float]]: nbest results, each entry is
+                (token id prefix, log probability)
+        """
+ wav = wav[:,:,0]
+
+ if self.normalize_wav:
+ wav = F.layer_norm(wav, wav.shape[1:])
+ # Extract wav2vec output
+ out = self.wav2vec2(wav)[0]
+ # We normalize the output if required
+ if self.output_norm:
+ out = F.layer_norm(out, out.shape[1:])
+ feats = out
+
+ x = self.enc(feats)
+ maxlen = x.shape[1]
+ ctc_probs = self.ctc.log_softmax(x) # (1, maxlen, vocab_size)
+ ctc_probs = ctc_probs.squeeze(0)
+
+ # cur_hyps: (prefix, (blank_ending_score, none_blank_ending_score))
+ # blank_ending_score and none_blank_ending_score in ln domain
+ cur_hyps = [(tuple(), (0.0, -float('inf')))]
+ # 2. CTC beam search step by step
+ for t in range(0, maxlen):
+ logp = ctc_probs[t] # (vocab_size,)
+ # key: prefix, value (pb, pnb), default value(-inf, -inf)
+ next_hyps = defaultdict(lambda: (-float('inf'), -float('inf')))
+ # 2.1 First beam prune: select topk best
+ top_k_logp, top_k_index = logp.topk(beam_size) # (beam_size,)
+ for s in top_k_index:
+ s = s.item()
+ ps = logp[s].item()
+ for prefix, (pb, pnb) in cur_hyps:
+ last = prefix[-1] if len(prefix) > 0 else None
+ if s == blank_id: # blank
+ n_pb, n_pnb = next_hyps[prefix]
+ n_pb = log_add([n_pb, pb + ps, pnb + ps])
+ next_hyps[prefix] = (n_pb, n_pnb)
+ elif s == last:
+ # Update *ss -> *s;
+ n_pb, n_pnb = next_hyps[prefix]
+ n_pnb = log_add([n_pnb, pnb + ps])
+ next_hyps[prefix] = (n_pb, n_pnb)
+ # Update *s-s -> *ss, - is for blank
+ n_prefix = prefix + (s, )
+ n_pb, n_pnb = next_hyps[n_prefix]
+ n_pnb = log_add([n_pnb, pb + ps])
+ next_hyps[n_prefix] = (n_pb, n_pnb)
+ else:
+ n_prefix = prefix + (s, )
+ n_pb, n_pnb = next_hyps[n_prefix]
+ n_pnb = log_add([n_pnb, pb + ps, pnb + ps])
+ next_hyps[n_prefix] = (n_pb, n_pnb)
+
+ # 2.2 Second beam prune
+ next_hyps = sorted(
+ next_hyps.items(),
+ key=lambda x: log_add(list(x[1])),
+ reverse=True)
+ cur_hyps = next_hyps[:beam_size]
+
+ hyps = [(y[0], log_add([y[1][0], y[1][1]])) for y in cur_hyps]
+ return hyps
+
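+    # Note on _ctc_prefix_beam_search above: every prefix keeps two scores in
+    # log space, pb (paths ending in blank) and pnb (paths ending in a
+    # non-blank). A repeated symbol only extends the prefix when a blank
+    # separates the repeats; the three update branches implement exactly this
+    # CTC collapsing rule before the per-step beam pruning.
+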
+ def ctc_prefix_beam_search(self, wav, beam_size) -> List[int]:
+        """ Apply CTC prefix beam search
+        Args:
+            wav (paddle.Tensor): audio batch of shape (batch, max_len, 1),
+                batch size must be 1
+            beam_size (int): beam size for beam search
+        Returns:
+            List[int]: the 1-best token id sequence
+        """
+ hyps = self._ctc_prefix_beam_search(
+ wav, beam_size)
+ return hyps[0][0]
+
+ # @jit.to_static
+ # def ctc_activation(self, xs: paddle.Tensor) -> paddle.Tensor:
+ # """ Export interface for c++ call, apply linear transform and log
+ # softmax before ctc
+ # Args:
+ # xs (paddle.Tensor): encoder output, (B, T, D)
+ # Returns:
+ # paddle.Tensor: activation before ctc
+ # """
+ # return self.ctc.log_softmax(xs)
+
From 19180d359d6db8d516e163554eb85a5b4d6c3bf1 Mon Sep 17 00:00:00 2001
From: tianhao zhang <15600919271@163.com>
Date: Mon, 10 Oct 2022 12:12:59 +0000
Subject: [PATCH 2/5] format wav2vec2 demo
---
.flake8 | 2 +-
examples/librispeech/README.md | 2 +-
paddlespeech/audio/transform/spectrogram.py | 30 +
.../audio/transform/transformation.py | 1 +
.../s2t/exps/wav2vec2/bin/test_wav.py | 12 +-
paddlespeech/s2t/exps/wav2vec2/model.py | 63 +-
.../s2t/models/wav2vec2/modules/VanillaNN.py | 15 +-
.../models/wav2vec2/modules/activations.py | 23 +-
.../s2t/models/wav2vec2/modules/containers.py | 12 +-
.../s2t/models/wav2vec2/modules/linear.py | 17 +-
.../wav2vec2/modules/modeling_outputs.py | 38 +-
.../wav2vec2/modules/modeling_wav2vec2.py | 540 ++++++++++--------
.../wav2vec2/processing/signal_processing.py | 36 +-
.../processing/speech_augmentation.py | 167 +++---
.../s2t/models/wav2vec2/wav2vec2_ASR.py | 85 ++-
15 files changed, 558 insertions(+), 485 deletions(-)
diff --git a/.flake8 b/.flake8
index 6b50de7ed..ae15ad2be 100644
--- a/.flake8
+++ b/.flake8
@@ -33,7 +33,7 @@ filename =
# Specify a list of codes to ignore.
ignore =
W503
- E252,E262,E127,E265,E126,E266,E241,E261,E128,E125
+ E252,E262,E127,E265,E126,E266,E241,E261,E128,E125,E129
W291,W293,W605
E203,E305,E402,E501,E721,E741,F403,F405,F821,F841,F999,W503,W504,C408,E302,W291,E303,
# shebang has extra meaning in fbcode lints, so I think it's not worth trying
diff --git a/examples/librispeech/README.md b/examples/librispeech/README.md
index 74441fd09..9fcbde97a 100644
--- a/examples/librispeech/README.md
+++ b/examples/librispeech/README.md
@@ -3,7 +3,7 @@
* asr0 - deepspeech2 Streaming/Non-Streaming
* asr1 - transformer/conformer Streaming/Non-Streaming
* asr2 - transformer/conformer Streaming/Non-Streaming with Kaldi feature
-
+* asr3 - wav2vec2ASR, ASR model with pre-trained wav2vec2 and CTC
## Data
| Data Subset | Duration in Seconds |
diff --git a/paddlespeech/audio/transform/spectrogram.py b/paddlespeech/audio/transform/spectrogram.py
index 864f3f994..2e5199394 100644
--- a/paddlespeech/audio/transform/spectrogram.py
+++ b/paddlespeech/audio/transform/spectrogram.py
@@ -382,6 +382,36 @@ class LogMelSpectrogramKaldi():
return mat
+class WavProcess():
+ def __init__(self, dither=0.1):
+ """
+ Args:
+ dither (float): Dithering constant
+
+ Returns:
+ """
+
+ self.dither = dither
+
+ def __call__(self, x, train):
+ """
+ Args:
+ x (np.ndarray): shape (Ti,)
+ train (bool): True, train mode.
+
+ Raises:
+ ValueError: not support (Ti, C)
+
+ Returns:
+ np.ndarray: (T, D)
+ """
+ dither = self.dither if train else 0.0
+ if x.ndim != 1:
+ raise ValueError("Not support x: [Time, Channel]")
+ waveform = np.expand_dims(x, -1)
+ return waveform
+
+
class LogMelSpectrogramKaldi_decay():
def __init__(
self,
diff --git a/paddlespeech/audio/transform/transformation.py b/paddlespeech/audio/transform/transformation.py
index d24d6437c..e2f66dbf2 100644
--- a/paddlespeech/audio/transform/transformation.py
+++ b/paddlespeech/audio/transform/transformation.py
@@ -41,6 +41,7 @@ import_alias = dict(
utterance_cmvn="paddlespeech.audio.transform.cmvn:UtteranceCMVN",
fbank="paddlespeech.audio.transform.spectrogram:LogMelSpectrogram",
spectrogram="paddlespeech.audio.transform.spectrogram:Spectrogram",
+ wav_process="paddlespeech.audio.transform.spectrogram:WavProcess",
stft="paddlespeech.audio.transform.spectrogram:Stft",
istft="paddlespeech.audio.transform.spectrogram:IStft",
stft2fbank="paddlespeech.audio.transform.spectrogram:Stft2LogMelSpectrogram",
diff --git a/paddlespeech/s2t/exps/wav2vec2/bin/test_wav.py b/paddlespeech/s2t/exps/wav2vec2/bin/test_wav.py
index 5306d7f81..3a537bce5 100644
--- a/paddlespeech/s2t/exps/wav2vec2/bin/test_wav.py
+++ b/paddlespeech/s2t/exps/wav2vec2/bin/test_wav.py
@@ -27,6 +27,7 @@ from paddlespeech.s2t.utils.log import Log
from paddlespeech.s2t.utils.utility import UpdateConfig
logger = Log(__name__).getlog()
+
class Wav2vec2Infer():
def __init__(self, config, args):
self.args = args
@@ -34,8 +35,7 @@ class Wav2vec2Infer():
self.audio_file = args.audio_file
self.text_feature = TextFeaturizer(
- unit_type=config.unit_type,
- vocab=config.vocab_filepath)
+ unit_type=config.unit_type, vocab=config.vocab_filepath)
paddle.set_device('gpu' if self.args.ngpu > 0 else 'cpu')
# model
@@ -63,10 +63,10 @@ class Wav2vec2Infer():
xs = paddle.to_tensor(audio, dtype='float32').unsqueeze(axis=0)
decode_config = self.config.decode
result_transcripts, result_tokenids = self.model.decode(
- xs,
- text_feature=self.text_feature,
- decoding_method=decode_config.decoding_method,
- beam_size=decode_config.beam_size)
+ xs,
+ text_feature=self.text_feature,
+ decoding_method=decode_config.decoding_method,
+ beam_size=decode_config.beam_size)
rsl = result_transcripts[0]
utt = Path(self.audio_file).name
logger.info(f"hyp: {utt} {rsl}")
diff --git a/paddlespeech/s2t/exps/wav2vec2/model.py b/paddlespeech/s2t/exps/wav2vec2/model.py
index 3d9c266e7..32cf0b473 100644
--- a/paddlespeech/s2t/exps/wav2vec2/model.py
+++ b/paddlespeech/s2t/exps/wav2vec2/model.py
@@ -18,53 +18,53 @@ import time
from collections import defaultdict
from collections import OrderedDict
from contextlib import nullcontext
-from paddlespeech.s2t.utils import mp_tools
import jsonlines
import numpy as np
import paddle
from paddle import distributed as dist
+
from paddlespeech.s2t.frontend.featurizer import TextFeaturizer
from paddlespeech.s2t.io.dataloader import BatchDataLoader
-from paddlespeech.s2t.io.dataloader import StreamDataLoader
from paddlespeech.s2t.io.dataloader import DataLoaderFactory
-from paddlespeech.s2t.models.wav2vec2.wav2vec2_ASR import Wav2vec2ASR
+from paddlespeech.s2t.io.dataloader import StreamDataLoader
from paddlespeech.s2t.models.wav2vec2.processing.speech_augmentation import TimeDomainSpecAugment
-from paddlespeech.s2t.utils import error_rate
-
+from paddlespeech.s2t.models.wav2vec2.wav2vec2_ASR import Wav2vec2ASR
from paddlespeech.s2t.training.optimizer import OptimizerFactory
from paddlespeech.s2t.training.reporter import ObsScope
from paddlespeech.s2t.training.reporter import report
from paddlespeech.s2t.training.scheduler import LRSchedulerFactory
from paddlespeech.s2t.training.timer import Timer
from paddlespeech.s2t.training.trainer import Trainer
-from paddlespeech.s2t.utils.utility import UpdateConfig
+from paddlespeech.s2t.utils import error_rate
from paddlespeech.s2t.utils import layer_tools
+from paddlespeech.s2t.utils import mp_tools
from paddlespeech.s2t.utils.log import Log
-
-
+from paddlespeech.s2t.utils.utility import UpdateConfig
logger = Log(__name__).getlog()
+
class Wav2Vec2ASRTrainer(Trainer):
def __init__(self, config, args):
super().__init__(config, args)
self.avg_train_loss = 0
+
def train_batch(self, batch_index, batch, msg):
train_conf = self.config
start = time.time()
# forward
utt, wav, wavs_lens, target, target_lens = batch
- wavs_lens_rate = wavs_lens / wav.shape[1]
+ wavs_lens_rate = wavs_lens / wav.shape[1]
target_lens_rate = target_lens / target.shape[1]
- wav = wav[:,:,0]
+ wav = wav[:, :, 0]
wav = self.speech_augmentation(wav, wavs_lens_rate)
loss = self.model(wav, wavs_lens_rate, target, target_lens_rate)
# pring(wav, wavs_lens_rate, target, target_lens_rate)
# loss div by `batch_size * accum_grad`
loss /= train_conf.accum_grad
-
+
losses_np = {'loss': float(loss) * train_conf.accum_grad}
# loss backward
@@ -108,15 +108,16 @@ class Wav2Vec2ASRTrainer(Trainer):
def valid(self):
self.model.eval()
if not self.use_streamdata:
- logger.info(f"Valid Total Examples: {len(self.valid_loader.dataset)}")
+ logger.info(
+ f"Valid Total Examples: {len(self.valid_loader.dataset)}")
valid_losses = defaultdict(list)
num_seen_utts = 1
total_loss = 0.0
for i, batch in enumerate(self.valid_loader):
utt, wav, wavs_lens, target, target_lens = batch
- wavs_lens_rate = wavs_lens / wav.shape[1]
+ wavs_lens_rate = wavs_lens / wav.shape[1]
target_lens_rate = target_lens / target.shape[1]
- wav = wav[:,:,0]
+ wav = wav[:, :, 0]
loss = self.model(wav, wavs_lens_rate, target, target_lens_rate)
if paddle.isfinite(loss):
@@ -134,7 +135,8 @@ class Wav2Vec2ASRTrainer(Trainer):
msg += "epoch: {}, ".format(self.epoch)
msg += "step: {}, ".format(self.iteration)
if not self.use_streamdata:
- msg += "batch: {}/{}, ".format(i + 1, len(self.valid_loader))
+ msg += "batch: {}/{}, ".format(i + 1,
+ len(self.valid_loader))
msg += ', '.join('{}: {:>.6f}'.format(k, v)
for k, v in valid_dump.items())
logger.info(msg)
@@ -155,7 +157,8 @@ class Wav2Vec2ASRTrainer(Trainer):
self.before_train()
if not self.use_streamdata:
- logger.info(f"Train Total Examples: {len(self.train_loader.dataset)}")
+ logger.info(
+ f"Train Total Examples: {len(self.train_loader.dataset)}")
while self.epoch < self.config.n_epoch:
with Timer("Epoch-Train Time Cost: {}"):
self.model.train()
@@ -223,14 +226,18 @@ class Wav2Vec2ASRTrainer(Trainer):
config = self.config.clone()
self.use_streamdata = config.get("use_stream_data", False)
if self.train:
- self.train_loader = DataLoaderFactory.get_dataloader('train', config, self.args)
- self.valid_loader = DataLoaderFactory.get_dataloader('valid', config, self.args)
+ self.train_loader = DataLoaderFactory.get_dataloader(
+ 'train', config, self.args)
+ self.valid_loader = DataLoaderFactory.get_dataloader(
+ 'valid', config, self.args)
logger.info("Setup train/valid Dataloader!")
else:
decode_batch_size = config.get('decode', dict()).get(
'decode_batch_size', 1)
- self.test_loader = DataLoaderFactory.get_dataloader('test', config, self.args)
- self.align_loader = DataLoaderFactory.get_dataloader('align', config, self.args)
+ self.test_loader = DataLoaderFactory.get_dataloader('test', config,
+ self.args)
+ self.align_loader = DataLoaderFactory.get_dataloader(
+ 'align', config, self.args)
logger.info("Setup test/align Dataloader!")
def setup_model(self):
@@ -248,7 +255,7 @@ class Wav2Vec2ASRTrainer(Trainer):
model = Wav2vec2ASR.from_config(model_conf)
if self.parallel:
- model = paddle.DataParallel(model, find_unused_parameters=True)
+ model = paddle.DataParallel(model, find_unused_parameters=True)
logger.info(f"{model}")
layer_tools.print_params(model, logger.info)
@@ -312,14 +319,14 @@ class Wav2Vec2ASRTester(Wav2Vec2ASRTrainer):
self.text_featurizer = TextFeaturizer(
unit_type=config.unit_type, vocab=config.vocab_filepath)
self.vocab_list = self.text_featurizer.vocab_list
+
def id2token(self, texts, texts_len):
""" ord() id to chr() chr """
trans = []
for text, n in zip(texts, texts_len):
n = n.numpy().item()
ids = text[:n]
- trans.append(
- self.text_featurizer.defeaturize(ids.numpy().tolist()))
+ trans.append(self.text_featurizer.defeaturize(ids.numpy().tolist()))
return trans
def compute_metrics(self,
@@ -337,10 +344,10 @@ class Wav2Vec2ASRTester(Wav2Vec2ASRTrainer):
start_time = time.time()
target_transcripts = self.id2token(texts, texts_len)
result_transcripts, result_tokenids = self.model.decode(
- audio,
- text_feature=self.text_featurizer,
- decoding_method=decode_cfg.decoding_method,
- beam_size=decode_cfg.beam_size)
+ audio,
+ text_feature=self.text_featurizer,
+ decoding_method=decode_cfg.decoding_method,
+ beam_size=decode_cfg.beam_size)
decode_time = time.time() - start_time
for utt, target, result, rec_tids in zip(
@@ -432,4 +439,4 @@ class Wav2Vec2ASRTester(Wav2Vec2ASRTrainer):
"decode_method":
self.config.decode.decoding_method,
})
- f.write(data + '\n')
\ No newline at end of file
+ f.write(data + '\n')
diff --git a/paddlespeech/s2t/models/wav2vec2/modules/VanillaNN.py b/paddlespeech/s2t/models/wav2vec2/modules/VanillaNN.py
index a8f5f5cb1..ae141d1b3 100644
--- a/paddlespeech/s2t/models/wav2vec2/modules/VanillaNN.py
+++ b/paddlespeech/s2t/models/wav2vec2/modules/VanillaNN.py
@@ -3,6 +3,7 @@ Authors
* Elena Rastorgueva 2020
"""
import paddle
+
from paddlespeech.s2t.models.wav2vec2.modules import containers
from paddlespeech.s2t.models.wav2vec2.modules import linear
@@ -27,12 +28,11 @@ class VanillaNN(containers.Sequential):
"""
def __init__(
- self,
- input_shape,
- activation=paddle.nn.LeakyReLU,
- dnn_blocks=2,
- dnn_neurons=512,
- ):
+ self,
+ input_shape,
+ activation=paddle.nn.LeakyReLU,
+ dnn_blocks=2,
+ dnn_neurons=512, ):
super().__init__(input_shape=input_shape)
for block_index in range(dnn_blocks):
@@ -40,6 +40,5 @@ class VanillaNN(containers.Sequential):
linear.Linear,
n_neurons=dnn_neurons,
bias=True,
- layer_name="linear",
- )
+ layer_name="linear", )
self.append(activation(), layer_name="act")
diff --git a/paddlespeech/s2t/models/wav2vec2/modules/activations.py b/paddlespeech/s2t/models/wav2vec2/modules/activations.py
index 9df652c23..722d8a0d6 100644
--- a/paddlespeech/s2t/models/wav2vec2/modules/activations.py
+++ b/paddlespeech/s2t/models/wav2vec2/modules/activations.py
@@ -11,12 +11,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
import math
-from packaging import version
-from paddle import Tensor, nn
-
+import paddle
+from paddle import nn
+from paddle import Tensor
from paddlespeech.s2t.utils.log import Log
logger = Log(__name__).getlog()
@@ -29,7 +27,9 @@ class NewGELUActivation(nn.Layer):
"""
def forward(self, input: Tensor) -> Tensor:
- return 0.5 * input * (1.0 + paddle.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * paddle.pow(input, 3.0))))
+ return 0.5 * input * (1.0 + paddle.tanh(
+ math.sqrt(2.0 / math.pi) *
+ (input + 0.044715 * paddle.pow(input, 3.0))))
class GELUActivation(nn.Layer):
@@ -40,7 +40,7 @@ class GELUActivation(nn.Layer):
Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
"""
- def __init__(self, use_gelu_python: bool = False):
+ def __init__(self, use_gelu_python: bool=False):
super().__init__()
self.act = nn.functional.gelu
@@ -57,7 +57,9 @@ class FastGELUActivation(nn.Layer):
"""
def forward(self, input: Tensor) -> Tensor:
- return 0.5 * input * (1.0 + paddle.tanh(input * 0.7978845608 * (1.0 + 0.044715 * input * input)))
+ return 0.5 * input * (
+ 1.0 + paddle.tanh(input * 0.7978845608 *
+ (1.0 + 0.044715 * input * input)))
class QuickGELUActivation(nn.Layer):
@@ -84,7 +86,8 @@ class ClippedGELUActivation(nn.Layer):
def __init__(self, min: float, max: float):
if min > max:
- raise ValueError(f"min should be < max (got min: {min}, max: {max})")
+ raise ValueError(
+ f"min should be < max (got min: {min}, max: {max})")
super().__init__()
self.min = min
@@ -161,7 +164,9 @@ def get_activation(activation_string):
if activation_string in ACT2FN:
return ACT2FN[activation_string]
else:
- raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
+ raise KeyError(
+ f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}"
+ )
# For backwards compatibility with: from activations import gelu_python
diff --git a/paddlespeech/s2t/models/wav2vec2/modules/containers.py b/paddlespeech/s2t/models/wav2vec2/modules/containers.py
index 2b961a59b..b39733570 100644
--- a/paddlespeech/s2t/models/wav2vec2/modules/containers.py
+++ b/paddlespeech/s2t/models/wav2vec2/modules/containers.py
@@ -1,8 +1,7 @@
-import paddle
import inspect
-import logging
-import operator
-import functools
+
+import paddle
+
class Sequential(paddle.nn.LayerDict):
"""A sequence of modules with potentially inferring shape on construction.
@@ -98,13 +97,12 @@ class Sequential(paddle.nn.LayerDict):
# Finally, append the layer.
try:
self[layer_name] = layer
- # self.add_module(layer_name, layer)
+ # self.add_module(layer_name, layer)
except TypeError:
raise ValueError(
"Must pass `input_shape` at initialization and use "
"modules that take `input_shape` to infer shape when "
- "using `append()`."
- )
+ "using `append()`.")
def get_output_shape(self):
"""Returns expected shape of the output.
diff --git a/paddlespeech/s2t/models/wav2vec2/modules/linear.py b/paddlespeech/s2t/models/wav2vec2/modules/linear.py
index 26389d908..488949d14 100644
--- a/paddlespeech/s2t/models/wav2vec2/modules/linear.py
+++ b/paddlespeech/s2t/models/wav2vec2/modules/linear.py
@@ -3,10 +3,10 @@ Authors
* Mirco Ravanelli 2020
* Davide Borra 2021
"""
-
import logging
+
import paddle
-import paddle.nn as nn
+
from paddlespeech.s2t.modules import align
logger = logging.getLogger(__name__)
@@ -37,13 +37,12 @@ class Linear(paddle.nn.Layer):
"""
def __init__(
- self,
- n_neurons,
- input_shape=None,
- input_size=None,
- bias=True,
- combine_dims=False,
- ):
+ self,
+ n_neurons,
+ input_shape=None,
+ input_size=None,
+ bias=True,
+ combine_dims=False, ):
super().__init__()
self.combine_dims = combine_dims
diff --git a/paddlespeech/s2t/models/wav2vec2/modules/modeling_outputs.py b/paddlespeech/s2t/models/wav2vec2/modules/modeling_outputs.py
index a5b509b66..fb2a87122 100644
--- a/paddlespeech/s2t/models/wav2vec2/modules/modeling_outputs.py
+++ b/paddlespeech/s2t/models/wav2vec2/modules/modeling_outputs.py
@@ -11,12 +11,12 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
-from dataclasses import dataclass
-from typing import Optional, Tuple
from collections import OrderedDict
-
+from dataclasses import dataclass
from dataclasses import fields
+from typing import Optional
+from typing import Tuple
+
import paddle
@@ -41,10 +41,13 @@ class ModelOutput(OrderedDict):
if not len(class_fields):
raise ValueError(f"{self.__class__.__name__} has no fields.")
if not all(field.default is None for field in class_fields[1:]):
- raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")
+ raise ValueError(
+ f"{self.__class__.__name__} should not have more than one required field."
+ )
first_field = getattr(self, class_fields[0].name)
- other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])
+ other_fields_are_none = all(
+ getattr(self, field.name) is None for field in class_fields[1:])
if other_fields_are_none and not paddle.is_tensor(first_field):
if isinstance(first_field, dict):
@@ -61,11 +64,9 @@ class ModelOutput(OrderedDict):
# set the associated fields
if first_field_iterator:
for element in iterator:
- if (
- not isinstance(element, (list, tuple))
- or not len(element) == 2
- or not isinstance(element[0], str)
- ):
+ if (not isinstance(element, (list, tuple)) or
+ not len(element) == 2 or
+ not isinstance(element[0], str)):
break
setattr(self, element[0], element[1])
if element[1] is not None:
@@ -79,16 +80,23 @@ class ModelOutput(OrderedDict):
self[field.name] = v
def __delitem__(self, *args, **kwargs):
- raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")
+ raise Exception(
+ f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance."
+ )
def setdefault(self, *args, **kwargs):
- raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")
+ raise Exception(
+ f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance."
+ )
def pop(self, *args, **kwargs):
- raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")
+ raise Exception(
+ f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")
def update(self, *args, **kwargs):
- raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")
+ raise Exception(
+ f"You cannot use ``update`` on a {self.__class__.__name__} instance."
+ )
def __getitem__(self, k):
if isinstance(k, str):
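For context, these output containers behave like read-only ordered dataclasses that allow both attribute and key access, mirroring the Hugging Face semantics this module is ported from. A small hypothetical sketch:

    import paddle
    from paddlespeech.s2t.models.wav2vec2.modules.modeling_outputs import BaseModelOutput

    out = BaseModelOutput(last_hidden_state=paddle.randn([2, 50, 768]))
    h = out.last_hidden_state        # attribute access
    h = out["last_hidden_state"]     # string keys route through __getitem__
    # out.pop("last_hidden_state")   # would raise: the mutation helpers above are disabled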
diff --git a/paddlespeech/s2t/models/wav2vec2/modules/modeling_wav2vec2.py b/paddlespeech/s2t/models/wav2vec2/modules/modeling_wav2vec2.py
index 6988aa6aa..3d5e5fa64 100644
--- a/paddlespeech/s2t/models/wav2vec2/modules/modeling_wav2vec2.py
+++ b/paddlespeech/s2t/models/wav2vec2/modules/modeling_wav2vec2.py
@@ -13,24 +13,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
""" Paddle Wav2Vec2 model."""
-
-import math
-import warnings
-import paddle
from dataclasses import dataclass
-from typing import Optional, Tuple, Union
+from typing import Optional
+from typing import Tuple
+from typing import Union
import numpy as np
+import paddle
from paddle import nn
from paddlespeech.s2t.models.wav2vec2.modules.activations import ACT2FN
-from paddlespeech.s2t.models.wav2vec2.modules.modeling_outputs import (
- BaseModelOutput,
- Wav2Vec2BaseModelOutput,
- ModelOutput
-)
-import pdb
-
+from paddlespeech.s2t.models.wav2vec2.modules.modeling_outputs import BaseModelOutput
+from paddlespeech.s2t.models.wav2vec2.modules.modeling_outputs import ModelOutput
+from paddlespeech.s2t.models.wav2vec2.modules.modeling_outputs import Wav2Vec2BaseModelOutput
from paddlespeech.s2t.utils.log import Log
logger = Log(__name__).getlog()
@@ -78,12 +73,11 @@ class Wav2Vec2ForPreTrainingOutput(ModelOutput):
def _compute_mask_indices(
- shape: Tuple[int, int],
- mask_prob: float,
- mask_length: int,
- attention_mask: Optional[paddle.Tensor] = None,
- min_masks: int = 0,
-) -> np.ndarray:
+ shape: Tuple[int, int],
+ mask_prob: float,
+ mask_length: int,
+ attention_mask: Optional[paddle.Tensor]=None,
+ min_masks: int=0, ) -> np.ndarray:
"""
Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on
@@ -109,8 +103,7 @@ def _compute_mask_indices(
if mask_length > sequence_length:
raise ValueError(
f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
- f" and `sequence_length`: {sequence_length}`"
- )
+ f" and `sequence_length`: {sequence_length}`")
# epsilon is used for probabilistic rounding
epsilon = np.random.rand(1).item()
@@ -131,11 +124,9 @@ def _compute_mask_indices(
return num_masked_span
# compute number of masked spans in batch
- input_lengths = (
- attention_mask.sum(-1).detach().tolist()
- if attention_mask is not None
- else [sequence_length for _ in range(batch_size)]
- )
+ input_lengths = (attention_mask.sum(-1).detach().tolist()
+ if attention_mask is not None else
+ [sequence_length for _ in range(batch_size)])
# SpecAugment mask to fill
spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=np.bool)
@@ -152,8 +143,9 @@ def _compute_mask_indices(
# get random indices to mask
spec_aug_mask_idx = np.random.choice(
- np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
- )
+ np.arange(input_length - (mask_length - 1)),
+ num_masked_span,
+ replace=False)
# pick first sampled index that will serve as a dummy index to pad vector
# to ensure same dimension for all batches due to probabilistic rounding
@@ -166,29 +158,33 @@ def _compute_mask_indices(
else:
dummy_mask_idx = spec_aug_mask_idx[0]
- spec_aug_mask_idx = np.concatenate(
- [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
- )
+ spec_aug_mask_idx = np.concatenate([
+ spec_aug_mask_idx,
+ np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) *
+ dummy_mask_idx
+ ])
spec_aug_mask_idxs.append(spec_aug_mask_idx)
spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)
# expand masked indices to masked spans
spec_aug_mask_idxs = np.broadcast_to(
- spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
- )
- spec_aug_mask_idxs = spec_aug_mask_idxs.reshape((batch_size, max_num_masked_span * mask_length))
+ spec_aug_mask_idxs[:, :, None],
+ (batch_size, max_num_masked_span, mask_length))
+ spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(
+ (batch_size, max_num_masked_span * mask_length))
# add offset to the starting indexes so that indexes now create a span
offsets = np.arange(mask_length)[None, None, :]
- offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
- (batch_size, max_num_masked_span * mask_length)
- )
+ offsets = np.broadcast_to(offsets, (
+ batch_size, max_num_masked_span, mask_length)).reshape(
+ (batch_size, max_num_masked_span * mask_length))
spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
# ensure that we cannot have indices larger than sequence_length
if spec_aug_mask_idxs.max() > sequence_length - 1:
- spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1
+ spec_aug_mask_idxs[spec_aug_mask_idxs >
+ sequence_length - 1] = sequence_length - 1
# scatter indices to mask
np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)
@@ -196,9 +192,9 @@ def _compute_mask_indices(
return spec_aug_mask
-def _sample_negative_indices(
- features_shape: Tuple, num_negatives: int, mask_time_indices: Optional[np.ndarray] = None
-):
+def _sample_negative_indices(features_shape: Tuple,
+ num_negatives: int,
+ mask_time_indices: Optional[np.ndarray]=None):
"""
Sample `num_negatives` vectors from feature vectors.
"""
@@ -208,23 +204,28 @@ def _sample_negative_indices(
sequence_length_range = np.arange(sequence_length)
# get `num_negatives` random vector indices from the same utterance
- sampled_negative_indices = np.zeros(shape=(batch_size, sequence_length, num_negatives), dtype=np.int32)
+ sampled_negative_indices = np.zeros(
+ shape=(batch_size, sequence_length, num_negatives), dtype=np.int32)
- mask_time_indices = (
- mask_time_indices.astype(np.bool) if mask_time_indices is not None else np.ones(features_shape, dtype=np.bool)
- )
+ mask_time_indices = (mask_time_indices.astype(np.bool)
+ if mask_time_indices is not None else
+ np.ones(features_shape, dtype=np.bool))
for batch_idx in range(batch_size):
high = mask_time_indices[batch_idx].sum() - 1
- mapped_masked_indices = sequence_length_range[mask_time_indices[batch_idx]]
+ mapped_masked_indices = sequence_length_range[mask_time_indices[
+ batch_idx]]
- feature_indices = np.broadcast_to(np.arange(high + 1)[:, None], (high + 1, num_negatives))
- sampled_indices = np.random.randint(0, high, size=(high + 1, num_negatives))
+ feature_indices = np.broadcast_to(
+ np.arange(high + 1)[:, None], (high + 1, num_negatives))
+ sampled_indices = np.random.randint(
+ 0, high, size=(high + 1, num_negatives))
# avoid sampling the same positive vector, but keep the distribution uniform
sampled_indices[sampled_indices >= feature_indices] += 1
# remap to actual indices
- sampled_negative_indices[batch_idx][mask_time_indices[batch_idx]] = mapped_masked_indices[sampled_indices]
+ sampled_negative_indices[batch_idx][mask_time_indices[
+ batch_idx]] = mapped_masked_indices[sampled_indices]
# correct for batch size
sampled_negative_indices[batch_idx] += batch_idx * sequence_length
@@ -243,8 +244,7 @@ class Wav2Vec2NoLayerNormConvLayer(nn.Layer):
self.out_conv_dim,
kernel_size=config.conv_kernel[layer_id],
stride=config.conv_stride[layer_id],
- bias_attr=config.conv_bias,
- )
+ bias_attr=config.conv_bias, )
self.activation = ACT2FN[config.feat_extract_activation]
def forward(self, hidden_states):
@@ -264,8 +264,7 @@ class Wav2Vec2LayerNormConvLayer(nn.Layer):
self.out_conv_dim,
kernel_size=config.conv_kernel[layer_id],
stride=config.conv_stride[layer_id],
- bias_attr=config.conv_bias,
- )
+ bias_attr=config.conv_bias, )
self.layer_norm = nn.LayerNorm(self.out_conv_dim)
self.activation = ACT2FN[config.feat_extract_activation]
@@ -290,11 +289,11 @@ class Wav2Vec2GroupNormConvLayer(nn.Layer):
self.out_conv_dim,
kernel_size=config.conv_kernel[layer_id],
stride=config.conv_stride[layer_id],
- bias_attr=config.conv_bias,
- )
+ bias_attr=config.conv_bias, )
self.activation = ACT2FN[config.feat_extract_activation]
- self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim)
+ self.layer_norm = nn.GroupNorm(
+ num_groups=self.out_conv_dim, num_channels=self.out_conv_dim)
def forward(self, hidden_states):
hidden_states = self.conv(hidden_states)
@@ -311,8 +310,7 @@ class Wav2Vec2PositionalConvEmbedding(nn.Layer):
config.hidden_size,
kernel_size=config.num_conv_pos_embeddings,
padding=config.num_conv_pos_embeddings // 2,
- groups=config.num_conv_pos_embedding_groups,
- )
+ groups=config.num_conv_pos_embedding_groups, )
self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
@@ -337,7 +335,7 @@ class Wav2Vec2SamePadLayer(nn.Layer):
def forward(self, hidden_states):
if self.num_pad_remove > 0:
- hidden_states = hidden_states[:, :, : -self.num_pad_remove]
+ hidden_states = hidden_states[:, :, :-self.num_pad_remove]
return hidden_states
@@ -349,11 +347,13 @@ class Wav2Vec2FeatureEncoder(nn.Layer):
if config.feat_extract_norm == "group":
conv_layers = [Wav2Vec2GroupNormConvLayer(config, layer_id=0)] + [
- Wav2Vec2NoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1)
+ Wav2Vec2NoLayerNormConvLayer(config, layer_id=i + 1)
+ for i in range(config.num_feat_extract_layers - 1)
]
elif config.feat_extract_norm == "layer":
conv_layers = [
- Wav2Vec2LayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)
+ Wav2Vec2LayerNormConvLayer(config, layer_id=i)
+ for i in range(config.num_feat_extract_layers)
]
else:
raise ValueError(
@@ -373,10 +373,12 @@ class Wav2Vec2FeatureEncoder(nn.Layer):
return hidden_states
+
class Wav2Vec2FeatureProjection(nn.Layer):
def __init__(self, config):
super().__init__()
- self.layer_norm = nn.LayerNorm(config.conv_dim[-1], epsilon=config.layer_norm_eps)
+ self.layer_norm = nn.LayerNorm(
+ config.conv_dim[-1], epsilon=config.layer_norm_eps)
self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
self.dropout = nn.Dropout(config.feat_proj_dropout)
@@ -393,13 +395,12 @@ class Wav2Vec2Attention(nn.Layer):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
- self,
- embed_dim: int,
- num_heads: int,
- dropout: float = 0.0,
- is_decoder: bool = False,
- bias: bool = True,
- ):
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float=0.0,
+ is_decoder: bool=False,
+ bias: bool=True, ):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
@@ -409,8 +410,7 @@ class Wav2Vec2Attention(nn.Layer):
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
- f" and `num_heads`: {num_heads})."
- )
+ f" and `num_heads`: {num_heads}).")
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
@@ -420,17 +420,18 @@ class Wav2Vec2Attention(nn.Layer):
self.out_proj = nn.Linear(embed_dim, embed_dim, bias_attr=bias)
def _shape(self, tensor: paddle.Tensor, seq_len: int, bsz: int):
- return paddle.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)).transpose([0, 2, 1, 3])
+ return paddle.reshape(tensor, (bsz, seq_len, self.num_heads,
+ self.head_dim)).transpose([0, 2, 1, 3])
def forward(
- self,
- hidden_states: paddle.Tensor,
- key_value_states: Optional[paddle.Tensor] = None,
- past_key_value: Optional[Tuple[paddle.Tensor]] = None,
- attention_mask: Optional[paddle.Tensor] = None,
- layer_head_mask: Optional[paddle.Tensor] = None,
- output_attentions: bool = False,
- ) -> Tuple[paddle.Tensor, Optional[paddle.Tensor], Optional[Tuple[paddle.Tensor]]]:
+ self,
+ hidden_states: paddle.Tensor,
+ key_value_states: Optional[paddle.Tensor]=None,
+ past_key_value: Optional[Tuple[paddle.Tensor]]=None,
+ attention_mask: Optional[paddle.Tensor]=None,
+ layer_head_mask: Optional[paddle.Tensor]=None,
+ output_attentions: bool=False, ) -> Tuple[paddle.Tensor, Optional[
+ paddle.Tensor], Optional[Tuple[paddle.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
@@ -455,7 +456,8 @@ class Wav2Vec2Attention(nn.Layer):
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = paddle.concat([past_key_value[0], key_states], axis=2)
- value_states = paddle.concat([past_key_value[1], value_states], axis=2)
+ value_states = paddle.concat(
+ [past_key_value[1], value_states], axis=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
@@ -472,60 +474,68 @@ class Wav2Vec2Attention(nn.Layer):
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
- query_states = self._shape(query_states, tgt_len, bsz).reshape(proj_shape)
+ query_states = self._shape(query_states, tgt_len,
+ bsz).reshape(proj_shape)
key_states = key_states.reshape(proj_shape)
value_states = value_states.reshape(proj_shape)
src_len = key_states.shape[1]
attn_weights = paddle.bmm(query_states, key_states.transpose([0, 2, 1]))
-
-
+
if attn_weights.shape != [bsz * self.num_heads, tgt_len, src_len]:
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
- f" {attn_weights.shape}"
- )
+ f" {attn_weights.shape}")
if attention_mask is not None:
if attention_mask.shape != [bsz, 1, tgt_len, src_len]:
raise ValueError(
f"Attention mask should be of size {[bsz, 1, tgt_len, src_len]}, but is {attention_mask.shape}"
)
- attn_weights = attn_weights.reshape(bsz, self.num_heads, tgt_len, src_len) + attention_mask
- attn_weights = attn_weights.reshape(bsz * self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights.reshape(bsz, self.num_heads, tgt_len,
+ src_len) + attention_mask
+ attn_weights = attn_weights.reshape(bsz * self.num_heads, tgt_len,
+ src_len)
- attn_weights = nn.functional.softmax(attn_weights, axis=- 1)
+ attn_weights = nn.functional.softmax(attn_weights, axis=-1)
if layer_head_mask is not None:
- if layer_head_mask.shape != [self.num_heads,]:
+ if layer_head_mask.shape != [
+ self.num_heads,
+ ]:
raise ValueError(
f"Head mask for a single layer should be of size {[self.num_heads,]}, but is"
- f" {layer_head_mask.shape}"
- )
- attn_weights = layer_head_mask.reshape((1, -1, 1, 1)) * attn_weights.reshape((bsz, self.num_heads, tgt_len, src_len))
- attn_weights = attn_weights.reshape((bsz * self.num_heads, tgt_len, src_len))
+ f" {layer_head_mask.shape}")
+ attn_weights = layer_head_mask.reshape(
+ (1, -1, 1, 1)) * attn_weights.reshape(
+ (bsz, self.num_heads, tgt_len, src_len))
+ attn_weights = attn_weights.reshape(
+ (bsz * self.num_heads, tgt_len, src_len))
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
- attn_weights_reshaped = attn_weights.reshape((bsz, self.num_heads, tgt_len, src_len))
- attn_weights = attn_weights_reshaped.reshape((bsz * self.num_heads, tgt_len, src_len))
+ attn_weights_reshaped = attn_weights.reshape(
+ (bsz, self.num_heads, tgt_len, src_len))
+ attn_weights = attn_weights_reshaped.reshape(
+ (bsz * self.num_heads, tgt_len, src_len))
else:
attn_weights_reshaped = None
- attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+ attn_probs = nn.functional.dropout(
+ attn_weights, p=self.dropout, training=self.training)
attn_output = paddle.bmm(attn_probs, value_states)
if attn_output.shape != [bsz * self.num_heads, tgt_len, self.head_dim]:
raise ValueError(
f"`attn_output` should be of size {[bsz, self.num_heads, tgt_len, self.head_dim]}, but is"
- f" {attn_output.shape}"
- )
+ f" {attn_output.shape}")
- attn_output = attn_output.reshape((bsz, self.num_heads, tgt_len, self.head_dim))
+ attn_output = attn_output.reshape(
+ (bsz, self.num_heads, tgt_len, self.head_dim))
attn_output = attn_output.transpose([0, 2, 1, 3])
# Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
@@ -542,13 +552,15 @@ class Wav2Vec2FeedForward(nn.Layer):
super().__init__()
self.intermediate_dropout = nn.Dropout(config.activation_dropout)
- self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ self.intermediate_dense = nn.Linear(config.hidden_size,
+ config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
- self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.output_dense = nn.Linear(config.intermediate_size,
+ config.hidden_size)
self.output_dropout = nn.Dropout(config.hidden_dropout)
def forward(self, hidden_states):
@@ -568,18 +580,23 @@ class Wav2Vec2EncoderLayer(nn.Layer):
embed_dim=config.hidden_size,
num_heads=config.num_attention_heads,
dropout=config.attention_dropout,
- is_decoder=False,
- )
+ is_decoder=False, )
self.dropout = nn.Dropout(config.hidden_dropout)
- self.layer_norm = nn.LayerNorm(config.hidden_size, epsilon=config.layer_norm_eps)
+ self.layer_norm = nn.LayerNorm(
+ config.hidden_size, epsilon=config.layer_norm_eps)
self.feed_forward = Wav2Vec2FeedForward(config)
- self.final_layer_norm = nn.LayerNorm(config.hidden_size, epsilon=config.layer_norm_eps)
+ self.final_layer_norm = nn.LayerNorm(
+ config.hidden_size, epsilon=config.layer_norm_eps)
- def forward(self, hidden_states, attention_mask=None, output_attentions=False):
+ def forward(self,
+ hidden_states,
+ attention_mask=None,
+ output_attentions=False):
attn_residual = hidden_states
hidden_states, attn_weights, _ = self.attention(
- hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
- )
+ hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions)
hidden_states = self.dropout(hidden_states)
hidden_states = attn_residual + hidden_states
@@ -587,10 +604,10 @@ class Wav2Vec2EncoderLayer(nn.Layer):
hidden_states = hidden_states + self.feed_forward(hidden_states)
hidden_states = self.final_layer_norm(hidden_states)
- outputs = (hidden_states,)
+ outputs = (hidden_states, )
if output_attentions:
- outputs += (attn_weights,)
+ outputs += (attn_weights, )
return outputs
@@ -602,27 +619,33 @@ class Wav2Vec2EncoderLayerStableLayerNorm(nn.Layer):
embed_dim=config.hidden_size,
num_heads=config.num_attention_heads,
dropout=config.attention_dropout,
- is_decoder=False,
- )
+ is_decoder=False, )
self.dropout = nn.Dropout(config.hidden_dropout)
- self.layer_norm = nn.LayerNorm(config.hidden_size, epsilon=config.layer_norm_eps)
+ self.layer_norm = nn.LayerNorm(
+ config.hidden_size, epsilon=config.layer_norm_eps)
self.feed_forward = Wav2Vec2FeedForward(config)
- self.final_layer_norm = nn.LayerNorm(config.hidden_size, epsilon=config.layer_norm_eps)
+ self.final_layer_norm = nn.LayerNorm(
+ config.hidden_size, epsilon=config.layer_norm_eps)
- def forward(self, hidden_states, attention_mask=None, output_attentions=False):
+ def forward(self,
+ hidden_states,
+ attention_mask=None,
+ output_attentions=False):
attn_residual = hidden_states
hidden_states = self.layer_norm(hidden_states)
hidden_states, attn_weights, _ = self.attention(
- hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
- )
+ hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions)
hidden_states = self.dropout(hidden_states)
hidden_states = attn_residual + hidden_states
- hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states))
+ hidden_states = hidden_states + self.feed_forward(
+ self.final_layer_norm(hidden_states))
- outputs = (hidden_states,)
+ outputs = (hidden_states, )
if output_attentions:
- outputs += (attn_weights,)
+ outputs += (attn_weights, )
return outputs
@@ -632,33 +655,38 @@ class Wav2Vec2Encoder(nn.Layer):
super().__init__()
self.config = config
self.pos_conv_embed = Wav2Vec2PositionalConvEmbedding(config)
- self.layer_norm = nn.LayerNorm(config.hidden_size, epsilon=config.layer_norm_eps)
+ self.layer_norm = nn.LayerNorm(
+ config.hidden_size, epsilon=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout)
- self.layers = nn.LayerList([Wav2Vec2EncoderLayer(config) for _ in range(config.num_hidden_layers)])
+ self.layers = nn.LayerList([
+ Wav2Vec2EncoderLayer(config)
+ for _ in range(config.num_hidden_layers)
+ ])
self.gradient_checkpointing = False
def forward(
- self,
- hidden_states,
- attention_mask=None,
- output_attentions=False,
- output_hidden_states=False,
- return_dict=True,
- ):
+ self,
+ hidden_states,
+ attention_mask=None,
+ output_attentions=False,
+ output_hidden_states=False,
+ return_dict=True, ):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if attention_mask is not None:
# make sure padded tokens output 0
- expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
+ expand_attention_mask = attention_mask.unsqueeze(-1).repeat(
+ 1, 1, hidden_states.shape[2])
hidden_states[~expand_attention_mask] = 0
# extend attention_mask
- attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
+ attention_mask = 1.0 - attention_mask[:, None, None, :].to(
+ dtype=hidden_states.dtype)
attention_mask = attention_mask * np.iinfo(np.float32).min
- attention_mask = attention_mask.expand(
- attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
- )
+ attention_mask = attention_mask.expand(attention_mask.shape[0], 1,
+ attention_mask.shape[-1],
+ attention_mask.shape[-1])
position_embeddings = self.pos_conv_embed(hidden_states)
hidden_states = hidden_states + position_embeddings
@@ -669,13 +697,14 @@ class Wav2Vec2Encoder(nn.Layer):
for layer in self.layers:
if output_hidden_states:
- all_hidden_states = all_hidden_states + (hidden_states,)
+ all_hidden_states = all_hidden_states + (hidden_states, )
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = np.random.uniform(0, 1)
- skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False
- if not skip_the_layer:# or deepspeed_zero3_is_enabled:
+ skip_the_layer = True if self.training and (
+ dropout_probability < self.config.layerdrop) else False
+ if not skip_the_layer: # or deepspeed_zero3_is_enabled:
# under deepspeed zero3 all gpus must run in sync
if self.gradient_checkpointing and self.training:
# create gradient checkpointing function
@@ -686,26 +715,30 @@ class Wav2Vec2Encoder(nn.Layer):
return custom_forward
else:
layer_outputs = layer(
- hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
- )
+ hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if skip_the_layer:
layer_outputs = (None, None)
if output_attentions:
- all_self_attentions = all_self_attentions + (layer_outputs[1],)
+ all_self_attentions = all_self_attentions + (layer_outputs[1], )
if output_hidden_states:
- all_hidden_states = all_hidden_states + (hidden_states,)
+ all_hidden_states = all_hidden_states + (hidden_states, )
if not return_dict:
- return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+ return tuple(
+ v
+ for v in
+ [hidden_states, all_hidden_states, all_self_attentions]
+ if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
- attentions=all_self_attentions,
- )
+ attentions=all_self_attentions, )
class Wav2Vec2EncoderStableLayerNorm(nn.Layer):
@@ -713,35 +746,39 @@ class Wav2Vec2EncoderStableLayerNorm(nn.Layer):
super().__init__()
self.config = config
self.pos_conv_embed = Wav2Vec2PositionalConvEmbedding(config)
- self.layer_norm = nn.LayerNorm(config.hidden_size, epsilon=config.layer_norm_eps)
+ self.layer_norm = nn.LayerNorm(
+ config.hidden_size, epsilon=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout)
- self.layers = nn.LayerList(
- [Wav2Vec2EncoderLayerStableLayerNorm(config) for _ in range(config.num_hidden_layers)]
- )
+ self.layers = nn.LayerList([
+ Wav2Vec2EncoderLayerStableLayerNorm(config)
+ for _ in range(config.num_hidden_layers)
+ ])
self.gradient_checkpointing = False
def forward(
- self,
- hidden_states,
- attention_mask=None,
- output_attentions=False,
- output_hidden_states=False,
- return_dict=True,
- ):
+ self,
+ hidden_states,
+ attention_mask=None,
+ output_attentions=False,
+ output_hidden_states=False,
+ return_dict=True, ):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if attention_mask is not None:
# make sure padded tokens are not attended to
- expand_attention_mask = attention_mask.unsqueeze(-1).repeat_interleave(hidden_states.shape[2], axis=2)
+ expand_attention_mask = attention_mask.unsqueeze(
+ -1).repeat_interleave(
+ hidden_states.shape[2], axis=2)
hidden_states[~expand_attention_mask] = 0
# extend attention_mask
- attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
+ attention_mask = 1.0 - attention_mask[:, None, None, :].to(
+ dtype=hidden_states.dtype)
attention_mask = attention_mask * np.iinfo(np.float32).min
- attention_mask = attention_mask.expand(
- attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
- )
+ attention_mask = attention_mask.expand(attention_mask.shape[0], 1,
+ attention_mask.shape[-1],
+ attention_mask.shape[-1])
position_embeddings = self.pos_conv_embed(hidden_states)
hidden_states = hidden_states + position_embeddings
@@ -749,13 +786,14 @@ class Wav2Vec2EncoderStableLayerNorm(nn.Layer):
for layer in self.layers:
if output_hidden_states:
- all_hidden_states = all_hidden_states + (hidden_states,)
+ all_hidden_states = all_hidden_states + (hidden_states, )
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = np.random.uniform(0, 1)
- skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False
- if not skip_the_layer:# or deepspeed_zero3_is_enabled:
+ skip_the_layer = True if self.training and (
+ dropout_probability < self.config.layerdrop) else False
+ if not skip_the_layer: # or deepspeed_zero3_is_enabled:
# under deepspeed zero3 all gpus must run in sync
# XXX: could optimize this like synced_gpus in generate_utils but not sure if it's worth the code complication
if self.gradient_checkpointing and self.training:
@@ -767,28 +805,32 @@ class Wav2Vec2EncoderStableLayerNorm(nn.Layer):
return custom_forward
else:
layer_outputs = layer(
- hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
- )
+ hidden_states,
+ attention_mask=attention_mask,
+ output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if skip_the_layer:
layer_outputs = (None, None)
if output_attentions:
- all_self_attentions = all_self_attentions + (layer_outputs[1],)
+ all_self_attentions = all_self_attentions + (layer_outputs[1], )
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
- all_hidden_states = all_hidden_states + (hidden_states,)
+ all_hidden_states = all_hidden_states + (hidden_states, )
if not return_dict:
- return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
+ return tuple(
+ v
+ for v in
+ [hidden_states, all_hidden_states, all_self_attentions]
+ if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
- attentions=all_self_attentions,
- )
+ attentions=all_self_attentions, )
class Wav2Vec2GumbelVectorQuantizer(nn.Layer):
@@ -810,9 +852,13 @@ class Wav2Vec2GumbelVectorQuantizer(nn.Layer):
# storage for codebook variables (codewords)
self.codevectors = paddle.static.create_parameter(
- shape=[1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups], dtype='float32'
- )
- self.weight_proj = nn.Linear(config.conv_dim[-1], self.num_groups * self.num_vars)
+ shape=[
+ 1, self.num_groups * self.num_vars,
+ config.codevector_dim // self.num_groups
+ ],
+ dtype='float32')
+ self.weight_proj = nn.Linear(config.conv_dim[-1],
+ self.num_groups * self.num_vars)
# can be decayed for training
self.temperature = 2
@@ -826,7 +872,8 @@ class Wav2Vec2GumbelVectorQuantizer(nn.Layer):
else:
marginal_probs = probs.mean(dim=0)
- perplexity = paddle.exp(-paddle.sum(marginal_probs * paddle.log(marginal_probs + 1e-7), dim=-1)).sum()
+ perplexity = paddle.exp(-paddle.sum(
+ marginal_probs * paddle.log(marginal_probs + 1e-7), dim=-1)).sum()
return perplexity
def forward(self, hidden_states, mask_time_indices=None):
@@ -834,35 +881,45 @@ class Wav2Vec2GumbelVectorQuantizer(nn.Layer):
# project to codevector dim
hidden_states = self.weight_proj(hidden_states)
- hidden_states = hidden_states.reshape((batch_size * sequence_length * self.num_groups, -1))
+ hidden_states = hidden_states.reshape(
+ (batch_size * sequence_length * self.num_groups, -1))
if self.training:
# sample code vector probs via gumbel in differentiable way
codevector_probs = nn.functional.gumbel_softmax(
- hidden_states.float(), tau=self.temperature, hard=True
- ).type_as(hidden_states)
+ hidden_states.float(), tau=self.temperature,
+ hard=True).type_as(hidden_states)
# compute perplexity
codevector_soft_dist = paddle.softmax(
- hidden_states.reshape((batch_size * sequence_length, self.num_groups, -1)).float(), axis=-1
- )
- perplexity = self._compute_perplexity(codevector_soft_dist, mask_time_indices)
+ hidden_states.reshape((batch_size * sequence_length,
+ self.num_groups, -1)).float(),
+ axis=-1)
+ perplexity = self._compute_perplexity(codevector_soft_dist,
+ mask_time_indices)
else:
# take argmax in non-differentiable way
# compute hard codevector distribution (one hot)
codevector_idx = hidden_states.argmax(dim=-1)
- codevector_probs = hidden_states.new_zeros(*hidden_states.shape).scatter_(
- -1, codevector_idx.reshape((-1, 1)), 1.0
- )
- codevector_probs = codevector_probs.reshape((batch_size * sequence_length, self.num_groups, -1))
-
- perplexity = self._compute_perplexity(codevector_probs, mask_time_indices)
-
- codevector_probs = codevector_probs.reshape((batch_size * sequence_length, -1))
+ codevector_probs = hidden_states.new_zeros(
+ *hidden_states.shape).scatter_(-1,
+ codevector_idx.reshape((-1, 1)),
+ 1.0)
+ codevector_probs = codevector_probs.reshape(
+ (batch_size * sequence_length, self.num_groups, -1))
+
+ perplexity = self._compute_perplexity(codevector_probs,
+ mask_time_indices)
+
+ codevector_probs = codevector_probs.reshape(
+ (batch_size * sequence_length, -1))
# use probs to retrieve codevectors
- codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors
- codevectors = codevectors_per_group.reshape((batch_size * sequence_length, self.num_groups, self.num_vars, -1))
- codevectors = codevectors.sum(-2).reshape((batch_size, sequence_length, -1))
+ codevectors_per_group = codevector_probs.unsqueeze(
+ -1) * self.codevectors
+ codevectors = codevectors_per_group.reshape(
+ (batch_size * sequence_length, self.num_groups, self.num_vars, -1))
+ codevectors = codevectors.sum(-2).reshape(
+ (batch_size, sequence_length, -1))
return codevectors, perplexity
@@ -878,7 +935,9 @@ class Wav2Vec2Adapter(nn.Layer):
else:
self.proj = self.proj_layer_norm = None
- self.layers = nn.LayerList(Wav2Vec2AdapterLayer(config) for _ in range(config.num_adapter_layers))
+ self.layers = nn.LayerList(
+ Wav2Vec2AdapterLayer(config)
+ for _ in range(config.num_adapter_layers))
self.layerdrop = config.layerdrop
def forward(self, hidden_states):
@@ -906,8 +965,7 @@ class Wav2Vec2AdapterLayer(nn.Layer):
2 * config.output_hidden_size,
config.adapter_kernel_size,
stride=config.adapter_stride,
- padding=1,
- )
+ padding=1, )
def forward(self, hidden_states):
hidden_states = self.conv(hidden_states)
@@ -916,7 +974,7 @@ class Wav2Vec2AdapterLayer(nn.Layer):
return hidden_states
-class Wav2Vec2Model(nn.Layer):
+class Wav2Vec2Model(nn.Layer):
def __init__(self, config):
super().__init__()
self.config = config
@@ -925,9 +983,13 @@ class Wav2Vec2Model(nn.Layer):
# model only needs masking vector if mask prob is > 0.0
if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
- # self.masked_spec_embed = nn.Parameter(paddle.Tensor(config.hidden_size).uniform_())
+ # self.masked_spec_embed = nn.Parameter(paddle.Tensor(config.hidden_size).uniform_())
#self.masked_spec_embed = paddle.uniform([config.hidden_size])
- self.masked_spec_embed = paddle.static.create_parameter(shape=[config.hidden_size], dtype='float32', default_initializer=paddle.nn.initializer.Uniform(low=0, high=1.0))
+ self.masked_spec_embed = paddle.static.create_parameter(
+ shape=[config.hidden_size],
+ dtype='float32',
+ default_initializer=paddle.nn.initializer.Uniform(
+ low=0, high=1.0))
if config.do_stable_layer_norm:
self.encoder = Wav2Vec2EncoderStableLayerNorm(config)
else:
@@ -946,11 +1008,10 @@ class Wav2Vec2Model(nn.Layer):
self.feature_extractor._freeze_parameters()
def _mask_hidden_states(
- self,
- hidden_states: paddle.Tensor,
- mask_time_indices: Optional[paddle.Tensor] = None,
- attention_mask: Optional[paddle.Tensor] = None,
- ):
+ self,
+ hidden_states: paddle.Tensor,
+ mask_time_indices: Optional[paddle.Tensor]=None,
+ attention_mask: Optional[paddle.Tensor]=None, ):
"""
Masks extracted features along time axis and/or along feature axis according to
[SpecAugment](https://arxiv.org/abs/1904.08779).
@@ -963,17 +1024,19 @@ class Wav2Vec2Model(nn.Layer):
batch_size, sequence_length, hidden_size = hidden_states.shape
if mask_time_indices is not None:
# apply SpecAugment along time axis with given mask_time_indices
- hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
+ hidden_states[mask_time_indices] = self.masked_spec_embed.to(
+ hidden_states.dtype)
elif self.config.mask_time_prob > 0 and self.training:
mask_time_indices = _compute_mask_indices(
(batch_size, sequence_length),
mask_prob=self.config.mask_time_prob,
mask_length=self.config.mask_time_length,
attention_mask=attention_mask,
- min_masks=self.config.mask_time_min_masks,
- )
- mask_time_indices = paddle.to_tensor(mask_time_indices, dtype=paddle.bool)
- hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
+ min_masks=self.config.mask_time_min_masks, )
+ mask_time_indices = paddle.to_tensor(
+ mask_time_indices, dtype=paddle.bool)
+ hidden_states[mask_time_indices] = self.masked_spec_embed.to(
+ hidden_states.dtype)
if self.config.mask_feature_prob > 0 and self.training:
# generate indices & apply SpecAugment along feature axis
@@ -981,27 +1044,28 @@ class Wav2Vec2Model(nn.Layer):
(batch_size, hidden_size),
mask_prob=self.config.mask_feature_prob,
mask_length=self.config.mask_feature_length,
- min_masks=self.config.mask_feature_min_masks,
- )
- mask_feature_indices = paddle.to_tensor(mask_feature_indices, dtype=paddle.bool)
- mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
+ min_masks=self.config.mask_feature_min_masks, )
+ mask_feature_indices = paddle.to_tensor(
+ mask_feature_indices, dtype=paddle.bool)
+ mask_feature_indices = mask_feature_indices[:, None].expand(
+ -1, sequence_length, -1)
hidden_states[mask_feature_indices] = 0
return hidden_states
def forward(
- self,
- input_values: Optional[paddle.Tensor],
- attention_mask: Optional[paddle.Tensor] = None,
- mask_time_indices: Optional[paddle.Tensor] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
+ self,
+ input_values: Optional[paddle.Tensor],
+ attention_mask: Optional[paddle.Tensor]=None,
+ mask_time_indices: Optional[paddle.Tensor]=None,
+ output_attentions: Optional[bool]=None,
+ output_hidden_states: Optional[bool]=None,
+ return_dict: Optional[bool]=None,
) -> Union[Tuple, Wav2Vec2BaseModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
- output_hidden_states = (
- output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
- )
+ output_hidden_states = (output_hidden_states
+ if output_hidden_states is not None else
+ self.config.output_hidden_states)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
extract_features = self.feature_extractor(input_values)
extract_features = extract_features.transpose([0, 2, 1])
@@ -1009,20 +1073,20 @@ class Wav2Vec2Model(nn.Layer):
if attention_mask is not None:
# compute reduced attention_mask corresponding to feature vectors
attention_mask = self._get_feature_vector_attention_mask(
- extract_features.shape[1], attention_mask, add_adapter=False
- )
- hidden_states, extract_features = self.feature_projection(extract_features)
+ extract_features.shape[1], attention_mask, add_adapter=False)
+ hidden_states, extract_features = self.feature_projection(
+ extract_features)
hidden_states = self._mask_hidden_states(
- hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask
- )
+ hidden_states,
+ mask_time_indices=mask_time_indices,
+ attention_mask=attention_mask)
encoder_outputs = self.encoder(
hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
+ return_dict=return_dict, )
hidden_states = encoder_outputs[0]
@@ -1036,20 +1100,21 @@ class Wav2Vec2Model(nn.Layer):
last_hidden_state=hidden_states,
extract_features=extract_features,
hidden_states=encoder_outputs.hidden_states,
- attentions=encoder_outputs.attentions,
- )
+ attentions=encoder_outputs.attentions, )
def post_init(self):
"""
A method executed at the end of each Transformer model initialization, to execute code that needs the model's
modules properly initialized (such as weight initialization).
"""
- # self.init_weights()
- # self._backward_compatibility_gradient_checkpointing()
+ # self.init_weights()
+ # self._backward_compatibility_gradient_checkpointing()
pass
+
class Wav2Vec2ConfigPure():
model_type = "wav2vec2"
+
def __init__(self, config):
self.output_attentions = False
self.output_hidden_states = False
@@ -1084,17 +1149,14 @@ class Wav2Vec2ConfigPure():
self.do_stable_layer_norm = config.do_stable_layer_norm
self.use_weighted_layer_sum = config.use_weighted_layer_sum
- if (
- (len(self.conv_stride) != self.num_feat_extract_layers)
- or (len(self.conv_kernel) != self.num_feat_extract_layers)
- or (len(self.conv_dim) != self.num_feat_extract_layers)
- ):
+ if ((len(self.conv_stride) != self.num_feat_extract_layers) or
+ (len(self.conv_kernel) != self.num_feat_extract_layers) or
+ (len(self.conv_dim) != self.num_feat_extract_layers)):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
- f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
- )
+ f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
self.apply_spec_augment = config.apply_spec_augment
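Since SpecAugment in this file is driven entirely by `_compute_mask_indices`, a short illustrative call can help when tuning `mask_time_prob` and `mask_time_length` (values below are arbitrary, and the helper still uses the deprecated `np.bool` alias, so this assumes a NumPy version that provides it):

    from paddlespeech.s2t.models.wav2vec2.modules.modeling_wav2vec2 import _compute_mask_indices

    # (batch_size, sequence_length) of the frame-level features produced by the CNN encoder
    mask = _compute_mask_indices(shape=(4, 200), mask_prob=0.05, mask_length=10, min_masks=2)
    print(mask.shape, mask.sum(axis=-1))   # (4, 200); number of masked frames per utterance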
diff --git a/paddlespeech/s2t/models/wav2vec2/processing/signal_processing.py b/paddlespeech/s2t/models/wav2vec2/processing/signal_processing.py
index 8eb9b4adf..9998a8e5e 100644
--- a/paddlespeech/s2t/models/wav2vec2/processing/signal_processing.py
+++ b/paddlespeech/s2t/models/wav2vec2/processing/signal_processing.py
@@ -7,10 +7,8 @@ Authors
* Samuele Cornell 2020
* Sarthak Yadav 2022
"""
-import paddle
-import math
-from packaging import version
import numpy as np
+import paddle
def blackman_window(window_length, periodic=True):
@@ -90,15 +88,14 @@ def compute_amplitude(waveforms, lengths=None, amp_type="avg", scale="linear"):
def convolve1d(
- waveform,
- kernel,
- padding=0,
- pad_type="constant",
- stride=1,
- groups=1,
- use_fft=False,
- rotation_index=0,
-):
+ waveform,
+ kernel,
+ padding=0,
+ pad_type="constant",
+ stride=1,
+ groups=1,
+ use_fft=False,
+ rotation_index=0, ):
"""Use paddle.nn.functional to perform 1d padding and conv.
Arguments
---------
@@ -150,8 +147,7 @@ def convolve1d(
# Padding can be a tuple (left_pad, right_pad) or an int
if isinstance(padding, tuple):
waveform = paddle.nn.functional.pad(
- x=waveform, pad=padding, mode=pad_type, data_format='NCL'
- )
+ x=waveform, pad=padding, mode=pad_type, data_format='NCL')
# This approach uses FFT, which is more efficient if the kernel is large
if use_fft:
@@ -165,9 +161,7 @@ def convolve1d(
# Perform rotation to ensure alignment
zeros = paddle.zeros(
- [kernel.shape[0], kernel.shape[1], zero_length],
- dtype=kernel.dtype
- )
+ [kernel.shape[0], kernel.shape[1], zero_length], dtype=kernel.dtype)
after_index = kernel[..., rotation_index:]
before_index = kernel[..., :rotation_index]
kernel = paddle.concat((after_index, zeros, before_index), axis=-1)
@@ -185,12 +179,12 @@ def convolve1d(
weight=kernel,
stride=stride,
groups=groups,
- padding=padding if not isinstance(padding, tuple) else 0,
- )
+ padding=padding if not isinstance(padding, tuple) else 0, )
# Return time dimension to the second dimension.
return convolved.transpose([0, 2, 1])
+
def notch_filter(notch_freq, filter_width=101, notch_width=0.05):
"""Returns a notch filter constructed from a high-pass and low-pass filter.
(from https://tomroelandts.com/articles/
@@ -224,7 +218,8 @@ def notch_filter(notch_freq, filter_width=101, notch_width=0.05):
return paddle.sin(x) / x
# The zero is at the middle index
- return paddle.concat([_sinc(x[:pad]), paddle.ones([1]), _sinc(x[pad + 1 :])])
+ return paddle.concat(
+ [_sinc(x[:pad]), paddle.ones([1]), _sinc(x[pad + 1:])])
# Compute a low-pass filter with cutoff frequency notch_freq.
hlpf = sinc(3 * (notch_freq - notch_width) * inputs)
@@ -239,4 +234,3 @@ def notch_filter(notch_freq, filter_width=101, notch_width=0.05):
# Adding filters creates notch filter
return (hlpf + hhpf).view(1, -1, 1)
-
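These two helpers are the building blocks of DropFreq in speech_augmentation.py below. A minimal sketch of direct use, following the SpeechBrain-style conventions this code is ported from (waveforms are (batch, time, channels); the 0.25 cutoff is an illustrative normalized frequency):

    import paddle
    from paddlespeech.s2t.models.wav2vec2.processing.signal_processing import convolve1d
    from paddlespeech.s2t.models.wav2vec2.processing.signal_processing import notch_filter

    signal = paddle.rand([1, 16000, 1]) - 0.5       # one second of single-channel audio
    kernel = notch_filter(0.25, filter_width=101)   # kernel shape: (1, 101, 1)
    notched = convolve1d(signal, kernel, padding=101 // 2)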
diff --git a/paddlespeech/s2t/models/wav2vec2/processing/speech_augmentation.py b/paddlespeech/s2t/models/wav2vec2/processing/speech_augmentation.py
index f67121ede..471ab7657 100644
--- a/paddlespeech/s2t/models/wav2vec2/processing/speech_augmentation.py
+++ b/paddlespeech/s2t/models/wav2vec2/processing/speech_augmentation.py
@@ -1,11 +1,12 @@
import math
+
import paddle
import paddle.nn as nn
-import paddle.nn.functional as F
-from paddlespeech.s2t.models.wav2vec2.processing.signal_processing import (
- compute_amplitude,
- convolve1d,
- notch_filter)
+
+from paddlespeech.s2t.models.wav2vec2.processing.signal_processing import compute_amplitude
+from paddlespeech.s2t.models.wav2vec2.processing.signal_processing import convolve1d
+from paddlespeech.s2t.models.wav2vec2.processing.signal_processing import notch_filter
+
class SpeedPerturb(nn.Layer):
"""Slightly speed up or slow down an audio signal.
@@ -36,8 +37,10 @@ class SpeedPerturb(nn.Layer):
"""
def __init__(
- self, orig_freq, speeds=[90, 100, 110], perturb_prob=1.0,
- ):
+ self,
+ orig_freq,
+ speeds=[90, 100, 110],
+ perturb_prob=1.0, ):
super().__init__()
self.orig_freq = orig_freq
self.speeds = speeds
@@ -70,14 +73,15 @@ class SpeedPerturb(nn.Layer):
# Don't perturb (return early) 1-`perturb_prob` portion of the batches
if paddle.rand([1]) > self.perturb_prob:
-
+
return waveform.clone()
# Perform a random perturbation
- self.samp_index = paddle.randint(len(self.speeds), shape=(1,))[0]
+ self.samp_index = paddle.randint(len(self.speeds), shape=(1, ))[0]
perturbed_waveform = self.resamplers[self.samp_index](waveform)
return perturbed_waveform
+
class Resample(nn.Layer):
"""This class resamples an audio signal using sinc-based interpolation.
@@ -94,9 +98,12 @@ class Resample(nn.Layer):
Controls the sharpness of the filter, larger numbers result in a
sharper filter, but they are less efficient. Values from 4 to 10 are allowed.
"""
+
def __init__(
- self, orig_freq=16000, new_freq=16000, lowpass_filter_width=6,
- ):
+ self,
+ orig_freq=16000,
+ new_freq=16000,
+ lowpass_filter_width=6, ):
super().__init__()
self.orig_freq = orig_freq
self.new_freq = new_freq
@@ -193,8 +200,7 @@ class Resample(nn.Layer):
window_size = self.weights.shape[1]
tot_output_samp = self._output_samples(wave_len)
resampled_waveform = paddle.zeros(
- (batch_size, num_channels, tot_output_samp)
- )
+ (batch_size, num_channels, tot_output_samp))
# self.weights = self.weights.to(waveforms.device)
# Check weights are on correct device
@@ -222,28 +228,25 @@ class Resample(nn.Layer):
right_padding = max(0, end_index + 1 - current_wave_len)
left_padding = max(0, -first_index)
wave_to_conv = paddle.nn.functional.pad(
- wave_to_conv, (left_padding, right_padding), data_format='NCL'
- )
+ wave_to_conv, (left_padding, right_padding), data_format='NCL')
conv_wave = paddle.nn.functional.conv1d(
x=wave_to_conv,
weight=self.weights[i].repeat(num_channels, 1, 1),
stride=self.conv_stride,
- groups=num_channels,
- )
+ groups=num_channels, )
# we want conv_wave[:, i] to be at
# output[:, i + n*conv_transpose_stride]
dilated_conv_wave = paddle.nn.functional.conv1d_transpose(
- conv_wave, eye, stride=self.conv_transpose_stride
- )
+ conv_wave, eye, stride=self.conv_transpose_stride)
# pad dilated_conv_wave so it reaches the output length if needed.
left_padding = i
previous_padding = left_padding + dilated_conv_wave.shape[-1]
right_padding = max(0, tot_output_samp - previous_padding)
dilated_conv_wave = paddle.nn.functional.pad(
- dilated_conv_wave, (left_padding, right_padding), data_format='NCL'
- )
+ dilated_conv_wave, (left_padding, right_padding),
+ data_format='NCL')
dilated_conv_wave = dilated_conv_wave[..., :tot_output_samp]
resampled_waveform += dilated_conv_wave
@@ -326,9 +329,7 @@ class Resample(nn.Layer):
window_width = self.lowpass_filter_width / (2.0 * lowpass_cutoff)
assert lowpass_cutoff < min(self.orig_freq, self.new_freq) / 2
- output_t = paddle.arange(
- start=0.0, end=self.output_samples
- )
+ output_t = paddle.arange(start=0.0, end=self.output_samples)
output_t /= self.new_freq
min_t = output_t - window_width
max_t = output_t + window_width
@@ -346,23 +347,16 @@ class Resample(nn.Layer):
inside_window_indices = delta_t.abs() < (window_width)
# raised-cosine (Hanning) window with width `window_width`
- weights[inside_window_indices] = 0.5 * (
- 1
- + paddle.cos(
- 2
- * math.pi
- * lowpass_cutoff
- / self.lowpass_filter_width
- * delta_t[inside_window_indices]
- )
- )
+ weights[inside_window_indices] = 0.5 * (1 + paddle.cos(
+ 2 * math.pi * lowpass_cutoff / self.lowpass_filter_width *
+ delta_t[inside_window_indices]))
t_eq_zero_indices = delta_t == 0.0
t_not_eq_zero_indices = ~t_eq_zero_indices
# sinc filter function
weights[t_not_eq_zero_indices] *= paddle.sin(
- 2 * math.pi * lowpass_cutoff * delta_t[t_not_eq_zero_indices]
- ) / (math.pi * delta_t[t_not_eq_zero_indices])
+ 2 * math.pi * lowpass_cutoff * delta_t[t_not_eq_zero_indices]) / (
+ math.pi * delta_t[t_not_eq_zero_indices])
# limit of the function at t = 0
weights[t_eq_zero_indices] *= 2 * lowpass_cutoff
@@ -405,14 +399,13 @@ class DropFreq(nn.Layer):
"""
def __init__(
- self,
- drop_freq_low=1e-14,
- drop_freq_high=1,
- drop_count_low=1,
- drop_count_high=2,
- drop_width=0.05,
- drop_prob=1,
- ):
+ self,
+ drop_freq_low=1e-14,
+ drop_freq_high=1,
+ drop_count_low=1,
+ drop_count_high=2,
+ drop_width=0.05,
+ drop_prob=1, ):
super().__init__()
self.drop_freq_low = drop_freq_low
self.drop_freq_high = drop_freq_high
@@ -443,14 +436,14 @@ class DropFreq(nn.Layer):
# Pick number of frequencies to drop
drop_count = paddle.randint(
- low=self.drop_count_low, high=self.drop_count_high + 1, shape=(1,),
- )
+ low=self.drop_count_low,
+ high=self.drop_count_high + 1,
+ shape=(1, ), )
# Pick a frequency to drop
drop_range = self.drop_freq_high - self.drop_freq_low
drop_frequency = (
- paddle.rand(drop_count) * drop_range + self.drop_freq_low
- )
+ paddle.rand(drop_count) * drop_range + self.drop_freq_low)
# Filter parameters
filter_length = 101
pad = filter_length // 2
@@ -461,8 +454,9 @@ class DropFreq(nn.Layer):
# Subtract each frequency
for frequency in drop_frequency:
notch_kernel = notch_filter(
- frequency, filter_length, self.drop_width,
- )
+ frequency,
+ filter_length,
+ self.drop_width, )
drop_filter = convolve1d(drop_filter, notch_kernel, pad)
# Apply filter
@@ -471,6 +465,7 @@ class DropFreq(nn.Layer):
# Remove channels dimension if added
return dropped_waveform.squeeze(-1)
+
class DropChunk(nn.Layer):
"""This class drops portions of the input signal.
Using `DropChunk` as an augmentation strategy helps a model learn to rely
@@ -515,16 +510,15 @@ class DropChunk(nn.Layer):
"""
def __init__(
- self,
- drop_length_low=100,
- drop_length_high=1000,
- drop_count_low=1,
- drop_count_high=10,
- drop_start=0,
- drop_end=None,
- drop_prob=1,
- noise_factor=0.0,
- ):
+ self,
+ drop_length_low=100,
+ drop_length_high=1000,
+ drop_count_low=1,
+ drop_count_high=10,
+ drop_start=0,
+ drop_end=None,
+ drop_prob=1,
+ noise_factor=0.0, ):
super().__init__()
self.drop_length_low = drop_length_low
self.drop_length_high = drop_length_high
@@ -580,8 +574,7 @@ class DropChunk(nn.Layer):
drop_times = paddle.randint(
low=self.drop_count_low,
high=self.drop_count_high + 1,
- shape=(batch_size,),
- )
+ shape=(batch_size, ), )
# Iterate batch to set mask
for i in range(batch_size):
@@ -592,8 +585,7 @@ class DropChunk(nn.Layer):
length = paddle.randint(
low=self.drop_length_low,
high=self.drop_length_high + 1,
- shape=(drop_times[i],),
- )
+ shape=(drop_times[i], ), )
# Compute range of starting locations
start_min = self.drop_start
@@ -608,15 +600,16 @@ class DropChunk(nn.Layer):
# Pick starting locations
start = paddle.randint(
- low=start_min, high=start_max + 1, shape=(drop_times[i],),
- )
+ low=start_min,
+ high=start_max + 1,
+ shape=(drop_times[i], ), )
end = start + length
# Update waveform
if not self.noise_factor:
for j in range(drop_times[i]):
- dropped_waveform[i, start[j] : end[j]] = 0.0
+ dropped_waveform[i, start[j]:end[j]] = 0.0
else:
# Uniform distribution of -2 to +2 * avg amplitude should
# preserve the average for normalization
@@ -625,7 +618,7 @@ class DropChunk(nn.Layer):
# zero-center the noise distribution
noise_vec = paddle.rand([length[j]])
noise_vec = 2 * noise_max * noise_vec - noise_max
- dropped_waveform[i, start[j] : end[j]] = noise_vec
+ dropped_waveform[i, start[j]:end[j]] = noise_vec
return dropped_waveform
@@ -679,37 +672,33 @@ class TimeDomainSpecAugment(nn.Layer):
"""
def __init__(
- self,
- perturb_prob=1.0,
- drop_freq_prob=1.0,
- drop_chunk_prob=1.0,
- speeds=[95, 100, 105],
- sample_rate=16000,
- drop_freq_count_low=0,
- drop_freq_count_high=3,
- drop_chunk_count_low=0,
- drop_chunk_count_high=5,
- drop_chunk_length_low=1000,
- drop_chunk_length_high=2000,
- drop_chunk_noise_factor=0,
- ):
+ self,
+ perturb_prob=1.0,
+ drop_freq_prob=1.0,
+ drop_chunk_prob=1.0,
+ speeds=[95, 100, 105],
+ sample_rate=16000,
+ drop_freq_count_low=0,
+ drop_freq_count_high=3,
+ drop_chunk_count_low=0,
+ drop_chunk_count_high=5,
+ drop_chunk_length_low=1000,
+ drop_chunk_length_high=2000,
+ drop_chunk_noise_factor=0, ):
super().__init__()
self.speed_perturb = SpeedPerturb(
- perturb_prob=perturb_prob, orig_freq=sample_rate, speeds=speeds
- )
+ perturb_prob=perturb_prob, orig_freq=sample_rate, speeds=speeds)
self.drop_freq = DropFreq(
drop_prob=drop_freq_prob,
drop_count_low=drop_freq_count_low,
- drop_count_high=drop_freq_count_high,
- )
+ drop_count_high=drop_freq_count_high, )
self.drop_chunk = DropChunk(
drop_prob=drop_chunk_prob,
drop_count_low=drop_chunk_count_low,
drop_count_high=drop_chunk_count_high,
drop_length_low=drop_chunk_length_low,
drop_length_high=drop_chunk_length_high,
- noise_factor=drop_chunk_noise_factor,
- )
+ noise_factor=drop_chunk_noise_factor, )
def forward(self, waveforms, lengths):
"""Returns the distorted waveforms.
@@ -724,4 +713,4 @@ class TimeDomainSpecAugment(nn.Layer):
waveforms = self.speed_perturb(waveforms)
waveforms = self.drop_freq(waveforms)
waveforms = self.drop_chunk(waveforms, lengths)
- return waveforms
\ No newline at end of file
+ return waveforms
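Taken together, the classes above form the waveform-level augmentation pipeline applied before the wav2vec2 encoder. A hypothetical usage sketch, assuming (batch, time) inputs and relative lengths in [0, 1] as in SpeechBrain (output length can differ slightly because of speed perturbation):

    import paddle
    from paddlespeech.s2t.models.wav2vec2.processing.speech_augmentation import TimeDomainSpecAugment

    augment = TimeDomainSpecAugment(sample_rate=16000, speeds=[95, 100, 105])
    wavs = paddle.randn([4, 32000])    # 4 utterances, 2 seconds each at 16 kHz
    lens = paddle.ones([4])            # every utterance uses its full length
    augmented = augment(wavs, lens)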
diff --git a/paddlespeech/s2t/models/wav2vec2/wav2vec2_ASR.py b/paddlespeech/s2t/models/wav2vec2/wav2vec2_ASR.py
index 6c8b0ee4c..f54748f8b 100644
--- a/paddlespeech/s2t/models/wav2vec2/wav2vec2_ASR.py
+++ b/paddlespeech/s2t/models/wav2vec2/wav2vec2_ASR.py
@@ -1,30 +1,24 @@
-import numpy as np
-import os
-
+from collections import defaultdict
from typing import Dict
from typing import List
-from typing import Optional
from typing import Tuple
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
+
from paddlespeech.s2t.models.wav2vec2.modules.modeling_wav2vec2 import Wav2Vec2ConfigPure
from paddlespeech.s2t.models.wav2vec2.modules.modeling_wav2vec2 import Wav2Vec2Model
-from paddlespeech.s2t.modules.mask import make_pad_mask
-from paddlespeech.s2t.utils.utility import log_add
-
-from collections import defaultdict
-
from paddlespeech.s2t.models.wav2vec2.modules.VanillaNN import VanillaNN
from paddlespeech.s2t.modules.ctc import CTCDecoderBase as CTC
from paddlespeech.s2t.utils.ctc_utils import remove_duplicates_and_blank
-from yacs.config import CfgNode
+from paddlespeech.s2t.utils.utility import log_add
+
class Wav2vec2ASR(nn.Layer):
def __init__(self, config: dict):
super().__init__()
-
+
wav2vec2_config = Wav2Vec2ConfigPure(config)
wav2vec2 = Wav2Vec2Model(wav2vec2_config)
model_dict = paddle.load(config.wav2vec2_params_path)
@@ -36,8 +30,16 @@ class Wav2vec2ASR(nn.Layer):
for parm in wav2vec2.parameters():
parm.trainable = False
self.wav2vec2 = wav2vec2
- self.enc = VanillaNN(input_shape=[None,None,wav2vec2_config.hidden_size], activation=nn.LeakyReLU, dnn_blocks=config.dnn_blocks, dnn_neurons=config.dnn_neurons)
- self.ctc = CTC(odim=config.output_dim, enc_n_units=config.dnn_neurons, blank_id=config.blank_id, dropout_rate=config.ctc_dropout_rate, reduction=True)
+ self.enc = VanillaNN(
+ input_shape=[None, None, wav2vec2_config.hidden_size],
+ activation=nn.LeakyReLU,
+ dnn_blocks=config.dnn_blocks,
+ dnn_neurons=config.dnn_neurons)
+ self.ctc = CTC(odim=config.output_dim,
+ enc_n_units=config.dnn_neurons,
+ blank_id=config.blank_id,
+ dropout_rate=config.ctc_dropout_rate,
+ reduction=True)
def forward(self, wav, wavs_lens_rate, target, target_lens_rate):
if self.normalize_wav:
@@ -51,25 +53,27 @@ class Wav2vec2ASR(nn.Layer):
x = self.enc(feats)
x_lens = (wavs_lens_rate * x.shape[1]).round().astype(paddle.int64)
- target_lens = (target_lens_rate * target.shape[1]).round().astype(paddle.int64)
-
+ target_lens = (target_lens_rate *
+ target.shape[1]).round().astype(paddle.int64)
+
ctc_loss = self.ctc(x, x_lens, target, target_lens)
return ctc_loss
@paddle.no_grad()
- def decode(self,
+ def decode(self,
feats: paddle.Tensor,
text_feature: Dict[str, int],
decoding_method: str,
beam_size: int):
batch_size = feats.shape[0]
- if decoding_method is 'ctc_prefix_beam_search' and batch_size > 1:
+
+ if decoding_method == 'ctc_prefix_beam_search' and batch_size > 1:
logger.error(
                f'decoding mode {decoding_method} must be run with batch_size == 1'
)
logger.error(f"current batch_size is {batch_size}")
sys.exit(1)
-
+
if decoding_method == 'ctc_greedy_search':
hyps = self.ctc_greedy_search(feats)
res = [text_feature.defeaturize(hyp) for hyp in hyps]
@@ -79,13 +83,12 @@ class Wav2vec2ASR(nn.Layer):
# with other batch decoding mode
elif decoding_method == 'ctc_prefix_beam_search':
assert feats.shape[0] == 1
- hyp = self.ctc_prefix_beam_search(
- feats,
- beam_size)
+ hyp = self.ctc_prefix_beam_search(feats, beam_size)
res = [text_feature.defeaturize(hyp)]
res_tokenids = [hyp]
else:
- raise ValueError(f"wav2vec2 not support decoding method: {decoding_method}")
+ raise ValueError(
+                f"wav2vec2 does not support decoding method: {decoding_method}")
return res, res_tokenids
@@ -94,8 +97,7 @@ class Wav2vec2ASR(nn.Layer):
model = cls(config)
return model
- def ctc_greedy_search(
- self, wav) -> List[List[int]]:
+ def ctc_greedy_search(self, wav) -> List[List[int]]:
""" Apply CTC greedy search
Args:
speech (paddle.Tensor): (batch, max_len)
@@ -104,7 +106,7 @@ class Wav2vec2ASR(nn.Layer):
List[List[int]]: best path result
"""
batch_size = wav.shape[0]
- wav = wav[:,:,0]
+ wav = wav[:, :, 0]
if self.normalize_wav:
wav = F.layer_norm(wav, wav.shape[1:])
# Extract wav2vec output
@@ -124,7 +126,10 @@ class Wav2vec2ASR(nn.Layer):
return hyps
def _ctc_prefix_beam_search(
- self, wav, beam_size, blank_id: int=0, ) -> Tuple[List[Tuple[int, float]], paddle.Tensor]:
+ self,
+ wav,
+ beam_size,
+ blank_id: int=0, ) -> Tuple[List[Tuple[int, float]], paddle.Tensor]:
""" CTC prefix beam search inner implementation
Args:
speech (paddle.Tensor): (batch, max_len, feat_dim)
@@ -142,7 +147,7 @@ class Wav2vec2ASR(nn.Layer):
paddle.Tensor: encoder output, (1, max_len, encoder_dim),
it will be used for rescoring in attention rescoring mode
"""
- wav = wav[:,:,0]
+ wav = wav[:, :, 0]
if self.normalize_wav:
wav = F.layer_norm(wav, wav.shape[1:])
@@ -219,29 +224,5 @@ class Wav2vec2ASR(nn.Layer):
Returns:
List[int]: CTC prefix beam search nbest results
"""
- hyps = self._ctc_prefix_beam_search(
- wav, beam_size)
+ hyps = self._ctc_prefix_beam_search(wav, beam_size)
return hyps[0][0]
-
- # @jit.to_static
- # def ctc_activation(self, xs: paddle.Tensor) -> paddle.Tensor:
- # """ Export interface for c++ call, apply linear transform and log
- # softmax before ctc
- # Args:
- # xs (paddle.Tensor): encoder output, (B, T, D)
- # Returns:
- # paddle.Tensor: activation before ctc
- # """
- # return self.ctc.log_softmax(xs)
-
-
- # def _get_data(self):
- # data_dir = "data"
- # wavs = np.load(os.path.join(data_dir, "wavs.npy"))
- # wavs_lens = np.load(os.path.join(data_dir, "wavs_lens.npy"))
- # tokens = np.load(os.path.join(data_dir, "tokens.npy"))
- # tokens_lens = np.load(os.path.join(data_dir, "tokens_lens.npy"))
-
- # batch = (paddle.to_tensor(wavs), paddle.to_tensor(wavs_lens, dtype='float32'),
- # paddle.to_tensor(tokens, dtype='int32'), paddle.to_tensor(tokens_lens, dtype='float32'))
- # return batch
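As a usage note for the decoding API above: `ctc_greedy_search` works on a full batch, while `ctc_prefix_beam_search` is restricted to `batch_size == 1`. A hedged sketch, not part of the patch; `model`, `wavs`, and `text_feature` are assumed to be a trained `Wav2vec2ASR`, a `(batch, time, 1)` waveform tensor, and a `TextFeaturizer` exposing `defeaturize`:

```python
model.eval()

# Greedy search decodes the whole batch at once.
res, res_tokenids = model.decode(
    wavs, text_feature, decoding_method='ctc_greedy_search', beam_size=10)

# Prefix beam search requires batch_size == 1.
res, res_tokenids = model.decode(
    wavs[:1], text_feature, decoding_method='ctc_prefix_beam_search', beam_size=10)
```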
From 7bee9d807f384101ee84f1153e2306b108419339 Mon Sep 17 00:00:00 2001
From: tianhao zhang <15600919271@163.com>
Date: Mon, 10 Oct 2022 12:21:53 +0000
Subject: [PATCH 3/5] format wav2vec2 demo
---
examples/librispeech/asr3/local/data.sh | 110 ++++++++++++++++++++++++
1 file changed, 110 insertions(+)
create mode 100644 examples/librispeech/asr3/local/data.sh
diff --git a/examples/librispeech/asr3/local/data.sh b/examples/librispeech/asr3/local/data.sh
new file mode 100644
index 000000000..8495a4ab6
--- /dev/null
+++ b/examples/librispeech/asr3/local/data.sh
@@ -0,0 +1,110 @@
+#!/bin/bash
+
+stage=-1
+stop_stage=100
+
+unit_type=char
+dict_dir=data/lang_char
+
+source ${MAIN_ROOT}/utils/parse_options.sh
+
+mkdir -p data
+mkdir -p ${dict_dir}
+TARGET_DIR=${MAIN_ROOT}/dataset
+mkdir -p ${TARGET_DIR}
+
+if [ ${stage} -le -1 ] && [ ${stop_stage} -ge -1 ]; then
+ # download data, generate manifests
+ python3 ${TARGET_DIR}/librispeech/librispeech.py \
+ --manifest_prefix="data/manifest" \
+ --target_dir="${TARGET_DIR}/librispeech" \
+ --full_download="True"
+
+ if [ $? -ne 0 ]; then
+ echo "Prepare LibriSpeech failed. Terminated."
+ exit 1
+ fi
+
+ for set in train-clean-100 train-clean-360 train-other-500 dev-clean dev-other test-clean test-other; do
+ mv data/manifest.${set} data/manifest.${set}.raw
+ done
+
+ rm -rf data/manifest.train.raw data/manifest.dev.raw data/manifest.test.raw
+ for set in train-clean-100 train-clean-360 train-other-500; do
+ cat data/manifest.${set}.raw >> data/manifest.train.raw
+ done
+
+ for set in dev-clean dev-other; do
+ cat data/manifest.${set}.raw >> data/manifest.dev.raw
+ done
+
+ for set in test-clean test-other; do
+ cat data/manifest.${set}.raw >> data/manifest.test.raw
+ done
+fi
+
+if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
+ # compute mean and stddev for normalizer
+ num_workers=$(nproc)
+ python3 ${MAIN_ROOT}/utils/compute_mean_std.py \
+ --manifest_path="data/manifest.train.raw" \
+ --num_samples=2000 \
+ --spectrum_type="fbank" \
+ --feat_dim=161 \
+ --delta_delta=false \
+ --sample_rate=16000 \
+ --stride_ms=10 \
+ --window_ms=25 \
+ --use_dB_normalization=False \
+ --num_workers=${num_workers} \
+ --output_path="data/mean_std.json"
+
+ if [ $? -ne 0 ]; then
+ echo "Compute mean and stddev failed. Terminated."
+ exit 1
+ fi
+fi
+
+if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
+ # build vocabulary
+ python3 ${MAIN_ROOT}/utils/build_vocab.py \
+ --unit_type ${unit_type} \
+ --count_threshold=0 \
+ --vocab_path="${dict_dir}/vocab.txt" \
+ --manifest_paths="data/manifest.train.raw"
+
+ if [ $? -ne 0 ]; then
+ echo "Build vocabulary failed. Terminated."
+ exit 1
+ fi
+fi
+
+if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
+ # format manifest with tokenids, vocab size
+ for set in train dev test dev-clean dev-other test-clean test-other; do
+ {
+ python3 ${MAIN_ROOT}/utils/format_data.py \
+ --cmvn_path "data/mean_std.json" \
+ --unit_type ${unit_type} \
+ --vocab_path="${dict_dir}/vocab.txt" \
+ --manifest_path="data/manifest.${set}.raw" \
+ --output_path="data/manifest.${set}"
+
+ if [ $? -ne 0 ]; then
+            echo "Format manifest.${set} failed. Terminated."
+ exit 1
+ fi
+ }&
+ done
+ wait
+fi
+
+echo "LibriSpeech Data preparation done."
+
+if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
+ mkdir -p exp/wav2vec2
+    echo "Downloading pretrained wav2vec2 model"
+ wget -P exp/wav2vec2 https://paddlespeech.bj.bcebos.com/wav2vec/wav2vec2-large-960h-lv60-self.pdparams
+fi
+
+exit 0
\ No newline at end of file
From 3d994f5c23a86b97d058400b6f6b06dafad064ce Mon Sep 17 00:00:00 2001
From: tianhao zhang <15600919271@163.com>
Date: Tue, 11 Oct 2022 16:53:10 +0000
Subject: [PATCH 4/5] format wav2vec2 demo
---
examples/librispeech/asr3/conf/preprocess.yaml | 4 ++--
examples/librispeech/asr3/conf/tuning/decode.yaml | 9 +--------
examples/librispeech/asr3/run.sh | 3 +--
paddlespeech/audio/transform/spectrogram.py | 2 +-
paddlespeech/s2t/exps/wav2vec2/bin/test.py | 2 --
paddlespeech/s2t/exps/wav2vec2/model.py | 3 ---
paddlespeech/s2t/models/wav2vec2/wav2vec2_ASR.py | 2 +-
paddlespeech/s2t/modules/ctc.py | 7 +++++--
8 files changed, 11 insertions(+), 21 deletions(-)
diff --git a/examples/librispeech/asr3/conf/preprocess.yaml b/examples/librispeech/asr3/conf/preprocess.yaml
index 3979d256b..4a908a83b 100644
--- a/examples/librispeech/asr3/conf/preprocess.yaml
+++ b/examples/librispeech/asr3/conf/preprocess.yaml
@@ -1,4 +1,4 @@
process:
- # extract kaldi fbank from PCM
+ # use raw audio
- type: wav_process
- dither: 0.1
+ dither: 0.0
diff --git a/examples/librispeech/asr3/conf/tuning/decode.yaml b/examples/librispeech/asr3/conf/tuning/decode.yaml
index c2261fb28..2ba393264 100644
--- a/examples/librispeech/asr3/conf/tuning/decode.yaml
+++ b/examples/librispeech/asr3/conf/tuning/decode.yaml
@@ -1,11 +1,4 @@
decode_batch_size: 1
error_rate_type: wer
-decoding_method: ctc_greedy_search # 'attention', 'ctc_greedy_search', 'ctc_prefix_beam_search', 'attention_rescoring'
+decoding_method: ctc_greedy_search # 'ctc_greedy_search', 'ctc_prefix_beam_search'
beam_size: 10
-ctc_weight: 0.5 # ctc weight for attention rescoring decode mode.
-decoding_chunk_size: -1 # decoding chunk size. Defaults to -1.
- # <0: for decoding, use full chunk.
- # >0: for decoding, use fixed chunk size as set.
- # 0: used for training, it's prohibited here.
-num_decoding_left_chunks: -1 # number of left chunks for decoding. Defaults to -1.
-simulate_streaming: False # simulate streaming inference. Defaults to False.
diff --git a/examples/librispeech/asr3/run.sh b/examples/librispeech/asr3/run.sh
index 55b2ca86d..3b1abb11b 100644
--- a/examples/librispeech/asr3/run.sh
+++ b/examples/librispeech/asr3/run.sh
@@ -36,9 +36,8 @@ if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
avg.sh best exp/${ckpt}/checkpoints ${avg_num}
fi
-
if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
- # attetion resocre decoder
+ # greedy search decoder
CUDA_VISIBLE_DEVICES=${gpus} ./local/test.sh ${conf_path} ${decode_conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} || exit -1
fi
diff --git a/paddlespeech/audio/transform/spectrogram.py b/paddlespeech/audio/transform/spectrogram.py
index 2e5199394..cba60cfdb 100644
--- a/paddlespeech/audio/transform/spectrogram.py
+++ b/paddlespeech/audio/transform/spectrogram.py
@@ -383,7 +383,7 @@ class LogMelSpectrogramKaldi():
class WavProcess():
- def __init__(self, dither=0.1):
+ def __init__(self, dither=0.0):
"""
Args:
dither (float): Dithering constant
diff --git a/paddlespeech/s2t/exps/wav2vec2/bin/test.py b/paddlespeech/s2t/exps/wav2vec2/bin/test.py
index 4fa224c33..d1a6fd405 100644
--- a/paddlespeech/s2t/exps/wav2vec2/bin/test.py
+++ b/paddlespeech/s2t/exps/wav2vec2/bin/test.py
@@ -20,8 +20,6 @@ from paddlespeech.s2t.exps.wav2vec2.model import Wav2Vec2ASRTester as Tester
from paddlespeech.s2t.training.cli import default_argument_parser
from paddlespeech.s2t.utils.utility import print_arguments
-# TODO(hui zhang): dynamic load
-
def main_sp(config, args):
exp = Tester(config, args)
diff --git a/paddlespeech/s2t/exps/wav2vec2/model.py b/paddlespeech/s2t/exps/wav2vec2/model.py
index 32cf0b473..d845d8c67 100644
--- a/paddlespeech/s2t/exps/wav2vec2/model.py
+++ b/paddlespeech/s2t/exps/wav2vec2/model.py
@@ -25,9 +25,7 @@ import paddle
from paddle import distributed as dist
from paddlespeech.s2t.frontend.featurizer import TextFeaturizer
-from paddlespeech.s2t.io.dataloader import BatchDataLoader
from paddlespeech.s2t.io.dataloader import DataLoaderFactory
-from paddlespeech.s2t.io.dataloader import StreamDataLoader
from paddlespeech.s2t.models.wav2vec2.processing.speech_augmentation import TimeDomainSpecAugment
from paddlespeech.s2t.models.wav2vec2.wav2vec2_ASR import Wav2vec2ASR
from paddlespeech.s2t.training.optimizer import OptimizerFactory
@@ -300,7 +298,6 @@ class Wav2Vec2ASRTrainer(Trainer):
"epsilon": optim_conf.epsilon,
"rho": optim_conf.rho,
"parameters": parameters,
- "epsilon": 1e-9 if optim_type == 'noam' else None,
"beta1": 0.9 if optim_type == 'noam' else None,
"beat2": 0.98 if optim_type == 'noam' else None,
}
diff --git a/paddlespeech/s2t/models/wav2vec2/wav2vec2_ASR.py b/paddlespeech/s2t/models/wav2vec2/wav2vec2_ASR.py
index f54748f8b..0d99e8708 100644
--- a/paddlespeech/s2t/models/wav2vec2/wav2vec2_ASR.py
+++ b/paddlespeech/s2t/models/wav2vec2/wav2vec2_ASR.py
@@ -39,7 +39,7 @@ class Wav2vec2ASR(nn.Layer):
enc_n_units=config.dnn_neurons,
blank_id=config.blank_id,
dropout_rate=config.ctc_dropout_rate,
- reduction=True)
+ reduction='mean')
def forward(self, wav, wavs_lens_rate, target, target_lens_rate):
if self.normalize_wav:
diff --git a/paddlespeech/s2t/modules/ctc.py b/paddlespeech/s2t/modules/ctc.py
index 0f50db21d..e0c01ab46 100644
--- a/paddlespeech/s2t/modules/ctc.py
+++ b/paddlespeech/s2t/modules/ctc.py
@@ -53,7 +53,7 @@ class CTCDecoderBase(nn.Layer):
enc_n_units,
blank_id=0,
dropout_rate: float=0.0,
- reduction: bool=True,
+ reduction: Union[str, bool]=True,
batch_average: bool=True,
grad_norm_type: Union[str, None]=None):
"""CTC decoder
@@ -73,7 +73,10 @@ class CTCDecoderBase(nn.Layer):
self.odim = odim
self.dropout = nn.Dropout(dropout_rate)
self.ctc_lo = Linear(enc_n_units, self.odim)
- reduction_type = "sum" if reduction else "none"
+ if isinstance(reduction, bool):
+ reduction_type = "sum" if reduction else "none"
+ else:
+ reduction_type = reduction
self.criterion = CTCLoss(
blank=self.blank_id,
reduction=reduction_type,
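The change above keeps the boolean form of `reduction` working while allowing the string form used by the wav2vec2 recipe. A small sketch of the resulting behaviour (illustrative values; `odim` and `enc_n_units` are placeholders):

```python
from paddlespeech.s2t.modules.ctc import CTCDecoderBase as CTC

# bool keeps its old meaning; strings are passed straight through to CTCLoss.
ctc_sum = CTC(odim=29, enc_n_units=1024, blank_id=0, reduction=True)     # -> "sum"
ctc_none = CTC(odim=29, enc_n_units=1024, blank_id=0, reduction=False)   # -> "none"
ctc_mean = CTC(odim=29, enc_n_units=1024, blank_id=0, reduction='mean')  # -> "mean"
```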
From 2ae94bd277ec3e5e5c74ba25f66f20d7cf102007 Mon Sep 17 00:00:00 2001
From: tianhao zhang <15600919271@163.com>
Date: Wed, 12 Oct 2022 01:50:04 +0000
Subject: [PATCH 5/5] freeze wav2vec2=True, change loss report and update
README.md
---
examples/librispeech/asr3/README.md | 6 +++++
.../librispeech/asr3/conf/wav2vec2ASR.yaml | 2 +-
paddlespeech/s2t/exps/wav2vec2/model.py | 24 +++++++++++++++++--
3 files changed, 29 insertions(+), 3 deletions(-)
diff --git a/examples/librispeech/asr3/README.md b/examples/librispeech/asr3/README.md
index bd96af86f..f99beb338 100644
--- a/examples/librispeech/asr3/README.md
+++ b/examples/librispeech/asr3/README.md
@@ -88,6 +88,12 @@ data/
|-- test.meta
`-- train.meta
```
+
+Stage 0 also downloads the pre-trained [wav2vec2](https://paddlespeech.bj.bcebos.com/wav2vec/wav2vec2-large-960h-lv60-self.pdparams) model.
+```bash
+mkdir -p exp/wav2vec2
+wget -P exp/wav2vec2 https://paddlespeech.bj.bcebos.com/wav2vec/wav2vec2-large-960h-lv60-self.pdparams
+```
## Stage 1: Model Training
If you want to train the model, you can use stage 1 in `run.sh`. The code is shown below.
```bash
diff --git a/examples/librispeech/asr3/conf/wav2vec2ASR.yaml b/examples/librispeech/asr3/conf/wav2vec2ASR.yaml
index 63f5d37cc..b19881b70 100644
--- a/examples/librispeech/asr3/conf/wav2vec2ASR.yaml
+++ b/examples/librispeech/asr3/conf/wav2vec2ASR.yaml
@@ -1,7 +1,7 @@
############################################
# Network Architecture #
############################################
-freeze_wav2vec2: False
+freeze_wav2vec2: True
normalize_wav: True
output_norm: True
dnn_blocks: 2
diff --git a/paddlespeech/s2t/exps/wav2vec2/model.py b/paddlespeech/s2t/exps/wav2vec2/model.py
index d845d8c67..de4c895f2 100644
--- a/paddlespeech/s2t/exps/wav2vec2/model.py
+++ b/paddlespeech/s2t/exps/wav2vec2/model.py
@@ -48,6 +48,24 @@ class Wav2Vec2ASRTrainer(Trainer):
super().__init__(config, args)
self.avg_train_loss = 0
+ def update_average(self, batch_index, loss, avg_loss):
+ """Update running average of the loss.
+ Arguments
+ ---------
+        batch_index : int
+            current zero-based batch index.
+        loss : paddle.Tensor
+            detached loss, a single float value.
+        avg_loss : float
+            current running average.
+        Returns
+        -------
+        avg_loss : float
+            The updated running average of the loss.
+ """
+ if paddle.isfinite(loss):
+ avg_loss -= avg_loss / (batch_index + 1)
+ avg_loss += float(loss) / (batch_index + 1)
+ return avg_loss
+
def train_batch(self, batch_index, batch, msg):
train_conf = self.config
start = time.time()
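The `update_average` method added above implements the standard incremental mean, so the reported `loss` becomes the mean of the per-batch losses seen so far (rescaled by `accum_grad` further down to undo the earlier division). A standalone check with made-up loss values (illustrative only):

```python
# avg_n = avg_{n-1} * n / (n + 1) + x_n / (n + 1), i.e. the running mean.
avg = 0.0
losses = [2.5, 2.3, 2.1]  # hypothetical per-batch (loss / accum_grad) values
for i, x in enumerate(losses):
    avg -= avg / (i + 1)
    avg += x / (i + 1)
assert abs(avg - sum(losses) / len(losses)) < 1e-9
```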
@@ -59,11 +77,11 @@ class Wav2Vec2ASRTrainer(Trainer):
wav = wav[:, :, 0]
wav = self.speech_augmentation(wav, wavs_lens_rate)
loss = self.model(wav, wavs_lens_rate, target, target_lens_rate)
- # pring(wav, wavs_lens_rate, target, target_lens_rate)
# loss div by `batch_size * accum_grad`
loss /= train_conf.accum_grad
- losses_np = {'loss': float(loss) * train_conf.accum_grad}
+ self.avg_train_loss = self.update_average(batch_index, loss,
+ self.avg_train_loss)
# loss backward
if (batch_index + 1) % train_conf.accum_grad != 0:
@@ -87,6 +105,8 @@ class Wav2Vec2ASRTrainer(Trainer):
self.optimizer.clear_grad()
self.lr_scheduler.step()
self.iteration += 1
+
+ losses_np = {'loss': float(self.avg_train_loss) * train_conf.accum_grad}
iteration_time = time.time() - start
for k, v in losses_np.items():
report(k, v)