From 894556f8715021cf0b8fb7032df4fad3a114679a Mon Sep 17 00:00:00 2001 From: lym0302 Date: Tue, 9 Aug 2022 08:11:44 +0000 Subject: [PATCH 1/3] add zh_en mix example, test=tts --- examples/zh_en_tts/tts3/README.md | 280 ++++++++++++++++-- examples/zh_en_tts/tts3/conf/default.yaml | 104 +++++++ examples/zh_en_tts/tts3/local/inference.sh | 39 +++ examples/zh_en_tts/tts3/local/ort_predict.sh | 43 +++ examples/zh_en_tts/tts3/local/paddle2onnx.sh | 1 + examples/zh_en_tts/tts3/local/preprocess.sh | 149 ++++++++++ examples/zh_en_tts/tts3/local/synthesize.sh | 50 ++++ .../zh_en_tts/tts3/local/synthesize_e2e.sh | 64 ++-- examples/zh_en_tts/tts3/local/train.sh | 13 + examples/zh_en_tts/tts3/run.sh | 63 ++++ examples/zh_en_tts/tts3/test.sh | 23 -- .../t2s/exps/fastspeech2/preprocess.py | 24 +- paddlespeech/t2s/exps/inference.py | 9 +- paddlespeech/t2s/exps/ort_predict_e2e.py | 11 +- paddlespeech/t2s/exps/syn_utils.py | 8 +- paddlespeech/t2s/exps/synthesize.py | 2 +- paddlespeech/t2s/exps/synthesize_e2e.py | 2 +- 17 files changed, 810 insertions(+), 75 deletions(-) create mode 100644 examples/zh_en_tts/tts3/conf/default.yaml create mode 100755 examples/zh_en_tts/tts3/local/inference.sh create mode 100755 examples/zh_en_tts/tts3/local/ort_predict.sh create mode 120000 examples/zh_en_tts/tts3/local/paddle2onnx.sh create mode 100755 examples/zh_en_tts/tts3/local/preprocess.sh create mode 100755 examples/zh_en_tts/tts3/local/synthesize.sh create mode 100755 examples/zh_en_tts/tts3/local/train.sh create mode 100755 examples/zh_en_tts/tts3/run.sh delete mode 100755 examples/zh_en_tts/tts3/test.sh diff --git a/examples/zh_en_tts/tts3/README.md b/examples/zh_en_tts/tts3/README.md index 6d38181c..ead57429 100644 --- a/examples/zh_en_tts/tts3/README.md +++ b/examples/zh_en_tts/tts3/README.md @@ -1,26 +1,272 @@ -# Test -We train a Chinese-English mixed fastspeech2 model. The training code is still being sorted out, let's show how to use it first. -The sample rate of the synthesized audio is 22050 Hz. +# Mixed Chinese and English TTS with CSMSC, LJSpeech-1.1, AISHELL-3 and VCTK datasets -## Download pretrained models -Put pretrained models in a directory named `models`. +This example contains code used to train a [Fastspeech2](https://arxiv.org/abs/2006.04558) model with [CSMSC](https://www.data-baker.com/open_source.html), [LJSpeech-1.1](https://keithito.com/LJ-Speech-Dataset/), [AISHELL3](http://www.aishelltech.com/aishell_3) and [VCTK](https://datashare.ed.ac.uk/handle/10283/3443) datasets. -- [fastspeech2_csmscljspeech_add-zhen.zip](https://paddlespeech.bj.bcebos.com/t2s/chinse_english_mixed/models/fastspeech2_csmscljspeech_add-zhen.zip) -- [hifigan_ljspeech_ckpt_0.2.0.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/hifigan/hifigan_ljspeech_ckpt_0.2.0.zip) +## Dataset +### Download and Extract +Download all datasets and extract it to `~/datasets`. The CSMSC dataset is in the directory `~/datasets/BZNSYP`. The Ljspeech dataset is in the directory `~/datasets/LJSpeech-1.1`. The aishell3 dataset is in the directory `~/datasets/data_aishell3`. The vctk dataset is in the directory `~/datasets/VCTK-Corpus-0.92`. + +### Get MFA Result and Extract +We use [MFA](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) to get durations for the fastspeech2 training. 
+You can download from here [baker_alignment_tone.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/BZNSYP/with_tone/baker_alignment_tone.tar.gz), [ljspeech_alignment.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/LJSpeech-1.1/ljspeech_alignment.tar.gz), [aishell3_alignment_tone.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/AISHELL-3/with_tone/aishell3_alignment_tone.tar.gz) and [vctk_alignment.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/VCTK-Corpus-0.92/vctk_alignment.tar.gz). Or train your MFA model reference to [mfa example](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/other/mfa) (use MFA1.x now) of our repo. + +## Get Started +Assume the paths of the datasets are `~/datasets/BZNSYP`, `~/datasets/LJSpeech-1.1`, `~/datasets/data_aishell3` and `~/datasets/VCTK-Corpus-0.92`. +Assume the path to the MFA result of the datasets are `./mfa_results/baker_alignment_tone`, `./mfa_results/ljspeech_alignment`, `./mfa_results/aishell3_alignment_tone` and `./mfa_results/vctk_alignment`. + +Run the command below to +1. **source path**. +2. preprocess the dataset. +3. train the model. +4. synthesize wavs. + - synthesize waveform from `metadata.jsonl`. + - synthesize waveform from text file. +```bash +./run.sh +``` + +You can choose a range of stages you want to run, or set `stage` equal to `stop-stage` to use only one stage, for example, running the following command will only preprocess the dataset. +```bash +./run.sh --stage 0 --stop-stage 0 +``` + +### Data Preprocessing ```bash -mkdir models -cd models -wget https://paddlespeech.bj.bcebos.com/t2s/chinse_english_mixed/models/fastspeech2_csmscljspeech_add-zhen.zip -unzip fastspeech2_csmscljspeech_add-zhen.zip -wget https://paddlespeech.bj.bcebos.com/Parakeet/released_models/hifigan/hifigan_ljspeech_ckpt_0.2.0.zip -unzip hifigan_ljspeech_ckpt_0.2.0.zip -cd ../ +./local/preprocess.sh ${conf_path} ${datasets_root_dir} ${mfa_root_dir} +``` +When it is done. A `dump` folder is created in the current directory. The structure of the dump folder is listed below. +```text +dump +├── dev +│ ├── norm +│ └── raw +├── phone_id_map.txt +├── speaker_id_map.txt +├── test +│ ├── norm +│ └── raw +└── train + ├── energy_stats.npy + ├── norm + ├── pitch_stats.npy + ├── raw + └── speech_stats.npy ``` +The dataset is split into 3 parts, namely `train`, `dev`, and` test`, each of which contains a `norm` and `raw` subfolder. The raw folder contains speech, pitch and energy features of each utterance, while the norm folder contains normalized ones. The statistics used to normalize features are computed from the training set, which is located in `dump/train/*_stats.npy`. -## test -You can choose `--spk_id` {0, 1} in `local/synthesize_e2e.sh`. +Also, there is a `metadata.jsonl` in each subfolder. It is a table-like file that contains phones, text_lengths, speech_lengths, durations, the path of speech features, the path of pitch features, a path of energy features, speaker, and id of each utterance. + +### Model Training +`./local/train.sh` calls `${BIN_DIR}/train.py`. ```bash -bash test.sh +CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path} +``` +Here's the complete help message. +```text +usage: train.py [-h] [--config CONFIG] [--train-metadata TRAIN_METADATA] + [--dev-metadata DEV_METADATA] [--output-dir OUTPUT_DIR] + [--ngpu NGPU] [--phones-dict PHONES_DICT] + [--speaker-dict SPEAKER_DICT] [--voice-cloning VOICE_CLONING] + +Train a FastSpeech2 model. 
+ +optional arguments: + -h, --help show this help message and exit + --config CONFIG fastspeech2 config file. + --train-metadata TRAIN_METADATA + training data. + --dev-metadata DEV_METADATA + dev data. + --output-dir OUTPUT_DIR + output dir. + --ngpu NGPU if ngpu=0, use cpu. + --phones-dict PHONES_DICT + phone vocabulary file. + --speaker-dict SPEAKER_DICT + speaker id map file for multiple speaker model. + --voice-cloning VOICE_CLONING + whether training voice cloning model. +``` +1. `--config` is a config file in yaml format to overwrite the default config, which can be found at `conf/default.yaml`. +2. `--train-metadata` and `--dev-metadata` should be the metadata file in the normalized subfolder of `train` and `dev` in the `dump` folder. +3. `--output-dir` is the directory to save the results of the experiment. Checkpoints are saved in `checkpoints/` inside this directory. +4. `--ngpu` is the number of gpus to use, if ngpu == 0, use cpu. +5. `--phones-dict` is the path of the phone vocabulary file. +6. `--speaker-dict` is the path of the speaker id map file when training a multi-speaker FastSpeech2. + + +### Synthesizing +We use [parallel wavegan](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/voc1) as the neural vocoder. +Download the pretrained parallel wavegan model from [pwg_aishell3_ckpt_0.5.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/pwgan/pwg_aishell3_ckpt_0.5.zip) and unzip it. + +```bash +unzip pwg_aishell3_ckpt_0.5.zip +``` +Parallel WaveGAN checkpoint contains files listed below. +```text +pwg_aishell3_ckpt_0.5 +├── default.yaml # default config used to train parallel wavegan +├── feats_stats.npy # statistics used to normalize spectrogram when training parallel wavegan +└── snapshot_iter_1000000.pdz # generator parameters of parallel wavegan +``` +`./local/synthesize.sh` calls `${BIN_DIR}/../synthesize.py`, which can synthesize waveform from `metadata.jsonl`. +```bash +CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name} +``` +```text +usage: synthesize.py [-h] + [--am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech,tacotron2_aishell3, fastspeech2_mix}] + [--am_config AM_CONFIG] [--am_ckpt AM_CKPT] + [--am_stat AM_STAT] [--phones_dict PHONES_DICT] + [--tones_dict TONES_DICT] [--speaker_dict SPEAKER_DICT] + [--voice-cloning VOICE_CLONING] + [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,wavernn_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,style_melgan_csmsc}] + [--voc_config VOC_CONFIG] [--voc_ckpt VOC_CKPT] + [--voc_stat VOC_STAT] [--ngpu NGPU] + [--test_metadata TEST_METADATA] [--output_dir OUTPUT_DIR] + +Synthesize with acoustic model & vocoder + +optional arguments: + -h, --help show this help message and exit + --am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech,tacotron2_aishell3, fastspeech2_mix} + Choose acoustic model type of tts task. + --am_config AM_CONFIG + Config of acoustic model. + --am_ckpt AM_CKPT Checkpoint file of acoustic model. + --am_stat AM_STAT mean and standard deviation used to normalize + spectrogram when training acoustic model. + --phones_dict PHONES_DICT + phone vocabulary file. + --tones_dict TONES_DICT + tone vocabulary file. + --speaker_dict SPEAKER_DICT + speaker id map file. 
+ --voice-cloning VOICE_CLONING + whether training voice cloning model. + --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,wavernn_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,style_melgan_csmsc} + Choose vocoder type of tts task. + --voc_config VOC_CONFIG + Config of voc. + --voc_ckpt VOC_CKPT Checkpoint file of voc. + --voc_stat VOC_STAT mean and standard deviation used to normalize + spectrogram when training voc. + --ngpu NGPU if ngpu == 0, use cpu. + --test_metadata TEST_METADATA + test metadata. + --output_dir OUTPUT_DIR + output dir. + + +``` +`./local/synthesize_e2e.sh` calls `${BIN_DIR}/../synthesize_e2e.py`, which can synthesize waveform from text file. +```bash +CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh ${conf_path} ${train_output_path} ${ckpt_name} +``` +```text +usage: synthesize_e2e.py [-h] + [--am {speedyspeech_csmsc,speedyspeech_aishell3,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech, fastspeech2_mix}] + [--am_config AM_CONFIG] [--am_ckpt AM_CKPT] + [--am_stat AM_STAT] [--phones_dict PHONES_DICT] + [--tones_dict TONES_DICT] + [--speaker_dict SPEAKER_DICT] [--spk_id SPK_ID] + [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,style_melgan_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,wavernn_csmsc}] + [--voc_config VOC_CONFIG] [--voc_ckpt VOC_CKPT] + [--voc_stat VOC_STAT] [--lang LANG] + [--inference_dir INFERENCE_DIR] [--ngpu NGPU] + [--text TEXT] [--output_dir OUTPUT_DIR] + +Synthesize with acoustic model & vocoder + +optional arguments: + -h, --help show this help message and exit + --am {speedyspeech_csmsc,speedyspeech_aishell3,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech, fastspeech2_mix} + Choose acoustic model type of tts task. + --am_config AM_CONFIG + Config of acoustic model. + --am_ckpt AM_CKPT Checkpoint file of acoustic model. + --am_stat AM_STAT mean and standard deviation used to normalize + spectrogram when training acoustic model. + --phones_dict PHONES_DICT + phone vocabulary file. + --tones_dict TONES_DICT + tone vocabulary file. + --speaker_dict SPEAKER_DICT + speaker id map file. + --spk_id SPK_ID spk id for multi speaker acoustic model + --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,style_melgan_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,wavernn_csmsc} + Choose vocoder type of tts task. + --voc_config VOC_CONFIG + Config of voc. + --voc_ckpt VOC_CKPT Checkpoint file of voc. + --voc_stat VOC_STAT mean and standard deviation used to normalize + spectrogram when training voc. + --lang LANG Choose model language. zh or en or mix + --inference_dir INFERENCE_DIR + dir to save inference models + --ngpu NGPU if ngpu == 0, use cpu. + --text TEXT text to synthesize, a 'utt_id sentence' pair per line. + --output_dir OUTPUT_DIR + output dir. +``` +1. `--am` is acoustic model type with the format {model_name}_{dataset} +2. `--am_config`, `--am_ckpt`, `--am_stat`, `--phones_dict` `--speaker_dict` are arguments for acoustic model, which correspond to the 5 files in the fastspeech2 pretrained model. +3. `--voc` is vocoder type with the format {model_name}_{dataset} +4. `--voc_config`, `--voc_ckpt`, `--voc_stat` are arguments for vocoder, which correspond to the 3 files in the parallel wavegan pretrained model. +5. 
`--lang` is the model language, which can be `zh` or `en` or `mix`. +6. `--test_metadata` should be the metadata file in the normalized subfolder of `test` in the `dump` folder. +7. `--text` is the text file, which contains sentences to synthesize. +8. `--output_dir` is the directory to save synthesized audio files. +9. `--ngpu` is the number of gpus to use, if ngpu == 0, use cpu. + + +## Pretrained Model +Pretrained FastSpeech2 model with no silence in the edge of audios: +- [fastspeech2_mix_ckpt_0.2.0.zip](https://paddlespeech.bj.bcebos.com/t2s/chinse_english_mixed/models/fastspeech2_mix_ckpt_0.2.0.zip) + +The static model can be downloaded here: +- [fastspeech2_mix_static_0.2.0.zip](https://paddlespeech.bj.bcebos.com/t2s/chinse_english_mixed/models/fastspeech2_mix_static_0.2.0.zip) + +The ONNX model can be downloaded here: +- [fastspeech2_mix_onnx_0.2.0.zip](https://paddlespeech.bj.bcebos.com/t2s/chinse_english_mixed/models/fastspeech2_mix_onnx_0.2.0.zip) + +FastSpeech2 checkpoint contains files listed below. + +```text +fastspeech2_mix_ckpt_0.2.0 +├── default.yaml # default config used to train fastspeech2 +├── phone_id_map.txt # phone vocabulary file when training fastspeech2 +├── snapshot_iter_99200.pdz # model parameters and optimizer states +├── speaker_id_map.txt # speaker id map file when training a multi-speaker fastspeech2 +└── speech_stats.npy # statistics used to normalize spectrogram when training fastspeech2 +``` + + +You can use the following scripts to synthesize for `${BIN_DIR}/../sentences_mix.txt` using pretrained fastspeech2 and parallel wavegan models. +174 means baker speaker, 175 means ljspeech speaker. For other speaker information, see `speaker_id_map.txt` + +```bash +source path.sh + +FLAGS_allocator_strategy=naive_best_fit \ +FLAGS_fraction_of_gpu_memory_to_use=0.01 \ +python3 ${BIN_DIR}/../synthesize_e2e.py \ + --am=fastspeech2_mix \ + --am_config=fastspeech2_mix_ckpt_0.2.0/default.yaml \ + --am_ckpt=fastspeech2_mix_ckpt_0.2.0/snapshot_iter_96400.pdz \ + --am_stat=fastspeech2_mix_ckpt_0.2.0/speech_stats.npy \ + --voc=pwgan_aishell3 \ + --voc_config=pwg_aishell3_ckpt_0.5/default.yaml \ + --voc_ckpt=pwg_aishell3_ckpt_0.5/snapshot_iter_1000000.pdz \ + --voc_stat=pwg_aishell3_ckpt_0.5/feats_stats.npy \ + --lang=mix \ + --text=${BIN_DIR}/../sentences_mix.txt \ + --output_dir=exp/default/test_e2e \ + --phones_dict=fastspeech2_mix_ckpt_0.2.0/phone_id_map.txt \ + --speaker_dict=fastspeech2_mix_ckpt_0.2.0/speaker_id_map.txt \ + --spk_id=174 \ + --inference_dir=exp/default/inference + ``` diff --git a/examples/zh_en_tts/tts3/conf/default.yaml b/examples/zh_en_tts/tts3/conf/default.yaml new file mode 100644 index 00000000..e65b5d0e --- /dev/null +++ b/examples/zh_en_tts/tts3/conf/default.yaml @@ -0,0 +1,104 @@ +########################################################### +# FEATURE EXTRACTION SETTING # +########################################################### + +fs: 24000 # sr +n_fft: 2048 # FFT size (samples). +n_shift: 300 # Hop size (samples). 12.5ms +win_length: 1200 # Window length (samples). 50ms + # If set to null, it will be the same as fft_size. +window: "hann" # Window function. + +# Only used for feats_type != raw + +fmin: 80 # Minimum frequency of Mel basis. +fmax: 7600 # Maximum frequency of Mel basis. +n_mels: 80 # The number of mel basis. + +# Only used for the model using pitch features (e.g. FastSpeech2) +f0min: 80 # Minimum f0 for pitch extraction. +f0max: 400 # Maximum f0 for pitch extraction. 
+ + +########################################################### +# DATA SETTING # +########################################################### +batch_size: 64 +num_workers: 2 + + +########################################################### +# MODEL SETTING # +########################################################### +model: + adim: 384 # attention dimension + aheads: 2 # number of attention heads + elayers: 4 # number of encoder layers + eunits: 1536 # number of encoder ff units + dlayers: 4 # number of decoder layers + dunits: 1536 # number of decoder ff units + positionwise_layer_type: conv1d # type of position-wise layer + positionwise_conv_kernel_size: 3 # kernel size of position wise conv layer + duration_predictor_layers: 2 # number of layers of duration predictor + duration_predictor_chans: 256 # number of channels of duration predictor + duration_predictor_kernel_size: 3 # filter size of duration predictor + postnet_layers: 5 # number of layers of postnset + postnet_filts: 5 # filter size of conv layers in postnet + postnet_chans: 256 # number of channels of conv layers in postnet + use_scaled_pos_enc: True # whether to use scaled positional encoding + encoder_normalize_before: True # whether to perform layer normalization before the input + decoder_normalize_before: True # whether to perform layer normalization before the input + reduction_factor: 1 # reduction factor + init_type: xavier_uniform # initialization type + init_enc_alpha: 1.0 # initial value of alpha of encoder scaled position encoding + init_dec_alpha: 1.0 # initial value of alpha of decoder scaled position encoding + transformer_enc_dropout_rate: 0.2 # dropout rate for transformer encoder layer + transformer_enc_positional_dropout_rate: 0.2 # dropout rate for transformer encoder positional encoding + transformer_enc_attn_dropout_rate: 0.2 # dropout rate for transformer encoder attention layer + transformer_dec_dropout_rate: 0.2 # dropout rate for transformer decoder layer + transformer_dec_positional_dropout_rate: 0.2 # dropout rate for transformer decoder positional encoding + transformer_dec_attn_dropout_rate: 0.2 # dropout rate for transformer decoder attention layer + pitch_predictor_layers: 5 # number of conv layers in pitch predictor + pitch_predictor_chans: 256 # number of channels of conv layers in pitch predictor + pitch_predictor_kernel_size: 5 # kernel size of conv leyers in pitch predictor + pitch_predictor_dropout: 0.5 # dropout rate in pitch predictor + pitch_embed_kernel_size: 1 # kernel size of conv embedding layer for pitch + pitch_embed_dropout: 0.0 # dropout rate after conv embedding layer for pitch + stop_gradient_from_pitch_predictor: True # whether to stop the gradient from pitch predictor to encoder + energy_predictor_layers: 2 # number of conv layers in energy predictor + energy_predictor_chans: 256 # number of channels of conv layers in energy predictor + energy_predictor_kernel_size: 3 # kernel size of conv leyers in energy predictor + energy_predictor_dropout: 0.5 # dropout rate in energy predictor + energy_embed_kernel_size: 1 # kernel size of conv embedding layer for energy + energy_embed_dropout: 0.0 # dropout rate after conv embedding layer for energy + stop_gradient_from_energy_predictor: False # whether to stop the gradient from energy predictor to encoder + spk_embed_dim: 256 # speaker embedding dimension + spk_embed_integration_type: concat # speaker embedding integration type + + + +########################################################### +# UPDATER SETTING # 
+########################################################### +updater: + use_masking: True # whether to apply masking for padded part in loss calculation + + +########################################################### +# OPTIMIZER SETTING # +########################################################### +optimizer: + optim: adam # optimizer type + learning_rate: 0.001 # learning rate + +########################################################### +# TRAINING SETTING # +########################################################### +max_epoch: 200 +num_snapshots: 5 + + +########################################################### +# OTHER SETTING # +########################################################### +seed: 10086 diff --git a/examples/zh_en_tts/tts3/local/inference.sh b/examples/zh_en_tts/tts3/local/inference.sh new file mode 100755 index 00000000..5d3bd09e --- /dev/null +++ b/examples/zh_en_tts/tts3/local/inference.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +train_output_path=$1 + +stage=0 +stop_stage=0 + +# voc: pwgan_aishell3 +# the spk_id=174 means baker speaker, default +# the spk_id=175 means ljspeech speaker +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + python3 ${BIN_DIR}/../inference.py \ + --inference_dir=${train_output_path}/inference \ + --am=fastspeech2_mix \ + --voc=pwgan_aishell3 \ + --text=${BIN_DIR}/../sentences_mix.txt \ + --output_dir=${train_output_path}/pd_infer_out \ + --phones_dict=dump/phone_id_map.txt \ + --speaker_dict=dump/speaker_id_map.txt \ + --lang=mix \ + --spk_id=174 +fi + + +# voc: hifigan_aishell3 +# the spk_id=174 means baker speaker, default +# the spk_id=175 means ljspeech speaker +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + python3 ${BIN_DIR}/../inference.py \ + --inference_dir=${train_output_path}/inference \ + --am=fastspeech2_mix \ + --voc=hifigan_aishell3 \ + --text=${BIN_DIR}/../sentences_mix.txt \ + --output_dir=${train_output_path}/pd_infer_out \ + --phones_dict=dump/phone_id_map.txt \ + --speaker_dict=dump/speaker_id_map.txt \ + --lang=mix \ + --spk_id=174 +fi diff --git a/examples/zh_en_tts/tts3/local/ort_predict.sh b/examples/zh_en_tts/tts3/local/ort_predict.sh new file mode 100755 index 00000000..86dcd115 --- /dev/null +++ b/examples/zh_en_tts/tts3/local/ort_predict.sh @@ -0,0 +1,43 @@ +train_output_path=$1 + +stage=0 +stop_stage=0 + +# e2e, synthesize from text +# voc: pwgan_aishell3 +# the spk_id=174 means baker speaker, default +# the spk_id=175 means ljspeech speaker +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + python3 ${BIN_DIR}/../ort_predict_e2e.py \ + --inference_dir=${train_output_path}/inference_onnx \ + --am=fastspeech2_mix \ + --voc=pwgan_aishell3 \ + --output_dir=${train_output_path}/onnx_infer_out_e2e \ + --text=${BIN_DIR}/../sentences_mix.txt \ + --phones_dict=dump/phone_id_map.txt \ + --device=cpu \ + --cpu_threads=4 \ + --lang=mix \ + --spk_id=174 + + +fi + + +# voc: hifigan_aishell3 +# the spk_id=174 means baker speaker, default +# the spk_id=175 means ljspeech speaker +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + python3 ${BIN_DIR}/../ort_predict_e2e.py \ + --inference_dir=${train_output_path}/inference_onnx \ + --am=fastspeech2_mix \ + --voc=hifigan_aishell3 \ + --output_dir=${train_output_path}/onnx_infer_out_e2e \ + --text=${BIN_DIR}/../sentences_mix.txt \ + --phones_dict=dump/phone_id_map.txt \ + --device=cpu \ + --cpu_threads=4 \ + --lang=mix \ + --spk_id=174 + +fi diff --git a/examples/zh_en_tts/tts3/local/paddle2onnx.sh b/examples/zh_en_tts/tts3/local/paddle2onnx.sh new file 
mode 120000 index 00000000..8d5dbef4 --- /dev/null +++ b/examples/zh_en_tts/tts3/local/paddle2onnx.sh @@ -0,0 +1 @@ +../../../csmsc/tts3/local/paddle2onnx.sh \ No newline at end of file diff --git a/examples/zh_en_tts/tts3/local/preprocess.sh b/examples/zh_en_tts/tts3/local/preprocess.sh new file mode 100755 index 00000000..a938f524 --- /dev/null +++ b/examples/zh_en_tts/tts3/local/preprocess.sh @@ -0,0 +1,149 @@ +#!/bin/bash + +stage=0 +stop_stage=100 + +config_path=$1 +datasets_root_dir=$2 +mfa_root_dir=$3 + +# 1. get durations from MFA's result +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + echo "Generate durations_baker.txt from MFA results ..." + python3 ${MAIN_ROOT}/utils/gen_duration_from_textgrid.py \ + --inputdir=${mfa_root_dir}/baker_alignment_tone \ + --output durations_baker.txt \ + --config=${config_path} +fi + +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + echo "Generate durations_ljspeech.txt from MFA results ..." + python3 ${MAIN_ROOT}/utils/gen_duration_from_textgrid.py \ + --inputdir=${mfa_root_dir}/ljspeech_alignment \ + --output durations_ljspeech.txt \ + --config=${config_path} +fi + +if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then + echo "Generate durations_aishell3.txt from MFA results ..." + python3 ${MAIN_ROOT}/utils/gen_duration_from_textgrid.py \ + --inputdir=${mfa_root_dir}/aishell3_alignment_tone \ + --output durations_aishell3.txt \ + --config=${config_path} +fi + +if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then + echo "Generate durations_vctk.txt from MFA results ..." + python3 ${MAIN_ROOT}/utils/gen_duration_from_textgrid.py \ + --inputdir=${mfa_root_dir}/vctk_alignment \ + --output durations_vctk.txt \ + --config=${config_path} +fi + +if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then + # concat duration file + echo "concat durations_baker.txt, durations_ljspeech.txt, durations_aishell3.txt and durations_vctk.txt to durations.txt" + cat durations_baker.txt durations_ljspeech.txt durations_aishell3.txt durations_vctk.txt > durations.txt +fi + +# 2. extract features +if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then + echo "Extract baker features ..." + python3 ${BIN_DIR}/preprocess.py \ + --dataset=baker \ + --rootdir=${datasets_root_dir}/BZNSYP/ \ + --dumpdir=dump \ + --dur-file=durations.txt \ + --config=${config_path} \ + --num-cpu=20 \ + --cut-sil=True \ + --write_metadata_method=a +fi + +if [ ${stage} -le 6 ] && [ ${stop_stage} -ge 6 ]; then + echo "Extract ljspeech features ..." + python3 ${BIN_DIR}/preprocess.py \ + --dataset=ljspeech \ + --rootdir=${datasets_root_dir}/LJSpeech-1.1/ \ + --dumpdir=dump \ + --dur-file=durations.txt \ + --config=${config_path} \ + --num-cpu=20 \ + --cut-sil=True \ + --write_metadata_method=a +fi + +if [ ${stage} -le 7 ] && [ ${stop_stage} -ge 7 ]; then + echo "Extract aishell3 features ..." + python3 ${BIN_DIR}/preprocess.py \ + --dataset=aishell3 \ + --rootdir=${datasets_root_dir}/data_aishell3/ \ + --dumpdir=dump \ + --dur-file=durations.txt \ + --config=${config_path} \ + --num-cpu=20 \ + --cut-sil=True \ + --write_metadata_method=a +fi + +if [ ${stage} -le 8 ] && [ ${stop_stage} -ge 8 ]; then + echo "Extract vctk features ..." + python3 ${BIN_DIR}/preprocess.py \ + --dataset=vctk \ + --rootdir=${datasets_root_dir}/VCTK-Corpus-0.92/ \ + --dumpdir=dump \ + --dur-file=durations.txt \ + --config=${config_path} \ + --num-cpu=20 \ + --cut-sil=True \ + --write_metadata_method=a +fi + + +# 3. 
get features' stats(mean and std) +if [ ${stage} -le 9 ] && [ ${stop_stage} -ge 9 ]; then + echo "Get features' stats ..." + python3 ${MAIN_ROOT}/utils/compute_statistics.py \ + --metadata=dump/train/raw/metadata.jsonl \ + --field-name="speech" + + python3 ${MAIN_ROOT}/utils/compute_statistics.py \ + --metadata=dump/train/raw/metadata.jsonl \ + --field-name="pitch" + + python3 ${MAIN_ROOT}/utils/compute_statistics.py \ + --metadata=dump/train/raw/metadata.jsonl \ + --field-name="energy" +fi + + +# 4. normalize and covert phone/speaker to id, dev and test should use train's stats +if [ ${stage} -le 10 ] && [ ${stop_stage} -ge 10 ]; then + echo "Normalize ..." + python3 ${BIN_DIR}/normalize.py \ + --metadata=dump/train/raw/metadata.jsonl \ + --dumpdir=dump/train/norm \ + --speech-stats=dump/train/speech_stats.npy \ + --pitch-stats=dump/train/pitch_stats.npy \ + --energy-stats=dump/train/energy_stats.npy \ + --phones-dict=dump/phone_id_map.txt \ + --speaker-dict=dump/speaker_id_map.txt + + python3 ${BIN_DIR}/normalize.py \ + --metadata=dump/dev/raw/metadata.jsonl \ + --dumpdir=dump/dev/norm \ + --speech-stats=dump/train/speech_stats.npy \ + --pitch-stats=dump/train/pitch_stats.npy \ + --energy-stats=dump/train/energy_stats.npy \ + --phones-dict=dump/phone_id_map.txt \ + --speaker-dict=dump/speaker_id_map.txt + + python3 ${BIN_DIR}/normalize.py \ + --metadata=dump/test/raw/metadata.jsonl \ + --dumpdir=dump/test/norm \ + --speech-stats=dump/train/speech_stats.npy \ + --pitch-stats=dump/train/pitch_stats.npy \ + --energy-stats=dump/train/energy_stats.npy \ + --phones-dict=dump/phone_id_map.txt \ + --speaker-dict=dump/speaker_id_map.txt +fi diff --git a/examples/zh_en_tts/tts3/local/synthesize.sh b/examples/zh_en_tts/tts3/local/synthesize.sh new file mode 100755 index 00000000..f3a0bf15 --- /dev/null +++ b/examples/zh_en_tts/tts3/local/synthesize.sh @@ -0,0 +1,50 @@ +#!/bin/bash + +config_path=$1 +train_output_path=$2 +ckpt_name=$3 + +stage=0 +stop_stage=0 + +# voc: pwgan_aishell3 +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/../synthesize.py \ + --am=fastspeech2_mix \ + --am_config=${config_path} \ + --am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --am_stat=dump/train/speech_stats.npy \ + --voc=pwgan_aishell3 \ + --voc_config=pwg_aishell3_ckpt_0.5/default.yaml \ + --voc_ckpt=pwg_aishell3_ckpt_0.5/snapshot_iter_1000000.pdz \ + --voc_stat=pwg_aishell3_ckpt_0.5/feats_stats.npy \ + --test_metadata=dump/test/norm/metadata2.jsonl \ + --output_dir=${train_output_path}/test \ + --phones_dict=dump/phone_id_map.txt \ + --speaker_dict=dump/speaker_id_map.txt +fi + + +# voc: hifigan_aishell3 +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/../synthesize.py \ + --am=fastspeech2_mix \ + --am_config=${config_path} \ + --am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --am_stat=dump/train/speech_stats.npy \ + --voc=hifigan_aishell3 \ + --voc_config=hifigan_aishell3_ckpt_0.2.0/default.yaml \ + --voc_ckpt=hifigan_aishell3_ckpt_0.2.0/snapshot_iter_2500000.pdz \ + --voc_stat=hifigan_aishell3_ckpt_0.2.0/feats_stats.npy \ + --test_metadata=dump/test/norm/metadata.jsonl \ + --output_dir=${train_output_path}/test \ + --phones_dict=dump/phone_id_map.txt \ + --speaker_dict=dump/speaker_id_map.txt +fi + + + diff --git a/examples/zh_en_tts/tts3/local/synthesize_e2e.sh 
b/examples/zh_en_tts/tts3/local/synthesize_e2e.sh index a206c3a8..ae14e3cc 100755 --- a/examples/zh_en_tts/tts3/local/synthesize_e2e.sh +++ b/examples/zh_en_tts/tts3/local/synthesize_e2e.sh @@ -1,31 +1,57 @@ #!/bin/bash -model_dir=$1 -output=$2 -am_name=fastspeech2_csmscljspeech_add-zhen -am_model_dir=${model_dir}/${am_name}/ +config_path=$1 +train_output_path=$2 +ckpt_name=$3 -stage=1 -stop_stage=1 +stage=0 +stop_stage=0 +# voc: pwgan_aishell3 +# the spk_id=174 means baker speaker, default. +# the spk_id=175 means ljspeech speaker +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/../synthesize_e2e.py \ + --am=fastspeech2_mix \ + --am_config=${config_path} \ + --am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --am_stat=dump/train/speech_stats.npy \ + --voc=pwgan_aishell3 \ + --voc_config=pwg_aishell3_ckpt_0.5/default.yaml \ + --voc_ckpt=pwg_aishell3_ckpt_0.5/snapshot_iter_1000000.pdz \ + --voc_stat=pwg_aishell3_ckpt_0.5/feats_stats.npy \ + --lang=mix \ + --text=${BIN_DIR}/../sentences_mix.txt \ + --output_dir=${train_output_path}/test_e2e \ + --phones_dict=dump/phone_id_map.txt \ + --speaker_dict=dump/speaker_id_map.txt \ + --spk_id=174 \ + --inference_dir=${train_output_path}/inference +fi -# hifigan +# voc: hifigan_aishell3 +# the spk_id=174 means baker speaker, default +# the spk_id=175 means ljspeech speaker if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + echo "in hifigan syn_e2e" FLAGS_allocator_strategy=naive_best_fit \ FLAGS_fraction_of_gpu_memory_to_use=0.01 \ python3 ${BIN_DIR}/../synthesize_e2e.py \ --am=fastspeech2_mix \ - --am_config=${am_model_dir}/default.yaml \ - --am_ckpt=${am_model_dir}/snapshot_iter_94000.pdz \ - --am_stat=${am_model_dir}/speech_stats.npy \ - --voc=hifigan_ljspeech \ - --voc_config=${model_dir}/hifigan_ljspeech_ckpt_0.2.0/default.yaml \ - --voc_ckpt=${model_dir}/hifigan_ljspeech_ckpt_0.2.0/snapshot_iter_2500000.pdz \ - --voc_stat=${model_dir}/hifigan_ljspeech_ckpt_0.2.0/feats_stats.npy \ + --am_config=${config_path} \ + --am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --am_stat=dump/train/speech_stats.npy \ + --voc=hifigan_aishell3 \ + --voc_config=hifigan_aishell3_ckpt_0.2.0/default.yaml \ + --voc_ckpt=hifigan_aishell3_ckpt_0.2.0/snapshot_iter_2500000.pdz \ + --voc_stat=hifigan_aishell3_ckpt_0.2.0/feats_stats.npy \ --lang=mix \ --text=${BIN_DIR}/../sentences_mix.txt \ - --output_dir=${output}/test_e2e \ - --phones_dict=${am_model_dir}/phone_id_map.txt \ - --speaker_dict=${am_model_dir}/speaker_id_map.txt \ - --spk_id 0 -fi + --output_dir=${train_output_path}/test_e2e \ + --phones_dict=dump/phone_id_map.txt \ + --speaker_dict=dump/speaker_id_map.txt \ + --spk_id=174 \ + --inference_dir=${train_output_path}/inference + fi diff --git a/examples/zh_en_tts/tts3/local/train.sh b/examples/zh_en_tts/tts3/local/train.sh new file mode 100755 index 00000000..1da72f11 --- /dev/null +++ b/examples/zh_en_tts/tts3/local/train.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +config_path=$1 +train_output_path=$2 + +python3 ${BIN_DIR}/train.py \ + --train-metadata=dump/train/norm/metadata.jsonl \ + --dev-metadata=dump/dev/norm/metadata.jsonl \ + --config=${config_path} \ + --output-dir=${train_output_path} \ + --ngpu=2 \ + --phones-dict=dump/phone_id_map.txt \ + --speaker-dict=dump/speaker_id_map.txt diff --git a/examples/zh_en_tts/tts3/run.sh b/examples/zh_en_tts/tts3/run.sh new file mode 100755 index 00000000..0a6a4972 --- /dev/null +++ 
b/examples/zh_en_tts/tts3/run.sh @@ -0,0 +1,63 @@ +#!/bin/bash + +set -e +source path.sh + +gpus=0,1 +stage=0 +stop_stage=0 + +datasets_root_dir=./datasets/ +mfa_root_dir=./mfa_results/ +conf_path=conf/default.yaml +train_output_path=exp/default +ckpt_name=snapshot_iter_99200.pdz + + +# with the following command, you can choose the stage range you want to run +# such as `./run.sh --stage 0 --stop-stage 0` +# this can not be mixed use with `$1`, `$2` ... +source ${MAIN_ROOT}/utils/parse_options.sh || exit 1 + +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + # prepare data + ./local/preprocess.sh ${conf_path} ${datasets_root_dir} ${mfa_root_dir} || exit -1 +fi + +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + # train model, all `ckpt` under `train_output_path/checkpoints/` dir + CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path} || exit -1 +fi + +if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then + # synthesize, vocoder is pwgan by default + CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1 +fi + +if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then + # synthesize_e2e, vocoder is pwgan by default + CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1 +fi + +if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then + # inference with static model, vocoder is pwgan by default + CUDA_VISIBLE_DEVICES=${gpus} ./local/inference.sh ${train_output_path} || exit -1 +fi + +if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then + # install paddle2onnx + version=$(echo `pip list |grep "paddle2onnx"` |awk -F" " '{print $2}') + if [[ -z "$version" || ${version} != '0.9.8' ]]; then + pip install paddle2onnx==0.9.8 + fi + ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_mix + # considering the balance between speed and quality, we recommend that you use hifigan as vocoder + ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx pwgan_aishell3 + #./local/paddle2onnx.sh ${train_output_path} inference inference_onnx hifigan_aishell3 + +fi + +# inference with onnxruntime, use fastspeech2 + hifigan by default +if [ ${stage} -le 6 ] && [ ${stop_stage} -ge 6 ]; then + ./local/ort_predict.sh ${train_output_path} +fi diff --git a/examples/zh_en_tts/tts3/test.sh b/examples/zh_en_tts/tts3/test.sh deleted file mode 100755 index ff34da14..00000000 --- a/examples/zh_en_tts/tts3/test.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash - -set -e -source path.sh - -gpus=0,1 -stage=3 -stop_stage=100 - -model_dir=models -output_dir=output - -# with the following command, you can choose the stage range you want to run -# such as `./run.sh --stage 0 --stop-stage 0` -# this can not be mixed use with `$1`, `$2` ... 
-source ${MAIN_ROOT}/utils/parse_options.sh || exit 1 - - -if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then - # synthesize_e2e, vocoder is hifigan by default - CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh ${model_dir} ${output_dir} || exit -1 -fi - diff --git a/paddlespeech/t2s/exps/fastspeech2/preprocess.py b/paddlespeech/t2s/exps/fastspeech2/preprocess.py index 0045c5a3..c06492c6 100644 --- a/paddlespeech/t2s/exps/fastspeech2/preprocess.py +++ b/paddlespeech/t2s/exps/fastspeech2/preprocess.py @@ -144,7 +144,8 @@ def process_sentences(config, energy_extractor=None, nprocs: int=1, cut_sil: bool=True, - spk_emb_dir: Path=None): + spk_emb_dir: Path=None, + write_metadata_method: str='w'): if nprocs == 1: results = [] for fp in tqdm.tqdm(fps, total=len(fps)): @@ -179,7 +180,10 @@ def process_sentences(config, results.append(record) results.sort(key=itemgetter("utt_id")) - with jsonlines.open(output_dir / "metadata.jsonl", 'w') as writer: + print("wwwwwwwwwwwwwwwwwwwwwwwwwwwrite_metadata_method: ", + write_metadata_method) + with jsonlines.open(output_dir / "metadata.jsonl", + write_metadata_method) as writer: for item in results: writer.write(item) print("Done") @@ -223,6 +227,13 @@ def main(): default=None, type=str, help="directory to speaker embedding files.") + + parser.add_argument( + "--write_metadata_method", + default="w", + type=str, + choices=["w", "a"], + help="How the metadata.jsonl file is written.") args = parser.parse_args() rootdir = Path(args.rootdir).expanduser() @@ -340,7 +351,8 @@ def main(): energy_extractor=energy_extractor, nprocs=args.num_cpu, cut_sil=args.cut_sil, - spk_emb_dir=spk_emb_dir) + spk_emb_dir=spk_emb_dir, + write_metadata_method=args.write_metadata_method) if dev_wav_files: process_sentences( config=config, @@ -351,7 +363,8 @@ def main(): pitch_extractor=pitch_extractor, energy_extractor=energy_extractor, cut_sil=args.cut_sil, - spk_emb_dir=spk_emb_dir) + spk_emb_dir=spk_emb_dir, + write_metadata_method=args.write_metadata_method) if test_wav_files: process_sentences( config=config, @@ -363,7 +376,8 @@ def main(): energy_extractor=energy_extractor, nprocs=args.num_cpu, cut_sil=args.cut_sil, - spk_emb_dir=spk_emb_dir) + spk_emb_dir=spk_emb_dir, + write_metadata_method=args.write_metadata_method) if __name__ == "__main__": diff --git a/paddlespeech/t2s/exps/inference.py b/paddlespeech/t2s/exps/inference.py index ba951182..25c65c23 100644 --- a/paddlespeech/t2s/exps/inference.py +++ b/paddlespeech/t2s/exps/inference.py @@ -41,6 +41,7 @@ def parse_args(): 'fastspeech2_ljspeech', 'fastspeech2_vctk', 'tacotron2_csmsc', + 'fastspeech2_mix', ], help='Choose acoustic model type of tts task.') parser.add_argument( @@ -77,7 +78,7 @@ def parse_args(): '--lang', type=str, default='zh', - help='Choose model language. zh or en') + help='Choose model language. 
zh or en or mix') parser.add_argument( "--text", type=str, @@ -156,7 +157,8 @@ def main(): frontend=frontend, lang=args.lang, merge_sentences=merge_sentences, - speaker_dict=args.speaker_dict, ) + speaker_dict=args.speaker_dict, + spk_id=args.spk_id, ) wav = get_voc_output( voc_predictor=voc_predictor, input=am_output_data) speed = wav.size / t.elapse @@ -178,7 +180,8 @@ def main(): frontend=frontend, lang=args.lang, merge_sentences=merge_sentences, - speaker_dict=args.speaker_dict, ) + speaker_dict=args.speaker_dict, + spk_id=args.spk_id, ) wav = get_voc_output( voc_predictor=voc_predictor, input=am_output_data) diff --git a/paddlespeech/t2s/exps/ort_predict_e2e.py b/paddlespeech/t2s/exps/ort_predict_e2e.py index f33fc412..ee0704b8 100644 --- a/paddlespeech/t2s/exps/ort_predict_e2e.py +++ b/paddlespeech/t2s/exps/ort_predict_e2e.py @@ -76,7 +76,7 @@ def ort_predict(args): else: phone_ids = np.random.randint(1, 266, size=(T, )) am_input_feed.update({'text': phone_ids}) - if am_dataset in {"aishell3", "vctk"}: + if am_dataset in {"aishell3", "vctk", "mix"}: am_input_feed.update({'spk_id': spk_id}) elif am_name == 'speedyspeech': @@ -112,13 +112,17 @@ def ort_predict(args): input_ids = frontend.get_input_ids( sentence, merge_sentences=merge_sentences) phone_ids = input_ids["phone_ids"] + elif args.lang == 'mix': + input_ids = frontend.get_input_ids( + sentence, merge_sentences=merge_sentences) + phone_ids = input_ids["phone_ids"] else: - print("lang should in {'zh', 'en'}!") + print("lang should in {'zh', 'en', 'mix'}!") # merge_sentences=True here, so we only use the first item of phone_ids phone_ids = phone_ids[0].numpy() if am_name == 'fastspeech2': am_input_feed.update({'text': phone_ids}) - if am_dataset in {"aishell3", "vctk"}: + if am_dataset in {"aishell3", "vctk", "mix"}: am_input_feed.update({'spk_id': spk_id}) elif am_name == 'speedyspeech': tone_ids = tone_ids[0].numpy() @@ -154,6 +158,7 @@ def parse_args(): 'fastspeech2_ljspeech', 'fastspeech2_vctk', 'speedyspeech_csmsc', + 'fastspeech2_mix', ], help='Choose acoustic model type of tts task.') parser.add_argument( diff --git a/paddlespeech/t2s/exps/syn_utils.py b/paddlespeech/t2s/exps/syn_utils.py index bade62ac..7380d57e 100644 --- a/paddlespeech/t2s/exps/syn_utils.py +++ b/paddlespeech/t2s/exps/syn_utils.py @@ -273,7 +273,8 @@ def am_to_static(am_inference, am_name = am[:am.rindex('_')] am_dataset = am[am.rindex('_') + 1:] if am_name == 'fastspeech2': - if am_dataset in {"aishell3", "vctk"} and speaker_dict is not None: + if am_dataset in {"aishell3", "vctk", "mix" + } and speaker_dict is not None: am_inference = jit.to_static( am_inference, input_spec=[ @@ -285,7 +286,8 @@ def am_to_static(am_inference, am_inference, input_spec=[InputSpec([-1], dtype=paddle.int64)]) elif am_name == 'speedyspeech': - if am_dataset in {"aishell3", "vctk"} and speaker_dict is not None: + if am_dataset in {"aishell3", "vctk", "mix" + } and speaker_dict is not None: am_inference = jit.to_static( am_inference, input_spec=[ @@ -356,7 +358,7 @@ def get_am_output( get_spk_id = False if am_name == 'speedyspeech': get_tone_ids = True - if am_dataset in {"aishell3", "vctk"} and speaker_dict: + if am_dataset in {"aishell3", "vctk", "mix"} and speaker_dict: get_spk_id = True spk_id = np.array([spk_id]) if lang == 'zh': diff --git a/paddlespeech/t2s/exps/synthesize.py b/paddlespeech/t2s/exps/synthesize.py index 9ddab726..a8e18150 100644 --- a/paddlespeech/t2s/exps/synthesize.py +++ b/paddlespeech/t2s/exps/synthesize.py @@ -136,7 +136,7 @@ def parse_args(): 
choices=[ 'speedyspeech_csmsc', 'fastspeech2_csmsc', 'fastspeech2_ljspeech', 'fastspeech2_aishell3', 'fastspeech2_vctk', 'tacotron2_csmsc', - 'tacotron2_ljspeech', 'tacotron2_aishell3' + 'tacotron2_ljspeech', 'tacotron2_aishell3', 'fastspeech2_mix' ], help='Choose acoustic model type of tts task.') parser.add_argument( diff --git a/paddlespeech/t2s/exps/synthesize_e2e.py b/paddlespeech/t2s/exps/synthesize_e2e.py index ef954329..ce32a6ac 100644 --- a/paddlespeech/t2s/exps/synthesize_e2e.py +++ b/paddlespeech/t2s/exps/synthesize_e2e.py @@ -133,7 +133,7 @@ def evaluate(args): mel = am_inference(part_phone_ids) elif am_name == 'speedyspeech': part_tone_ids = tone_ids[i] - if am_dataset in {"aishell3", "vctk"}: + if am_dataset in {"aishell3", "vctk", "mix"}: spk_id = paddle.to_tensor(args.spk_id) mel = am_inference(part_phone_ids, part_tone_ids, spk_id) From 8dbefc0165404785a1a2281a5250b12dc8c25ad5 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Fri, 12 Aug 2022 12:02:31 +0000 Subject: [PATCH 2/3] fix preprocess bug, add hifigan_csmsc decoder, update readme --- examples/zh_en_tts/tts3/README.md | 9 ++++++- examples/zh_en_tts/tts3/local/inference.sh | 15 +++++++++++ examples/zh_en_tts/tts3/local/ort_predict.sh | 21 +++++++++++---- examples/zh_en_tts/tts3/local/synthesize.sh | 5 +--- .../zh_en_tts/tts3/local/synthesize_e2e.sh | 27 ++++++++++++++++++- examples/zh_en_tts/tts3/run.sh | 8 +++--- .../t2s/exps/fastspeech2/preprocess.py | 1 - 7 files changed, 70 insertions(+), 16 deletions(-) diff --git a/examples/zh_en_tts/tts3/README.md b/examples/zh_en_tts/tts3/README.md index ead57429..1752d246 100644 --- a/examples/zh_en_tts/tts3/README.md +++ b/examples/zh_en_tts/tts3/README.md @@ -98,9 +98,16 @@ optional arguments: ### Synthesizing -We use [parallel wavegan](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/voc1) as the neural vocoder. +We use [parallel wavegan](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/aishell3/voc1) as the default neural vocoder. Download the pretrained parallel wavegan model from [pwg_aishell3_ckpt_0.5.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/pwgan/pwg_aishell3_ckpt_0.5.zip) and unzip it. +When speaker is `174` (csmsc), use csmsc's vocoder is better than aishell3's, we recommend that you use [hifigan_csmsc_ckpt_0.1.1.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/hifigan/hifigan_csmsc_ckpt_0.1.1.zip), please check `stage 2` of `synthesize_e2e.sh`. + +But if speaker is `175` (ljspeech), we **don't** recommend you to use ljspeech's vocoder, because ljspeech's vocoders are trained on sample rate 22.05kHz, but this acoustic model is trained on sample rate 24kHz, you can use csmsc's vocoder also, because ljspeech and csmsc are both female speakers. + +For speakers in aishell3 and vctk, we recommend you use aishell3 or vctk's vocoders, because ljspeech and csmsc are both female speakers, there vocoders may not perform well for male speakers in aishell3 and vctk, you can check speaker name and spk_id in `dump/speaker_id_map.txt` and check speakers' information ( Age / Gender / Accents / region, etc ) in [this issue](https://github.com/PaddlePaddle/PaddleSpeech/issues/1620) and choose the `spk_id` you want. 
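A quick way to check this mapping is to inspect `dump/speaker_id_map.txt` directly. Below is a minimal sketch, assuming the file stores one `speaker_name spk_id` pair per line (the speaker name `SSB0005` is only an illustrative AISHELL-3 speaker):

```bash
# look up the integer spk_id for a given speaker name
grep "SSB0005" dump/speaker_id_map.txt
# or go the other way: find which speaker name a given spk_id belongs to
awk '$2 == 174' dump/speaker_id_map.txt
```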
+ + ```bash unzip pwg_aishell3_ckpt_0.5.zip ``` diff --git a/examples/zh_en_tts/tts3/local/inference.sh b/examples/zh_en_tts/tts3/local/inference.sh index 5d3bd09e..16499ed0 100755 --- a/examples/zh_en_tts/tts3/local/inference.sh +++ b/examples/zh_en_tts/tts3/local/inference.sh @@ -37,3 +37,18 @@ if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then --lang=mix \ --spk_id=174 fi + +# voc: hifigan_csmsc +# when speaker is 174 (csmsc), use csmsc's vocoder is better than aishell3's +if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then + python3 ${BIN_DIR}/../inference.py \ + --inference_dir=${train_output_path}/inference \ + --am=fastspeech2_mix \ + --voc=hifigan_csmsc \ + --text=${BIN_DIR}/../sentences_mix.txt \ + --output_dir=${train_output_path}/pd_infer_out \ + --phones_dict=dump/phone_id_map.txt \ + --speaker_dict=dump/speaker_id_map.txt \ + --lang=mix \ + --spk_id=174 +fi diff --git a/examples/zh_en_tts/tts3/local/ort_predict.sh b/examples/zh_en_tts/tts3/local/ort_predict.sh index 86dcd115..d80da9c9 100755 --- a/examples/zh_en_tts/tts3/local/ort_predict.sh +++ b/examples/zh_en_tts/tts3/local/ort_predict.sh @@ -18,9 +18,7 @@ if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then --device=cpu \ --cpu_threads=4 \ --lang=mix \ - --spk_id=174 - - + --spk_id=174 fi @@ -38,6 +36,19 @@ if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then --device=cpu \ --cpu_threads=4 \ --lang=mix \ - --spk_id=174 - + --spk_id=174 +fi + +if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then + python3 ${BIN_DIR}/../ort_predict_e2e.py \ + --inference_dir=${train_output_path}/inference_onnx \ + --am=fastspeech2_mix \ + --voc=hifigan_csmsc \ + --output_dir=${train_output_path}/onnx_infer_out_e2e \ + --text=${BIN_DIR}/../sentences_mix.txt \ + --phones_dict=dump/phone_id_map.txt \ + --device=cpu \ + --cpu_threads=4 \ + --lang=mix \ + --spk_id=174 fi diff --git a/examples/zh_en_tts/tts3/local/synthesize.sh b/examples/zh_en_tts/tts3/local/synthesize.sh index f3a0bf15..5bb94746 100755 --- a/examples/zh_en_tts/tts3/local/synthesize.sh +++ b/examples/zh_en_tts/tts3/local/synthesize.sh @@ -20,7 +20,7 @@ if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then --voc_config=pwg_aishell3_ckpt_0.5/default.yaml \ --voc_ckpt=pwg_aishell3_ckpt_0.5/snapshot_iter_1000000.pdz \ --voc_stat=pwg_aishell3_ckpt_0.5/feats_stats.npy \ - --test_metadata=dump/test/norm/metadata2.jsonl \ + --test_metadata=dump/test/norm/metadata.jsonl \ --output_dir=${train_output_path}/test \ --phones_dict=dump/phone_id_map.txt \ --speaker_dict=dump/speaker_id_map.txt @@ -45,6 +45,3 @@ if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then --phones_dict=dump/phone_id_map.txt \ --speaker_dict=dump/speaker_id_map.txt fi - - - diff --git a/examples/zh_en_tts/tts3/local/synthesize_e2e.sh b/examples/zh_en_tts/tts3/local/synthesize_e2e.sh index ae14e3cc..f6ee04ae 100755 --- a/examples/zh_en_tts/tts3/local/synthesize_e2e.sh +++ b/examples/zh_en_tts/tts3/local/synthesize_e2e.sh @@ -54,4 +54,29 @@ if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then --speaker_dict=dump/speaker_id_map.txt \ --spk_id=174 \ --inference_dir=${train_output_path}/inference - fi +fi + + +# voc: hifigan_csmsc +# when speaker is 174 (csmsc), use csmsc's vocoder is better than aishell3's +if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then + echo "in csmsc's hifigan syn_e2e" + FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/../synthesize_e2e.py \ + --am=fastspeech2_mix \ + --am_config=${config_path} \ + 
--am_ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --am_stat=dump/train/speech_stats.npy \ + --voc=hifigan_csmsc \ + --voc_config=hifigan_csmsc_ckpt_0.1.1/default.yaml \ + --voc_ckpt=hifigan_csmsc_ckpt_0.1.1/snapshot_iter_2500000.pdz \ + --voc_stat=hifigan_csmsc_ckpt_0.1.1/feats_stats.npy \ + --lang=mix \ + --text=${BIN_DIR}/../sentences_mix.txt \ + --output_dir=${train_output_path}/test_e2e \ + --phones_dict=dump/phone_id_map.txt \ + --speaker_dict=dump/speaker_id_map.txt \ + --spk_id=174 \ + --inference_dir=${train_output_path}/inference +fi \ No newline at end of file diff --git a/examples/zh_en_tts/tts3/run.sh b/examples/zh_en_tts/tts3/run.sh index 221ed7ee..204042b1 100755 --- a/examples/zh_en_tts/tts3/run.sh +++ b/examples/zh_en_tts/tts3/run.sh @@ -7,7 +7,7 @@ gpus=0,1 stage=0 stop_stage=100 -datasets_root_dir=./datasets/ +datasets_root_dir=~/datasets mfa_root_dir=./mfa_results/ conf_path=conf/default.yaml train_output_path=exp/default @@ -53,11 +53,11 @@ if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx fastspeech2_mix # considering the balance between speed and quality, we recommend that you use hifigan as vocoder ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx pwgan_aishell3 - #./local/paddle2onnx.sh ${train_output_path} inference inference_onnx hifigan_aishell3 - + # ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx hifigan_aishell3 + # ./local/paddle2onnx.sh ${train_output_path} inference inference_onnx hifigan_csmsc fi -# inference with onnxruntime, use fastspeech2 + hifigan by default +# inference with onnxruntime, use fastspeech2 + pwgan by default if [ ${stage} -le 6 ] && [ ${stop_stage} -ge 6 ]; then ./local/ort_predict.sh ${train_output_path} fi diff --git a/paddlespeech/t2s/exps/fastspeech2/preprocess.py b/paddlespeech/t2s/exps/fastspeech2/preprocess.py index 6e926d6e..f4acdc60 100644 --- a/paddlespeech/t2s/exps/fastspeech2/preprocess.py +++ b/paddlespeech/t2s/exps/fastspeech2/preprocess.py @@ -180,7 +180,6 @@ def process_sentences(config, results.append(record) results.sort(key=itemgetter("utt_id")) - write_metadata_method) with jsonlines.open(output_dir / "metadata.jsonl", write_metadata_method) as writer: for item in results: From 18b4fb57bed1ea9058f78b0a65fce64cf29a209f Mon Sep 17 00:00:00 2001 From: TianYuan Date: Fri, 12 Aug 2022 12:17:33 +0000 Subject: [PATCH 3/3] update readme --- examples/zh_en_tts/tts3/README.md | 33 ++++++++++++++++++++++++------- 1 file changed, 26 insertions(+), 7 deletions(-) diff --git a/examples/zh_en_tts/tts3/README.md b/examples/zh_en_tts/tts3/README.md index 1752d246..131d7f2c 100644 --- a/examples/zh_en_tts/tts3/README.md +++ b/examples/zh_en_tts/tts3/README.md @@ -1,3 +1,4 @@ + # Mixed Chinese and English TTS with CSMSC, LJSpeech-1.1, AISHELL-3 and VCTK datasets This example contains code used to train a [Fastspeech2](https://arxiv.org/abs/2006.04558) model with [CSMSC](https://www.data-baker.com/open_source.html), [LJSpeech-1.1](https://keithito.com/LJ-Speech-Dataset/), [AISHELL3](http://www.aishelltech.com/aishell_3) and [VCTK](https://datashare.ed.ac.uk/handle/10283/3443) datasets. @@ -5,15 +6,34 @@ This example contains code used to train a [Fastspeech2](https://arxiv.org/abs/2 ## Dataset ### Download and Extract -Download all datasets and extract it to `~/datasets`. The CSMSC dataset is in the directory `~/datasets/BZNSYP`. The Ljspeech dataset is in the directory `~/datasets/LJSpeech-1.1`. 
The aishell3 dataset is in the directory `~/datasets/data_aishell3`. The vctk dataset is in the directory `~/datasets/VCTK-Corpus-0.92`. +Download all datasets and extract it to `~/datasets`: +- The CSMSC dataset is in the directory `~/datasets/BZNSYP` +- The Ljspeech dataset is in the directory `~/datasets/LJSpeech-1.1` +- The aishell3 dataset is in the directory `~/datasets/data_aishell3` +- The vctk dataset is in the directory `~/datasets/VCTK-Corpus-0.92` ### Get MFA Result and Extract We use [MFA](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) to get durations for the fastspeech2 training. -You can download from here [baker_alignment_tone.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/BZNSYP/with_tone/baker_alignment_tone.tar.gz), [ljspeech_alignment.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/LJSpeech-1.1/ljspeech_alignment.tar.gz), [aishell3_alignment_tone.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/AISHELL-3/with_tone/aishell3_alignment_tone.tar.gz) and [vctk_alignment.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/VCTK-Corpus-0.92/vctk_alignment.tar.gz). Or train your MFA model reference to [mfa example](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/other/mfa) (use MFA1.x now) of our repo. +You can download from here: +- [baker_alignment_tone.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/BZNSYP/with_tone/baker_alignment_tone.tar.gz) +- [ljspeech_alignment.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/LJSpeech-1.1/ljspeech_alignment.tar.gz) +- [aishell3_alignment_tone.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/AISHELL-3/with_tone/aishell3_alignment_tone.tar.gz) +- [vctk_alignment.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/VCTK-Corpus-0.92/vctk_alignment.tar.gz) + +Or train your MFA model reference to [mfa example](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/other/mfa) (use MFA1.x now) of our repo. ## Get Started -Assume the paths of the datasets are `~/datasets/BZNSYP`, `~/datasets/LJSpeech-1.1`, `~/datasets/data_aishell3` and `~/datasets/VCTK-Corpus-0.92`. -Assume the path to the MFA result of the datasets are `./mfa_results/baker_alignment_tone`, `./mfa_results/ljspeech_alignment`, `./mfa_results/aishell3_alignment_tone` and `./mfa_results/vctk_alignment`. +Assume the paths to the datasets are: +- `~/datasets/BZNSYP` +- `~/datasets/LJSpeech-1.1` +- `~/datasets/data_aishell3` +- `~/datasets/VCTK-Corpus-0.92` + +Assume the path to the MFA results of the datasets are: +- `./mfa_results/baker_alignment_tone` +- `./mfa_results/ljspeech_alignment` +- `./mfa_results/aishell3_alignment_tone` +- `./mfa_results/vctk_alignment` Run the command below to 1. **source path**. @@ -252,7 +272,7 @@ fastspeech2_mix_ckpt_0.2.0 You can use the following scripts to synthesize for `${BIN_DIR}/../sentences_mix.txt` using pretrained fastspeech2 and parallel wavegan models. -174 means baker speaker, 175 means ljspeech speaker. For other speaker information, see `speaker_id_map.txt` +`174` means baker speaker, `175` means ljspeech speaker. For other speaker information, please see `speaker_id_map.txt`. 
```bash source path.sh @@ -262,7 +282,7 @@ FLAGS_fraction_of_gpu_memory_to_use=0.01 \ python3 ${BIN_DIR}/../synthesize_e2e.py \ --am=fastspeech2_mix \ --am_config=fastspeech2_mix_ckpt_0.2.0/default.yaml \ - --am_ckpt=fastspeech2_mix_ckpt_0.2.0/snapshot_iter_96400.pdz \ + --am_ckpt=fastspeech2_mix_ckpt_0.2.0/snapshot_iter_99200.pdz \ --am_stat=fastspeech2_mix_ckpt_0.2.0/speech_stats.npy \ --voc=pwgan_aishell3 \ --voc_config=pwg_aishell3_ckpt_0.5/default.yaml \ @@ -275,5 +295,4 @@ python3 ${BIN_DIR}/../synthesize_e2e.py \ --speaker_dict=fastspeech2_mix_ckpt_0.2.0/speaker_id_map.txt \ --spk_id=174 \ --inference_dir=exp/default/inference - ```
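
If you prefer the csmsc HiFiGAN vocoder recommended above for speaker `174`, only the vocoder arguments need to change. The following is a sketch of the same command, assuming [hifigan_csmsc_ckpt_0.1.1.zip](https://paddlespeech.bj.bcebos.com/Parakeet/released_models/hifigan/hifigan_csmsc_ckpt_0.1.1.zip) has already been downloaded and unzipped into the current directory:

```bash
source path.sh

FLAGS_allocator_strategy=naive_best_fit \
FLAGS_fraction_of_gpu_memory_to_use=0.01 \
python3 ${BIN_DIR}/../synthesize_e2e.py \
  --am=fastspeech2_mix \
  --am_config=fastspeech2_mix_ckpt_0.2.0/default.yaml \
  --am_ckpt=fastspeech2_mix_ckpt_0.2.0/snapshot_iter_99200.pdz \
  --am_stat=fastspeech2_mix_ckpt_0.2.0/speech_stats.npy \
  --voc=hifigan_csmsc \
  --voc_config=hifigan_csmsc_ckpt_0.1.1/default.yaml \
  --voc_ckpt=hifigan_csmsc_ckpt_0.1.1/snapshot_iter_2500000.pdz \
  --voc_stat=hifigan_csmsc_ckpt_0.1.1/feats_stats.npy \
  --lang=mix \
  --text=${BIN_DIR}/../sentences_mix.txt \
  --output_dir=exp/default/test_e2e \
  --phones_dict=fastspeech2_mix_ckpt_0.2.0/phone_id_map.txt \
  --speaker_dict=fastspeech2_mix_ckpt_0.2.0/speaker_id_map.txt \
  --spk_id=174 \
  --inference_dir=exp/default/inference
```

To synthesize with the English (ljspeech) voice instead, change `--spk_id=174` to `--spk_id=175`; for other voices, pick a `spk_id` from `speaker_id_map.txt` as described above.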