pull/3984/head
co63oc 7 months ago committed by GitHub
parent bb77a7f7db
commit c74a6be998
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

@ -21,7 +21,7 @@ __all__ = [
class Timer(object):
'''Calculate runing speed and estimated time of arrival(ETA)'''
'''Calculate running speed and estimated time of arrival(ETA)'''
def __init__(self, total_step: int):
self.total_step = total_step

@ -30,5 +30,5 @@ class BackendTest(unittest.TestCase):
urllib.request.urlretrieve(url, os.path.basename(url))
self.files.append(os.path.basename(url))
def initParmas(self):
def initParams(self):
raise NotImplementedError

@ -30,5 +30,5 @@ class BackendTest(unittest.TestCase):
urllib.request.urlretrieve(url, os.path.basename(url))
self.files.append(os.path.basename(url))
def initParmas(self):
def initParams(self):
raise NotImplementedError

@ -103,7 +103,7 @@ class MockedSaveTest(unittest.TestCase):
encoding=encoding,
bits_per_sample=bits_per_sample, )
# on +Py3.8 call_args.kwargs is more descreptive
# on +Py3.8 call_args.kwargs is more descriptive
args = mocked_write.call_args[1]
assert args["file"] == filepath
assert args["samplerate"] == sample_rate
@ -191,7 +191,7 @@ class SaveTestBase(TempDirMixin, unittest.TestCase):
def _assert_non_wav(self, fmt, dtype, sample_rate, num_channels):
"""`soundfile_backend.save` can save non-wav format.
Due to precision missmatch, and the lack of alternative way to decode the
Due to precision mismatch, and the lack of alternative way to decode the
resulting files without using soundfile, only metadata is validated.
"""
num_frames = sample_rate * 3

@ -81,7 +81,7 @@ def convert_tensor_encoding(
#dtype = getattr(paddle, dtype)
#if dtype not in [paddle.float64, paddle.float32, paddle.int32, paddle.int16, paddle.uint8]:
#raise NotImplementedError(f"dtype {dtype} is not supported.")
## According to the doc, folking rng on all CUDA devices is slow when there are many CUDA devices,
## According to the doc, forking rng on all CUDA devices is slow when there are many CUDA devices,
## so we only fork on CPU, generate values and move the data to the given device
#with paddle.random.fork_rng([]):
#paddle.random.manual_seed(seed)

@ -24,20 +24,21 @@ def get_bit_depth(dtype):
def gen_audio_file(
path,
sample_rate,
num_channels,
*,
encoding=None,
bit_depth=None,
compression=None,
attenuation=None,
duration=1,
comment_file=None,
):
path,
sample_rate,
num_channels,
*,
encoding=None,
bit_depth=None,
compression=None,
attenuation=None,
duration=1,
comment_file=None, ):
"""Generate synthetic audio file with `sox` command."""
if path.endswith(".wav"):
warnings.warn("Use get_wav_data and save_wav to generate wav file for accurate result.")
warnings.warn(
"Use get_wav_data and save_wav to generate wav file for accurate result."
)
command = [
"sox",
"-V3", # verbose
@ -81,7 +82,12 @@ def gen_audio_file(
subprocess.run(command, check=True)
def convert_audio_file(src_path, dst_path, *, encoding=None, bit_depth=None, compression=None):
def convert_audio_file(src_path,
dst_path,
*,
encoding=None,
bit_depth=None,
compression=None):
"""Convert audio file with `sox` command."""
command = ["sox", "-V3", "--no-dither", "-R", str(src_path)]
if encoding is not None:
@ -95,7 +101,7 @@ def convert_audio_file(src_path, dst_path, *, encoding=None, bit_depth=None, com
subprocess.run(command, check=True)
def _flattern(effects):
def _flatten(effects):
if not effects:
return effects
if isinstance(effects[0], str):
@ -103,9 +109,14 @@ def _flattern(effects):
return [item for sublist in effects for item in sublist]
def run_sox_effect(input_file, output_file, effect, *, output_sample_rate=None, output_bitdepth=None):
def run_sox_effect(input_file,
output_file,
effect,
*,
output_sample_rate=None,
output_bitdepth=None):
"""Run sox effects"""
effect = _flattern(effect)
effect = _flatten(effect)
command = ["sox", "-V", "--no-dither", input_file]
if output_bitdepth:
command += ["--bits", str(output_bitdepth)]

@ -24,7 +24,7 @@ wav_url = 'https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav'
class FeatTest(unittest.TestCase):
def setUp(self):
self.initParmas()
self.initParams()
self.initWavInput()
self.setUpDevice()
@ -44,5 +44,5 @@ class FeatTest(unittest.TestCase):
if dim == 1:
self.waveform = np.expand_dims(self.waveform, 0)
def initParmas(self):
def initParams(self):
raise NotImplementedError

@ -23,7 +23,7 @@ from paddlespeech.audio.transform.spectrogram import Stft
class TestIstft(FeatTest):
def initParmas(self):
def initParams(self):
self.n_fft = 512
self.hop_length = 128
self.window_str = 'hann'

@ -18,12 +18,11 @@ import paddle
import paddleaudio
import torch
import torchaudio
from base import FeatTest
class TestKaldi(FeatTest):
def initParmas(self):
def initParams(self):
self.window_size = 1024
self.dtype = 'float32'

@ -17,13 +17,12 @@ import librosa
import numpy as np
import paddle
import paddleaudio
from paddleaudio.functional.window import get_window
from base import FeatTest
from paddleaudio.functional.window import get_window
class TestLibrosa(FeatTest):
def initParmas(self):
def initParams(self):
self.n_fft = 512
self.hop_length = 128
self.n_mels = 40

@ -22,7 +22,7 @@ from paddlespeech.audio.transform.spectrogram import LogMelSpectrogram
class TestLogMelSpectrogram(FeatTest):
def initParmas(self):
def initParams(self):
self.n_fft = 512
self.hop_length = 128
self.n_mels = 40

@ -22,7 +22,7 @@ from paddlespeech.audio.transform.spectrogram import Spectrogram
class TestSpectrogram(FeatTest):
def initParmas(self):
def initParams(self):
self.n_fft = 512
self.hop_length = 128

@ -22,7 +22,7 @@ from paddlespeech.audio.transform.spectrogram import Stft
class TestStft(FeatTest):
def initParmas(self):
def initParams(self):
self.n_fft = 512
self.hop_length = 128
self.window_str = 'hann'
@ -30,7 +30,7 @@ class TestStft(FeatTest):
def test_stft(self):
ps_stft = Stft(self.n_fft, self.hop_length)
ps_res = ps_stft(
self.waveform.T).squeeze(1).T # (n_fft//2 + 1, n_frmaes)
self.waveform.T).squeeze(1).T # (n_fft//2 + 1, n_frames)
x = paddle.to_tensor(self.waveform)
window = get_window(self.window_str, self.n_fft, dtype=x.dtype)

@ -132,7 +132,7 @@ def create_manifest(data_dir, manifest_path):
def prepare_dataset(url, md5sum, target_dir, manifest_path):
"""Download, unpack and create summmary manifest file.
"""Download, unpack and create summary manifest file.
"""
if not os.path.exists(os.path.join(target_dir, "LibriSpeech")):
# download

@ -13,7 +13,7 @@
# limitations under the License.
"""Prepare Ted-En-Zh speech translation dataset
Create manifest files from splited datased.
Create manifest files from split dataset.
dev set: tst2010, test set: tst2015
Manifest file is a json-format file with each line containing the
meta data (i.e. audio filepath, transcript and audio duration)

@ -71,7 +71,7 @@ def read_trn(filepath):
with open(filepath, 'r') as f:
lines = f.read().strip().split('\n')
assert len(lines) == 3, lines
# charactor text, remove withespace
# character text, remove whitespace
texts.append(''.join(lines[0].split()))
texts.extend(lines[1:])
return texts
@ -127,7 +127,7 @@ def create_manifest(data_dir, manifest_path_prefix):
'utt2spk': spk,
'feat': audio_path,
'feat_shape': (duration, ), # second
'text': word_text, # charactor
'text': word_text, # character
'syllable': syllable_text,
'phone': phone_text,
},

@ -123,7 +123,7 @@ def read_algin(filepath: str) -> str:
filepath (str): [description]
Returns:
str: token sepearte by <space>
str: tokens separated by <space>
"""
aligns = [] # (start, end, token)
with open(filepath, 'r') as f:

@ -13,7 +13,7 @@
# limitations under the License.
"""Prepare TIMIT dataset (Standard split from Kaldi)
Create manifest files from splited datased.
Create manifest files from split dataset.
Manifest file is a json-format file with each line containing the
meta data (i.e. audio filepath, transcript and audio duration)
of each audio file in the data set.

@ -167,7 +167,7 @@ def prepare_dataset(base_url, data_list, target_dir, manifest_path,
# check the target zip file md5sum
if not check_md5sum(target_name, target_md5sum):
raise RuntimeError("{} MD5 checkssum failed".format(target_name))
raise RuntimeError("{} MD5 checksum failed".format(target_name))
else:
print("Check {} md5sum successfully".format(target_name))

@ -179,7 +179,7 @@ def download_dataset(base_url, data_list, target_data, target_dir, dataset):
# check the target zip file md5sum
if not check_md5sum(target_name, target_md5sum):
raise RuntimeError("{} MD5 checkssum failed".format(target_name))
raise RuntimeError("{} MD5 checksum failed".format(target_name))
else:
print("Check {} md5sum successfully".format(target_name))
@ -187,7 +187,7 @@ def download_dataset(base_url, data_list, target_data, target_dir, dataset):
# we need make the test directory
unzip(target_name, os.path.join(target_dir, "test"))
else:
# upzip dev zip pacakge and will create the dev directory
# unzip dev zip package and will create the dev directory
unzip(target_name, target_dir)

@ -14,7 +14,7 @@ Now, the search word in demo is:
### 1. Installation
see [installation](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/docs/source/install.md).
You can choose one way from meduim and hard to install paddlespeech.
You can choose one way from medium and hard to install paddlespeech.
The dependency refers to the requirements.txt, and install the dependency as follows:

@ -19,7 +19,7 @@ Note: this demo uses the [CN-Celeb](http://openslr.org/82/) dataset of at least
### 1. Prepare PaddleSpeech
Audio vector extraction requires PaddleSpeech training model, so please make sure that PaddleSpeech has been installed before running. Specific installation steps: See [installation](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/docs/source/install.md).
You can choose one way from easy, meduim and hard to install paddlespeech.
You can choose one way from easy, medium and hard to install paddlespeech.
### 2. Prepare MySQL and Milvus services by docker-compose
The audio similarity search system requires Milvus, MySQL services. We can start these containers with one click through [docker-compose.yaml](./docker-compose.yaml), so please make sure you have [installed Docker Engine](https://docs.docker.com/engine/install/) and [Docker Compose](https://docs.docker.com/compose/install/) before running. then

@ -11,7 +11,7 @@ This demo is an implementation to tag an audio file with 527 [AudioSet](https://
### 1. Installation
see [installation](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/docs/source/install.md).
You can choose one way from easy, meduim and hard to install paddlespeech.
You can choose one way from easy, medium and hard to install paddlespeech.
### 2. Prepare Input File
The input of this demo should be a WAV file(`.wav`).

@ -10,7 +10,7 @@ This demo is an implementation to automatic video subtitles from a video file. I
### 1. Installation
see [installation](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/docs/source/install.md).
You can choose one way from easy, meduim and hard to install paddlespeech.
You can choose one way from easy, medium and hard to install paddlespeech.
### 2. Prepare Input
Get a video file with the speech of the specific language:

@ -10,7 +10,7 @@ This demo is an implementation to recognize keyword from a specific audio file.
### 1. Installation
see [installation](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/docs/source/install.md).
You can choose one way from easy, meduim and hard to install paddlespeech.
You can choose one way from easy, medium and hard to install paddlespeech.
### 2. Prepare Input File
The input of this demo should be a WAV file(`.wav`), and the sample rate must be the same as the model.

@ -9,7 +9,7 @@ This demo is an implementation to restore punctuation from raw text. It can be d
### 1. Installation
see [installation](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/docs/source/install.md).
You can choose one way from easy, meduim and hard to install paddlespeech.
You can choose one way from easy, medium and hard to install paddlespeech.
### 2. Prepare Input
The input of this demo should be a text of the specific language that can be passed via argument.

@ -11,7 +11,7 @@ This demo is an implementation to extract speaker embedding from a specific audi
### 1. Installation
see [installation](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/docs/source/install.md).
You can choose one way from easy, meduim and hard to install paddlespeech.
You can choose one way from easy, medium and hard to install paddlespeech.
### 2. Prepare Input File
The input of this cli demo should be a WAV file(`.wav`), and the sample rate must be the same as the model.

@ -10,7 +10,7 @@ This demo is an implementation to recognize text from a specific audio file. It
### 1. Installation
see [installation](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/docs/source/install.md).
You can choose one way from easy, meduim and hard to install paddlespeech.
You can choose one way from easy, medium and hard to install paddlespeech.
### 2. Prepare Input File
The input of this demo should be a WAV file(`.wav`), and the sample rate must be the same as the model.

@ -15,7 +15,7 @@ see [installation](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/doc
It is recommended to use **paddlepaddle 2.4rc** or above.
You can choose one way from easy, meduim and hard to install paddlespeech.
You can choose one way from easy, medium and hard to install paddlespeech.
**If you install in easy mode, you need to prepare the yaml file by yourself, you can refer to the yaml file in the conf directory.**

@ -10,7 +10,7 @@ This demo is an implementation to recognize text or produce the acoustic represe
### 1. Installation
see [installation](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/docs/source/install.md).
You can choose one way from easy, meduim and hard to install paddlespeech.
You can choose one way from easy, medium and hard to install paddlespeech.
### 2. Prepare Input File
The input of this demo should be a WAV file(`.wav`), and the sample rate must be the same as the model.

@ -9,7 +9,7 @@ This demo is an implementation to recognize text from a specific audio file and
### 1. Installation
see [installation](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/docs/source/install.md).
You can choose one way from easy, meduim and hard to install paddlespeech.
You can choose one way from easy, medium and hard to install paddlespeech.
### 2. Prepare Input File

@ -18,7 +18,7 @@ see [installation](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/doc
It is recommended to use **paddlepaddle 2.4rc** or above.
You can choose one way from easy, meduim and hard to install paddlespeech.
You can choose one way from easy, medium and hard to install paddlespeech.
**If you install in easy mode, you need to prepare the yaml file by yourself, you can refer to

@ -15,7 +15,7 @@ see [installation](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/doc
It is recommended to use **paddlepaddle 2.4rc** or above.
You can choose one way from easy, meduim and hard to install paddlespeech.
You can choose one way from easy, medium and hard to install paddlespeech.
**If you install in easy mode, you need to prepare the yaml file by yourself, you can refer to the yaml file in the conf directory.**

@ -10,7 +10,7 @@ This demo is an implementation to generate audio from the given text. It can be
### 1. Installation
see [installation](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/docs/source/install.md).
You can choose one way from easy, meduim and hard to install paddlespeech.
You can choose one way from easy, medium and hard to install paddlespeech.
### 2. Prepare Input
The input of this demo should be a text of the specific language that can be passed via argument.

@ -9,7 +9,7 @@ Whisper model trained by OpenAI whisper https://github.com/openai/whisper
### 1. Installation
see [installation](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/docs/source/install.md).
You can choose one way from easy, meduim and hard to install paddlespeech.
You can choose one way from easy, medium and hard to install paddlespeech.
### 2. Prepare Input File
The input of this demo should be a WAV file(`.wav`), and the sample rate must be the same as the model.

Loading…
Cancel
Save