# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides functions to calculate error rates at different
levels, e.g. wer for word-level, cer for char-level.
"""
import editdistance
import numpy as np

__all__ = ['word_errors', 'char_errors', 'wer', 'cer']

# Warm-up call; the result is unused (presumably an import-time sanity check
# that the compiled `editdistance` extension loaded correctly).
editdistance.eval("a", "b")


def _levenshtein_distance(ref, hyp):
    """Levenshtein distance is a string metric for measuring the difference
    between two sequences. Informally, the Levenshtein distance is defined as
    the minimum number of single-character edits (substitutions, insertions or
    deletions) required to change one word into the other. We can naturally
    extend the edits to word level when calculating the Levenshtein distance
    between two sentences.
    """
    m = len(ref)
    n = len(hyp)

    # special cases
    if ref == hyp:
        return 0
    if m == 0:
        return n
    if n == 0:
        return m

    if m < n:
        ref, hyp = hyp, ref
        m, n = n, m

    # use O(min(m, n)) space
    distance = np.zeros((2, n + 1), dtype=np.int32)

    # initialize distance matrix
    for j in range(n + 1):
        distance[0][j] = j

    # calculate levenshtein distance
    for i in range(1, m + 1):
        prev_row_idx = (i - 1) % 2
        cur_row_idx = i % 2
        distance[cur_row_idx][0] = i
        for j in range(1, n + 1):
            if ref[i - 1] == hyp[j - 1]:
                distance[cur_row_idx][j] = distance[prev_row_idx][j - 1]
            else:
                s_num = distance[prev_row_idx][j - 1] + 1
                i_num = distance[cur_row_idx][j - 1] + 1
                d_num = distance[prev_row_idx][j] + 1
                distance[cur_row_idx][j] = min(s_num, i_num, d_num)

    return distance[m % 2][n]


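# For example, _levenshtein_distance("kitten", "sitting") == 3:
#   kitten -> sitten  (substitute 'k' -> 's')
#   sitten -> sittin  (substitute 'e' -> 'i')
#   sittin -> sitting (insert 'g')

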
def word_errors(reference, hypothesis, ignore_case=False, delimiter=' '):
    """Compute the levenshtein distance between reference sequence and
    hypothesis sequence at word level.

    :param reference: The reference sentence.
    :type reference: str
    :param hypothesis: The hypothesis sentence.
    :type hypothesis: str
    :param ignore_case: Whether the comparison is case-insensitive.
    :type ignore_case: bool
    :param delimiter: Delimiter of input sentences.
    :type delimiter: str
    :return: Levenshtein distance and word number of reference sentence.
    :rtype: tuple
    """
    if ignore_case:
        reference = reference.lower()
        hypothesis = hypothesis.lower()

    ref_words = list(filter(None, reference.split(delimiter)))
    hyp_words = list(filter(None, hypothesis.split(delimiter)))

    edit_distance = _levenshtein_distance(ref_words, hyp_words)
    # `editdistance.eval` is less precise than `_levenshtein_distance`
    # edit_distance = editdistance.eval(ref_words, hyp_words)
    return float(edit_distance), len(ref_words)


def char_errors(reference, hypothesis, ignore_case=False, remove_space=False):
    """Compute the levenshtein distance between reference sequence and
    hypothesis sequence at char level.

    :param reference: The reference sentence.
    :type reference: str
    :param hypothesis: The hypothesis sentence.
    :type hypothesis: str
    :param ignore_case: Whether the comparison is case-insensitive.
    :type ignore_case: bool
    :param remove_space: Whether to remove internal space characters.
    :type remove_space: bool
    :return: Levenshtein distance and length of reference sentence.
    :rtype: tuple
    """
    if ignore_case:
        reference = reference.lower()
        hypothesis = hypothesis.lower()

    join_char = ' '
    if remove_space:
        join_char = ''

    reference = join_char.join(list(filter(None, reference.split(' '))))
    hypothesis = join_char.join(list(filter(None, hypothesis.split(' '))))

    edit_distance = _levenshtein_distance(reference, hypothesis)
    # `editdistance.eval` is less precise than `_levenshtein_distance`
    # edit_distance = editdistance.eval(reference, hypothesis)
    return float(edit_distance), len(reference)


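# Illustration of the (distance, reference length) return contract:
#   word_errors('hello world', 'hello word') -> (1.0, 2)   # 1 edit / 2 ref words
#   char_errors('hello world', 'hello word') -> (1.0, 11)  # 1 edit / 11 ref chars

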
def wer(reference, hypothesis, ignore_case=False, delimiter=' '):
    """Calculate word error rate (WER). WER compares reference text and
    hypothesis text at word level. WER is defined as:

    .. math::
        WER = (Sw + Dw + Iw) / Nw

    where

    .. code-block:: text

        Sw is the number of words substituted,
        Dw is the number of words deleted,
        Iw is the number of words inserted,
        Nw is the number of words in the reference

    We can use the Levenshtein distance to calculate WER. Note that empty
    items are removed when splitting sentences by the delimiter.

    :param reference: The reference sentence.
    :type reference: str
    :param hypothesis: The hypothesis sentence.
    :type hypothesis: str
    :param ignore_case: Whether the comparison is case-insensitive.
    :type ignore_case: bool
    :param delimiter: Delimiter of input sentences.
    :type delimiter: str
    :return: Word error rate.
    :rtype: float
    :raises ValueError: If word number of reference is zero.
    """
    edit_distance, ref_len = word_errors(reference, hypothesis, ignore_case,
                                         delimiter)

    if ref_len == 0:
        raise ValueError("Reference's word number should be greater than 0.")

    wer = float(edit_distance) / ref_len
    return wer


def cer(reference, hypothesis, ignore_case=False, remove_space=False):
    """Calculate character error rate (CER). CER compares reference text and
    hypothesis text at char level. CER is defined as:

    .. math::
        CER = (Sc + Dc + Ic) / Nc

    where

    .. code-block:: text

        Sc is the number of characters substituted,
        Dc is the number of characters deleted,
        Ic is the number of characters inserted,
        Nc is the number of characters in the reference

    We can use the Levenshtein distance to calculate CER. Chinese input should
    be encoded as unicode. Note that leading and trailing space characters are
    truncated, and multiple consecutive space characters within a sentence are
    collapsed into one space character.

    :param reference: The reference sentence.
    :type reference: str
    :param hypothesis: The hypothesis sentence.
    :type hypothesis: str
    :param ignore_case: Whether the comparison is case-insensitive.
    :type ignore_case: bool
    :param remove_space: Whether to remove internal space characters.
    :type remove_space: bool
    :return: Character error rate.
    :rtype: float
    :raises ValueError: If the reference length is zero.
    """
    edit_distance, ref_len = char_errors(reference, hypothesis, ignore_case,
                                         remove_space)

    if ref_len == 0:
        raise ValueError("Length of reference should be greater than 0.")

    cer = float(edit_distance) / ref_len
    return cer
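

# A minimal usage sketch; the reference/hypothesis strings below are
# illustrative examples only.
if __name__ == '__main__':
    ref = 'i love deep learning'
    hyp = 'i love deep learn'
    # word-level: 1 substitution ("learning" -> "learn") over 4 reference words
    print('WER:', wer(ref, hyp))  # 1 / 4 = 0.25
    # char-level: 3 edits ("ing" dropped) over 20 reference characters
    print('CER:', cer(ref, hyp))  # 3 / 20 = 0.15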