From 22d7c06c7fb75caec08b29e072c416b22b812477 Mon Sep 17 00:00:00 2001
From: mmglove
Date: Tue, 22 Mar 2022 20:07:03 +0800
Subject: [PATCH 001/127] fix conformer benchmark data

---
 tests/test_tipc/prepare.sh | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)

diff --git a/tests/test_tipc/prepare.sh b/tests/test_tipc/prepare.sh
index 31dff320f..b62e54fd2 100644
--- a/tests/test_tipc/prepare.sh
+++ b/tests/test_tipc/prepare.sh
@@ -24,7 +24,7 @@ trainer_list=$(func_parser_value "${lines[14]}")
 
 if [ ${MODE} = "benchmark_train" ];then
     curPath=$(readlink -f "$(dirname "$0")")
-    echo "curPath:"${curPath}
+    echo "curPath:"${curPath}    # /PaddleSpeech/tests/test_tipc/
     cd ${curPath}/../..
     apt-get install libsndfile1 -y
     pip install pytest-runner -i https://pypi.tuna.tsinghua.edu.cn/simple
@@ -40,19 +40,17 @@ if [ ${MODE} = "benchmark_train" ];then
         echo "please contact author to get the URL.\n"
         exit
     else
-        wget -P ${curPath}/../../dataset/aishell/ ${URL}
+        wget -P ${curPath}/../../dataset/aishell/ ${URL}
+        mv ${curPath}/../../dataset/aishell/aishell.py ${curPath}/../../dataset/aishell/aishell_tiny.py
     fi
-    sed -i "s#^URL_ROOT_TAG#URL_ROOT = '${URL}'#g" ${curPath}/conformer/scripts/aishell_tiny.py
-    cp ${curPath}/conformer/scripts/aishell_tiny.py ${curPath}/../../dataset/aishell/
     cd ${curPath}/../../examples/aishell/asr1
-    source path.sh
-    # download audio data
+
+    # Prepare the data
     sed -i "s#aishell.py#aishell_tiny.py#g" ./local/data.sh
     sed -i "s#python3#python#g" ./local/data.sh
-    bash ./local/data.sh || exit -1
-    if [ $? -ne 0 ]; then
-        exit 1
-    fi
+    bash run.sh --stage 0 --stop_stage 0    # the first run occasionally errors out, so run it a second time
+    bash run.sh --stage 0 --stop_stage 0
+
     mkdir -p ${curPath}/conformer/benchmark_train/
     cp -rf conf ${curPath}/conformer/benchmark_train/
     cp -rf data ${curPath}/conformer/benchmark_train/

From 4b7786f2ed316aaa5d2bb0185acd0960ce9e7eee Mon Sep 17 00:00:00 2001
From: TianYuan
Date: Fri, 6 May 2022 08:02:16 +0000
Subject: [PATCH 002/127] add vits network scripts, test=tts

---
 examples/csmsc/vits/README.md                 |   0
 examples/csmsc/vits/conf/default.yaml         |   0
 examples/csmsc/vits/local/preprocess.sh       |   0
 examples/csmsc/vits/local/synthesize.sh       |   0
 examples/csmsc/vits/local/train.sh            |   0
 examples/csmsc/vits/path.sh                   |  13 +
 examples/csmsc/vits/run.sh                    |   0
 paddlespeech/t2s/datasets/get_feats.py        | 114 ++--
 paddlespeech/t2s/exps/vits/normalize.py       |  13 +
 paddlespeech/t2s/exps/vits/preprocess.py      |  13 +
 paddlespeech/t2s/exps/vits/synthesize.py      |  13 +
 paddlespeech/t2s/exps/vits/train.py           |  13 +
 paddlespeech/t2s/frontend/zh_frontend.py      |   4 +-
 paddlespeech/t2s/models/hifigan/hifigan.py    |  18 +-
 paddlespeech/t2s/models/vits/__init__.py      |  13 +
 .../t2s/models/vits/duration_predictor.py     | 172 ++++++
 paddlespeech/t2s/models/vits/flow.py          | 316 ++++++++++
 paddlespeech/t2s/models/vits/generator.py     | 551 +++++++++++++++++
 .../models/vits/monotonic_align/__init__.py   |  94 +++
 .../t2s/models/vits/monotonic_align/core.pyx  |  62 ++
 .../t2s/models/vits/monotonic_align/setup.py  |  39 ++
 .../t2s/models/vits/posterior_encoder.py      | 120 ++++
 .../t2s/models/vits/residual_coupling.py      | 244 ++++++++
 paddlespeech/t2s/models/vits/text_encoder.py  | 145 +++++
 paddlespeech/t2s/models/vits/transform.py     | 238 ++++++++
 paddlespeech/t2s/models/vits/vits.py          | 573 ++++++++++++++++++
 paddlespeech/t2s/models/vits/vits_updater.py  |   0
 .../t2s/models/vits/wavenet/__init__.py       |  13 +
 .../t2s/models/vits/wavenet/residual_block.py | 154 +++++
 .../t2s/models/vits/wavenet/wavenet.py        | 175 ++++++
 paddlespeech/t2s/modules/losses.py            |  37 ++
paddlespeech/t2s/modules/nets_utils.py | 65 ++ 32 files changed, 3173 insertions(+), 39 deletions(-) create mode 100644 examples/csmsc/vits/README.md create mode 100644 examples/csmsc/vits/conf/default.yaml create mode 100755 examples/csmsc/vits/local/preprocess.sh create mode 100755 examples/csmsc/vits/local/synthesize.sh create mode 100755 examples/csmsc/vits/local/train.sh create mode 100755 examples/csmsc/vits/path.sh create mode 100755 examples/csmsc/vits/run.sh create mode 100644 paddlespeech/t2s/exps/vits/normalize.py create mode 100644 paddlespeech/t2s/exps/vits/preprocess.py create mode 100644 paddlespeech/t2s/exps/vits/synthesize.py create mode 100644 paddlespeech/t2s/exps/vits/train.py create mode 100644 paddlespeech/t2s/models/vits/__init__.py create mode 100644 paddlespeech/t2s/models/vits/duration_predictor.py create mode 100644 paddlespeech/t2s/models/vits/flow.py create mode 100644 paddlespeech/t2s/models/vits/generator.py create mode 100644 paddlespeech/t2s/models/vits/monotonic_align/__init__.py create mode 100644 paddlespeech/t2s/models/vits/monotonic_align/core.pyx create mode 100644 paddlespeech/t2s/models/vits/monotonic_align/setup.py create mode 100644 paddlespeech/t2s/models/vits/posterior_encoder.py create mode 100644 paddlespeech/t2s/models/vits/residual_coupling.py create mode 100644 paddlespeech/t2s/models/vits/text_encoder.py create mode 100644 paddlespeech/t2s/models/vits/transform.py create mode 100644 paddlespeech/t2s/models/vits/vits.py create mode 100644 paddlespeech/t2s/models/vits/vits_updater.py create mode 100644 paddlespeech/t2s/models/vits/wavenet/__init__.py create mode 100644 paddlespeech/t2s/models/vits/wavenet/residual_block.py create mode 100644 paddlespeech/t2s/models/vits/wavenet/wavenet.py diff --git a/examples/csmsc/vits/README.md b/examples/csmsc/vits/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/examples/csmsc/vits/conf/default.yaml b/examples/csmsc/vits/conf/default.yaml new file mode 100644 index 000000000..e69de29bb diff --git a/examples/csmsc/vits/local/preprocess.sh b/examples/csmsc/vits/local/preprocess.sh new file mode 100755 index 000000000..e69de29bb diff --git a/examples/csmsc/vits/local/synthesize.sh b/examples/csmsc/vits/local/synthesize.sh new file mode 100755 index 000000000..e69de29bb diff --git a/examples/csmsc/vits/local/train.sh b/examples/csmsc/vits/local/train.sh new file mode 100755 index 000000000..e69de29bb diff --git a/examples/csmsc/vits/path.sh b/examples/csmsc/vits/path.sh new file mode 100755 index 000000000..52d0c3783 --- /dev/null +++ b/examples/csmsc/vits/path.sh @@ -0,0 +1,13 @@ +#!/bin/bash +export MAIN_ROOT=`realpath ${PWD}/../../../` + +export PATH=${MAIN_ROOT}:${MAIN_ROOT}/utils:${PATH} +export LC_ALL=C + +export PYTHONDONTWRITEBYTECODE=1 +# Use UTF-8 in Python to avoid UnicodeDecodeError when LC_ALL=C +export PYTHONIOENCODING=UTF-8 +export PYTHONPATH=${MAIN_ROOT}:${PYTHONPATH} + +MODEL=vits +export BIN_DIR=${MAIN_ROOT}/paddlespeech/t2s/exps/${MODEL} \ No newline at end of file diff --git a/examples/csmsc/vits/run.sh b/examples/csmsc/vits/run.sh new file mode 100755 index 000000000..e69de29bb diff --git a/paddlespeech/t2s/datasets/get_feats.py b/paddlespeech/t2s/datasets/get_feats.py index b4bea0bd0..e1ca0eeb9 100644 --- a/paddlespeech/t2s/datasets/get_feats.py +++ b/paddlespeech/t2s/datasets/get_feats.py @@ -20,15 +20,14 @@ from scipy.interpolate import interp1d class LogMelFBank(): def __init__(self, - sr=24000, - n_fft=2048, - hop_length=300, - win_length=None, - 
window="hann", - n_mels=80, - fmin=80, - fmax=7600, - eps=1e-10): + sr: int=24000, + n_fft: int=2048, + hop_length: int=300, + win_length: int=None, + window: str="hann", + n_mels: int=80, + fmin: int=80, + fmax: int=7600): self.sr = sr # stft self.n_fft = n_fft @@ -54,7 +53,7 @@ class LogMelFBank(): fmax=self.fmax) return mel_filter - def _stft(self, wav): + def _stft(self, wav: np.ndarray): D = librosa.core.stft( wav, n_fft=self.n_fft, @@ -65,11 +64,11 @@ class LogMelFBank(): pad_mode=self.pad_mode) return D - def _spectrogram(self, wav): + def _spectrogram(self, wav: np.ndarray): D = self._stft(wav) return np.abs(D) - def _mel_spectrogram(self, wav): + def _mel_spectrogram(self, wav: np.ndarray): S = self._spectrogram(wav) mel = np.dot(self.mel_filter, S) return mel @@ -90,14 +89,18 @@ class LogMelFBank(): class Pitch(): - def __init__(self, sr=24000, hop_length=300, f0min=80, f0max=7600): + def __init__(self, + sr: int=24000, + hop_length: int=300, + f0min: int=80, + f0max: int=7600): self.sr = sr self.hop_length = hop_length self.f0min = f0min self.f0max = f0max - def _convert_to_continuous_f0(self, f0: np.array) -> np.array: + def _convert_to_continuous_f0(self, f0: np.ndarray) -> np.ndarray: if (f0 == 0).all(): print("All frames seems to be unvoiced.") return f0 @@ -120,9 +123,9 @@ class Pitch(): return f0 def _calculate_f0(self, - input: np.array, - use_continuous_f0=True, - use_log_f0=True) -> np.array: + input: np.ndarray, + use_continuous_f0: bool=True, + use_log_f0: bool=True) -> np.ndarray: input = input.astype(np.float) frame_period = 1000 * self.hop_length / self.sr f0, timeaxis = pyworld.dio( @@ -139,7 +142,8 @@ class Pitch(): f0[nonzero_idxs] = np.log(f0[nonzero_idxs]) return f0.reshape(-1) - def _average_by_duration(self, input: np.array, d: np.array) -> np.array: + def _average_by_duration(self, input: np.ndarray, + d: np.ndarray) -> np.ndarray: d_cumsum = np.pad(d.cumsum(0), (1, 0), 'constant') arr_list = [] for start, end in zip(d_cumsum[:-1], d_cumsum[1:]): @@ -154,11 +158,11 @@ class Pitch(): return arr_list def get_pitch(self, - wav, - use_continuous_f0=True, - use_log_f0=True, - use_token_averaged_f0=True, - duration=None): + wav: np.ndarray, + use_continuous_f0: bool=True, + use_log_f0: bool=True, + use_token_averaged_f0: bool=True, + duration: np.ndarray=None): f0 = self._calculate_f0(wav, use_continuous_f0, use_log_f0) if use_token_averaged_f0 and duration is not None: f0 = self._average_by_duration(f0, duration) @@ -167,13 +171,13 @@ class Pitch(): class Energy(): def __init__(self, - sr=24000, - n_fft=2048, - hop_length=300, - win_length=None, - window="hann", - center=True, - pad_mode="reflect"): + sr: int=24000, + n_fft: int=2048, + hop_length: int=300, + win_length: int=None, + window: str="hann", + center: bool=True, + pad_mode: str="reflect"): self.sr = sr self.n_fft = n_fft @@ -183,7 +187,7 @@ class Energy(): self.center = center self.pad_mode = pad_mode - def _stft(self, wav): + def _stft(self, wav: np.ndarray): D = librosa.core.stft( wav, n_fft=self.n_fft, @@ -194,7 +198,7 @@ class Energy(): pad_mode=self.pad_mode) return D - def _calculate_energy(self, input): + def _calculate_energy(self, input: np.ndarray): input = input.astype(np.float32) input_stft = self._stft(input) input_power = np.abs(input_stft)**2 @@ -203,7 +207,8 @@ class Energy(): np.sum(input_power, axis=0), a_min=1.0e-10, a_max=float('inf'))) return energy - def _average_by_duration(self, input: np.array, d: np.array) -> np.array: + def _average_by_duration(self, input: np.ndarray, + d: 
np.ndarray) -> np.ndarray: d_cumsum = np.pad(d.cumsum(0), (1, 0), 'constant') arr_list = [] for start, end in zip(d_cumsum[:-1], d_cumsum[1:]): @@ -214,8 +219,49 @@ class Energy(): arr_list = np.expand_dims(np.array(arr_list), 0).T return arr_list - def get_energy(self, wav, use_token_averaged_energy=True, duration=None): + def get_energy(self, + wav: np.ndarray, + use_token_averaged_energy: bool=True, + duration: np.ndarray=None): energy = self._calculate_energy(wav) if use_token_averaged_energy and duration is not None: energy = self._average_by_duration(energy, duration) return energy + + +class LinearSpectrogram(): + def __init__( + self, + n_fft: int=1024, + win_length: int=None, + hop_length: int=256, + window: str="hann", + center: bool=True, ): + self.n_fft = n_fft + self.hop_length = hop_length + self.win_length = win_length + self.window = window + self.center = center + self.n_fft = n_fft + self.pad_mode = "reflect" + + def _stft(self, wav: np.ndarray): + D = librosa.core.stft( + wav, + n_fft=self.n_fft, + hop_length=self.hop_length, + win_length=self.win_length, + window=self.window, + center=self.center, + pad_mode=self.pad_mode) + return D + + def _spectrogram(self, wav: np.ndarray): + D = self._stft(wav) + return np.abs(D) + + def get_linear_spectrogram(self, wav: np.ndarray): + linear_spectrogram = self._spectrogram(wav) + linear_spectrogram = np.clip( + linear_spectrogram, a_min=1e-10, a_max=float("inf")) + return linear_spectrogram.T diff --git a/paddlespeech/t2s/exps/vits/normalize.py b/paddlespeech/t2s/exps/vits/normalize.py new file mode 100644 index 000000000..97043fd7b --- /dev/null +++ b/paddlespeech/t2s/exps/vits/normalize.py @@ -0,0 +1,13 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/paddlespeech/t2s/exps/vits/preprocess.py b/paddlespeech/t2s/exps/vits/preprocess.py new file mode 100644 index 000000000..97043fd7b --- /dev/null +++ b/paddlespeech/t2s/exps/vits/preprocess.py @@ -0,0 +1,13 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/paddlespeech/t2s/exps/vits/synthesize.py b/paddlespeech/t2s/exps/vits/synthesize.py new file mode 100644 index 000000000..97043fd7b --- /dev/null +++ b/paddlespeech/t2s/exps/vits/synthesize.py @@ -0,0 +1,13 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/paddlespeech/t2s/exps/vits/train.py b/paddlespeech/t2s/exps/vits/train.py new file mode 100644 index 000000000..97043fd7b --- /dev/null +++ b/paddlespeech/t2s/exps/vits/train.py @@ -0,0 +1,13 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/paddlespeech/t2s/frontend/zh_frontend.py b/paddlespeech/t2s/frontend/zh_frontend.py index bb8ed5b49..129aa944e 100644 --- a/paddlespeech/t2s/frontend/zh_frontend.py +++ b/paddlespeech/t2s/frontend/zh_frontend.py @@ -195,7 +195,7 @@ class Frontend(): new_initials.append(initials[i]) return new_initials, new_finals - def _p2id(self, phonemes: List[str]) -> np.array: + def _p2id(self, phonemes: List[str]) -> np.ndarray: # replace unk phone with sp phonemes = [ phn if phn in self.vocab_phones else "sp" for phn in phonemes @@ -203,7 +203,7 @@ class Frontend(): phone_ids = [self.vocab_phones[item] for item in phonemes] return np.array(phone_ids, np.int64) - def _t2id(self, tones: List[str]) -> np.array: + def _t2id(self, tones: List[str]) -> np.ndarray: # replace unk phone with sp tones = [tone if tone in self.vocab_tones else "0" for tone in tones] tone_ids = [self.vocab_tones[item] for item in tones] diff --git a/paddlespeech/t2s/models/hifigan/hifigan.py b/paddlespeech/t2s/models/hifigan/hifigan.py index ac5ff204f..bea9dd9a3 100644 --- a/paddlespeech/t2s/models/hifigan/hifigan.py +++ b/paddlespeech/t2s/models/hifigan/hifigan.py @@ -16,6 +16,7 @@ import copy from typing import Any from typing import Dict from typing import List +from typing import Optional import paddle import paddle.nn.functional as F @@ -34,6 +35,7 @@ class HiFiGANGenerator(nn.Layer): in_channels: int=80, out_channels: int=1, channels: int=512, + global_channels: int=-1, kernel_size: int=7, upsample_scales: List[int]=(8, 8, 2, 2), upsample_kernel_sizes: List[int]=(16, 16, 4, 4), @@ -51,6 +53,7 @@ class HiFiGANGenerator(nn.Layer): in_channels (int): Number of input channels. out_channels (int): Number of output channels. channels (int): Number of hidden representation channels. + global_channels (int): Number of global conditioning channels. kernel_size (int): Kernel size of initial and final conv layer. upsample_scales (list): List of upsampling scales. upsample_kernel_sizes (list): List of kernel sizes for upsampling layers. 
@@ -119,6 +122,9 @@ class HiFiGANGenerator(nn.Layer): padding=(kernel_size - 1) // 2, ), nn.Tanh(), ) + if global_channels > 0: + self.global_conv = nn.Conv1D(global_channels, channels, 1) + nn.initializer.set_global_initializer(None) # apply weight norm @@ -128,15 +134,18 @@ class HiFiGANGenerator(nn.Layer): # reset parameters self.reset_parameters() - def forward(self, c): + def forward(self, c, g: Optional[paddle.Tensor]=None): """Calculate forward propagation. Args: c (Tensor): Input tensor (B, in_channels, T). + g (Optional[Tensor]): Global conditioning tensor (B, global_channels, 1). Returns: Tensor: Output tensor (B, out_channels, T). """ c = self.input_conv(c) + if g is not None: + c = c + self.global_conv(g) for i in range(self.num_upsamples): c = self.upsamples[i](c) # initialize @@ -187,16 +196,19 @@ class HiFiGANGenerator(nn.Layer): self.apply(_remove_weight_norm) - def inference(self, c): + def inference(self, c, g: Optional[paddle.Tensor]=None): """Perform inference. Args: c (Tensor): Input tensor (T, in_channels). normalize_before (bool): Whether to perform normalization. + g (Optional[Tensor]): Global conditioning tensor (global_channels, 1). Returns: Tensor: Output tensor (T ** prod(upsample_scales), out_channels). """ - c = self.forward(c.transpose([1, 0]).unsqueeze(0)) + if g is not None: + g = g.unsqueeze(0) + c = self.forward(c.transpose([1, 0]).unsqueeze(0), g=g) return c.squeeze(0).transpose([1, 0]) diff --git a/paddlespeech/t2s/models/vits/__init__.py b/paddlespeech/t2s/models/vits/__init__.py new file mode 100644 index 000000000..97043fd7b --- /dev/null +++ b/paddlespeech/t2s/models/vits/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/paddlespeech/t2s/models/vits/duration_predictor.py b/paddlespeech/t2s/models/vits/duration_predictor.py new file mode 100644 index 000000000..6197d5696 --- /dev/null +++ b/paddlespeech/t2s/models/vits/duration_predictor.py @@ -0,0 +1,172 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Stochastic duration predictor modules in VITS. + +This code is based on https://github.com/jaywalnut310/vits. 
+ +""" +import math +from typing import Optional + +import paddle +import paddle.nn.functional as F +from paddle import nn + +from paddlespeech.t2s.models.vits.flow import ConvFlow +from paddlespeech.t2s.models.vits.flow import DilatedDepthSeparableConv +from paddlespeech.t2s.models.vits.flow import ElementwiseAffineFlow +from paddlespeech.t2s.models.vits.flow import FlipFlow +from paddlespeech.t2s.models.vits.flow import LogFlow + + +class StochasticDurationPredictor(nn.Layer): + """Stochastic duration predictor module. + This is a module of stochastic duration predictor described in `Conditional + Variational Autoencoder with Adversarial Learning for End-to-End Text-to-Speech`_. + .. _`Conditional Variational Autoencoder with Adversarial Learning for End-to-End + Text-to-Speech`: https://arxiv.org/abs/2106.06103 + """ + + def __init__( + self, + channels: int=192, + kernel_size: int=3, + dropout_rate: float=0.5, + flows: int=4, + dds_conv_layers: int=3, + global_channels: int=-1, ): + """Initialize StochasticDurationPredictor module. + Args: + channels (int): Number of channels. + kernel_size (int): Kernel size. + dropout_rate (float): Dropout rate. + flows (int): Number of flows. + dds_conv_layers (int): Number of conv layers in DDS conv. + global_channels (int): Number of global conditioning channels. + """ + super().__init__() + + self.pre = nn.Conv1D(channels, channels, 1) + self.dds = DilatedDepthSeparableConv( + channels, + kernel_size, + layers=dds_conv_layers, + dropout_rate=dropout_rate, ) + self.proj = nn.Conv1D(channels, channels, 1) + + self.log_flow = LogFlow() + self.flows = nn.LayerList() + self.flows.append(ElementwiseAffineFlow(2)) + for i in range(flows): + self.flows.append( + ConvFlow( + 2, + channels, + kernel_size, + layers=dds_conv_layers, )) + self.flows.append(FlipFlow()) + + self.post_pre = nn.Conv1D(1, channels, 1) + self.post_dds = DilatedDepthSeparableConv( + channels, + kernel_size, + layers=dds_conv_layers, + dropout_rate=dropout_rate, ) + self.post_proj = nn.Conv1D(channels, channels, 1) + self.post_flows = nn.LayerList() + self.post_flows.append(ElementwiseAffineFlow(2)) + for i in range(flows): + self.post_flows.append( + ConvFlow( + 2, + channels, + kernel_size, + layers=dds_conv_layers, )) + self.post_flows.append(FlipFlow()) + + if global_channels > 0: + self.global_conv = nn.Conv1D(global_channels, channels, 1) + + def forward( + self, + x: paddle.Tensor, + x_mask: paddle.Tensor, + w: Optional[paddle.Tensor]=None, + g: Optional[paddle.Tensor]=None, + inverse: bool=False, + noise_scale: float=1.0, ) -> paddle.Tensor: + """Calculate forward propagation. + Args: + x (Tensor): Input tensor (B, channels, T_text). + x_mask (Tensor): Mask tensor (B, 1, T_text). + w (Optional[Tensor]): Duration tensor (B, 1, T_text). + g (Optional[Tensor]): Global conditioning tensor (B, channels, 1) + inverse (bool): Whether to inverse the flow. + noise_scale (float): Noise scale value. + Returns: + Tensor: If not inverse, negative log-likelihood (NLL) tensor (B,). + If inverse, log-duration tensor (B, 1, T_text). + """ + # stop gradient + # x = x.detach() + x = self.pre(x) + if g is not None: + # stop gradient + x = x + self.global_conv(g.detach()) + x = self.dds(x, x_mask) + x = self.proj(x) * x_mask + + if not inverse: + assert w is not None, "w must be provided." 
+ h_w = self.post_pre(w) + h_w = self.post_dds(h_w, x_mask) + h_w = self.post_proj(h_w) * x_mask + e_q = (paddle.randn([paddle.shape(w)[0], 2, paddle.shape(w)[2]]) * + x_mask) + z_q = e_q + logdet_tot_q = 0.0 + for i, flow in enumerate(self.post_flows): + z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) + logdet_tot_q += logdet_q + z_u, z1 = paddle.split(z_q, [1, 1], 1) + u = F.sigmoid(z_u) * x_mask + z0 = (w - u) * x_mask + logdet_tot_q += paddle.sum( + (F.log_sigmoid(z_u) + F.log_sigmoid(-z_u)) * x_mask, [1, 2]) + logq = (paddle.sum(-0.5 * + (math.log(2 * math.pi) + + (e_q**2)) * x_mask, [1, 2]) - logdet_tot_q) + + logdet_tot = 0 + z0, logdet = self.log_flow(z0, x_mask) + logdet_tot += logdet + z = paddle.concat([z0, z1], 1) + for flow in self.flows: + z, logdet = flow(z, x_mask, g=x, inverse=inverse) + logdet_tot = logdet_tot + logdet + nll = (paddle.sum(0.5 * (math.log(2 * math.pi) + + (z**2)) * x_mask, [1, 2]) - logdet_tot) + # (B,) + return nll + logq + else: + flows = list(reversed(self.flows)) + # remove a useless vflow + flows = flows[:-2] + [flows[-1]] + z = (paddle.randn([paddle.shape(x)[0], 2, paddle.shape(x)[2]]) * + noise_scale) + for flow in flows: + z = flow(z, x_mask, g=x, inverse=inverse) + z0, z1 = paddle.split(z, 2, axis=1) + logw = z0 + return logw diff --git a/paddlespeech/t2s/models/vits/flow.py b/paddlespeech/t2s/models/vits/flow.py new file mode 100644 index 000000000..8726748e5 --- /dev/null +++ b/paddlespeech/t2s/models/vits/flow.py @@ -0,0 +1,316 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Basic Flow modules used in VITS. + +This code is based on https://github.com/jaywalnut310/vits. + +""" +import math +from typing import Optional +from typing import Tuple +from typing import Union + +import paddle +from paddle import nn + +from paddlespeech.t2s.models.vits.transform import piecewise_rational_quadratic_transform + + +class FlipFlow(nn.Layer): + """Flip flow module.""" + + def forward(self, x: paddle.Tensor, *args, inverse: bool=False, **kwargs + ) -> Union[paddle.Tensor, Tuple[paddle.Tensor, paddle.Tensor]]: + """Calculate forward propagation. + Args: + x (Tensor): Input tensor (B, channels, T). + inverse (bool): Whether to inverse the flow. + Returns: + Tensor: Flipped tensor (B, channels, T). + Tensor: Log-determinant tensor for NLL (B,) if not inverse. + """ + x = paddle.flip(x, [1]) + if not inverse: + logdet = paddle.zeros(paddle.shape(x)[0], dtype=x.dtype) + return x, logdet + else: + return x + + +class LogFlow(nn.Layer): + """Log flow module.""" + + def forward(self, + x: paddle.Tensor, + x_mask: paddle.Tensor, + inverse: bool=False, + eps: float=1e-5, + **kwargs + ) -> Union[paddle.Tensor, Tuple[paddle.Tensor, paddle.Tensor]]: + """Calculate forward propagation. + Args: + x (Tensor): Input tensor (B, channels, T). + x_mask (Tensor): Mask tensor (B, 1, T). + inverse (bool): Whether to inverse the flow. + eps (float): Epsilon for log. 
+ Returns: + Tensor: Output tensor (B, channels, T). + Tensor: Log-determinant tensor for NLL (B,) if not inverse. + """ + if not inverse: + y = paddle.log(paddle.clip(x, min=eps)) * x_mask + logdet = paddle.sum(-y, [1, 2]) + return y, logdet + else: + x = paddle.exp(x) * x_mask + return x + + +class ElementwiseAffineFlow(nn.Layer): + """Elementwise affine flow module.""" + + def __init__(self, channels: int): + """Initialize ElementwiseAffineFlow module. + Args: + channels (int): Number of channels. + """ + super().__init__() + self.channels = channels + + m = paddle.zeros([channels, 1]) + self.m = paddle.create_parameter( + shape=m.shape, + dtype=str(m.numpy().dtype), + default_initializer=paddle.nn.initializer.Assign(m)) + logs = paddle.zeros([channels, 1]) + self.logs = paddle.create_parameter( + shape=logs.shape, + dtype=str(logs.numpy().dtype), + default_initializer=paddle.nn.initializer.Assign(logs)) + + def forward(self, + x: paddle.Tensor, + x_mask: paddle.Tensor, + inverse: bool=False, + **kwargs + ) -> Union[paddle.Tensor, Tuple[paddle.Tensor, paddle.Tensor]]: + """Calculate forward propagation. + Args: + x (Tensor): Input tensor (B, channels, T). + x_mask (Tensor): Mask tensor (B, 1, T). + inverse (bool): Whether to inverse the flow. + Returns: + Tensor: Output tensor (B, channels, T). + Tensor: Log-determinant tensor for NLL (B,) if not inverse. + """ + if not inverse: + y = self.m + paddle.exp(self.logs) * x + y = y * x_mask + logdet = paddle.sum(self.logs * x_mask, [1, 2]) + return y, logdet + else: + x = (x - self.m) * paddle.exp(-self.logs) * x_mask + return x + + +class Transpose(nn.Layer): + """Transpose module for paddle.nn.Sequential().""" + + def __init__(self, dim1: int, dim2: int): + """Initialize Transpose module.""" + super().__init__() + self.dim1 = dim1 + self.dim2 = dim2 + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + """Transpose.""" + len_dim = len(x.shape) + orig_perm = list(range(len_dim)) + new_perm = orig_perm[:] + temp = new_perm[self.dim1] + new_perm[self.dim1] = new_perm[self.dim2] + new_perm[self.dim2] = temp + + return paddle.transpose(x, new_perm) + + +class DilatedDepthSeparableConv(nn.Layer): + """Dilated depth-separable conv module.""" + + def __init__( + self, + channels: int, + kernel_size: int, + layers: int, + dropout_rate: float=0.0, + eps: float=1e-5, ): + """Initialize DilatedDepthSeparableConv module. + Args: + channels (int): Number of channels. + kernel_size (int): Kernel size. + layers (int): Number of layers. + dropout_rate (float): Dropout rate. + eps (float): Epsilon for layer norm. + """ + super().__init__() + + self.convs = nn.LayerList() + for i in range(layers): + dilation = kernel_size**i + padding = (kernel_size * dilation - dilation) // 2 + self.convs.append( + nn.Sequential( + nn.Conv1D( + channels, + channels, + kernel_size, + groups=channels, + dilation=dilation, + padding=padding, ), + Transpose(1, 2), + nn.LayerNorm(channels, epsilon=eps), + Transpose(1, 2), + nn.GELU(), + nn.Conv1D( + channels, + channels, + 1, ), + Transpose(1, 2), + nn.LayerNorm(channels, epsilon=eps), + Transpose(1, 2), + nn.GELU(), + nn.Dropout(dropout_rate), )) + + def forward(self, + x: paddle.Tensor, + x_mask: paddle.Tensor, + g: Optional[paddle.Tensor]=None) -> paddle.Tensor: + """Calculate forward propagation. + Args: + x (Tensor): Input tensor (B, in_channels, T). + x_mask (Tensor): Mask tensor (B, 1, T). + g (Optional[Tensor]): Global conditioning tensor (B, global_channels, 1). + Returns: + Tensor: Output tensor (B, channels, T). 
+ """ + if g is not None: + x = x + g + for f in self.convs: + y = f(x * x_mask) + x = x + y + return x * x_mask + + +class ConvFlow(nn.Layer): + """Convolutional flow module.""" + + def __init__( + self, + in_channels: int, + hidden_channels: int, + kernel_size: int, + layers: int, + bins: int=10, + tail_bound: float=5.0, ): + """Initialize ConvFlow module. + Args: + in_channels (int): Number of input channels. + hidden_channels (int): Number of hidden channels. + kernel_size (int): Kernel size. + layers (int): Number of layers. + bins (int): Number of bins. + tail_bound (float): Tail bound value. + """ + super().__init__() + self.half_channels = in_channels // 2 + self.hidden_channels = hidden_channels + self.bins = bins + self.tail_bound = tail_bound + + self.input_conv = nn.Conv1D( + self.half_channels, + hidden_channels, + 1, ) + self.dds_conv = DilatedDepthSeparableConv( + hidden_channels, + kernel_size, + layers, + dropout_rate=0.0, ) + self.proj = nn.Conv1D( + hidden_channels, + self.half_channels * (bins * 3 - 1), + 1, ) + + # self.proj.weight.data.zero_() + # self.proj.bias.data.zero_() + + weight = paddle.zeros(paddle.shape(self.proj.weight)) + + self.proj.weight = paddle.create_parameter( + shape=weight.shape, + dtype=str(weight.numpy().dtype), + default_initializer=paddle.nn.initializer.Assign(weight)) + + bias = paddle.zeros(paddle.shape(self.proj.bias)) + + self.proj.bias = paddle.create_parameter( + shape=bias.shape, + dtype=str(bias.numpy().dtype), + default_initializer=paddle.nn.initializer.Assign(bias)) + + def forward( + self, + x: paddle.Tensor, + x_mask: paddle.Tensor, + g: Optional[paddle.Tensor]=None, + inverse: bool=False, + ) -> Union[paddle.Tensor, Tuple[paddle.Tensor, paddle.Tensor]]: + """Calculate forward propagation. + Args: + x (Tensor): Input tensor (B, channels, T). + x_mask (Tensor): Mask tensor (B, 1, T). + g (Optional[Tensor]): Global conditioning tensor (B, channels, 1). + inverse (bool): Whether to inverse the flow. + Returns: + Tensor: Output tensor (B, channels, T). + Tensor: Log-determinant tensor for NLL (B,) if not inverse. + """ + xa, xb = x.split(2, 1) + h = self.input_conv(xa) + h = self.dds_conv(h, x_mask, g=g) + # (B, half_channels * (bins * 3 - 1), T) + h = self.proj(h) * x_mask + + b, c, t = xa.shape + # (B, half_channels, bins * 3 - 1, T) -> (B, half_channels, T, bins * 3 - 1) + h = h.reshape([b, c, -1, t]).transpose([0, 1, 3, 2]) + + denom = math.sqrt(self.hidden_channels) + unnorm_widths = h[..., :self.bins] / denom + unnorm_heights = h[..., self.bins:2 * self.bins] / denom + unnorm_derivatives = h[..., 2 * self.bins:] + xb, logdet_abs = piecewise_rational_quadratic_transform( + xb, + unnorm_widths, + unnorm_heights, + unnorm_derivatives, + inverse=inverse, + tails="linear", + tail_bound=self.tail_bound, ) + x = paddle.concat([xa, xb], 1) * x_mask + logdet = paddle.sum(logdet_abs * x_mask, [1, 2]) + if not inverse: + return x, logdet + else: + return x diff --git a/paddlespeech/t2s/models/vits/generator.py b/paddlespeech/t2s/models/vits/generator.py new file mode 100644 index 000000000..e35f9956a --- /dev/null +++ b/paddlespeech/t2s/models/vits/generator.py @@ -0,0 +1,551 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Generator module in VITS.
+
+This code is based on https://github.com/jaywalnut310/vits.
+
+"""
+import math
+from typing import List
+from typing import Optional
+from typing import Tuple
+
+import numpy as np
+import paddle
+import paddle.nn.functional as F
+from paddle import nn
+
+from paddlespeech.t2s.models.hifigan import HiFiGANGenerator
+from paddlespeech.t2s.models.vits.duration_predictor import StochasticDurationPredictor
+from paddlespeech.t2s.models.vits.posterior_encoder import PosteriorEncoder
+from paddlespeech.t2s.models.vits.residual_coupling import ResidualAffineCouplingBlock
+from paddlespeech.t2s.models.vits.text_encoder import TextEncoder
+from paddlespeech.t2s.modules.nets_utils import get_random_segments
+from paddlespeech.t2s.modules.nets_utils import make_non_pad_mask
+
+
+class VITSGenerator(nn.Layer):
+    """Generator module in VITS.
+    This is a module of VITS described in `Conditional Variational Autoencoder
+    with Adversarial Learning for End-to-End Text-to-Speech`_.
+    As the text encoder, we use the conformer architecture, which contains
+    additional convolution layers, instead of the relative positional Transformer.
+    .. _`Conditional Variational Autoencoder with Adversarial Learning for End-to-End
+    Text-to-Speech`: https://arxiv.org/abs/2106.06103
+    """
+
+    def __init__(
+            self,
+            vocabs: int,
+            aux_channels: int=513,
+            hidden_channels: int=192,
+            spks: Optional[int]=None,
+            langs: Optional[int]=None,
+            spk_embed_dim: Optional[int]=None,
+            global_channels: int=-1,
+            segment_size: int=32,
+            text_encoder_attention_heads: int=2,
+            text_encoder_ffn_expand: int=4,
+            text_encoder_blocks: int=6,
+            text_encoder_positionwise_layer_type: str="conv1d",
+            text_encoder_positionwise_conv_kernel_size: int=1,
+            text_encoder_positional_encoding_layer_type: str="rel_pos",
+            text_encoder_self_attention_layer_type: str="rel_selfattn",
+            text_encoder_activation_type: str="swish",
+            text_encoder_normalize_before: bool=True,
+            text_encoder_dropout_rate: float=0.1,
+            text_encoder_positional_dropout_rate: float=0.0,
+            text_encoder_attention_dropout_rate: float=0.0,
+            text_encoder_conformer_kernel_size: int=7,
+            use_macaron_style_in_text_encoder: bool=True,
+            use_conformer_conv_in_text_encoder: bool=True,
+            decoder_kernel_size: int=7,
+            decoder_channels: int=512,
+            decoder_upsample_scales: List[int]=[8, 8, 2, 2],
+            decoder_upsample_kernel_sizes: List[int]=[16, 16, 4, 4],
+            decoder_resblock_kernel_sizes: List[int]=[3, 7, 11],
+            decoder_resblock_dilations: List[List[int]]=[[1, 3, 5], [1, 3, 5],
+                                                         [1, 3, 5]],
+            use_weight_norm_in_decoder: bool=True,
+            posterior_encoder_kernel_size: int=5,
+            posterior_encoder_layers: int=16,
+            posterior_encoder_stacks: int=1,
+            posterior_encoder_base_dilation: int=1,
+            posterior_encoder_dropout_rate: float=0.0,
+            use_weight_norm_in_posterior_encoder: bool=True,
+            flow_flows: int=4,
+            flow_kernel_size: int=5,
+            flow_base_dilation: int=1,
+            flow_layers: int=4,
+            flow_dropout_rate: float=0.0,
+            use_weight_norm_in_flow: bool=True,
+            use_only_mean_in_flow: bool=True,
+            stochastic_duration_predictor_kernel_size: int=3,
+            stochastic_duration_predictor_dropout_rate: float=0.5,
+            stochastic_duration_predictor_flows: int=4,
+            stochastic_duration_predictor_dds_conv_layers: int=3, ):
+        """Initialize VITS generator module.
+        Args:
+            vocabs (int): Input vocabulary size.
+            aux_channels (int): Number of acoustic feature channels.
+            hidden_channels (int): Number of hidden channels.
+            spks (Optional[int]): Number of speakers. If set to > 1, assume that the
+                sids will be provided as the input and use sid embedding layer.
+            langs (Optional[int]): Number of languages. If set to > 1, assume that the
+                lids will be provided as the input and use lid embedding layer.
+            spk_embed_dim (Optional[int]): Speaker embedding dimension. If set to > 0,
+                assume that spembs will be provided as the input.
+            global_channels (int): Number of global conditioning channels.
+            segment_size (int): Segment size for decoder.
+            text_encoder_attention_heads (int): Number of heads in conformer block
+                of text encoder.
+            text_encoder_ffn_expand (int): Expansion ratio of FFN in conformer block
+                of text encoder.
+            text_encoder_blocks (int): Number of conformer blocks in text encoder.
+            text_encoder_positionwise_layer_type (str): Position-wise layer type in
+                conformer block of text encoder.
+            text_encoder_positionwise_conv_kernel_size (int): Position-wise convolution
+                kernel size in conformer block of text encoder. Only used when the
+                above layer type is conv1d or conv1d-linear.
+            text_encoder_positional_encoding_layer_type (str): Positional encoding layer
+                type in conformer block of text encoder.
+            text_encoder_self_attention_layer_type (str): Self-attention layer type in
+                conformer block of text encoder.
+            text_encoder_activation_type (str): Activation function type in conformer
+                block of text encoder.
+            text_encoder_normalize_before (bool): Whether to apply layer norm before
+                self-attention in conformer block of text encoder.
+            text_encoder_dropout_rate (float): Dropout rate in conformer block of
+                text encoder.
+            text_encoder_positional_dropout_rate (float): Dropout rate for positional
+                encoding in conformer block of text encoder.
+            text_encoder_attention_dropout_rate (float): Dropout rate for attention in
+                conformer block of text encoder.
+            text_encoder_conformer_kernel_size (int): Conformer conv kernel size. Only
+                used when use_conformer_conv_in_text_encoder = True.
+            use_macaron_style_in_text_encoder (bool): Whether to use macaron style FFN
+                in conformer block of text encoder.
+            use_conformer_conv_in_text_encoder (bool): Whether to use convolution in
+                conformer block of text encoder.
+            decoder_kernel_size (int): Decoder kernel size.
+            decoder_channels (int): Number of decoder initial channels.
+            decoder_upsample_scales (List[int]): List of upsampling scales in decoder.
+            decoder_upsample_kernel_sizes (List[int]): List of kernel sizes for
+                upsampling layers in decoder.
+            decoder_resblock_kernel_sizes (List[int]): List of kernel sizes for resblocks
+                in decoder.
+            decoder_resblock_dilations (List[List[int]]): List of lists of dilations for
+                resblocks in decoder.
+            use_weight_norm_in_decoder (bool): Whether to apply weight normalization in
+                decoder.
+            posterior_encoder_kernel_size (int): Posterior encoder kernel size.
+            posterior_encoder_layers (int): Number of layers of posterior encoder.
+            posterior_encoder_stacks (int): Number of stacks of posterior encoder.
+            posterior_encoder_base_dilation (int): Base dilation of posterior encoder.
+            posterior_encoder_dropout_rate (float): Dropout rate for posterior encoder.
+ use_weight_norm_in_posterior_encoder (bool): Whether to apply weight + normalization in posterior encoder. + flow_flows (int): Number of flows in flow. + flow_kernel_size (int): Kernel size in flow. + flow_base_dilation (int): Base dilation in flow. + flow_layers (int): Number of layers in flow. + flow_dropout_rate (float): Dropout rate in flow + use_weight_norm_in_flow (bool): Whether to apply weight normalization in + flow. + use_only_mean_in_flow (bool): Whether to use only mean in flow. + stochastic_duration_predictor_kernel_size (int): Kernel size in stochastic + duration predictor. + stochastic_duration_predictor_dropout_rate (float): Dropout rate in + stochastic duration predictor. + stochastic_duration_predictor_flows (int): Number of flows in stochastic + duration predictor. + stochastic_duration_predictor_dds_conv_layers (int): Number of DDS conv + layers in stochastic duration predictor. + """ + super().__init__() + self.segment_size = segment_size + self.text_encoder = TextEncoder( + vocabs=vocabs, + attention_dim=hidden_channels, + attention_heads=text_encoder_attention_heads, + linear_units=hidden_channels * text_encoder_ffn_expand, + blocks=text_encoder_blocks, + positionwise_layer_type=text_encoder_positionwise_layer_type, + positionwise_conv_kernel_size=text_encoder_positionwise_conv_kernel_size, + positional_encoding_layer_type=text_encoder_positional_encoding_layer_type, + self_attention_layer_type=text_encoder_self_attention_layer_type, + activation_type=text_encoder_activation_type, + normalize_before=text_encoder_normalize_before, + dropout_rate=text_encoder_dropout_rate, + positional_dropout_rate=text_encoder_positional_dropout_rate, + attention_dropout_rate=text_encoder_attention_dropout_rate, + conformer_kernel_size=text_encoder_conformer_kernel_size, + use_macaron_style=use_macaron_style_in_text_encoder, + use_conformer_conv=use_conformer_conv_in_text_encoder, ) + self.decoder = HiFiGANGenerator( + in_channels=hidden_channels, + out_channels=1, + channels=decoder_channels, + global_channels=global_channels, + kernel_size=decoder_kernel_size, + upsample_scales=decoder_upsample_scales, + upsample_kernel_sizes=decoder_upsample_kernel_sizes, + resblock_kernel_sizes=decoder_resblock_kernel_sizes, + resblock_dilations=decoder_resblock_dilations, + use_weight_norm=use_weight_norm_in_decoder, ) + self.posterior_encoder = PosteriorEncoder( + in_channels=aux_channels, + out_channels=hidden_channels, + hidden_channels=hidden_channels, + kernel_size=posterior_encoder_kernel_size, + layers=posterior_encoder_layers, + stacks=posterior_encoder_stacks, + base_dilation=posterior_encoder_base_dilation, + global_channels=global_channels, + dropout_rate=posterior_encoder_dropout_rate, + use_weight_norm=use_weight_norm_in_posterior_encoder, ) + self.flow = ResidualAffineCouplingBlock( + in_channels=hidden_channels, + hidden_channels=hidden_channels, + flows=flow_flows, + kernel_size=flow_kernel_size, + base_dilation=flow_base_dilation, + layers=flow_layers, + global_channels=global_channels, + dropout_rate=flow_dropout_rate, + use_weight_norm=use_weight_norm_in_flow, + use_only_mean=use_only_mean_in_flow, ) + # TODO: Add deterministic version as an option + self.duration_predictor = StochasticDurationPredictor( + channels=hidden_channels, + kernel_size=stochastic_duration_predictor_kernel_size, + dropout_rate=stochastic_duration_predictor_dropout_rate, + flows=stochastic_duration_predictor_flows, + dds_conv_layers=stochastic_duration_predictor_dds_conv_layers, + 
global_channels=global_channels, ) + + self.upsample_factor = int(np.prod(decoder_upsample_scales)) + self.spks = None + if spks is not None and spks > 1: + assert global_channels > 0 + self.spks = spks + self.global_emb = nn.Embedding(spks, global_channels) + self.spk_embed_dim = None + if spk_embed_dim is not None and spk_embed_dim > 0: + assert global_channels > 0 + self.spk_embed_dim = spk_embed_dim + self.spemb_proj = nn.Linear(spk_embed_dim, global_channels) + self.langs = None + if langs is not None and langs > 1: + assert global_channels > 0 + self.langs = langs + self.lang_emb = nn.Embedding(langs, global_channels) + + # delayed import + from paddlespeech.t2s.models.vits.monotonic_align import maximum_path + + self.maximum_path = maximum_path + + def forward( + self, + text: paddle.Tensor, + text_lengths: paddle.Tensor, + feats: paddle.Tensor, + feats_lengths: paddle.Tensor, + sids: Optional[paddle.Tensor]=None, + spembs: Optional[paddle.Tensor]=None, + lids: Optional[paddle.Tensor]=None, + ) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor, paddle.Tensor, + paddle.Tensor, paddle.Tensor, + Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor, paddle.Tensor, + paddle.Tensor, paddle.Tensor, ], ]: + """Calculate forward propagation. + Args: + text (Tensor): Text index tensor (B, T_text). + text_lengths (Tensor): Text length tensor (B,). + feats (Tensor): Feature tensor (B, aux_channels, T_feats). + feats_lengths (Tensor): Feature length tensor (B,). + sids (Optional[Tensor]): Speaker index tensor (B,) or (B, 1). + spembs (Optional[Tensor]): Speaker embedding tensor (B, spk_embed_dim). + lids (Optional[Tensor]): Language index tensor (B,) or (B, 1). + Returns: + Tensor: Waveform tensor (B, 1, segment_size * upsample_factor). + Tensor: Duration negative log-likelihood (NLL) tensor (B,). + Tensor: Monotonic attention weight tensor (B, 1, T_feats, T_text). + Tensor: Segments start index tensor (B,). + Tensor: Text mask tensor (B, 1, T_text). + Tensor: Feature mask tensor (B, 1, T_feats). + tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]: + - Tensor: Posterior encoder hidden representation (B, H, T_feats). + - Tensor: Flow hidden representation (B, H, T_feats). + - Tensor: Expanded text encoder projected mean (B, H, T_feats). + - Tensor: Expanded text encoder projected scale (B, H, T_feats). + - Tensor: Posterior encoder projected mean (B, H, T_feats). + - Tensor: Posterior encoder projected scale (B, H, T_feats). 
+        """
+        # forward text encoder
+        x, m_p, logs_p, x_mask = self.text_encoder(text, text_lengths)
+
+        # calculate global conditioning
+        g = None
+        if self.spks is not None:
+            # speaker one-hot vector embedding: (B, global_channels, 1)
+            g = self.global_emb(paddle.reshape(sids, [-1])).unsqueeze(-1)
+        if self.spk_embed_dim is not None:
+            # pretrained speaker embedding, e.g., X-vector (B, global_channels, 1)
+            g_ = self.spemb_proj(F.normalize(spembs)).unsqueeze(-1)
+            if g is None:
+                g = g_
+            else:
+                g = g + g_
+        if self.langs is not None:
+            # language one-hot vector embedding: (B, global_channels, 1)
+            g_ = self.lang_emb(paddle.reshape(lids, [-1])).unsqueeze(-1)
+            if g is None:
+                g = g_
+            else:
+                g = g + g_
+
+        # forward posterior encoder
+        z, m_q, logs_q, y_mask = self.posterior_encoder(
+            feats, feats_lengths, g=g)
+
+        # forward flow
+        # (B, H, T_feats)
+        z_p = self.flow(z, y_mask, g=g)
+
+        # monotonic alignment search
+        with paddle.no_grad():
+            # negative cross-entropy
+            # (B, H, T_text)
+            s_p_sq_r = paddle.exp(-2 * logs_p)
+            # (B, 1, T_text)
+            neg_x_ent_1 = paddle.sum(
+                -0.5 * math.log(2 * math.pi) - logs_p,
+                [1],
+                keepdim=True, )
+            # (B, T_feats, H) x (B, H, T_text) = (B, T_feats, T_text)
+            neg_x_ent_2 = paddle.matmul(
+                -0.5 * (z_p**2).transpose([0, 2, 1]),
+                s_p_sq_r, )
+            # (B, T_feats, H) x (B, H, T_text) = (B, T_feats, T_text)
+            neg_x_ent_3 = paddle.matmul(
+                z_p.transpose([0, 2, 1]),
+                (m_p * s_p_sq_r), )
+            # (B, 1, T_text)
+            neg_x_ent_4 = paddle.sum(
+                -0.5 * (m_p**2) * s_p_sq_r,
+                [1],
+                keepdim=True, )
+            # (B, T_feats, T_text)
+            neg_x_ent = neg_x_ent_1 + neg_x_ent_2 + neg_x_ent_3 + neg_x_ent_4
+            # (B, 1, T_feats, T_text)
+            attn_mask = paddle.unsqueeze(x_mask, 2) * paddle.unsqueeze(y_mask,
+                                                                       -1)
+            # monotonic attention weight: (B, 1, T_feats, T_text)
+            attn = (self.maximum_path(
+                neg_x_ent,
+                attn_mask.squeeze(1), ).unsqueeze(1).detach())
+
+        # forward duration predictor
+        # (B, 1, T_text)
+        w = attn.sum(2)
+        dur_nll = self.duration_predictor(x, x_mask, w=w, g=g)
+        dur_nll = dur_nll / paddle.sum(x_mask)
+
+        # expand the length to match with the feature sequence
+        # (B, T_feats, T_text) x (B, T_text, H) -> (B, H, T_feats)
+        m_p = paddle.matmul(attn.squeeze(1),
+                            m_p.transpose([0, 2, 1])).transpose([0, 2, 1])
+        # (B, T_feats, T_text) x (B, T_text, H) -> (B, H, T_feats)
+        logs_p = paddle.matmul(attn.squeeze(1),
+                               logs_p.transpose([0, 2, 1])).transpose([0, 2, 1])
+
+        # get random segments
+        z_segments, z_start_idxs = get_random_segments(
+            z,
+            feats_lengths,
+            self.segment_size, )
+
+        # forward decoder with random segments
+        wav = self.decoder(z_segments, g=g)
+
+        return (wav, dur_nll, attn, z_start_idxs, x_mask, y_mask,
+                (z, z_p, m_p, logs_p, m_q, logs_q), )
+
+    def inference(
+            self,
+            text: paddle.Tensor,
+            text_lengths: paddle.Tensor,
+            feats: Optional[paddle.Tensor]=None,
+            feats_lengths: Optional[paddle.Tensor]=None,
+            sids: Optional[paddle.Tensor]=None,
+            spembs: Optional[paddle.Tensor]=None,
+            lids: Optional[paddle.Tensor]=None,
+            dur: Optional[paddle.Tensor]=None,
+            noise_scale: float=0.667,
+            noise_scale_dur: float=0.8,
+            alpha: float=1.0,
+            max_len: Optional[int]=None,
+            use_teacher_forcing: bool=False,
+    ) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor]:
+        """Run inference.
+        Args:
+            text (Tensor): Input text index tensor (B, T_text,).
+            text_lengths (Tensor): Text length tensor (B,).
+            feats (Tensor): Feature tensor (B, aux_channels, T_feats,).
+            feats_lengths (Tensor): Feature length tensor (B,).
+            sids (Optional[Tensor]): Speaker index tensor (B,) or (B, 1).
+            spembs (Optional[Tensor]): Speaker embedding tensor (B, spk_embed_dim).
+            lids (Optional[Tensor]): Language index tensor (B,) or (B, 1).
+            dur (Optional[Tensor]): Ground-truth duration (B, T_text,). If provided,
+                skip the prediction of durations (i.e., teacher forcing).
+            noise_scale (float): Noise scale parameter for flow.
+            noise_scale_dur (float): Noise scale parameter for duration predictor.
+            alpha (float): Alpha parameter to control the speed of generated speech.
+            max_len (Optional[int]): Maximum length of acoustic feature sequence.
+            use_teacher_forcing (bool): Whether to use teacher forcing.
+        Returns:
+            Tensor: Generated waveform tensor (B, T_wav).
+            Tensor: Monotonic attention weight tensor (B, T_feats, T_text).
+            Tensor: Duration tensor (B, T_text).
+        """
+        # encoder
+        x, m_p, logs_p, x_mask = self.text_encoder(text, text_lengths)
+        g = None
+        if self.spks is not None:
+            # (B, global_channels, 1)
+            g = self.global_emb(paddle.reshape(sids, [-1])).unsqueeze(-1)
+        if self.spk_embed_dim is not None:
+            # (B, global_channels, 1)
+            g_ = self.spemb_proj(F.normalize(spembs.unsqueeze(0))).unsqueeze(-1)
+            if g is None:
+                g = g_
+            else:
+                g = g + g_
+        if self.langs is not None:
+            # (B, global_channels, 1)
+            g_ = self.lang_emb(paddle.reshape(lids, [-1])).unsqueeze(-1)
+            if g is None:
+                g = g_
+            else:
+                g = g + g_
+
+        if use_teacher_forcing:
+            # forward posterior encoder
+            z, m_q, logs_q, y_mask = self.posterior_encoder(
+                feats, feats_lengths, g=g)
+
+            # forward flow
+            # (B, H, T_feats)
+            z_p = self.flow(z, y_mask, g=g)
+
+            # monotonic alignment search
+            # (B, H, T_text)
+            s_p_sq_r = paddle.exp(-2 * logs_p)
+            # (B, 1, T_text)
+            neg_x_ent_1 = paddle.sum(
+                -0.5 * math.log(2 * math.pi) - logs_p,
+                [1],
+                keepdim=True, )
+            # (B, T_feats, H) x (B, H, T_text) = (B, T_feats, T_text)
+            neg_x_ent_2 = paddle.matmul(
+                -0.5 * (z_p**2).transpose([0, 2, 1]),
+                s_p_sq_r, )
+            # (B, T_feats, H) x (B, H, T_text) = (B, T_feats, T_text)
+            neg_x_ent_3 = paddle.matmul(
+                z_p.transpose([0, 2, 1]),
+                (m_p * s_p_sq_r), )
+            # (B, 1, T_text)
+            neg_x_ent_4 = paddle.sum(
+                -0.5 * (m_p**2) * s_p_sq_r,
+                [1],
+                keepdim=True, )
+            # (B, T_feats, T_text)
+            neg_x_ent = neg_x_ent_1 + neg_x_ent_2 + neg_x_ent_3 + neg_x_ent_4
+            # (B, 1, T_feats, T_text)
+            attn_mask = paddle.unsqueeze(x_mask, 2) * paddle.unsqueeze(y_mask,
+                                                                       -1)
+            # monotonic attention weight: (B, 1, T_feats, T_text)
+            attn = self.maximum_path(
+                neg_x_ent,
+                attn_mask.squeeze(1), ).unsqueeze(1)
+            # (B, 1, T_text)
+            dur = attn.sum(2)
+
+            # forward decoder with the whole sequence
+            wav = self.decoder(z * y_mask, g=g)
+        else:
+            # duration
+            if dur is None:
+                logw = self.duration_predictor(
+                    x,
+                    x_mask,
+                    g=g,
+                    inverse=True,
+                    noise_scale=noise_scale_dur, )
+                w = paddle.exp(logw) * x_mask * alpha
+                dur = paddle.ceil(w)
+            y_lengths = paddle.cast(
+                paddle.clip(paddle.sum(dur, [1, 2]), min=1), dtype='int64')
+            y_mask = make_non_pad_mask(y_lengths).unsqueeze(1)
+            attn_mask = paddle.unsqueeze(x_mask, 2) * paddle.unsqueeze(y_mask,
+                                                                       -1)
+            attn = self._generate_path(dur, attn_mask)
+
+            # expand the length to match with the feature sequence
+            # (B, T_feats, T_text) x (B, T_text, H) -> (B, H, T_feats)
+            m_p = paddle.matmul(
+                attn.squeeze(1),
+                m_p.transpose([0, 2, 1]), ).transpose([0, 2, 1])
+            # (B, T_feats, T_text) x (B, T_text, H) -> (B, H, T_feats)
+            logs_p = paddle.matmul(
+                attn.squeeze(1),
+                logs_p.transpose([0, 2, 1]), ).transpose([0, 2, 1])
+
+            # decoder
+            z_p = m_p + paddle.randn(
+                paddle.shape(m_p)) * paddle.exp(logs_p) * noise_scale
+            z = self.flow(z_p, y_mask, g=g, inverse=True)
+            wav = self.decoder((z * y_mask)[:, :, :max_len], g=g)
+
+        return wav.squeeze(1), attn.squeeze(1), dur.squeeze(1)
+
+    def _generate_path(self, dur: paddle.Tensor,
+                       mask: paddle.Tensor) -> paddle.Tensor:
+        """Generate path a.k.a. monotonic attention.
+        Args:
+            dur (Tensor): Duration tensor (B, 1, T_text).
+            mask (Tensor): Attention mask tensor (B, 1, T_feats, T_text).
+        Returns:
+            Tensor: Path tensor (B, 1, T_feats, T_text).
+        """
+        b, _, t_y, t_x = paddle.shape(mask)
+        cum_dur = paddle.cumsum(dur, -1)
+        cum_dur_flat = paddle.reshape(cum_dur, [b * t_x])
+
+        path = paddle.arange(t_y, dtype=dur.dtype)
+        path = path.unsqueeze(0) < cum_dur_flat.unsqueeze(1)
+        path = paddle.reshape(path, [b, t_x, t_y])
+        '''
+        path will be like (t_x = 3, t_y = 5):
+        [[[1., 1., 0., 0., 0.],      [[[1., 1., 0., 0., 0.],
+          [1., 1., 1., 1., 0.],  -->   [0., 0., 1., 1., 0.],
+          [1., 1., 1., 1., 1.]]]       [0., 0., 0., 0., 1.]]]
+        '''
+        path = paddle.cast(path, dtype='float32')
+        path = path - F.pad(path, [0, 0, 1, 0, 0, 0])[:, :-1]
+        return path.unsqueeze(1).transpose([0, 1, 3, 2]) * mask
diff --git a/paddlespeech/t2s/models/vits/monotonic_align/__init__.py b/paddlespeech/t2s/models/vits/monotonic_align/__init__.py
new file mode 100644
index 000000000..3aa47ed72
--- /dev/null
+++ b/paddlespeech/t2s/models/vits/monotonic_align/__init__.py
@@ -0,0 +1,94 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Maximum path calculation module.
+
+This code is based on https://github.com/jaywalnut310/vits.
+
+"""
+import warnings
+
+import numpy as np
+import paddle
+from numba import njit
+from numba import prange
+
+try:
+    from .core import maximum_path_c
+
+    is_cython_available = True
+except ImportError:
+    is_cython_available = False
+    warnings.warn(
+        "Cython version is not available. Falling back to 'EXPERIMENTAL' numba version. "
+        "If you want to use the cython version, please build it as follows: "
+        "`cd paddlespeech/t2s/models/vits/monotonic_align; python setup.py build_ext --inplace`"
+    )
+
+
+def maximum_path(neg_x_ent: paddle.Tensor,
+                 attn_mask: paddle.Tensor) -> paddle.Tensor:
+    """Calculate maximum path.
+
+    Args:
+        neg_x_ent (Tensor): Negative X entropy tensor (B, T_feats, T_text).
+        attn_mask (Tensor): Attention mask (B, T_feats, T_text).
+
+    Returns:
+        Tensor: Maximum path tensor (B, T_feats, T_text).
+ + """ + dtype = neg_x_ent.dtype + neg_x_ent = neg_x_ent.numpy().astype(np.float32) + path = np.zeros(neg_x_ent.shape, dtype=np.int32) + t_t_max = attn_mask.sum(1)[:, 0].cpu().numpy().astype(np.int32) + t_s_max = attn_mask.sum(2)[:, 0].cpu().numpy().astype(np.int32) + if is_cython_avalable: + maximum_path_c(path, neg_x_ent, t_t_max, t_s_max) + else: + maximum_path_numba(path, neg_x_ent, t_t_max, t_s_max) + + return paddle.cast(paddle.to_tensor(path), dtype=dtype) + + +@njit +def maximum_path_each_numba(path, value, t_y, t_x, max_neg_val=-np.inf): + """Calculate a single maximum path with numba.""" + index = t_x - 1 + for y in range(t_y): + for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): + if x == y: + v_cur = max_neg_val + else: + v_cur = value[y - 1, x] + if x == 0: + if y == 0: + v_prev = 0.0 + else: + v_prev = max_neg_val + else: + v_prev = value[y - 1, x - 1] + value[y, x] += max(v_prev, v_cur) + + for y in range(t_y - 1, -1, -1): + path[y, index] = 1 + if index != 0 and (index == y or + value[y - 1, index] < value[y - 1, index - 1]): + index = index - 1 + + +@njit(parallel=True) +def maximum_path_numba(paths, values, t_ys, t_xs): + """Calculate batch maximum path with numba.""" + for i in prange(paths.shape[0]): + maximum_path_each_numba(paths[i], values[i], t_ys[i], t_xs[i]) diff --git a/paddlespeech/t2s/models/vits/monotonic_align/core.pyx b/paddlespeech/t2s/models/vits/monotonic_align/core.pyx new file mode 100644 index 000000000..5a573dc74 --- /dev/null +++ b/paddlespeech/t2s/models/vits/monotonic_align/core.pyx @@ -0,0 +1,62 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Maximum path calculation module with cython optimization. + +This code is copied from https://github.com/jaywalnut310/vits and modifed code format. 
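+
+To build the extension in place so that this Cython implementation can be
+imported (otherwise the numba fallback is used), run::
+
+    cd paddlespeech/t2s/models/vits/monotonic_align
+    python setup.py build_ext --inplace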
+ +""" + +cimport cython + +from cython.parallel import prange + + +@cython.boundscheck(False) +@cython.wraparound(False) +cdef void maximum_path_each(int[:, ::1] path, float[:, ::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: + cdef int x + cdef int y + cdef float v_prev + cdef float v_cur + cdef float tmp + cdef int index = t_x - 1 + + for y in range(t_y): + for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): + if x == y: + v_cur = max_neg_val + else: + v_cur = value[y - 1, x] + if x == 0: + if y == 0: + v_prev = 0.0 + else: + v_prev = max_neg_val + else: + v_prev = value[y - 1, x - 1] + value[y, x] += max(v_prev, v_cur) + + for y in range(t_y - 1, -1, -1): + path[y, index] = 1 + if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]): + index = index - 1 + + +@cython.boundscheck(False) +@cython.wraparound(False) +cpdef void maximum_path_c(int[:, :, ::1] paths, float[:, :, ::1] values, int[::1] t_ys, int[::1] t_xs) nogil: + cdef int b = paths.shape[0] + cdef int i + for i in prange(b, nogil=True): + maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) diff --git a/paddlespeech/t2s/models/vits/monotonic_align/setup.py b/paddlespeech/t2s/models/vits/monotonic_align/setup.py new file mode 100644 index 000000000..8df03ab12 --- /dev/null +++ b/paddlespeech/t2s/models/vits/monotonic_align/setup.py @@ -0,0 +1,39 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Setup cython code.""" +from Cython.Build import cythonize +from setuptools import Extension +from setuptools import setup +from setuptools.command.build_ext import build_ext as _build_ext + + +class build_ext(_build_ext): + """Overwrite build_ext.""" + + def finalize_options(self): + """Prevent numpy from thinking it is still in its setup process.""" + _build_ext.finalize_options(self) + __builtins__.__NUMPY_SETUP__ = False + import numpy + + self.include_dirs.append(numpy.get_include()) + + +exts = [Extension( + name="core", + sources=["core.pyx"], )] +setup( + name="monotonic_align", + ext_modules=cythonize(exts, language_level=3), + cmdclass={"build_ext": build_ext}, ) diff --git a/paddlespeech/t2s/models/vits/posterior_encoder.py b/paddlespeech/t2s/models/vits/posterior_encoder.py new file mode 100644 index 000000000..853237557 --- /dev/null +++ b/paddlespeech/t2s/models/vits/posterior_encoder.py @@ -0,0 +1,120 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Text encoder module in VITS. + +This code is based on https://github.com/jaywalnut310/vits. + +""" +from typing import Optional +from typing import Tuple + +import paddle +from paddle import nn + +from paddlespeech.t2s.models.vits.wavenet.wavenet import WaveNet +from paddlespeech.t2s.modules.nets_utils import make_non_pad_mask + + +class PosteriorEncoder(nn.Layer): + """Posterior encoder module in VITS. + + This is a module of posterior encoder described in `Conditional Variational + Autoencoder with Adversarial Learning for End-to-End Text-to-Speech`_. + + .. _`Conditional Variational Autoencoder with Adversarial Learning for End-to-End + Text-to-Speech`: https://arxiv.org/abs/2006.04558 + """ + + def __init__( + self, + in_channels: int=513, + out_channels: int=192, + hidden_channels: int=192, + kernel_size: int=5, + layers: int=16, + stacks: int=1, + base_dilation: int=1, + global_channels: int=-1, + dropout_rate: float=0.0, + bias: bool=True, + use_weight_norm: bool=True, ): + """Initilialize PosteriorEncoder module. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + hidden_channels (int): Number of hidden channels. + kernel_size (int): Kernel size in WaveNet. + layers (int): Number of layers of WaveNet. + stacks (int): Number of repeat stacking of WaveNet. + base_dilation (int): Base dilation factor. + global_channels (int): Number of global conditioning channels. + dropout_rate (float): Dropout rate. + bias (bool): Whether to use bias parameters in conv. + use_weight_norm (bool): Whether to apply weight norm. + + """ + super().__init__() + + # define modules + self.input_conv = nn.Conv1D(in_channels, hidden_channels, 1) + self.encoder = WaveNet( + in_channels=-1, + out_channels=-1, + kernel_size=kernel_size, + layers=layers, + stacks=stacks, + base_dilation=base_dilation, + residual_channels=hidden_channels, + aux_channels=-1, + gate_channels=hidden_channels * 2, + skip_channels=hidden_channels, + global_channels=global_channels, + dropout_rate=dropout_rate, + bias=bias, + use_weight_norm=use_weight_norm, + use_first_conv=False, + use_last_conv=False, + scale_residual=False, + scale_skip_connect=True, ) + self.proj = nn.Conv1D(hidden_channels, out_channels * 2, 1) + + def forward( + self, + x: paddle.Tensor, + x_lengths: paddle.Tensor, + g: Optional[paddle.Tensor]=None + ) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor, paddle.Tensor]: + """Calculate forward propagation. + + Args: + x (Tensor): Input tensor (B, in_channels, T_feats). + x_lengths (Tensor): Length tensor (B,). + g (Optional[Tensor]): Global conditioning tensor (B, global_channels, 1). + + Returns: + Tensor: Encoded hidden representation tensor (B, out_channels, T_feats). + Tensor: Projected mean tensor (B, out_channels, T_feats). + Tensor: Projected scale tensor (B, out_channels, T_feats). + Tensor: Mask tensor for input tensor (B, 1, T_feats). + + """ + x_mask = make_non_pad_mask(x_lengths).unsqueeze(1) + x = self.input_conv(x) * x_mask + x = self.encoder(x, x_mask, g=g) + stats = self.proj(x) * x_mask + m, logs = paddle.split(stats, 2, axis=1) + z = (m + paddle.randn(paddle.shape(m)) * paddle.exp(logs)) * x_mask + + return z, m, logs, x_mask diff --git a/paddlespeech/t2s/models/vits/residual_coupling.py b/paddlespeech/t2s/models/vits/residual_coupling.py new file mode 100644 index 000000000..8671462d8 --- /dev/null +++ b/paddlespeech/t2s/models/vits/residual_coupling.py @@ -0,0 +1,244 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Residual affine coupling modules in VITS.

+This code is based on https://github.com/jaywalnut310/vits.
+
+"""
+from typing import Optional
+from typing import Tuple
+from typing import Union
+
+import paddle
+from paddle import nn
+
+from paddlespeech.t2s.models.vits.flow import FlipFlow
+from paddlespeech.t2s.models.vits.wavenet.wavenet import WaveNet
+
+
+class ResidualAffineCouplingBlock(nn.Layer):
+    """Residual affine coupling block module.
+
+    This is a module of residual affine coupling block, which is used as "Flow" in
+    `Conditional Variational Autoencoder with Adversarial Learning for End-to-End
+    Text-to-Speech`_.
+
+    .. _`Conditional Variational Autoencoder with Adversarial Learning for End-to-End
+        Text-to-Speech`: https://arxiv.org/abs/2006.04558
+
+    """
+
+    def __init__(
+            self,
+            in_channels: int=192,
+            hidden_channels: int=192,
+            flows: int=4,
+            kernel_size: int=5,
+            base_dilation: int=1,
+            layers: int=4,
+            global_channels: int=-1,
+            dropout_rate: float=0.0,
+            use_weight_norm: bool=True,
+            bias: bool=True,
+            use_only_mean: bool=True, ):
+        """Initialize ResidualAffineCouplingBlock module.
+
+        Args:
+            in_channels (int): Number of input channels.
+            hidden_channels (int): Number of hidden channels.
+            flows (int): Number of flows.
+            kernel_size (int): Kernel size for WaveNet.
+            base_dilation (int): Base dilation factor for WaveNet.
+            layers (int): Number of layers of WaveNet.
+            global_channels (int): Number of global channels.
+            dropout_rate (float): Dropout rate.
+            use_weight_norm (bool): Whether to use weight normalization in WaveNet.
+            bias (bool): Whether to use bias parameters in WaveNet.
+            use_only_mean (bool): Whether to estimate only mean.
+
+        """
+        super().__init__()
+
+        self.flows = nn.LayerList()
+        for i in range(flows):
+            self.flows.append(
+                ResidualAffineCouplingLayer(
+                    in_channels=in_channels,
+                    hidden_channels=hidden_channels,
+                    kernel_size=kernel_size,
+                    base_dilation=base_dilation,
+                    layers=layers,
+                    stacks=1,
+                    global_channels=global_channels,
+                    dropout_rate=dropout_rate,
+                    use_weight_norm=use_weight_norm,
+                    bias=bias,
+                    use_only_mean=use_only_mean, ))
+            self.flows.append(FlipFlow())
+
+    def forward(
+            self,
+            x: paddle.Tensor,
+            x_mask: paddle.Tensor,
+            g: Optional[paddle.Tensor]=None,
+            inverse: bool=False, ) -> paddle.Tensor:
+        """Calculate forward propagation.
+
+        Args:
+            x (Tensor): Input tensor (B, in_channels, T).
+            x_mask (Tensor): Mask tensor (B, 1, T).
+            g (Optional[Tensor]): Global conditioning tensor (B, global_channels, 1).
+            inverse (bool): Whether to invert the flow.
+
+        Returns:
+            Tensor: Output tensor (B, in_channels, T).
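+
+        Note:
+            When ``inverse=False`` the flows run in registration order and the
+            per-flow log-determinants are discarded; when ``inverse=True`` the
+            flows run in reversed order, so the block inverts its own forward
+            pass.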
+
+        """
+        if not inverse:
+            for flow in self.flows:
+                x, _ = flow(x, x_mask, g=g, inverse=inverse)
+        else:
+            for flow in reversed(self.flows):
+                x = flow(x, x_mask, g=g, inverse=inverse)
+        return x
+
+
+class ResidualAffineCouplingLayer(nn.Layer):
+    """Residual affine coupling layer."""
+
+    def __init__(
+            self,
+            in_channels: int=192,
+            hidden_channels: int=192,
+            kernel_size: int=5,
+            base_dilation: int=1,
+            layers: int=5,
+            stacks: int=1,
+            global_channels: int=-1,
+            dropout_rate: float=0.0,
+            use_weight_norm: bool=True,
+            bias: bool=True,
+            use_only_mean: bool=True, ):
+        """Initialize ResidualAffineCouplingLayer module.
+
+        Args:
+            in_channels (int): Number of input channels.
+            hidden_channels (int): Number of hidden channels.
+            kernel_size (int): Kernel size for WaveNet.
+            base_dilation (int): Base dilation factor for WaveNet.
+            layers (int): Number of layers of WaveNet.
+            stacks (int): Number of stacks of WaveNet.
+            global_channels (int): Number of global channels.
+            dropout_rate (float): Dropout rate.
+            use_weight_norm (bool): Whether to use weight normalization in WaveNet.
+            bias (bool): Whether to use bias parameters in WaveNet.
+            use_only_mean (bool): Whether to estimate only mean.
+
+        """
+        assert in_channels % 2 == 0, "in_channels should be divisible by 2"
+        super().__init__()
+        self.half_channels = in_channels // 2
+        self.use_only_mean = use_only_mean
+
+        # define modules
+        self.input_conv = nn.Conv1D(
+            self.half_channels,
+            hidden_channels,
+            1, )
+        self.encoder = WaveNet(
+            in_channels=-1,
+            out_channels=-1,
+            kernel_size=kernel_size,
+            layers=layers,
+            stacks=stacks,
+            base_dilation=base_dilation,
+            residual_channels=hidden_channels,
+            aux_channels=-1,
+            gate_channels=hidden_channels * 2,
+            skip_channels=hidden_channels,
+            global_channels=global_channels,
+            dropout_rate=dropout_rate,
+            bias=bias,
+            use_weight_norm=use_weight_norm,
+            use_first_conv=False,
+            use_last_conv=False,
+            scale_residual=False,
+            scale_skip_connect=True, )
+        if use_only_mean:
+            self.proj = nn.Conv1D(
+                hidden_channels,
+                self.half_channels,
+                1, )
+        else:
+            self.proj = nn.Conv1D(
+                hidden_channels,
+                self.half_channels * 2,
+                1, )
+        # self.proj.weight.data.zero_()
+        # self.proj.bias.data.zero_()
+
+        weight = paddle.zeros(paddle.shape(self.proj.weight))
+
+        self.proj.weight = paddle.create_parameter(
+            shape=weight.shape,
+            dtype=str(weight.numpy().dtype),
+            default_initializer=paddle.nn.initializer.Assign(weight))
+
+        bias = paddle.zeros(paddle.shape(self.proj.bias))
+
+        self.proj.bias = paddle.create_parameter(
+            shape=bias.shape,
+            dtype=str(bias.numpy().dtype),
+            default_initializer=paddle.nn.initializer.Assign(bias))
+
+    def forward(
+            self,
+            x: paddle.Tensor,
+            x_mask: paddle.Tensor,
+            g: Optional[paddle.Tensor]=None,
+            inverse: bool=False,
+    ) -> Union[paddle.Tensor, Tuple[paddle.Tensor, paddle.Tensor]]:
+        """Calculate forward propagation.
+
+        Args:
+            x (Tensor): Input tensor (B, in_channels, T).
+            x_mask (Tensor): Mask tensor (B, 1, T).
+            g (Optional[Tensor]): Global conditioning tensor (B, global_channels, 1).
+            inverse (bool): Whether to invert the flow.
+
+        Returns:
+            Tensor: Output tensor (B, in_channels, T).
+            Tensor: Log-determinant tensor for NLL (B,) if not inverse.
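+
+        Note:
+            With the input split into halves ``(xa, xb)``, the forward map is
+            ``xb' = m + xb * exp(logs)`` (hence ``logdet = sum(logs)``) and the
+            inverse map is ``xb = (xb' - m) * exp(-logs)``; ``xa`` passes
+            through unchanged.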
+ + """ + xa, xb = paddle.split(x, 2, axis=1) + h = self.input_conv(xa) * x_mask + h = self.encoder(h, x_mask, g=g) + stats = self.proj(h) * x_mask + if not self.use_only_mean: + m, logs = paddle.split(stats, 2, axis=1) + else: + m = stats + logs = paddle.zeros(paddle.shape(m)) + + if not inverse: + xb = m + xb * paddle.exp(logs) * x_mask + x = paddle.concat([xa, xb], 1) + logdet = paddle.sum(logs, [1, 2]) + return x, logdet + else: + xb = (xb - m) * paddle.exp(-logs) * x_mask + x = paddle.concat([xa, xb], 1) + return x diff --git a/paddlespeech/t2s/models/vits/text_encoder.py b/paddlespeech/t2s/models/vits/text_encoder.py new file mode 100644 index 000000000..3afc7831a --- /dev/null +++ b/paddlespeech/t2s/models/vits/text_encoder.py @@ -0,0 +1,145 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Text encoder module in VITS. + +This code is based on https://github.com/jaywalnut310/vits. + +""" +import math +from typing import Tuple + +import paddle +from paddle import nn + +from paddlespeech.t2s.modules.nets_utils import make_non_pad_mask +from paddlespeech.t2s.modules.transformer.encoder import ConformerEncoder as Encoder + + +class TextEncoder(nn.Layer): + """Text encoder module in VITS. + + This is a module of text encoder described in `Conditional Variational Autoencoder + with Adversarial Learning for End-to-End Text-to-Speech`_. + + Instead of the relative positional Transformer, we use conformer architecture as + the encoder module, which contains additional convolution layers. + + .. _`Conditional Variational Autoencoder with Adversarial Learning for End-to-End + Text-to-Speech`: https://arxiv.org/abs/2006.04558 + + """ + + def __init__( + self, + vocabs: int, + attention_dim: int=192, + attention_heads: int=2, + linear_units: int=768, + blocks: int=6, + positionwise_layer_type: str="conv1d", + positionwise_conv_kernel_size: int=3, + positional_encoding_layer_type: str="rel_pos", + self_attention_layer_type: str="rel_selfattn", + activation_type: str="swish", + normalize_before: bool=True, + use_macaron_style: bool=False, + use_conformer_conv: bool=False, + conformer_kernel_size: int=7, + dropout_rate: float=0.1, + positional_dropout_rate: float=0.0, + attention_dropout_rate: float=0.0, ): + """Initialize TextEncoder module. + + Args: + vocabs (int): Vocabulary size. + attention_dim (int): Attention dimension. + attention_heads (int): Number of attention heads. + linear_units (int): Number of linear units of positionwise layers. + blocks (int): Number of encoder blocks. + positionwise_layer_type (str): Positionwise layer type. + positionwise_conv_kernel_size (int): Positionwise layer's kernel size. + positional_encoding_layer_type (str): Positional encoding layer type. + self_attention_layer_type (str): Self-attention layer type. + activation_type (str): Activation function type. + normalize_before (bool): Whether to apply LayerNorm before attention. 
+ use_macaron_style (bool): Whether to use macaron style components. + use_conformer_conv (bool): Whether to use conformer conv layers. + conformer_kernel_size (int): Conformer's conv kernel size. + dropout_rate (float): Dropout rate. + positional_dropout_rate (float): Dropout rate for positional encoding. + attention_dropout_rate (float): Dropout rate for attention. + + """ + super().__init__() + # store for forward + self.attention_dim = attention_dim + + # define modules + self.emb = nn.Embedding(vocabs, attention_dim) + + dist = paddle.distribution.Normal(loc=0.0, scale=attention_dim**-0.5) + w = dist.sample(self.emb.weight.shape) + self.emb.weight.set_value(w) + + self.encoder = Encoder( + idim=-1, + input_layer=None, + attention_dim=attention_dim, + attention_heads=attention_heads, + linear_units=linear_units, + num_blocks=blocks, + dropout_rate=dropout_rate, + positional_dropout_rate=positional_dropout_rate, + attention_dropout_rate=attention_dropout_rate, + normalize_before=normalize_before, + positionwise_layer_type=positionwise_layer_type, + positionwise_conv_kernel_size=positionwise_conv_kernel_size, + macaron_style=use_macaron_style, + pos_enc_layer_type=positional_encoding_layer_type, + selfattention_layer_type=self_attention_layer_type, + activation_type=activation_type, + use_cnn_module=use_conformer_conv, + cnn_module_kernel=conformer_kernel_size, ) + self.proj = nn.Conv1D(attention_dim, attention_dim * 2, 1) + + def forward( + self, + x: paddle.Tensor, + x_lengths: paddle.Tensor, + ) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor, paddle.Tensor]: + """Calculate forward propagation. + + Args: + x (Tensor): Input index tensor (B, T_text). + x_lengths (Tensor): Length tensor (B,). + + Returns: + Tensor: Encoded hidden representation (B, attention_dim, T_text). + Tensor: Projected mean tensor (B, attention_dim, T_text). + Tensor: Projected scale tensor (B, attention_dim, T_text). + Tensor: Mask tensor for input tensor (B, 1, T_text). + + """ + x = self.emb(x) * math.sqrt(self.attention_dim) + x_mask = make_non_pad_mask(x_lengths).unsqueeze(1) + # encoder assume the channel last (B, T_text, attention_dim) + # but mask shape shoud be (B, 1, T_text) + x, _ = self.encoder(x, x_mask) + + # convert the channel first (B, attention_dim, T_text) + x = paddle.transpose(x, [0, 2, 1]) + stats = self.proj(x) * x_mask + m, logs = paddle.split(stats, 2, axis=1) + + return x, m, logs, x_mask diff --git a/paddlespeech/t2s/models/vits/transform.py b/paddlespeech/t2s/models/vits/transform.py new file mode 100644 index 000000000..fec80377c --- /dev/null +++ b/paddlespeech/t2s/models/vits/transform.py @@ -0,0 +1,238 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Flow-related transformation. + +This code is based on https://github.com/bayesiains/nflows. 
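+
+It implements the piecewise rational-quadratic spline transform from Neural
+Spline Flows, which the stochastic duration predictor uses as its invertible
+nonlinearity.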
+ +""" +import numpy as np +import paddle +from paddle.nn import functional as F + +from paddlespeech.t2s.modules.nets_utils import paddle_gather + +DEFAULT_MIN_BIN_WIDTH = 1e-3 +DEFAULT_MIN_BIN_HEIGHT = 1e-3 +DEFAULT_MIN_DERIVATIVE = 1e-3 + + +def piecewise_rational_quadratic_transform( + inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + tails=None, + tail_bound=1.0, + min_bin_width=DEFAULT_MIN_BIN_WIDTH, + min_bin_height=DEFAULT_MIN_BIN_HEIGHT, + min_derivative=DEFAULT_MIN_DERIVATIVE, ): + if tails is None: + spline_fn = rational_quadratic_spline + spline_kwargs = {} + else: + spline_fn = unconstrained_rational_quadratic_spline + spline_kwargs = {"tails": tails, "tail_bound": tail_bound} + + outputs, logabsdet = spline_fn( + inputs=inputs, + unnormalized_widths=unnormalized_widths, + unnormalized_heights=unnormalized_heights, + unnormalized_derivatives=unnormalized_derivatives, + inverse=inverse, + min_bin_width=min_bin_width, + min_bin_height=min_bin_height, + min_derivative=min_derivative, + **spline_kwargs) + return outputs, logabsdet + + +def mask_preprocess(x, mask): + B, C, T, bins = paddle.shape(x) + new_x = paddle.zeros([mask.sum(), bins]) + for i in range(bins): + new_x[:, i] = x[:, :, :, i][mask] + return new_x + + +def unconstrained_rational_quadratic_spline( + inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + tails="linear", + tail_bound=1.0, + min_bin_width=DEFAULT_MIN_BIN_WIDTH, + min_bin_height=DEFAULT_MIN_BIN_HEIGHT, + min_derivative=DEFAULT_MIN_DERIVATIVE, ): + inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) + outside_interval_mask = ~inside_interval_mask + + outputs = paddle.zeros(paddle.shape(inputs)) + logabsdet = paddle.zeros(paddle.shape(inputs)) + if tails == "linear": + unnormalized_derivatives = F.pad( + unnormalized_derivatives, + pad=[0] * (len(unnormalized_derivatives.shape) - 1) * 2 + [1, 1]) + constant = np.log(np.exp(1 - min_derivative) - 1) + unnormalized_derivatives[..., 0] = constant + unnormalized_derivatives[..., -1] = constant + + outputs[outside_interval_mask] = inputs[outside_interval_mask] + logabsdet[outside_interval_mask] = 0 + else: + raise RuntimeError("{} tails are not implemented.".format(tails)) + + unnormalized_widths = mask_preprocess(unnormalized_widths, + inside_interval_mask) + unnormalized_heights = mask_preprocess(unnormalized_heights, + inside_interval_mask) + unnormalized_derivatives = mask_preprocess(unnormalized_derivatives, + inside_interval_mask) + + (outputs[inside_interval_mask], + logabsdet[inside_interval_mask], ) = rational_quadratic_spline( + inputs=inputs[inside_interval_mask], + unnormalized_widths=unnormalized_widths, + unnormalized_heights=unnormalized_heights, + unnormalized_derivatives=unnormalized_derivatives, + inverse=inverse, + left=-tail_bound, + right=tail_bound, + bottom=-tail_bound, + top=tail_bound, + min_bin_width=min_bin_width, + min_bin_height=min_bin_height, + min_derivative=min_derivative, ) + + return outputs, logabsdet + + +def rational_quadratic_spline( + inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + left=0.0, + right=1.0, + bottom=0.0, + top=1.0, + min_bin_width=DEFAULT_MIN_BIN_WIDTH, + min_bin_height=DEFAULT_MIN_BIN_HEIGHT, + min_derivative=DEFAULT_MIN_DERIVATIVE, ): + if paddle.min(inputs) < left or paddle.max(inputs) > right: + raise ValueError("Input to a transform is not within its domain") + + num_bins = 
unnormalized_widths.shape[-1] + + if min_bin_width * num_bins > 1.0: + raise ValueError("Minimal bin width too large for the number of bins") + if min_bin_height * num_bins > 1.0: + raise ValueError("Minimal bin height too large for the number of bins") + + widths = F.softmax(unnormalized_widths, axis=-1) + widths = min_bin_width + (1 - min_bin_width * num_bins) * widths + cumwidths = paddle.cumsum(widths, axis=-1) + cumwidths = F.pad( + cumwidths, + pad=[0] * (len(cumwidths.shape) - 1) * 2 + [1, 0], + mode="constant", + value=0.0) + cumwidths = (right - left) * cumwidths + left + cumwidths[..., 0] = left + cumwidths[..., -1] = right + widths = cumwidths[..., 1:] - cumwidths[..., :-1] + + derivatives = min_derivative + F.softplus(unnormalized_derivatives) + + heights = F.softmax(unnormalized_heights, axis=-1) + heights = min_bin_height + (1 - min_bin_height * num_bins) * heights + cumheights = paddle.cumsum(heights, axis=-1) + cumheights = F.pad( + cumheights, + pad=[0] * (len(cumheights.shape) - 1) * 2 + [1, 0], + mode="constant", + value=0.0) + cumheights = (top - bottom) * cumheights + bottom + cumheights[..., 0] = bottom + cumheights[..., -1] = top + heights = cumheights[..., 1:] - cumheights[..., :-1] + + if inverse: + bin_idx = _searchsorted(cumheights, inputs)[..., None] + else: + bin_idx = _searchsorted(cumwidths, inputs)[..., None] + input_cumwidths = paddle_gather(cumwidths, -1, bin_idx)[..., 0] + input_bin_widths = paddle_gather(widths, -1, bin_idx)[..., 0] + + input_cumheights = paddle_gather(cumheights, -1, bin_idx)[..., 0] + delta = heights / widths + input_delta = paddle_gather(delta, -1, bin_idx)[..., 0] + + input_derivatives = paddle_gather(derivatives, -1, bin_idx)[..., 0] + input_derivatives_plus_one = paddle_gather(derivatives[..., 1:], -1, + bin_idx)[..., 0] + + input_heights = paddle_gather(heights, -1, bin_idx)[..., 0] + + if inverse: + a = (inputs - input_cumheights) * ( + input_derivatives + input_derivatives_plus_one - 2 * input_delta + ) + input_heights * (input_delta - input_derivatives) + b = input_heights * input_derivatives - (inputs - input_cumheights) * ( + input_derivatives + input_derivatives_plus_one - 2 * input_delta) + c = -input_delta * (inputs - input_cumheights) + + discriminant = b.pow(2) - 4 * a * c + assert (discriminant >= 0).all() + + root = (2 * c) / (-b - paddle.sqrt(discriminant)) + outputs = root * input_bin_widths + input_cumwidths + + theta_one_minus_theta = root * (1 - root) + denominator = input_delta + ( + (input_derivatives + input_derivatives_plus_one - 2 * input_delta + ) * theta_one_minus_theta) + derivative_numerator = input_delta.pow(2) * ( + input_derivatives_plus_one * root.pow(2) + 2 * input_delta * + theta_one_minus_theta + input_derivatives * (1 - root).pow(2)) + logabsdet = paddle.log(derivative_numerator) - 2 * paddle.log( + denominator) + + return outputs, -logabsdet + else: + theta = (inputs - input_cumwidths) / input_bin_widths + theta_one_minus_theta = theta * (1 - theta) + + numerator = input_heights * (input_delta * theta.pow(2) + + input_derivatives * theta_one_minus_theta) + denominator = input_delta + ( + (input_derivatives + input_derivatives_plus_one - 2 * input_delta + ) * theta_one_minus_theta) + outputs = input_cumheights + numerator / denominator + + derivative_numerator = input_delta.pow(2) * ( + input_derivatives_plus_one * theta.pow(2) + 2 * input_delta * + theta_one_minus_theta + input_derivatives * (1 - theta).pow(2)) + logabsdet = paddle.log(derivative_numerator) - 2 * paddle.log( + denominator) + + 
return outputs, logabsdet + + +def _searchsorted(bin_locations, inputs, eps=1e-6): + bin_locations[..., -1] += eps + return paddle.sum(inputs[..., None] >= bin_locations, axis=-1) - 1 diff --git a/paddlespeech/t2s/models/vits/vits.py b/paddlespeech/t2s/models/vits/vits.py new file mode 100644 index 000000000..f7f5ba968 --- /dev/null +++ b/paddlespeech/t2s/models/vits/vits.py @@ -0,0 +1,573 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# Modified from espnet(https://github.com/espnet/espnet) +"""VITS module""" +from typing import Any +from typing import Dict +from typing import Optional + +import paddle +from paddle import nn +from typeguard import check_argument_types + +from paddlespeech.t2s.models.hifigan import HiFiGANMultiPeriodDiscriminator +from paddlespeech.t2s.models.hifigan import HiFiGANMultiScaleDiscriminator +from paddlespeech.t2s.models.hifigan import HiFiGANMultiScaleMultiPeriodDiscriminator +from paddlespeech.t2s.models.hifigan import HiFiGANPeriodDiscriminator +from paddlespeech.t2s.models.hifigan import HiFiGANScaleDiscriminator +from paddlespeech.t2s.models.vits.generator import VITSGenerator +from paddlespeech.t2s.modules.losses import DiscriminatorAdversarialLoss +from paddlespeech.t2s.modules.losses import FeatureMatchLoss +from paddlespeech.t2s.modules.losses import GeneratorAdversarialLoss +from paddlespeech.t2s.modules.losses import KLDivergenceLoss +from paddlespeech.t2s.modules.losses import MelSpectrogramLoss +from paddlespeech.t2s.modules.nets_utils import get_segments + +AVAILABLE_GENERATERS = { + "vits_generator": VITSGenerator, +} +AVAILABLE_DISCRIMINATORS = { + "hifigan_period_discriminator": + HiFiGANPeriodDiscriminator, + "hifigan_scale_discriminator": + HiFiGANScaleDiscriminator, + "hifigan_multi_period_discriminator": + HiFiGANMultiPeriodDiscriminator, + "hifigan_multi_scale_discriminator": + HiFiGANMultiScaleDiscriminator, + "hifigan_multi_scale_multi_period_discriminator": + HiFiGANMultiScaleMultiPeriodDiscriminator, +} + + +class VITS(nn.Layer): + """VITS module (generator + discriminator). + This is a module of VITS described in `Conditional Variational Autoencoder + with Adversarial Learning for End-to-End Text-to-Speech`_. + .. 
_`Conditional Variational Autoencoder with Adversarial Learning for End-to-End + Text-to-Speech`: https://arxiv.org/abs/2006.04558 + """ + + def __init__( + self, + # generator related + idim: int, + odim: int, + sampling_rate: int=22050, + generator_type: str="vits_generator", + generator_params: Dict[str, Any]={ + "hidden_channels": 192, + "spks": None, + "langs": None, + "spk_embed_dim": None, + "global_channels": -1, + "segment_size": 32, + "text_encoder_attention_heads": 2, + "text_encoder_ffn_expand": 4, + "text_encoder_blocks": 6, + "text_encoder_positionwise_layer_type": "conv1d", + "text_encoder_positionwise_conv_kernel_size": 1, + "text_encoder_positional_encoding_layer_type": "rel_pos", + "text_encoder_self_attention_layer_type": "rel_selfattn", + "text_encoder_activation_type": "swish", + "text_encoder_normalize_before": True, + "text_encoder_dropout_rate": 0.1, + "text_encoder_positional_dropout_rate": 0.0, + "text_encoder_attention_dropout_rate": 0.0, + "text_encoder_conformer_kernel_size": 7, + "use_macaron_style_in_text_encoder": True, + "use_conformer_conv_in_text_encoder": True, + "decoder_kernel_size": 7, + "decoder_channels": 512, + "decoder_upsample_scales": [8, 8, 2, 2], + "decoder_upsample_kernel_sizes": [16, 16, 4, 4], + "decoder_resblock_kernel_sizes": [3, 7, 11], + "decoder_resblock_dilations": [[1, 3, 5], [1, 3, 5], [1, 3, 5]], + "use_weight_norm_in_decoder": True, + "posterior_encoder_kernel_size": 5, + "posterior_encoder_layers": 16, + "posterior_encoder_stacks": 1, + "posterior_encoder_base_dilation": 1, + "posterior_encoder_dropout_rate": 0.0, + "use_weight_norm_in_posterior_encoder": True, + "flow_flows": 4, + "flow_kernel_size": 5, + "flow_base_dilation": 1, + "flow_layers": 4, + "flow_dropout_rate": 0.0, + "use_weight_norm_in_flow": True, + "use_only_mean_in_flow": True, + "stochastic_duration_predictor_kernel_size": 3, + "stochastic_duration_predictor_dropout_rate": 0.5, + "stochastic_duration_predictor_flows": 4, + "stochastic_duration_predictor_dds_conv_layers": 3, + }, + # discriminator related + discriminator_type: str="hifigan_multi_scale_multi_period_discriminator", + discriminator_params: Dict[str, Any]={ + "scales": 1, + "scale_downsample_pooling": "AvgPool1D", + "scale_downsample_pooling_params": { + "kernel_size": 4, + "stride": 2, + "padding": 2, + }, + "scale_discriminator_params": { + "in_channels": 1, + "out_channels": 1, + "kernel_sizes": [15, 41, 5, 3], + "channels": 128, + "max_downsample_channels": 1024, + "max_groups": 16, + "bias": True, + "downsample_scales": [2, 2, 4, 4, 1], + "nonlinear_activation": "leakyrelu", + "nonlinear_activation_params": { + "negative_slope": 0.1 + }, + "use_weight_norm": True, + "use_spectral_norm": False, + }, + "follow_official_norm": False, + "periods": [2, 3, 5, 7, 11], + "period_discriminator_params": { + "in_channels": 1, + "out_channels": 1, + "kernel_sizes": [5, 3], + "channels": 32, + "downsample_scales": [3, 3, 3, 3, 1], + "max_downsample_channels": 1024, + "bias": True, + "nonlinear_activation": "leakyrelu", + "nonlinear_activation_params": { + "negative_slope": 0.1 + }, + "use_weight_norm": True, + "use_spectral_norm": False, + }, + }, + # loss related + generator_adv_loss_params: Dict[str, Any]={ + "average_by_discriminators": False, + "loss_type": "mse", + }, + discriminator_adv_loss_params: Dict[str, Any]={ + "average_by_discriminators": False, + "loss_type": "mse", + }, + feat_match_loss_params: Dict[str, Any]={ + "average_by_discriminators": False, + "average_by_layers": False, + 
"include_final_outputs": True, + }, + mel_loss_params: Dict[str, Any]={ + "fs": 22050, + "fft_size": 1024, + "hop_size": 256, + "win_length": None, + "window": "hann", + "num_mels": 80, + "fmin": 0, + "fmax": None, + "log_base": None, + }, + lambda_adv: float=1.0, + lambda_mel: float=45.0, + lambda_feat_match: float=2.0, + lambda_dur: float=1.0, + lambda_kl: float=1.0, + cache_generator_outputs: bool=True, ): + """Initialize VITS module. + Args: + idim (int): Input vocabrary size. + odim (int): Acoustic feature dimension. The actual output channels will + be 1 since VITS is the end-to-end text-to-wave model but for the + compatibility odim is used to indicate the acoustic feature dimension. + sampling_rate (int): Sampling rate, not used for the training but it will + be referred in saving waveform during the inference. + generator_type (str): Generator type. + generator_params (Dict[str, Any]): Parameter dict for generator. + discriminator_type (str): Discriminator type. + discriminator_params (Dict[str, Any]): Parameter dict for discriminator. + generator_adv_loss_params (Dict[str, Any]): Parameter dict for generator + adversarial loss. + discriminator_adv_loss_params (Dict[str, Any]): Parameter dict for + discriminator adversarial loss. + feat_match_loss_params (Dict[str, Any]): Parameter dict for feat match loss. + mel_loss_params (Dict[str, Any]): Parameter dict for mel loss. + lambda_adv (float): Loss scaling coefficient for adversarial loss. + lambda_mel (float): Loss scaling coefficient for mel spectrogram loss. + lambda_feat_match (float): Loss scaling coefficient for feat match loss. + lambda_dur (float): Loss scaling coefficient for duration loss. + lambda_kl (float): Loss scaling coefficient for KL divergence loss. + cache_generator_outputs (bool): Whether to cache generator outputs. + """ + assert check_argument_types() + super().__init__() + + # define modules + generator_class = AVAILABLE_GENERATERS[generator_type] + if generator_type == "vits_generator": + # NOTE: Update parameters for the compatibility. + # The idim and odim is automatically decided from input data, + # where idim represents #vocabularies and odim represents + # the input acoustic feature dimension. 
+ generator_params.update(vocabs=idim, aux_channels=odim) + self.generator = generator_class( + **generator_params, ) + discriminator_class = AVAILABLE_DISCRIMINATORS[discriminator_type] + self.discriminator = discriminator_class( + **discriminator_params, ) + self.generator_adv_loss = GeneratorAdversarialLoss( + **generator_adv_loss_params, ) + self.discriminator_adv_loss = DiscriminatorAdversarialLoss( + **discriminator_adv_loss_params, ) + self.feat_match_loss = FeatureMatchLoss( + **feat_match_loss_params, ) + self.mel_loss = MelSpectrogramLoss( + **mel_loss_params, ) + self.kl_loss = KLDivergenceLoss() + + # coefficients + self.lambda_adv = lambda_adv + self.lambda_mel = lambda_mel + self.lambda_kl = lambda_kl + self.lambda_feat_match = lambda_feat_match + self.lambda_dur = lambda_dur + + # cache + self.cache_generator_outputs = cache_generator_outputs + self._cache = None + + # store sampling rate for saving wav file + # (not used for the training) + self.fs = sampling_rate + + # store parameters for test compatibility + self.spks = self.generator.spks + self.langs = self.generator.langs + self.spk_embed_dim = self.generator.spk_embed_dim + + @property + def require_raw_speech(self): + """Return whether or not speech is required.""" + return True + + @property + def require_vocoder(self): + """Return whether or not vocoder is required.""" + return False + + def forward( + self, + text: paddle.Tensor, + text_lengths: paddle.Tensor, + feats: paddle.Tensor, + feats_lengths: paddle.Tensor, + sids: Optional[paddle.Tensor]=None, + spembs: Optional[paddle.Tensor]=None, + lids: Optional[paddle.Tensor]=None, + forward_generator: bool=True, ) -> Dict[str, Any]: + """Perform generator forward. + Args: + text (Tensor): Text index tensor (B, T_text). + text_lengths (Tensor): Text length tensor (B,). + feats (Tensor): Feature tensor (B, T_feats, aux_channels). + feats_lengths (Tensor): Feature length tensor (B,). + sids (Optional[Tensor]): Speaker index tensor (B,) or (B, 1). + spembs (Optional[Tensor]): Speaker embedding tensor (B, spk_embed_dim). + lids (Optional[Tensor]): Language index tensor (B,) or (B, 1). + forward_generator (bool): Whether to forward generator. + Returns: + Dict[str, Any]: + - loss (Tensor): Loss scalar tensor. + - stats (Dict[str, float]): Statistics to be monitored. + - weight (Tensor): Weight tensor to summarize losses. + - optim_idx (int): Optimizer index (0 for G and 1 for D). + """ + if forward_generator: + return self._forward_generator( + text=text, + text_lengths=text_lengths, + feats=feats, + feats_lengths=feats_lengths, + sids=sids, + spembs=spembs, + lids=lids, ) + else: + return self._forward_discrminator( + text=text, + text_lengths=text_lengths, + feats=feats, + feats_lengths=feats_lengths, + sids=sids, + spembs=spembs, + lids=lids, ) + + def _forward_generator( + self, + text: paddle.Tensor, + text_lengths: paddle.Tensor, + feats: paddle.Tensor, + feats_lengths: paddle.Tensor, + sids: Optional[paddle.Tensor]=None, + spembs: Optional[paddle.Tensor]=None, + lids: Optional[paddle.Tensor]=None, ) -> Dict[str, Any]: + """Perform generator forward. + Args: + text (Tensor): Text index tensor (B, T_text). + text_lengths (Tensor): Text length tensor (B,). + feats (Tensor): Feature tensor (B, T_feats, aux_channels). + feats_lengths (Tensor): Feature length tensor (B,). + sids (Optional[Tensor]): Speaker index tensor (B,) or (B, 1). + spembs (Optional[Tensor]): Speaker embedding tensor (B, spk_embed_dim). 
+ lids (Optional[Tensor]): Language index tensor (B,) or (B, 1). + Returns: + Dict[str, Any]: + * loss (Tensor): Loss scalar tensor. + * stats (Dict[str, float]): Statistics to be monitored. + * weight (Tensor): Weight tensor to summarize losses. + * optim_idx (int): Optimizer index (0 for G and 1 for D). + """ + # setup + batch_size = paddle.shape(text)[0] + feats = feats.transpose([0, 2, 1]) + # speech = speech.unsqueeze(1) + + # calculate generator outputs + reuse_cache = True + if not self.cache_generator_outputs or self._cache is None: + reuse_cache = False + outs = self.generator( + text=text, + text_lengths=text_lengths, + feats=feats, + feats_lengths=feats_lengths, + sids=sids, + spembs=spembs, + lids=lids, ) + else: + outs = self._cache + + # store cache + if self.training and self.cache_generator_outputs and not reuse_cache: + self._cache = outs + + return outs + """ + # parse outputs + speech_hat_, dur_nll, _, start_idxs, _, z_mask, outs_ = outs + _, z_p, m_p, logs_p, _, logs_q = outs_ + speech_ = get_segments( + x=speech, + start_idxs=start_idxs * self.generator.upsample_factor, + segment_size=self.generator.segment_size * + self.generator.upsample_factor, ) + + # calculate discriminator outputs + p_hat = self.discriminator(speech_hat_) + with paddle.no_grad(): + # do not store discriminator gradient in generator turn + p = self.discriminator(speech_) + + # calculate losses + mel_loss = self.mel_loss(speech_hat_, speech_) + kl_loss = self.kl_loss(z_p, logs_q, m_p, logs_p, z_mask) + dur_loss = paddle.sum(dur_nll.float()) + adv_loss = self.generator_adv_loss(p_hat) + feat_match_loss = self.feat_match_loss(p_hat, p) + + mel_loss = mel_loss * self.lambda_mel + kl_loss = kl_loss * self.lambda_kl + dur_loss = dur_loss * self.lambda_dur + adv_loss = adv_loss * self.lambda_adv + feat_match_loss = feat_match_loss * self.lambda_feat_match + loss = mel_loss + kl_loss + dur_loss + adv_loss + feat_match_loss + + stats = dict( + generator_loss=loss.item(), + generator_mel_loss=mel_loss.item(), + generator_kl_loss=kl_loss.item(), + generator_dur_loss=dur_loss.item(), + generator_adv_loss=adv_loss.item(), + generator_feat_match_loss=feat_match_loss.item(), ) + + # reset cache + if reuse_cache or not self.training: + self._cache = None + + return { + "loss": loss, + "stats": stats, + # "weight": weight, + "optim_idx": 0, # needed for trainer + } + """ + + def _forward_discrminator( + self, + text: paddle.Tensor, + text_lengths: paddle.Tensor, + feats: paddle.Tensor, + feats_lengths: paddle.Tensor, + sids: Optional[paddle.Tensor]=None, + spembs: Optional[paddle.Tensor]=None, + lids: Optional[paddle.Tensor]=None, ) -> Dict[str, Any]: + """Perform discriminator forward. + Args: + text (Tensor): Text index tensor (B, T_text). + text_lengths (Tensor): Text length tensor (B,). + feats (Tensor): Feature tensor (B, T_feats, aux_channels). + feats_lengths (Tensor): Feature length tensor (B,). + sids (Optional[Tensor]): Speaker index tensor (B,) or (B, 1). + spembs (Optional[Tensor]): Speaker embedding tensor (B, spk_embed_dim). + lids (Optional[Tensor]): Language index tensor (B,) or (B, 1). + Returns: + Dict[str, Any]: + * loss (Tensor): Loss scalar tensor. + * stats (Dict[str, float]): Statistics to be monitored. + * weight (Tensor): Weight tensor to summarize losses. + * optim_idx (int): Optimizer index (0 for G and 1 for D). 
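+        Note:
+            When ``cache_generator_outputs`` is enabled, the generator outputs
+            cached during the generator turn are reused here instead of being
+            recomputed.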
+ """ + # setup + batch_size = paddle.shape(text)[0] + feats = feats.transpose([0, 2, 1]) + # speech = speech.unsqueeze(1) + + # calculate generator outputs + reuse_cache = True + if not self.cache_generator_outputs or self._cache is None: + reuse_cache = False + outs = self.generator( + text=text, + text_lengths=text_lengths, + feats=feats, + feats_lengths=feats_lengths, + sids=sids, + spembs=spembs, + lids=lids, ) + else: + outs = self._cache + + # store cache + if self.cache_generator_outputs and not reuse_cache: + self._cache = outs + + return outs + """ + + # parse outputs + speech_hat_, _, _, start_idxs, *_ = outs + speech_ = get_segments( + x=speech, + start_idxs=start_idxs * self.generator.upsample_factor, + segment_size=self.generator.segment_size * + self.generator.upsample_factor, ) + + # calculate discriminator outputs + p_hat = self.discriminator(speech_hat_.detach()) + p = self.discriminator(speech_) + + # calculate losses + real_loss, fake_loss = self.discriminator_adv_loss(p_hat, p) + loss = real_loss + fake_loss + + stats = dict( + discriminator_loss=loss.item(), + discriminator_real_loss=real_loss.item(), + discriminator_fake_loss=fake_loss.item(), ) + + # reset cache + if reuse_cache or not self.training: + self._cache = None + + return { + "loss": loss, + "stats": stats, + # "weight": weight, + "optim_idx": 1, # needed for trainer + } + """ + + def inference( + self, + text: paddle.Tensor, + feats: Optional[paddle.Tensor]=None, + sids: Optional[paddle.Tensor]=None, + spembs: Optional[paddle.Tensor]=None, + lids: Optional[paddle.Tensor]=None, + durations: Optional[paddle.Tensor]=None, + noise_scale: float=0.667, + noise_scale_dur: float=0.8, + alpha: float=1.0, + max_len: Optional[int]=None, + use_teacher_forcing: bool=False, ) -> Dict[str, paddle.Tensor]: + """Run inference. + Args: + text (Tensor): Input text index tensor (T_text,). + feats (Tensor): Feature tensor (T_feats, aux_channels). + sids (Tensor): Speaker index tensor (1,). + spembs (Optional[Tensor]): Speaker embedding tensor (spk_embed_dim,). + lids (Tensor): Language index tensor (1,). + durations (Tensor): Ground-truth duration tensor (T_text,). + noise_scale (float): Noise scale value for flow. + noise_scale_dur (float): Noise scale value for duration predictor. + alpha (float): Alpha parameter to control the speed of generated speech. + max_len (Optional[int]): Maximum length. + use_teacher_forcing (bool): Whether to use teacher forcing. + Returns: + Dict[str, Tensor]: + * wav (Tensor): Generated waveform tensor (T_wav,). + * att_w (Tensor): Monotonic attention weight tensor (T_feats, T_text). + * duration (Tensor): Predicted duration tensor (T_text,). 
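+        Example:
+            A minimal sketch, assuming a trained ``VITS`` instance ``vits``
+            and placeholder phone ids:
+
+            >>> out = vits.inference(text=paddle.to_tensor([3, 5, 7]))
+            >>> wav, dur = out["wav"], out["duration"]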
+ """ + # setup + text = text[None] + text_lengths = paddle.to_tensor(paddle.shape(text)[1]) + # if sids is not None: + # sids = sids.view(1) + # if lids is not None: + # lids = lids.view(1) + if durations is not None: + durations = paddle.reshape(durations, [1, 1, -1]) + + # inference + if use_teacher_forcing: + assert feats is not None + feats = feats[None].transpose([0, 2, 1]) + feats_lengths = paddle.to_tensor([paddle.shape(feats)[2]]) + wav, att_w, dur = self.generator.inference( + text=text, + text_lengths=text_lengths, + feats=feats, + feats_lengths=feats_lengths, + sids=sids, + spembs=spembs, + lids=lids, + max_len=max_len, + use_teacher_forcing=use_teacher_forcing, ) + else: + wav, att_w, dur = self.generator.inference( + text=text, + text_lengths=text_lengths, + sids=sids, + spembs=spembs, + lids=lids, + dur=durations, + noise_scale=noise_scale, + noise_scale_dur=noise_scale_dur, + alpha=alpha, + max_len=max_len, ) + return dict( + wav=paddle.reshape(wav, [-1]), att_w=att_w[0], duration=dur[0]) diff --git a/paddlespeech/t2s/models/vits/vits_updater.py b/paddlespeech/t2s/models/vits/vits_updater.py new file mode 100644 index 000000000..e69de29bb diff --git a/paddlespeech/t2s/models/vits/wavenet/__init__.py b/paddlespeech/t2s/models/vits/wavenet/__init__.py new file mode 100644 index 000000000..97043fd7b --- /dev/null +++ b/paddlespeech/t2s/models/vits/wavenet/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/paddlespeech/t2s/models/vits/wavenet/residual_block.py b/paddlespeech/t2s/models/vits/wavenet/residual_block.py new file mode 100644 index 000000000..197e74975 --- /dev/null +++ b/paddlespeech/t2s/models/vits/wavenet/residual_block.py @@ -0,0 +1,154 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# Modified from espnet(https://github.com/espnet/espnet) +import math +from typing import Optional +from typing import Tuple + +import paddle +import paddle.nn.functional as F +from paddle import nn + + +class ResidualBlock(nn.Layer): + """Residual block module in WaveNet.""" + + def __init__( + self, + kernel_size: int=3, + residual_channels: int=64, + gate_channels: int=128, + skip_channels: int=64, + aux_channels: int=80, + global_channels: int=-1, + dropout_rate: float=0.0, + dilation: int=1, + bias: bool=True, + scale_residual: bool=False, ): + """Initialize ResidualBlock module. 
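+
+        Each block applies a gated dilated convolution with optional local and
+        global conditioning, and produces residual and skip outputs through a
+        single fused 1x1 convolution.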
+ + Args: + kernel_size (int): Kernel size of dilation convolution layer. + residual_channels (int): Number of channels for residual connection. + skip_channels (int): Number of channels for skip connection. + aux_channels (int): Number of local conditioning channels. + dropout (float): Dropout probability. + dilation (int): Dilation factor. + bias (bool): Whether to add bias parameter in convolution layers. + scale_residual (bool): Whether to scale the residual outputs. + + """ + super().__init__() + self.dropout_rate = dropout_rate + self.residual_channels = residual_channels + self.skip_channels = skip_channels + self.scale_residual = scale_residual + + # check + assert ( + kernel_size - 1) % 2 == 0, "Not support even number kernel size." + assert gate_channels % 2 == 0 + + # dilation conv + padding = (kernel_size - 1) // 2 * dilation + self.conv = nn.Conv1D( + residual_channels, + gate_channels, + kernel_size, + padding=padding, + dilation=dilation, + bias_attr=bias, ) + + # local conditioning + if aux_channels > 0: + self.conv1x1_aux = nn.Conv1D( + aux_channels, gate_channels, kernel_size=1, bias_attr=False) + else: + self.conv1x1_aux = None + + # global conditioning + if global_channels > 0: + self.conv1x1_glo = nn.Conv1D( + global_channels, gate_channels, kernel_size=1, bias_attr=False) + else: + self.conv1x1_glo = None + + # conv output is split into two groups + gate_out_channels = gate_channels // 2 + + # NOTE: concat two convs into a single conv for the efficiency + # (integrate res 1x1 + skip 1x1 convs) + self.conv1x1_out = nn.Conv1D( + gate_out_channels, + residual_channels + skip_channels, + kernel_size=1, + bias_attr=bias) + + def forward( + self, + x: paddle.Tensor, + x_mask: Optional[paddle.Tensor]=None, + c: Optional[paddle.Tensor]=None, + g: Optional[paddle.Tensor]=None, + ) -> Tuple[paddle.Tensor, paddle.Tensor]: + """Calculate forward propagation. + + Args: + x (Tensor): Input tensor (B, residual_channels, T). + x_mask Optional[paddle.Tensor]: Mask tensor (B, 1, T). + c (Optional[Tensor]): Local conditioning tensor (B, aux_channels, T). + g (Optional[Tensor]): Global conditioning tensor (B, global_channels, 1). + + Returns: + Tensor: Output tensor for residual connection (B, residual_channels, T). + Tensor: Output tensor for skip connection (B, skip_channels, T). + + """ + residual = x + x = F.dropout(x, p=self.dropout_rate, training=self.training) + x = self.conv(x) + + # split into two part for gated activation + splitdim = 1 + xa, xb = paddle.split(x, 2, axis=splitdim) + + # local conditioning + if c is not None: + c = self.conv1x1_aux(c) + ca, cb = paddle.split(c, 2, axis=splitdim) + xa, xb = xa + ca, xb + cb + + # global conditioning + if g is not None: + g = self.conv1x1_glo(g) + ga, gb = paddle.split(g, 2, axis=splitdim) + xa, xb = xa + ga, xb + gb + + x = paddle.tanh(xa) * F.sigmoid(xb) + + # residual + skip 1x1 conv + x = self.conv1x1_out(x) + if x_mask is not None: + x = x * x_mask + + # split integrated conv results + x, s = paddle.split( + x, [self.residual_channels, self.skip_channels], axis=1) + + # for residual connection + x = x + residual + if self.scale_residual: + x = x * math.sqrt(0.5) + + return x, s diff --git a/paddlespeech/t2s/models/vits/wavenet/wavenet.py b/paddlespeech/t2s/models/vits/wavenet/wavenet.py new file mode 100644 index 000000000..44693dac6 --- /dev/null +++ b/paddlespeech/t2s/models/vits/wavenet/wavenet.py @@ -0,0 +1,175 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# Modified from espnet(https://github.com/espnet/espnet) +import math +from typing import Optional + +import paddle +from paddle import nn + +from paddlespeech.t2s.models.vits.wavenet.residual_block import ResidualBlock + + +class WaveNet(nn.Layer): + """WaveNet with global conditioning.""" + + def __init__( + self, + in_channels: int=1, + out_channels: int=1, + kernel_size: int=3, + layers: int=30, + stacks: int=3, + base_dilation: int=2, + residual_channels: int=64, + aux_channels: int=-1, + gate_channels: int=128, + skip_channels: int=64, + global_channels: int=-1, + dropout_rate: float=0.0, + bias: bool=True, + use_weight_norm: bool=True, + use_first_conv: bool=False, + use_last_conv: bool=False, + scale_residual: bool=False, + scale_skip_connect: bool=False, ): + """Initialize WaveNet module. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + kernel_size (int): Kernel size of dilated convolution. + layers (int): Number of residual block layers. + stacks (int): Number of stacks i.e., dilation cycles. + base_dilation (int): Base dilation factor. + residual_channels (int): Number of channels in residual conv. + gate_channels (int): Number of channels in gated conv. + skip_channels (int): Number of channels in skip conv. + aux_channels (int): Number of channels for local conditioning feature. + global_channels (int): Number of channels for global conditioning feature. + dropout_rate (float): Dropout rate. 0.0 means no dropout applied. + bias (bool): Whether to use bias parameter in conv layer. + use_weight_norm (bool): Whether to use weight norm. If set to true, it will + be applied to all of the conv layers. + use_first_conv (bool): Whether to use the first conv layers. + use_last_conv (bool): Whether to use the last conv layers. + scale_residual (bool): Whether to scale the residual outputs. + scale_skip_connect (bool): Whether to scale the skip connection outputs. 
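+
+        Note:
+            The dilation of the ``l``-th residual block is
+            ``base_dilation ** (l % (layers // stacks))``, so each stack
+            restarts the dilation cycle from 1.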
+ + """ + super().__init__() + self.layers = layers + self.stacks = stacks + self.kernel_size = kernel_size + self.base_dilation = base_dilation + self.use_first_conv = use_first_conv + self.use_last_conv = use_last_conv + self.scale_skip_connect = scale_skip_connect + + # check the number of layers and stacks + assert layers % stacks == 0 + layers_per_stack = layers // stacks + + # define first convolution + if self.use_first_conv: + self.first_conv = nn.Conv1D( + in_channels, residual_channels, kernel_size=1, bias_attr=True) + + # define residual blocks + self.conv_layers = nn.LayerList() + for layer in range(layers): + dilation = base_dilation**(layer % layers_per_stack) + conv = ResidualBlock( + kernel_size=kernel_size, + residual_channels=residual_channels, + gate_channels=gate_channels, + skip_channels=skip_channels, + aux_channels=aux_channels, + global_channels=global_channels, + dilation=dilation, + dropout_rate=dropout_rate, + bias=bias, + scale_residual=scale_residual, ) + self.conv_layers.append(conv) + + # define output layers + if self.use_last_conv: + self.last_conv = nn.Sequential( + nn.ReLU(), + nn.Conv1D( + skip_channels, skip_channels, kernel_size=1, + bias_attr=True), + nn.ReLU(), + nn.Conv1D( + skip_channels, out_channels, kernel_size=1, bias_attr=True), + ) + + # apply weight norm + if use_weight_norm: + self.apply_weight_norm() + + def forward( + self, + x: paddle.Tensor, + x_mask: Optional[paddle.Tensor]=None, + c: Optional[paddle.Tensor]=None, + g: Optional[paddle.Tensor]=None, ) -> paddle.Tensor: + """Calculate forward propagation. + + Args: + x (Tensor): Input noise signal (B, 1, T) if use_first_conv else + (B, residual_channels, T). + x_mask (Optional[Tensor]): Mask tensor (B, 1, T). + c (Optional[Tensor]): Local conditioning features (B, aux_channels, T). + g (Optional[Tensor]): Global conditioning features (B, global_channels, 1). + + Returns: + Tensor: Output tensor (B, out_channels, T) if use_last_conv else + (B, residual_channels, T). + + """ + # encode to hidden representation + if self.use_first_conv: + x = self.first_conv(x) + + # residual block + skips = 0.0 + for f in self.conv_layers: + x, h = f(x, x_mask=x_mask, c=c, g=g) + skips = skips + h + x = skips + if self.scale_skip_connect: + x = x * math.sqrt(1.0 / len(self.conv_layers)) + + # apply final layers + if self.use_last_conv: + x = self.last_conv(x) + + return x + + def apply_weight_norm(self): + def _apply_weight_norm(layer): + if isinstance(layer, (nn.Conv1D, nn.Conv2D)): + nn.utils.weight_norm(layer) + + self.apply(_apply_weight_norm) + + def remove_weight_norm(self): + def _remove_weight_norm(layer): + try: + nn.utils.remove_weight_norm(layer) + except ValueError: + pass + + self.apply(_remove_weight_norm) diff --git a/paddlespeech/t2s/modules/losses.py b/paddlespeech/t2s/modules/losses.py index db31bcfbb..fa9e05915 100644 --- a/paddlespeech/t2s/modules/losses.py +++ b/paddlespeech/t2s/modules/losses.py @@ -1006,3 +1006,40 @@ class FeatureMatchLoss(nn.Layer): feat_match_loss /= i + 1 return feat_match_loss + +# loss for VITS +class KLDivergenceLoss(nn.Layer): + """KL divergence loss.""" + + def forward( + self, + z_p: paddle.Tensor, + logs_q: paddle.Tensor, + m_p: paddle.Tensor, + logs_p: paddle.Tensor, + z_mask: paddle.Tensor, + ) -> paddle.Tensor: + """Calculate KL divergence loss. + + Args: + z_p (Tensor): Flow hidden representation (B, H, T_feats). + logs_q (Tensor): Posterior encoder projected scale (B, H, T_feats). + m_p (Tensor): Expanded text encoder projected mean (B, H, T_feats). 
+            logs_p (Tensor): Expanded text encoder projected scale (B, H, T_feats).
+            z_mask (Tensor): Mask tensor (B, 1, T_feats).
+
+        Returns:
+            Tensor: KL divergence loss.
+
+        Note:
+            Implements the closed-form Gaussian KL elementwise as
+            ``logs_p - logs_q - 0.5 + 0.5 * (z_p - m_p) ** 2 * exp(-2 * logs_p)``,
+            masked by ``z_mask`` and averaged over the valid frames.
+
+        """
+        z_p = paddle.cast(z_p, 'float32')
+        logs_q = paddle.cast(logs_q, 'float32')
+        m_p = paddle.cast(m_p, 'float32')
+        logs_p = paddle.cast(logs_p, 'float32')
+        z_mask = paddle.cast(z_mask, 'float32')
+        kl = logs_p - logs_q - 0.5
+        kl += 0.5 * ((z_p - m_p) ** 2) * paddle.exp(-2.0 * logs_p)
+        kl = paddle.sum(kl * z_mask)
+        loss = kl / paddle.sum(z_mask)
+
+        return loss
\ No newline at end of file
diff --git a/paddlespeech/t2s/modules/nets_utils.py b/paddlespeech/t2s/modules/nets_utils.py
index 4207d316c..598b63164 100644
--- a/paddlespeech/t2s/modules/nets_utils.py
+++ b/paddlespeech/t2s/modules/nets_utils.py
@@ -12,6 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # Modified from espnet(https://github.com/espnet/espnet)
+from typing import Tuple
+
 import paddle
 from paddle import nn
 from typeguard import check_argument_types
@@ -129,3 +131,66 @@ def initialize(model: nn.Layer, init: str):
                 nn.initializer.Constant())
     else:
         raise ValueError("Unknown initialization: " + init)
+
+
+# for VITS
+def get_random_segments(
+        x: paddle.Tensor,
+        x_lengths: paddle.Tensor,
+        segment_size: int, ) -> Tuple[paddle.Tensor, paddle.Tensor]:
+    """Get random segments.
+    Args:
+        x (Tensor): Input tensor (B, C, T).
+        x_lengths (Tensor): Length tensor (B,).
+        segment_size (int): Segment size.
+    Returns:
+        Tensor: Segmented tensor (B, C, segment_size).
+        Tensor: Start index tensor (B,).
+    """
+    b, c, t = paddle.shape(x)
+    max_start_idx = x_lengths - segment_size
+    start_idxs = paddle.cast(paddle.rand([b]) * max_start_idx, 'int64')
+    segments = get_segments(x, start_idxs, segment_size)
+
+    return segments, start_idxs
+
+
+def get_segments(
+        x: paddle.Tensor,
+        start_idxs: paddle.Tensor,
+        segment_size: int, ) -> paddle.Tensor:
+    """Get segments.
+    Args:
+        x (Tensor): Input tensor (B, C, T).
+        start_idxs (Tensor): Start index tensor (B,).
+        segment_size (int): Segment size.
+    Returns:
+        Tensor: Segmented tensor (B, C, segment_size).
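+
+    Note:
+        VITS decodes only these short random windows of the latent sequence
+        during training, which keeps the waveform-domain losses affordable
+        in memory.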
+ """ + b, c, t = paddle.shape(x) + segments = paddle.zeros([b, c, segment_size], dtype=x.dtype) + for i, start_idx in enumerate(start_idxs): + segments[i] = x[i, :, start_idx:start_idx + segment_size] + return segments + + +# see https://github.com/PaddlePaddle/X2Paddle/blob/develop/docs/pytorch_project_convertor/API_docs/ops/torch.gather.md +def paddle_gather(x, dim, index): + index_shape = index.shape + index_flatten = index.flatten() + if dim < 0: + dim = len(x.shape) + dim + nd_index = [] + for k in range(len(x.shape)): + if k == dim: + nd_index.append(index_flatten) + else: + reshape_shape = [1] * len(x.shape) + reshape_shape[k] = x.shape[k] + x_arange = paddle.arange(x.shape[k], dtype=index.dtype) + x_arange = x_arange.reshape(reshape_shape) + dim_index = paddle.expand(x_arange, index_shape).flatten() + nd_index.append(dim_index) + ind2 = paddle.transpose(paddle.stack(nd_index), [1, 0]).astype("int64") + paddle_out = paddle.gather_nd(x, ind2).reshape(index_shape) + return paddle_out From 9bf29091d67c8fe3271199869194155a46ec0d34 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Fri, 20 May 2022 02:37:01 +0000 Subject: [PATCH 003/127] update readme --- demos/speech_server/README.md | 95 ++++++++++++----------- demos/speech_server/README_cn.md | 127 ++++++++++++++++--------------- 2 files changed, 111 insertions(+), 111 deletions(-) diff --git a/demos/speech_server/README.md b/demos/speech_server/README.md index 5a3de0ccd..a03a43dff 100644 --- a/demos/speech_server/README.md +++ b/demos/speech_server/README.md @@ -257,13 +257,13 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespee paddlespeech_client vector --task spk --server_ip 127.0.0.1 --port 8090 --input 85236145389.wav ``` - * Usage: + Usage: ``` bash paddlespeech_client vector --help ``` - * Arguments: + Arguments: * server_ip: server ip. Default: 127.0.0.1 * port: server port. Default: 8090 * input(required): Input text to generate. 
@@ -271,9 +271,9 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespee * enroll: enroll audio * test: test audio - * Output: + Output: - ``` bash + ```bash [2022-05-08 00:18:44,249] [ INFO] - vector http client start [2022-05-08 00:18:44,250] [ INFO] - the input audio: 85236145389.wav [2022-05-08 00:18:44,250] [ INFO] - endpoint: http://127.0.0.1:8090/paddlespeech/vector @@ -284,19 +284,19 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespee * Python API -``` python -from paddlespeech.server.bin.paddlespeech_client import VectorClientExecutor + ``` python + from paddlespeech.server.bin.paddlespeech_client import VectorClientExecutor -vectorclient_executor = VectorClientExecutor() -res = vectorclient_executor( - input="85236145389.wav", - server_ip="127.0.0.1", - port=8090, - task="spk") -print(res) -``` + vectorclient_executor = VectorClientExecutor() + res = vectorclient_executor( + input="85236145389.wav", + server_ip="127.0.0.1", + port=8090, + task="spk") + print(res) + ``` -* Output: + Output: ``` bash {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'vec': [1.421751856803894, 5.626245498657227, -5.342077255249023, 1.1773887872695923, 3.3080549240112305, 1.7565933465957642, 5.167886257171631, 10.806358337402344, -3.8226819038391113, -5.614140033721924, 2.6238479614257812, -0.8072972893714905, 1.9635076522827148, -7.312870025634766, 0.011035939678549767, -9.723129272460938, 0.6619706153869629, -6.976806163787842, 10.213476181030273, 7.494769096374512, 2.9105682373046875, 3.8949244022369385, 3.799983501434326, 7.106168746948242, 16.90532875061035, -7.149388313293457, 8.733108520507812, 3.423006296157837, -4.831653594970703, -11.403363227844238, 11.232224464416504, 7.127461910247803, -4.282842636108398, 2.452359437942505, -5.130749702453613, -18.17766761779785, -2.6116831302642822, -11.000344276428223, -6.731433391571045, 1.6564682722091675, 0.7618281245231628, 1.125300407409668, -2.0838370323181152, 4.725743293762207, -8.782588005065918, -3.5398752689361572, 3.8142364025115967, 5.142068862915039, 2.1620609760284424, 4.09643030166626, -6.416214942932129, 12.747446060180664, 1.9429892301559448, -15.15294361114502, 6.417416095733643, 16.09701156616211, -9.716667175292969, -1.9920575618743896, -3.36494779586792, -1.8719440698623657, 11.567351341247559, 3.6978814601898193, 11.258262634277344, 7.442368507385254, 9.183408737182617, 4.528149127960205, -1.2417854070663452, 4.395912170410156, 6.6727728843688965, 5.88988733291626, 7.627128601074219, -0.6691966652870178, -11.889698028564453, -9.20886516571045, -7.42740535736084, -3.777663230895996, 6.917238712310791, -9.848755836486816, -2.0944676399230957, -5.1351165771484375, 0.4956451654434204, 9.317537307739258, -5.914181232452393, -1.809860348701477, -0.11738915741443634, -7.1692705154418945, -1.057827353477478, -5.721670627593994, -5.117385387420654, 16.13765525817871, -4.473617076873779, 7.6624321937561035, -0.55381840467453, 9.631585121154785, -6.470459461212158, -8.548508644104004, 4.371616840362549, -0.7970245480537415, 4.4789886474609375, -2.975860834121704, 3.2721822261810303, 2.838287830352783, 5.134591102600098, -9.19079875946045, -0.5657302737236023, -4.8745832443237305, 2.3165574073791504, -5.984319686889648, -2.1798853874206543, 0.3554139733314514, -0.3178512752056122, 9.493552207946777, 2.1144471168518066, 4.358094692230225, -12.089824676513672, 8.451693534851074, -7.925466537475586, 4.624246597290039, 4.428936958312988, 18.69200897216797, 
-2.6204581260681152, -5.14918851852417, -0.3582090139389038, 8.488558769226074, 4.98148775100708, -9.326835632324219, -2.2544219493865967, 6.641760349273682, 1.2119598388671875, 10.977124214172363, 16.555034637451172, 3.3238420486450195, 9.551861763000488, -1.6676981449127197, -0.7953944206237793, -8.605667114257812, -0.4735655188560486, 2.674196243286133, -5.359177112579346, -2.66738224029541, 0.6660683155059814, 15.44322681427002, 4.740593433380127, -3.472534418106079, 11.592567443847656, -2.0544962882995605, 1.736127495765686, -8.265326499938965, -9.30447769165039, 5.406829833984375, -1.518022894859314, -7.746612548828125, -6.089611053466797, 0.07112743705511093, -0.3490503430366516, -8.64989185333252, -9.998957633972168, -2.564845085144043, -0.5399947762489319, 2.6018123626708984, -0.3192799389362335, -1.8815255165100098, -2.0721492767333984, -3.410574436187744, -8.29980754852295, 1.483638048171997, -15.365986824035645, -8.288211822509766, 3.884779930114746, -3.4876468181610107, 7.362999439239502, 0.4657334089279175, 3.1326050758361816, 12.438895225524902, -1.8337041139602661, 4.532927989959717, 2.7264339923858643, 10.14534854888916, -6.521963596343994, 2.897155523300171, -3.392582654953003, 5.079153060913086, 7.7597246170043945, 4.677570819854736, 5.845779895782471, 2.402411460876465, 7.7071051597595215, 3.9711380004882812, -6.39003849029541, 6.12687873840332, -3.776029348373413, -11.118121147155762]}} @@ -314,13 +314,13 @@ print(res) paddlespeech_client vector --task score --server_ip 127.0.0.1 --port 8090 --enroll 85236145389.wav --test 123456789.wav ``` - * Usage: + Usage: ``` bash paddlespeech_client vector --help ``` - * Arguments: + Arguments: * server_ip: server ip. Default: 127.0.0.1 * port: server port. Default: 8090 * input(required): Input text to generate. @@ -328,42 +328,41 @@ print(res) * enroll: enroll audio * test: test audio -* Output: - -``` bash - [2022-05-09 10:28:40,556] [ INFO] - vector score http client start - [2022-05-09 10:28:40,556] [ INFO] - enroll audio: 85236145389.wav, test audio: 123456789.wav - [2022-05-09 10:28:40,556] [ INFO] - endpoint: http://127.0.0.1:8090/paddlespeech/vector/score - [2022-05-09 10:28:40,731] [ INFO] - The vector score is: {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'score': 0.4292638897895813}} - [2022-05-09 10:28:40,731] [ INFO] - The vector: None - [2022-05-09 10:28:40,731] [ INFO] - Response time 0.175514 s. -``` + Output: + + ``` bash + [2022-05-09 10:28:40,556] [ INFO] - vector score http client start + [2022-05-09 10:28:40,556] [ INFO] - enroll audio: 85236145389.wav, test audio: 123456789.wav + [2022-05-09 10:28:40,556] [ INFO] - endpoint: http://127.0.0.1:8090/paddlespeech/vector/score + [2022-05-09 10:28:40,731] [ INFO] - The vector score is: {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'score': 0.4292638897895813}} + [2022-05-09 10:28:40,731] [ INFO] - The vector: None + [2022-05-09 10:28:40,731] [ INFO] - Response time 0.175514 s. 
+ ``` * Python API -``` python -from paddlespeech.server.bin.paddlespeech_client import VectorClientExecutor - -vectorclient_executor = VectorClientExecutor() -res = vectorclient_executor( - input=None, - enroll_audio="85236145389.wav", - test_audio="123456789.wav", - server_ip="127.0.0.1", - port=8090, - task="score") -print(res) -``` + ``` python + from paddlespeech.server.bin.paddlespeech_client import VectorClientExecutor -* Output: + vectorclient_executor = VectorClientExecutor() + res = vectorclient_executor( + input=None, + enroll_audio="85236145389.wav", + test_audio="123456789.wav", + server_ip="127.0.0.1", + port=8090, + task="score") + print(res) + ``` -``` bash -[2022-05-09 10:34:54,769] [ INFO] - vector score http client start -[2022-05-09 10:34:54,771] [ INFO] - enroll audio: 85236145389.wav, test audio: 123456789.wav -[2022-05-09 10:34:54,771] [ INFO] - endpoint: http://127.0.0.1:8090/paddlespeech/vector/score -[2022-05-09 10:34:55,026] [ INFO] - The vector score is: {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'score': 0.4292638897895813}} -``` + Output: + ``` bash + [2022-05-09 10:34:54,769] [ INFO] - vector score http client start + [2022-05-09 10:34:54,771] [ INFO] - enroll audio: 85236145389.wav, test audio: 123456789.wav + [2022-05-09 10:34:54,771] [ INFO] - endpoint: http://127.0.0.1:8090/paddlespeech/vector/score + [2022-05-09 10:34:55,026] [ INFO] - The vector score is: {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'score': 0.4292638897895813}} + ``` ### 8. Punctuation prediction @@ -382,7 +381,7 @@ print(res) ```bash paddlespeech_client text --help ``` - 参数: + Arguments: - `server_ip`: server ip. Default: 127.0.0.1 - `port`: server port. Default: 8090 - `input`(required): Input text to get punctuation. 
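The punctuation client can likewise be driven from Python. A minimal sketch, assuming the package exposes a `TextClientExecutor` analogous to the `VectorClientExecutor` used above (the sample sentence is only an illustration):

```python
from paddlespeech.server.bin.paddlespeech_client import TextClientExecutor

# Assumed text-task analogue of the VectorClientExecutor shown earlier.
textclient_executor = TextClientExecutor()
res = textclient_executor(
    input="我认为跑步最重要的就是给我带来了身体健康",  # unpunctuated input text
    server_ip="127.0.0.1",
    port=8090)
print(res)  # the same text with punctuation restored
```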
diff --git a/demos/speech_server/README_cn.md b/demos/speech_server/README_cn.md index 51b6caa40..d20f7136e 100644 --- a/demos/speech_server/README_cn.md +++ b/demos/speech_server/README_cn.md @@ -3,7 +3,7 @@ # 语音服务 ## 介绍 -这个demo是一个启动离线语音服务和访问服务的实现。它可以通过使用`paddlespeech_server` 和 `paddlespeech_client`的单个命令或 python 的几行代码来实现。 +这个 demo 是一个启动离线语音服务和访问服务的实现。它可以通过使用`paddlespeech_server` 和 `paddlespeech_client`的单个命令或 python 的几行代码来实现。 ## 使用方法 @@ -24,7 +24,7 @@ ASR client 的输入是一个 WAV 文件(`.wav`),并且采样率必须与模型的采样率相同。 -可以下载此 ASR client的示例音频: +可以下载此 ASR client 的示例音频: ```bash wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespeech.bj.bcebos.com/PaddleAudio/en.wav ``` @@ -99,7 +99,7 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespee ``` 参数: - - `server_ip`: 服务端ip地址,默认: 127.0.0.1。 + - `server_ip`: 服务端 ip 地址,默认: 127.0.0.1。 - `port`: 服务端口,默认: 8090。 - `input`(必须输入): 用于识别的音频文件。 - `sample_rate`: 音频采样率,默认值:16000。 @@ -215,7 +215,7 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespee paddlespeech_client cls --help ``` 参数: - - `server_ip`: 服务端ip地址,默认: 127.0.0.1。 + - `server_ip`: 服务端 ip 地址,默认: 127.0.0.1。 - `port`: 服务端口,默认: 8090。 - `input`(必须输入): 用于分类的音频文件。 - `topk`: 分类结果的topk。 @@ -261,48 +261,48 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespee paddlespeech_client vector --task spk --server_ip 127.0.0.1 --port 8090 --input 85236145389.wav ``` -* 使用帮助: + 使用帮助: ``` bash paddlespeech_client vector --help ``` -* 参数: + 参数: * server_ip: 服务端ip地址,默认: 127.0.0.1。 * port: 服务端口,默认: 8090。 * input(必须输入): 用于识别的音频文件。 * task: vector 的任务,可选spk或者score。默认是 spk。 * enroll: 注册音频;。 * test: 测试音频。 -* 输出: + 输出: -``` bash - [2022-05-08 00:18:44,249] [ INFO] - vector http client start - [2022-05-08 00:18:44,250] [ INFO] - the input audio: 85236145389.wav - [2022-05-08 00:18:44,250] [ INFO] - endpoint: http://127.0.0.1:8090/paddlespeech/vector - [2022-05-08 00:18:44,250] [ INFO] - http://127.0.0.1:8590/paddlespeech/vector - [2022-05-08 00:18:44,406] [ INFO] - The vector: {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'vec': [1.421751856803894, 5.626245498657227, -5.342077255249023, 1.1773887872695923, 3.3080549240112305, 1.7565933465957642, 5.167886257171631, 10.806358337402344, -3.8226819038391113, -5.614140033721924, 2.6238479614257812, -0.8072972893714905, 1.9635076522827148, -7.312870025634766, 0.011035939678549767, -9.723129272460938, 0.6619706153869629, -6.976806163787842, 10.213476181030273, 7.494769096374512, 2.9105682373046875, 3.8949244022369385, 3.799983501434326, 7.106168746948242, 16.90532875061035, -7.149388313293457, 8.733108520507812, 3.423006296157837, -4.831653594970703, -11.403363227844238, 11.232224464416504, 7.127461910247803, -4.282842636108398, 2.452359437942505, -5.130749702453613, -18.17766761779785, -2.6116831302642822, -11.000344276428223, -6.731433391571045, 1.6564682722091675, 0.7618281245231628, 1.125300407409668, -2.0838370323181152, 4.725743293762207, -8.782588005065918, -3.5398752689361572, 3.8142364025115967, 5.142068862915039, 2.1620609760284424, 4.09643030166626, -6.416214942932129, 12.747446060180664, 1.9429892301559448, -15.15294361114502, 6.417416095733643, 16.09701156616211, -9.716667175292969, -1.9920575618743896, -3.36494779586792, -1.8719440698623657, 11.567351341247559, 3.6978814601898193, 11.258262634277344, 7.442368507385254, 9.183408737182617, 4.528149127960205, -1.2417854070663452, 4.395912170410156, 6.6727728843688965, 5.88988733291626, 
7.627128601074219, -0.6691966652870178, -11.889698028564453, -9.20886516571045, -7.42740535736084, -3.777663230895996, 6.917238712310791, -9.848755836486816, -2.0944676399230957, -5.1351165771484375, 0.4956451654434204, 9.317537307739258, -5.914181232452393, -1.809860348701477, -0.11738915741443634, -7.1692705154418945, -1.057827353477478, -5.721670627593994, -5.117385387420654, 16.13765525817871, -4.473617076873779, 7.6624321937561035, -0.55381840467453, 9.631585121154785, -6.470459461212158, -8.548508644104004, 4.371616840362549, -0.7970245480537415, 4.4789886474609375, -2.975860834121704, 3.2721822261810303, 2.838287830352783, 5.134591102600098, -9.19079875946045, -0.5657302737236023, -4.8745832443237305, 2.3165574073791504, -5.984319686889648, -2.1798853874206543, 0.3554139733314514, -0.3178512752056122, 9.493552207946777, 2.1144471168518066, 4.358094692230225, -12.089824676513672, 8.451693534851074, -7.925466537475586, 4.624246597290039, 4.428936958312988, 18.69200897216797, -2.6204581260681152, -5.14918851852417, -0.3582090139389038, 8.488558769226074, 4.98148775100708, -9.326835632324219, -2.2544219493865967, 6.641760349273682, 1.2119598388671875, 10.977124214172363, 16.555034637451172, 3.3238420486450195, 9.551861763000488, -1.6676981449127197, -0.7953944206237793, -8.605667114257812, -0.4735655188560486, 2.674196243286133, -5.359177112579346, -2.66738224029541, 0.6660683155059814, 15.44322681427002, 4.740593433380127, -3.472534418106079, 11.592567443847656, -2.0544962882995605, 1.736127495765686, -8.265326499938965, -9.30447769165039, 5.406829833984375, -1.518022894859314, -7.746612548828125, -6.089611053466797, 0.07112743705511093, -0.3490503430366516, -8.64989185333252, -9.998957633972168, -2.564845085144043, -0.5399947762489319, 2.6018123626708984, -0.3192799389362335, -1.8815255165100098, -2.0721492767333984, -3.410574436187744, -8.29980754852295, 1.483638048171997, -15.365986824035645, -8.288211822509766, 3.884779930114746, -3.4876468181610107, 7.362999439239502, 0.4657334089279175, 3.1326050758361816, 12.438895225524902, -1.8337041139602661, 4.532927989959717, 2.7264339923858643, 10.14534854888916, -6.521963596343994, 2.897155523300171, -3.392582654953003, 5.079153060913086, 7.7597246170043945, 4.677570819854736, 5.845779895782471, 2.402411460876465, 7.7071051597595215, 3.9711380004882812, -6.39003849029541, 6.12687873840332, -3.776029348373413, -11.118121147155762]}} - [2022-05-08 00:18:44,406] [ INFO] - Response time 0.156481 s. 
-``` + ``` bash + [2022-05-08 00:18:44,249] [ INFO] - vector http client start + [2022-05-08 00:18:44,250] [ INFO] - the input audio: 85236145389.wav + [2022-05-08 00:18:44,250] [ INFO] - endpoint: http://127.0.0.1:8090/paddlespeech/vector + [2022-05-08 00:18:44,250] [ INFO] - http://127.0.0.1:8590/paddlespeech/vector + [2022-05-08 00:18:44,406] [ INFO] - The vector: {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'vec': [1.421751856803894, 5.626245498657227, -5.342077255249023, 1.1773887872695923, 3.3080549240112305, 1.7565933465957642, 5.167886257171631, 10.806358337402344, -3.8226819038391113, -5.614140033721924, 2.6238479614257812, -0.8072972893714905, 1.9635076522827148, -7.312870025634766, 0.011035939678549767, -9.723129272460938, 0.6619706153869629, -6.976806163787842, 10.213476181030273, 7.494769096374512, 2.9105682373046875, 3.8949244022369385, 3.799983501434326, 7.106168746948242, 16.90532875061035, -7.149388313293457, 8.733108520507812, 3.423006296157837, -4.831653594970703, -11.403363227844238, 11.232224464416504, 7.127461910247803, -4.282842636108398, 2.452359437942505, -5.130749702453613, -18.17766761779785, -2.6116831302642822, -11.000344276428223, -6.731433391571045, 1.6564682722091675, 0.7618281245231628, 1.125300407409668, -2.0838370323181152, 4.725743293762207, -8.782588005065918, -3.5398752689361572, 3.8142364025115967, 5.142068862915039, 2.1620609760284424, 4.09643030166626, -6.416214942932129, 12.747446060180664, 1.9429892301559448, -15.15294361114502, 6.417416095733643, 16.09701156616211, -9.716667175292969, -1.9920575618743896, -3.36494779586792, -1.8719440698623657, 11.567351341247559, 3.6978814601898193, 11.258262634277344, 7.442368507385254, 9.183408737182617, 4.528149127960205, -1.2417854070663452, 4.395912170410156, 6.6727728843688965, 5.88988733291626, 7.627128601074219, -0.6691966652870178, -11.889698028564453, -9.20886516571045, -7.42740535736084, -3.777663230895996, 6.917238712310791, -9.848755836486816, -2.0944676399230957, -5.1351165771484375, 0.4956451654434204, 9.317537307739258, -5.914181232452393, -1.809860348701477, -0.11738915741443634, -7.1692705154418945, -1.057827353477478, -5.721670627593994, -5.117385387420654, 16.13765525817871, -4.473617076873779, 7.6624321937561035, -0.55381840467453, 9.631585121154785, -6.470459461212158, -8.548508644104004, 4.371616840362549, -0.7970245480537415, 4.4789886474609375, -2.975860834121704, 3.2721822261810303, 2.838287830352783, 5.134591102600098, -9.19079875946045, -0.5657302737236023, -4.8745832443237305, 2.3165574073791504, -5.984319686889648, -2.1798853874206543, 0.3554139733314514, -0.3178512752056122, 9.493552207946777, 2.1144471168518066, 4.358094692230225, -12.089824676513672, 8.451693534851074, -7.925466537475586, 4.624246597290039, 4.428936958312988, 18.69200897216797, -2.6204581260681152, -5.14918851852417, -0.3582090139389038, 8.488558769226074, 4.98148775100708, -9.326835632324219, -2.2544219493865967, 6.641760349273682, 1.2119598388671875, 10.977124214172363, 16.555034637451172, 3.3238420486450195, 9.551861763000488, -1.6676981449127197, -0.7953944206237793, -8.605667114257812, -0.4735655188560486, 2.674196243286133, -5.359177112579346, -2.66738224029541, 0.6660683155059814, 15.44322681427002, 4.740593433380127, -3.472534418106079, 11.592567443847656, -2.0544962882995605, 1.736127495765686, -8.265326499938965, -9.30447769165039, 5.406829833984375, -1.518022894859314, -7.746612548828125, -6.089611053466797, 0.07112743705511093, -0.3490503430366516, 
-8.64989185333252, -9.998957633972168, -2.564845085144043, -0.5399947762489319, 2.6018123626708984, -0.3192799389362335, -1.8815255165100098, -2.0721492767333984, -3.410574436187744, -8.29980754852295, 1.483638048171997, -15.365986824035645, -8.288211822509766, 3.884779930114746, -3.4876468181610107, 7.362999439239502, 0.4657334089279175, 3.1326050758361816, 12.438895225524902, -1.8337041139602661, 4.532927989959717, 2.7264339923858643, 10.14534854888916, -6.521963596343994, 2.897155523300171, -3.392582654953003, 5.079153060913086, 7.7597246170043945, 4.677570819854736, 5.845779895782471, 2.402411460876465, 7.7071051597595215, 3.9711380004882812, -6.39003849029541, 6.12687873840332, -3.776029348373413, -11.118121147155762]}} + [2022-05-08 00:18:44,406] [ INFO] - Response time 0.156481 s. + ``` * Python API -``` python -from paddlespeech.server.bin.paddlespeech_client import VectorClientExecutor + ``` python + from paddlespeech.server.bin.paddlespeech_client import VectorClientExecutor -vectorclient_executor = VectorClientExecutor() -res = vectorclient_executor( - input="85236145389.wav", - server_ip="127.0.0.1", - port=8090, - task="spk") -print(res) -``` + vectorclient_executor = VectorClientExecutor() + res = vectorclient_executor( + input="85236145389.wav", + server_ip="127.0.0.1", + port=8090, + task="spk") + print(res) + ``` -* 输出: + 输出: -``` bash - {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'vec': [1.421751856803894, 5.626245498657227, -5.342077255249023, 1.1773887872695923, 3.3080549240112305, 1.7565933465957642, 5.167886257171631, 10.806358337402344, -3.8226819038391113, -5.614140033721924, 2.6238479614257812, -0.8072972893714905, 1.9635076522827148, -7.312870025634766, 0.011035939678549767, -9.723129272460938, 0.6619706153869629, -6.976806163787842, 10.213476181030273, 7.494769096374512, 2.9105682373046875, 3.8949244022369385, 3.799983501434326, 7.106168746948242, 16.90532875061035, -7.149388313293457, 8.733108520507812, 3.423006296157837, -4.831653594970703, -11.403363227844238, 11.232224464416504, 7.127461910247803, -4.282842636108398, 2.452359437942505, -5.130749702453613, -18.17766761779785, -2.6116831302642822, -11.000344276428223, -6.731433391571045, 1.6564682722091675, 0.7618281245231628, 1.125300407409668, -2.0838370323181152, 4.725743293762207, -8.782588005065918, -3.5398752689361572, 3.8142364025115967, 5.142068862915039, 2.1620609760284424, 4.09643030166626, -6.416214942932129, 12.747446060180664, 1.9429892301559448, -15.15294361114502, 6.417416095733643, 16.09701156616211, -9.716667175292969, -1.9920575618743896, -3.36494779586792, -1.8719440698623657, 11.567351341247559, 3.6978814601898193, 11.258262634277344, 7.442368507385254, 9.183408737182617, 4.528149127960205, -1.2417854070663452, 4.395912170410156, 6.6727728843688965, 5.88988733291626, 7.627128601074219, -0.6691966652870178, -11.889698028564453, -9.20886516571045, -7.42740535736084, -3.777663230895996, 6.917238712310791, -9.848755836486816, -2.0944676399230957, -5.1351165771484375, 0.4956451654434204, 9.317537307739258, -5.914181232452393, -1.809860348701477, -0.11738915741443634, -7.1692705154418945, -1.057827353477478, -5.721670627593994, -5.117385387420654, 16.13765525817871, -4.473617076873779, 7.6624321937561035, -0.55381840467453, 9.631585121154785, -6.470459461212158, -8.548508644104004, 4.371616840362549, -0.7970245480537415, 4.4789886474609375, -2.975860834121704, 3.2721822261810303, 2.838287830352783, 5.134591102600098, -9.19079875946045, -0.5657302737236023, 
-4.8745832443237305, 2.3165574073791504, -5.984319686889648, -2.1798853874206543, 0.3554139733314514, -0.3178512752056122, 9.493552207946777, 2.1144471168518066, 4.358094692230225, -12.089824676513672, 8.451693534851074, -7.925466537475586, 4.624246597290039, 4.428936958312988, 18.69200897216797, -2.6204581260681152, -5.14918851852417, -0.3582090139389038, 8.488558769226074, 4.98148775100708, -9.326835632324219, -2.2544219493865967, 6.641760349273682, 1.2119598388671875, 10.977124214172363, 16.555034637451172, 3.3238420486450195, 9.551861763000488, -1.6676981449127197, -0.7953944206237793, -8.605667114257812, -0.4735655188560486, 2.674196243286133, -5.359177112579346, -2.66738224029541, 0.6660683155059814, 15.44322681427002, 4.740593433380127, -3.472534418106079, 11.592567443847656, -2.0544962882995605, 1.736127495765686, -8.265326499938965, -9.30447769165039, 5.406829833984375, -1.518022894859314, -7.746612548828125, -6.089611053466797, 0.07112743705511093, -0.3490503430366516, -8.64989185333252, -9.998957633972168, -2.564845085144043, -0.5399947762489319, 2.6018123626708984, -0.3192799389362335, -1.8815255165100098, -2.0721492767333984, -3.410574436187744, -8.29980754852295, 1.483638048171997, -15.365986824035645, -8.288211822509766, 3.884779930114746, -3.4876468181610107, 7.362999439239502, 0.4657334089279175, 3.1326050758361816, 12.438895225524902, -1.8337041139602661, 4.532927989959717, 2.7264339923858643, 10.14534854888916, -6.521963596343994, 2.897155523300171, -3.392582654953003, 5.079153060913086, 7.7597246170043945, 4.677570819854736, 5.845779895782471, 2.402411460876465, 7.7071051597595215, 3.9711380004882812, -6.39003849029541, 6.12687873840332, -3.776029348373413, -11.118121147155762]}} -``` + ``` bash + {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'vec': [1.421751856803894, 5.626245498657227, -5.342077255249023, 1.1773887872695923, 3.3080549240112305, 1.7565933465957642, 5.167886257171631, 10.806358337402344, -3.8226819038391113, -5.614140033721924, 2.6238479614257812, -0.8072972893714905, 1.9635076522827148, -7.312870025634766, 0.011035939678549767, -9.723129272460938, 0.6619706153869629, -6.976806163787842, 10.213476181030273, 7.494769096374512, 2.9105682373046875, 3.8949244022369385, 3.799983501434326, 7.106168746948242, 16.90532875061035, -7.149388313293457, 8.733108520507812, 3.423006296157837, -4.831653594970703, -11.403363227844238, 11.232224464416504, 7.127461910247803, -4.282842636108398, 2.452359437942505, -5.130749702453613, -18.17766761779785, -2.6116831302642822, -11.000344276428223, -6.731433391571045, 1.6564682722091675, 0.7618281245231628, 1.125300407409668, -2.0838370323181152, 4.725743293762207, -8.782588005065918, -3.5398752689361572, 3.8142364025115967, 5.142068862915039, 2.1620609760284424, 4.09643030166626, -6.416214942932129, 12.747446060180664, 1.9429892301559448, -15.15294361114502, 6.417416095733643, 16.09701156616211, -9.716667175292969, -1.9920575618743896, -3.36494779586792, -1.8719440698623657, 11.567351341247559, 3.6978814601898193, 11.258262634277344, 7.442368507385254, 9.183408737182617, 4.528149127960205, -1.2417854070663452, 4.395912170410156, 6.6727728843688965, 5.88988733291626, 7.627128601074219, -0.6691966652870178, -11.889698028564453, -9.20886516571045, -7.42740535736084, -3.777663230895996, 6.917238712310791, -9.848755836486816, -2.0944676399230957, -5.1351165771484375, 0.4956451654434204, 9.317537307739258, -5.914181232452393, -1.809860348701477, -0.11738915741443634, -7.1692705154418945, 
-1.057827353477478, -5.721670627593994, -5.117385387420654, 16.13765525817871, -4.473617076873779, 7.6624321937561035, -0.55381840467453, 9.631585121154785, -6.470459461212158, -8.548508644104004, 4.371616840362549, -0.7970245480537415, 4.4789886474609375, -2.975860834121704, 3.2721822261810303, 2.838287830352783, 5.134591102600098, -9.19079875946045, -0.5657302737236023, -4.8745832443237305, 2.3165574073791504, -5.984319686889648, -2.1798853874206543, 0.3554139733314514, -0.3178512752056122, 9.493552207946777, 2.1144471168518066, 4.358094692230225, -12.089824676513672, 8.451693534851074, -7.925466537475586, 4.624246597290039, 4.428936958312988, 18.69200897216797, -2.6204581260681152, -5.14918851852417, -0.3582090139389038, 8.488558769226074, 4.98148775100708, -9.326835632324219, -2.2544219493865967, 6.641760349273682, 1.2119598388671875, 10.977124214172363, 16.555034637451172, 3.3238420486450195, 9.551861763000488, -1.6676981449127197, -0.7953944206237793, -8.605667114257812, -0.4735655188560486, 2.674196243286133, -5.359177112579346, -2.66738224029541, 0.6660683155059814, 15.44322681427002, 4.740593433380127, -3.472534418106079, 11.592567443847656, -2.0544962882995605, 1.736127495765686, -8.265326499938965, -9.30447769165039, 5.406829833984375, -1.518022894859314, -7.746612548828125, -6.089611053466797, 0.07112743705511093, -0.3490503430366516, -8.64989185333252, -9.998957633972168, -2.564845085144043, -0.5399947762489319, 2.6018123626708984, -0.3192799389362335, -1.8815255165100098, -2.0721492767333984, -3.410574436187744, -8.29980754852295, 1.483638048171997, -15.365986824035645, -8.288211822509766, 3.884779930114746, -3.4876468181610107, 7.362999439239502, 0.4657334089279175, 3.1326050758361816, 12.438895225524902, -1.8337041139602661, 4.532927989959717, 2.7264339923858643, 10.14534854888916, -6.521963596343994, 2.897155523300171, -3.392582654953003, 5.079153060913086, 7.7597246170043945, 4.677570819854736, 5.845779895782471, 2.402411460876465, 7.7071051597595215, 3.9711380004882812, -6.39003849029541, 6.12687873840332, -3.776029348373413, -11.118121147155762]}} + ``` #### 7.2 音频声纹打分 @@ -315,47 +315,48 @@ print(res) paddlespeech_client vector --task score --server_ip 127.0.0.1 --port 8090 --enroll 85236145389.wav --test 123456789.wav ``` -* 使用帮助: + 使用帮助: ``` bash paddlespeech_client vector --help ``` -* 参数: + 参数: * server_ip: 服务端ip地址,默认: 127.0.0.1。 * port: 服务端口,默认: 8090。 * input(必须输入): 用于识别的音频文件。 * task: vector 的任务,可选spk或者score。默认是 spk。 * enroll: 注册音频;。 * test: 测试音频。 -* 输出: -``` bash - [2022-05-09 10:28:40,556] [ INFO] - vector score http client start - [2022-05-09 10:28:40,556] [ INFO] - enroll audio: 85236145389.wav, test audio: 123456789.wav - [2022-05-09 10:28:40,556] [ INFO] - endpoint: http://127.0.0.1:8090/paddlespeech/vector/score - [2022-05-09 10:28:40,731] [ INFO] - The vector score is: {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'score': 0.4292638897895813}} - [2022-05-09 10:28:40,731] [ INFO] - The vector: None - [2022-05-09 10:28:40,731] [ INFO] - Response time 0.175514 s. 
-``` + 输出: + + ``` bash + [2022-05-09 10:28:40,556] [ INFO] - vector score http client start + [2022-05-09 10:28:40,556] [ INFO] - enroll audio: 85236145389.wav, test audio: 123456789.wav + [2022-05-09 10:28:40,556] [ INFO] - endpoint: http://127.0.0.1:8090/paddlespeech/vector/score + [2022-05-09 10:28:40,731] [ INFO] - The vector score is: {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'score': 0.4292638897895813}} + [2022-05-09 10:28:40,731] [ INFO] - The vector: None + [2022-05-09 10:28:40,731] [ INFO] - Response time 0.175514 s. + ``` * Python API -``` python -from paddlespeech.server.bin.paddlespeech_client import VectorClientExecutor - -vectorclient_executor = VectorClientExecutor() -res = vectorclient_executor( - input=None, - enroll_audio="85236145389.wav", - test_audio="123456789.wav", - server_ip="127.0.0.1", - port=8090, - task="score") -print(res) -``` + ``` python + from paddlespeech.server.bin.paddlespeech_client import VectorClientExecutor + + vectorclient_executor = VectorClientExecutor() + res = vectorclient_executor( + input=None, + enroll_audio="85236145389.wav", + test_audio="123456789.wav", + server_ip="127.0.0.1", + port=8090, + task="score") + print(res) + ``` -* 输出: +输出: ``` bash [2022-05-09 10:34:54,769] [ INFO] - vector score http client start @@ -411,17 +412,17 @@ print(res) ``` ## 服务支持的模型 -### ASR支持的模型 -通过 `paddlespeech_server stats --task asr` 获取ASR服务支持的所有模型,其中静态模型可用于 paddle inference 推理。 +### ASR 支持的模型 +通过 `paddlespeech_server stats --task asr` 获取 ASR 服务支持的所有模型,其中静态模型可用于 paddle inference 推理。 -### TTS支持的模型 -通过 `paddlespeech_server stats --task tts` 获取TTS服务支持的所有模型,其中静态模型可用于 paddle inference 推理。 +### TTS 支持的模型 +通过 `paddlespeech_server stats --task tts` 获取 TTS 服务支持的所有模型,其中静态模型可用于 paddle inference 推理。 -### CLS支持的模型 -通过 `paddlespeech_server stats --task cls` 获取CLS服务支持的所有模型,其中静态模型可用于 paddle inference 推理。 +### CLS 支持的模型 +通过 `paddlespeech_server stats --task cls` 获取 CLS 服务支持的所有模型,其中静态模型可用于 paddle inference 推理。 -### Vector支持的模型 -通过 `paddlespeech_server stats --task vector` 获取Vector服务支持的所有模型。 +### Vector 支持的模型 +通过 `paddlespeech_server stats --task vector` 获取 Vector 服务支持的所有模型。 ### Text支持的模型 -通过 `paddlespeech_server stats --task text` 获取Text服务支持的所有模型。 +通过 `paddlespeech_server stats --task text` 获取 Text 服务支持的所有模型。 From 16e50edf21d6698c28d416bddd2e4d12606f28ec Mon Sep 17 00:00:00 2001 From: TianYuan Date: Fri, 20 May 2022 02:40:05 +0000 Subject: [PATCH 004/127] update readme --- demos/speech_server/README_cn.md | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/demos/speech_server/README_cn.md b/demos/speech_server/README_cn.md index d20f7136e..4895b182b 100644 --- a/demos/speech_server/README_cn.md +++ b/demos/speech_server/README_cn.md @@ -198,10 +198,11 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespee ``` - ### 6. CLS 客户端使用方法 +### 6. 
CLS 客户端使用方法 - **注意:** 初次使用客户端时响应时间会略长 - - 命令行 (推荐使用) +**注意:** 初次使用客户端时响应时间会略长 + +- 命令行 (推荐使用) 若 `127.0.0.1` 不能访问,则需要使用实际服务 IP 地址 @@ -356,20 +357,20 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespee print(res) ``` -输出: + 输出: -``` bash -[2022-05-09 10:34:54,769] [ INFO] - vector score http client start -[2022-05-09 10:34:54,771] [ INFO] - enroll audio: 85236145389.wav, test audio: 123456789.wav -[2022-05-09 10:34:54,771] [ INFO] - endpoint: http://127.0.0.1:8590/paddlespeech/vector/score -[2022-05-09 10:34:55,026] [ INFO] - The vector score is: {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'score': 0.4292638897895813}} -``` + ``` bash + [2022-05-09 10:34:54,769] [ INFO] - vector score http client start + [2022-05-09 10:34:54,771] [ INFO] - enroll audio: 85236145389.wav, test audio: 123456789.wav + [2022-05-09 10:34:54,771] [ INFO] - endpoint: http://127.0.0.1:8590/paddlespeech/vector/score + [2022-05-09 10:34:55,026] [ INFO] - The vector score is: {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'score': 0.4292638897895813}} + ``` ### 8. 标点预测 **注意:** 初次使用客户端时响应时间会略长 - - 命令行 (推荐使用) +- 命令行 (推荐使用) 若 `127.0.0.1` 不能访问,则需要使用实际服务 IP 地址 From 8db06444c5f33c2c658de41bca4a4d165d59e8cf Mon Sep 17 00:00:00 2001 From: TianYuan Date: Fri, 20 May 2022 05:58:21 +0000 Subject: [PATCH 005/127] add vits trainer and synthesize --- examples/csmsc/tts3/conf/default.yaml | 4 +- examples/csmsc/vits/conf/default.yaml | 183 +++++++++ examples/csmsc/vits/local/preprocess.sh | 64 ++++ examples/csmsc/vits/local/synthesize.sh | 18 + examples/csmsc/vits/local/synthesize_e2e.sh | 18 + examples/csmsc/vits/local/train.sh | 12 + examples/csmsc/vits/run.sh | 36 ++ examples/csmsc/voc5/README.md | 2 +- paddlespeech/t2s/datasets/am_batch_fn.py | 42 +++ paddlespeech/t2s/datasets/batch.py | 1 - paddlespeech/t2s/datasets/get_feats.py | 2 - .../t2s/exps/fastspeech2/preprocess.py | 58 +-- .../t2s/exps/gan_vocoder/preprocess.py | 46 ++- .../t2s/exps/speedyspeech/preprocess.py | 40 +- paddlespeech/t2s/exps/syn_utils.py | 2 +- paddlespeech/t2s/exps/synthesize.py | 10 +- paddlespeech/t2s/exps/synthesize_e2e.py | 6 +- paddlespeech/t2s/exps/synthesize_streaming.py | 6 +- paddlespeech/t2s/exps/tacotron2/preprocess.py | 42 ++- .../t2s/exps/transformer_tts/preprocess.py | 39 +- paddlespeech/t2s/exps/vits/normalize.py | 154 +++++++- paddlespeech/t2s/exps/vits/preprocess.py | 335 +++++++++++++++++ paddlespeech/t2s/exps/vits/synthesize.py | 104 ++++++ paddlespeech/t2s/exps/vits/synthesize_e2e.py | 146 ++++++++ paddlespeech/t2s/exps/vits/train.py | 248 ++++++++++++ paddlespeech/t2s/exps/voice_cloning.py | 6 +- paddlespeech/t2s/models/__init__.py | 1 + .../parallel_wavegan_updater.py | 2 +- paddlespeech/t2s/models/vits/__init__.py | 2 + paddlespeech/t2s/models/vits/generator.py | 1 - paddlespeech/t2s/models/vits/vits.py | 197 +--------- paddlespeech/t2s/models/vits/vits_updater.py | 353 ++++++++++++++++++ paddlespeech/t2s/training/optimizer.py | 8 + paddlespeech/utils/__init__.py | 13 + paddlespeech/utils/dynamic_import.py | 38 ++ 35 files changed, 1939 insertions(+), 300 deletions(-) create mode 100755 examples/csmsc/vits/local/synthesize_e2e.sh create mode 100644 paddlespeech/t2s/exps/vits/synthesize_e2e.py create mode 100644 paddlespeech/utils/__init__.py create mode 100644 paddlespeech/utils/dynamic_import.py diff --git a/examples/csmsc/tts3/conf/default.yaml b/examples/csmsc/tts3/conf/default.yaml index 2c2a1ea10..08b6f75ba 100644 --- 
a/examples/csmsc/tts3/conf/default.yaml +++ b/examples/csmsc/tts3/conf/default.yaml @@ -86,8 +86,8 @@ updater: # OPTIMIZER SETTING # ########################################################### optimizer: - optim: adam # optimizer type - learning_rate: 0.001 # learning rate + optim: adam # optimizer type + learning_rate: 0.001 # learning rate ########################################################### # TRAINING SETTING # diff --git a/examples/csmsc/vits/conf/default.yaml b/examples/csmsc/vits/conf/default.yaml index e69de29bb..47af780dc 100644 --- a/examples/csmsc/vits/conf/default.yaml +++ b/examples/csmsc/vits/conf/default.yaml @@ -0,0 +1,183 @@ +# This configuration tested on 4 GPUs (V100) with 32GB GPU +# memory. It takes around 2 weeks to finish the training +# but 100k iters model should generate reasonable results. +########################################################### +# FEATURE EXTRACTION SETTING # +########################################################### + +fs: 22050 # sr +n_fft: 1024 # FFT size (samples). +n_shift: 256 # Hop size (samples). 12.5ms +win_length: null # Window length (samples). 50ms + # If set to null, it will be the same as fft_size. +window: "hann" # Window function. + + +########################################################## +# TTS MODEL SETTING # +########################################################## +model: + # generator related + generator_type: vits_generator + generator_params: + hidden_channels: 192 + spks: -1 + global_channels: -1 + segment_size: 32 + text_encoder_attention_heads: 2 + text_encoder_ffn_expand: 4 + text_encoder_blocks: 6 + text_encoder_positionwise_layer_type: "conv1d" + text_encoder_positionwise_conv_kernel_size: 3 + text_encoder_positional_encoding_layer_type: "rel_pos" + text_encoder_self_attention_layer_type: "rel_selfattn" + text_encoder_activation_type: "swish" + text_encoder_normalize_before: True + text_encoder_dropout_rate: 0.1 + text_encoder_positional_dropout_rate: 0.0 + text_encoder_attention_dropout_rate: 0.1 + use_macaron_style_in_text_encoder: True + use_conformer_conv_in_text_encoder: False + text_encoder_conformer_kernel_size: -1 + decoder_kernel_size: 7 + decoder_channels: 512 + decoder_upsample_scales: [8, 8, 2, 2] + decoder_upsample_kernel_sizes: [16, 16, 4, 4] + decoder_resblock_kernel_sizes: [3, 7, 11] + decoder_resblock_dilations: [[1, 3, 5], [1, 3, 5], [1, 3, 5]] + use_weight_norm_in_decoder: True + posterior_encoder_kernel_size: 5 + posterior_encoder_layers: 16 + posterior_encoder_stacks: 1 + posterior_encoder_base_dilation: 1 + posterior_encoder_dropout_rate: 0.0 + use_weight_norm_in_posterior_encoder: True + flow_flows: 4 + flow_kernel_size: 5 + flow_base_dilation: 1 + flow_layers: 4 + flow_dropout_rate: 0.0 + use_weight_norm_in_flow: True + use_only_mean_in_flow: True + stochastic_duration_predictor_kernel_size: 3 + stochastic_duration_predictor_dropout_rate: 0.5 + stochastic_duration_predictor_flows: 4 + stochastic_duration_predictor_dds_conv_layers: 3 + # discriminator related + discriminator_type: hifigan_multi_scale_multi_period_discriminator + discriminator_params: + scales: 1 + scale_downsample_pooling: "AvgPool1D" + scale_downsample_pooling_params: + kernel_size: 4 + stride: 2 + padding: 2 + scale_discriminator_params: + in_channels: 1 + out_channels: 1 + kernel_sizes: [15, 41, 5, 3] + channels: 128 + max_downsample_channels: 1024 + max_groups: 16 + bias: True + downsample_scales: [2, 2, 4, 4, 1] + nonlinear_activation: "leakyrelu" + nonlinear_activation_params: + negative_slope: 0.1 + 
use_weight_norm: True + use_spectral_norm: False + follow_official_norm: False + periods: [2, 3, 5, 7, 11] + period_discriminator_params: + in_channels: 1 + out_channels: 1 + kernel_sizes: [5, 3] + channels: 32 + downsample_scales: [3, 3, 3, 3, 1] + max_downsample_channels: 1024 + bias: True + nonlinear_activation: "leakyrelu" + nonlinear_activation_params: + negative_slope: 0.1 + use_weight_norm: True + use_spectral_norm: False + # others + sampling_rate: 22050 # needed in the inference for saving wav + cache_generator_outputs: True # whether to cache generator outputs in the training + +########################################################### +# LOSS SETTING # +########################################################### +# loss function related +generator_adv_loss_params: + average_by_discriminators: False # whether to average loss value by #discriminators + loss_type: mse # loss type, "mse" or "hinge" +discriminator_adv_loss_params: + average_by_discriminators: False # whether to average loss value by #discriminators + loss_type: mse # loss type, "mse" or "hinge" +feat_match_loss_params: + average_by_discriminators: False # whether to average loss value by #discriminators + average_by_layers: False # whether to average loss value by #layers of each discriminator + include_final_outputs: True # whether to include final outputs for loss calculation +mel_loss_params: + fs: 22050 # must be the same as the training data + fft_size: 1024 # fft points + hop_size: 256 # hop size + win_length: null # window length + window: hann # window type + num_mels: 80 # number of Mel basis + fmin: 0 # minimum frequency for Mel basis + fmax: null # maximum frequency for Mel basis + log_base: null # null represent natural log + +########################################################### +# ADVERSARIAL LOSS SETTING # +########################################################### +lambda_adv: 1.0 # loss scaling coefficient for adversarial loss +lambda_mel: 45.0 # loss scaling coefficient for Mel loss +lambda_feat_match: 2.0 # loss scaling coefficient for feat match loss +lambda_dur: 1.0 # loss scaling coefficient for duration loss +lambda_kl: 1.0 # loss scaling coefficient for KL divergence loss +# others +sampling_rate: 22050 # needed in the inference for saving wav +cache_generator_outputs: True # whether to cache generator outputs in the training + + +########################################################### +# DATA LOADER SETTING # +########################################################### +batch_size: 64 # Batch size. +num_workers: 4 # Number of workers in DataLoader. 
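+# Note: the lambda_* weights in the loss sections above combine the generator
+# objectives into a single scalar, roughly:
+#   loss_G = lambda_adv * adv_loss + lambda_mel * mel_loss
+#          + lambda_feat_match * feat_match_loss
+#          + lambda_dur * dur_loss + lambda_kl * kl_loss
+# (an illustrative sketch; the exact combination lives in the VITS updater)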
+
+##########################################################
+#             OPTIMIZER & SCHEDULER SETTING              #
+##########################################################
+# optimizer setting for generator
+generator_optimizer_params:
+    beta1: 0.8
+    beta2: 0.99
+    epsilon: 1.0e-9
+    weight_decay: 0.0
+generator_scheduler: exponential_decay
+generator_scheduler_params:
+    learning_rate: 2.0e-4
+    gamma: 0.999875
+
+# optimizer setting for discriminator
+discriminator_optimizer_params:
+    beta1: 0.8
+    beta2: 0.99
+    epsilon: 1.0e-9
+    weight_decay: 0.0
+discriminator_scheduler: exponential_decay
+discriminator_scheduler_params:
+    learning_rate: 2.0e-4
+    gamma: 0.999875
+generator_first: False # whether to start updating generator first
+
+##########################################################
+#                OTHER TRAINING SETTING                  #
+##########################################################
+max_epoch: 1000   # number of epochs
+num_snapshots: 10 # max number of snapshots to keep while training
+seed: 777         # random seed number
diff --git a/examples/csmsc/vits/local/preprocess.sh b/examples/csmsc/vits/local/preprocess.sh
index e69de29bb..1d3ae5937 100755
--- a/examples/csmsc/vits/local/preprocess.sh
+++ b/examples/csmsc/vits/local/preprocess.sh
@@ -0,0 +1,64 @@
+#!/bin/bash
+
+stage=0
+stop_stage=100
+
+config_path=$1
+
+if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
+    # get durations from MFA's result
+    echo "Generate durations.txt from MFA results ..."
+    python3 ${MAIN_ROOT}/utils/gen_duration_from_textgrid.py \
+        --inputdir=./baker_alignment_tone \
+        --output=durations.txt \
+        --config=${config_path}
+fi
+
+if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
+    # extract features
+    echo "Extract features ..."
+    python3 ${BIN_DIR}/preprocess.py \
+        --dataset=baker \
+        --rootdir=~/datasets/BZNSYP/ \
+        --dumpdir=dump \
+        --dur-file=durations.txt \
+        --config=${config_path} \
+        --num-cpu=20 \
+        --cut-sil=True
+fi
+
+if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
+    # get features' stats (mean and std)
+    echo "Get features' stats ..."
+    python3 ${MAIN_ROOT}/utils/compute_statistics.py \
+        --metadata=dump/train/raw/metadata.jsonl \
+        --field-name="feats"
+fi
+
+if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
+    # normalize and convert phone/speaker to id, dev and test should use train's stats
+    echo "Normalize ..."
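+    # each split below is normalized with the train-split stats from stage 2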
+ python3 ${BIN_DIR}/normalize.py \ + --metadata=dump/train/raw/metadata.jsonl \ + --dumpdir=dump/train/norm \ + --feats-stats=dump/train/feats_stats.npy \ + --phones-dict=dump/phone_id_map.txt \ + --speaker-dict=dump/speaker_id_map.txt \ + --skip-wav-copy + + python3 ${BIN_DIR}/normalize.py \ + --metadata=dump/dev/raw/metadata.jsonl \ + --dumpdir=dump/dev/norm \ + --feats-stats=dump/train/feats_stats.npy \ + --phones-dict=dump/phone_id_map.txt \ + --speaker-dict=dump/speaker_id_map.txt \ + --skip-wav-copy + + python3 ${BIN_DIR}/normalize.py \ + --metadata=dump/test/raw/metadata.jsonl \ + --dumpdir=dump/test/norm \ + --feats-stats=dump/train/feats_stats.npy \ + --phones-dict=dump/phone_id_map.txt \ + --speaker-dict=dump/speaker_id_map.txt \ + --skip-wav-copy +fi diff --git a/examples/csmsc/vits/local/synthesize.sh b/examples/csmsc/vits/local/synthesize.sh index e69de29bb..c15d5f99f 100755 --- a/examples/csmsc/vits/local/synthesize.sh +++ b/examples/csmsc/vits/local/synthesize.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +config_path=$1 +train_output_path=$2 +ckpt_name=$3 +stage=0 +stop_stage=0 + +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/synthesize.py \ + --config=${config_path} \ + --ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --phones_dict=dump/phone_id_map.txt \ + --test_metadata=dump/test/norm/metadata.jsonl \ + --output_dir=${train_output_path}/test +fi \ No newline at end of file diff --git a/examples/csmsc/vits/local/synthesize_e2e.sh b/examples/csmsc/vits/local/synthesize_e2e.sh new file mode 100755 index 000000000..edbb07bfc --- /dev/null +++ b/examples/csmsc/vits/local/synthesize_e2e.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +config_path=$1 +train_output_path=$2 +ckpt_name=$3 +stage=0 +stop_stage=0 + +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + FLAGS_allocator_strategy=naive_best_fit \ + FLAGS_fraction_of_gpu_memory_to_use=0.01 \ + python3 ${BIN_DIR}/synthesize_e2e.py \ + --config=${config_path} \ + --ckpt=${train_output_path}/checkpoints/${ckpt_name} \ + --phones_dict=dump/phone_id_map.txt \ + --output_dir=${train_output_path}/test_e2e \ + --text=${BIN_DIR}/../sentences.txt +fi diff --git a/examples/csmsc/vits/local/train.sh b/examples/csmsc/vits/local/train.sh index e69de29bb..42fff26ca 100755 --- a/examples/csmsc/vits/local/train.sh +++ b/examples/csmsc/vits/local/train.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +config_path=$1 +train_output_path=$2 + +python3 ${BIN_DIR}/train.py \ + --train-metadata=dump/train/norm/metadata.jsonl \ + --dev-metadata=dump/dev/norm/metadata.jsonl \ + --config=${config_path} \ + --output-dir=${train_output_path} \ + --ngpu=4 \ + --phones-dict=dump/phone_id_map.txt diff --git a/examples/csmsc/vits/run.sh b/examples/csmsc/vits/run.sh index e69de29bb..80e56e7c1 100755 --- a/examples/csmsc/vits/run.sh +++ b/examples/csmsc/vits/run.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +set -e +source path.sh + +gpus=0,1 +stage=0 +stop_stage=100 + +conf_path=conf/default.yaml +train_output_path=exp/default +ckpt_name=snapshot_iter_153.pdz + +# with the following command, you can choose the stage range you want to run +# such as `./run.sh --stage 0 --stop-stage 0` +# this can not be mixed use with `$1`, `$2` ... 
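+# e.g. `./run.sh --stage 1 --stop-stage 2` skips preprocessing, then runs
+# training and synthesis from the test metadata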
+source ${MAIN_ROOT}/utils/parse_options.sh || exit 1 + +if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then + # prepare data + ./local/preprocess.sh ${conf_path} || exit -1 +fi + +if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then + # train model, all `ckpt` under `train_output_path/checkpoints/` dir + CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path} || exit -1 +fi + +if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then + CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1 +fi + +if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then + # synthesize_e2e, vocoder is pwgan + CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh ${conf_path} ${train_output_path} ${ckpt_name} || exit -1 +fi diff --git a/examples/csmsc/voc5/README.md b/examples/csmsc/voc5/README.md index 33e676165..94f93b48b 100644 --- a/examples/csmsc/voc5/README.md +++ b/examples/csmsc/voc5/README.md @@ -130,7 +130,7 @@ HiFiGAN checkpoint contains files listed below. ```text hifigan_csmsc_ckpt_0.1.1 ├── default.yaml # default config used to train hifigan -├── feats_stats.npy # statistics used to normalize spectrogram when training hifigan +├── feats_stats.npy # statistics used to normalize spectrogram when training hifigan └── snapshot_iter_2500000.pdz # generator parameters of hifigan ``` diff --git a/paddlespeech/t2s/datasets/am_batch_fn.py b/paddlespeech/t2s/datasets/am_batch_fn.py index 4e3ad3c12..0b278abaf 100644 --- a/paddlespeech/t2s/datasets/am_batch_fn.py +++ b/paddlespeech/t2s/datasets/am_batch_fn.py @@ -293,3 +293,45 @@ def transformer_single_spk_batch_fn(examples): "speech_lengths": speech_lengths, } return batch + + +def vits_single_spk_batch_fn(examples): + """ + Returns: + Dict[str, Any]: + - text (Tensor): Text index tensor (B, T_text). + - text_lengths (Tensor): Text length tensor (B,). + - feats (Tensor): Feature tensor (B, T_feats, aux_channels). + - feats_lengths (Tensor): Feature length tensor (B,). + - speech (Tensor): Speech waveform tensor (B, T_wav). 
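+
+    Note:
+        text, feats and speech are zero-padded to the longest item in the
+        batch before being returned.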
+
+    """
+    # fields = ["text", "text_lengths", "feats", "feats_lengths", "speech"]
+    text = [np.array(item["text"], dtype=np.int64) for item in examples]
+    feats = [np.array(item["feats"], dtype=np.float32) for item in examples]
+    speech = [np.array(item["wave"], dtype=np.float32) for item in examples]
+    text_lengths = [
+        np.array(item["text_lengths"], dtype=np.int64) for item in examples
+    ]
+    feats_lengths = [
+        np.array(item["feats_lengths"], dtype=np.int64) for item in examples
+    ]
+
+    text = batch_sequences(text)
+    feats = batch_sequences(feats)
+    speech = batch_sequences(speech)
+
+    # convert each batch to paddle.Tensor
+    text = paddle.to_tensor(text)
+    feats = paddle.to_tensor(feats)
+    text_lengths = paddle.to_tensor(text_lengths)
+    feats_lengths = paddle.to_tensor(feats_lengths)
+    # speech must be a Tensor too: the updater later calls speech.unsqueeze(1)
+    speech = paddle.to_tensor(speech)
+
+    batch = {
+        "text": text,
+        "text_lengths": text_lengths,
+        "feats": feats,
+        "feats_lengths": feats_lengths,
+        "speech": speech
+    }
+    return batch
diff --git a/paddlespeech/t2s/datasets/batch.py b/paddlespeech/t2s/datasets/batch.py
index 9d83bbe09..4f21d4470 100644
--- a/paddlespeech/t2s/datasets/batch.py
+++ b/paddlespeech/t2s/datasets/batch.py
@@ -167,7 +167,6 @@ def batch_spec(minibatch, pad_value=0., time_major=False, dtype=np.float32):
 
 
 def batch_sequences(sequences, axis=0, pad_value=0):
-    # import pdb; pdb.set_trace()
     seq = sequences[0]
     ndim = seq.ndim
     if axis < 0:
diff --git a/paddlespeech/t2s/datasets/get_feats.py b/paddlespeech/t2s/datasets/get_feats.py
index e1ca0eeb9..21458f152 100644
--- a/paddlespeech/t2s/datasets/get_feats.py
+++ b/paddlespeech/t2s/datasets/get_feats.py
@@ -171,7 +171,6 @@ class Pitch():
 
 class Energy():
     def __init__(self,
-                 sr: int=24000,
                  n_fft: int=2048,
                  hop_length: int=300,
                  win_length: int=None,
@@ -179,7 +178,6 @@ class Energy():
                  center: bool=True,
                  pad_mode: str="reflect"):
 
-        self.sr = sr
         self.n_fft = n_fft
         self.win_length = win_length
         self.hop_length = hop_length
diff --git a/paddlespeech/t2s/exps/fastspeech2/preprocess.py b/paddlespeech/t2s/exps/fastspeech2/preprocess.py
index db1842b2e..55dc38089 100644
--- a/paddlespeech/t2s/exps/fastspeech2/preprocess.py
+++ b/paddlespeech/t2s/exps/fastspeech2/preprocess.py
@@ -144,10 +144,17 @@ def process_sentences(config,
                       spk_emb_dir: Path=None):
     if nprocs == 1:
         results = []
-        for fp in fps:
-            record = process_sentence(config, fp, sentences, output_dir,
-                                      mel_extractor, pitch_extractor,
-                                      energy_extractor, cut_sil, spk_emb_dir)
+        for fp in tqdm.tqdm(fps, total=len(fps)):
+            record = process_sentence(
+                config=config,
+                fp=fp,
+                sentences=sentences,
+                output_dir=output_dir,
+                mel_extractor=mel_extractor,
+                pitch_extractor=pitch_extractor,
+                energy_extractor=energy_extractor,
+                cut_sil=cut_sil,
+                spk_emb_dir=spk_emb_dir)
             if record:
                 results.append(record)
     else:
@@ -322,7 +329,6 @@ def main():
         f0min=config.f0min,
         f0max=config.f0max)
     energy_extractor = Energy(
-        sr=config.fs,
         n_fft=config.n_fft,
         hop_length=config.n_shift,
         win_length=config.win_length,
@@ -331,36 +337,36 @@ def main():
     # process for the 3 sections
     if train_wav_files:
         process_sentences(
-            config,
-            train_wav_files,
-            sentences,
-            train_dump_dir,
-            mel_extractor,
-            pitch_extractor,
-            energy_extractor,
+            config=config,
+            fps=train_wav_files,
+            sentences=sentences,
+            output_dir=train_dump_dir,
+            mel_extractor=mel_extractor,
+            pitch_extractor=pitch_extractor,
+            energy_extractor=energy_extractor,
             nprocs=args.num_cpu,
             cut_sil=args.cut_sil,
             spk_emb_dir=spk_emb_dir)
     if dev_wav_files:
         process_sentences(
-            config,
-            dev_wav_files,
-            sentences,
-            dev_dump_dir,
-            mel_extractor,
-            pitch_extractor,
-            energy_extractor,
+            config=config,
+            fps=dev_wav_files,
+            sentences=sentences,
+            output_dir=dev_dump_dir,
+            mel_extractor=mel_extractor,
+            pitch_extractor=pitch_extractor,
+            energy_extractor=energy_extractor,
             cut_sil=args.cut_sil,
             spk_emb_dir=spk_emb_dir)
     if test_wav_files:
         process_sentences(
-            config,
-            test_wav_files,
-            sentences,
-            test_dump_dir,
-            mel_extractor,
-            pitch_extractor,
-            energy_extractor,
+            config=config,
+            fps=test_wav_files,
+            sentences=sentences,
+            output_dir=test_dump_dir,
+            mel_extractor=mel_extractor,
+            pitch_extractor=pitch_extractor,
+            energy_extractor=energy_extractor,
             nprocs=args.num_cpu,
             cut_sil=args.cut_sil,
             spk_emb_dir=spk_emb_dir)
diff --git a/paddlespeech/t2s/exps/gan_vocoder/preprocess.py b/paddlespeech/t2s/exps/gan_vocoder/preprocess.py
index 4871bca71..5a407f5ba 100644
--- a/paddlespeech/t2s/exps/gan_vocoder/preprocess.py
+++ b/paddlespeech/t2s/exps/gan_vocoder/preprocess.py
@@ -85,15 +85,17 @@ def process_sentence(config: Dict[str, Any],
             y, (0, num_frames * config.n_shift - y.size), mode="reflect")
     else:
         y = y[:num_frames * config.n_shift]
-    num_sample = y.shape[0]
+    num_samples = y.shape[0]
 
     mel_path = output_dir / (utt_id + "_feats.npy")
     wav_path = output_dir / (utt_id + "_wave.npy")
-    np.save(wav_path, y)  # (num_samples, )
-    np.save(mel_path, logmel)  # (num_frames, n_mels)
+    # (num_samples, )
+    np.save(wav_path, y)
+    # (num_frames, n_mels)
+    np.save(mel_path, logmel)
     record = {
         "utt_id": utt_id,
-        "num_samples": num_sample,
+        "num_samples": num_samples,
         "num_frames": num_frames,
         "feats": str(mel_path),
         "wave": str(wav_path),
@@ -108,11 +110,17 @@ def process_sentences(config,
                       mel_extractor=None,
                       nprocs: int=1,
                       cut_sil: bool=True):
+
     if nprocs == 1:
         results = []
         for fp in tqdm.tqdm(fps, total=len(fps)):
-            record = process_sentence(config, fp, sentences, output_dir,
-                                      mel_extractor, cut_sil)
+            record = process_sentence(
+                config=config,
+                fp=fp,
+                sentences=sentences,
+                output_dir=output_dir,
+                mel_extractor=mel_extractor,
+                cut_sil=cut_sil)
             if record:
                 results.append(record)
     else:
@@ -147,7 +155,7 @@ def main():
         "--dataset",
         default="baker",
         type=str,
-        help="name of dataset, should in {baker, ljspeech, vctk} now")
+        help="name of dataset, should be in {baker, aishell3, ljspeech, vctk} now")
     parser.add_argument(
         "--rootdir", default=None, type=str, help="directory to dataset.")
     parser.add_argument(
@@ -261,28 +269,28 @@ def main():
     # process for the 3 sections
     if train_wav_files:
         process_sentences(
-            config,
-            train_wav_files,
-            sentences,
-            train_dump_dir,
+            config=config,
+            fps=train_wav_files,
+            sentences=sentences,
+            output_dir=train_dump_dir,
             mel_extractor=mel_extractor,
             nprocs=args.num_cpu,
             cut_sil=args.cut_sil)
     if dev_wav_files:
         process_sentences(
-            config,
-            dev_wav_files,
-            sentences,
-            dev_dump_dir,
+            config=config,
+            fps=dev_wav_files,
+            sentences=sentences,
+            output_dir=dev_dump_dir,
             mel_extractor=mel_extractor,
             nprocs=args.num_cpu,
             cut_sil=args.cut_sil)
     if test_wav_files:
         process_sentences(
-            config,
-            test_wav_files,
-            sentences,
-            test_dump_dir,
+            config=config,
+            fps=test_wav_files,
+            sentences=sentences,
+            output_dir=test_dump_dir,
             mel_extractor=mel_extractor,
             nprocs=args.num_cpu,
             cut_sil=args.cut_sil)
diff --git a/paddlespeech/t2s/exps/speedyspeech/preprocess.py b/paddlespeech/t2s/exps/speedyspeech/preprocess.py
index e833d1394..e8d89a4f5 100644
--- a/paddlespeech/t2s/exps/speedyspeech/preprocess.py
+++ b/paddlespeech/t2s/exps/speedyspeech/preprocess.py
@@ -123,11 +123,17 @@ def process_sentences(config,
                       nprocs: int=1,
cut_sil: bool=True, use_relative_path: bool=False): + if nprocs == 1: results = [] for fp in tqdm.tqdm(fps, total=len(fps)): - record = process_sentence(config, fp, sentences, output_dir, - mel_extractor, cut_sil) + record = process_sentence( + config=config, + fp=fp, + sentences=sentences, + output_dir=output_dir, + mel_extractor=mel_extractor, + cut_sil=cut_sil) if record: results.append(record) else: @@ -265,30 +271,30 @@ def main(): # process for the 3 sections if train_wav_files: process_sentences( - config, - train_wav_files, - sentences, - train_dump_dir, - mel_extractor, + config=config, + fps=train_wav_files, + sentences=sentences, + output_dir=train_dump_dir, + mel_extractor=mel_extractor, nprocs=args.num_cpu, cut_sil=args.cut_sil, use_relative_path=args.use_relative_path) if dev_wav_files: process_sentences( - config, - dev_wav_files, - sentences, - dev_dump_dir, - mel_extractor, + config=config, + fps=dev_wav_files, + sentences=sentences, + output_dir=dev_dump_dir, + mel_extractor=mel_extractor, cut_sil=args.cut_sil, use_relative_path=args.use_relative_path) if test_wav_files: process_sentences( - config, - test_wav_files, - sentences, - test_dump_dir, - mel_extractor, + config=config, + fps=test_wav_files, + sentences=sentences, + output_dir=test_dump_dir, + mel_extractor=mel_extractor, nprocs=args.num_cpu, cut_sil=args.cut_sil, use_relative_path=args.use_relative_path) diff --git a/paddlespeech/t2s/exps/syn_utils.py b/paddlespeech/t2s/exps/syn_utils.py index ce0aee05e..41fd73174 100644 --- a/paddlespeech/t2s/exps/syn_utils.py +++ b/paddlespeech/t2s/exps/syn_utils.py @@ -27,11 +27,11 @@ from paddle import jit from paddle.static import InputSpec from yacs.config import CfgNode -from paddlespeech.s2t.utils.dynamic_import import dynamic_import from paddlespeech.t2s.datasets.data_table import DataTable from paddlespeech.t2s.frontend import English from paddlespeech.t2s.frontend.zh_frontend import Frontend from paddlespeech.t2s.modules.normalizer import ZScore +from paddlespeech.utils.dynamic_import import dynamic_import model_alias = { # acoustic model diff --git a/paddlespeech/t2s/exps/synthesize.py b/paddlespeech/t2s/exps/synthesize.py index 0855a6a2a..2b543ef7d 100644 --- a/paddlespeech/t2s/exps/synthesize.py +++ b/paddlespeech/t2s/exps/synthesize.py @@ -107,8 +107,8 @@ def evaluate(args): if args.voice_cloning and "spk_emb" in datum: spk_emb = paddle.to_tensor(np.load(datum["spk_emb"])) mel = am_inference(phone_ids, spk_emb=spk_emb) - # vocoder - wav = voc_inference(mel) + # vocoder + wav = voc_inference(mel) wav = wav.numpy() N += wav.size @@ -125,7 +125,7 @@ def evaluate(args): def parse_args(): - # parse args and config and redirect to train_sp + # parse args and config parser = argparse.ArgumentParser( description="Synthesize with acoustic model & vocoder") # acoustic model @@ -143,7 +143,7 @@ def parse_args(): '--am_config', type=str, default=None, - help='Config of acoustic model. Use deault config when it is None.') + help='Config of acoustic model.') parser.add_argument( '--am_ckpt', type=str, @@ -182,7 +182,7 @@ def parse_args(): '--voc_config', type=str, default=None, - help='Config of voc. 
Use deault config when it is None.') + help='Config of voc.') parser.add_argument( '--voc_ckpt', type=str, default=None, help='Checkpoint file of voc.') parser.add_argument( diff --git a/paddlespeech/t2s/exps/synthesize_e2e.py b/paddlespeech/t2s/exps/synthesize_e2e.py index 2f14ef564..6101d1593 100644 --- a/paddlespeech/t2s/exps/synthesize_e2e.py +++ b/paddlespeech/t2s/exps/synthesize_e2e.py @@ -159,7 +159,7 @@ def evaluate(args): def parse_args(): - # parse args and config and redirect to train_sp + # parse args and config parser = argparse.ArgumentParser( description="Synthesize with acoustic model & vocoder") # acoustic model @@ -177,7 +177,7 @@ def parse_args(): '--am_config', type=str, default=None, - help='Config of acoustic model. Use deault config when it is None.') + help='Config of acoustic model.') parser.add_argument( '--am_ckpt', type=str, @@ -223,7 +223,7 @@ def parse_args(): '--voc_config', type=str, default=None, - help='Config of voc. Use deault config when it is None.') + help='Config of voc.') parser.add_argument( '--voc_ckpt', type=str, default=None, help='Checkpoint file of voc.') parser.add_argument( diff --git a/paddlespeech/t2s/exps/synthesize_streaming.py b/paddlespeech/t2s/exps/synthesize_streaming.py index 3659cb490..b11bc799b 100644 --- a/paddlespeech/t2s/exps/synthesize_streaming.py +++ b/paddlespeech/t2s/exps/synthesize_streaming.py @@ -201,7 +201,7 @@ def evaluate(args): def parse_args(): - # parse args and config and redirect to train_sp + # parse args and config parser = argparse.ArgumentParser( description="Synthesize with acoustic model & vocoder") # acoustic model @@ -215,7 +215,7 @@ def parse_args(): '--am_config', type=str, default=None, - help='Config of acoustic model. Use deault config when it is None.') + help='Config of acoustic model.') parser.add_argument( '--am_ckpt', type=str, @@ -248,7 +248,7 @@ def parse_args(): '--voc_config', type=str, default=None, - help='Config of voc. 
Use deault config when it is None.') + help='Config of voc.') parser.add_argument( '--voc_ckpt', type=str, default=None, help='Checkpoint file of voc.') parser.add_argument( diff --git a/paddlespeech/t2s/exps/tacotron2/preprocess.py b/paddlespeech/t2s/exps/tacotron2/preprocess.py index 14a0d7eae..fe2a2b26f 100644 --- a/paddlespeech/t2s/exps/tacotron2/preprocess.py +++ b/paddlespeech/t2s/exps/tacotron2/preprocess.py @@ -122,9 +122,15 @@ def process_sentences(config, spk_emb_dir: Path=None): if nprocs == 1: results = [] - for fp in fps: - record = process_sentence(config, fp, sentences, output_dir, - mel_extractor, cut_sil, spk_emb_dir) + for fp in tqdm.tqdm(fps, total=len(fps)): + record = process_sentence( + config=config, + fp=fp, + sentences=sentences, + output_dir=output_dir, + mel_extractor=mel_extractor, + cut_sil=cut_sil, + spk_emb_dir=spk_emb_dir) if record: results.append(record) else: @@ -296,30 +302,30 @@ def main(): # process for the 3 sections if train_wav_files: process_sentences( - config, - train_wav_files, - sentences, - train_dump_dir, - mel_extractor, + config=config, + fps=train_wav_files, + sentences=sentences, + output_dir=train_dump_dir, + mel_extractor=mel_extractor, nprocs=args.num_cpu, cut_sil=args.cut_sil, spk_emb_dir=spk_emb_dir) if dev_wav_files: process_sentences( - config, - dev_wav_files, - sentences, - dev_dump_dir, - mel_extractor, + config=config, + fps=dev_wav_files, + sentences=sentences, + output_dir=dev_dump_dir, + mel_extractor=mel_extractor, cut_sil=args.cut_sil, spk_emb_dir=spk_emb_dir) if test_wav_files: process_sentences( - config, - test_wav_files, - sentences, - test_dump_dir, - mel_extractor, + config=config, + fps=test_wav_files, + sentences=sentences, + output_dir=test_dump_dir, + mel_extractor=mel_extractor, nprocs=args.num_cpu, cut_sil=args.cut_sil, spk_emb_dir=spk_emb_dir) diff --git a/paddlespeech/t2s/exps/transformer_tts/preprocess.py b/paddlespeech/t2s/exps/transformer_tts/preprocess.py index 9aa87e91a..28ca3de6e 100644 --- a/paddlespeech/t2s/exps/transformer_tts/preprocess.py +++ b/paddlespeech/t2s/exps/transformer_tts/preprocess.py @@ -125,11 +125,16 @@ def process_sentences(config, output_dir: Path, mel_extractor=None, nprocs: int=1): + if nprocs == 1: results = [] for fp in tqdm.tqdm(fps, total=len(fps)): - record = process_sentence(config, fp, sentences, output_dir, - mel_extractor) + record = process_sentence( + config=config, + fp=fp, + sentences=sentences, + output_dir=output_dir, + mel_extractor=mel_extractor) if record: results.append(record) else: @@ -247,27 +252,27 @@ def main(): # process for the 3 sections if train_wav_files: process_sentences( - config, - train_wav_files, - sentences, - train_dump_dir, - mel_extractor, + config=config, + fps=train_wav_files, + sentences=sentences, + output_dir=train_dump_dir, + mel_extractor=mel_extractor, nprocs=args.num_cpu) if dev_wav_files: process_sentences( - config, - dev_wav_files, - sentences, - dev_dump_dir, - mel_extractor, + config=config, + fps=dev_wav_files, + sentences=sentences, + output_dir=dev_dump_dir, + mel_extractor=mel_extractor, nprocs=args.num_cpu) if test_wav_files: process_sentences( - config, - test_wav_files, - sentences, - test_dump_dir, - mel_extractor, + config=config, + fps=test_wav_files, + sentences=sentences, + output_dir=test_dump_dir, + mel_extractor=mel_extractor, nprocs=args.num_cpu) diff --git a/paddlespeech/t2s/exps/vits/normalize.py b/paddlespeech/t2s/exps/vits/normalize.py index 97043fd7b..6fc8adb06 100644 --- 
a/paddlespeech/t2s/exps/vits/normalize.py
+++ b/paddlespeech/t2s/exps/vits/normalize.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -11,3 +11,155 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+"""Normalize feature files and dump them."""
+import argparse
+import logging
+from operator import itemgetter
+from pathlib import Path
+
+import jsonlines
+import numpy as np
+from sklearn.preprocessing import StandardScaler
+from tqdm import tqdm
+
+from paddlespeech.t2s.datasets.data_table import DataTable
+
+
+def main():
+    """Run preprocessing process."""
+    parser = argparse.ArgumentParser(
+        description="Normalize dumped raw features (See detail in parallel_wavegan/bin/normalize.py)."
+    )
+    parser.add_argument(
+        "--metadata",
+        type=str,
+        required=True,
+        help="path to the metadata jsonl file of features to be normalized.")
+
+    parser.add_argument(
+        "--dumpdir",
+        type=str,
+        required=True,
+        help="directory to dump normalized feature files.")
+    parser.add_argument(
+        "--feats-stats",
+        type=str,
+        required=True,
+        help="speech statistics file.")
+    parser.add_argument(
+        "--skip-wav-copy",
+        default=False,
+        action="store_true",
+        help="whether to skip the copy of wav files.")
+
+    parser.add_argument(
+        "--phones-dict", type=str, default=None, help="phone vocabulary file.")
+    parser.add_argument(
+        "--speaker-dict", type=str, default=None, help="speaker id map file.")
+    parser.add_argument(
+        "--verbose",
+        type=int,
+        default=1,
+        help="logging level. higher is more logging. (default=1)")
+    args = parser.parse_args()
+
+    # set logger
+    if args.verbose > 1:
+        logging.basicConfig(
+            level=logging.DEBUG,
+            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
+        )
+    elif args.verbose > 0:
+        logging.basicConfig(
+            level=logging.INFO,
+            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
+        )
+    else:
+        logging.basicConfig(
+            level=logging.WARN,
+            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
+        )
+        logging.warning('Skip DEBUG/INFO messages')
+
+    dumpdir = Path(args.dumpdir).expanduser()
+    # use absolute path
+    dumpdir = dumpdir.resolve()
+    dumpdir.mkdir(parents=True, exist_ok=True)
+
+    # get dataset
+    with jsonlines.open(args.metadata, 'r') as reader:
+        metadata = list(reader)
+    dataset = DataTable(
+        metadata,
+        converters={
+            "feats": np.load,
+            "wave": None if args.skip_wav_copy else np.load,
+        })
+    logging.info(f"The number of files = {len(dataset)}.")
+
+    # restore scaler
+    feats_scaler = StandardScaler()
+    feats_scaler.mean_ = np.load(args.feats_stats)[0]
+    feats_scaler.scale_ = np.load(args.feats_stats)[1]
+    feats_scaler.n_features_in_ = feats_scaler.mean_.shape[0]
+
+    vocab_phones = {}
+    with open(args.phones_dict, 'rt') as f:
+        phn_id = [line.strip().split() for line in f.readlines()]
+    for phn, id in phn_id:
+        vocab_phones[phn] = int(id)
+
+    vocab_speaker = {}
+    with open(args.speaker_dict, 'rt') as f:
+        spk_id = [line.strip().split() for line in f.readlines()]
+    for spk, id in spk_id:
+        vocab_speaker[spk] = int(id)
+
+    # process each file
+    output_metadata = []
+
+    for item in tqdm(dataset):
+        utt_id = item['utt_id']
+        feats = item['feats']
+        wave = item['wave']
+
+        # normalize
+        feats = feats_scaler.transform(feats)
+        feats_path = dumpdir / f"{utt_id}_feats.npy"
+        np.save(feats_path, feats.astype(np.float32), allow_pickle=False)
+
+        if not args.skip_wav_copy:
+            wav_path = dumpdir / f"{utt_id}_wave.npy"
+            np.save(wav_path, wave.astype(np.float32), allow_pickle=False)
+        else:
+            wav_path = wave
+
+        phone_ids = [vocab_phones[p] for p in item['phones']]
+        spk_id = vocab_speaker[item["speaker"]]
+
+        record = {
+            "utt_id": item['utt_id'],
+            "text": phone_ids,
+            "text_lengths": item['text_lengths'],
+            'feats': str(feats_path),
+            "feats_lengths": item['feats_lengths'],
+            "wave": str(wav_path),
+            "spk_id": spk_id,
+        }
+
+        # add spk_emb for voice cloning
+        if "spk_emb" in item:
+            record["spk_emb"] = str(item["spk_emb"])
+
+        output_metadata.append(record)
+    output_metadata.sort(key=itemgetter('utt_id'))
+    output_metadata_path = Path(args.dumpdir) / "metadata.jsonl"
+    with jsonlines.open(output_metadata_path, 'w') as writer:
+        for item in output_metadata:
+            writer.write(item)
+    logging.info(f"metadata dumped into {output_metadata_path}")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/paddlespeech/t2s/exps/vits/preprocess.py b/paddlespeech/t2s/exps/vits/preprocess.py
index 97043fd7b..6aa139fb5 100644
--- a/paddlespeech/t2s/exps/vits/preprocess.py
+++ b/paddlespeech/t2s/exps/vits/preprocess.py
@@ -11,3 +11,338 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
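+# This script cuts edge silence from each utterance, extracts a linear
+# spectrogram (the feature consumed by the VITS posterior encoder) together
+# with the matching raw waveform, and dumps a metadata.jsonl per data split.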
+import argparse
+import os
+from concurrent.futures import ThreadPoolExecutor
+from operator import itemgetter
+from pathlib import Path
+from typing import Any
+from typing import Dict
+from typing import List
+
+import jsonlines
+import librosa
+import numpy as np
+import tqdm
+import yaml
+from yacs.config import CfgNode
+
+from paddlespeech.t2s.datasets.get_feats import LinearSpectrogram
+from paddlespeech.t2s.datasets.preprocess_utils import compare_duration_and_mel_length
+from paddlespeech.t2s.datasets.preprocess_utils import get_input_token
+from paddlespeech.t2s.datasets.preprocess_utils import get_phn_dur
+from paddlespeech.t2s.datasets.preprocess_utils import get_spk_id_map
+from paddlespeech.t2s.datasets.preprocess_utils import merge_silence
+from paddlespeech.t2s.utils import str2bool
+
+
+def process_sentence(config: Dict[str, Any],
+                     fp: Path,
+                     sentences: Dict,
+                     output_dir: Path,
+                     spec_extractor=None,
+                     cut_sil: bool=True,
+                     spk_emb_dir: Path=None):
+    utt_id = fp.stem
+    # for vctk
+    if utt_id.endswith("_mic2"):
+        utt_id = utt_id[:-5]
+    record = None
+    if utt_id in sentences:
+        # reading, resampling may occur
+        wav, _ = librosa.load(str(fp), sr=config.fs)
+        if len(wav.shape) != 1:
+            return record
+        max_value = np.abs(wav).max()
+        if max_value > 1.0:
+            wav = wav / max_value
+        assert len(wav.shape) == 1, f"{utt_id} is not a mono-channel audio."
+        assert np.abs(wav).max(
+        ) <= 1.0, f"{utt_id} seems to be different from 16 bit PCM."
+        phones = sentences[utt_id][0]
+        durations = sentences[utt_id][1]
+        speaker = sentences[utt_id][2]
+        d_cumsum = np.pad(np.array(durations).cumsum(0), (1, 0), 'constant')
+        # slightly less precise than using *.TextGrid directly
+        times = librosa.frames_to_time(
+            d_cumsum, sr=config.fs, hop_length=config.n_shift)
+        if cut_sil:
+            start = 0
+            end = d_cumsum[-1]
+            if phones[0] == "sil" and len(durations) > 1:
+                start = times[1]
+                durations = durations[1:]
+                phones = phones[1:]
+            if phones[-1] == 'sil' and len(durations) > 1:
+                end = times[-2]
+                durations = durations[:-1]
+                phones = phones[:-1]
+            sentences[utt_id][0] = phones
+            sentences[utt_id][1] = durations
+            start, end = librosa.time_to_samples([start, end], sr=config.fs)
+            wav = wav[start:end]
+        # extract linear spectrogram
+        spec = spec_extractor.get_linear_spectrogram(wav)
+        # change duration according to mel_length
+        compare_duration_and_mel_length(sentences, utt_id, spec)
+        # utt_id may be popped in compare_duration_and_mel_length
+        if utt_id not in sentences:
+            return None
+        phones = sentences[utt_id][0]
+        durations = sentences[utt_id][1]
+        num_frames = spec.shape[0]
+        assert sum(durations) == num_frames
+
+        if wav.size < num_frames * config.n_shift:
+            wav = np.pad(
+                wav, (0, num_frames * config.n_shift - wav.size),
+                mode="reflect")
+        else:
+            wav = wav[:num_frames * config.n_shift]
+        num_samples = wav.shape[0]
+
+        spec_path = output_dir / (utt_id + "_feats.npy")
+        wav_path = output_dir / (utt_id + "_wave.npy")
+        # (num_samples, )
+        np.save(wav_path, wav)
+        # (num_frames, aux_channels)
+        np.save(spec_path, spec)
+
+        record = {
+            "utt_id": utt_id,
+            "phones": phones,
+            "text_lengths": len(phones),
+            "feats": str(spec_path),
+            "feats_lengths": num_frames,
+            "wave": str(wav_path),
+            "speaker": speaker
+        }
+        if spk_emb_dir:
+            if speaker in os.listdir(spk_emb_dir):
+                embed_name = utt_id + ".npy"
+                embed_path = spk_emb_dir / speaker / embed_name
+                if embed_path.is_file():
+                    record["spk_emb"] = str(embed_path)
+                else:
+                    return None
+    return record
+
+
+def process_sentences(config,
+                      fps: List[Path],
+                      sentences: Dict,
+                      output_dir: Path,
+                      spec_extractor=None,
+                      nprocs: int=1,
+                      cut_sil: bool=True,
+                      spk_emb_dir: Path=None):
+    if nprocs == 1:
+        results = []
+        for fp in tqdm.tqdm(fps, total=len(fps)):
+            record = process_sentence(
+                config=config,
+                fp=fp,
+                sentences=sentences,
+                output_dir=output_dir,
+                spec_extractor=spec_extractor,
+                cut_sil=cut_sil,
+                spk_emb_dir=spk_emb_dir)
+            if record:
+                results.append(record)
+    else:
+        with ThreadPoolExecutor(nprocs) as pool:
+            futures = []
+            with tqdm.tqdm(total=len(fps)) as progress:
+                for fp in fps:
+                    future = pool.submit(process_sentence, config, fp,
+                                         sentences, output_dir, spec_extractor,
+                                         cut_sil, spk_emb_dir)
+                    future.add_done_callback(lambda p: progress.update())
+                    futures.append(future)
+
+                results = []
+                for ft in futures:
+                    record = ft.result()
+                    if record:
+                        results.append(record)
+
+    results.sort(key=itemgetter("utt_id"))
+    with jsonlines.open(output_dir / "metadata.jsonl", 'w') as writer:
+        for item in results:
+            writer.write(item)
+    print("Done")
+
+
+def main():
+    # parse config and args
+    parser = argparse.ArgumentParser(
+        description="Preprocess audio and then extract features.")
+
+    parser.add_argument(
+        "--dataset",
+        default="baker",
+        type=str,
+        help="name of dataset, should be in {baker, aishell3, ljspeech, vctk} now")
+
+    parser.add_argument(
+        "--rootdir", default=None, type=str, help="directory to dataset.")
+
+    parser.add_argument(
+        "--dumpdir",
+        type=str,
+        required=True,
+        help="directory to dump feature files.")
+    parser.add_argument(
+        "--dur-file", default=None, type=str, help="path to durations.txt.")
+
+    parser.add_argument("--config", type=str, help="vits config file.")
+
+    parser.add_argument(
+        "--verbose",
+        type=int,
+        default=1,
+        help="logging level. higher is more logging. (default=1)")
+    parser.add_argument(
+        "--num-cpu", type=int, default=1, help="number of processes.")
+
+    parser.add_argument(
+        "--cut-sil",
+        type=str2bool,
+        default=True,
+        help="whether to cut silence at the edges of the audio")
+
+    parser.add_argument(
+        "--spk_emb_dir",
+        default=None,
+        type=str,
+        help="directory to speaker embedding files.")
+    args = parser.parse_args()
+
+    rootdir = Path(args.rootdir).expanduser()
+    dumpdir = Path(args.dumpdir).expanduser()
+    # use absolute path
+    dumpdir = dumpdir.resolve()
+    dumpdir.mkdir(parents=True, exist_ok=True)
+    dur_file = Path(args.dur_file).expanduser()
+
+    if args.spk_emb_dir:
+        spk_emb_dir = Path(args.spk_emb_dir).expanduser().resolve()
+    else:
+        spk_emb_dir = None
+
+    assert rootdir.is_dir()
+    assert dur_file.is_file()
+
+    with open(args.config, 'rt') as f:
+        config = CfgNode(yaml.safe_load(f))
+
+    if args.verbose > 1:
+        print(vars(args))
+        print(config)
+
+    sentences, speaker_set = get_phn_dur(dur_file)
+
+    merge_silence(sentences)
+    phone_id_map_path = dumpdir / "phone_id_map.txt"
+    speaker_id_map_path = dumpdir / "speaker_id_map.txt"
+    get_input_token(sentences, phone_id_map_path, args.dataset)
+    get_spk_id_map(speaker_set, speaker_id_map_path)
+
+    if args.dataset == "baker":
+        wav_files = sorted(list((rootdir / "Wave").rglob("*.wav")))
+        # split data into 3 sections
+        num_train = 9800
+        num_dev = 100
+        train_wav_files = wav_files[:num_train]
+        dev_wav_files = wav_files[num_train:num_train + num_dev]
+        test_wav_files = wav_files[num_train + num_dev:]
+    elif args.dataset == "aishell3":
+        sub_num_dev = 5
+        wav_dir = rootdir / "train" / "wav"
+        train_wav_files = []
+        dev_wav_files = []
+        test_wav_files = []
+        for speaker in os.listdir(wav_dir):
+            wav_files = sorted(list((wav_dir / speaker).rglob("*.wav")))
+            if len(wav_files) > 100:
+                train_wav_files += wav_files[:-sub_num_dev * 2]
+                dev_wav_files += wav_files[-sub_num_dev * 2:-sub_num_dev]
+                test_wav_files += wav_files[-sub_num_dev:]
+            else:
+                train_wav_files += wav_files
+
+    elif args.dataset == "ljspeech":
+        wav_files = sorted(list((rootdir / "wavs").rglob("*.wav")))
+        # split data into 3 sections
+        num_train = 12900
+        num_dev = 100
+        train_wav_files = wav_files[:num_train]
+        dev_wav_files = wav_files[num_train:num_train + num_dev]
+        test_wav_files = wav_files[num_train + num_dev:]
+    elif args.dataset == "vctk":
+        sub_num_dev = 5
+        wav_dir = rootdir / "wav48_silence_trimmed"
+        train_wav_files = []
+        dev_wav_files = []
+        test_wav_files = []
+        for speaker in os.listdir(wav_dir):
+            wav_files = sorted(list((wav_dir / speaker).rglob("*_mic2.flac")))
+            if len(wav_files) > 100:
+                train_wav_files += wav_files[:-sub_num_dev * 2]
+                dev_wav_files += wav_files[-sub_num_dev * 2:-sub_num_dev]
+                test_wav_files += wav_files[-sub_num_dev:]
+            else:
+                train_wav_files += wav_files
+
+    else:
+        print("dataset should be in {baker, aishell3, ljspeech, vctk} now!")
+
+    train_dump_dir = dumpdir / "train" / "raw"
+    train_dump_dir.mkdir(parents=True, exist_ok=True)
+    dev_dump_dir = dumpdir / "dev" / "raw"
+    dev_dump_dir.mkdir(parents=True, exist_ok=True)
+    test_dump_dir = dumpdir / "test" / "raw"
+    test_dump_dir.mkdir(parents=True, exist_ok=True)
+
+    # Extractor
+
+    spec_extractor = LinearSpectrogram(
+        n_fft=config.n_fft,
+        hop_length=config.n_shift,
+        win_length=config.win_length,
+        window=config.window)
+
+    # process for the 3 sections
+    if train_wav_files:
+        process_sentences(
+            config=config,
+            fps=train_wav_files,
+            sentences=sentences,
+            output_dir=train_dump_dir,
+            spec_extractor=spec_extractor,
+            nprocs=args.num_cpu,
+            cut_sil=args.cut_sil,
+            spk_emb_dir=spk_emb_dir)
+    if dev_wav_files:
+        process_sentences(
+            config=config,
+            fps=dev_wav_files,
+            sentences=sentences,
+            output_dir=dev_dump_dir,
+            spec_extractor=spec_extractor,
+            cut_sil=args.cut_sil,
+            spk_emb_dir=spk_emb_dir)
+    if test_wav_files:
+        process_sentences(
+            config=config,
+            fps=test_wav_files,
+            sentences=sentences,
+            output_dir=test_dump_dir,
+            spec_extractor=spec_extractor,
+            nprocs=args.num_cpu,
+            cut_sil=args.cut_sil,
+            spk_emb_dir=spk_emb_dir)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/paddlespeech/t2s/exps/vits/synthesize.py b/paddlespeech/t2s/exps/vits/synthesize.py
index 97043fd7b..074b890f9 100644
--- a/paddlespeech/t2s/exps/vits/synthesize.py
+++ b/paddlespeech/t2s/exps/vits/synthesize.py
@@ -11,3 +11,107 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import argparse
+from pathlib import Path
+
+import jsonlines
+import paddle
+import soundfile as sf
+import yaml
+from timer import timer
+from yacs.config import CfgNode
+
+from paddlespeech.t2s.datasets.data_table import DataTable
+from paddlespeech.t2s.models.vits import VITS
+
+
+def evaluate(args):
+
+    # construct dataset for evaluation
+    with jsonlines.open(args.test_metadata, 'r') as reader:
+        test_metadata = list(reader)
+    # Init body.
+    with open(args.config) as f:
+        config = CfgNode(yaml.safe_load(f))
+
+    print("========Args========")
+    print(yaml.safe_dump(vars(args)))
+    print("========Config========")
+    print(config)
+
+    fields = ["utt_id", "text"]
+
+    test_dataset = DataTable(data=test_metadata, fields=fields)
+
+    with open(args.phones_dict, "r") as f:
+        phn_id = [line.strip().split() for line in f.readlines()]
+    vocab_size = len(phn_id)
+    print("vocab_size:", vocab_size)
+
+    odim = config.n_fft // 2 + 1
+
+    vits = VITS(idim=vocab_size, odim=odim, **config["model"])
+    vits.set_state_dict(paddle.load(args.ckpt)["main_params"])
+    vits.eval()
+
+    output_dir = Path(args.output_dir)
+    output_dir.mkdir(parents=True, exist_ok=True)
+
+    N = 0
+    T = 0
+
+    for datum in test_dataset:
+        utt_id = datum["utt_id"]
+        phone_ids = paddle.to_tensor(datum["text"])
+        with timer() as t:
+            with paddle.no_grad():
+                out = vits.inference(text=phone_ids)
+            wav = out["wav"]
+        wav = wav.numpy()
+        N += wav.size
+        T += t.elapse
+        speed = wav.size / t.elapse
+        rtf = config.fs / speed
+        print(
+            f"{utt_id}, wave: {wav.size}, time: {t.elapse}s, Hz: {speed}, RTF: {rtf}."
+        )
+        sf.write(str(output_dir / (utt_id + ".wav")), wav, samplerate=config.fs)
+        print(f"{utt_id} done!")
+    print(f"generation speed: {N / T}Hz, RTF: {config.fs / (N / T) }")
+
+
+def parse_args():
+    # parse args and config
+    parser = argparse.ArgumentParser(description="Synthesize with VITS")
+    # model
+    parser.add_argument(
+        '--config', type=str, default=None, help='Config of VITS.')
+    parser.add_argument(
+        '--ckpt', type=str, default=None, help='Checkpoint file of VITS.')
+    parser.add_argument(
+        "--phones_dict", type=str, default=None, help="phone vocabulary file.")
+    # other
+    parser.add_argument(
+        "--ngpu", type=int, default=1, help="if ngpu == 0, use cpu.")
+    parser.add_argument("--test_metadata", type=str, help="test metadata.")
+    parser.add_argument("--output_dir", type=str, help="output dir.")
+
+    args = parser.parse_args()
+    return args
+
+
+def main():
+    args = parse_args()
+
+    if args.ngpu == 0:
+        paddle.set_device("cpu")
+    elif args.ngpu > 0:
+        paddle.set_device("gpu")
+    else:
+        print("ngpu should >= 0 !")
+
+    evaluate(args)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/paddlespeech/t2s/exps/vits/synthesize_e2e.py b/paddlespeech/t2s/exps/vits/synthesize_e2e.py
new file mode 100644
index 000000000..c82e5c039
--- /dev/null
+++ b/paddlespeech/t2s/exps/vits/synthesize_e2e.py
@@ -0,0 +1,146 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import argparse
+from pathlib import Path
+
+import paddle
+import soundfile as sf
+import yaml
+from timer import timer
+from yacs.config import CfgNode
+
+from paddlespeech.t2s.exps.syn_utils import get_frontend
+from paddlespeech.t2s.exps.syn_utils import get_sentences
+from paddlespeech.t2s.models.vits import VITS
+
+
+def evaluate(args):
+
+    # Init body.
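+    # Unlike synthesize.py, which reads pre-extracted phone ids from the test
+    # metadata, this end-to-end script runs the text frontend first:
+    # raw text -> phone ids -> VITS -> waveform.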
+    with open(args.config) as f:
+        config = CfgNode(yaml.safe_load(f))
+
+    print("========Args========")
+    print(yaml.safe_dump(vars(args)))
+    print("========Config========")
+    print(config)
+
+    sentences = get_sentences(text_file=args.text, lang=args.lang)
+
+    # frontend
+    frontend = get_frontend(lang=args.lang, phones_dict=args.phones_dict)
+
+    with open(args.phones_dict, "r") as f:
+        phn_id = [line.strip().split() for line in f.readlines()]
+    vocab_size = len(phn_id)
+    print("vocab_size:", vocab_size)
+
+    odim = config.n_fft // 2 + 1
+
+    vits = VITS(idim=vocab_size, odim=odim, **config["model"])
+    vits.set_state_dict(paddle.load(args.ckpt)["main_params"])
+    vits.eval()
+
+    output_dir = Path(args.output_dir)
+    output_dir.mkdir(parents=True, exist_ok=True)
+    merge_sentences = False
+
+    N = 0
+    T = 0
+    for utt_id, sentence in sentences:
+        with timer() as t:
+            if args.lang == 'zh':
+                input_ids = frontend.get_input_ids(
+                    sentence, merge_sentences=merge_sentences)
+                phone_ids = input_ids["phone_ids"]
+            elif args.lang == 'en':
+                input_ids = frontend.get_input_ids(
+                    sentence, merge_sentences=merge_sentences)
+                phone_ids = input_ids["phone_ids"]
+            else:
+                print("lang should be in {'zh', 'en'}!")
+            with paddle.no_grad():
+                flags = 0
+                for i in range(len(phone_ids)):
+                    part_phone_ids = phone_ids[i]
+                    out = vits.inference(text=part_phone_ids)
+                    wav = out["wav"]
+                    if flags == 0:
+                        wav_all = wav
+                        flags = 1
+                    else:
+                        wav_all = paddle.concat([wav_all, wav])
+        wav = wav_all.numpy()
+        N += wav.size
+        T += t.elapse
+        speed = wav.size / t.elapse
+        rtf = config.fs / speed
+        print(
+            f"{utt_id}, wave: {wav.shape}, time: {t.elapse}s, Hz: {speed}, RTF: {rtf}."
+        )
+        sf.write(str(output_dir / (utt_id + ".wav")), wav, samplerate=config.fs)
+        print(f"{utt_id} done!")
+    print(f"generation speed: {N / T}Hz, RTF: {config.fs / (N / T) }")
+
+
+def parse_args():
+    # parse args and config
+    parser = argparse.ArgumentParser(description="Synthesize with VITS")
+
+    # model
+    parser.add_argument(
+        '--config', type=str, default=None, help='Config of VITS.')
+    parser.add_argument(
+        '--ckpt', type=str, default=None, help='Checkpoint file of VITS.')
+    parser.add_argument(
+        "--phones_dict", type=str, default=None, help="phone vocabulary file.")
+    # other
+    parser.add_argument(
+        '--lang',
+        type=str,
+        default='zh',
+        help='Choose model language. zh or en')
+
+    parser.add_argument(
+        "--inference_dir",
+        type=str,
+        default=None,
+        help="dir to save inference models")
+    parser.add_argument(
+        "--ngpu", type=int, default=1, help="if ngpu == 0, use cpu.")
+    parser.add_argument(
+        "--text",
+        type=str,
+        help="text to synthesize, a 'utt_id sentence' pair per line.")
+    parser.add_argument("--output_dir", type=str, help="output dir.")
+
+    args = parser.parse_args()
+    return args
+
+
+def main():
+    args = parse_args()
+
+    if args.ngpu == 0:
+        paddle.set_device("cpu")
+    elif args.ngpu > 0:
+        paddle.set_device("gpu")
+    else:
+        print("ngpu should >= 0 !")
+
+    evaluate(args)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/paddlespeech/t2s/exps/vits/train.py b/paddlespeech/t2s/exps/vits/train.py
index 97043fd7b..b921f92af 100644
--- a/paddlespeech/t2s/exps/vits/train.py
+++ b/paddlespeech/t2s/exps/vits/train.py
@@ -11,3 +11,251 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
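+# VITS is trained end to end: one module holds both the generator and the
+# discriminator, and training alternates adversarial updates between them,
+# each side with its own Adam optimizer and LR scheduler (see VITSUpdater).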
+import argparse +import logging +import os +import shutil +from pathlib import Path + +import jsonlines +import numpy as np +import paddle +import yaml +from paddle import DataParallel +from paddle import distributed as dist +from paddle.io import DataLoader +from paddle.io import DistributedBatchSampler +from paddle.optimizer import Adam +from yacs.config import CfgNode + +from paddlespeech.t2s.datasets.am_batch_fn import vits_single_spk_batch_fn +from paddlespeech.t2s.datasets.data_table import DataTable +from paddlespeech.t2s.models.vits import VITS +from paddlespeech.t2s.models.vits import VITSEvaluator +from paddlespeech.t2s.models.vits import VITSUpdater +from paddlespeech.t2s.modules.losses import DiscriminatorAdversarialLoss +from paddlespeech.t2s.modules.losses import FeatureMatchLoss +from paddlespeech.t2s.modules.losses import GeneratorAdversarialLoss +from paddlespeech.t2s.modules.losses import KLDivergenceLoss +from paddlespeech.t2s.modules.losses import MelSpectrogramLoss +from paddlespeech.t2s.training.extensions.snapshot import Snapshot +from paddlespeech.t2s.training.extensions.visualizer import VisualDL +from paddlespeech.t2s.training.optimizer import scheduler_classes +from paddlespeech.t2s.training.seeding import seed_everything +from paddlespeech.t2s.training.trainer import Trainer + + +def train_sp(args, config): + # decides device type and whether to run in parallel + # setup running environment correctly + world_size = paddle.distributed.get_world_size() + if (not paddle.is_compiled_with_cuda()) or args.ngpu == 0: + paddle.set_device("cpu") + else: + paddle.set_device("gpu") + if world_size > 1: + paddle.distributed.init_parallel_env() + + # set the random seed, it is a must for multiprocess training + seed_everything(config.seed) + + print( + f"rank: {dist.get_rank()}, pid: {os.getpid()}, parent_pid: {os.getppid()}", + ) + + # dataloader has been too verbose + logging.getLogger("DataLoader").disabled = True + + fields = ["text", "text_lengths", "feats", "feats_lengths", "wave"] + + converters = { + "wave": np.load, + "feats": np.load, + } + + # construct dataset for training and validation + with jsonlines.open(args.train_metadata, 'r') as reader: + train_metadata = list(reader) + train_dataset = DataTable( + data=train_metadata, + fields=fields, + converters=converters, ) + with jsonlines.open(args.dev_metadata, 'r') as reader: + dev_metadata = list(reader) + dev_dataset = DataTable( + data=dev_metadata, + fields=fields, + converters=converters, ) + + # collate function and dataloader + train_sampler = DistributedBatchSampler( + train_dataset, + batch_size=config.batch_size, + shuffle=True, + drop_last=True) + dev_sampler = DistributedBatchSampler( + dev_dataset, + batch_size=config.batch_size, + shuffle=False, + drop_last=False) + print("samplers done!") + + train_batch_fn = vits_single_spk_batch_fn + + train_dataloader = DataLoader( + train_dataset, + batch_sampler=train_sampler, + collate_fn=train_batch_fn, + num_workers=config.num_workers) + + dev_dataloader = DataLoader( + dev_dataset, + batch_sampler=dev_sampler, + collate_fn=train_batch_fn, + num_workers=config.num_workers) + print("dataloaders done!") + + with open(args.phones_dict, "r") as f: + phn_id = [line.strip().split() for line in f.readlines()] + vocab_size = len(phn_id) + print("vocab_size:", vocab_size) + + odim = config.n_fft // 2 + 1 + model = VITS(idim=vocab_size, odim=odim, **config["model"]) + gen_parameters = model.generator.parameters() + dis_parameters = model.discriminator.parameters() 
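+    # under DataParallel the wrapped module lives on model._layers, so the
+    # generator/discriminator parameter lists must be re-collected from there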
+    if world_size > 1:
+        model = DataParallel(model)
+        gen_parameters = model._layers.generator.parameters()
+        dis_parameters = model._layers.discriminator.parameters()
+
+    print("model done!")
+
+    # loss
+    criterion_mel = MelSpectrogramLoss(
+        **config["mel_loss_params"], )
+    criterion_feat_match = FeatureMatchLoss(
+        **config["feat_match_loss_params"], )
+    criterion_gen_adv = GeneratorAdversarialLoss(
+        **config["generator_adv_loss_params"], )
+    criterion_dis_adv = DiscriminatorAdversarialLoss(
+        **config["discriminator_adv_loss_params"], )
+    criterion_kl = KLDivergenceLoss()
+
+    print("criterions done!")
+
+    lr_schedule_g = scheduler_classes[config["generator_scheduler"]](
+        **config["generator_scheduler_params"])
+    optimizer_g = Adam(
+        learning_rate=lr_schedule_g,
+        parameters=gen_parameters,
+        **config["generator_optimizer_params"])
+
+    lr_schedule_d = scheduler_classes[config["discriminator_scheduler"]](
+        **config["discriminator_scheduler_params"])
+    optimizer_d = Adam(
+        learning_rate=lr_schedule_d,
+        parameters=dis_parameters,
+        **config["discriminator_optimizer_params"])
+
+    print("optimizers done!")
+
+    output_dir = Path(args.output_dir)
+    output_dir.mkdir(parents=True, exist_ok=True)
+    if dist.get_rank() == 0:
+        config_name = args.config.split("/")[-1]
+        # copy conf to output_dir
+        shutil.copyfile(args.config, output_dir / config_name)
+
+    updater = VITSUpdater(
+        model=model,
+        optimizers={
+            "generator": optimizer_g,
+            "discriminator": optimizer_d,
+        },
+        criterions={
+            "mel": criterion_mel,
+            "feat_match": criterion_feat_match,
+            "gen_adv": criterion_gen_adv,
+            "dis_adv": criterion_dis_adv,
+            "kl": criterion_kl,
+        },
+        schedulers={
+            "generator": lr_schedule_g,
+            "discriminator": lr_schedule_d,
+        },
+        dataloader=train_dataloader,
+        lambda_adv=config.lambda_adv,
+        lambda_mel=config.lambda_mel,
+        lambda_kl=config.lambda_kl,
+        lambda_feat_match=config.lambda_feat_match,
+        lambda_dur=config.lambda_dur,
+        generator_first=config.generator_first,
+        output_dir=output_dir)
+
+    evaluator = VITSEvaluator(
+        model=model,
+        criterions={
+            "mel": criterion_mel,
+            "feat_match": criterion_feat_match,
+            "gen_adv": criterion_gen_adv,
+            "dis_adv": criterion_dis_adv,
+            "kl": criterion_kl,
+        },
+        dataloader=dev_dataloader,
+        lambda_adv=config.lambda_adv,
+        lambda_mel=config.lambda_mel,
+        lambda_kl=config.lambda_kl,
+        lambda_feat_match=config.lambda_feat_match,
+        lambda_dur=config.lambda_dur,
+        generator_first=config.generator_first,
+        output_dir=output_dir)
+
+    trainer = Trainer(updater, (config.max_epoch, 'epoch'), output_dir)
+
+    if dist.get_rank() == 0:
+        trainer.extend(evaluator, trigger=(1, "epoch"))
+        trainer.extend(VisualDL(output_dir), trigger=(1, "iteration"))
+        trainer.extend(
+            Snapshot(max_size=config.num_snapshots), trigger=(1, 'epoch'))
+
+    print("Trainer Done!")
+    trainer.run()
+
+
+def main():
+    # parse args and config and redirect to train_sp
+
+    parser = argparse.ArgumentParser(description="Train a VITS model.")
+    parser.add_argument(
+        "--config", type=str, help="config file to overwrite default config.")
+    parser.add_argument("--train-metadata", type=str, help="training data.")
+    parser.add_argument("--dev-metadata", type=str, help="dev data.")
+    parser.add_argument("--output-dir", type=str, help="output dir.")
+    parser.add_argument(
+        "--ngpu", type=int, default=1, help="if ngpu == 0, use cpu.")
+    parser.add_argument(
+        "--phones-dict", type=str, default=None, help="phone vocabulary file.")
+
+    args = parser.parse_args()
+
+    with open(args.config, 'rt') as f:
+        config = CfgNode(yaml.safe_load(f))
+
+    print("========Args========")
+    print(yaml.safe_dump(vars(args)))
+    print("========Config========")
+    print(config)
+    print(
+        f"master sees the world size: {dist.get_world_size()}, from pid: {os.getpid()}"
+    )
+
+    # dispatch
+    if args.ngpu > 1:
+        dist.spawn(train_sp, (args, config), nprocs=args.ngpu)
+    else:
+        train_sp(args, config)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/paddlespeech/t2s/exps/voice_cloning.py b/paddlespeech/t2s/exps/voice_cloning.py
index 9257b07de..3cf1cabcf 100644
--- a/paddlespeech/t2s/exps/voice_cloning.py
+++ b/paddlespeech/t2s/exps/voice_cloning.py
@@ -122,7 +122,7 @@ def voice_cloning(args):
 
 
 def parse_args():
-    # parse args and config and redirect to train_sp
+    # parse args and config
     parser = argparse.ArgumentParser(description="")
     parser.add_argument(
         '--am',
@@ -134,7 +134,7 @@ def parse_args():
         '--am_config',
         type=str,
         default=None,
-        help='Config of acoustic model. Use deault config when it is None.')
+        help='Config of acoustic model.')
     parser.add_argument(
         '--am_ckpt',
         type=str,
@@ -163,7 +163,7 @@ def parse_args():
         '--voc_config',
         type=str,
         default=None,
-        help='Config of voc. Use deault config when it is None.')
+        help='Config of voc.')
     parser.add_argument(
         '--voc_ckpt', type=str, default=None, help='Checkpoint file of voc.')
     parser.add_argument(
diff --git a/paddlespeech/t2s/models/__init__.py b/paddlespeech/t2s/models/__init__.py
index 41be7c1db..0b6f29119 100644
--- a/paddlespeech/t2s/models/__init__.py
+++ b/paddlespeech/t2s/models/__init__.py
@@ -18,5 +18,6 @@ from .parallel_wavegan import *
 from .speedyspeech import *
 from .tacotron2 import *
 from .transformer_tts import *
+from .vits import *
 from .waveflow import *
 from .wavernn import *
diff --git a/paddlespeech/t2s/models/parallel_wavegan/parallel_wavegan_updater.py b/paddlespeech/t2s/models/parallel_wavegan/parallel_wavegan_updater.py
index 40cfff5a5..c1cd73308 100644
--- a/paddlespeech/t2s/models/parallel_wavegan/parallel_wavegan_updater.py
+++ b/paddlespeech/t2s/models/parallel_wavegan/parallel_wavegan_updater.py
@@ -68,8 +68,8 @@ class PWGUpdater(StandardUpdater):
         self.discriminator_train_start_steps = discriminator_train_start_steps
         self.lambda_adv = lambda_adv
         self.lambda_aux = lambda_aux
-        self.state = UpdaterState(iteration=0, epoch=0)
 
+        self.state = UpdaterState(iteration=0, epoch=0)
         self.train_iterator = iter(self.dataloader)
 
         log_file = output_dir / 'worker_{}.log'.format(dist.get_rank())
diff --git a/paddlespeech/t2s/models/vits/__init__.py b/paddlespeech/t2s/models/vits/__init__.py
index 97043fd7b..2c23aa3ec 100644
--- a/paddlespeech/t2s/models/vits/__init__.py
+++ b/paddlespeech/t2s/models/vits/__init__.py
@@ -11,3 +11,5 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from .vits import * +from .vits_updater import * \ No newline at end of file diff --git a/paddlespeech/t2s/models/vits/generator.py b/paddlespeech/t2s/models/vits/generator.py index e35f9956a..f87de91a2 100644 --- a/paddlespeech/t2s/models/vits/generator.py +++ b/paddlespeech/t2s/models/vits/generator.py @@ -318,7 +318,6 @@ class VITSGenerator(nn.Layer): g = g + g_ # forward posterior encoder - z, m_q, logs_q, y_mask = self.posterior_encoder( feats, feats_lengths, g=g) diff --git a/paddlespeech/t2s/models/vits/vits.py b/paddlespeech/t2s/models/vits/vits.py index f7f5ba968..ab8eda26d 100644 --- a/paddlespeech/t2s/models/vits/vits.py +++ b/paddlespeech/t2s/models/vits/vits.py @@ -27,12 +27,7 @@ from paddlespeech.t2s.models.hifigan import HiFiGANMultiScaleMultiPeriodDiscrimi from paddlespeech.t2s.models.hifigan import HiFiGANPeriodDiscriminator from paddlespeech.t2s.models.hifigan import HiFiGANScaleDiscriminator from paddlespeech.t2s.models.vits.generator import VITSGenerator -from paddlespeech.t2s.modules.losses import DiscriminatorAdversarialLoss -from paddlespeech.t2s.modules.losses import FeatureMatchLoss -from paddlespeech.t2s.modules.losses import GeneratorAdversarialLoss -from paddlespeech.t2s.modules.losses import KLDivergenceLoss -from paddlespeech.t2s.modules.losses import MelSpectrogramLoss -from paddlespeech.t2s.modules.nets_utils import get_segments +from paddlespeech.t2s.modules.nets_utils import initialize AVAILABLE_GENERATERS = { "vits_generator": VITSGenerator, @@ -157,37 +152,8 @@ class VITS(nn.Layer): "use_spectral_norm": False, }, }, - # loss related - generator_adv_loss_params: Dict[str, Any]={ - "average_by_discriminators": False, - "loss_type": "mse", - }, - discriminator_adv_loss_params: Dict[str, Any]={ - "average_by_discriminators": False, - "loss_type": "mse", - }, - feat_match_loss_params: Dict[str, Any]={ - "average_by_discriminators": False, - "average_by_layers": False, - "include_final_outputs": True, - }, - mel_loss_params: Dict[str, Any]={ - "fs": 22050, - "fft_size": 1024, - "hop_size": 256, - "win_length": None, - "window": "hann", - "num_mels": 80, - "fmin": 0, - "fmax": None, - "log_base": None, - }, - lambda_adv: float=1.0, - lambda_mel: float=45.0, - lambda_feat_match: float=2.0, - lambda_dur: float=1.0, - lambda_kl: float=1.0, - cache_generator_outputs: bool=True, ): + cache_generator_outputs: bool=True, + init_type: str="xavier_uniform", ): """Initialize VITS module. Args: idim (int): Input vocabrary size. @@ -200,22 +166,14 @@ class VITS(nn.Layer): generator_params (Dict[str, Any]): Parameter dict for generator. discriminator_type (str): Discriminator type. discriminator_params (Dict[str, Any]): Parameter dict for discriminator. - generator_adv_loss_params (Dict[str, Any]): Parameter dict for generator - adversarial loss. - discriminator_adv_loss_params (Dict[str, Any]): Parameter dict for - discriminator adversarial loss. - feat_match_loss_params (Dict[str, Any]): Parameter dict for feat match loss. - mel_loss_params (Dict[str, Any]): Parameter dict for mel loss. - lambda_adv (float): Loss scaling coefficient for adversarial loss. - lambda_mel (float): Loss scaling coefficient for mel spectrogram loss. - lambda_feat_match (float): Loss scaling coefficient for feat match loss. - lambda_dur (float): Loss scaling coefficient for duration loss. - lambda_kl (float): Loss scaling coefficient for KL divergence loss. cache_generator_outputs (bool): Whether to cache generator outputs. 
""" assert check_argument_types() super().__init__() + # initialize parameters + initialize(self, init_type) + # define modules generator_class = AVAILABLE_GENERATERS[generator_type] if generator_type == "vits_generator": @@ -229,22 +187,8 @@ class VITS(nn.Layer): discriminator_class = AVAILABLE_DISCRIMINATORS[discriminator_type] self.discriminator = discriminator_class( **discriminator_params, ) - self.generator_adv_loss = GeneratorAdversarialLoss( - **generator_adv_loss_params, ) - self.discriminator_adv_loss = DiscriminatorAdversarialLoss( - **discriminator_adv_loss_params, ) - self.feat_match_loss = FeatureMatchLoss( - **feat_match_loss_params, ) - self.mel_loss = MelSpectrogramLoss( - **mel_loss_params, ) - self.kl_loss = KLDivergenceLoss() - # coefficients - self.lambda_adv = lambda_adv - self.lambda_mel = lambda_mel - self.lambda_kl = lambda_kl - self.lambda_feat_match = lambda_feat_match - self.lambda_dur = lambda_dur + nn.initializer.set_global_initializer(None) # cache self.cache_generator_outputs = cache_generator_outputs @@ -259,15 +203,8 @@ class VITS(nn.Layer): self.langs = self.generator.langs self.spk_embed_dim = self.generator.spk_embed_dim - @property - def require_raw_speech(self): - """Return whether or not speech is required.""" - return True - - @property - def require_vocoder(self): - """Return whether or not vocoder is required.""" - return False + self.reuse_cache_gen = True + self.reuse_cache_dis = True def forward( self, @@ -334,21 +271,15 @@ class VITS(nn.Layer): spembs (Optional[Tensor]): Speaker embedding tensor (B, spk_embed_dim). lids (Optional[Tensor]): Language index tensor (B,) or (B, 1). Returns: - Dict[str, Any]: - * loss (Tensor): Loss scalar tensor. - * stats (Dict[str, float]): Statistics to be monitored. - * weight (Tensor): Weight tensor to summarize losses. - * optim_idx (int): Optimizer index (0 for G and 1 for D). 
+ """ # setup - batch_size = paddle.shape(text)[0] feats = feats.transpose([0, 2, 1]) - # speech = speech.unsqueeze(1) # calculate generator outputs - reuse_cache = True + self.reuse_cache_gen = True if not self.cache_generator_outputs or self._cache is None: - reuse_cache = False + self.reuse_cache_gen = False outs = self.generator( text=text, text_lengths=text_lengths, @@ -361,59 +292,10 @@ class VITS(nn.Layer): outs = self._cache # store cache - if self.training and self.cache_generator_outputs and not reuse_cache: + if self.training and self.cache_generator_outputs and not self.reuse_cache_gen: self._cache = outs return outs - """ - # parse outputs - speech_hat_, dur_nll, _, start_idxs, _, z_mask, outs_ = outs - _, z_p, m_p, logs_p, _, logs_q = outs_ - speech_ = get_segments( - x=speech, - start_idxs=start_idxs * self.generator.upsample_factor, - segment_size=self.generator.segment_size * - self.generator.upsample_factor, ) - - # calculate discriminator outputs - p_hat = self.discriminator(speech_hat_) - with paddle.no_grad(): - # do not store discriminator gradient in generator turn - p = self.discriminator(speech_) - - # calculate losses - mel_loss = self.mel_loss(speech_hat_, speech_) - kl_loss = self.kl_loss(z_p, logs_q, m_p, logs_p, z_mask) - dur_loss = paddle.sum(dur_nll.float()) - adv_loss = self.generator_adv_loss(p_hat) - feat_match_loss = self.feat_match_loss(p_hat, p) - - mel_loss = mel_loss * self.lambda_mel - kl_loss = kl_loss * self.lambda_kl - dur_loss = dur_loss * self.lambda_dur - adv_loss = adv_loss * self.lambda_adv - feat_match_loss = feat_match_loss * self.lambda_feat_match - loss = mel_loss + kl_loss + dur_loss + adv_loss + feat_match_loss - - stats = dict( - generator_loss=loss.item(), - generator_mel_loss=mel_loss.item(), - generator_kl_loss=kl_loss.item(), - generator_dur_loss=dur_loss.item(), - generator_adv_loss=adv_loss.item(), - generator_feat_match_loss=feat_match_loss.item(), ) - - # reset cache - if reuse_cache or not self.training: - self._cache = None - - return { - "loss": loss, - "stats": stats, - # "weight": weight, - "optim_idx": 0, # needed for trainer - } - """ def _forward_discrminator( self, @@ -434,21 +316,15 @@ class VITS(nn.Layer): spembs (Optional[Tensor]): Speaker embedding tensor (B, spk_embed_dim). lids (Optional[Tensor]): Language index tensor (B,) or (B, 1). Returns: - Dict[str, Any]: - * loss (Tensor): Loss scalar tensor. - * stats (Dict[str, float]): Statistics to be monitored. - * weight (Tensor): Weight tensor to summarize losses. - * optim_idx (int): Optimizer index (0 for G and 1 for D). 
+ """ # setup - batch_size = paddle.shape(text)[0] feats = feats.transpose([0, 2, 1]) - # speech = speech.unsqueeze(1) # calculate generator outputs - reuse_cache = True + self.reuse_cache_dis = True if not self.cache_generator_outputs or self._cache is None: - reuse_cache = False + self.reuse_cache_dis = False outs = self.generator( text=text, text_lengths=text_lengths, @@ -461,44 +337,10 @@ class VITS(nn.Layer): outs = self._cache # store cache - if self.cache_generator_outputs and not reuse_cache: + if self.cache_generator_outputs and not self.reuse_cache_dis: self._cache = outs return outs - """ - - # parse outputs - speech_hat_, _, _, start_idxs, *_ = outs - speech_ = get_segments( - x=speech, - start_idxs=start_idxs * self.generator.upsample_factor, - segment_size=self.generator.segment_size * - self.generator.upsample_factor, ) - - # calculate discriminator outputs - p_hat = self.discriminator(speech_hat_.detach()) - p = self.discriminator(speech_) - - # calculate losses - real_loss, fake_loss = self.discriminator_adv_loss(p_hat, p) - loss = real_loss + fake_loss - - stats = dict( - discriminator_loss=loss.item(), - discriminator_real_loss=real_loss.item(), - discriminator_fake_loss=fake_loss.item(), ) - - # reset cache - if reuse_cache or not self.training: - self._cache = None - - return { - "loss": loss, - "stats": stats, - # "weight": weight, - "optim_idx": 1, # needed for trainer - } - """ def inference( self, @@ -535,10 +377,7 @@ class VITS(nn.Layer): # setup text = text[None] text_lengths = paddle.to_tensor(paddle.shape(text)[1]) - # if sids is not None: - # sids = sids.view(1) - # if lids is not None: - # lids = lids.view(1) + if durations is not None: durations = paddle.reshape(durations, [1, 1, -1]) diff --git a/paddlespeech/t2s/models/vits/vits_updater.py b/paddlespeech/t2s/models/vits/vits_updater.py index e69de29bb..a031dc575 100644 --- a/paddlespeech/t2s/models/vits/vits_updater.py +++ b/paddlespeech/t2s/models/vits/vits_updater.py @@ -0,0 +1,353 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
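+# GAN-style updater/evaluator for VITS: every step runs a discriminator turn
+# and a generator turn (order controlled by `generator_first`), and generator
+# outputs can be cached on the model and reused between the two turns.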
+import logging
+from typing import Dict
+
+import paddle
+from paddle import distributed as dist
+from paddle.io import DataLoader
+from paddle.nn import Layer
+from paddle.optimizer import Optimizer
+from paddle.optimizer.lr import LRScheduler
+
+from paddlespeech.t2s.modules.nets_utils import get_segments
+from paddlespeech.t2s.training.extensions.evaluator import StandardEvaluator
+from paddlespeech.t2s.training.reporter import report
+from paddlespeech.t2s.training.updaters.standard_updater import StandardUpdater
+from paddlespeech.t2s.training.updaters.standard_updater import UpdaterState
+
+logging.basicConfig(
+    format='%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s',
+    datefmt='[%Y-%m-%d %H:%M:%S]')
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+
+
+class VITSUpdater(StandardUpdater):
+    def __init__(self,
+                 model: Layer,
+                 optimizers: Dict[str, Optimizer],
+                 criterions: Dict[str, Layer],
+                 schedulers: Dict[str, LRScheduler],
+                 dataloader: DataLoader,
+                 generator_train_start_steps: int=0,
+                 discriminator_train_start_steps: int=100000,
+                 lambda_adv: float=1.0,
+                 lambda_mel: float=45.0,
+                 lambda_feat_match: float=2.0,
+                 lambda_dur: float=1.0,
+                 lambda_kl: float=1.0,
+                 generator_first: bool=False,
+                 output_dir=None):
+        # it is designed to hold multiple models
+        # a single model is passed in here, but the parent class's init() is
+        # not used, so this part has to be rewritten
+        models = {"main": model}
+        self.models: Dict[str, Layer] = models
+        # self.model = model
+
+        self.model = model._layers if isinstance(model, paddle.DataParallel) else model
+
+        self.optimizers = optimizers
+        self.optimizer_g: Optimizer = optimizers['generator']
+        self.optimizer_d: Optimizer = optimizers['discriminator']
+
+        self.criterions = criterions
+        self.criterion_mel = criterions['mel']
+        self.criterion_feat_match = criterions['feat_match']
+        self.criterion_gen_adv = criterions["gen_adv"]
+        self.criterion_dis_adv = criterions["dis_adv"]
+        self.criterion_kl = criterions["kl"]
+
+        self.schedulers = schedulers
+        self.scheduler_g = schedulers['generator']
+        self.scheduler_d = schedulers['discriminator']
+
+        self.dataloader = dataloader
+
+        self.generator_train_start_steps = generator_train_start_steps
+        self.discriminator_train_start_steps = discriminator_train_start_steps
+
+        self.lambda_adv = lambda_adv
+        self.lambda_mel = lambda_mel
+        self.lambda_feat_match = lambda_feat_match
+        self.lambda_dur = lambda_dur
+        self.lambda_kl = lambda_kl
+
+        if generator_first:
+            self.turns = ["generator", "discriminator"]
+        else:
+            self.turns = ["discriminator", "generator"]
+
+        self.state = UpdaterState(iteration=0, epoch=0)
+        self.train_iterator = iter(self.dataloader)
+
+        log_file = output_dir / 'worker_{}.log'.format(dist.get_rank())
+        self.filehandler = logging.FileHandler(str(log_file))
+        logger.addHandler(self.filehandler)
+        self.logger = logger
+        self.msg = ""
+
+    def update_core(self, batch):
+        self.msg = "Rank: {}, ".format(dist.get_rank())
+        losses_dict = {}
+
+        for turn in self.turns:
+            speech = batch["speech"]
+            speech = speech.unsqueeze(1)
+            outs = self.model(
+                text=batch["text"],
+                text_lengths=batch["text_lengths"],
+                feats=batch["feats"],
+                feats_lengths=batch["feats_lengths"],
+                forward_generator=turn == "generator")
+            # Generator
+            if turn == "generator":
+                # parse outputs
+                speech_hat_, dur_nll, _, start_idxs, _, z_mask, outs_ = outs
+                _, z_p, m_p, logs_p, _, logs_q = outs_
+                speech_ = get_segments(
+                    x=speech,
+                    start_idxs=start_idxs *
+                    self.model.generator.upsample_factor,
+                    segment_size=self.model.generator.segment_size *
+                    self.model.generator.upsample_factor, )
+
+                # calculate discriminator outputs
+                p_hat = self.model.discriminator(speech_hat_)
+                with paddle.no_grad():
+                    # do not store discriminator gradient in generator turn
+                    p = self.model.discriminator(speech_)
+
+                # calculate losses
+                mel_loss = self.criterion_mel(speech_hat_, speech_)
+                kl_loss = self.criterion_kl(z_p, logs_q, m_p, logs_p, z_mask)
+                dur_loss = paddle.sum(dur_nll)
+                adv_loss = self.criterion_gen_adv(p_hat)
+                feat_match_loss = self.criterion_feat_match(p_hat, p)
+
+                mel_loss = mel_loss * self.lambda_mel
+                kl_loss = kl_loss * self.lambda_kl
+                dur_loss = dur_loss * self.lambda_dur
+                adv_loss = adv_loss * self.lambda_adv
+                feat_match_loss = feat_match_loss * self.lambda_feat_match
+                gen_loss = mel_loss + kl_loss + dur_loss + adv_loss + feat_match_loss
+
+                report("train/generator_loss", float(gen_loss))
+                report("train/generator_mel_loss", float(mel_loss))
+                report("train/generator_kl_loss", float(kl_loss))
+                report("train/generator_dur_loss", float(dur_loss))
+                report("train/generator_adv_loss", float(adv_loss))
+                report("train/generator_feat_match_loss",
+                       float(feat_match_loss))
+
+                losses_dict["generator_loss"] = float(gen_loss)
+                losses_dict["generator_mel_loss"] = float(mel_loss)
+                losses_dict["generator_kl_loss"] = float(kl_loss)
+                losses_dict["generator_dur_loss"] = float(dur_loss)
+                losses_dict["generator_adv_loss"] = float(adv_loss)
+                losses_dict["generator_feat_match_loss"] = float(
+                    feat_match_loss)
+
+                self.optimizer_g.clear_grad()
+                gen_loss.backward()
+
+                self.optimizer_g.step()
+                self.scheduler_g.step()
+
+                # reset cache
+                if self.model.reuse_cache_gen or not self.model.training:
+                    self.model._cache = None
+
+            # Discriminator
+            elif turn == "discriminator":
+                # parse outputs
+                speech_hat_, _, _, start_idxs, *_ = outs
+                speech_ = get_segments(
+                    x=speech,
+                    start_idxs=start_idxs *
+                    self.model.generator.upsample_factor,
+                    segment_size=self.model.generator.segment_size *
+                    self.model.generator.upsample_factor, )
+
+                # calculate discriminator outputs
+                p_hat = self.model.discriminator(speech_hat_.detach())
+                p = self.model.discriminator(speech_)
+
+                # calculate losses
+                real_loss, fake_loss = self.criterion_dis_adv(p_hat, p)
+                dis_loss = real_loss + fake_loss
+
+                report("train/real_loss", float(real_loss))
+                report("train/fake_loss", float(fake_loss))
+                report("train/discriminator_loss", float(dis_loss))
+                losses_dict["real_loss"] = float(real_loss)
+                losses_dict["fake_loss"] = float(fake_loss)
+                losses_dict["discriminator_loss"] = float(dis_loss)
+
+                self.optimizer_d.clear_grad()
+                dis_loss.backward()
+
+                self.optimizer_d.step()
+                self.scheduler_d.step()
+
+                # reset cache
+                if self.model.reuse_cache_dis or not self.model.training:
+                    self.model._cache = None
+
+        self.msg += ', '.join('{}: {:>.6f}'.format(k, v)
+                              for k, v in losses_dict.items())
+
+
+class VITSEvaluator(StandardEvaluator):
+    def __init__(self,
+                 model,
+                 criterions: Dict[str, Layer],
+                 dataloader: DataLoader,
+                 lambda_adv: float=1.0,
+                 lambda_mel: float=45.0,
+                 lambda_feat_match: float=2.0,
+                 lambda_dur: float=1.0,
+                 lambda_kl: float=1.0,
+                 generator_first: bool=False,
+                 output_dir=None):
+        # a single model is passed in here, but the parent class's init() is
+        # not used, so this part has to be rewritten
+        models = {"main": model}
+        self.models: Dict[str, Layer] = models
+        # self.model = model
+        self.model = model._layers if isinstance(model, paddle.DataParallel) else model
+
+        self.criterions = criterions
+        self.criterion_mel = criterions['mel']
+        self.criterion_feat_match = criterions['feat_match']
+        self.criterion_gen_adv = criterions["gen_adv"]
+        self.criterion_dis_adv = criterions["dis_adv"]
+        self.criterion_kl = criterions["kl"]
+
+        self.dataloader = dataloader
+
+        self.lambda_adv = lambda_adv
+        self.lambda_mel = lambda_mel
+        self.lambda_feat_match = lambda_feat_match
+        self.lambda_dur = lambda_dur
+        self.lambda_kl = lambda_kl
+
+        if generator_first:
+            self.turns = ["generator", "discriminator"]
+        else:
+            self.turns = ["discriminator", "generator"]
+
+        log_file = output_dir / 'worker_{}.log'.format(dist.get_rank())
+        self.filehandler = logging.FileHandler(str(log_file))
+        logger.addHandler(self.filehandler)
+        self.logger = logger
+        self.msg = ""
+
+    def evaluate_core(self, batch):
+        # logging.debug("Evaluate: ")
+        self.msg = "Evaluate: "
+        losses_dict = {}
+
+        for turn in self.turns:
+            speech = batch["speech"]
+            speech = speech.unsqueeze(1)
+            outs = self.model(
+                text=batch["text"],
+                text_lengths=batch["text_lengths"],
+                feats=batch["feats"],
+                feats_lengths=batch["feats_lengths"],
+                forward_generator=turn == "generator")
+            # Generator
+            if turn == "generator":
+                # parse outputs
+                speech_hat_, dur_nll, _, start_idxs, _, z_mask, outs_ = outs
+                _, z_p, m_p, logs_p, _, logs_q = outs_
+                speech_ = get_segments(
+                    x=speech,
+                    start_idxs=start_idxs *
+                    self.model.generator.upsample_factor,
+                    segment_size=self.model.generator.segment_size *
+                    self.model.generator.upsample_factor, )
+
+                # calculate discriminator outputs
+                p_hat = self.model.discriminator(speech_hat_)
+                with paddle.no_grad():
+                    # do not store discriminator gradient in generator turn
+                    p = self.model.discriminator(speech_)
+
+                # calculate losses
+                mel_loss = self.criterion_mel(speech_hat_, speech_)
+                kl_loss = self.criterion_kl(z_p, logs_q, m_p, logs_p, z_mask)
+                dur_loss = paddle.sum(dur_nll)
+                adv_loss = self.criterion_gen_adv(p_hat)
+                feat_match_loss = self.criterion_feat_match(p_hat, p)
+
+                mel_loss = mel_loss * self.lambda_mel
+                kl_loss = kl_loss * self.lambda_kl
+                dur_loss = dur_loss * self.lambda_dur
+                adv_loss = adv_loss * self.lambda_adv
+                feat_match_loss = feat_match_loss * self.lambda_feat_match
+                gen_loss = mel_loss + kl_loss + dur_loss + adv_loss + feat_match_loss
+
+                report("eval/generator_loss", float(gen_loss))
+                report("eval/generator_mel_loss", float(mel_loss))
+                report("eval/generator_kl_loss", float(kl_loss))
+                report("eval/generator_dur_loss", float(dur_loss))
+                report("eval/generator_adv_loss", float(adv_loss))
+                report("eval/generator_feat_match_loss", float(feat_match_loss))
+
+                losses_dict["generator_loss"] = float(gen_loss)
+                losses_dict["generator_mel_loss"] = float(mel_loss)
+                losses_dict["generator_kl_loss"] = float(kl_loss)
+                losses_dict["generator_dur_loss"] = float(dur_loss)
+                losses_dict["generator_adv_loss"] = float(adv_loss)
+                losses_dict["generator_feat_match_loss"] = float(
+                    feat_match_loss)
+
+                # reset cache
+                if self.model.reuse_cache_gen or not self.model.training:
+                    self.model._cache = None
+
+            # Discriminator
+            elif turn == "discriminator":
+                # parse outputs
+                speech_hat_, _, _, start_idxs, *_ = outs
+                speech_ = get_segments(
+                    x=speech,
+                    start_idxs=start_idxs *
+                    self.model.generator.upsample_factor,
+                    segment_size=self.model.generator.segment_size *
+                    self.model.generator.upsample_factor, )
+
+                # calculate discriminator outputs
+                p_hat = self.model.discriminator(speech_hat_.detach())
+                p = self.model.discriminator(speech_)
+
+                # calculate losses
+                real_loss, fake_loss = self.criterion_dis_adv(p_hat, p)
+                dis_loss = real_loss + fake_loss
+
+                report("eval/real_loss", float(real_loss))
+                
report("eval/fake_loss", float(fake_loss)) + report("eval/discriminator_loss", float(dis_loss)) + losses_dict["real_loss"] = float(real_loss) + losses_dict["fake_loss"] = float(fake_loss) + losses_dict["discriminator_loss"] = float(dis_loss) + + # reset cache + if self.model.reuse_cache_dis or not self.model.training: + self.model._cache = None + + self.msg += ', '.join('{}: {:>.6f}'.format(k, v) + for k, v in losses_dict.items()) + self.logger.info(self.msg) diff --git a/paddlespeech/t2s/training/optimizer.py b/paddlespeech/t2s/training/optimizer.py index 64274d538..3342cae53 100644 --- a/paddlespeech/t2s/training/optimizer.py +++ b/paddlespeech/t2s/training/optimizer.py @@ -14,6 +14,14 @@ import paddle from paddle import nn +scheduler_classes = dict( + ReduceOnPlateau=paddle.optimizer.lr.ReduceOnPlateau, + lambda_decay=paddle.optimizer.lr.LambdaDecay, + step_decay=paddle.optimizer.lr.StepDecay, + multistep_decay=paddle.optimizer.lr.MultiStepDecay, + exponential_decay=paddle.optimizer.lr.ExponentialDecay, + CosineAnnealingDecay=paddle.optimizer.lr.CosineAnnealingDecay, ) + optim_classes = dict( adadelta=paddle.optimizer.Adadelta, adagrad=paddle.optimizer.Adagrad, diff --git a/paddlespeech/utils/__init__.py b/paddlespeech/utils/__init__.py new file mode 100644 index 000000000..185a92b8d --- /dev/null +++ b/paddlespeech/utils/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/paddlespeech/utils/dynamic_import.py b/paddlespeech/utils/dynamic_import.py new file mode 100644 index 000000000..99f93356f --- /dev/null +++ b/paddlespeech/utils/dynamic_import.py @@ -0,0 +1,38 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# Modified from espnet(https://github.com/espnet/espnet) +import importlib + +__all__ = ["dynamic_import"] + + +def dynamic_import(import_path, alias=dict()): + """dynamic import module and class + + :param str import_path: syntax 'module_name:class_name' + e.g., 'paddlespeech.s2t.models.u2:U2Model' + :param dict alias: shortcut for registered class + :return: imported class + """ + if import_path not in alias and ":" not in import_path: + raise ValueError( + "import_path should be one of {} or " + 'include ":", e.g. 
"paddlespeech.s2t.models.u2:U2Model" : ' + "{}".format(set(alias), import_path)) + if ":" not in import_path: + import_path = alias[import_path] + + module_name, objname = import_path.split(":") + m = importlib.import_module(module_name) + return getattr(m, objname) From a70e60d1ab97852b3f8e6e86e11d0469ea395daf Mon Sep 17 00:00:00 2001 From: iftaken Date: Fri, 20 May 2022 15:32:49 +0800 Subject: [PATCH 006/127] add PP-TTS,PP-ASR,PP-VPR --- README.md | 1 + README_cn.md | 1 + 2 files changed, 2 insertions(+) diff --git a/README.md b/README.md index d32131c0d..2ade8a69c 100644 --- a/README.md +++ b/README.md @@ -161,6 +161,7 @@ Via the easy-to-use, efficient, flexible and scalable implementation, our vision - 🧩 *Cascaded models application*: as an extension of the typical traditional audio tasks, we combine the workflows of the aforementioned tasks with other fields like Natural language processing (NLP) and Computer Vision (CV). ### Recent Update +- 👑 2022.05.13: Release [PP-ASR](./docs/source/asr/PPASR.md)、[PP-TTS](./docs/source/tts/PPTTS.md)、[PP-VPR](docs/source/vpr/PPVPR.md) - 👏🏻 2022.05.06: `Streaming ASR` with `Punctuation Restoration` and `Token Timestamp`. - 👏🏻 2022.05.06: `Server` is available for `Speaker Verification`, and `Punctuation Restoration`. - 👏🏻 2022.04.28: `Streaming Server` is available for `Automatic Speech Recognition` and `Text-to-Speech`. diff --git a/README_cn.md b/README_cn.md index ceb9dc187..f5ba93629 100644 --- a/README_cn.md +++ b/README_cn.md @@ -182,6 +182,7 @@ from https://github.com/18F/open-source-guide/blob/18f-pages/pages/making-readme +- 👑 2022.05.13: PaddleSpeech 发布 [PP-ASR](./docs/source/asr/PPASR_cn.md)、[PP-TTS](./docs/source/tts/PPTTS_cn.md)、[PP-VPR](docs/source/vpr/PPVPR_cn.md) - 👏🏻 2022.05.06: PaddleSpeech Streaming Server 上线! 覆盖了语音识别(标点恢复、时间戳),和语音合成。 - 👏🏻 2022.05.06: PaddleSpeech Server 上线! 覆盖了声音分类、语音识别、语音合成、声纹识别,标点恢复。 - 👏🏻 2022.03.28: PaddleSpeech CLI 覆盖声音分类、语音识别、语音翻译(英译中)、语音合成,声纹验证。 From 44b51cda0b3438bb4455653f9aac4bdc088840b9 Mon Sep 17 00:00:00 2001 From: mmglove Date: Fri, 20 May 2022 15:47:36 +0800 Subject: [PATCH 007/127] fix conformer benchmark --- tests/test_tipc/prepare.sh | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/tests/test_tipc/prepare.sh b/tests/test_tipc/prepare.sh index b62e54fd2..a13938017 100644 --- a/tests/test_tipc/prepare.sh +++ b/tests/test_tipc/prepare.sh @@ -24,30 +24,33 @@ trainer_list=$(func_parser_value "${lines[14]}") if [ ${MODE} = "benchmark_train" ];then curPath=$(readlink -f "$(dirname "$0")") - echo "curPath:"${curPath} # /PaddleSpeech/tests/test_tipc/ + echo "curPath:"${curPath} # /PaddleSpeech/tests/test_tipc cd ${curPath}/../.. + echo "------------- install for speech " apt-get install libsndfile1 -y + pip install yacs -i https://pypi.tuna.tsinghua.edu.cn/simple pip install pytest-runner -i https://pypi.tuna.tsinghua.edu.cn/simple pip install kaldiio -i https://pypi.tuna.tsinghua.edu.cn/simple pip install setuptools_scm -i https://pypi.tuna.tsinghua.edu.cn/simple pip install . 
-i https://pypi.tuna.tsinghua.edu.cn/simple
+    pip install jsonlines
+    pip list
     cd -

     if [ ${model_name} == "conformer" ]; then
         # set the URL for aishell_tiny dataset
-        URL=${conformer_data_URL:-"None"}
-        echo "URL:"${URL}
-        if [ ${URL} == 'None' ];then
+        conformer_aishell_URL=${conformer_aishell_URL:-"None"}
+        if [ ${conformer_aishell_URL} == 'None' ];then
             echo "please contact author to get the URL.\n"
             exit
-        else
-            wget -P ${curPath}/../../dataset/aishell/ ${URL}
-            mv ${curPath}/../../dataset/aishell/aishell.py ${curPath}/../../dataset/aishell/aishell_tiny.py
+        else
+            rm -rf ${curPath}/../../dataset/aishell/aishell.py
+            rm -rf ${curPath}/../../dataset/aishell/data_aishell_tiny*
+            wget -P ${curPath}/../../dataset/aishell/ ${conformer_aishell_URL}
         fi
         cd ${curPath}/../../examples/aishell/asr1

         #Prepare the data
-        sed -i "s#aishell.py#aishell_tiny.py#g" ./local/data.sh
-        sed -i "s#python3#python#g" ./local/data.sh
+        sed -i "s#python3#python#g" ./local/data.sh
         bash run.sh --stage 0 --stop_stage 0 # an error occasionally occurs on the first run, hence the second run below
         bash run.sh --stage 0 --stop_stage 0

From 58028509c342afd05b46b4ef2ee35f09ed944914 Mon Sep 17 00:00:00 2001
From: TianYuan
Date: Fri, 20 May 2022 08:10:01 +0000
Subject: [PATCH 008/127] replace dynamic_import

---
 paddlespeech/cli/cls/infer.py                      |  2 +-
 paddlespeech/cli/st/infer.py                       |  2 +-
 paddlespeech/cli/text/infer.py                     |  2 +-
 paddlespeech/cli/tts/infer.py                      |  2 +-
 paddlespeech/cli/vector/infer.py                   |  2 +-
 paddlespeech/cls/exps/panns/predict.py             |  4 ++--
 paddlespeech/cls/exps/panns/train.py               |  4 ++--
 .../server/engine/tts/online/python/tts_engine.py |  2 +-
 paddlespeech/t2s/exps/synthesize_streaming.py      | 12 +++---------
 9 files changed, 13 insertions(+), 19 deletions(-)

diff --git a/paddlespeech/cli/cls/infer.py b/paddlespeech/cli/cls/infer.py
index 1f637a8fe..fa46db19d 100644
--- a/paddlespeech/cli/cls/infer.py
+++ b/paddlespeech/cli/cls/infer.py
@@ -30,7 +30,7 @@ from .pretrained_models import model_alias
 from .pretrained_models import pretrained_models
 from paddleaudio import load
 from paddleaudio.features import LogMelSpectrogram
-from paddlespeech.s2t.utils.dynamic_import import dynamic_import
+from paddlespeech.utils.dynamic_import import dynamic_import

 __all__ = ['CLSExecutor']

diff --git a/paddlespeech/cli/st/infer.py b/paddlespeech/cli/st/infer.py
index 29d95f799..4f210fbe6 100644
--- a/paddlespeech/cli/st/infer.py
+++ b/paddlespeech/cli/st/infer.py
@@ -36,8 +36,8 @@ from .pretrained_models import kaldi_bins
 from .pretrained_models import model_alias
 from .pretrained_models import pretrained_models
 from paddlespeech.s2t.frontend.featurizer.text_featurizer import TextFeaturizer
-from paddlespeech.s2t.utils.dynamic_import import dynamic_import
 from paddlespeech.s2t.utils.utility import UpdateConfig
+from paddlespeech.utils.dynamic_import import dynamic_import

 __all__ = ["STExecutor"]

diff --git a/paddlespeech/cli/text/infer.py b/paddlespeech/cli/text/infer.py
index 69e62e4b4..97f3bbe21 100644
--- a/paddlespeech/cli/text/infer.py
+++ b/paddlespeech/cli/text/infer.py
@@ -21,7 +21,6 @@ from typing import Union

 import paddle

-from ...s2t.utils.dynamic_import import dynamic_import
 from ..executor import BaseExecutor
 from ..log import logger
 from ..utils import cli_register
@@ -29,6 +28,7 @@ from ..utils import stats_wrapper
 from .pretrained_models import model_alias
 from .pretrained_models import pretrained_models
 from .pretrained_models import tokenizer_alias
+from paddlespeech.utils.dynamic_import import dynamic_import

 __all__ = ['TextExecutor']

diff --git a/paddlespeech/cli/tts/infer.py 
b/paddlespeech/cli/tts/infer.py index 1c7199306..efab9cb25 100644 --- a/paddlespeech/cli/tts/infer.py +++ b/paddlespeech/cli/tts/infer.py @@ -32,10 +32,10 @@ from ..utils import cli_register from ..utils import stats_wrapper from .pretrained_models import model_alias from .pretrained_models import pretrained_models -from paddlespeech.s2t.utils.dynamic_import import dynamic_import from paddlespeech.t2s.frontend import English from paddlespeech.t2s.frontend.zh_frontend import Frontend from paddlespeech.t2s.modules.normalizer import ZScore +from paddlespeech.utils.dynamic_import import dynamic_import __all__ = ['TTSExecutor'] diff --git a/paddlespeech/cli/vector/infer.py b/paddlespeech/cli/vector/infer.py index 1dff6edb4..ea8f2c1f7 100644 --- a/paddlespeech/cli/vector/infer.py +++ b/paddlespeech/cli/vector/infer.py @@ -32,7 +32,7 @@ from .pretrained_models import model_alias from .pretrained_models import pretrained_models from paddleaudio.backends import load as load_audio from paddleaudio.compliance.librosa import melspectrogram -from paddlespeech.s2t.utils.dynamic_import import dynamic_import +from paddlespeech.utils.dynamic_import import dynamic_import from paddlespeech.vector.io.batch import feature_normalize from paddlespeech.vector.modules.sid_model import SpeakerIdetification diff --git a/paddlespeech/cls/exps/panns/predict.py b/paddlespeech/cls/exps/panns/predict.py index ffe42d390..d0b963545 100644 --- a/paddlespeech/cls/exps/panns/predict.py +++ b/paddlespeech/cls/exps/panns/predict.py @@ -17,12 +17,12 @@ import os import paddle import paddle.nn.functional as F import yaml - from paddleaudio.backends import load as load_audio from paddleaudio.features import LogMelSpectrogram from paddleaudio.utils import logger + from paddlespeech.cls.models import SoundClassifier -from paddlespeech.s2t.utils.dynamic_import import dynamic_import +from paddlespeech.utils.dynamic_import import dynamic_import # yapf: disable parser = argparse.ArgumentParser(__doc__) diff --git a/paddlespeech/cls/exps/panns/train.py b/paddlespeech/cls/exps/panns/train.py index 7e2922148..8e06273de 100644 --- a/paddlespeech/cls/exps/panns/train.py +++ b/paddlespeech/cls/exps/panns/train.py @@ -16,12 +16,12 @@ import os import paddle import yaml - from paddleaudio.features import LogMelSpectrogram from paddleaudio.utils import logger from paddleaudio.utils import Timer + from paddlespeech.cls.models import SoundClassifier -from paddlespeech.s2t.utils.dynamic_import import dynamic_import +from paddlespeech.utils.dynamic_import import dynamic_import # yapf: disable parser = argparse.ArgumentParser(__doc__) diff --git a/paddlespeech/server/engine/tts/online/python/tts_engine.py b/paddlespeech/server/engine/tts/online/python/tts_engine.py index a050a4d48..efed19528 100644 --- a/paddlespeech/server/engine/tts/online/python/tts_engine.py +++ b/paddlespeech/server/engine/tts/online/python/tts_engine.py @@ -26,7 +26,6 @@ from paddlespeech.cli.log import logger from paddlespeech.cli.tts.infer import TTSExecutor from paddlespeech.cli.utils import download_and_decompress from paddlespeech.cli.utils import MODEL_HOME -from paddlespeech.s2t.utils.dynamic_import import dynamic_import from paddlespeech.server.engine.base_engine import BaseEngine from paddlespeech.server.utils.audio_process import float2pcm from paddlespeech.server.utils.util import denorm @@ -34,6 +33,7 @@ from paddlespeech.server.utils.util import get_chunks from paddlespeech.t2s.frontend import English from paddlespeech.t2s.frontend.zh_frontend import Frontend from 
paddlespeech.t2s.modules.normalizer import ZScore +from paddlespeech.utils.dynamic_import import dynamic_import __all__ = ['TTSEngine'] diff --git a/paddlespeech/t2s/exps/synthesize_streaming.py b/paddlespeech/t2s/exps/synthesize_streaming.py index b11bc799b..af9d2abfc 100644 --- a/paddlespeech/t2s/exps/synthesize_streaming.py +++ b/paddlespeech/t2s/exps/synthesize_streaming.py @@ -24,7 +24,6 @@ from paddle.static import InputSpec from timer import timer from yacs.config import CfgNode -from paddlespeech.s2t.utils.dynamic_import import dynamic_import from paddlespeech.t2s.exps.syn_utils import denorm from paddlespeech.t2s.exps.syn_utils import get_chunks from paddlespeech.t2s.exps.syn_utils import get_frontend @@ -33,6 +32,7 @@ from paddlespeech.t2s.exps.syn_utils import get_voc_inference from paddlespeech.t2s.exps.syn_utils import model_alias from paddlespeech.t2s.exps.syn_utils import voc_to_static from paddlespeech.t2s.utils import str2bool +from paddlespeech.utils.dynamic_import import dynamic_import def evaluate(args): @@ -212,10 +212,7 @@ def parse_args(): choices=['fastspeech2_csmsc'], help='Choose acoustic model type of tts task.') parser.add_argument( - '--am_config', - type=str, - default=None, - help='Config of acoustic model.') + '--am_config', type=str, default=None, help='Config of acoustic model.') parser.add_argument( '--am_ckpt', type=str, @@ -245,10 +242,7 @@ def parse_args(): ], help='Choose vocoder type of tts task.') parser.add_argument( - '--voc_config', - type=str, - default=None, - help='Config of voc.') + '--voc_config', type=str, default=None, help='Config of voc.') parser.add_argument( '--voc_ckpt', type=str, default=None, help='Checkpoint file of voc.') parser.add_argument( From 008c812f63d338f0b4c93ac0190a7366ed324776 Mon Sep 17 00:00:00 2001 From: huangyuxin Date: Fri, 20 May 2022 08:49:52 +0000 Subject: [PATCH 009/127] fix cli/asr --- paddlespeech/cli/asr/infer.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/paddlespeech/cli/asr/infer.py b/paddlespeech/cli/asr/infer.py index 863a933f2..eb9721206 100644 --- a/paddlespeech/cli/asr/infer.py +++ b/paddlespeech/cli/asr/infer.py @@ -203,6 +203,8 @@ class ASRExecutor(BaseExecutor): self.model.set_state_dict(model_dict) # compute the max len limit + # default max_len: unit:second + self.max_len = 50 if "conformer" in model_type or "transformer" in model_type or "wenetspeech" in model_type: # in transformer like model, we may use the subsample rate cnn network subsample_rate = self.model.subsampling_rate() @@ -479,11 +481,11 @@ class ASRExecutor(BaseExecutor): Python API to call an executor. 
""" audio_file = os.path.abspath(audio_file) - if not self._check(audio_file, sample_rate, force_yes): - sys.exit(-1) paddle.set_device(device) self._init_from_path(model, lang, sample_rate, config, decode_method, ckpt_path) + if not self._check(audio_file, sample_rate, force_yes): + sys.exit(-1) if rtf: k = self.__class__.__name__ CLI_TIMER[k]['start'].append(time.time()) From c9dc388c94df4c96cad909009e499fc29d9014e1 Mon Sep 17 00:00:00 2001 From: huangyuxin Date: Fri, 20 May 2022 09:45:05 +0000 Subject: [PATCH 010/127] add calc CER in cli --- tests/unit/cli/aishell_test_prepare.py | 6 ++++++ ...f_by_aishell.sh => calc_RTF_CER_by_aishell.sh} | 15 +++++++++++++-- tests/unit/cli/utils | 1 + 3 files changed, 20 insertions(+), 2 deletions(-) rename tests/unit/cli/{calc_rtf_by_aishell.sh => calc_RTF_CER_by_aishell.sh} (54%) create mode 120000 tests/unit/cli/utils diff --git a/tests/unit/cli/aishell_test_prepare.py b/tests/unit/cli/aishell_test_prepare.py index 288de62a0..5088d7a48 100644 --- a/tests/unit/cli/aishell_test_prepare.py +++ b/tests/unit/cli/aishell_test_prepare.py @@ -55,6 +55,7 @@ args = parser.parse_args() def create_manifest(data_dir, manifest_path_prefix): print("Creating manifest %s ..." % manifest_path_prefix) json_lines = [] + reference_lines = [] transcript_path = os.path.join(data_dir, 'transcript', 'aishell_transcript_v0.8.txt') transcript_dict = {} @@ -88,6 +89,7 @@ def create_manifest(data_dir, manifest_path_prefix): duration = float(len(audio_data) / samplerate) text = transcript_dict[audio_id] json_lines.append(audio_path) + reference_lines.append(str(total_num+1) + "\t" + text) total_sec += duration total_text += len(text) @@ -98,6 +100,10 @@ def create_manifest(data_dir, manifest_path_prefix): for line in json_lines: fout.write(line + '\n') + with codecs.open(manifest_path + ".text", 'w', 'utf-8') as fout: + for line in reference_lines: + fout.write(line + '\n') + manifest_dir = os.path.dirname(manifest_path_prefix) def prepare_dataset(url, md5sum, target_dir, manifest_path=None): diff --git a/tests/unit/cli/calc_rtf_by_aishell.sh b/tests/unit/cli/calc_RTF_CER_by_aishell.sh similarity index 54% rename from tests/unit/cli/calc_rtf_by_aishell.sh rename to tests/unit/cli/calc_RTF_CER_by_aishell.sh index cee79160e..a5a1a77c1 100644 --- a/tests/unit/cli/calc_rtf_by_aishell.sh +++ b/tests/unit/cli/calc_RTF_CER_by_aishell.sh @@ -3,6 +3,10 @@ source path.sh stage=-1 stop_stage=100 +model_name=conformer_online_aishell +gpus=5 +log_file=res.log +res_file=res.rsl MAIN_ROOT=../../.. . ${MAIN_ROOT}/utils/parse_options.sh || exit -1; @@ -20,9 +24,16 @@ if [ ${stage} -le -1 ] && [ ${stop_stage} -ge -1 ]; then echo "Prepare Aishell failed. Terminated." 
exit 1
     fi
-
 fi
+
 if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
-    cat data/manifest.test | paddlespeech asr --model conformer_online_aishell --device gpu --decode_method ctc_prefix_beam_search --rtf -v
+    export CUDA_VISIBLE_DEVICES=${gpus}
+    cat data/manifest.test | paddlespeech asr --model ${model_name} --device gpu --decode_method attention_rescoring --rtf -v &> ${log_file}
+fi
+
+if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
+    cat ${log_file} | grep "^[0-9]" > ${res_file}
+    python utils/compute-wer.py --char=1 --v=1 \
+        data/manifest.test.text ${res_file} > ${res_file}.error
 fi

diff --git a/tests/unit/cli/utils b/tests/unit/cli/utils
new file mode 120000
index 000000000..973afe674
--- /dev/null
+++ b/tests/unit/cli/utils
@@ -0,0 +1 @@
+../../../utils
\ No newline at end of file

From 3638320f3b75e3949ffe77a546a918a0bc4fb295 Mon Sep 17 00:00:00 2001
From: Jackwaterveg <87408988+Jackwaterveg@users.noreply.github.com>
Date: Fri, 20 May 2022 20:25:46 +0800
Subject: [PATCH 011/127] fix self.max_len

---
 paddlespeech/cli/asr/infer.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/paddlespeech/cli/asr/infer.py b/paddlespeech/cli/asr/infer.py
index eb9721206..8b10b6b65 100644
--- a/paddlespeech/cli/asr/infer.py
+++ b/paddlespeech/cli/asr/infer.py
@@ -135,6 +135,8 @@ class ASRExecutor(BaseExecutor):
         Init model and other resources from a specific path.
         """
         logger.info("start to init the model")
+        # default max_len: unit:second
+        self.max_len = 50
         if hasattr(self, 'model'):
             logger.info('Model had been initialized.')
             return
@@ -203,8 +205,6 @@ class ASRExecutor(BaseExecutor):
         self.model.set_state_dict(model_dict)

         # compute the max len limit
-        # default max_len: unit:second
-        self.max_len = 50
         if "conformer" in model_type or "transformer" in model_type or "wenetspeech" in model_type:
             # in transformer like model, we may use the subsample rate cnn network
             subsample_rate = self.model.subsampling_rate()

From eff363f69568f1fe37651f4346bc573258c7c5ec Mon Sep 17 00:00:00 2001
From: Hui Zhang
Date: Mon, 23 May 2022 09:56:49 +0800
Subject: [PATCH 012/127] add paddlespeech paper

---
 docs/paddlespeech.pdf | Bin 0 -> 371285 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 docs/paddlespeech.pdf

diff --git a/docs/paddlespeech.pdf b/docs/paddlespeech.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..a1c498ad23a29ef3a1c8b9694ff24d9128f2826e
GIT binary patch
literal 371285
[371285 bytes of base85-encoded binary patch data for docs/paddlespeech.pdf omitted]
z(b=chSju;Gu1zl7ZnEayNR-*2e74BT^pq;NPBKA9tm!7z2s$oE9^4uS*sz5zXvX3E zFCZD~C%iXJKvosalp?H#QkK!83>y*<=p3g9TeO2p=vP>jv#Na}+rG`}3VIBN3rXg6 zs_qiiKFS~r968W;*XKJd&(jB_z7cj{Gq<-d1)wEc&!W>^*r#1i7mP~C`;=0E-NrqW zi6jdWnxZ`RJRs1-RYIN1_W!GypobucD(8L^>Cw9$E+Bi6x&vA%tAeIf()7c_^aw=&D=*v2( zH2G}u*FD+ zD|zQRA8hEFn;$(g5dR>b30Y&ke-=(e5+nMN45kW6iJh2tGo+8S6XL?dCD>uI6hT*vhQktRFt?8olV8>{hj>X= z#869Yy27GeSN$!r*Es26^&R+#E-MRY6xm+>@g1M{7E(~vLSHxo$v~BV|G+5KbQ(OW zr9->XjRbNTO3SyA_iTc=v!Q`)Z=lEWf3nwOH4Or8sQG+8TtILN1B=j&ElCc>-SCib z{o9%cfihhg&h-5}fZsu`SY!E*#&Av)>4X_z%V=Lu09hLC_QgqfghsXrY|;ov&M$RR{Ak61)9N2Zm; zaXA!;#8r7$No12JhY+JOFkF`{+p2aahCiKy=cfk=nMa6_lAa&Us&;X5U0p8_DSvp> zW~u1U6@Dqtja{weys-WaHVsW$LX?fT&~+43KSw62E7VqKJvO-&5KK~$;8;(8#|`m1 z2|e{N4WNM9Nl2|^m&5Ep1deR@lE=LP&oq7zAG@eR%Z@;5=+V3HRttXBB2|%^z6rMW zX)JS1$BT{FO74L#$lns|RvKB0c;$d*Th;Eth)3v&N~MQcA4)aB3% zla&xW5LoH34X}PvAZ8@vcqkOA4?5^Q-c~jf?B<;U<$;rU+2RVzK3viJ2>iNI=ry?l zZpLUMhJeVKufq3YVre&z;jeu>!}wuP(UJIFp{3A!&8wcsuia{GwaqTKFNA#!sdkbT zPL}uz^{KB=1ME1CBS+61(7^9Ym4C_<1k6+*J|}t zMuse=+F#{`SVtQIaE~39l+zZxgFx+{+K!&DR#No1)=t`C@qz!ZBTW zePUllM}BF7tnJl3ac&U|6C(P(`NSD~mMkFNZM(`n*iD5C%MJ)-RmtIa3_gG{Vc8u$ zzf9Mv#{n-AM`%3&9k%q%35%r(wryuqxg6FvwbM^CXLL^ywoa>OW$ESr)87j@@-T<0 z1j11+F*xmqv$Kqx4yQuPRei~7U}D$ggKZt5C1Xp51bW1BS;}B5lS8GfBhc3(yz`;t zus$k#`90--lP`DQ-78J*@u(=^AR+46dir=dK!B&agnYOxs~>`FibOQ-_iFeoM~;Iw zT*CM?9|L#Pz13j8SX;dw)l1wsG~X^UuKWrt=dtJ($p%j1^L0Sa3oc4V z<9?bC4fyjs!;hS$kX1U2H9#N~Dy9NJ)gkSlikgCS=dF;`I67QrE&Xkt!^1MroPiBx zSHaE>;|}~mHFRD^Ny=}cqa*HLdwMcLJ>vD?z(tCrn9E~0E)ON-G)@m26Z>r#z!zS5erHd>i2{}H(Cch$e32PH9Z~&y;Q(~EW zMg>|Id$+Bk>&CK*Db`gCxo@Aw*l9ioo7e9 zNJbfRUOI#^^K)t=9a>(A^LxAWt@(yUa@KK*w>u;9-4ogW)!IT+Cg?B!b%cFG`}#W! zZ`W9-Wkn&AHU}E9Z=a;h-2Dl|&U%Y*-Y{+iG}aLar5F3TND^ejeD5i%^nP`}LB4pY5;kHXWY1Am?+VzkY0mi_zeW8f zK4-{g?yUH3RCSp3m5oED_;U@AVpD7F=;`YKp??tPO+ia6BQzoHMuN5|?kqo=>M|72 zSWG0lp9>nKvzRIPOO&jG5+vJ`mnO*|ZOZ+PuzVCCs6raPDsj6zb}pI~{Ny zViJW7Vhx#Pc%{JCX*IV*Px*4?0*ib3szy11bUhqS&M4ZO@2kdDF#tz#p+_nZUL#sm zg4&apho#gA#BlXAlB zdJ?qyy5{YAKB2WG2FW52OjI7}5ym>^(c8a?v^N(f|8{eLmST>t-!Ff;rAfr9^s z5oZ4%*YW=bi2rX!n3aR+|B{8*bSOI?@2=3}Y?_*=T5q=6)N#_b#zf{s+WhzCsVFb3 zEKc0|xb>NNm*0HdN^|NBTi&okc$yzQGnz>gyI0kW5gOX+N259iax^h8G(G{R(%Q`G z>?r46qvgIEW1#|?t^xMD#)Cpb`#U3us%{1O-|_%$JZ3&PD`Yt6G*ERl5XOHBJ1`jk z^q``&rk0WvT7NAO)u_gy-(Vowe?MiXcBN4*sJr+emzU>!dpZ@uy= z|7}UvstNjswBREVl@?c%7Eyucrz$N0NdgNAnxXReH)d7jv|tPFY`{S|0xtm3;bjB6 z{_+D&;R&9s{5GEqykW`=fg6C>JAh?s{>p)RW&-2?I?RLO1tL#sq7kjU0}Z|nkbo{tg@<< zh)aPM-Rj>?U<3?5(rNsut)Kq^g?kzO{?$^f5Mlb$&MiLYX_X54ceF$mZkXv{mP#9et>=1ZD(c~3$ed++s`DG)z@N*44VSq+; zf{f3>?87q;=qoTHF1KWFRgN0)LRrv5L`M~s`&@=&eAQ$WW9X$M~6Flyf*{i%ceoNc@ zH5dEu@ZyL66M&5FatHRtSa0tLinX?;`Lpw{H`tvs?6eo;_wwQQ`ok~xx8+eV zpK=b)&}g|onqi58lyFYkZ;rCPs><5e_u2BXtaiYCM}bvJ+nV+=Hhgzh>#(}?I}>dY++ z1{+T^u%@htGuuWqB&|!!EaD4HMP@$)Q{l^O$pMY6%V`w6lDwkL;2`agfgJR1ubN=5 zCD~K&P;0xem~1vb>684@@GBOvtB(uOHd7QK3u7p)-Yr7>%g`tkp<7e^(jtSd&HK1-&KDG$S`vB#Ij0| zV=9-Zz2}PR*-D(`Iz$sXTaV~ef2aTo?iS$SYtI9FKrS4OZFzDxZAup!hH5+bBjH-C&F>V z)G*A!Q(!wACASHNsdbXHMWuz^nBh)c&}ZlUp9qaVcU*0nIuxWU+c|O(4Y{ z3?kh8Jj0L$b~JG9Q)rnbiwWq6H39yopEu;*9le&@zqAMPy1&?uFUYgr&%Kah=xKfH zSr-#uHGZ2X5#;U@%IP(H;u`@sg4py0k$FB9pLecS+t_46i4cw+_+5V);Fdf0Zx_Ir zOEMwH5uO}}tJQhITVmSJ{TI!BoQ_`my#5@rI+GhEzi%cVn1iYV5D-=!#PbCs9lL6( z9Bt~38vyGzpXUyI)(UW}lgpD*AmRHS?=NV!O2tuTW19zEmOz{VY^h z40VAe*o^6o2>Qn3KX#$YKQ&1jpGiux5v({!W)NJAMsF{+@!9>A)rj}j2fLNFaU-9u z3;dD7(c@xV1y00tM6$n{|G8tumqjblPLE&$x)_NcXFg0{cT92Z^?AaoZ^!^H*!lhe zlZ}6q&GfvkKUtOHPHrZFQ?TYhGaSX?@Gu(Au71L+mSa$9f0_r&j$PS1k1+6ZpI#QM zBL9IHDK?uITV7=}iCZflwI^$aMjKee5gP)J 
zo~f6*PCcHijRl)eD&;9kI}$r3fXpr{wz@j6Klry2TB7rZ)=UWzu`q`F=N}w?jJuj7 zqMPbLCvHO2ZQE^W8%%av$aDZE>6CG+zV0L$pPrlrhWGopk0!Ag;jy93Pmtwkuw&CJ z0TQ^rp+4_w>52@tRP*}_=Q;s4TuTHbkEGmsu>-fV(9ufiuEb&;T*lu}m{jq1u8uca zN{k?hfJ@AaCF+k;vx=-&9CTwd?`cdRX&dhc7t0#+*xNVf6r|3%Cu7T_wXCKN1J!Py zr{e*3FJmg-fTyt#^am)I)R8Q~!RP`b*n^K4t;;lGOLW(%^E;LnDR?XV(T2bovmGJ; zzfkX|piYj`x-Z@zg87}jc#s3*jG~?9*%boy0vx()(olwazK#UFj7*zS5ocK~U8?Uz zoN8DPO}*kR15#zRSd&p_4J$FI=d#L7Sin0Tx58r-};c70g!YU}~;pbD%<(UwjIzx)bI_ z@AW;Q5}+rfa8$W6Tj@W|90@u$eel}sblLRGK9bxAx%-DE-Ei9gf9=*{JeB89yXEiu1D>(xhLUu}fXpC)V8J%F73*b97Vb5$J z)^$!i3d>MqsBj&irfz~pi-HQ|sS!EwAil$6r|@%^NA4D~O+x(@MI$|AcV zj$^C*ctv2qz4$muYSueN;g)o!DuCP^(9y;Y16vz?VT)9cjTyx{3RepiGv5rnj-ryq zpmUEF%x1yetLFzb>TE%1zxj>*^YQ!qnN3=dLrd<(51xq%+|&eH=@mO8$~LE@iPw1A zCdTKxP?67{y!6}7gu&9+B@F*0a}UtXi*gYL-H5c-z+GTNaUIog6_H`#Quw(^S5fsd zp}H|fWt`hpD2erX(s%1E4)V4;G%i&4`i51CF*;+E z*=NzcT9fWnhf0lXeT$0DLL>nJ?{}qp#@x)e3vWr`zrLZ12(aW(og9Et{@RM@d1E&EWVu zSGZp#!e~zK-5kAebq@Uj!>jyqRhJF&Qj7xV3aj-y@@RC7Wv(F}3hRvwgoq*mH$U7a z5`AfcYBN;O2lkjmFgOKGXa)rOSkqdp1UOY?M1UMuW8s$45G(D5=JU)le9gwln_YD} zuN;7^amN*!bjq5d12hp*onz^(LDKb0RrL8>ipl2Hm`ib4!Z`$~$u{@R3kH+H2((_RU zXI*4xFXi;|(#b0YA|r!K_ghS!KvC)%(--?WEVqjj=MYnuVIe%AQ5>tQEm3aC$%iW^ z-|B0}1$ZZ>RwULirP3}G0pA1p>}6ukF+yO}R_b)L0(&EmYrLE7=ul3RaP*vdE5jo8JQFtvYk?is;Lq!C{Eu zI~(-ZoR$Th!|k4@Nl{PGcsjy}-9;U6^!hnuRcdf8x(sFSx3|sKf6wsRN<%CkQILdN zv8BBg@LfWV*Gw=c&uvO8Kc z9ezQM%NeXy;w(5gz<=j9j^rMUyA9K$7%#}O2|U!=0af-jIBG;4I>`=;bng3n$tuZ| zR6mdV-}8zjHjCZ0cTdqT+^>J?l+^i}T3S|4s#XZor5AfMC)7rw1@nY8h1zvp>1gr9 zY6~PsTS@+odwD7!5_SiHTgl;}ZH-ygGP7o6P*J5$_O`!8n*3)g%af)ls&$Kl=h(C{ zykho%`G{cy<9|PCB0k#Ih`AL@DET6_`fE_vq+_7*#mm-@owD8R&~FlkZknae+6NE4 z*fZ;05n+tqdH(I&k{8utm{b&=EGs&>48YtP*pnh)Vaz7(i~|j>+}kW;9x2=w`lGROJg`FzL;h~`At z9uTj%#nHIcb`OljqN!zvPiF#4BfqTGRg|2ZS9VHSW?-4j0dTO?{xx5yp77pe=XFNg zHz5TC9Kp_yD2pr3-3}=aY|-C%-Llv^CHaN2nYvcHyc4uN|y zAUqtJ0pz6pZ(3(!o0h5>wvUQmaBiL4uQeOzSwZV^3J|G`CW352L=X4d6r&QDny9uG z!CXe_e3>$L*a||SO-O(?18#y15tSL8BZ77%dtVhL9_yU+chM4zRN|>iUNf!JEDTf< zG`onmkQVzBlIefdD4FvJUi`z->_W>J5(Nti)l)-P4979RQr?3m5o?^p1sy5C)t*i=6tg&-OF&wJ#HD!*ZAsB`&np z?0IQ5R@yAYQt8-G$7-*mMF&on%Uab{D z?hEd<=FbvixocZw@U#0rF$K6}MT>wBg5kuwCD*TSSQ8AGz-Fzp!7S8Wy1`EFczwU$ zMaL?{y+|av>QlE0R}nxld+d>f#g`1pFjUzrUMuFd@FzB?r=cD~NxTbArLm?yWid4O zF1oD$!Kg{h%QA>xBl0eW;0m(rXG=Y!ba~=%?TdjD*=k_@JYH__$8f>s^0$<0Zgm@H zg6WVF#i)!C-4i^beK^_sZ{>9gfp$jCn@bTe8|g?ydU=)^*~(oB>3SB@m?>JEgDzIwZPv2c%k-Eye3F%FIjt-fE7)b$nRUSTmCy8| zDa<{xQfHP25xymS-*9}C_;)ur`rYVsxIkwHaLviM4_o4WUVtjXWwLO88?6?32^5;w1(`M!)j779cJH#0`u$-aZY=jYuwybMCI_{a${1ri(WOrv}O+V4&IC`+O znp7aH@4Jz{lq!$B(}uD*jYr+t-;d#D$6YqDgTPLK+RRob#)q}n=ml`dB&yyV%nz%( zymWN?x(@$bfUa384Zx?8Yy>7QTHH4+)Dxic`a}*a64B(vM`vY`z+)Ym>~evSIKQd! 
z?N6KM8T($lPKf11ds-b8n%^?I)Kb<2R#Xj0C6w@jN~H*`re=aoZXZyPeK1oPGhihH z=6sF5{Ka>iq?B`mSl8BeRMwF7&Dmi~V7)lpLM>3><+S_L2%R1)7lQ?In2kA;G(T6s z8An2>Pf9}BqZA-mF_lON-|Ey+>&%j5AX3*>Lz9vaD(nDzpI zojuJ-bqa&)naaJNL4~4Ib&_MTQ+&`RR~ymkA_r5SwUZT5cz; z?jH#bPN!lWnk4NqrZ1dY#->|o)>|rjuD49ZHU4k(Y_y(wCVs3LBEp+`h&taEMxhhz zn!j#nG_Q~M_}N`daVRw%zjQ_ojpD`DR9QgY`O8 zO-FD?(wl8kPo}U6p-HC=GEFiD&PD&wSX2+6%^@nvsjSLcMY3P_>iQKvkTxm9>)VBx z1>J2+p?rf@CN3IuE?D^FxNd8F`?-SCX{B=$7HG)+NlR0OoUB=)i0-D1@t zHf-~hI0QX1#+O79`o*yE1t-~d=GfID;QjFF#;k|-b9D$HcQUV2o(ga#QxmS86oUZc zZTTR93zVX$tR;pq;~fiv7I`|HCu4BPO@o?EUr`edXtS;hdX0|vU~u#1#!J4KVbv~G z=u>RLwpW+XG$?UOlB(%FhQgjpd`irRG+v=fC|H+r@!v3E))#2 z_Up%blcW(V#~z=SNp!l_Iu43WO{6b*B6Rj=1*U3vSWCvso{qK~^eCP@8?Qu^#1|V??-_Ij z>LC|=A**p`KdF0i6{&B5u7)_#6qWH@>%GkuZc5BWAGdXAga;{>j__VIOjxmVIV?Q1 z6yypbJ38g#+gcTq>TzJjdd<0_AVLLl9ZQf=B&v?s<3UDZVYpTY!8kT8oWV%xi;j2b zH(US)RhbDssm3XPs?FC9%Nylie^rO;tKFoa`?s(U0YFU zgRFi``>Owd6!SSWvYs6b_H*dZ3V8*7s?5_gl-nsJ9bZ<_u<2`U1#kNmX!CA=H6UM2 zaPTGIi8I27)f-^+YKfRAU?lc+KTo7oiE|At63bZ9-a{sis(5qHvbJ_!Py<%$tDxpW zs$ONwT|)eHsB=T5obD*g)u8*)))=0fOREm4u5QUEHpb>EhXJ~6I8R4 z)QS>_y#ow#&axiC-teJMJ&Os)M(8d3NRr`lY((6A5|n%Q;NIfc1BO@6sEpB30#r3e zY3cAFvKj?QZ!R|QOLNz^jWxYO23=2uoN#s#c)#1@oshXq^<*amc-9Cn0xpz?v2lrS zNmGY*IPV>2xJ?*~Nne<0CSH<1hs>!1Er3C?oj9PLLoZJG-0GCTc0DEhZOkdT?P|Ad z&zqnd4_7hk9;6NXh(W1n!=YhyQd*(A29`Z4UM26}Zr^Xl8CLjIf??71saI&wzBjU4 z=PiM89*7}BgGvJ#BTjo#$m%lfSMm@b`p?F$hY1}P1f<4XYQ%Xsa-{IxpRI2jH{1P+BQe2KmzP*iSZR?iuRQ`OV0(&`S}Ot(U7pmtpEr%0oz$a3Agq?%4h zt)I3tGEFWG{^2hH!-6nG12LZ%^&nhJtVyv5E#p3B%vLVVC1dsN)pMU5K1Xv+nN?|t zA=~f0Eki&e>Xk%6iUX4-s?xi(K3I5#tv+?2GX_i8p(kPGTx@TDfAy#Gi2Gq|e&3yp z_q`*7wMxGdB%_zRu@}cjI;)oaO4QzNaF$cK;dX%7DD;lM)y58`sz`QRg4pyAb?a{+ zSlNn7e7&pi^W8CS=fnKZPpi^G-EN?9546J(ZG%6L6?$;#s^F4r7G_yYzj7lFRUc%v z)Lj>E^i~YJd6Vrx$X<~J#wl-OilzN(HIaOW7$F)t!VO0NHBzxMTL{#J&pWkZPZcSX zBlB~?_g20rQzDDYcEgISGT27$-VQYCPWKpeGvsaf%ze(7i`W$^k0xg08x#jana45i z1FW*gDq-q{+o#JD7`m#=u$nQP8mL;1HfT212jwz{kqL~F1#$WdfNdw zvpK`Od~_$7dXS&WqzhuU6$gO|GhC{Mx`UNwQ>L8#^oJ8)y)`9B(~TH)i~S~$Tv(_9 z;bi4df{_yEc35IbUGzw)@`9;ShvegiTB}O`RKccQtpJ7^cB_)|llt&;w*5$Bm$8ydPd1{?{UZv+8Im;| zXzTV{oUWQi!EnJKLbVz2kb{ZstlxqOHJETt{0M{}$xSgr!xHhC_}5qeX=UF_XTr5_MXiTcMY8noD;FwHQLQdSdKw)C$PfND=6GRGuLW5QdeI zU+*D!8$W*L=OJ##9Q$GN2ht&OD^y{CUceZSTO)2xj zn9Kw@wBV2WEhmcil!3WfKT*L(hh{_MTTn2j+1A5*vUIiBS|bLl6qE#WY0v8mY;iV7 zi=+$aM)UIZAn5iBRUVWZN7SH61EgQzL^VCrylDR;OSG$410+|R)#yqv3Hoh15!9-T z9~(*PXAYq$#zAY0I|ca|o+d{3s~P=MP>c+V94F?$ZD6jZS=!|(x_4j3K(N%@kF#Wnv;2gbe_R#R z?j29>#CHe^E}4@nl{)nOa+6MIGp5qP>=`}7#IXSG)>rPzgm0NhvH~i`k!x~1rKa8< z{>5D4GA|9XFWoM84A?Ha&< zX}b@=i*KD?e;RlW%?Bgp7*nB0!?B^J3pqFYkFX$sEG#92K(;~9QGVie@Yu^x7Sp4u zY7ru=bDk1qjlsteP6T2y?$$)_Ve^Szp^8CmKqYL+PJBXk=0quc`Un!Rs`r;&`uVg% zOR^YfXth*TMeMaZmVG6;OtOA+#=7^P{#ICun$R?QMPKIeFKiyUyH%QyTY6YZ#h!mn zS#icY6^wVtioIgMhf)=OY6>o{d43CPZeF%2Fz_XSw|MbH9Z0*0$FV zdpI_Qp;#sDnXO4*-2$~hYU6PC;lBSD!jGbYF52D7v*_9HLQ59nq!Ju{$1H(##E#yV z5~im9PP^*2lB2|+XFlDsa$j<$17b!IE(75jC&gEXJ+mZ{f-2JMcNGN==^*RUSWGR>vY z&Hv2=sY)Ru!|MTDEeLSR_QukOA8u26G8lrqfEo+|nKzu0g1BeaW5L1EuBxmr58mgL zn`hjM-C!xxof@ke|5NR3Sl@zRUepVqe>k9vIlRqdNzZ+PjJjIU&rL6a-?z05Dd}UK z)go!l(dWMG!j=rAR%Gxp@sP(7hGS9kl4P)+liMx1c#7PnbmiuI2-+!#3=A1?-%Q72f5^@-{11UU~(G`aLA+qe4GSezNaX+WMcH2mF9c*5S=;%+!7QFAconVkutG z)=fwm=Y0b^4mb3eCV5qlZg|J#=fcy-36&StpP4pG*SfN#e_ldme5{Uu?cRA@n2$zL zMp-cVd_W^{NaZ$}iT8AB+2#E~%dId!ZITUz(2#bwH_!6B1$afyO0hv`V!8-X0zTXT z3?-?^slH2P_^AaxuAa}2DQ^!~bqT=_;M@2iQqQaGR&bVpw~JnD1_WYV#E`jv2Ga@{ zANJrED{>CR=gyD9x&U!DL?=owhb|`-sAHX*HH6R4%+jwx1MuaD&bFSE#YWH3SdZa6 z<-;Qq+raI=2acs@Y@)CHk(MeHNUGDa&@q9wxQK>}Qf1RemE##ZZ`meedL&y}hJVO( 
zmK57!w;AvV26dXcNa7N_*G@2L!!ie%vzp!_h2wQtBpUQHi|>^Cm_1ic@_n{-f8$ZH z(k7O|^8V%jE!-5Yw_M?Uh{{I|^US2wWed7PVPu{8ML>ZT2V+9Ky%)-bVlnKCbD zCC0n`4Or&-MFW*D{N&m_!y&+8<&yI=9pY+gcHo(j{hW4YCqbBJrS8D2?)du_d1>%> z7v@$GAwdA~Q^V{VlCq@;(!WJ^;;Azh;z{{q0D|Dfyma(ZC^StrN<_2Idl?Fe_3J5lbYETPSbKp{RD4F;uY}*QUZxvUo~Tn?;5;nc(K?CYxmo&lo6NNrL@SnJk~6BQWG1e z`P_VsM;Ps#CiF-H8uKXDDNbX?5o|2_DUVNZK}|2jB?2tIN9R^sG_tku;XmkVo9f0z z*xG)P>8#S6Jpyv$i{!bS?<^y{Ypy?d<7*%k2;b!<+y-!4=s>kEI(A6Prn;_10 zKkD+mR{v#)k@d{2wXN|;uz}U_zWH191vVN2pr-cQ@Q{X3Vv$^&Qh@0x1wh8Y23$cQ_Iy!{Z0bC27o&Ew{yIo#*}S{CezAEECbhvWz5tUqQ>?b6nq@F?WTKeq0gBpm%6t zmD$k8%;%UD;(fk0&)0$Ql`6D)`*DJ%E^NL!;>5Uv6#hUSHyQiy@R2nGZ7nMw{z<1b zRJC#}+V?fKi3|(SBFWl62pfT`U?E9^VnKqm3?qV`(lJD~-vMnY;O}$U^<9Id818&R z>W9rj(2};o)K>E;c25ChW27wNEyAIHl+d^?=u#|gv5UWZ@D_=I+@3$^)`r9_9CKQ^ z?SkJWGHKmmYf9az_YMc^FsFqY6{rpDZ9JTAJxD#;)DGjMk%w@r0h&1R>AtR?JkH3t z8bN-}4+~GqWbH@9<}UW$>-Et?OXoR=-srWjlh@5wpRnM4a)I33QGP3L2qg@~vfCqv zRRS~lv9>TJjnNI^S+>R+5J{(3aBgdw?4!hWl9B~5KGIwv`R2$ju;x*5LUQr;O%}Y{ zF}O^tQmgb6szK$-mx>udB6t&(Hg=Wyj+sd^CQcc!nQU+oheqk|n_KLxA2FcN)9nQ4 zU$O%HP#jsh)h70{k^^3Vy8UY1bHNh^3zg8GGw?6xz#uPe{yO~2qO%OS z)F>6%4DJr^=u_XX8@T^kHi~|2$JNc8Nr2PG z!Ed^@!0*xy0LOmr0p|~O!?m*4J_ul+k^G?vpI39#<1;WpdNFB&(iPE5zbs!#Pp(u5 zqI*+dhrPEV;ckAAPdl6kn(K71vyzA@cpI?GaOo#wp@kkl82wvEf~E@ZW4@Ni9f+Y4Yjuw&`VwAl9*xg zA(EX(8kk}HVOVQ~9_i9#iP}d{mSb$A)A)~FL|kZYv9gcCyr>eL=e~q3pUC=)GjL6- zXj#b7i?O~H)Vg?bfu@Sw4L9<`#~2NVe>WcDQRtE73D2m?7FP%7+;FS#ve}L1XsZ-T zn4=@0oc{e*FV((a-9H@cR*;m<@+7h9FFYv&DtrKmnZeg*stBjO1G%$BSxP0fH6L@$ z=f>e`c2;dHo_IYdg+SUJjxAf=6m)5Rl648$l$;JxdN~Pgg7jNj$uoI=IlAMK1=cvL z^w)H#UH<59_Q|y}_F^cf`yFSTmUBSo<=89nifH!eD*oXdt{se%aMj?iz%M7QV*oQ# zEk&jDfoAf+vp?yt36H7bXLp0hQ*0wp$n6PXW9ru#Rc(MNN-L7}+hu98xK)<2C@OT9Nm?hQ8`djD`t`3+4@vA|f&#$mwx zYLT7C&lG2#=ZiiWSZDmVJ?*5LQJx*6Dc2H=-C;>SSuehv+`CLBJ9*Jd410Fp8R$=N z2mD%^nPXLQxl`}z66PJ&LFszRk`%{^)hcoNNHihaaj0tXWCOZIwofYC6Isi*{VNmz z!kT0_o?#;KlFp!Urvd1su~0I*ZL__q1*uJaWVnfY?#$)i*RR4WY%W-YvuCH}?{~hc z5|QjPQOVmGT&l{C(@gULMcyKNh|uF_6i^Qya#aCABv`;ON!6UcQ-L<~fT~iLhR>m~ z#Ylg^`V059Ug6e_Gx}_g3@3vsX{Q{Vm1A?nzC=N=yYn0QfMT{Vpm(yCc`7NpfNdGp z+MX1%CR+l^e3@^LCjK=O5^=gPD-!q{f~#mf{7g|lwYc*gW$-7y$h1977)HQ~8(ZK~UP4!4pLw)ZMG=)A0mE5{=BTr_8Lj7q!TnidJ6oz5AIM@LL5IFcU5xl*v zH|~pL)CokH)`V?j$p6@G9}9__9kDYL=POFJuGJo4o7D>&$C+iVzgrzG_g!ho`q=+g ziGFN>=MmQWLY_NVFhqYe%7bKw$(C+)Waad@b5zFp;+wKKLPOFKs7hjA3EbanSU$m) zT7b!%&xZKQt{zCGdeV62q)ic*kcA;KqjZ(Ht~4BFs0Vgdih6ewEz3)#ayGZV`e22a>D9Uxnv{`VmFp4mt^+RR5zd34lGp{YpkTrf z=<+Rf=@KyEqRmMaa~6V96di=sKrYd`P4lNkdkp)L@6ht=X9dI z*af4vnMJR(CkzPBCT8~h-x<%@iiw#?B}V=E@Jkbn9LGvArO|ePnIf|%ObmH$96cq^v=}6w=CalhsMd6HLQ^s~j zaS!qiRl!~jgQA#33#94UAk$`Vdi{oMZ!EG#E7SYBf*P1)-caT4D6nEKk~*}|R^Lso z03V)?wH|T$nKyV$yTcFNUqqde54D)q>ex!Pg?`lTW~x&x+*wqap<}+*&&Pe|rCoY% zlPpQHo45de%tTT6klQX=Pj<`1HnOhJYUx5T%av7I`6~%f;#3eFSc-a-;J2|8S9+)7 z0tEBh`WR-t53H99hODhXhkugDJ3qXowLu2nl7T-k^z&PlkwJ9ZorM4j($b%Z8F#Gv zLx30+2@F%|B@KG zFhVlKix>C$sE|XXX>A($`*;(4oR2Qhxlhk*HhA7KUC>CY9yK(-@eA6(^Y;gNc>lJE zMzRWZub$F^65*lEFgLxolD5i?cpFS{rL_Zy8zu-mnr* z$3}4*y_%D-+F*?;ux;Z}hWdzzd0R->p_-MXM&lHo2=TS|WI0}$&tnNL-1KPFDn&PN z^ey}OE?5))I_l&8l^&$6)Fz$g79&&7zs#q@QFH+uMqM9vo^|+OyXEpSGlt_eJCmXM zBycF298pu|nJS&3eiqm2Ob=HlvqABMp1r}DDt<+>DH#L0hl2fp{fboPGvtltv(=jJ zSJ~94viLq*uLkvmhzOzEQE0&!=+w7(ZD6PII`};8j@2|g<_?N+5vuRbVs|F7bTWMH zgCyfgqli~EMSAj(tLC~_=Z2=4UdSEBp+<#;EpVoKN{Ov~V!{ zB_rlff+M;My(Ijz&Zte?3kr*lPYAn+(MeTEf5$uh=7}Ik;&~QH%#}IRxPKAQ+@ifM5^u9#E){=J znn^w!Qwja9yBESOi`FP78;1mTF~5a`=xX2G%xMX7_}Y=Gx96C4y}aLyz)o3=QVSek3R;abrvqlIw|^L0*>Tg*NVS_}Um5ypHoO4Gi2 zuRYOup!15+c$BiUumO5dVbU9TdFZn?t#+iw)4gmG?s#gRzB=j*VaQ8|jZD 
zw;piw9!ueV>n+MI!F#24g7gKeKxBSGxq2b5RIeWJ2X-KhgG7%Y@vVU>+Ij+R(G@E+ zO-msN>p1#3EA@{hYNG$;Hg6Cw%B87`js|icgrCp#Mm$vuQYo*O!OjS8U9d>ZgVuga z-8lz(ARwHamB>jcqN27fcnBP$V>B-#JRy zTm~h)Yz5Q?Ct6ifu35y#{o=NW-o_D`ct0>;kFE2$&F2o+@vx`f(wy3|dOhO$Bthcv zo)w%9E4_zd7k;Xj>mfvrgA#7e@jPUqP%(KvAHplS+oUMAu4Gs_RTpzLthm#~WrQLq?H_0#BCjEDuyywhfAh-}19_c%6F>5T? z6;p!QlTEhB-?@~;6n*OWMf%SWhulmgozO@Ca52QZIBHRFQ(PvA{MkpG+F~(PB~@kW zdWd&t4wPe<1=myVMrhM`25EDY3e3UhAE;AL_Zat&EwC=F$14?uon)VzSQ~?7?pq?m z@1@U;V>N$pRP+ldkDDJkOGExeQeuCK2=d#5kmx4IM1n$aOLZU7YqA*uJpQU$RuTvMepiQ1m&<@$UIWou=p~rxTu-tKksO<#j0!pSINzGj-oYu)9B>ogVw&^1|x^ma&}#MqxvA{d;SH)F0)bkKM-dY zhW|g}%=|yO$Nvy#P7cohA)fy?;>^Ur{%=h_J}4(=M-u}ZDEF+Y4kPDcyGAm+P9rCB z+|8Ed`fA)w$){WOQ_N987_snVrG;fvGJ_f%1wXIZ7nu7HxHED%2>Avsh6z$Q>GEG=LZ zov13>D8L4^5dm#-0@S{x8ASc>Tj0L6nU$e~+=Z;OUqeVvKo~%|7XhQl-c`Sw5rI!% zohAUUyu#?<#Km1RzhLCPsrk8;cRTp3YXG#Ckrg_!|1i$%&7bImUgDj-h;091oKdc_ zva+LaX|J*YeH*F5B7QSV8{6|A`P;K;emVeMOMHoCucCB+DF2&ro(Vx~kpnQx>-;w( z$`1e;n%v7uIJUR_H(;Gx>nk!OGBLP%-Ipm3lmloQ23NMqUrx@~;K%THG5fa)`L0*R z&fL!K`o4D4^*7=p7l$S*+i#`h$zEp4*9nXJciP6(0DAOpwn}0>V*@Y^ zmwN7A54PaT%(Uz$fq|vIvH1lc4M27uUiuT6PhStpd(GB3I+X%onH7cKW?=}Z;4CqIbWhEr$x0gGF4Q&PQ zw^aL=RSWmcuHzS+fzoWS*9NhH^^+QwnD?+Z8cps1?M46@6xz(_AN!X7yqohqSz~Dh zU^gPt4F9$cS%%}-SKy$RaH`d*9rm3*o9g;^Awtg_T>{=*La zqUR6g3*N6eJorQ3`l~n%&z<<|@dhrYu{MKymlzYCjVv%aHU9Ma{V5$5nZEq#oA{t7 zAoJ_-V{mq5z7IF|qs>md*jw?7-}}pR{)3LesBOfZ=qo0^zCO2dbldbT|BrDtLKh$H zCHl1j{Hp%=7mS?YMKFhJ4%6CUY&L~#o}LkZq7#|S%tKhQy*mlQhhwa_kf)S(t8TM; zn2FUfsA_Qc2U%p7WB7ahq~6`~*MUIR`>hjwF{6MS(>*=(_B*?R9^WLfD=_ec!B4RE zNP8pQ^JumbblnDm)>W)|a2ryTwEMt~BA^@&O+2lVoT~S>=H2J4DE1|cZ-nxh5hHsn znxL#1wAAbfa#TJRC9sfqP?(oJ&KumFWu39km?uPoPqK6t4YL&Xpw5c z(h$?*t6U9}T4x^ZPCOb>8mW92dcB2Daft$y6dawYDd13sWEq1qei4Or zndq&AQ{&PebZqDikr>44TWk{94K%Xgi7Trc(%`K_utG%%{Gwq7!jqO~Q)h?r@ETChG&F|H@3~zP;l(6!jPc?)6%+VIfi3py}WPI8eMMY4AN{@`iO80fag!W;Z1xeRM`n>tT(hP ze5+7aEMA3ZG9IY+oEd{r6JLDIieEMOowK>9zE$Q5|7mdKy5BHH_0`8jkG!owWS4*W zdjP3go6S+nx_DgT`t6`+dxt5_$jOPyzf#*Rk3T`5+9}@AW!+)ROGP$ihb=e?=@Ptg;zQ9zWm1EO zZXu&F^@bh^HtD8_X|VMX{x%iCvu0fVLMLyAg2J7J2i>4ueTJ`&ETt=cS4+;orK@H3&yRwA z2Xo&`cW78?k(Z=KUd?}R6b_i|twEn*>dNJXv(4#|29fYf!R0)oEoBhm3#wm{AcqVy z)^Ht*9zvPq#gjKUEUjlLE2Ji`2W0HHTgD@Ts7zJK;9PUQE~8)I`&ZM{*~1&7$O0r4 zsJi};qrsFsM=RfYt~=Y;%`-^<-N3}zN`^>D*J8tBy9`pvRTYNoW105J9=A6D_JCsYF*bdsSC~BzJB+;qT_ecX7;5m(4U}S|8N^2hNLq! 
zm|Nma3lIB1^u~S<)N3LY=h_FnW6HLO`F$E7k9;dc^32?9e9N>igJz;w1`|}DBHc5V zsuMjh8@tq?d|!_iS>@_ya6Vh96V}_XULvtEXlrw1vMc|SM$uFS)YBW*&a^ZD3*B~G zZa%{PH3F4W&JYHagig5DXOavQj5D0<+)nPVzKfF+o@{q)U&Y;7S4L~_5(kE7Ob3X= ztjlwsJdWn#fz8gj+W+8h0?GTQwFJan=f_SM93NPe_HGhW*?REy)?UkqK!J#-+Z(|A z=ScS4h?@Z^@Kru33p5XImi0@p2;{4EkAH~O=uASKSEkn*g+4(P2Nmo+O){mEsfivn zXeBLCG?!QyCNRC=&`RK_D{cSD)AUSTMi|q{S_$c96e+bZ)WF*ct#kOvTsb3D!E7#` zBMC>`9BknazFG#i&yZ{kX#8|vX)cR$If!E;7}9fA`@(He)gHk|^L8$O5^U3C zDLQ|Xk9Q0@2OqWXC(hn-lVf4vd^=tNsBl-g)YTJv9&B6c$hIgjCoz9Q#Hw(~49gpD zWCk!dz%^w~7?LxQ96CXoP|2Zxi0OuK+^b3#DdX@+3xdP-#!D4DzU-Qg4>F=5eg(q+Jif*_Hh3RW|7Oby{fun*OHg>Jy)Ct3_Xy1x`Dam;T1kBT>FJ(j_kp3WikeE{4LFHnoO1UMv4*ArJe6@=ZuTWul>bqR#p5{`3Wp*P zO~4t*```YV?g0QLmzJ)%&VGW@_*b5*8+w-H(te&mqVHz#Sb$!c-`lIoanMp7#&7>PJ1j08$5;a&;>5!N9&nOBD!Y+R(KsoZgMgFa~EgBs&2e891YeBg-PWqCBv@BU%qmG_JTIOnFWR<#0b?>v}l4T z&*Brvh%|n6LI>kxc@%rxW5`w)kc&SLI;kFN1U%GGb7kH+420NmeEGrELM(}-aZT}h z*H;*&GwnRm1;I!Wfx34AnH6b`rBQAtD}Ys+ADyA(H$7-qi-P%5aRjwFI>kHyY5i)7 zwrD`hZ>G*9pq=~0na|)`p31Xv$Qjv|-kOn)=(g7NVN4{U$`}UX0gb7gJ7l?)61_IU z4S(m!kRO{ti#q12@5PBqBJ$BG2KyQ(e3QjKEaa+Q#BpFfrr@sg@!i}G@gQnsOqjW+ z9Ol^1=)x0ML_Yoe6zN&8=6x?8u*yWzG%F@$V7;r6V@ zzm$gT%kVvtQQKoSMV>YrP*Xa8d#*pb1IL3?O7NJgjMmuzs}xa(+YjnY9LB3zQEQ%i z4#aSe7h&Y-g4d32ut8qgq!~d*@YWa{{Q79p);za6plROF>)IKJLKW-!;8m+j&}L2} z`js3xP(AWEX&YyR@WpN+%Ux{v46IO@Ixnoqk}F+>2l|M1EcjR=aD z(F0m}vXkiS7TtDcP8);+8Mg74sER^Y zYWx=fGeFG0TEmeU6W?W#nD=AEyI{f9fN)5JWWu zX94RCu-bpYYnqQt%(z;O(r!59~DwrT|Nf+~0xlJ1jYR`#$(@5g{0 zVsq`WU(4)du@;GP3sx;&0Qyu$4}7<6P$sr}goo!eeItQVos#w$Ueu7+z^cN$O*4r* zsFH4|J?ziwOQI`(cpOeo&)b_@APw4Z;qe@Arb;jJhYfx^xerX~=V38(m*z(CE1e&% zTyAzFTorL5mgRCt-t9LYN29DJb2fn%I$phUu^8@e|%7gB|$+P zqDPsScjSn=jX~9idvilZKUeGSM1&5s9{+K3%C+E4>YmW9C-^?zQm)wxyMPLe1s%!O zOzrI>Dh!?b)=KIrS;EP<<4utvfL5@5%ZI^Ej`Pa=xCEz%l=TMbF(s;08$NE+?v+jR zqQ{S#4Qvw@QHr!SKgxNY>Nx0hu};c3M2fgBM`i6(%rJV;xQFP&orc; z#&89_$W*U@N2-PmnN-8fY`1zi>YWt6x*u+#yy%8g_l||0W*hO*<662{OvOvCB0g;3 zHcy#qz4IK!L|uVUi&@a@*%@yh5SpqrN~K>KPZ=|Nap{+K!Zarc9N_jZl#?+6t%`oD z^NU#(6+@^v58Dp?2OyJF zi~ljHh$cYnJ3#oC<0CI7v`C=YAR9Ygwvk*_K(H(hf$t7hEqO6qUu*TlQO`#cyV|~!r0i_3 zO0waun{V-Rujd@dMb^kxyxkZQ|5%C7L!pVzg>@BJ2_r{75!}?x&rZaF7u@uLSrIbV zD=XvFF__v;ja8{KVetOZuW{1}*8*YzEL!M8jtJBuJhsK8Z&I9SEZYWuesac$MVU>i zDyA@I9Dk*H_r%A`65IBoI13Yl^$qW3AB%~zJGKgIqQ66htvBbze& zsR|k*14fx31OZ)$r6y$a^BI3BGCYcoFwqUEkwkr@O42WjWY{T#f8hoD=iT~=^!k#e z2o@j08?)Ml!;3R2hl1QWLa>YBsF`+nJg$7^jo9iz2DfmsC&Zy)w|zes#PThVf-FLt z9(5@jx{8jbv|}TClnHOk3XO^`mXcwyB3XVY76KL(?dFwMx$LXi1x^C>#hA4Tjv-O0 z)4Z!B)q*KWRBM`8Zn~2zZIRq3xDXWzBk@}UQu|!xI!Mi`z&t=gvDy3)GJU!H4@GDs zZr2T4afx7{Rs4+Su)LxFZ&O>EJ9mQeP0<2&tQM1avIOYurY?475Zk2!1pm`~R^HoU zYVaZawg(94w$dpN264H5_=blswz1~pWe8%jR<;;s^!15*8{jLP1n+}wuaSzMkEr1k zzb1=75hQOksgy!SLv&{YdPafB2fyNtR%*wbmv~CJ0`XgIvdd?7pLy}pzO~kJlby6K z!t*C_vVAF5A~EqW&ab?}tz#{%847a38YQPnDWluCJq!rok0MDd$~_Vo*U6>yUr1{+M!pGo-m;PkZhn$gZX zv6#UkWM^_R(xDrJD;&HStR3LCOiI)5f-C*aO8Is)0~hWgGjC|@@pS8CRfP@S7Z+~i zf>K9w^PawovgZ~#zZjmAE@9)l@{7NrZ#j9GoM9o5!h`TE+(*IpOtaYCed&CUHN9A6 zMdxDE2b)n#^A}!5RY0cV!En^K*7k#Yw3-@`;zmy7*w9loOC{$33-_*9uVO$422YJV zUw9euZ|@g*2j?{|jAi`UN5#n9s_e*Yn}55@*br=(qAct2RtmP`bK;DdU}13+~voryCs}^THfL`OC2}pJVhYW>&G_7Dd=v< z+w>5^gsZAIMQdX#d%4x1F~F6?vKFLI z?bu)$Cb;~XtL-kqMG!Z-XQGVICX{9^lH9_dTV`-XY>{ykKtL+2pR2(kj|s)rhos0% zUN2Gmv(B9YNao(vRuGTUoT+5`4bJf9;(LfufHSXKKiGGpbtf5-H*g}ZrkBKSMx8JNC*i`zsAAQR8prfeVgVyIIqp|*AinFM1vB4s5 zZgzoRzn@`?3x$O^KS(;DC$~ZE?<=q75c5z6i>|FYl&WE|5@+#99+R%Pz`YGa3mU-Q zp{v8s*zv96l8~Rml5A2A(Hd(4(`p3g&qlL6wXYQ!Hc7|Xdq_&fs2tIQBOg%mntD~H zVLGTW(vZ$eyk#I&5ZBawIW)@z=lGq(@RPl#!i)XHc_u*c8!V^{6kzVpZdY{|lUF9+ 
zO+$U}hK#Y*j4t=k&VwC@kr5~d3}!zYe0DW*SiN+yE+h$%Vo{i*Z){nt3-n%q>z;Fh_69shtMV9){j2YdiFh;V?rpX&+XgG|)jUHs3di;j+Yn)m5sPo+ zej>Fi;tM@9@T=D#pJ=4QxV?P9qk(S9=P7A%&H_IIO35z!FJQ|IGW$ zN*|G-j65=t4zjt9?J%vYiXb8=pQ#;-3XrtdSKM4p>*QK~BcR5&J%_f3f1kaKYU<)) z$xn89>N0!S%-xhD);{BZN)5nplr+k2eAjdiHQkMq^$nZ_#D0X8cao`MGJ7X$e?7~^srxB?7+)`9}O!tpmtH-yd z*&01M-8^49imeoh1>+cqV77&;D(BL>!^E-p*3gInK}w&lN2%n)z7INh`%r}abedTt zZPebFbKXt%A#$lDyGWjDzfCX{6nNXE$gh0@ebE-Kr=-_T#{W9*p!$pg)OjZ-2-vV- zpVZP|E<)m`TkQ0e{q1nR*rw=7q}=j#6c$OmCF*;}86I%VrQ%He5E@SL%F7odk2?U9 zEJR%iUf*Lnq9yO4aJ?qeVW_@v=3c(otM;H=j@7lukn)R~WPA=Hy4bUEcN21Qfi4eg zG0E=J0p(^7yTG9T2?=ep7`+m8Gy1x0|6#JHAIt*GHU-9{UgP(eP5SWoY0PfRaHuGDzdkff7Hctj*{#uC_8hN-gJ-k=V+W9;JpvJMtpSVX z5Ei3MJvp201eKFCzV)kj>;8Nr3{Hs|BP z@=03n<>;~Q3)r3t0J;$Wwpsht!@`UsDA$}Ied;Xl#0^VpoQ;dCJ;D&m)UzM0t*hC1 zic<yNArPm4^kR`7sGAkwNXgBMLeyP zb>r2aC)Rm;fo5;2^O;g1mz+h1n$3EeJ!Lsm-)aSiatC8jM%>33LEEBLHo3Y$$N1%i zky8r8r{iti5(ztRP!WQs+VP9w^<8V+R-$+MBJ8_ukI13nMLx0}TyYaLWkMPhl}2N` zDpq~Z+x-MCX~hw*X6NddLaBD5LC#%=wSBg?$R1;@BpRos)nt7mp*PnB5-N zCXDY~BG~=wW|>~i7ETOL{un~R=@wTd`kG?*;P`%u=8dlxzsrPlSv}y~7c{<;;Ly!q zHBu|RWoK%16^C4+t$!CN8Y-6#Cv)<}RgA9MPFYT@S6=T}deF-Qx!};8QQN;q=;H7^ z*Oqm)OMoCM|JH$H1xErO&7Cu5wwZ@XQH30mry#A}?8OY30elqJ~(W)eDT zdA^?Xya12K7WMh02mLvjnh*0Olj=g-odrDXT9<7V8$AST*#S`x|*y8bobQ$NirMEG5;MX!T{q7a+ zR%}W)^RKt;5|LqRn=#hAH&@b!gUH9;VfON|b9QT3%Oh>I{V~vwfVF0uicyECW-z5q z4013e9^6@DhLsiB|Y8h=8LS~$CkrtQV`F7`3WyzFgJgf(9hX+^rM^4D{*5c-nL zb9X@Y&;#*u+rcjAvGYVRYHW2PHe6OXH06(3*-o`S-8E` zN&*@3_S6ewRWl4yftO&(CEQji7EM!A8GITIJ(4uj`J8Zqo{<;~JHOyNk|HALP!gSn zedZ!ff3DGfS4h)8k4wX4g;NS?-N+N*xa)oX-8eR#_(*5Xj--C+eB`IRhpaw-!`h_j z%Vo_5N?%v`%Bh=KsjjxWv^CDaMbtQNN7ZFU%Wp~Ec#{~yC8Z#Pz`&!alM?~LZ2HFe zGI10t$@ptW8{hDe=~MoR4H-WJvq$zl5i*7*GItsWzO^->uWwqQ=Pv#VqMw{mg-(tA zBKlP64yWQqUT7Wwe$9;>%tel*zt2^;JDVAIo|M%!^a-!1a) z6S*s!b6<$2#HJ;TYcwkUGr8aa_8ZIaD*h(~`A`36AGZ-)P91Kyt$mUf>L#q7oHI1< ztxdH((I<4$nnK~Wnitl6CAlX$Sv^a!a!8(ugPH<`w8M2v|A5h3GO17Vb9Kl9mb0p@ zy%I`ODPJZO&8oy9P5YB98)57T6?7gd&WLQ^5LHt=XtTxUm%tiG09Zh zs~kV@>~#$XDI0=UBT*S-kO4r>yLeLx|(s<6Plip7g0z31EzW5TB6)+NjlzIKVJw|_IGWU9b*Gy_tEe_ zI4o5jGs7Z)d`)v4Ahtxe-;;z)zVbG=nAMDc-K6%II9vzuPTszt62@cU{B6$5hdn+D z^*kt~noySD%w#2s11VhxFHSeWjlEi@;L-x^b$(|2!>-t;n3q@%(o|M>nsY`Ywms^K zv1(FaJ1*Lf9gzkQVl@LJ;|8Q39%>x8R~|$tP17!_$yDQtj?Fvo{z&tNwo0Cxmg%h9 zE&ubJTc7H**vb_YvOnLdXg(pV0>joIyB#LDM0e3+S*Mhx)Z8`2S(4edqFb5?cRa^G z#d6Ac?ZQIq3%B?i)$_tUn7JKH`cx`yhKgtoCvZ-6ts<$GV!eelX3jz7sMQ&baVK=g zo9kJ4N*AtzaxPmRnW5nE7x}K+oOz>jRSft$DcBMd1c9klL%d|?=#c*q%8o#0 z8t*4mm&wQ3J)RU+sy7qolj(5q)EjwnP1SjH>T)zF^O8W=0dQ>r_c`V3;hAq9dBel< z&=3Y537w+-2idnd4;+zsUnFW*@V=sR;Ol3>3u( zQ?Vc*5DNV_j(*z11b0>EfaL0XAY2pw+5BLN8nm|vH&l64vbL=Ac(V~De;(eh$?*U# z=-lp37$UB%_Th%!YV;q&q;;ne+OLX(-Z0efCda@E)>amI#gg)I2p_|E`zTFzkoGrq zuFZR;ELnKA6{Y<$rLj?q&KmjxLC4z=jiZNIbt;#%(G@FI;cI*n$Y!ye{q;-#(0TZT zUzt78+H=eap3IDedwd=0--7ewMULD|$S@ai-1q$?1`}nRqs! zneCAD@KMwRtBDoxTx`OkZ`QWCZ`1ClwR7_TJ|?LkZF_#0 z7NWc{#%kqvm+^{dw`(7$I~>8v6ijF4hpf1{#Ao$Rbw`7j6xJH_j5EIVj$@oAJHdC} z3yGFg4GGF%O8b?}pqC5b`A4E-skMpr9u9li6RF=fX&8`t32ncaQSgyclGfGJM^-cJ z3oo`UK}p;*9uHqKw-z5s|7Z$h0fw!hTc*&Z;*rv!xl%s;t8{qxLnOd16=&Da)XuHH zzj5%Dq+FyIIw~O!)rJD4whW&# z?(qARm8~=5i?tmwrmc11;;tN1@+049UBsse3Q{gf#-g(kt>#}RgYK7w?IAV=^tV~3zf6Nz!*Uc^VqWZ z(;YQ?H=t61s9dB)Kq7@4wKqjWWjeH=ooZPC%%M4B zXkfq6x>){kM}Ry%IPy?-u(WfCXA{8Jkq4B0r=dTq+m&1iflnJm%gS!L3w;jbj=*HV z)_1f=vZ_|*(lo@=1NBJPS)s9`d#@!-FEFvk(f&Zj8NXwq{XK|J}r=! 
zGkozXK7nE0$+S(%R`HqmEqZbv7ZrES_MwoTJR%yT>{*32b=`uf&Ig&5{_7~b2^oUW zY*S5qA@YNsQT)gHY|O_LF-S-EO5-xxstVawR(@EA0h*3IOwSmzJ2=}`xsBxoUB5MD zA$Z3aE8D=Ehr>ycsav&5DuGv8MyN(K&sHgGLG7bh{9vdt#M|SF7 zP85Xq;w;C)d;^6fGwLS|b8-n|es4(a(0H+qqfNT8IAa;Gq(DC#YssG?XJ`bJRVt$c zp%*#nlVV8y0Qa#S|W@)9w9RB1ptZ5VM!7)Oc>AXONQo#8+i@*%!C1$LE z!qs8iXFTQa!)U@o{XO!KOXip}sQ7ZHu~wrH>ioiQ1@ph|7w@`%Co5|!_%3|X+o$C2 zTk7I7<2W!MHYX2;s2MsTV03#)dZ5$%>R{&Vbc>B!@zKT6?ecm{U$w>L?Ma@5NFnjx z4m1@*Q2WD2a8rcoDC+63^`RnsHmmv*zH8TKBW2{b^+nsS2EOEjf=CS4(aYG+Y`7e~ z!T4~dR!j^rGV;RR^hmJTF4voNim9Sm|K+2$LEp@`QuWvKlnyedA>P?ZO+MqTCx_sP znO;4|7@rlN!oZ^B;$k}603}9s98)F5cq*^(5k2&}uMs3oBF!~OH3A5ds@+&GZEryr zHhf71M83qSj2w~64MEN0U;PUJe)skS6#ud6LkV0;7dCin`WKe$;+~{ z5lU6WbT7C#Lpo!YTT?W2cd;k>7fIv}EcL+^_!FpAe9EDGg+YNp#?k=Fy88*Tuua~T zXHl<{l*K1d4F(y+{dAaeQtF;NtG2ZyD4D61cRl!$xV=!gCr@s^(sbU7dO>WTP_4aT z&tT2^u(7gneAX5?VCVPuaO0%FGY2VI+Ve0dbtP8bC?Z|K?8)}>_s6Z4igw6Zx2Sg%*wiROf0bQ<7LjnGMuQR=63DW zLi^kGX2qO!E{Hdb8QI;IB ziCeB_^f-PegFVYa7q$I^AGcq3EHu0JkGGMPbj(Z3T#$KaIE1MbDCH1MmHj^wMF&1e z3srT$S)mHS?n(uw(y%Y1qhz|S*gG#pwa{hoT3{cdJf%m!UH*?hhy`d0w$QG)J}l;c zloq$JPUuAN{|VCkDJ*-)70*!x3WYa$LjN3fH<^)44SYjTksfY^Q9)4U`*OLt?ZBnYEY{1 z8wn~UZOhliphqgsAzT|u>Y2A67?FpHNM0tw;G0)cEL#>qoqgK6+(SzDeO+i(hutd4 z50ak@Br$lvt@zvX3v}!T)8dVGchBUtU}p7?+t8DzRn{ZQoq9%spzC+3s&9FyyJ@06 z2#Qv797sd8e(1 z(kZa*_4d6ZKj91*(YMJqG}*GMsTC%6^Ii8+CRpj8H_MHA`3%i1;2Dc7qQ~bmxggy< zTB_eZB(sw)%t`^iqp?>3HPF~FeYNFt7gOJ7FnOyX2BVoQss2QqMpUFEBfqV~72?!C zS6Uvt@Aq%l^N$yUPXXm$NQW5+TeBDPFpSE9>?$N4Y?dt6dvEES3x0W%R{pcd-pIp6 zjg&-9a0);k*7{X*wk**s2c$iUJ}#W`K5h$?mbxb5%!&J^@`?v`hl_84C{Vi)YQly? z^i^c5aUr7WU8z_mq8ZzPcu~LtEYU4XQN)S0>j?d{NiUPq+IEFO+FEbI^)Xc8$n-E- zG*l`yXNvbzQN#;-L-IMF@Q30&;Vl9tBW+l{(*C6E2(L*kVZEdI^5QX{;hPB1_7^jl z^@UqkOTDb`CUo+UC?2~;&eU-0`uME}6s52dhs+G;b2nc>2B!_sO>NROLKG*F-|qzc z@RzAaqruDs*|=Ak@nTmt4z;dF*(A33jZTzm_u{c)ay`xVN{Gjsb5#X$uU%U8rxF&) zb%d2karVDTMB#NB%CvveY*v6D!;J?e<<|ewuRzEFDE=-`Vu%;ul+~SF0=IeNzQreEbB79dtR!bZCLL9Sz#{UuzvVphiRNi&W)|SQD05VZWJ277^{ViLy>e!`M*0X?3( z=C@*XG2*ap(yX3Ji%ANrTd&+^gRu^LacrX~q>eI=g6oFw%6QJjy+fUn5kZC*Jp6OR z8P3)QJ7PJM%+Fp-rAg#559+$%TV)y#E7}w&c+j0$PgFM%LIX)w~U^UWxWfRB?&{z^GGl21)G z;4P0>kw^qF<4&MuEnQBg8rJZ8JwlVfr9+o{L@`&et3u)q#5P7It_-owaaN8?GA7lI zs{(Is_-S)|XKVKYOLp4+Sgs&63Aib~(Xm$AP()L1Ka2O4kJ4&1{VGh%Le26HtZA-h zow2P<6a#g#k0c|u?2L|vh@W(umwNS2yl3G2OXInx`2k~X|29^cYIn@mmKci@?s((_ zk}akGq&s4Qy@mKXUWoON9-)R{L#AWm%j+N0f#3?Q zx z$f|zH?4(#)h~6n7!c#JD63^TLocF0DaDI37KEbqLS;{`SkeaAG48?PhE2ksWv-t%c zu)7(qC8q|i;VXR;-FR3T76k8I{+3~SiSMbAgK+_R!W&;o-PpEfXPrZ=7z~7e{2Y}9 zFbw4I;}mN5IAr!GL1|VN?oOpt_9URlerdvn>T=wZJ!UWN_cXbs_iNyGx%(0fyo-e< z5M;X#lpqzYJMlQB}r+Nu=4591!{$P)npNp^3)!75XVKs0jCb5E&UgSX{BC zhCab*%3=R?Z19aJ?}qf|R4>>?F8q;44uho zF&(O1^ZWJz^GUwJ+PLm6A>#zTolo?zu+hTtiRhoukdm)lkeqe8)=#mO6xzRQ_9GtB z1#1GdwI6h!HX`hrp!LK~&Pey^2ms$PG^P;#WA0|oFPv*Hi?z-Z?NB?vi1RSC;k6RPEH@>)&3uwV;D{UZGj%p$n~^3G)9~YGN^?Z|j~})MrVZmuR-Fwb?}&P!C_XV} z%(EUwXq?LHFP+2T$qT{h?CB8rR@bvSXCI6H=LD;HNJ}Q&h^n9J5Z2_YNW+03sC~Jm zy}{F9AWalLM^~qkG2C;f($G{5QKSPAVz1=qZ$Jk@%%BpQnM?Iv>0ut9)Tb~Iq6F5o zHWxli22IP`P?Q`#tPVB(@{S@Jr7)xSO_`TD&Ep6=amu86Z$aAurlYjyA3uh^H$A(h z2cHz+L?AhG+bTgAC6>9#8f)~p6Bhj}4uIy*-2OrHDf~{Gt9q6{ zle!`jOo_~kLpVT?kT4P<-w8pEj$}re=I0BSa&M)>>oB{Eu?+ADV2sc6*!}F#P2+d2u}$ z?YTl?Y-u=X44d>YEkm7OdY0`$Fil4Uf|pSr=9*H0d2HF;9)im?fv;bSEmE#`IxZ5}7iv6QJ12kC`;H@RY1(SyU;( z0{9t1Gz-+WOi5;Aj-3z;y{mReXF;8GL6Bxw@xz{YL(vmKP}8yySBxt~j3V<5wYz8L zVu(NM>&`B2OeiyuzQ3<9zM(>yv(!0J2zoDwh3bdWGx6AOZAi_nSd{LP#R?ZdbWe1) zwW{xxRf4=#P5opelRL3CSEekPin!f0AU!pk?%kZlkq2saFRTb)fDeMR*bEV}pv|an z9lMUV@zaiOdJ>nwH0-haR!jJII7QXRV|VF>)(Jkd2Qe+B{%93?-s`BVqo&p!6Y@TF 
zD`XhWefe37L6pyakx^2jcDKo>=IG)9Am$1$}5bna&>wH<8~rJMpeeiNt^ z)%kEy$VQKqfMb%eoRpf~mNRZ6;m3zH^tE8;3+|9~=&OO)dV1HBMTAJGLN``)8XJCO zmfYM`O3XT#-k8eEsx0fsj?XJvcB997RsWmj z7Ru%fwUs8J`IwkX`?ZI6Yk-|E*5SjVWBFj|aw>k%pp`WSJy&XjYv(U{`wprI_g_0} zWU6D%8fOoP96Gb*u5GyQAM|q9^3Z@KAEVwm@4^Bx2R#@Bx?oZ(B_P52$atrS6SGN_ z*7#KWwAx%b8WjhSV&@c`^`2uN+AXgV4y}>bXVMA@yg6}dvVNF@{1Tl@Inu}3V>Xx< zb3r&4a9TAegA}J8cyC#G?rA$Q)G3s1G?jaZ= z0}98}jWZ4AyxbxA=afb7y$jye*3+KRK9%75$Hw*NRX##IPdmElYjLbEvbp~NQMsg4 zg@E`z9Q~1Tzs8hdnOpKK3Vtgws!t~tSL#IkIlGAQq#9VjL1G!M$NrJyA+I20zNZ~r zn}BtV`PmXP)7P->S?U(WpL=7vqMnV9M-JT)u>Nc2N&wD^8{yKv!w-?yC74{|2@!@3 z#gGY;^R#!%ESYDg6AfdNgkIIG_8!A0zqv$T>M#0&Kb}F+yLpwE!r4*4d4l^*a_4ly5GUq;>r8l6)L#7-Ob+o zsc7MS`!>=nCeY96jjdfMGuT=zw8PVw?v;YSmtU~D0rYNqvf`Dtg8wV?q*j}K>WDQ0ke6!XdkjxbKsXL%WuR!n%c{p^#Cj2?`}}A{&!NNTv;y2VRJY`49-gjBu3bAv`wKN z;ihAVCvU%8-W3V#=J}c#FKl`j)wHYiB-l!NcU}lI^DY)_a8-#=ABvn1cQ%v04$0`-VvjDKB89VpvAeA&1X5J8*p{Ye#dE*kslEBcwIaSq8t8lViZ-2Gg5>-O13M#P z_RG0YZ^d&xv4@Eboau6(^E4pM1A4;AVBX*HjvsF*(#_ZkWvAw{V%zh+_xLz)4-dX} z7fO*)68@DM8)4L85paeS(ZI$WioAZX1 zgP`Um)m*hJc)GK={ZMSdz1lIng|8#N&abPml3WukD@eCOljNWjW3_C&BEc2FblH zp`RFHZ`F=EdcQDjSm%iLMevI_WR_jTdhT|-ps?M(-viR|^yl$Cr*OBHHxN|@H;KEi z5ACjPt+4rQ{TpQ6pZW1TxiJtX;5v^en?Onrt+9WG>F_yARIOFsFRC6WqGP)huA>>Y zqS&4vy~86pA774O?w#;omjd&;%$)=radY9hyX2}T$?){yE8#=_ zPS~%@qLi3ih(e(#{y$D~waWNzAZ(VNBhM5cT8{4pvsuvF&lB2@3q!lE|9P?W&{wW5 zoROo0!eoq-^Eht|!3yLdy&2^ScGK>L^ZpF<#w2intf7ME<>;Mh%V0beO!CQA+{U15pRg$d71=JZrh?<3=I~LGb5*P0mXZHUduyqWYO^Ou&<%v!x zpE8AJCgaty=(GJ#OK6Rl2|S8H_7cDf=stwPk0()(LPfLR-p~;ayMY5yoCyKInnpc;R6G*J=~^bvO=g z?GU2AjSbhrLH!KREo1-iDS2d*je%>}czL-_o&YCz`h&}MMQ1^RO zujn;jgJ=rUW{=HE3$d;3zq8DkY4yHuaMpHp!}-V45ZE--4Nh0dQ!OHeXhqHE9z8Ic z#!&vRJA5KmBu4WHiDA+(IPURW*ufj-&t@Outf!XiR zqya(&i=?#QL*7xLH4#}q%-`5)5%DKg3{bj~RlWdXKl#|VbBk~&P}p>j*@Nd@f+JR=j@ zJcuKRpmH6xI}t{H_=CK$^)bFlXwoO*!a11^Zc9IJrdOv`CqJ0<_(Kcv=eiYze=wBb zqH&6l^!6cynn=-e5GiX5aFQfL*Gj|u?i1DzEG3-`l_kCGgX zUrTaR&_WCDWgGv*KtNn8TM|&4SrBw_4>EpnB%7sHuNu4E)FQu;Z+WcBZbbub>>rQ? zc8d4%yXO}DbZCT8H?Ej@ebmkTTEL-Zq5$837>x-Y2V*q=Y^nADnJpZTL)^n7G1KiW zwZ>}LQ)CxMDkePGCW^);Z983pm$OCQ`~jHjIm9b^`6%(SieVSZsOh{g643dtq(pKj zvG9De=ZrR$DN>Tm?&^SRH|Esv?gRc@Ym^bt;B;s+9m_@B3btHYW6XM=`dQv%{0n_Q zG{0rI1BeDk~1k$qoTX zo8%@99|;9!WjYv(3$VoeP4HcMElL|VPa!v1U`nI>F}}ERu~T^9hA(f^W%{6h|GAoC zdH-+&zCwk=BHFQ;y23m@^NA%gD}16z3JZ+OBlMiig3YD%3dN#q3ieN2og)*(CGY_= z$dB7mrb1vvBc3X9rumqjPaGkeF{q{>xMXqa3Z3&tK7aGzZzs~To(xQKQr*nhTJUS{ zn4HCVhJG^rx0}mafm>1ZNfmfdzjlEU&FMKRZ;a zg9c1-G)Kcw!U%1Sal=NGTli9*oO`66NH<0*$f5BL4Yrrw9U4ZO;#Qd`ygTZ(k<6fb z!jERJIE#7F-n}awFd80jGrb2Rr&u$Ov-LYWC!W@BxmDt*M}uD#8!-JVpTuCl&ti1k z&{afLp10;@12!3`F+F|w-hY}=l10L;LwKC)Tt?uvV0?wtbK3mAj9~P1=LiK%!>rkP zosnk9uFGSo&s#jkSwSsoRcIbCg)D2=c2OF@xw447yUDb&x<>f+ zoTRx8G?*@TWooHHvPYKQ&F(b>R2rCEt-d6WKdZ&_>z~v$uYGK}8_4QkwLB;b?G>H? 
z_#DT33V=Q1Tkib)8@hz@q>T}ntpU%V_Z9!(BvzS1OW$tsf5w>RCQk1@bA^?!TXY(6 zJ>Mt=baR0)FhPBdfP}6KH~uUWI+A+?8s>^X!=+>2v7()uAu$CVzR`&hBf@ot)!ZNJ z4es26b%%=#ulnZ)yQHAe-fPn&2GlD+*$E$^>O9jgP<%M2I*MebFIgqla?4DV?sM1% zmeiZ%ZA7a*gH7AQUea`ApZu2`DP@PWp`CDi!Z1;as$VXKHTSJ;`G&Fo!!5E=IJRbi ziq3-MQnUxKDlkc3&Tsuxq#p7moBch52@^nNB_aOXC4c0Y>ejtnNE9iKmY!+)NH9eM z4(^p+>UNdarE3p%*`jYX`5j^9&)7cP-4xM(G~>Z+=0BKzzh9RKUH$-OB18E)Py|Qe{6~u|u0Ml~)I@*2i3U{zY zoyy-DG+3Dy*)g2#@}*>SKSwvmiAcl*MkjknzG*A^s&P93nl!?uEbZZA94(&e}29$W0-(MM5vaB_$_kN#e(Ovp&5>acTiLDL5n?9dR}kf^(icO~MI$&wDRIX+5N*Vzq5BtvZuk3G|E-P|iLMC;{g7N-rwf z`1qCK(A}iaDwE-d6`1fbP$12xxi?#BZV{Z>Q*%0QzYG?3Jzio8-dGF%f_L^)lrw0l zRZC#1xj1`4syUqwpUNU$nmnXKg$=5kD+JN?NGBtPuvF<)K>%YQe%%gvQ@^(#s=WX{ z;pwaPy8gS>ySriDusVL!=t-UX9c=bdnf?!#un_Cr1(peDKUs$q0`hW% zP5CUsF4NsqGR0Mu`iGj`Y9~Fylcly?(S{LAB#fXby>al$?vCn~8#-Mc$1|_UbY6@z zYfBlgEF;2ImpaX5EhIKgOEH}Byt=$0MC>Kvtd4t@@V2^hvNAyrE1ZDNRvYtHDN6sd zM3K&uH15e)4Q$S!D!7tPDte1Tu;WC(`U=}W zyP3}bl&{r$0NWHL<+^+HYhAgKn_3f2?U0!in&(t5SQhahh#dhJz~M@-LJ41%DO|;$ z65f2aWcS!=|My^b@us;R=yhf)g?n>5wo3I`8v=`sU_rGFPkQx5hv5Md_>rHj{bAtq zqfX*b@WjHuJr@TDpk4M1P0;fz?A3)K!#mb+_%Nl9mB4Q(^HBw=zQfl1tAC2q>@!0g z9uy_Y5I$M~X-)ywBH8`NN7m=G2X{+$ugwfd@muNq!l=;zMk?Q-@F-MvyZ6$l+9-AJ zieAoET8bhRa=BQny<>PKU%T!btK)QRqhp&L+wR!5Z5tiiwr$&XI<}LYe*anfyz8vB z_qDF;oKMf3sj7R-%(~~O8ox2>d7P-d(Sa2KVtpcBNsyEPew9LDz|hOKcyRl&a#fi& zC}EAmfChh0nBVIyu5^|zV@nH-vwM5_mh5hH-KhIH4F_OE)q$R-b_f3bqE_iXooHc* z*c^8Db>QF}%!qa*CG?`vIyXFP&o}rSv~Wi;x)j|NE{rkarvi}eCL%dmB4YD+KrDw% zV9K?Z;S;o`6C7>z2(9qQc;{3wi=`fQ9pCqw^lzx#_>1*?X`5~oSOdPCkT+3!p3EgQ z7&@4&P$@`B4JnRfj&F!A<_ygcziL8+BJo!{-J#nuj%l4*#U_#rN2I%OS)nYyS|!W$3>Sa>n3ZIq!>Z@ zrcVXn8^2HJ<$Ao%KBN#>SPZtid#S25R3rwjeDP|8JE>uTaIMTt%;nmiZ%D-$xBJ|C zd0nkO@8;;3p-r9fbQ%VZa8jkPZ`d;y8j{_;FrQk$rIFx_a-p~_baWJy22 z%9U4EW4lf8;7;f*C6TNC~*f;d(e$WaQQ)69;7FjC#t#Q`J{$$Vxr70>9c+ zhJ7_H*M{2&RlS!%ui!!s8e>=>&c`p@>IBpLXpQHN$0V;IS0Xh2aPz;*yGZwMc^4I2 ztPSvJq;*Z@?Je+VBn&K#?2Yjl=vkQXXoO76?G0={qq&Z~!A}D{EB((srSSgQWNT-S z$ISYlvn{5psoJfwB6@CWbr0$nA|kQKGBGJ$dx+FfJq5Qj!Nva^#Tv~~$~$H4^lTL! 
zA`yYlSWZ#_!Z@m|go7cAjl+W`Dg=}Tdav<@e4PO1Lw^mRL+sW(%9)3x9$Xm{*RDl;AeEc%t zTBHXYTV@!*4;r|{+DH_rgd8uHQ9BzbA8|_#h(cR_fJh3G*1L;a4h;HEq!+aCH3d$1&2v^T)gN6=V-Ob zz%m>qSYm5OJI#di!Yr7KljI)B2>N~Qg-KM)xWuL+Rjb;P=Jx@re^0i=Cb8&J{eV^p z45g+0<{2x!cw92H;G-`~JQo!KolUfaZbj0+N&)Fuv8DCsnV?A{zr+HGrXbS1RgzY@ zwN_BOLkCA|&-yTkE7{bVTe|uJwkAuz`D)kH0+_^4*>3H^qJiAJy|Iafi-n8P^)^^|DjO($c|U?SGyZjQYXq&S@S9C)U4(7tF+hGs2?);pa`f?Jtc@XE`jL-rLQPc=ZX5LJ8OE7~2C%P}hc&dqY zUrF(^Ym@1xwic#r9Kja!72pgGFE;*3RMf;^5OP)!__ydVke{U$ZFH+Gb0H#42xq?@ zD&U|Ato2}Ik1l265=4%OWz)`u`qqOdU+5T9a1gIS!k4z+P20WA%bVwX+#rJNkpU=Alh%DEMV@Hd` zIq`^s{u%X$m1E;(>tO3(j;7qnhuvYaDaOA>th~59zz*6!UVl7tom2?(K%`Y(Qi)SY zZuBiawARH2b+2=C_3|`hC2qhAsEEuFZI5@xr<*+PV~1+uS!-v1*RkcOvs+c{rV<%j zb)=KV9bu2WtexIJjyNXwSF~)45II^vsCWzJJVZJnYka?ay&kAat5Jz$d#0YzXWJ%b zJ77yw5w1>$uAH#63=b>I@NGnf^7n5AgKWNdeFX6d0iut=i*v8q`!_`@pFVtLQ4lsmSygdGz_u881 zWNYYcdVO4TlQ(4tlcYLGC99tDCzm7dNf<_geW?hjEj9=9|Nqpn4NaBHP&_ zGkh_^tuH|=Tu60Cv)4xD%CKHf-{3@JsbjI$a5F4aS6hVTgiXJ#j}ZQWOAR6t1s9E^ z8T2!ge_t6PTnqy;LrJ}FXy_4$$t$hr!q~`2#SvX0O|>VM3nR$qiQ^onIySbPlokf> z{nop3pFb}9TF!ahb4AO1tbN*ka%1DQx+HZURBv-i^fALNp4vo?qGtpe>M#t=&*z#WFo27`#s3SEP0!jp`dPR5v19` zyY6b$R^z4sXRbmmw`lEELjkiNJ10>SNLbxa6l*`TSJ~S$r6(wN?}^Pt%veOhib1^A zrg9W9K)tk}2SPaaplD#4!lt9IRIw&4pQrO-3W>x#|J-k%Q#I>Ar$%)!Y~%6AoRkWZ zwNGvv1N(f6Ia?tcLxl2iETP_?njND4XLV)fzJ0w9>;dz577!P}E`RG=?NR zhQ{BMvj$SVG8Hw)@^X8ngZ58IjYV$kngciEcHww&?)4H=+Omr}sl4N(qDc%H%nWrT zBXPtahACw;`YGn^)gse})9k6OHR%bFoUaHq%)cOk=y|JXS3;&tNOnXo8u9qD}qkg-q@s${?BbL6>O|(o0GEiA~p{J&3Y;>Q| z7%XSh9HhBIq-chAGZ@v!E<8e)rFmg0$;KxcXzj1DoMKNWI>xAtmRMXqIc4j8ox0mo z?UdX(CKM~%b&tUK(_y9BGcMh%2|J97VE-s3IDaHP#(+&T^F;455z45}{@KKcVmg3U z*SmfMknx|>vtDgW(f!IRdaC6r?~+ml8`TGDh_Oy$!M4Z1Ru8w}Y(a_lutWy&67RYc z?UtYF^xHX+Lk8cKJ;hwYGIw3n)WmKXX%^Js-hfOg?W}4+EX53AKUf!R6@$Wo0El`m zl<54;V&W0PD1ZN1^4y?=8siWw6V(v^6*sa{WO;^pTU$k+7uAnsE8#KA>`eXY>5aVX z20g@*N6MMeQI>@|kXUKpJ_tnZA3#`a%}L1&@8)m9?i*y^p-Q@0cm=iVk1=$wO`)iJ z(#pkH3V0L~@Pzz~jjWT7tw`R}fbz2ebX7Fxgn~80R>uMWL$Tx!MYgcPK~SS909t$u zyQNHD5$<>UWUXucP{)rK4g^?z3`wTy_h=AsnHx_HOk#)>CgJ17Yr!nK5$n2oU^P(= z)jI*H1si$G-G;s*mxGds9a$tM$sA+3Ie*dVyducB9MM(z0bjn{iMjV^B!|l~U=Fcq zi<=KRHKFzZZ~@$gOYPTjuZp72e6n#>bZQ5ihg0ZJ-jN2|KO_0a+Mwoy$RnB0wzWoa ztxx&t+7fKhMy4jpOK5onBI=)rHzgXhRO9$fZT*)k%3pOkQ0N$RTaob@Y9#AgsC_ZX z20;pBmR$Ple#WqcI@dgAk&IA0(5>k9(Vw@n>2eLZ2J5^MkXSR;<9T zV)V2aq&GyEjn1A$tY_pHKiwbqWa#2A_##o&@Extejb}}u_wi!3Puvp^0z@BF|4dm# zFv$?ebLo$Me(S_;&U8pyEGeKixtBYz(tlS-%I`He>Z5N|F=QYe>*bz8apvFGybO(^cD(|Ry(xt?T5&SUAxc|qc`-o7GgOVc05<%Fud zEP8b_CCtKNpsXp?fQ?gLZoyTNf!kMmQj(6{XHiYnXu+^1d)ZLFK~s;N`;J>zV8!mI7u; zhdqy#+Th6N?HrIM(w}2lg0J#gF2x8ybiqC0$7Qjqhf0N_`^+D9*zHkO(`x*@XL^!y zFS+(hF~-U1_{HfPD8JBXe$iP>sBC3aP#p5?g`{TE18zGq-Rekbamz}^i-5s;wnyzW z|46=yIrHdH!(Ag2vfQsIJ#(RI+sG)f`B*HoEf|giVzzjaGH8z-R`FqkT=y}TSXwrC zx>e^O65#@f$>!)W83jT$KjM3G5`1$x$08uupJ=$*h_-u0F{m1Z!V&Igt-6CLshF)I z<4cNnhg(J9dm~MZN2K@k^v|{F?kjkSzkY^J?2R2}TNHEs0>q56+e#Qbl1Jf0M%I&v zee1_ceo_Ql4t?77f75DWSUN`KIFhgR@|Y^3=kbt`8s|F1U^Sxr?X~|lcI+@sjYuu~ z2^m?;A$O~^rjUc0{dnSVa(T{dU}4m3#?fx3s-wC&86WbX8IP*6rwWWb8>b8JYbnug zL}MWtkJ9W^qkg&YFdA<-GVP{JNfREd)nCN8u!PGWG5UPFB00sxrP#Gw#%-d-MQV~6 z1ffo2lYegZ8Kqq;+e)^^?n+4Vv@~G#ao@3?zC_cgv~sYCv}Eo?b2qcmN?rJco6vC* z)9>XOG4KGpFiZ0MEqmd!HiChr{#TWR&sSEurcexwe@iL+eZ|T7e;E7!4;}9K|2RCs zv|d@{ufucT{|^pVoyS=t>PXt@PhS<+Y7u?6+Z1GB5mf&rRe_F^UaG5P8-ETMKsa~q=if5ydyloBA>W90ex3X$i{Bl#Sm5}# zB+13OLAH1eC4nqcRykdwHl0&KNJM>CWStz&SU>yQ#HklS{^ zDVmHGoNUq78%?O_f2bi+KxPVW(Us#78UeZLk4+H-jIbbFl#GRLs?WAKBtxkFQn6Hh za4b4Cw@W+&&PVUrsr&nUFdqluRkt~q$OLI~{g6k=nEdH*L&Lugzp5QW z|8zJVV{-VX!&N^Wj`sg{IMPwwr^A{5I{f4xhhu&^9PHEK>W`ler(dKz{FlR#JuLj 
zXc2Y)?87-4lRE6jg^^xG_1>G}7TSU)H}maTD43eR#_EL{bcZSSQ@W?(~N7>Ijr?bROCLH&b5! z>cr=iD=Jf2Y}^zDH`en{Hn6`#pi?E~*a}k!wR7W0NlA3FIB0Ho*-IHWBq+-_Byczd z&D;eYj0=7vPtvVkrX*Zc*j!K$ap8)rChs3%mR^oIaHK4V;aJIp4F>D7~Nw*q9ch3cbt{PfnDUPJ67OdF5%n; z6Y{bcdaUNIsru*(W1Z&9oHb&CJ1!~Y%Z@>yD1+1uf%<1sL@ecE0QkAdlLy`8`D z?{0tN-&>l$@o&ffH~#&3{EbXsk@+jKd_~r;_@&4ErN{iG$NZ(o{H6DKBlIsl<}W?w zFFocjJ?1Yx<}W>#FFlqoJ(e#$mM=Y)FTGCy{V!dXFFlqoJ(e#$mM^_eI{mNxSikgG zzw}tY^jN?2SikgGzw}tY^jN?2SikhxzVz6>^w_@i*uM1GzVz6>^gfFe{i}VpFFm$@ z^cWfb*`JZ|pX2Em{?XIKqY<#Ov^TK)JZt}Kp^-GuH__p@a{fGvf7@naW1^=2{I6LV z>8L+H)aL;*I<}wt<1sV-(=?5oft{6ut)77$9v#D%#y@~xM#sqD?``SV#s2TM|H-AE ziI$D+Ki$oaYn(*oD%E^!c!6eCn3GSXky_WQnQWwvNzjWvoSU+Q5Maz&Frr~~F&w|0 zSk1d!(l1#y*0$iPZS8QDOgzn>$kbhMvUc2@kKk&0brM+LVef#*&<3U z&GfoUN9KH)xN4`>o=~2*-L;nDB;RIsvaIlIb9HfZcuTzOf@~KNJ73%2;5+ITA-K+w zVQ>6#c-VhNpA**f-e#{#mfL?)Csks;s&7zj-%R7NjwiVwc>h9Ooa zL`q`Etg?#FW5G^R{dEt1xhTwT%WQx8BGzTe}8UA-Gs zW0+eBn7DYh)!HB5-QG@6wH{49WHjK{i*c~rUOtQ1AH91H_vKLH*uT_GFS0&Ju)u&C zKaO|XDA4?o|FB$xQ^BjMrQ8W)VX4&E60X-gUgt;{msY*be7sTRM4%gg)cWJ})r zPSgQF1H=P?Wz{>oWTWXC6PH8bFY^(_6iqOxpA#18$dks?r8P}7rIKptBm13;SNpVH z;lqx*yz>-Gbste>h~!*=+GiKt2C%UJ!P;_3y=McEM9Im z+kWCw29Nva{8(VpHT3kl+HyVLRM64squ_J;Kk9zO8ZNeEa>>iH7&wfv-Lbo?1-|E$ zVY2)}f2iSN#4#+DvOHWNF>uPsNaQV~~PWb$^*mg*!60 z%Eos1u&W7elTC)hi(h55#N#m!GY^fbIK)5P09q~VBg-(+T{Zm&OF_6 zCF7VS`p|>K0LMRBB=$-FLA`%gAQ?PLEOKk(l-r~IFkdXu!ED|R{9r>4TX?5~VxH#8 zxH5`50gws-G?`@40q;!)0n$G1W{DD!h*HgQqr!MnH}MClr})Km7~gzDLf10- z9BoV#;wbuhr9Cv^c?4SO7;;^`yVlllfsmKw_h2wm4U1wbh=3@26 zMwY^w*KKL=Gh=xK%F@A9(teHsMDJR9paJFpxAvC+`jm{M+B7Wr4n?IJOKyYlV+nys z3Oyz?I4(%Y_KPQy)Yky9ii{?2b?@9&E{a&qYc>vPEczdjquuy)+`N&jRVaQRk3SE;}=enKZ69HUL<-kL!jz6 z_@d#Zx3GC2Y=)hgZ2W^^(efoq*J%>REhW|RSYQX#$C@=8M=!?kC83h*6aX7Jx&w{G#D1cFXiw3re)aXJl90$&^mzyQ|Od=Z-PuI}vfn zB7hJqcnCR7OFv_F_R79;rd^@$+I`q4v{AxxaM0aI^{KyoDECd1k~0kx?xHFKn|vAX zy3-=a0Y(4;UUZj^F9R?^%1v98`a2rb95M&SE5yb(499s32k2Xyh?ESON$sp~sgT0m zf)uQO>!CC@z7L)OZ&(3|4F~y$Rl`Ip zM6k5)v5U+`gXnk;45*1yWcBtt^BXRCQ+4jIq+IwNK_DsyBJwI6d`RphP@MdO*Kaix zmm=I9cA&g4XE&tC%&lUAzD+#buzw%YOIZ7R{)L%t{5yZjny9D9XvJw z1p}r}6zf~&-zn{|dGel>@WTULg=4%uMKOsVV~hgu{m4O3=A}rBgD^2DH}WMJI)@%n z!qoE@xmZ*)iv6jw!YWmBwZr*hP>XrI^&Wr$*go{(B@w_@e)1s`(E|4Ha|Xb5jo4bN z{B#D4lS*KJfae>BYerSSWBPVWMpTC+nr>Suc{v-e?g1=R&P4Qtuv{R)5K>jg2dxat zXq8lXn^L?uYAqRN`%ty;@Ia!A{v_rbRMN9l#KHM($>6DkD}71eyAaZa(`GL>F)JC5VX=D8MlB`EPc6x>kCg$=3{j6`3BLtf{!ZfmTQHKi_wv z=hrH*QZ1`U5wXlfT3px}NhxCEvEJlD4W&UnBhoY9=MZXOxvCd2_EiBqljDYtn$C<^5jndfQ?)&UC1(uC+RiE|FT_4($hS zPR&D`B2LL0Q|v-v8UR{;%gwHs{jHh{fcPDF4{M=F20&jY3)D~t!Z-PByNYYy5m**~ ztSFOw62Wl*+vH^+eP}z+8(L~CFSSlJ35|@x?+(R???Sp+p^#Q1BK=4V|4sejQRpN1 zMq@L}XY;tLu;J#}owc&r{>PXAmndTj(VTcG?lBgWv1-g-YXb8{OystY>5h(MAA;=} zdryA_?cr}X^!RpPb~OY2Xj4QfY^Y*V6^6Ni%}TuaJUO@U4Jyu>cK?!Ppl^SIO6*UZ zzy~}Jiy$l9;dX4?-Td|4fqaa&kcd<`gr`n5-%e$$CGd+{+uuVA*P<_f>FoJ)^ZcAhjOS3M1|fhiQVkX7n!s!I7rafTF^=NLU%qFv zLp;(qf$w*rhw8dN0H#>d)1YB!632UoB{wb5$;u_-lHxFpR5%8$3dj*I&`t)yui`uV z?h(5$7-2G;c-AIuo_}Ik{iCYUl6J)vFKr(T2$E_bDUfhTV2rW;hCV5hWkJ&VJ% z@v_n@(>x8o&+`<_=8VjeV=T%sXG6mW0Ip>Jen0NOloZGhn2*hp-kBEYH%NF6*3@Nw zzJ;OsaU*e7eD%1#!WQg1GH`1HeU0LftP_>=sDDf@a&E$%Mgktm$l`i%gy?%XzIvTM z{2ij0iPpH9TA>kDaX(TZ>~}55#_tfAAWiJ_Q;d-pcAy{<9)bd#@)Knl_=(MMk(Kjy z;zwqx;#g#=;%DR`=e=qlG=@{{*L7I@F;E`-7|Sj;mx6X}6+(1OJ083xUp0KUE=v zt(iT{(()|>M0ZK4^=`7m6n%!b8oIm?&2cGG-w3xzanP8XNIj;7jeVF@$s$iJFjF8j zK)D#W@X$obDvNx+2P+Yr%mbcVGKym!3P27B8$DD0$2t(^5vo60WQtzt$8=Jsm2xz? 
z5}G?KZn*fHdiu!#)^RGr@R`1AVuwkip5`Q8?>j@_&s$oI%c*ScI&FW1a{F*$z%zvU z8J~8*&UINpy;w*9%I;pXuBM|nqN9jBp%Z9Xkf}%p%i9S#{4gP14$!>bUwBoXQ#CWP zvD~ZJ%STyR_|8?4;jVpME5hrP9e4WY*UbrO;%x?2Iz zx2ym+6PZY2e{9w^c;#C}+`#~uaPWP+w_Qetu3pKZw!y-qf)74tZN?gEG6{>~dSVJ9 zRwL)eZOE^zNb>PZjlp)9xW>O;EU+2MbV0DYk(nS%xJ8Kv-tUC2S|7WJo_>% z1_Js;r*}n2^C2!&j8}0FGS`;7D$+RHPp_Z-&XXOS=0}HIlk%USzEci{m|`Qz$Uqo(w8QA2=KM{qx6BBs*BY-K5<#1)$pZw(s&GyK9k0hT5l+ zKk5m27|qn3gK&TJDKZ1D%MQ}nCqN`)HPTCnLq!RMLK2f2L1Am2`17#vo8k9HOhePwWZ@oo>A@U^aS$9{L1#jI5+dCRiDbiu z`5}q6rSclvN&fKX@OuHfbG3y+|6HGv6@T~P!^0H5Ku4@`{Hr2;1{uckE_mmL8Lv{1 zFj?Bk4#s=S29p_ueF0=E^MG)FzDFNw+>xLt6Yf8(DkL0EquO2xV~9=b7-LJa-XRyl zTd`_&C4{qGg~C&zS3oMOMmh80wjkvoog=|}43{JLPjFZ1uR3ijhQMv_?cEO>Zc2Jf zw&qOGgWxt|K<75eA8fq)*&xD1QTX%BCjt&ZwwjF>*`6=HP6mjZRLz~24atU_QeBEp zs{@MZ?@E?;sl~4CjofGcagTL57l5}-s{JZZukJtHbP$5HeS!1~&nU`>NHV``+%dp% zjMG|LrMZlX;VleYCmfw3hgjpR8cI_O#qDWr9#YRQ18k8shsh z1oTl*Kw&_>9D=g@r_fgypci^W8}?vWUoP5cY8v%(`Lpn2@tO&on8{DcX`MLvJLqvQ zw_BF5{(yZ@vB@X=jgFfX7q$CF0n(*gudFvk zU(Lq%hgF9v-G{*z`@|oIl*4;=IHOhk3vQ3|&B?6=DK233TCkTUu5NS@5mGkU!uXzC zz&=jlYaX{>2y%+t*RMCBmRZ2(cp&l7c6!qb*dULUsD|>kX3L*O*0OpJu7)b<@JzQH zdAsSBO+q)CqT?affuP(HO?m>k;skk7(W8PN$)-3<_Ng)nBv&UBpvq}D7n1n4XR4f> zr4OyG5p*KCa@*1GBT}%a1E9g7GpOX{6MkeVeNSh-Ell?_($nX{(dx1l8XDLWOA{HEyuYSWZYF%}zas?d~0X6m{K!?Dl+o|A$wI~8u&a8bcA z9f?rk4FoCy5AcLP$#ejA?{`s6_H02jbd;MScrAoAogvsL;>(rs2=o3-1qj8`vH{)) zNu=(+I+v0XkE}niS~Ep#R{{=Z?bcs<<{o|eGlpDB*#o583VMcWBfwQMMt>xojMC2X z-NnyTJ2fQ~ukE;UXss?f(RxFw^c@>$OtGYv75K5^++H{rkP;64zL9S`W=9u=j~!z~ zN2?vPDjYvI%*iBX2RtN7d>D088?;hI!K7?4R^GI;CK(Yz4~uJ$U5fi;9u?%FR4Ef; zs)7j0tBzQ9NyJNiYF@Y`vLKrxNe(R~9b;12qMlJHq#LN-!O^$$;yyaB3pN%JF{Xb| z?hJ)n^H`G#u@8f}4rpvV2y7FDv((oWy?culXCqrbpao8mEU&ozn&l4j>Ga{71C{Lo zZ1{8*?d$|@_x;)1A`-9b%Uw(tyy*@u&@DX)g95}dz&AHT>u?Urhdw_~_$3u_i`>GF zLC`5ds!FUMOX!|0xHBjRB{CJx$(oH61)EN&dvH8{S$#1H%$UfXxR*yhTCxRyI+h4<314 z1th{!+Fmd~%xP37I}UP5yy3;U!VUAYSVo|ZMrY0P4@-0 z>As*g-51oR`+~-FU$B^-_DlZ@8qAoN{-4{&$cli6oa{s@;-!BwR z^WVbXzxw}iBL8nI85mhUzx+>T{54PPoHnk0Vznz2`@!uMK$}UX{j)Zc$Pdq(Aywb5 zH7OPQr3zgC0a&u0@?0h_GH=KE{kil{2?@Ds4Obk;R!33lo2P>E+n;QGn@$lFo_0~z z&sCP=^HDuT0U|q`!~HvsweU6s+4QQxf+>B!>%-|4iK%14hZ}m=;>hB5IC3`MC@}5yd_MBlaZ%Z^UT>!Cg*wn$Ct8f`i4}oP zqbwGD0JB{{{ul?!+#(!(Oa6|NYK3U&rhOC~*gDsS$-OO--QRDH3X>6%ZCq zdM1fmZY4>x9?pZ}T&@8-W4`}M5XUTL<~9A~!J;-$+~piCK%ts(kjtfAdDHfwwG;yJtY-1-EZfQ@Vo z`%kvo+wC^bF@p@A2bV%`qV0o4Yr!si#>4@=5<%hd8rbkXt|-Ci2orI|qm2oS`SxBG z143c3>JnpE#KPQiTp*&!9GqY+bqkXR;4rr7Vy7QXzK%eZoz}10G@J#+(dEDwXxPGc z546c6{c)v1mx)TN-O%kRd_{_oR;y>p)w7;Bd0(OakzSek`AZR83FV8+ZyPFTSu1B| zjV+9o2-OP`iyVuO9PrIoZrybi=PfyW|iRZw{;`$M3NGYDp&YlbM?WVPM~jQ$gqz z8we=Nz-ui;$716{yY+G`wXoizHL>nBkvdHix4c9(P;XM?NT0|pMgD- z9?yb<<&PO=JAzNQL-}z0PLOf-SRM5oB1r8u){7yHzN~XeBcKvS$J7;0Y?5bdd8o^H7BaX52f;} zviyk<;htV@yKIc2={*zZapNO!qV^pGH$&bRs{FC&mK-b@ipjt{!zi~wE>1RnKkhP! 
zk!0aJREQCM3fR59V_{N)KcV>mU{bglpDQgSyd&;1+V!sLKFoGak8ab!(_chZYiZZDJ zz<4K%g@_^3{w5O5!S&p1t>CBL=p zXVALpcI+$pYFqui;9jnTycvwSuM8IFXu1p|m_ar)0CtDP3v9*@-(+rC7*BYZCKtu< ztmwF79rsVp?t5I3UwbGL$~UYYyAat0m*&-YvXEO-+&)XyE)TX|c4iR`Hx=#0l2%Ns27WsXs&Pcj<( zpAR<58N5RCH$Lr3qRY?i#+wWT1C<<%KM#Vzwp4QWE@W&GCr=ESXV-@_7#uo_5@t8v z6`FRz@l~OTb0|LwgBq}WRQvm`8c^q$m6&6eEZ4tv`Aq?<{N@a)IAN#6t0u(+1Egdy zWbT_QFxrw1KW8@TxhIwI$1b^g#;RB4@IGZ?F>5Bw*%u$Al1QV-@YrN?M&E4)0q?PhQLG3E(!Gp+0>qP)({WolYZ=zEkF7v5>L0Q z?U#Aax{GYTD!f7;bq~WF80F_|k)(BQWj!oOS89zq#5lyavNnAbAm6VJ(rN&yaAwCT6Ko^YL?e-B>k6;1h=z(`m*2oYs8ud|5m5A`=JuFQ@wyY} zof+%TCr04SJV2b3&&vDb?HFi!b7aK}n12SK7GG4pD?IOAtJ~pOo z4(7%qAeeD{wsKkW#vwPT9@iY2fY6;tH^SmdN@I1<2|G7)!M9qu?04@rG9+35rXhHaNxYmU)pkw$zSFWK2v+o=S8um`+5NVADCy|R4r+i&<#CYw=}649{M z3yP!y6+S=OTe_b6{I#|bfrRgx02&vc7y}3wD*;7PmdBMl4GaW2AA~%UAq)^PEJ9}k zrK*saHUjMym;y_f7g4-4P^}uI`;;xj-jX{2z<)Den35pMv&p5?H#NW=1=?KU%Bzpk zCPRD-aT}4-o>?QpthEX=W}sV_j#}eeY+5o({9J!}X*)6!_eev6weA2Nn;-gWFXkWNe~&aR7UfEFGltq-?()pBf_^3uZylk-o)9Mqz?8 zOb1y)>VqaD#5LG^@G#J1?$p>w0Qyllp9=f+Y&>sc!<^tYRU6k(JL;={<^wmKczkKM z)>7*mIEYI|gX^Z>3s4Z15WGE}%(M|IeF`rpg_jN*0@7fy?kr!$j;b$%FT*i4jaFHTPW&Mrf!Q$YBG>Es5(89`?Dj;6C}e^tHSuRj$XwnCePaypvO zKhjTULj9+u2y?Qsby6o~LO}LKar|48$zjQqQ=RV& zs%6<=H2i?No|k8i-{J;MLPvPpn_O-hhn0s4TAJth4%J`Yp0PzHWTDGPvJH&~#qAJhwk|&X;jTSYLKJb#UH4&3rVx zJF#FM`h8HjhdcmYTAg{N(`2sf)iN)aNpfRSxy{51jW+F%n`>Dr{faz+Gt0$xkXHLO zU8-7k&Dt0$WqIal=g`_|+vyfKi>vvOR3N;U!`nG;ZAG5KU2_|+OYdCnqw6o~gLqv? z#L#>kpIEJwWee=Ek8U>l5jjw`Pm$EsR55xzg?w7wBblZZK5YXfccxD#lv=3Ns0}ar{3qea=v;H(CYDD>UCZqP?~qcL zFzpU_gB_}5{zc0Z0RujNgq~HEx5H>-)#k)s-KdLxyse&dc7=Zz8q=tnanc;Qih#Ya zW2>F(>gJKyS*aXgWXN2{M>#dV`Rt4ySS|k&B(KwIXir-ywZX7Y$pEL;)1+ zXp22dTFwL$+*hL&%+ClfC`CJzM$LZ^R|_nBDy<{MH=G%^9*J6%)u0-;?jt)9IFli1 z$&MH@x5l#-G(ANjwTNe)22$I-FqoDV5;5SLhKo(>9Sf-(1HkJYn?=*0yNapJq;Pu< zr%Cr4+f=zMleKoB7|A&ymwz)hK_IKc?>9^q%msxXr;puGGzzetSqu7-hvuvfZI$N) zh9N?$$$%wWD*H7^eQKPdoP400i=vfFnR{-ha6%(EKC4X#2rUZ zobnUYh>06~TVzsz!Kgrh_XOHt;iRQfTGma%hKEGJEoUtq0sjqDx<}#`CW;72h?OfT zu54|(NCr%?;Q;X89*0V~YQCUdx#UP`_Q}F=;gin$XCVfLY3ja#xW1l3i@)Uz?bEXt zX&+UQ1DLcOnyIEp&)A)p%_>Zr3UAWD!(w7|L9;JRIU=Z4~uv*1Q`7{IljO7Zc_9QhTtQETt!uIJ8854^FSHe8EyR zDImG|e%@x>#i31J<&fjh|>(3Z=iW5 z;IBOx<6YYYrk@Dqyfzs}?atmPC@29U#Aibb)g*oqt*&G4lS^Pl53bA2sKJW@K9uEU zdFUP8#1ODTM$?IS%Xx?ahuB%iQ=#4ET7w4MB|m~x!Bp!}4GfB|Y@LM{$$C=KbFu{x zeUh@g-qA!EHGN_#e{fX`lPyZDVHlv~;7y)6JfzJE4{S#W1hHxmklt39zX~Toyh_)h zZyy*T4j_WJM`pOy)(#10>}6MUGKd`93HHd;&Vn@GPpCR2>3p+cH?iRaKyl)D7&7@X zTRN{Q>NBAf2PWE`x@*@8c-(aeHJjVYpfdMpDzr)A(Q&v)&(gf?wCf-(k6DS3eZmwI z-s{gYJ`A9yPsE6i()ny)rT*QQ92oU5>6`=7k^)%0`x<%BBvK z3hqihU6c8{z#d-}H^K0!ei2p>Yf=++?8ta}qHq}A8~A7XBbsw47B4_!v&r9Qi> zRdgaBMzqZui!mDv-SqpYUz>~5)aJwwXgoTP5g)Yix9ldT$^(5lpb9Aa54DQ|dUBDM z-436)Vvu&-@n>(Jc#4=d;Gr&2`2$fEL=-8Zk5`2zF`w!e{NS*(pM4)r{hex&j;ZhN zyw!H{N04}-ok~uc^HDlal@ZU|4oA>=zbM*+;H{C%1u_RdG$I$L*~rG4**6A%tYveb zSCZq98u7W>(i4C~C;PJ=`%LZP_HJ}O|4QO48Fk_G4yh=`;dN+7)Uqp4^AkGu$Yg$< z9pLmTx*?q6y{E4o6u!L9_Vmh1_=vHFya@+FG-l;#FNT)umGhE{?Dh!zh|qx`X}dwQ zTfLvU%kAJ{7FILLa^e8aC)JS60|@&}zzny#AWqD6Ga`*e6S0sGH7+o&H{7khr!qpD zwQ$oZ>TyNq*GuXAO{uND4?AFZm*^*PeEko15UL`R5fb(5(MfSh{(SWQ`oLsTP$(Yc zOeiBPJlb+St`JIIeqafy>k!@^^9^b4)ZW&mkQ%$RF>s_7?jfB6s^MX@VaCAD*eh9$ znN@#8n@6+W*9}RcnQ;2u`qbt6)#DY5yb%5|PJ+Tra^|FSe*J1#AFz{=4@~1-2y-%M zSaR_RxA~Cw9$*Rxeyfp=O(jV_)lm4dd(cqPL8|6u4dt1_FcC&PYL}f69GJM%qcmA^ zU`pq5QUz0^^T}xe_L*ZrX(JVgcZQL7J}o1(Z$JGZWiy^ZEgyYr?a=NwH8i<6IX{;+ zGkL7{TR3?U@6Dqhuu|#w6XJ&&Jor9sstNOb=K>%e@V2Ss=DPo&c&NN4?&%ZRFHrm+U@f}OrIDXd?km*+JsuP5CxZKW_ZM;gM-BvL)=xC~@ANCe9#kRz;+{Gk z$k!VWLI8?@Hs0UfNhH{x78)3u4wyjzQbb&g0z1dszR;eq*!~cYJb&g2cQ@_kBi(JW 
z#c5n^)7|aD=)&p3iDz=KJEwH=hY?JpFIle+*)KruED&BkadAm301&7vKp$TsEG#gY zc*4EPRR3NGUta!g+K>-W8D3sqvT$hx|DLV7En4rEQbJx{2)s)feqIG22oMM$pPdh& z9t1=H!D!4mjI4fenV?=g5ky$frdm=UQ?sBRteZ+@0MukmJUnXZ=Pf68-U&!wZ(lxI zZxjdvaGRbq2rxFF+ih(!ko~79M2%QSviMpMp3c@*4%{KgVQ}FkF*_cBCIMMT??HGX zkZrJPuvZMUEErqg*Wck${=nMW;DYa0C%qOjpOY^j-ZrfK-SB9LaobR}fp`GPy@1cA zDFE0;5T|e|H*i~kTNmp9AdevLn&(#cmHaYG7y9sM&`!2s-JIF9>fSJO2tZS^3wr`C zSvUZ^v?p4;(S(SR!{E(6S&?`@ptqb3@w{u&Z~*+#FE5-$Xo3EO^IzjX^snh`uP+fh+=vkTny$KKIU=vV8 z{vM#MLG*F8%r66gkd<5%olFa`1LW=OefH>;!1BKsJBR2@fHrN%wrwY!q+{DQI(Eml zZQHhO+qUhzu`&If|NM(NGmF{Qsy21%R9*LTU$;&np9nCY4n`EWcMc)kpjM2+0$zD+ zNK=I2m?%dOzzdok%gc&S3rsf~`Z+AbASC^F+Srm@nnD2Ux?(gjo{V;3uIQ-NVz@U(TA|il9 zelzg>&=tZ4d|QLQ=4!L1!Q&;~mFi6KepQ;Z_QLJGS0dQ-@kg2u{%#5eYWp5^V9tX= zigJhg^mY7lo%o49=&ktd)%f0pr|#t9`bKd4iuoZlh(lePe_#L_{G*O02K>h^3PJw| z-W%kt@XxO>)}`h1-a-p1Br#|QMlHBZckrM$zX9J*g(9XUP{V3iQUB|2yOY@6+td}j ze8w<(Jm{|{`+j3!{~z>0D7Y)XH$S4^L!YQ1#J??szjO~7a0BKS`9Bl_{P$OSgD9!) zk${AV1aipImW}9;{ZfI%@>h^zcb6b;ANu#GWMF`p8xi{;Vu;?FO_6^C^)j2X+gDjZ6t;{|d9HsZ*>br-_xi2w@E z%p@wQ3t__!Sw0>cQDXl0PNjbL%pVxm?x~4U!Q%kkGpQ3A`F&Dd(_?ro)5^5)OVX=) zw^}2y73c`Aswzt8dl?i$BprFVkcB~6@bc)=;s=^I#Nn|tKk0(mF464UP7Z97p*aUz0GQO=jbk z>-kh!@g>>D_`FKEXM6(sGA1QAmew=xid6v>Sc||lG2;@+VchYlq7=VA5`ptOQz?Un zu7q$NGCU7Mp~DCFNA4q9YAYNBjlMsKajbxsI~^WiTK zsTLHVGk3x6Rq^?|eA~+f!GxryUnGd#@4fW4-(WB!?yjoZFv%&%O9EY8(JV(M8@Vom z&B`WT&05sjr*&WHL7RO?V-uA4U}DU!hB?5P_QrTca*)wwV%|?HdEu=7bhnYfRO1k& zn8k9{s#ZlG`d}`Wbk+m&QSTBhjDNcPwmW?xBJ>z(k@+eosy^i+UXbSBQUnxgSJwS0 zZEZiRmDD8`rW^i7q=}AE3N^yH@21UJj*a0YWL>s2Se&Ate|2Cw)R!leA`{fk-NlbA zH|D(M;}mnq?AxXsMPRX+uyZ|^MetJJWUu^993OM|o4!;^S-Qwb2bcE1o-m#CYVbs0 z|6Z}(qDX)2%TwM*HrrwAPXw|GTd_WXJ zI91PH@jcYl=!fnY&Le0AK#k6>sxEmYW``{iQt`_~V@M@6VEHQWs`OlpY&pq7_fbDB zG8VcM{To8`tge#=|Fk&2v}x%n=yUfFP7YT%#w5T;NFOv6T{Odml@o?(rAB{$U2zec zO%7FzobEx~4hCS>YZbR2!=IJ$%Er}2ry`VWUlJ{4%2j`zac;P{qpl#;knCWpHO=Tx zrF@YN(fy(DZ!az45XFzI&pz7=-uGTS4rx`C&&h=Ub^4%IAgaBIA2AA4d(k~sqw$j9 zS(DtuF^A++ zD>tLD3q(nK3nevPC=7FH9DTF2i+dARx21W?T|d%pEg*K^HciY9=lU|?{xn1_=I&D% z9?%UBKsP3S?Itrb-a<{0`*YN

[base85-encoded git binary-patch payload omitted; the data is not human-readable]
zL+EinccUi=A=O5dFX&aK_123-g;nE%#8~(D!9QuRvmmRwnvLam3^xpd1-P6H?cw8y-f5L{JqLD z(TWkm(0%FP+uWCn^hr8~sjaea(9XNo;ASGwqD%XuderKDmZFm7{Vg2`-?=nAbN7rD z(M@3w(>B|TH(vP6qw7~S(jTAq0dIAvai@*gR&ae-*9Y;6FJXNz{bB%@L0X#Djghg^ zob&q1nKDq>@3ny3JV-05*n44zdgJzDZ^X{tFqsJNoK9K=a5ba}7F$7xe zWM>J#s!IHFo{yFUiMfBg8WLeh@q&YSENOb<*QdR>x&ULWxAu8lihhCd5z0{+o3Xs>-Uq%DI;=1ZSTUJU9mKKDW|ZuoawdDrCt0eY4=ie=qa-y z?ut}QW4pm1D@`bK-?`}gb03%^Wb{;H9R)Qx#Ox?8M{a>ksU4)T?eUMQPHwJ^4UGWy z1Z0$PSr``)6>X8R&RU*$+4cniTSd2bLM3j|_OhvajSGJpDB>S?DHo`JVA_{=Cly*( z$@PlYLyPJk`@>)&Dd%aFlDkf_$NXBBNI*JczDy*MD|d-SR&hJWXppyLb$JN5&fNy( z1ICJu!4s{G1HN1nc5IS3xv^$0>N>ima~E4Kkjs}1Z& zs!Gx>T7zK;szEc3A&Fgu0Xn~?bL!EAvF}C$+sSkAi6!-2a5hCN&O1m)a?#^53*6nP zunmbL68wM&<}(Y&(<$MF3aU*1;f>9Wb~CoIUxP;>MExXc4r?tZ8s+)Zj=1Q?5Kzv@ zc#9Z5-E`I@*T_6e#OJO%`t^Od9%Y~G=VQeoS{&KDPW0D7NWeDJ2Ml^&53SFJY5vom1O2*kqApqiA!O8;S6Qg5l3l zqH5;y1Amiqb19f71`FD2SCr&S6!kP%W`<>IIf9dgvX(MlzBkn0;V@};i^ zZlp-IKTAxr&?#}h(e-`pJXRkd~Nr+H{g*#29TiDl#Btq!<|V#&FJVYv=Yb zffL+^f(g6pTR+806+ z=P*0{Vk+zPJLeXA$Ey?5MQgyhtz^w4#jFKs3mUfv5h$mni5pkVvx>v^FDQs8WtH#8 zGGRbwED8sO47e5rtA?N0ajSIG9&=AcvKxXfDZB+29Rc}5HNg{5KO1NB`4wQ({sj0O zRY@k9=i6{mo7ItK`gI>8C=y+YyDoSOD`OR55{WsGW7wfyQ#?ZK)+a~l@I907%vCbj zM@HgLHaTYIJHWC9eN&!vmnn}E3xS<+a#=bGw`m=(9AM>!+iNu8(+6^4?5M#wA3JlG zS}=Ru!nq(vmP(X+30yT{G11=Lgp2{@To~=r_0a zs%s}ifBonRJf+GyEJTYSqiUsBQO}*1${JdfQB)gO41*(~EvGk2{q=}DkxKkt%%>yW zk#n0(bsNq_XRpKKzyUc4 z2-Bv*rTHY%o5#Sm@VdzV5M+;}&*NXUAbK@r6D_Cj$k#kp7g^+_A)0{bU!Ir|XOkjn z*}$$Gm?O*t9elRkU;6;$P7rVZ8-eP7ULVZN_J1t0|3Hr{tjzyguKLf0StcgV|9bd_ z=1{rBDpkpJ>nx&uZY$Y3X#`^i+icN^9IsIrPS<2sV_v~>c9cz!LaLycprqnT6rmCi z7Ljm@r4xyepzFewbQ{)`xaK-tpLL#Lo~_Qf{oJ{I_}TrWe_n>05pjk)0oFDw4~9sf z2n{$T7As)^79bTw5Y*iUO@P>|(+m1g(;yLJSaRU9qYTnS+G@xp9WbnA0B2|b5CD&_ zB}_3amj+gW3gL?x5~dcpvMSHa0$o>7XEM!yp80P_HE~&L||aOJHioxAW}N>fU6I^C%jAG z>?g;NiEkSe;rWk@5Q=a9K6L^F?D$ax(pCh5

Q^eQpei z;?(FkXi^z`T%1v$#hH3iRDSljA%_y7Cv@bSwBhEnzCe4_-SpTWQh~TaYk@LBfEREF zgv~Ub`9V?*2zfB_ZJK0cG^*j?fJvnAeTHD@L;hd|Mrr^sqy!W==x^se?HNZv7iTb1;4+b|VkMAOpX+ zKg27jx!KA?Ktpf8iesKm@LtNPHNm6;YbO-)cdo4AYfz9uoN(d6*W8p-l&@&Xz)>I& zP}~MaCI`oW;V@VcV&-%H;az*+gW`q_7-yLAFy&zSBojzY_g&x^$rWr26YqxguoT#b z1QhTJ&z1qCNDV=dC_1|;zP<=hfL~05f=TMo@sj@nWXb^wRFJ@LqU18+;OI!2kwh%O zks~4jce;+PDkOtl@>H<5Jhsk=*=#W212#g7$kDdV_U1dih5J<}RDr`Y6Ia$S-XTgBfKoV!BBIxR}t{Te=N0XJ~Zd6Q?0q4p7hbfH4iy)+-G>s z4bs2+{o+{R+3};+zV~e&D`r4-YYdgb8sN<#OE52SH%#khnz#2+tx@#UP&3cGtp(R$ zdRnU-9~w<5)zxE7NJ91fgQOPd(`alY>>H#}DnzMj`rTYydG)mlUQ}8P^h>cJ8k7$&^u&{ZRY?zR+zuar)+}+d$ z;n;iWH*cHI9%q~yZu0d&m8wN%biO8Ql;2fH4k=@e_10f?JovfE-#%R0f**I|BQyoMbqTv+fq-IRsVo_JoiEH$z{W3ucj5NUYeF2Hb_Y1>jx zWGu7GKV7aYVXLcNN8U|HYM;NvpN#Z(Zj5->Ip+jf~mr?Bddq

x)giR`|)xOnEAMkm1;cBc~2x&`_O1!mc1gXQ*rc^wi}b4a%H2=!(2+zrCWQM za>(DjbQl);50@2XHZ=J4aNKvR`bg@AOmiv*sz;O(S_(fNTMjw_^K8*mh8q6)b*w8_ zSFxC(??@v!mkf2AB`Ax*un7S%{D&>Bx%e~5&wu|{=R0+rXSzT%W41}R*_enwp@=)3 z5Yr~m@n$em>jPrr5wIJ$%i!m>Ifv)f6u^zA;IYO-m+&)_PJB@n=HnqjF^H>ak*-({&o;+MaFi zY>vDvUKSremcbQf9w&L1;kU*>Bs=FzN5`ei(X>(hpFgVG__Z}UbN~2R_x*#~t)|^W z;O@DXbsdRy1@z{-*N!IdVlB!0x<>ZzPvz|oI?%bTh-O7a#&##XVRJK749{)KM7X%v zH}nx22Bh2hCTq1j7F=UFnLw3Qh+cPv0krw=y&3qUqi^JNid|Ih^lods0MW)wh&blY~>6 zRZTJyRJ=!`!YuM0jZq{fR6BjqqV1~!~tWCAG&226#6O+Q!`6Q!jJWG_O z7uK9RTh0~=bJsg(BUyD)O|bo_T6&USs=4p%jM#H>ab?t6fthe1+QoExZ z_oA6}e%5ko&h|!YkJ|o%37sy=y;lf_RKyk@ji`IwP&mmbD}5pb)eOF+RCy=suWm`b zvG+VCyZ$$m)_;si_C{7vJUj&SVwN^8rcVFfHij;yBBsXnCZ_*;n)+WB!~b*&WMbxE z`rqIF^Q6Vh^xyT8`o7u)r!`cS)9@l`nnxMNH;Sz{k`zW~n2#EynvM2~BttxhvKNf> z)~zS9A~u%T5F&|ET9X#rpVhop{r-N`UVhegoMdpGZk=X-{^UG*n-tSjC%fLMn9s#a z8B!4&Lcj)`P33#!GXg+`iU^sSR7=XZ-<6?LIRh0lOQ=MS#D7>9_Uc0xKquCf&QTcB$MH}+J*j7nwl8O=tlze@FVXY7siZG z5i4(S3Lqwk0*PY6b9ez{8^%^T7jZxi79mmrBdWu0i%=wb0JpG69IRmapb$h4?7hr@cMr!d&ef*q6S&FY@54m+qP}nwr$&0yKL;T zZQHhO-s=0IqtA)=!|C}8R?L_YD@TsZd=x%?GeM9*dnCYa1cEl?#m#v5jlOyWsL>zs z@DPoGD9MR|iYtKp%>ZYyPk?sJ4X%WM*@8x4*-``HZwnPH001wh0f5Cp;-7$@jP!W~ z{3TuhfIq)^ZYx1W3IMnvM?kU#2qS>LN;#2wgMx3GDN-fKJb+6P=s|!W@mYEKemLsE zB1#oEeZB*-494&Q*p`@(c2JoWVTZ<*Ts~tPdlSD&qhCwI7()0#<414$)ydw4Hskgt zY^*D}Z>{8*D3gML0Xe{fDS<{j@qaFkocQq-uy=myta?~d-tf}0Q(p7XuxwBFuDdzF zIB=zU`-4GC5Hg67cf)Ca=YwcB5u`}lK0beLcm6f+lFD)DP%d5lEdE>14jOlxNRMzK z+jAxte#t8&V1SX>#@`Mc9Bi|uJ$7q>1%;pAl0caa9LKlmE9u9b&Yv>HybA*xB$$`Lr8^A!nz|6(#^8S7G)qUiil=4`S7$P*LHJ zhor9~I_vdd322WIGHzT9zzlB+(A$1G8*?O3pbXFo9uhoUc4i~o%>|GD?OIXDn$y!b z=rManEt~Z*vi~qZ8m}G(7`TlO$GIvtWD(k)8mvl(eirUQA;*gDW-a&J-tUfO8J$tb zS;p*f!Q;x9GDY4FRK=+Q8N_#80fqD7Duk6dSv!gvCC`tgcYN+X$ycG8V1a&*!{*(( zWWwnXy;((%Kj&Be;Qb2lp?-rDljI~!G*w>&9FS9%=#3TBdJfh*EgPqCp}Qua_5sm1i?ULozl zWWrKM>+eltz2tox$#(*g%{cBOS(qkCcO;H28P5CSPEtwsM>WDPfcH)vie9I6x2|^L zQ`3HTWJ3rt3CK{KgSD6$zVj~BTZb8&Vb{lrlZ-(Tj)M1dK-h@f2D zk%fr1tSYWwdZrTDJCoNMNh}Qx5w$ee_rcWIE6KS>*u?x<0-roLIrB=n?&iGV+9%k4 z<*`DlI~7$E=|tZX=K6iUg6*Gy4Ctk&1T-)(oP;Q*gWSw}RHddygd!0*8aAyul#h#z zF^;;T1>sV7e?!R2wj8-l`CF>9_m}jy(T0@`lSEPQUOk}k1< z;#38W-#)yd9d02LaYKMT2@M64gOp55h$~`;cZr%=$88%>XMj+8_1IHBi-d2?v>T4n zKeMlQ$Kqg2h9i>z0p?YjmtbJi^bpsZ%Q2PVD;_-NcAvBm?327|?CzZQBq7Xd(o8O$ zMsps#FBRd1TLBu)co~(9H}#8t|G_t~@6dKP+XC?ilDPbJq=AoP34;{V7Mi(#Hd1nL zQH?G}k_8I$Ryfc=ka$T^PVIMejv`(>|5TPT}AGk8!*Nd$}&!D30-XWr{l}eQ>uAgecfaGvRrY0p!kgG_z|8g z<)+TE;J5PFZKJ6NXHu%C-5A1-?~5phXe?b;@YJt!Pc*hFIeAv`-31CKIHz;1?FX1C z+&pT=zHEBZmv1`|PcZKCeXaVw4z=wjFEzu8HTeUdFt6_u9IW3do&{Ef4zv+3h@b zof!}6<~s4sC6g#|WOTn3mfPdBCb^;bURpJq+~-?U9wO#u%9BS(YEL)6MJk;32_5-T z^Xx$jj_isVAyr+;5fU*vj&`3F!7It7FfjX72au!<8)zcPwFDlhhTci4p zF0tpsmsAm-i=2wh0Ye|TFRN#vsVZu7&a&cpZoQ#LxZ_GZr_U$L@}%xs^@V_ohbcN-P{wwb4ENgkc>Vxo)~hN(XnO zwh?Fkr=;Z4MO__#0pHRQQzx6|A9XT@Yq}!z$A!s^LCcx)<42crcLZwjiWz908l+{z zkx2Sy!Vrho^q#wK#ul(C4a52Pf#>SV0o6s4t`g_Nd8gBo>|CA_2o4<0FXyvaWr5AK zDt48duogT-xAU`!VL<$kZ0VML>nPbI_7MifR=S`zDrM)g5B2Xr6AdSbBC_%y&-kiL zh>4jOAoqgEEQj%i##RvV2#KwWUG*Z3CV4is0x9j@u$sHJ&`Q^|wYA5l2>E%C>Nx70 z&Bm55b9WF>2VpA_sSq(LB1_xv{K3K8nR@R`uj}hBq$O7isX61HuhhztIC>sQr|C3O zvPxxf)r7yN_a`lOmTFeaw(fD=_oR57mN)>%{6(@Gaa)D(?h;NRwCg){D&Bd7_A5Vf ziqfSarBC8>MVRvk9yxrSxRY3yL(f3vvRdz6UMQu@kYmjjhYgEo!i#SpfT`56w zR5Y}!F3@%pyU7&QL56TE4bI~zCrj3weg6bmFt)l~Wo9d4fht-s#d6%oy|{uOt3uxO z*0nYL?pU$YO^m}uL3c50Y&dMtu+#Zzou-%t+Xr2kk-Hv%8?j_jyy1R zw9u}tKI@2$ZazkvHB;kqVB(Zaxde0*c7F7&s_vWQaUo~NGP1>N^cQuQ>bBRgJimCQ zV@aJr!%MzB-{#6ebB{ltZI(Jn7nuCjc+?Y3!$(}|UXQT8>fmV6x~%-|&CrST@}!0{ zEKq17TrAHt&AQ>?y|u^b5j+PbW%DSj=oKcCsK*o%2^-Gl26NqXz7@E 
ztvlBGEcN^bEAV<>-s-o+2Ct7#boE|rP$P9>i{6e=;Q8(^cAS4!%l(tMANssaK zBMV*8kRRkFSJa9uS9rL&qwt|KB1xVD;RdEtJMZu?G7n*DfKUzs-rHnQe z`hObJObq|=xWvf7@_#Qm!2T{dVEuAQEgg#=9hP>MP#EycqA`Qc-_4mxM3LAE7@SOT zhB+p)%MA#VFI)4PyTim4@?B`V>q8xzG*vUtm^H7iObii@r>3$!|Bd~&zUaygi8 zKEAi(c7DFT;D@w#ghSO zTri?K#tjH$**AmP0Uv^fdXYDv*a7NY^c``nIsC`;?T78twZRh#>u}tA5ee$FB@0>& z6XqO+u_Czuq!Lgu*Z>EQ>dOX$tbntR67s_#to=pu$w>!_=0KuNfw}D&u3kX9$&U+R z5)g`dm*)b+j{gcGr1TLu`g2Lc2t-qZ5zO(6<%W$F-$FnHVz~A)qKpvXo;vG5M$if? ziJOy0FH}R&R?7dQLPG>BYz-`$a1lDJ3&%&7!Q<+*N7)Fi7?FVtj1&t+q#`=&CPyj` zezVQv4~}|VBMWipHRP8D3j^LI2dEUzch-r5olCeCN0@I5DPvc~jSvBfkT3B!LWGq5 zj9-M>Q^nYIB*Cft_Suz+0$~WiK-|LzNhKsdb8>+QHyA7Hk`13gM1|0w#ejjJch0-> z%Zd<*WKc%?!n~%4_6OY3+db}%Fyk5a;QoaWmHTG`mLVVmAq4dSJmb)Yf7M6#&X6R~;X(d@ zV5IDagWJ0SUdGo(JOm?#Q~zR!dWPToh@3Rr_0?bf7Vj|EfUOqy4+Jp@9Yu@l@0ReP z{uBfb0fC^0#6mbNUPVP3w801%0G9AO)4}r})mvD@r9G&413TdFkyptBr1DJVTKq^9 z2!@2WdihUa*Z>#q*Mez%tXw&eQ=3~ef z`HPeMfew$7osSqv1wEBSG3cPrXa_ip+9xd5mUYHcz?$=zOvMMW!BFiZoaeJMARN&# zD0R!hqju1(i&=MSd(4!WZuW^>*73e3$~$7^3Cn}>a(q%NRqajrHnVuFoJw)f!GqU4 zJ&La`En!L(_~cQl)gjqAQ9k^sduTqLeo+(5!#R&^?W`K8+WuLGDeNap+5Q{(GFV3F zN=ro4y%-!-I=VWqZ6(?;ttpH!1eXyTFL6-yuI=B-laCH zZ#%b@{t6^l$}$<&eMEY+JpEI~>^wQ^b!0fJ!>7E2fc_9XKEB4l-+i&o-$iPt;W1?G zKK&Atm{Pl8cc>7>n~U^=pId?xu2U)lrM-FSf=_V0lrTM+n-O<2LJ5kei%Xz_=Z4HZ zOO-JS54C-Pp{G>49GshlSvhk~E0I&7mZVZ|f7(jg{fQQd$E9Gh=y@3%CTja{;ZckF z^zNX8)CKLz)XWX_HRu(FxJ6&;ZvV?>1zpND47c)=!qg`uX$>y3vcImq>fXWAQRj|$ zh1y0;On>?OAal9wYG?iJHQkT`m(D72*^ddwRlb`}A1?z?!trkl9`Qf$r;Y+YyNNMMA`;q*qn$XEeWIf})^;LY`K%=CZ zOl_l#@)T1MN8@pF&vWwcT`{^1p8L-=k7oEbAKlaXec`U8;rT6&*!B)xeaz!k*{e!v z5nRI{mZOY{UMs~R%iqgRcTXFj>M9Wq^XhqNUQ?@h7rGJc&Mo_8@2hN$%o7c={)96? z*?!NoqTIbjHE3eEw%dm02^o>{9Akas{2bc|o>7fj#Ldl1G}N6s%j^7Dtwa}}r67^U zx9*O@CHR=jc`Y0GbKLtgL%&J)j1iS9>a7GSsksV}oSLLB*RTen&JxX;!HEeQTJ5cD zxABhg-t(gguYT5>#|0fJq~qMcuondlKTGj^SB)ohER^ z=@!SDm4+jS%x>$72#FVzJcFS`l@jf!*8IzdH5Obr>%NB#CSRYG_r;e1+AJ0wrggAC z^@%gIweVi0t6Hy7$J_iqt#^dFh7YsC7ttmO&{8|oRsX1|t7*8-`n;To+B9rRYGvTF zGa%G{Txwm;S=_E8#EuT@E*!qvm||0#NvtXS;;lR0{m+hhR+6r5+g__$%kcpt6dHh7 zNs1N7PaE0LXwn38kf`t*@Mv~jF6~k~CyHRqXUWU^?$)K9N^M!M^j8p4zG|Pjld`+5 z;|6l!yK-x9E3b~!KzJETtrt)EbQ0!TeI+tq*Ia+<-n1l@mWkzNZ5!OQ4KUt6}usM2e!D@vKYeZkpPOL~p^5${n&h zX^?a8b*KWJnq+Eho81#!;JRi={2u&_^~jklzWR6otZX0TDYYAA`wTsXZ*UOLy$AmA0f1j+c2!~gPK`*^9?eppgp3JSxI=2nD3th_Xy5cHD_>`z`0+<>!q z-Z)??1PcgDJ%BqHAf^X&HUhx`K1S#&|3(z?7fgn<24p_&@yWUyQUw4v;uT=-CMeFCKtgdvcF0f-`BX)~BD3X!TQz5sIcs|LB?cz$?B z|0i`YfHlZ?{%&-o<$j7$cue?yMmBil;Lz7SU;a^Ksd#ziaCyE)1jrdYKw<(BL7{J~ z*ZG1zYJC1T;#*;1a?uZMTrePEb_2WJ1Q0BUW+51eKtSRT>8}j|8byDj{l7EzT)A{W z=~>iQ+LtzJ(vG>2nVI_m_#i;e>7c+Ef%y7=XF&7yQrGsS6L#Q)P=bf{f#4?h?bpDp zef0m11UzXd=k~=y76HS5G1EV`g|Uzh0>a246v{d$ZXVt@nOH6X>OD6ONF>Jr*rripzSSJa&iTFRg$V&Y!C-(>`^X~hiM`LS0cn}%!1_S+ zV4L9%4uDXQ0nxI6-9h@G3j~t8HLO`70svhw6MF9|LDOMi$jVW6$w-6%UFIwp?|N}C z=!-GN8JGf05ZBR{-V_7>uE$BmSn^mvD@3vdf)IMI2a#smp3q`87&`!TmEP~$ z6gT<{OBgk>yd)gJQ+fINPG4~FOi}JJDmETL1!*{yl2<((Ai}mWhoCmV3AEQeZELhx zp>RIZnc2oz9`R{=O;p~fm)aHEMrkgkZu}@uUQ?|@NK(I}(Bb9i*|>~N*CJPE&EYi6 zF!JR~eR}fq`o6v0GZh!FnOoFtnK!s>ptY^`+R)hiZ2ydecWrbma9s5vYPb$xXlzY` zN`g{J+oFwO7mvbbd>2+)wiaG;KZZ4r1&by}QeH1_Y}6>{&sDjSa9Tl! 
ziwnq^c^q0^_o&rUL_f*WMM7>nuc^~y8;~Z@btx};nahb>J2xx_q@t}TS4JNW=X5Bq z#l}*)drxH5Z)J63KhdUDVJ%kj*Ot>a9Mg1oJ}NFX#g$`^Y|gB-602Nr%I;L+w{VWU z3T`pA)mW}>4-I=98b(S9dpxaf1iu(>jwMK)mUFoM$vn)uusmGlW-_Nz&d+EcIRhcn z*kL51opfi-dNp_$>*r?KFsyJKKh7xlAW3X5n`AWD&sdx^+R?6bQ_o@3A!GVmAbh{EiQd#tZNunvN6cxJz_^0F1UDL{i~AQAbT4(1|g zXNtut>^IvHOZuvbDmw5Q&z z7Y7_kHtVBZxqAIXbv&O@{iv|tSWr6gJ|61*g=sT!kM^L=aKjyjxrA3M;4`8>Me{vu zJOK!IibCixwU!9#LvSt5n=+aep3PJPUb62E`~xOg++dy~I7YqK#T>kLP`L1G>6*jq z*0JQ$y*{7IW-pn%jZp48-zbh-V;&ym7p=nw*-Z>zDLXl8bFrR^%fr{IICZDl$EuXxn%=xMe)cQdDvs z^S%YV8$Mze5pRX(3`T3U=f^|KX+0R?3Lmqciwv>6k)p=LPmir#Q#iiFx{K9&%uLiw z9|kX}BPy_OEiv4Q3+~4ebZe}YRD@EZlh%^8jkw(k=7$ydb{^*yf%jp<;NKDOtrtZi z3KdWS9i21tj3s&U?RB_zC?$1QVUlEr$!&ja*mnv z@Xs!BR~V}st2nz|h4xE!ebp~6Cj(F4Y}81rb^3b_M=VWfI}MhZFAX=j-SO?S@05>T z+k0weNm-P5bTf8d-xq8>uud0Hp#z~j!`e4uGmA93Szna)o(U#8UB#9qA{=Z0Pu-4= zBCkWVX~M!|c1E9u$OQQqTF(WGdhXTE`WY%f6Ku8PzGAik7Xt6Y$XZjj)b$!fY;L!@ zlTocT=1rDn@bz^@uC|&@US>!r?)+L7Kd>L62~b3_XB)Ov)*8omn?hsbnNGDkIFj{A z@G9TFKB3PY`5c-1_Gk-?O{ZU89c2PBv1-wqlngkQN-SG`<^~=7mHIy(CR_VTv+Dg9 z&h>YwWIt)4l3_ec3tErcLq@RY7vp!N{Yz)xD&<~zf^GM4r03TxG&WtAx6lr?uzp*A zEc829`qVX)BWPareObM#YV~|B{pBfq-};SyG~bxd!ygy(UJXKa{)0UFUjQCi|ED}+ zWckn54?V~KZTTbE6AOwJ$rr#0)`-f95EpX0SYs;nl`G;-$?m+ zKy}IL-m+i%bl$+40}-Ecy(BECNl`z`xedf6=F*j3IvM)9C7teKYq2Zh1sU<9-^9MwMcB33G zs2~s=Odov~bo>%PL}F-tBZ+r`%BVcTayv;nHLYq$rN4H8GLb_WPQGrVdXq6Mkhoyb z;E?a1{}M>n_(H|R5CSN_0x206j8T}^nl2?C1PfGEQ2JLO6?KXB_!fXs3^15HlC_E> z1puVx#hpO8{df>%ixtw+x}$`L005f~0EFU0tUkQrUr#W9ccZ+e&8)ot_np+IzTZ^$7m`fIus4HD_5yReXTEnAXA_K`h8Ed z#KnJZ|I7D*FtxhrnO#>Cq4}0eP=fxEoTUyBEyuwMDt=(Ze=h1x4;1FL#U) z7ydhb`n%hSbrA;hWN{XFu=kTN;$;q(Z6GBsnOwGrsYEU&7E}RhjzWlBSj6lso<3l! zCmo6)i6S)Eina934G?5E(s*#^p*Q4H1`9cY(1c}wnelA>5r>pgR=l_d>}}o(6FAmw zIf)7{xXOQi!-ki{0M*+n539*vZF3P7D#IPMREx z^jPd>g{>so{m-qg{zB7DM$yIJ8Zk=-#eKiju?jnC4Y!{GzDehL*yFQef#teewXRUl zAXnYH1pgwg`3_k%!H$>hxK`^eL&b|7`ngkcx3Rdt+OsywohKihae@iwyq-4N51}+s z(288Y!vMo}xlWzu@*+bDjfA3)-5RUswG9lGB2rcx%`3pV0$<<{*F(gIkvKoozPnn? 
z#pbzAGGniAn4CVeJI;&URJB^@3sbwsBQIE+TG}J#_`Z71Lx*3=rN4`(wOKzN;R!E$ zMb!MP0t?tJjbtRVopV+rGZ*jmnlH|nyq0kb;ou+j=BQy?gE^dUe#l^;b{To{8rU6F zjf?cMX$uK?6{SiU8Fug2w>7~9S{NBlRhwlLCp9Ou{nQb>!0?`rr`ySv4W>yAV{PB~ z1i(D$9p}Ms&DbkbN7w_7lB|Q&RIbbsX6+RtYAC7Pk{`wIgX$}jxv8nW(s2VFnX0up zeNWFN2U%LWDkan(*FNeg>&U&Y~^-iG9TbjbHML0ZaL7KRa5t=+)Z{oLZxjWcJ+~K z1fBbm**GuGK>zgLzM0-gb5F`AF(-OdHh-5Hr8pHSc09=+MzHl1mVg}*Rq4$q;LLcV z4x;(4nE?yQmj)x6$B+-G=Vh0>=yvQS93)HVygJDh<$1}xAewSVIXq$+zsXCTSHK)ijT73-Vmgz6|WzdU716QrKXog=jmOwNY(T6ZYXx0_^;>GhK$ZmD`9JSSX!=^JQwqdk%^sU&}yW;i1ZOpu48m|i%F}I({DCCOKc!^Yztf7 zZj;s@;ny?_H0suR0JXS?_x^)E`QOn0{@bu&#Al>uVfc^s4F|)2=uB9l$7+jl93`I% zU%^ewu^2tmyHUP~OUGCRGc?3Ztuh_=|CPw+iYy4HO;n_iph)myuGu0hBFrx=+*FkO znY~*4;XCp8{#l#pu4>x0t9J9b_-gJ$!os!b+kY2o))o#2V^D|ceMZM8EP(VM@W&tT z?FB`1{A;J19DqHD-m4va&tf|ih9THCkV%j{ut|WnI|vIL4qz_Wmq1aX4iGkk)U)6E zDFJ~O@aj*5vJZc^Mh;9xOBV>=033qS8*c&P%>)=P%_=g1cX;FvnuiX-1c)Av8E(&^ z?HY^|ssYCirIFr4_DX=MbBKp;2Lz}J|;0r(E#uRpT;4xZ+Y0u9pro@6;-EGQ zJ=%UCq0G({AiAz(v`(1YNDTxIRp5D9kKYmfydw@45>Sacg1;XLfE>ktaFYLE42c$z z+nf?YwjC2f3Tf!z-X8!=3_xNRK>S-!1QNpl0sk-xUt)0wm`{@&0lZGX1mQMpl|Jx2 z7Z*fbzSqo)Mrbr_$R8aoXebcC_5S_2N#U3NhrTE59md5Vh~JxTdn4JBPxrC@yA@bW zq6Q;u0LUB;42Tdl1Vl`T8UL9+JfO1Oi5{LJWY5mX58wd8m>Wz6M63iPJ_v{(zVnJH zVbrS!VbrryCkE8&IC(OB$o1aTRy)9!tz3zCYa~NfgJ3G?pU;LCXEKhr`$NrZM;)Hk z`6FP(hsyHGCqk?DDi_GwjOxIyWJKSn4nK=?q#sNZj0?ymvxzhH`VvpU_xhXuU`V%f9#D7OI;T-gb)P=oe`ka|9R;uqK=RUOoR|a zuwFkH zcJ~&%nS1Pc8RAPAB zW1@Lq&H+bb^9Zm~%W$?2%XY4^+wC5Q=31UFbUeRDg!REu1!a#|mTDyezxJ*;J0KS+ z5gld8-6uWb$dZ|RoM06ly2QiZrmC0=zn^Q{%ho(I*kp_Bjly08bAChnWCemXnk!Mlch`o9HDD z3O6CPv)!{tk4^VA`Jv{g20kzqNa;`+tKE5z;Wo`4Jvq86s;}gKw*73i+dLZW$u*5) zN(Mt34bU=QP%}k;8$PgB;L+jR?8*~mJ7v7dHhnII2(noU+qMdNJ)5ys7WXWQX&O=ABt=X6 zv#=TqW*V$J-CDW+Sbx-sC(S>WI^G1#mVuk4nL|G7wRL(x!#f2h+3pMfu|_y?jf0M* z{f?SP;wVvhI^GVxn%Finizg$oHtV@+)MALhYbtL}VS{KuxidHDT4T+yl@2i#SkZUFVS@O9x-ZsgVrSol+vu- zdOvo0%Kkye-)d6L)~|Lzr~c{HVs3k@FW1EVsVoB`k6?q^y-}pW=wL5}LIHW~!0S92 z)h8}I?4xRF8u9*EKJ(?;?};;E?wsYTq*cAPgyl_9#(=loiTO-&TZ^RXC8a?EPUo_rN41IX zqxUIbbsnY00|WU52H9|$CNd_tY1 z=FCc1TB~f!XDn=PN2t4<(rpNLVr7N90hcH`#?YAUI@j}1UbkW8S>rVRH*bKn&xnCe zejol4o@6}KSP-yw}?_jbbcZ41>)*Co#8(V~!6iK3Z=?j3f2;j7W5TCY_c zlPv|Yl1u}2gAwM*C`R$!2tYJaD`%16(KSw7E822NT99vf+hes!;b9{Bk*|U)KF{Lk zKo1?@WR?mDPHxBEe8~4=C(*@9-4hU@!FKefaxGKfDwmu-F$0C%fMW1Gl%>A)_83`(TU717e0&mY%|4XEd z(iDt7(*(C=9!t%W`Y2tp*IL?5Ttx4|>nbz!c&vt}*_11qgCF5_7P7SO@<6L*RpP1k z!+Nr+vt`(4ph#h-KS~ljFK79Z=Hby8A8FXQ^Wv)B4LOecrQ|hjDwT#gXnNAA!-J7E zuZ3L(EiNH2^Dtqv`$1ozO#Qw1FX^>apt;U*Bk-cDu$rY>0Ff$m8n&{=_vLZxUKb3{ zPp|9mM$@^jjJ#^e^fTLT&mby^5Y~}#Po1KTNUZy(M45HFle1}xK&-Hl`48EsP}5wA z8cO=fTmOju73_QnlPzb4O{wa!NMMUqn#;(wa_o#EGc?`P#VP}-cGF&Mw49iyuIYE? z&Ait@8N=0~|Me})-Ay%*gF+lQZjsQD!xe6n@DVt)f@@7oH)M9=(7275hc255s#KTK zO9YI`NKm*fD>l{mco%vcNpnkQOjXGB76b8(;&18cWv|Pa`dM;1K~Phd&SEeXDKGOX z5WZ9yS~;RQx6|_C@&ytr%v9G?OXVu_YH>#d8=6wB+)qejt9;w)_>iN6hN@p$p|~M9 zqhdIEX||H%Us$hJa-y?hOO@XTZ>~jPS9(@j1Fe-C&dyZ+jok4*|V+2)q14#qz! 
zz8ZX9O$|-_qf$EM%gIKjxV0iT1slJ9UJIKFW)J-7O=46Pk9G7bJi3Kkn~+g!w51o3 z&_B#;AS@`HX4NCnP@ThHSSf0qwdvhWu2^05F<6dGPXx1Ai6TLMI-lasMUK3l1XN!l zj}LF{pq@icVx;%AW_X@y&5YzkdzqU3AEv0Kp%a_u$s#T{->{`i<6{3o)&DP`iR}Ng z3C%*!{2x_46D#xobtwy}jC_LTi?5EoBP5R!=l>n4oNA2>LEtY5ppZ-g0U{3tJ4Osc z%tW6ts*0ATyxB|0O{l2EhGH*(k{=FKtOx=^sr)azth$)|wF1Zm&>I<{y4%fj+RJR3 z`epm3=cVK4>-(SfB>dmIL3QHGh)p@G&q|1rWrGL49*2@kWAn zK|wKtKm(cQdMMNmBS(&CB&bN&YVg1T!umGh;l<;jzv|qXpUcVkW$uB|>X;J{f(h&- zmq&MC!tLZY-iqk}&(vOMQHKB{fb0S&L$j$6($OJgf$_^U!b22e|J14DAxxmyK@)lp zqY|qSYy$xUsP>W%`M|eZ$Pu8!5fK3i4JZN?w@t$QF;b=1c?E&Qka|T)bH;)Jl)SpW z-ji{PFoaAZZg#r`IbblhyM4a{o0^q)dScq!3?T6{Ak7h=fe@0$Kw@!d#Xb0B#1Ym$ zMZ+Zo)v0Oi{ng>Ln~Ac3ajtOj;h^~uP8KAH^};&|CcV`Lpi}gTP3svP#Y!`?u)0%J ztCx4bGP^MX!u9o0hmDy;4uRkEhR}zD)z?3M`_KXk6Tg|W!~Py5<^d&&B$?>G$d*61 zT@v(^BnWnO>#+8#c&yhH$wc*ozuqGPHjLbDqvJqb`p+mbJp=fM#6%RrI07Pk%J_CH zn1VYYWLy`0CNrSR=~iKHv&8{8!=c2DwO_>xSKIY zcS4yw0pZM_T}HBr-m+Ik$Ic9u=8-e`d@Sf5|BnDM3K*l)}+^vf;XWs zWc@{wwbdbv%M((maetoBS}) zUew-#ux5M*B0t+?tfoJPw?#Cc>`X5;bW%O!o)6DPI`}wL{2H5^G0J7`aXZeIa0?oU z420BPa=M5LefMVKk{^kS(cjvuOulfGZMAJDKDy71%$la_HXglVYArJrlin?!!{$R_ zJXP7|YWDc5IxTrwXPQLn9S@J63L6tw%m(xKGsg2#@=7b}ifw$?vXsJDw%*`PpPov{ z-wp<_ByIA+f4XJQm}_std8W@?V#eAO(9McZ*LSFxb(O zR&8#yHw`NzFwwSGR*bxd_DejDnhOx(YeZvFd|W-xwx^$q6LIH8Di{-&)mqa_fB&WN zR&s04C)RwA8gsq+7HVqh0$D=Za2@ip{s99?*KO&{oGV|%3>RFWx(Yq3#Y>S!7pzIq z9_QJ0KDPJzaqlZT*}Epm_;1q!=b)0%nrVVNU$Y4K_FV*u&Qt^~Tx1RZjCu=PetJG~ zJHFo7w0M~OVMQak0K@5^OPAjybk!s?@zQY_nK^}@9881m?v}T_){!w)h2~BCRxdQ) z^6ZpNbZOju4d~IaJgFsLYT*4Hk!Ac%I=ic^KI`9fUeASq$yUcb`b|Vgbt}}nHyiGR z+%G<=zYAz|=g_@aND0mbsXca*hHwk%>UE^$XP1bl`jZ>3G7hoW8>)}Z~vmCpk$&sTnDR{fY>D*O}h zFwZoTc?V(z{%7Hb{u-*6<){A+vnH%G>x-UgW`1&TuP93w>F(+vfbon*q<{**rHYJf%Iiq9;FZ|=VYFoN*H>j~j9=@s}VLIutNNLAl1Wil7eKa@E6q(rc@uv_O{cn^lNf!k44hz^WJw z^`JZ>WcX|&{Uk~0 zvdVII#Q1az5S+UVvrU!@1{{bJ+YmC=_zl(^HQz%fFD>yNX%#9eYzTy723ML6z@ z-|njPs#f6~iq=%_SZ0y81=FG0UQXosezusNAEg0(G*Amp$KAMXQ73m~&{$H2r_8-_ zg&sV^rh+~lac@SrH<3D@~V`)O-e43H3 z#d^BGZ|?E_)U43$M(yP_j8qvLig4kgaxB z#;Wp%+}5sP%Y6XW98>$Hu~>Vd$=V8I>FMv@<_7xG`$F-EuAxR6y6?%}+z}LJN7&;Q zW?xL@8{M^7`p)Fkm&(p~=+^$}{j3Z!dLn;yw!$u+PJ9(-9ZpY^0OnKhMb#QA-#bu^ zv+`1y34}EICSOb2Nu`+9yd*@&uuk7v)QV0VX7Ud?w`u|>1{++Gi z_|Lo+M)v>2Yq7$XC2+29I{i#+kvGnG8%~!YkxCadT9IHd<@JuXV`i$y?I15`B%?8g zBv}_XmU+$|SA3Rd|E2ASJMuPWzyDllul(paO|d;}&V2FSe09!z`BGM5E@!@5*FalO zihyRuqve8aYQL7jXMjLxA`s^n7zRPG-?AwS4#6GYA<{~IWU?LhNdtmmFo0rA{V@fG zzSiXji)W4DFGl?3yBv(^d1!CLgoO2T>w|f<^>VX8q6*ct@WDGcq0jnLO2K+V0}2-- zvHSy90X}``+(JaF3(&YGPaAM~7%|tx>Xq(Ye)kH%i`4jNZ8ek$=yD1)b1@f;C(Vtn z1|+2?1xlI(qy5$|@i3yDfzACL)zJknYljBeX2!8C3BHh)jJl}^=?BWjTAn~lwi$i8 z4^#luGc>J6><`jVclvIv4LgZ&^qK3m%clygHyf(+>2zo&uO9&~!S*3Ah#z#Dghc}& z6aWvzNAiCGSdBZ1&06ap-oeh55oDrAHUS6qbm|JAN*V|YLQuo_NPswGdgc}-&gX>^%AeA)e<44{=Gp(?! 
zNx}T3-`gCFO3?f7er_J~+MaK#WL1D^S04Dz)n}#y*wN=erQ!PL#kSaYM1yW)5c$%l z_(7;&G>oSEn#r+`9S~4V?cYWxCxFcrB{Yc{Q$Cfp!MG6QP>vq}6ADBKb&EBq0rl znRLlqzGS`om9JJv-{kGoRk{7K99EvWwxG7gwHoC;95^ZV!a>Qm>Pt-hy7l%v2w0B3 zCBPd!b2BZKT!PJz3wcEQ_r^7H0hXo`j{DkIjMAL<9~+ylbQv{;+miA7-E0<38UB@H z#A9J2T*Co(F+^L+<`$i_O#yU=t^gb0!$qLjSt}S1H4#y?JJxlb)|5(p)nFA~la|@) zw%{F%1chTyau$k%$uZE*r_J#v4XmfR?UE}V9UqTJTx6q z?qzsBCa^|xzn>_=#dc%Ymfcuk|D8lu5?yA^+|ha?b8{kdXQupKBBnItB04rqzv&sPmy|`HlR*;7c9PLe-b&;C<94xP_YmdI9yS%QU%v8 zY!~5Dlx6bFMslJ^x2n$C;>>E*d5RyK>|T5YGv`DejtG^MKQVDI6IN8aN{5!Ss7aH> z{?`4qvryRCBJ~)3y&i04Q?1#<&_W{0Fuqs#tTdHiqvCeXBje|>Au{&=arTbgp{?PT zZEV}NZQIU@ZQHhO+qRPxt=P70YbCka=iI7Rb!y+%?x_#+H_XwVcZ}X8mtPP`AJ^Ty zEqiorRO6Yu4Y3pSb8R5A!w&RkaL3AdBQo}eIl1^J9X zcg;KCb|mN?P)$=p?`1^G^!S0VQu+>GoX&f{uW^T=gJbK>I?EvTyU0sGbr z6C2`Mad13B#d&Z$9Z&p67n(EZ824UHVgZ}NiszY0JzQ4Wd6jIF`)o_w{1p2 zUu+2{znXLQFx@YEp&2=>9hnK23sa?mzxmAPl&Pud3>XQTM= z4VKGO`HHk4CT5cKU?6>bYB&P0q`c#J$yVWkNwZsUJ2|p=4a&TUA&gItxwVGs)Uo(i zLo?t9<@fc-a7U8F&bdfx3Ui0Q1vUbH4th(SQBlG@QiOjctQcn-H8!&G)1@+cHcrq9j5uJEqyDt|>KB|HjgYrkGt6N*r2tLQFs)G1Z zw(=JF@sjOwQ+Pi=2h3U(4}}-!8)1p5 zYwV@$NY8&mZ!3K)O^+;x@LcHh!%OK@P5Vk}tt#*5S4&;HVb;AvUx`(c{hxW9n#d0AaY8 zsG_2Xf?_@uff5L!LjfoXLIVwuW`!Z(8fy7`VB_dIVB5J@*-5vXBhy&V%g4>7FuwL&W>KLdi;Fwy3thO`JoQ~mKkI6gf+z)@J?UY-oxPbeR)4+0foM}dS2y`tgw zXF$Jhii3^kf zYcK$?fdL~H;@fOj8X7zSfNcN?*p~ra&>^o$7y$dYyA3zpkdO$lH2?%57{LDh;VciH zO@a;oD8wg}n+e#!-`uEgt$%v_)@UZL@c7sp5&;g#JsboO0U{6}Mhcv$SLWa>l(i2Q zh!G+}1U#A`5iv)oNC6OQ4Wcj+;8M(cn>Z(96ntR!1k05oEY4t?J#_1Imp*MK_;d<$Ti!STJH413>umw8+A`C$oLWE2);D#P$3l;qBA%3SKrcPF)36SJ_ zGrEExfJ6}kSkE~u?Dq$Yz=$DIp|a$`O}<8t;(Me_EyjdHgeaZZmS3g zw>(_#NQ!o&vu)Z;j!E0rOL{|fGtu8-`GkyI|4W#})DB`ZRqM||)$Bp;1!y=)o%&y(qm{|wv_t$p&(S3Xy{B5D<`iE-dGb1Z7){c#0iPr5i2B4VWQ9MulQh_RAA4UfZ;qo^J6`@NT=8M7@S0Se@*sNG zW_z6q331{cVTF4J3{R4wV^sJ+{f+@&Li{lCenDR2)kUu@rWM&P#|_7&b{&z75nhXIm${^jAE*`)ZbMRc zd0OTgib5*`Bf_nj8t!?qui9Y>E9w$dca1mu-t!bP-O2|WG9Q!{18bagr7P=%L)uPK z$DZ{}ms~+nO^pfNHu31IX`M~?pgH>z=?D>Wk5c4{h4;hlvlJK3B`zSMu*Plo9=F&K z*6<^?(L(?5o$f8b{l2^PmT9=ZexG;}y=-|qCaD{FiyYG&6O5xnj?pxY5~c15udTT5 zI@Zthn@IE*uxzgJxG9#V}U#>RpA z*5Xtw*3I&At?zZ!cdP79Nz*y2tsrb3IV!ypkCezYrn)&7568D@rlwb{YJMG?o}(OQo!0kb~!0{MJq7# zWN_RHT z2Lha`5?q=pDR*WZ9|-BDHCkS(Gq+farc2FsP;J$lHQlTdEbM%&!n|Dc?8gaTH{tI+ zo_juD)={riT%_C}VxRk8(v3G_#EVp}Yh$1+N-MyZhGi{QzACJac*YUk+10A4ceari z7uw6rTPNO|kKa$k(FGem?Nv6#W&%QsDyYb-3l}V&jxc)+oPZPSOs1K+^A)m}Q|x>4 z$4)kiCvir6_)P~Br~7E8?aXx@J1K^EX2hEAJ1d5B&=gdX)Bx2i#ZIrjN^=VuPP|)j z-Mf^vlDS@2(2{t2UsQBYD!RDPpwS=PdL+5<5^8>BX;YmUi-2=19%k)U5e3zEz92)_%B(R?Ck8^{d}lwQK~xb!FeJ^9^#sqam9e zoHvu~uy~@LZ#YU9u7@hAjhB?#aKkWW75+Zgg7qmVa8%Q-+W7GAr1~DOaqt69V0A?N zm$3Z*Ukb>~@xS$lng4y?#L4hq!}8C*$qL<8A^l6NBUi~fC3Yh^!@@MZ;4qW2O{lXy zCWF@~j5iargOwr}N+(?5T*Oj4ag(SGM~F_;R&kE-T%sUK>d7wq{C{?cx0$VKUbHTB zc0Bv!b*85AtS0w8{n515{YiIkS^N+B!#*F&0T9Ir@|Tuy2?E$(r=ouC0N%eMpqKi@ zWZesd0yv~If`f6?zeYqK){1}_@ka40N!($yC~P% z5y0=eLz(-7DI>p{0)bGa1dH;i;QmfOxqxH@1dl(1bn5BI761ud0IUX~+1&X@IKDj- z6vF~~nMcku0xV6Sf9E-hrOzB5FF?@*5GSDD>H$p`2Rt1B0m6ehdy4|$ZPD4;M7i2T zh?5r+Bp6UjHXp?~$#nGo>R<04m#bq1b|*kKubqFrGW8JH@nwI@o&?)p&B;W=ucsr9 zv3>$RKlKGfNcZT54~_yr0u&)cqzGV-4*tZI&0rDQ`Bsfr=1=tka->a?XKXrTj zbE|_T;y=~rGvbGUaQDq;c)bb0r)3F%s3#}|xCP)ui2lmW0jL*v?zWjixXlO9hsXPo zmje8~e7MSkX8NfSKg0cnYcqKD_rG4QDQ)GhZs~jeOKWL301FU!2aqa20sz8;2Y*Uz zuoq|H%0)DFL%53wkstv|0YVfawM>2i_)Y>P7zaUMrE{F%3Uf+7@Or~~`ZLW!fOv=@ z{LdcoO1f$l*-wucyB5GO&_9$pH``PVotCN4Fx!e1859t%?9{b&d+Y!e;d#Z{LTH>>_^8nqC6Np@$??Y^X%kAFs0=T zIEZbCHHdR#R}}DyrT{S77z83S!)Y+xg=>H|`Vgu=8#@tZN)HbpU9wBzsnV0T4>ua> z-cOJRAOGm^F7nw&GQP{4&;+h<_h2HB{)Eivls;x6$N)}|NC^-$Vgyon7v39T%!D8V 
z5I+b3Hi=6sfDY~iI*RmrMV9FJ_W6Vzh?ghJ`iD=}RH; zY~+A2`}i^J_RX@1(3n|(0l>J>tE*imD7*(0k-h98CgrnX@M%T!kp6Y#ft%=bk?;eS zYw$#uA(TE+<4=me3AeOEr`!6t5Lj=vMo_2!T&mP>E5uXr63uIUyo(g$D=5F;mphB6 zZ!|8y%XF*ER0pS1Nu@<+##;eZ(PmOpiz!$%(AHn!1ZtqWij3INhhRPBWu)-6(zLP^ zb!Y=CgRJ>n7Cu}H4S`%=9(lRGT>mV)yc<3b=GRlRg=&TSaiEQ%TVU_dC9z3}^)PC% ztllD4y6HlTg>E%6jUJ*H4Zb736K5u%DftOPjjNE4=MHtF@^I)n|t8ip)4-7GPK~@XJA|CZ*Qh z-zgcYE27hkPF85HC#>0}jA|j*HYZ+iG2JWl+RDCE&WBTT2hBRt8ZeJJ*fY(#ca=m%k>qUNvc50GnU7!g{oPd6L8UH`-HbH{%PD5hysxFh(VE<-Kk#<= zqN>W0I6lNp%$}z5wQ~Oj%gskjOYI^!tkV_qhLoxCRWsFC;6dYbLP1_t+>40{Q}Z+v zQUv)C*wwgX{8knjB{bmeaMG>3yrDpz1zZc`cUV>xyKoIAJFzUc`E7mY{n!oR>z!nD z;k#OUr?5#b3LK@Cl3ZeFT%F(I?&HAH=Gj?1z41#>Df82MOKR0}Dtt-fs$rad8I5XV zhdQKgdp|0Hb!IB%-Nn5|BX_WLGP_r>EqY%R+v$31Q`vlUwN-rg;bSXcRUTET*Jwbo zqlC+&#jJURT&btFg)Zp=t8<~-`tB+gD|NKOc2tsUxrTRo!0|3|+c`?W4|X^7>FTVL z9?=n+K947V^9F;~MfUO38H2axS$;F3yPdE|U6Rm3GVGo(TA?Z7*!+}&D58dcH-`$7 z#MVEP3YJ2_xhCGjcny8O?$Xg(6ujtk>9-T44W5ab$vsDuu|lri<=y%_dR>^C?d(M3 zT-)uZiIvQfv*GIjQy^HVm*d3gugj1T)xHk=)fWv`)y6@Z*`nnR0kjaR2H17{4SJ(Q z_ma~uncaj{D$2~`T2np~YmBRRxkowCC1(}6AU{aqtYaSYvR@ zVm_LLYy@6w4=bL@xXbv&(r3mkZQrF|Eb{a%$Tyn*o<6xS-;*QM{0*hUT)qKjz&(a;-;^TNTfU+LeAMwz%?*+1xzi zqZbbupZEcG=w8VZ9gMqsb$EIzds*RGe|7ip%T`NY;^+A{4a`nF{5s$X}NzLj4+q2))m5*6hd zQ~mmXd{4l?Vs#%PD9%Mx23w~qLMw}Uml||x)b<%tZ^WBrN-CHNnl&%dP*H0z+w5qt zk*|~c$aFB}TCOHfZLcGi46!|0Yah0a+XTi+j86mhhZ~*&i5d=A3KGJ;llC*yq+|V% zVo7r`Xt36qaR%J@3Gson&wD#5PJ9!;IN^YcydO1{TLVCgyjd$h_lI|9#S}&L7h-5Q zqkB*GtP*R$FX+)talSA>ag)Mz^=?weQRdeyS8-(N7rQ_oVWQnVEq*(ceSxfu+D@jw zVr3|2vwg0Ubrhz74;RVvT0Q#u=T(r zw<_;p4hv~fP+N>y;H6$Mn^(8f*!`Aa&?Pl?I!*&F z?rS7^557TYdhgOQ*U7!QlBGIaX05ZgWwh01{n6{3=UGRTJ?*#6){UiCEpybY(&t63 zQ@~nyN|U*kZ$H4$=8sslWzrkayQoOy7@`hL*{fX{ZhdXI$(~5OT(Io%DQfu~b{UdD z)$)h(!)Tm|=zP*a?BzX`9GDoB*v9p)^Q1@-H2Q(jl9i~cq3&{kDZXg2~&k~nQN^Fs1uk9Kgw+AXvuEF49G9*&}W z{ENZQd#mX#(VIWt1g-yZ&Dju9&VA%uOAZn(&z&yRG4l}p;zy6qU%ZXo5c9UH1N-xd zNNpdiia1%*#)Dk2pG>NNN} zAd$azv~(_dSgrU@vVCSaKXQFWY|;3WuSo}t5b$CXoYMh2041Y=lpTlshy4vQbG8hV zlL_m3Si0_7X*){@4Qr z6o=ovfY^~jtNsc9!4*773=aCt8l=NHl(YVZFUZW-L<>k4(I=Q0goI>*2{Z_Yy(}T2 zbTeXtr5p-H5GFL$hZu9tZ>Jd&;d9r<2~2>>G^nAMA`ptyCUSCqFl6KbTnA?nJf~Ba*#=T1H&T?5&ECu@8VzMv$-WdT$H!5*8*Wd%o?D<=tBhhQ-?t( zZp$^1L=*L)gi=664TI7#OoU9hnmGAaH9=1r0HzOXumv7M&-P)3ZZAUo9^&xdLo{5B zjZMVH?d=h)f!n>x6y%72Le$5HZ6G9k^8Yrw6BTBdu>THP-lnvC@r%sX{0Z=;_sL9a zw-}c_LIC{nKt@PNupqwM{=~j%9P$WZ<48a8UB?=PX|t?{J$B?vJa=}FlOs+Yuvj1z zs)r%s^2uBQ6ke5(h))WtWN-k-A%?*#Bv8$A>{Y{G-QH+?-`9Y&GD zzlnM<<>)<41Qd$UnH& zf%_*u;83=d$k9P)U!z_4FAtNGoOd%U#VS~7UMbBYh)pRP+( zaqf?HnCE-Z>%E8BACjKZST;(`CKLZ~?JSkbv&^5En?2&M=X&3Bi5=rpvfGXKVYcU_ z=NOvA(EGjKE+<#6O|g|JL#O&5T0aW(j~i}GTB%tN>(z~~*eHc1)75KC+$yH4B%k8h zZm6sBOp}*&m5_3ce9u0YzS7sM6%_n>O-(&{hb2S{s-i-D@XW57b^0|4G~t)Crs{aM zS(Pm+Ozm49wW@i6!DZW2ksv-7SOhB{eZ4%;kC+z*i!%``QCp4sPW(xl(Q20uvi0qI zAX|6P8`~7c=eNJ^IUaf3Lo_AW!3yxqE^RlS+P(@11Db~ymE)D{-^Ph9J;RW15dql$43R|lEK z^eXWND}DHekA~{(a@{I-)529`=S*Yc#X7>xU6D`7>Qz(XZi?QW)p8lUT+U3F{r0do zO_tm;?dq&yldfGQ{Y`jFV31dG%}LzDG{}iH4{kI2kcpO!d{V{oAoqnkm({bsL)%*I zcDGT8nn)j-Z>lU0wQFC-3-q^Knv3~M;VFFS7oSeTtmR*-toW@0aCutr&<9iOS z*SzuiI*L2{9;wp1cbG?O|7bNVFWYd6V-yyrow2O+<9Dg|^RlDKTURGShg#N`(qwqW zbxD~`X{odt^3$7~m^6C^++4ro_H$2_ak^N5r_8&f$o=>)0ADAY=mR_Su#|U#JoE{!W4~e-+&a~}& zm+@iS31z0DtsXpcJY^35)adU@p>JwFbf>R-S;Ut<2%PrZFjdcq`Hv`xM}}7EuPVEk zHOM6bd~c*{tK9K=I_^!^jP9FmQh(6#O&nVG0*VMM|GV!H~8BuW&5g|jD01|%gqW^vFNbRodx3UGm=&Ebh-7ft2ExxeuXow z_kb>6)uL?{VRBv+A8c=QZMZyWuR>g_Pq>#yDuC!!ap~kpilY1v7#dHEfwlbl9 zN8*u5RU-Txy?R_@lESvj#nbKDOKb)sZ^q?x#Zh z*KLKmjXvu|T1`l@wI5+Mi+0&>UY`|j8t}{`GFr%OqE#)m9s_(uZPV1iy-QWna^~2| 
zv8p~+WqGyQMVb!NguO{@2jU|>rWYM0sf1eAgA4qWaUPAemWdkN2NUH3+FqTga}B>{ zwhW+6ba}E^#5|vi@_*eVmgx=Udam>R71!9UQ!~3 z`l!!E8T?K7mzyEt)^ z`hk-hn?B=3V?N$uCHxV=BV;vwj;?87f1rpfQpp~tPLMMc9c|ig782)@C@B>zQI=D2 zu2n5`f_8E4xyCCT>A0wNxmma3@4fl>?EB~2@x#Wp?;d^j=D{%7OHYPTCcq|uN!4cp z12IRzAj8DA6D%PzUY2Hh792uC8L>9qk2MUYCdHA{rUZNtef5vI0a^xVtuJ7-p{W6Y z4Xie^Z&!yzGz!OPCpPF0SC?RN9U3eeDrXR}1eS3x)n5i-NV1S5!;^8+r-y?V^w5VM zfbO?Pq>CofM{(MvJ5uJ8z13i%*Qoa=0CkmR=UD>B3>}MSZ2)U05J481@J7G^O+M6U zR}Ys7(8p*5A|q6?XopRylp$0Ir9c7%6)sj$4vb>{<2E!nfFMIfNV&pQU>GbOA5}PQ zy)6f`6Ei%S350SW&pMbEqTIvCA{5%r2oBDj zBs>5Bv$X#oqQRwJb10OoI*9*j^z% z)>m;THQDbUBquVlt8?h)&^EIDCL{w{a^dywVL*m_`iQ1>gK~=VAJ+KLJCU+aCdcmU zH2=|EW@gY)^AXiiq(Uw)8Ol^FSW5k@MDUDYNq0P@fm~M)BOejkPloF!Ex$hj9F^e; zv7KuEBI*rUjnBa#N&t`s3lTK(j;Bx?AfCq47g{hui2fEen#O=XDN09}tv>aqMsq5X zKq#=wOhF9~0Cd1e7_BG*Ek}V#Ly{I~m%)M#50X=g1P4>W3}s4=B>!9d^YCPB$f)Qo zWiGOZspXGUjYxvDm*DH;|6qL}Oh2>|qq0uFBYWL*XB0Jkwk1yTqgkHx^Q>XK;eCqS z(8}*5Yyqoy>Jll~wyoM);LR{8cht0M`NKF0<0?CE-cj?k*jeg+5g^`O`_v}%-qraMzmb4{><35jaMfL?&Gw(24?VYKN>FAMYLuD#&N(B~G3}*_pcofq? zC7%8~v)KiDTi%)D3<@^htWE$puiuyYN#~w$kMv!MFTpj4rb6dIx8nLIw^-m{fte+| zsMqr9*;NUL6bm-pG9SO|?faWYEN2idZU85CB+~|Qua?2lQArfJ8w@L%yJcJ$nt=1Ci$IAuoY$-EBi_edu$^|2 z_kP^s%luGxLzMYfU@P4Rw47<^5jHqR&Qd2y*i}U7+*xfK&M>Ma@WZf`_kAVx)8<9^ zB)NAl(w7pw1+T&;1&30)ayIYDIPUcNxJT)5mlmq@bM6T@INqbncY%|2Nvf<>-+Gi0 zA|w1bY^nrPW!3Xgh_YOSoMwGSP~_+;LZVhC@jNe;hWdzUvKkXbrDzHCEHoo9tx zt*=)7cc1KnTCPub!uL>l;jLU~cZA;elEX%E!m8H&Z$PJ~Mc*dbC+LJ2w~a;G>2BY_ zdB(ql+19vUCeSsMdIzA{V<-qn(T2BI}xby}fS;@`a{kak8A=z5rgeszSsg3RhY zHDG7GFR*xa7S`eB&R#Fs=vd^q^=))ykxrHam1Zxx?QEF9(q#P=XQpO(6Vv@tG=1++ z^<9#DsPTiI$EoT}i!QFldfWQio44wTR2;|oy9S-7ol-3hOj=N2rQ&qf3H}N*6TkPy zwK1c+AV1X{ajo6kU?Ray`Vl_2f;m;o=_snHzaaV4c`cKylmnmRdT()&*iLHPdm{T6Un+G%FGylAt*!GvM-da9?2OI@CM8dTsd?-DXhrs4G1)7A#-%b{=PZG&4ESopP| zJRDzWVP%!(_N%4ho59O=%x&lL?~$tEYNSufdXnwS5_vu%j)yW7dRs|mYt4As2yDx6?l z2UV6De}lPX=@KeQS~rEd-YMN$kNt~!)g=hTuIBQQK>rTQiYD_P)I9^9qZekB@5&VdP2=tIA?rwopPV5CtiGR zzI}E+{BHW9&7ikV*^oBj9sZLn4jBJN$v_$ht`37yiiuq(CdtXPUxMjH5DYZT*l)iX zD_ISUh!KWt2@yaDVlNc<7ePpjxpgpT5K`q> z9?HLB6(%|z;P_$9`MjSUoK2*jp9-uBv4KBy!a;)r)F>U8TxsCdDX=cFgYvT&g|gnE ze6OGHltCtp=7*X|`5)9wK;Jz9*=;~_<52>LP(nC{C-O0Uz@!Q}Le($~C`w>qiBimA z6G*a#!Jt5dXqDMTt1CqyMCHSQjKM7Q@V$a`%c11LbmMx=2&VM2*aAYSp}8;SaL2jc zS7QjsRfe!Bqb5Kkpm>G`eq|j)7k36GjNm0(M6w7K(1U&fszmDeAJ?V=eWn!QdN@F$ z0#QOGKX`AeIRmtW0wd&K1kA{}UgNL;kOa6jYEhDaP(Zu?xI-~UgP=qy$ioipU9=x6{y9St{+yxUlCbf5D>Q3NcP}!hIU*tv^*iDA1q{yt zzi0Nf!24;QKG8KlO3UxQDIG2Meq7#X`N*x1a%B#}*@` z(5NB|;y#qh=L3K zNGZsGY2eVqA%sxm{^4eF;N%Aax?m*K_ZR&qRh*PST~Z9_hnu;$A3(_V&mC&y;C>Nt zU2Sdn=MFUvFBL<@Yc2f`RXmu`Q1bv-yPJY-*Up+vdYr z*gfS!VjM5*9Begnt9Y%?1`7*A+kEIdFXYU5A|hqnT6AVT_I*r8<3L&SJT;== zte)1aTH}0ZDk7rJvuh*lE>`>+nu2}8o1X=n`F@!$DUV^>pDXhszN*QwdUg0yB!5dz4;#PBE8rvAq{=w2^dF%A{|4Ss0v=TZuUg zxJJ}6;w=r_Ju%el1nyR{4Hmm%m z%cb7@pu>5L?p0;HrQ6#%j5YE}n?pvn#M{tr&a+GIC`?*OE|PB2=^LamtTXDsO<-+( z;2s!1P?u{BQ07Z{%OBxb`vekonLHypyIxFQ2>W*J`PI(h6}@&gdtNz~H7%W5y>Wi# zoL@1cpr!D+vHJW;SP7odL{9yDq#Y3nXNr?RPVao&R{SvxGxRam)_D$& z<7>m88o+uoq0MHEm})0?n%6qIcMMxQO#zov-5l*@Uf&_nRleIMptD+{wdQ0)TgrV%2KcM zP&}NR#lIR_2kcZU7we-m4>SCXZitIXNIAyzc1HbrJH>AoO4+(+Dqf#tA@U;+ipvjo zsmO=R>cMkC?D%}h-S*;2?WE26RMcpG;Xs?!NiG#6N1|;d$z6rs%C?VATkGiCa^`hJ zk^Mrb8@P9eO}VjtG1jgqBVdR#mfB(;A1;z&#UA{A`f%IkjMG_S0iyHgj7vFmyI z0jrBV$DC&_2dAXYqx)~IEz%6h$!iIo&qbm?m$2zlf%*JKn(ijW*7EE1En+M+N5U9g zwtY+qgCFj|iVY(kyXp3p`htIZGK4lmD^qglc5Xh0V+B7=HK$c-?4idS3%9Fsf0pIj zFgfDh60Bs0_;n7m+879*Pv~}Bkn$59rb-9<-$%<8parD*=c{Vr6Sr^${Ls*V75}m9 z754L>#131rCdMSxUx-CL5^?RnD~w(c?@BaH-juh`oAr*=#zvFpSeuhyOAfqWX01Aa 
z7hq8l*4_#ZuEIW8Hc$qtD|Naq64L4y`3U>mVgO4a9-Gc4a~vIaa*ramZd4-d({1PI z<-qgLo;(*4vugQo+oU9B%%8W4`Yx0=_PuY=k28^#+ltM{MLY1YO2p8{T6#=6j95Ug z@SSaWI;iXVg<>bu0UlzK%X0Nj*NrorTFUKBmX){G8Vchsq?r!Xaw6c^CtCTnkM!%g zjmNVsd*Z7jJSBZJE5QjJ-irL}^jOZSJEQ|%n7xtsi08@d)?}t%Shb!OnD{pBsJ$Me z&hA}bdyUqm*YR6MjL!ch4E^6qR#^W(1ai!*Z2u(;{qTG=&`{IRiUL<>pDCL0ML}33 zRY|Z5N6{b*K{P}`gq0vmBvs2*SP%qN>`ffZRX|V>0svL))wKQpgvg30ip-%YV!inE zqq#sK*yM2fOixxQPk7yYy=;Hpd}SJ69|^l9?;C^zj7V@z`d|9<$i!H^3lXd)~ngButPk!Z}GKA&L2snANF<$}hM`cf1~Qmt&`DnkGhEy= zCBaHniXSRwj!-aRhHzBq^E8Htv?`sqk!ZzXXB+5*DfE!)jM63arT=B_*B!CZP!9%E}c(0#PH6S6l|E9F4{w z0|5lM{N(+J%d5_WmN!VUl)~;Y0u7V*p0MaAFIDl7A>Z&J6i*#D!P9n*L+I?GrVD zRIr$^6u@(qiSg2v2$X{gc=%KkB4>NY=;jbWRD0DdRnC01LY2A^MWQ6kS27*O9@~dM z4&?rV~D7H9j zu@fJqVtqE0P;G^@^Gd$6#{J7sJ@6>EQU?`P)Lb6 z_NT<9MbY@8zjeoL>}?)-(9!slk{)%rKkd8TeLQSr7JWKxrX$tB*taatP9mbTY%GnTqb zn4QQTFeBGx;>J+JfHYW`+wK+ip#7X)G)I@|r>V8jS#QF7)}TV0Jk^K$8>bgr%%0&~ zr@iOqk+RQn`#LBMJ193s%wTe7?;DwXbLYZ_RnINztY`AcM;M^R)#K{WNzXL zyv~7?EN#5O>{w>AG_3XJZzXPAX?fW7TlHRgoBdZS*W>JxwiB=$yWk7C#kJe_+INOc z4eu_+yO#%z%;gHOtBaI^S*JzD;>8CeFu&Hqq^Ee~6*3n7hK<)HS7HA`R~b)qGeI+k z;aEFmZE~)6>2C~upQC=3(g|7tbGNm*`WpUSySEqbRe{)fZ1^yDt_P1F8KKJPN-bn{h)~mYbOB(N4R;sZMu6_44_0fEMx-^0^dIo@wT`W8RrT&5_Ny zWcDjf&E>#S+OE1MtINjG1Drit+RlfFdtoxx{WQVS+4OyX5xDV4Uc9gg9>fLr7DCpG zQgd+pV7o3BVWe1NdTf(#kCW}f41RjK^0v$73=COpDO*mEWrP_?SH+}8`lH@)pp`>4 zt70#r=*u?Ow0Z;$lXL5OBd{}qJWVZmBd?LxDR;P@F6~pGQ>;R_(bxO&dA~M#ns=O3 z6gD!$Vty@dnO~Cqtoq){+kEa!=Pit?N9P7-;j!kSS-Ebq>7{eJ`f-J_**oj99!PFr zFeCvIa%Mh0PjI9Whhv7G}a3**)GBT{sUVD9JiI?=xFPX)=`Nqr1 z-RjzoC9Rr`h^h9Nudjp&KO)m93t^TvE`p+2pqWY}I{_Dn%?=N#_4Xi}>N+MVCv#(veuW6`pcy{x6-#8Sg5|HH_{2MC97QQAHoQ;E9d`GsFl2|9Ys7H z!7bdcZ=)bq1H%PX;c(N#J7LkOFU3qnjW)Z6^J{wl^w1YV8s3H9E-+gw>FSBjga>!ruzW1MJp5kC8Yf~R4@N!3h_T*;{JKQ z`+uc)1WcSiw4eX|Vg<>O?1lc_UR;W#*)F+FV`A}V5`)=PX)SFOY2MIcvU@P=4z5W< zJhF2fjWoe{D#Z{f!ngxzO=>wZI5=z5ihkcWC{=6cW%J~%*DTk=Y?`z0o3qbp`sSO; zuBlb@ToJQ2pAp!Un2~}7(%#9WAIC-wAZkD#VSL;bAq&p;i6|L9PzG(tUs7)$OeTI3 zjRR)P8gLS@7$_BnP5^)*lY}NAm>5W%CG!?^M4wJ5onaJ&MnGOI{w!31P2c^Ek&xYu z4N{??97sXLN4Ok`nBQ<_g3sz8GU z1-6D&8K@hrUkNz4fE>I=GqkTE91f`jGWBNRjhPERc^d%So5=!EN|#XJ*v)cjFq#FN ziRlXlsU2>|#~u&6Tc7Dp0|@QE4R9*$u%b?5aB>1l2g(>_%OrCI>dx2=fR!-~a)F{sm<(Gt*qe!$kU&^q&%avH*v|Dlzp$28%wB;2%>g3%4FO=VV!)aqMf_iU zGKHT5kK+tyfgsSNL2fZShc`-icT;JZ z*)rf1ANASaSC#~5j7HImHHEDsKN1Rf)Knb_Pn{o^Y z@EFDo0NHF(*j1kzg~n*2_AdZLo*gg{%6TyuB+uA%X2_<*+y=6*`nc5>10G-<$6 zl26DNuR4o3eRTX2Xhry$b&MpP{E>ShDZZOWWA8ZNSX(w*@yBq}rl1Uj?Uk!7j<}B; z?l@X&Ocjt6?Do+4GG9f+hqoFKk$h$Q$7HlQOZ0}DUmE{dZc4-Vs`99a2rT*F zp2Htf*@TdwnRl#Hx0kceTVnEk=tGw6_l~txw)cw9dp~UOnFk9=g^Lbq#=PE~7MGeX z+5unGU~x_w@-^!pAy&rBXWo>|?V@%5b*{}@YT)#HHQYyfXBM-B7T=G?!g-{Ml|%uh zWnEIZO00`r`%1KuY68k?EX`-gTVF;;m+%b~Z4E5=V;^4}IrQa;SWOD$^ucXTS038d zS{zfeE$USBv5WdJBgznQI|KG#a4vRbsuyt`Xc^uR5Zy4vW_S}SoZD~8;Vw^ZrV?r2 z{)9MfNaVmvr0Ke`R=02)1twNhQ(-4rW!&EU4qt94QjUmf zusm@~qbZ85x$5%oJ2k|aps8AqaCOd>&4jyqf}RdecNwBV9_RF`a7RzMwt6lQ)#X}g z;cuh7sds+>B(?5~r0BNgWf(@hgx?Q!EP}cxI8?sX8%Dq;x#!{+CpW>BuI>)0B|+2a z@Q@v?sLWV`jaB9gm=fx%KhFD<&Uw5L7O8Vf!2Te$DHDz8VN>tv`K(dOT6&OiI?ZwA z@t9>IpZ9O}=jqcaGx8`bB(QJoWb)9Dw3G5ldYpX=lv`Fh_q{ylCt>}T`;)m?$FvkH zCo*NXX7Zthp)05kJ!adE#HzsOWJ_+6C(WrbLW#=ZZG`_Y8jS`!qDm-PwP4e3edj_c zo+az)Hr(@DvPXqWqV1dAaIKVrQ zw6Lw4C;%? 
zRprzZr;emc)=5DR#uiL7D!R$V5I?!z zF+*wj*9cVN+RZzwDRlzB__ctaVOd>MpPi_f&~tMjY0auv7A?x9aReE$j1_49^T+gN8yGE1v1x@Z&B5Z*v+Q z@}pGghGRpEN2`YJaHl}7k2)(X2ZBR0N73fpTgMIJe3mU@3WbjW^&h6zZseu!O_O(d z)(tE}@As>rJRB^wnD2agO5dZ0qe)k}b>@NPRIsq8xwW7(73`02-5vtR)XknE{uNe$2YG^c9|+@}?(9KPMhj5bVh+5GN;*5mNhH zE@IX;3=H?ryPztF%ixk_t)j~lFI!*tkg$@UgPKffbfDxTk#aYH7#cPG*07x7wuQJ4 ztycF%vq;_;PEfm^udmP|s$PMiXu1xUFsDJ_;sRIx%N|PJfrk-XE;0{Pe2!FE{cT&@ z)!N!acc8g*#8D}!VtRJgD;ORe5!C=I)X^x6v=M+rnYmvb9Pv1WKffMQef_5^ZqvRS z9JXvv*Oo4w%W%NaZjaEc6K$zYSa)%qIS|b~PjrARYLtYKEr#ca8BUD+_deTb^t_qSVbq(y?R_@f|Ep6dRIF-tC&j*1KP$v%_|gzZWAU1kBadw;X5pF|uOAD3MvIkfZTY zEcdZ18h0MLVLoJ?ms#A8vt@0~9rQMRxfQFp95On$)XII&-F-}aTP&}KCZFdVbQ&*TnhR%ik`r2t)xY0{ z9YyS8(c7dRbea|Vt#ku#DMCMg4HQhk{mtx(A{9#>s+{)a{oDJ!+O~n6qllk%X0-;% zbv2qVbIuND|Ez+-=Bv1!VF^AtCqnvx7qi9x5M_XJ@LIU2P8+!3{~=CLZq+y?88cCR ziG!I-p?GIW;JN7}+fZ%a%6RuQ?M5Wc8lA(s;P&OUF1Xg# zk^UQ0s7?R;cXOw`&86VrM?4NAnop!0CISo2#~*eO--FezR|M&JdcvN1jq_-g75tXh z?aIR5_kT(A|KDOHj{nW*{Lg8g?f>*z|IaipMkz%=L3t*!GCtwP%mLtF0~j?mujpeTz3<1(-375}Ck3T@ zhO0Q>gUK9L+*(f8<$8!I&~ z$l)d-cGH2{m3ekDA^n645F{V)F#%GrT_B`EHwY{t`VH&R6$sJS0DkYx&3;SY2r;tx z(Chp~uDxg4b;l)#7dDU)HUsb!7Zwyw2LllUbevkRV}~)v1qo2+&co{8uajX&YeRl$ zgU^r>BZB``NOm@any=B&KR^&PlAO+E1?-4lUe-oFxjX?aq>dmy$(9I5)acI0%!{?} z-^=v^KXwEyevt5bWyH_j3&GsC4+{%u4h|rqC`9i_vQfA)>jj_Xs05#ZA2MYK>;#nv z4XMvb{1zX+tLP#K5IYbG!tf3AVLs-^{Vhq}gOB0>rQu0BX=5AZ*AGFaQz( z{zr|pF7&q(FAPQCH#?y|%wANu8_*UAJ~~DqDLVi&5HK&%81f6W!NoHsanLjn|Ba(j z0oLc*#`5m@^dFU9FLfnIk>E-M{L6TeBZ?xwZhnG1AL`(g_jlw!o94%#An$b31Bq!g zV#atpl4o@a6^A3NAJi9!1K>&J$N%1D&n3w|8TB@ElUt`d%Pprvg-Ug?eb+iQ(|6J! zvZs(nzldl+-xCdXwAL379}RO~MsWY*Mtb5mhYy7$f=?d`KGwwrh%k?0WhKb?Kz2pb z5l08IZ}UR{l<7t27EkIgj&S*fZ^)PG55FciXpKM_2}T415XuHdjtxg52}U)~1{Vu% z$j{#xC4cZ?35d?zzZV_@V9hHB5QeH_D$AW`{z~r!Lb?Kf;~4Kw1}F8g*9dr=&6iYk z$AOJd>i!x2;?;Cod%}oMj#x16)YMRh^&FCNQI$nquZ`r7*{b2CM!~g*W_$jFsZP&q z>*HS{H^CP(lp1#~+vtFawy>m#^=9@|RN~gM4OGTd#UsW^YF{T_wsYW_Av(k; zl^-s=$$ZPUgxzvYr9_5F?f_tM5rSHlt1{nkSPcJ!u+?3OcUtKtK4iGROnV@>jC4M> zw1VDQGcP@bQ;cnKb11Sio6-Ao&Bp;QdxZQXGIGq7ZZpL5NUlP;sqbE)?XeGe3t1r& zi&bKYX;c_0FS=^K@8JCuUDlEHj@q(VqGsDI)Z(^%yMtZ+`-yQXlym|QjXmb_UT=HP z%gOa(@H9sW_%84hh}Ye8uTlEa9qSJ1wg9xK_cD}i7#hoPUWoDl6j3xbQn6?-I9mTwV9}M1DXY@s;vv|6nhH9L`8LHM?Y7w%QRGTb<4{R{|h855zooO+j70@@!{a- zd{$(>d~RxKZ*EoXbh&uf*Esklm9UgQgs%ZeG}(c_6@XD-&#*nkhDj#}+WHyUxvvl6w6h@0Ru)iK>2!%*dUG&6$u@s2R#ntWKNVD*j6g9H2ewNZ7t@%6AD z&4SBt4SEr!tw)zCI@+wTA*~c9JIu(H@E*LRwRE|Qu?vmokNR`7&9LpuP&!aiPr@#C zW3HE{-1x(Iw2QAY7M1vpLMLg1cebTxkVAh)s=;{PJ$p}SJIEM8Tvcw@X)eF$qu?ZxzSMMka0uX=P)dN#QB%%SQF=>P>LKA7pfA1Z?&vDeHeNPdBeo72cNgG=E`Zv(Kb`2tRuKb0qG4c@gMv07hpw!wG51#W!g9H5)s>QJh zi{sK0-}61Q9;(y=PpqAWRLVKj-~OzsU2heC^}bN?XgQx~^F;K2!NdQIWP&Y&Xe1ki)$#*%_&e^EqhaA&{z3&el`v z6Fj{#2M}}F^7Qr%VNuxC5mhx>xjUN=w-&WKg29w&y$H{w+wOrSB6pE)wg=OK{u=MT zbQ>0^`k@TD2&jmeR16b~3=D@{&fCN5O*)0OlZpoO#l_Me`vVOssjd3Xw>A>BjVztK z@4m$!9l|SB%(ffjsfeU=5)H~}ozJtOWA={45f)9`*U2-wm#K07di4!KD(YLi=|#n^ ziGfk~SKyitQo6KIBOHobaUd3A{&?STO8eTmfemTFycE7M&x1f`&4K0C-^NpWb@etC zfh?#mq0_`NZ-Da?yzOB$IN?Ak*fq8*PHT2|hk7$Z^M?{Y3kv&rtvBC}kyNfa&a%g} zTH{fp@U{MGHly9}RgtNhcFU__Uuvf6*w(vYo8G4PboUm~^ywjBDEB$B&R_P;ON}3< z-+3s!t>l7Sf@u!fsw`zG7LZPIL&9CWCzmTnsmLwXl@@Fz^c8EiiX)ucxc5qYJZ?j{jR6R>O5r@pPl&D*{%}Cnk%rvY(iEK@(-(k-1Pwk zL{H80WxY-4{C!Swf*O6%GhUTryN!A~Eyrm&Iwc**WK54{QkzJqKDSdG-g&&HY;oxr zxCO@rpi^b3z#j2(f1j>p6{qXd`0u|zN$OslD{^>>I=?5U$9AN6n*UX38RP40dJIpd z^R?;uq2(ei5;A!ublHR;?#q^mcQQ@)T;9D$SU-5k#P18cD zS1?QOxeP%AOpJnARUb>R@=)4rS{P^_9#2Yb8UD&@B+m#|qTk5_TJQgiHC&{^Q1oOq z&+(b6VtCc+ptO3lbRKcAv7oSNdWx?&W}y`7vL%+ywT_fmg~0iI6mJ$Bgr-OqR#~6Z zTMn)=w;p%nZN}n9vbg!LEp7 
zvLkW^0=>CF|!A7TtkvOsvr+hu&3sgsaMfhJH1OM+nrT>KJXJldjSDz9S^S?a?Y}RAQ zC#pWnAQBj0$AlT41;%kFy5?wiZ*e3-t7*}EmoDQLz+h7P;A6@v(EkPk1RQO3#H&45W05avMH$))(+`4$QZadC^N-loC z0-;*@mGMX?Vh?dB^9Y;(J!xTYb3uJ5j<63z(0&+4-=uCD;MgF$m=D0?1&q54#S)5aI`lkZ+1mefdOb@MwQXPKFXBn9N?^g`{c43@xfa z17Q_Fr-BK4@N(#WkYF(H2=f@yYxU@w@&E~g#vot<&8XjTAaR3olURE|Z+W<26*a&3 z{Q-~#9|dS_b`dcFXvq3V()x>T2Y}Q80kZc-{9j-=iC^L&_T`_108(*4ZU?@YZmoy` z3ji?#$3Z*s?ZA}w%$(u#1Rm^eU7%8@AOKwo;mMI9@=|#DzT6?eB#an71%2mn$iLwI zHgZRn3z-{;6s3R@G(L7k)c3$U~T}uml>;x1@1cplKcWh_W`NHhW8lQ zqX7LDMgqXh0sb!EzYG&AqJKk(er^n;NSFcQzf>{wh5OiDz2C5;Zj^(5w3h-$PUH#t zm2g1?7C_h&(t&Y^bwQ78oo26!N(67Pfcv-|PF`3voT)sY7a zp=|H~#J)Ad4ixoLPQs=zPT*q(f5K^gfd~Zp@3#;%HcJXX&F8x4{f|^8qlP-;O=bQH_ zT)fpJB3bgg-Vj{ggoVQK{$ouv(;?K4K(xYqW^RAU1aiO!uVirlo3ebW&9x@ZKKA&g zX{32sg`9QpQsTOIOzu9$lh;h|>R1Go9o<%lk4z`Er`qVPR?thTeztM7eD|%beE9bk zu{Iyo9#Q3XJHl4KEt`e>Yk6%`y6xScQX+64_qw#^NK%Z4_#W@GxQ;iXkp+d_1&pra zk)jdsSAoIcQC+p$@5Ie(0wN|$Ik&eyubfS|$yyAc)%?bcL>;~NVZnuN`l3i5JV>P1s$rZ_ZJY zBF6q3CBE5oH8tAO>vgp)W#L}!3hNrgaHPgwtF^6;-JQ^MHRSJBs(MV3Qf`YYAdqH)vmmR)bcgBBcjU_Hk{+47p>oDzuA(Q?}`=}z=sC6OE zxN)dt-as3YlFuaX%qeUyrTF|jYl}{r0(Hk4;G!|?v`a$JwL7oIk?NE`hF4+Iq;Y)t zdRyj5wX~uHGOmfunuGI;tPrcyQp)mZ&dvNYTNRPC!AC|?wP=u|MW|rWmBHRTN}H;x z%!Uh~#2v7B15SmCd52KeFV*i_eBG;)n@9$|{4Jw1*Ru8@yRmfUruMqju65m z2S|6hdUjseQ1jGY)|gpeF@Zq_ttU2s|J5N4LE4s zn2A{k1T~)G49m_ZA8S%?n|IkBX}R)2&B0M!Z8MXzKe90NL}$8|N-vIKd8@PvCLb3= z=Huncz~WZaxNO%_$Tsmnr>ZC!4ER^JsG5FO>YAp@2bbusVLAX4}e_&grv zin0y7#=V7&saq$EJ-{pjz9|j^CgvpmStt&^bY(-Tr+Y$lXlWl%#Wr?3hRVId-Ri^B zd(knsU(B;Gmc>DvB=APr}Ss-Vpa~tkO(u*%JXJI*167m znP;P-qX;RbjP-$uqnAICwk&EjU6~}AcW(XtAfb#&PRtu9?*$Al*t{DHIMg&(SK0l! zBAwE(x%whr{UbGR+&GFk?V>I=;mfo}J~K?EYMOg$yc2l+5d@RMSTT-4cTqit%Q=^^ zR=RcH=TECi;PEb!waUN5N6wsF%}(NSacJGVPdBYo=!*n+tjf1ro(%t%sCOLG;}9fh z-;KJjwKfkVJ%twfjK;A#>$l{4nw@TZGxf?E};B@zTbiQ5Xeawb_#h4kG}7nPD|X_-4LE&Xiu z6PG0}GR&q+0n_`f9jtAbmEIqu+zvX~4BFfLBU29Zdtk-nmuzH4mX5UoO_ZZl)%a;z ziXglp!uY2w3yWP=BEVPspw?D}^aoq|6@QZ@%a*gVP2%DS&laU{Y6Qi@jw#hSWx4Bq zMMq}tm6_BwSLzBydh&T*zMvH?p_SatT}+_~)pYuCww4r5=;X3UOJZjM^*ok2$OOtw6d3Lzqa2X*N$b5u2DN>5=lQA-66Zx=CsV z>V-2lf{XbisxpXV(QyvNb=Bq5@Yv`kdpa>SOcF8;GPdXlUkc8L${MW^o-iJYt#!fn z#hrW~g7qf_i;Z})qBKgH>~xttPF){`ITT&K1~~(f0}+#1Aq53 zVX$Bkq;_QK{Em_lp|ih1p9jkHJxrht(Kktnhrf6_-P|82Jb2<>{Yb?mxkPf8IN?n3 zk|#uO6M9Oj53o>MPfv0}6C2&KGQ)02N@IboxmG%CA(m0=2(yWHX$wr)yAsKXQ)x}} z>(cvWYd}R;t;QC~7lVikQm<_S4UXfOo9$Lx;%w)fahMp7MPbXyYnLM%`&Z>yPFxo{ zkx`Q$b4r#+lfM5I$s-eZns`bze9LycHL5CvwrAO{k^9f`^BP9Rj$g&bKGCK$e#wyC@!;tv5a zze|ZPp2kNA#O?K~7PPhrC(&wz^qxQN5*beI1Rn}U0f3H}8eKu09F?|}ThU*#C-O_*fZ3X>|FK>udBBNBGfBXgs`sHak!MhyBS&B{Fo z4jW<-NAsi96&ocTO%cIoM?4wr*oT5-1pJ4MMO_R+&|*aIL^0S7uR~E{1kqPZ*w$)5 zh{jC~(+5E!(z1l2f-jh$O>AU&FUY_R6q)l68!N+Hv?zh;gPvJC*sZl2Jf3zOJQsuj zAkt#0?#KZU5e~jdetb501(8CGG@y2xrdK$_oP!qJ&|f;-T%JL&n|w~P4;KU;3Z)?Y z%Vx(Y5{Stk0tFEtJw6ZpP{;*64F0yLOM;(Vo@E7cjsOBI?keL8iWr=tpF!gQ{0BZ2 z0uVA&0xQd#sVZZi6T`{1ZwU_zh&dSq2qOT$v*O@?w9mZ$A%4eY5EewAuB{KuWYcO5 zw~qQ-owb$*sh8Sihd6d!J{;im$$Ma%-+QRz>bbH+eeQEJ(>;pPYDjU8E{p=fyLD#uhZba=%AUminYh z(^fgOD{$D!WNcv_&*U<9RoR4xpOx0tKf_{iYiU(`?F)E&$4X;Pt;^7j9NPh} zF+WIuUG*Q1Ty1L5%0i4)aU(ai^=~=hZHjQzR9Dv19qJqHQm0Pl$He$e!n*%)9eAcS zeScHD9gJCid}s6JK%LKP#5H9pU{MzBzO65Rs4vNIP3PV3ply7^v(q@`IuG;TY6p5R zXcp)qqp5A_Dcv@g$mnU#H~CmT@I?rfi%!WeJ>K>?Q9f&*@9cGqTTqcLX20gNMseOK zToqAZ_LSGFb2_MY#F6mcG~V3W@VHmVk#5Fh`gdcuhAPLznYG}u!GRLaOD+KGe4S-gBF>A*Hji1Bu0O z^Y`5Wr9GZ9bJ~bsPi5%#q_?+*Y%AuZQDQ%^fiFOQVkb-s8&lkjvQy z;97TOkCUxqe&P8(HOpHEd1b$R9#0~AZ0U+So@0IF!{>Rr%R;2%Z0+{1RkXQPK;V1v 
z0OTXZ6x%q?y4`nMep^dd$Z&r)5Snop_0RZ$?nUG4!AmZs(YB)dp{j-1Q$p`Tc{MLhCQSN5z!lxlx2)t2)>m(~ky)0<`@h~L;>w*{4Zk1xUHqXU zq4_qw_qR`@?_If`n3CTY!mjYHrZG~jWvII_?48AOkL%RS4H|u997PA`PDYp75y z<=m}g4A0TqW|RiG{zf0ez}Qu3U5#Eixz#PhAjkNf-@6p6u{>XL-7|;xqOo(^wbsVH zu8sll^+d9vd;=cjGUff(gu?OPuvY!2gu=k`ukCXN4vzl<;{PY1uvV+W(lpVgG@-ny z4GjzsGsP2w$WIJFpi0o==i!G;62p)(*)#g{!{oh`kw=h^6Fw@)N6Hm(GgHRyMYcpN zs^6%L3sBA`4=qQuBEeT)<>WMY{x5q~*Y=OS3QkY5h?*|wM|}k%Tc^)@0GBkUsZDkO zNQ)Jc4hPqVUMwiqDIH2e{$~hc!<+p|sDLrTXMpS}ip)YinI4+Gb!~s0UfsEb|NS)ltWbW54^^S<8Aw36#=ph4p zK4B6ha}*CWh&;r5Eaa91>7odgxgx&pKhy+u`pE#02aH;P;>^}(*js^1z-EG;hU)xXL0ZO<~62`213Jw@V1!gtU z7&-j3k1-7Phuuk?8}A$cfR6yAM$#D!KK_?{hZ-*K?G>aQ#t}E%!xPCv-wy!#sQ(}A zQ!8Xp?~83lEqulYBYFXWb~XHs0j=Js<36NcCKVp)Z@>%6C~QGVCa-T{t7{bJ7d{bLybmS; zM`6xn%il0Dx{?0Geqj5OP|yI-o&Ispy!P?Z(SH!XxIVnC$vIa_3)uuJKVrG(`#4(R zG`^CggU7ml7h4)Lr>O>=DhLNC+@<}4Oc44FLZX5mBMCuc>h2DZ5;t=M9Tf1q*Y^-y zIjB&D+S*>X*=lP?Ot~sv%}=lz3{U+CBLM0 z!CF=)k>d30yhqLqyVeDx2gxDilP%V+tf8BXoC`bWt)cU=A|q{@=Xo(TYsy8fACou3 zt!M!mgi<&gX~KbY`3(i!^2#XM$j&>lJJ0+acyWIVQp*=5J3xgLth@bi8*zB0 zNYB!?E%UZfuk*uwYmO_N0@ObTy3K}wgq4I=H&q6SyBW10CD`3sCuWCQu8&Gh+VjKGqu0{hGFklZLCIw^?Y|wohN6k=jxzI!-ZUJSc&-~v zXT-pAPYhJQUL}1DkiUj}-Id$LBo|SHR3}$sqYHKLkZo~h%EoCLecj2aiVsL-aky0( z>R$D&_)$4{7F{UoIt%w>riM;iE@W${Ab=@W)ph$KkeC>4wX^eAkUeJ~WcKzLi!rV4@Z&>zZ^VeOd+)Sgw|)MRppE)O4@|e9QmqN>3xh#lKHUZBxcjwJN zJ);?Id$0UGQ#m;2{ybUMv85?)mcA}yE*+Hq#>BfjSfQnIm`?aC4t4=NKABvV8m90q zpJgqGwCn#}-Gi+Yp$<)LK;y z?<4%D`gAUP#z?^#_j2atlB$N93b&>ObmY4e&7sJ*=Dg?r%F*Ih>0b7-rbSQJr?gL5 zUMpeP!lhS>5v_05A8VcRvHf>FSDTy6o?0%wIGxQ~W1F1U2JzvPu4kt6$|#Op_w($) zHE2$$b`&~xy$km<{5EVS#RaGK@5#ySk&WwgDi8RS+@-8t+M`G7ma+O}ciklI6}`s3 zq$Shuz~^UnF638x-A3=v$I*v8Gb!H2DW15o&@)u$_b|}X;vYq(!mKD?#{^a8IuV~n zyQsT$KSmePew4JVH+{`&?Pk@-aqd%7G2OEn{v4|Ul*#6=d55;pE?i?R^e-MXl4w!@%v;zT9|yp3y@ ziV0W;sDbuvd0ue2&P+?xk#f4=7U`<7(HBEW07W+iK`l8LO{y+8p}>FY%SXojZDzRni$^_OelLHs1RlnvP5R z#Xl{t6TJ6H#cT7bQtSB$uS*rW47RlMCE0ZqR5aywe1qeGVw*aR%x2F^(ra`xHrpiH zOBqoQzKHbaSOzY=nq5w37VS{UM9B=#mg4NNvy8E8@4?X;i*Y_&J!csVmYeyNG_S>V z{Z&cl^#W*`JG0vb>`}1}rA*NwuH~8AQ9Yao_w=<3uGjY8m0fP9ZwWo0Sa|Os|5A75 z_-}Z8SpTE+lHva%y`*QN|3AlT%1FnksF4v0@nQ;PD6cBD)THTxc2!x0Th%Cc?>QU5DA(d9mQE+Tj7^ca|4+cs97&i=4fAT;5 z5F0M+SAqqI5iYHgKhYtGus1Xe#0x-c-65!%n|6mE-WOdg4ih50koZQBNKip>n%he4I2P^|aHNoQ_*`A!4f%G+b`y}GVE|Zn1cC860eBc8Zhc%)1xYPdtwg~R$U0?4 z)EGr;=!^+MYEC#M|wJzRQ0&o93kcS(~kPK`+&sQ$wk5O<9BqA3Lr zhi@f3LfQ~R^apC*Hsreoh+m{XjxTFqLV(@F?+xOh_alG+0_uhzjFoL*_9N|q2{DHV zB?4ik9}yizx_RO6ZSp_u@^inrBPdc2@U}*OQyu_LMF0p14V&ZQl(uzyIl^qx^qXVx zhJZ>e=>2IM3n;$*gIj_Q(T_R$g0Avb{d@7l80fmG0hvPmgA&olkj6y=Xh??{H9;{6 z-O2$>@a?!qC;?Iu_IiCYQzvkupL;Y7g7Ags{sDH}i&M`{9uAbqx$7`AYEGXF5h;qm zASy8c7R+$~*Nc#(tj|tKs+@Lo445W}l>+Ol&O&7G>mk%{uUrhI4Jg%%;FVM+Oj109 z3oj^SB9HxAStrt(#80mc$Igv~Paq)y16=SpwR_8t4RbOOCi)APrq^^AM1&4dk_F}0 zN#OKsm>4;LHA@WSD=W5)9|L8ZvP^>0qd@Uoa;Oc4=z{2+x$^kSCniCYl%_NN=}HA- zX-#38j@bkq#(}VnLKCq&#Pu6Z+II4z_FLWm99tHQ1|aP51Wxt9mggIrz)p; z0;v_F#OY$__Ldiw;O**np|vYF-byn$u|bUq)%~~P)RGgFXJtCDrUKlq^34Uz4JxlB z9QFOy^RdiLxDsc|9Nx^A{m#$QJJ~GqVcEgA&W@|OW@Rdpw~nReOf5aQmyphJnl^+? 
z^#h}C*Xe9OA+XTOtZ5aw#`Qms{qA07pX+NOO?5odwz7gfSFkN$M&#a*9a z1DPCau<&FlHK7n)4ckPhex=!V%^OonX(a1qV|NsdZ?XFy7G$^;6tr`B@23N=(WcL6 zY6UWFLNjWr)})W!-!l>WSbOWX_>66DRdE*AUmfT4S;DM}yg9~PRhQMg#43?<1BN9Ys0tJ)K9LQqi4ID?SQ;4YO3tF_j9hMbWZm37+Gcv z!kMh01}j9PJtD#3M+UHgxnG)Nr4?JPzx>|#d|0$!mNY%D@G0kRWK|d&SG;I`Lg4K{ z*ZKRXnrt&~wvPNR`Tci+>RRO2)iu?y9ZuqV^#(rgx6oS0sY^lPh|R>U%PW42pNAG`s_siXoeU8u9}4+7CCw?v&Ul3mb44 zquHZjmhXW_YdBB%x9#-OX*%GtM8WpOxmD%??yWc>BS z5Su4-gesRVzqDN~?Xl-_W$dn{oNbj<=;<4yoiFYC9M!Jsignx;&p1l9*J<;`Mx|rz zreFO&_Ntq5kSg{*Uj3LVSD~<5XZzF8ZWMdXd8}K#*k&80pnKS<*PS`1A5!|1rWt)~ zthc&iaAu)%vGMmHea-7HZ(SSJ&o(c%PV7B!Q|)s~p;c#SaMr{F?GyVwB2kXdw4W^j zW!omfAe`=ZT7m+H@HaBCcwv?+>H&@jQE=;pb_4eau4rte80bNZ@4Xda6XuRx@ zS@N{)gAH3w1a^+^fxDc!g;Z&{@>3`N#oSn2(z4WBcKun#^;Iy49jjXEH?zg-I1Wu_ z&i9W7t<}!Q^|}WzUJ(NOU#=7X9ka-P{6Asl_}^jUKdot)nE#7egk9MkOHQww5v8h0 z$7P^LzNjtBV3I@!9hQgzvqcz#$!vIzU5v?$zEbP7pa?O(5=uw-L{JMtVobORSx=IJ za??d^%~HzSOx(tMyJ6yMuh9E=t)u`8#W5d5q1=d)}aBis?C7J>|0mN3ijwI6XOMg)M=&zjwdHPit5 zgc;KjB$T2N9u$ho59=qJP#F_L)1p?Z;@87x0aFjD?j%Cuv#zH|bFVo12NH6FAOO>; zV+R;INSBc6C$;s$rpkY1>_i$1s?;XogA#^d;hqJ;2pD4~ssm>k6hIc3{QUa>)41c7 z#Ptc`LliLhnLxyZI+iKhK>cAAR*gZwb5&2nLe{b{17^ye`Ebb)us?IBdO`lWE- zet{BHX$46d8ui!x#zz@T{!L979F9gIN)h1)SS{aI$KVU=Wf70h8HaC(4Klz8LgGCQ z2M9@x)0`Ud8x#>#b(WkN6U2P&d_fJLK^!tC{J{u?7jTdf-D2q=72d9o4 zUI^GY3=S|%v=9(A5!SFrdjD7^P7iPn0wQ}F%msckW=fwS&CkRNoZ%F}a27Z;9th#C z&S32r(@Qx55CW4_^x_1fEq+(aa?ujByYe(vA6ErVSSIb?M4XAIA>gob;zFpn^x z`YndLJBw`{O2bQ=Kx7E6O#01?DLf`rpdSv;Cfp9qk9JlI% z%te~{s}!Ejy?fMlCdy4#;EVH(LRW7bH+j zG!N=Ng#r8FbK9fumgWBgrA8hIC&AYa9s%0ROKjZpKau8F$%n`sIuQ@3 z?s_fMQIzX5`|v4|XHlCT-2ALvmp1F^er$DG!w%~)(QXj^hP(fpe|T;iVMo)BJl}F` zyAsTSGaT(Rg+zvP%C!$^>E@j%YvQ|n;4shgK|5ct-fdBFfBV<=_4?C;q9JAp?e7w< zfMww(`1)(M59gS|7f!a$`nTJ0-JP?~+~rA-psT0aoA32*)7*9+k1`n1e}=}=Q}Y7{sbT|2eTi`Y9I=t04+;)KJ5+j9Sv;?Hy7$r`sJ zlHsn6_4G}vn;Y3m$(yf3UBycQvOb>ddfywh>NQTf{AvbLqM~_xmqr^`jWhP__AJK4 z7H}rSal5=1|Gf-}DsbC26Y|L=ne><{ua}!tID5P(STAQ>HIoE19i!UWCQwTAu1vL$Mquq^%e`H;e4t6^R-#6>1 zw}akFo@*4hcQT*5xWW%)S{){KTNkh!3P|KMvQd=8V=7p+L`rYz>`{u_g!l67hnnk%`K$u?Ev9L;CS&ME>xO^hnxM4Gi(qv+zP;Z{!0Q&wN6g>x6~ zgr61OGSaX_m|&8*sK)mZ`zhM)tg&2Oz7Na#w_3)#N@h6J3iYs-zPFxoo?qs=iB7U9 z)5T`N`ih(W1STHePG>h|GG_=3vpk)1MJ+F#E-!Lx@POW-%RA@rOdqnL2Vc$MWivj0 zJ;}tlG8Zb|Jth(%HK6=L`envf?paYWjm||MI1t@%=b4j;mTkR(bDOVYW-9AMMCY5b z73qehc$4KGYn&k5@)`Nd-m6V@VBB zvOReM9RrmG-$^pLPD7~FEfsT}3qOHgjCJC@YVZbI|2j`LNA;G)>5+gHp{>>x)P*jF zt4wpgmVdILnzU~CKtDU7Zx$xuLUPx|>FxZsaKP9t>oZtob*@H-lSZt-`ck|!gnQ@1 zbSdOaI8)WgviFx@HDw%Q^RS-pSzc6PK5gBanN)7y_4!p1l`g#fI6E`P;lXr7H4|{O zp0Hs5Lo?$2(TL6&ygwrJXwwr$&HrES|;mA387O53(8ZQHhO zXWsnR*=O&6on3duIw#JJxDPYt!~9-mi$2zcUIPMKP|8IRc|7i(4Hp1 zL==D-Hf#ZZ(y?b&>CV+J*eY#g%*C=PZKpDUoFZXCQUJEkP4#X{?VD6_X4WGZFxB(C3=qb^fcV!C_z zxp}^SyRni^9PHTR;AP)5zlh8ajl+w39b=92?7U86&q&j0wS#5R?y|VpACc~9zuCcCMi6+^>_38x}=;SQDnNrdHG_T-MsR`l(JMqG0z zHft13E6+@rZB}!L=ki@$tOF!;`2waXX4&rpzNUc}?Yc#RTqKlq1|A+J=_2l0bqA-LDq5uPi7P4&Ol2NaAKz@` zQqBIL%>H*0L;kMJn7&3B{==&1pOhIRBg6l4@dK?U<%+dAU+_xH`eKAWFM??c!5E7#ZjeUO6?lv4iXKJ;=y!|K#s+1>OavXjsx zeM13BxrdSB#^h1zk%Kt-5J~trQCfZh&-fl&LEym90BITwYARApT|W5XKd@<)1@qAf zK`X`O@Ci{A?y&77l95ruAgJ`Zm_U$7eblL$D4+FG0Juto18#iKdXV&+(b0lp=~E%? zNS^9DNuLN)in!(YElC{NC2he6GQhsAOb4_K#)=l8R0KsCa(v!mB$SPLMVuw=P>~b%u9$~r)m#LfeE!B z#0=pFnBPX)!5ZVgx0}p<#3kQk-pWOq4-USZ=C<%=! 
z=aDktOe5jN0}~4m<_|iiAH)%EK3orwXmU^u3fv_%`^``s7VUkG{;mZK3uy}=XfPy} zi&MtN;|c701EwFHZY5{zJHKnEiJ)jn~rRjW4I|pWB;|sTag( z;bMb&dWV4QmBi>sh{aTPjo{P^i+vf*0>$iSUuhu&FZg+$m`l4)iJwEe9Gu{F4B^st z{p6FdNMU56{v+vAa$jj7keOym$%YpV=yNg_EB)YHJZE z@5$qK^SLemN=xofY^SPzB?wsg(SB99P!rQ$>t18E06l3;iX-;9<&>Vh7pLU+>qRGb z`YV}Lv@sG}C;z9^xQhjp5lEf0bh~=hf`xqCIofUVqJbvWiG(fmZ=1P%#a(y%AU7M? zV@$d2xjdaX*4BxAzqI^s*6BC&UeWgQ3a!mm-YXX*hgF&7luiZRb>!EeBJ$d$PiIqm&y6*C)xi#yNGv*5=l>QH!jf*hKb0R^Ha^q1V?egwB zT4L=K?R9M`rY=#|{(8@=!OpS@lTmxO?@zP)w#a3#&QEk5`yJkzdwM4J8iQk%DuL;@ zKj`BXTvuXmr3&;Ix%75%PmfAdL;Q0`nNZ@#R-oewOO=d~snIz->0fq$BiJ1)1>ZMH zP#67YL>N|XuYD>j$u#D}aZj|u#;-xAOO8U*rCr10FE@3l#g{xvnR(apj7nsX#|2m` zZUoI$ziT;WmI{e_E5>^VjUM|yHhwT?9UqrOUfN(5uWaWqv^|%O-9@UiXjDp1PHVYv zoC``niw~qmV@)o;o>ej3D6JP18rVCTd+kysJa8_po6fJm1(xtmtDf?Hq}*kSkG=ps^0?UF6zu`5{)6w65NwW|=bOIV&S`{*b@Tu^`i_ z!=RegmydWa>CAj=Ri#i?`s5kTdiEi@;ZY#&eYC)#U$lL;=_MCQk=d|Kta+}kJFfWZ z9=rd80_=0m^!?4a>=U}5b9Iq)P7On$szUd~^ybXaPnvRiUFEB#!QNxz6AY5g>McF76*6lZ9k9r=0eDy!Wr?m97hol9Bf zOb1TN_4~ZdMx6+692X{_A{H^Jxp}L%M_%Wc2>E=uECuLJ?*UK6FxOTy`7U2rtKPaE z0;C6X50GCSHnLI(r_AIdwMtAU%;11kB=XE+D>|@g1-+eKl@x|E511IC|i^I(8)qv%o@O<$Q0_ zj_ag2RvR?ea7y_*e2LdWITv@<&%ek}EXtzsq%fs4wFqrr*rv735mk3`#9q!&!r1hV z-$jz&Q509?$5Bw}dH$icw0YA7VHNAS^g)K^jhU#%3#Et;9@Bg3D`_8i1PGNjX>q=ChU>q z%{D|g;c+Hr-qo%yOV6-x#r=mqmnfc8-_DUcU!4v1pf^3Mlo^$<^vBg3nspVKm`xr# zGD0PFIGw(c1nQ!PjoIDlxfQrOE-V^_c~SnzJi~I(VsCHQ_VJqO8R{z~&^GwYz_KSI zL$j7-ORl33`pmZ-NK3J8pR>mSN<{Q_<7r4y`5a9l+U{NiORYS{lW}T&lQazuP4)6S z_Fr6XH?ACQ3Vx@gWI{X*t=CcxBHA#&|Ap<{rsdc@_+hU36KU09-JX2lbOb7Lce||c zm*-gm?z31=^m(hhI4c%JAR1YW}*nie5_ozCigL&}n2poXj+q zjG>e)ZWAv_D?c!nk`jLmV^1MPTVhr>O{uBLi%?TbKt(-lP1T|du{lzpAz{^ELo(wW zv+~kmnsxY^d7D1^+R^#>`myo(`ijSc6VzJ6q6rVN3Mp5^5I{Bsz$O>)y;zFBPY7N{ zb`lujb{kG7eFIjFTo8oZn!tuCAjM!zz%+~@8Ep)V*3YvGo4=5mFhP_MkkD7J!v_h% zlt2&}6^P8m7N2@==gtbWTVF6>6lwM=ei*=m9#FU-6~hqj1WbU`r_cL`X-smEZA9ewGiA~leDwqKT18J;=r3-gJT<_O1K0VABt1TJ)&utEP z5Cn1lEHYH^St%(KMLQEA7XOOq<148l(Lnnx8 z{fV~oF=Xgix$1ZyGI)Ic&EBbZWuIoZv+;GL=d0~ zK=eT&@IDYAK-tNP0)0j1Z9Fvx&VnE?g2W1d7=&>p#8AW7a$`d=90>fxOrNgA{-JGT zQcr5Vfcp>tAmeO7=SPC}(7lUU%4L90^@ZX55!HIxmtyz`YD>T`UouI|C#3Fi)9k!1 z*X=7*ttQZ$%nXSOGK-H5%+r#s%dV%3Ts49n&>f54*lVL1$DXbS?)9%%WI9*avYuGJ z_(yy<&6DHZRvSOznVum=G52ryBES%q33BMud_&aRIP@o4dE{BC_lU^H;6q}Tv%u*K z|FP}{kEV(1(;dg4dTnyAN_tMYwm z?0p5JNH`PYE~_qgd0J_CGz=ayt7J5$;z?r_n+@W|s)Oe3dFgm$x-=3FP0B#a<;^nh zSwUP4#9=Iv*XczwDNY17-=<@ucR#;vKmY@i7p`i8h$(j*h1yxQ<>Qa=qZ3YZ#wrhy zfb!*DKM>HAlV3m;CFOfZW#`3DJF*EQ6=-Tc|VSi)HX zj6Z>H7Bb9QTcd8rGsBI=de^^Gv*cYh684wo*7_>`V7u9~QWWLndRJ&PA!!Q2RulT^ ztcv8Z%Aw)ecRt$WZXr@=Xu=i)#S)L5(6DbBenPoXV3oD(+0so_vN5Git0Q4zSZVBa zTRqn6HZ(svZT~WYQFXNPlA;P3zhh`VD>4*l?2N01HuKHV9w(SWyWfI6ZG8}?Cm!2o z73y|BI0>r4DIW#Rj8nUpF>ARdz`avp&V#%)H9k2d&eKM|UwdExh{ONveLRv<-t~;e z>xYg^QEOqtFSBu>8(G;v@l|_AC1JAgDp67JUBzcpY3*X7$Q+J$#oXJKNpt63-Lkol zWmdz-GxHUa`bW$~7W~8?T%_CQl+dlIr$)x=aW&1|H{wMmQVj}@`*Jaudz_@lzjcyP z?hI$<-7NW>w|Co)tqKUs?wBM+XKtdYqk??urM-)k{huvUKUh+jX0((-QK)}9aA1WN4C*texnku|i&*GuA% zMVZtX9`ikxCz9JNr#pA?YV6m}a7@A{z3{gu+fP0)(|Y;o%33m%LV9R~1(htFl105V zSI(Nwk@w19jJWn@S}k*HI{H6ham0pozNp1P8{C~19IKYwt43*2H@7LR9Qxkb9KmWR z-q+q7W@Rr;_#Q}X3$tv2fx$}|Y3{X9@T ztmwd=<;!*b{XFx4(^D`8; zW=zb@T0<|oH(6?)Fj%}5h1ws9X$3Ddn z`>F8vd?_Y;BRLczU`$cdyWKgRC3lz-ix@BWc1Fh)!=chL9gm`Ey}}Tkc|AXYbcH`s*9`2ahwCA=8})>cM(Lgg)LA$oerD^@_8{Dj=W+!fNx%y zj!w*qO4g+;g%icbWk%{7Qf~SA90d*qN}8I(>DNn&3`NX@W}_OdmsMFVIh$uo>e4?n z8^52RxlJ+{Wo3vu*krC4ZAfOEN>R`{TL0X<{?(E4j=Wg;P{Y>K+;KbNP5W?O-P#Ei z`cSA^lhiKxzS7dc7LSe3X9Lw4TW5uLCSaIrnKp~ZJ>DkHv}m>P_VKu?xVX?%b!F<% zmVb&}Hk4TAnz1mG2zdmhl`yt8@PmeJxk^DHV1Gp7`5Jr?8I4CJavRTl&3vlELo{$; 
zEi`c9Uto?$V42(7>X^Yph%jTJX1KSP*api?>)(A~$*zry3p>O$$A@>?tH(En=|Lny z@2-_kPA7|Hr#W>0x_Zm?L1zm2x+uaOrc<+&egfD7qqb+u& zGB#1r3uibhfOr3!;+yNjWyF=lbmnC5iaClY{IF#2F(rXl8w~>4Hz0uZ3#sIw7Ow~L zNklT;?`f6o180~W^#KZnp(nu;W7wzt7GXwTjhyB^k4vZfv}J>Yr}pjc%PpcK^v+n1 zI@R!-iUwD1fy@3=d(v0hJ1gW)W%EaZhK45_F0|zjrXXrX^67z8dD`($_m&95kKM4= z^uh-Lq|8o)yYmMxF5CvK?8Y4nsuYEqVcS||1pBjZmHCKPwqA9c^LVn%`<#bDRYcJ| zg=&c3IuJRaGRP{TALuTCTMytaAjoz;U~aaBdn9-*SS>*>;Ortma+4-Hz+x}sE`U4_ zVX@|4^hJ8@fk3e`MeV$nBnsLT6h_MF*-`_7AeBzgLF$l(#SQV>+FX4qFbja$0a}1o z765SB={o+?{o2!6RVf;zT|l=SlE<`7Cb$feT!C7R1wW6x=?t*NKfIQh@iPVr&gKWu<6K#7HHMgNK zh^6xFm=j_&n`sR9wW}D4H)|yiA&%t<#qQ$O7ZoZ4%;D+S!P_mzbp84@?V~s~9izH3 zIFkP6W>g8jLB&w)I7+|K+<>xm9lQCb%}DxZ=5^2O*BE4vby1iUjS+`5B>*y{w?h7% zi!dq|(!>yY<1PSU8(@+gH+}G!w0Zp&M<1=dLfM7M+4QF-oZS^L>G{`uhi;2XXC$*unhoc@L*{oqSl`yu%I6H5Bg!{DA$+&LxQ zl4Zm4-F86H-i5X0o-v;&c!Sb~>_!yAu$|pF@a?DbC}z9&bT_mTD|5$Jt$vRC-)Lt2 zA8BUiVE_M1^NYo^7w)#~0DI!7H>RvD#Yus2JpCOwso4adJtyqPU4^_}VG@LTtarnX zL5LAU&n^4|RYR)5WeF6<9kfO8d_G?L!cnA8hYlieCf-hLNvkKi-t&+H#c|L4{vC4{ z(!k8n=g8uSIlA7^7vALj_`#c70Ic5&762~)E)6EYd09$}8-OJM77#Xo42--+MKB;g3^gz;z!H&Rxk!ir%m53w zTN33EiicGCzM%#|!9H{}CaR$NkQV=>A-hypGzc6NWV2x|7y!>Dke?Q?-R>baR1hs} zB&>DQA|oKK-;0J|>=@5Ot_Bb$NG1zRDcX6ZIe+x-FUKtXoWDrd#A_^oIkLPLn{!@X zlr5iojBojFoEiS9LOB-~2>`uK$kVb9U>RAhNEea-D_^5b>e)@Z z=NU=bx!1K@u#7T0cdc#yLef_qWRJ?1d=v%vY=eTW*F|upe zT*>S1S`?6(@E#jX`DjM2q3Fr?S}q@?zTT&@vnxzyqp#^O1N~uNmJAG}dXw#yit!dn z-C3#xJ_3J6pB_cPv!p0FO1otlal&E44wU=KH0>N%E`Q`renR8E@G>rch7UC|?pcYp9NL#&Phvt(pRJru%q8g{ zyHp<)ccamV-8^SuKY(54@VbIlT+#ISSciV#=FUX_8_R6}Bg_B4Co`8PFPv?OL6*cJ zFHA{G(!*T6c-kv4QsYq$J5K1=t4b-I;y6g=mu+r@r8rlw9^vJG)!%0y+b z?#EJwYy1AwVX8{0+G;*_InqLfI$5@k5o8gSA0n5qHwAzi0KE4D1^^EK78M5INqItx z+mFQ$1`yV-7_78%sUIL0ECmqU?;L?)iEtn{WDgUUTQYecva3kizOFJ(!9GM328y6^ zp9bHAA**OxG$0fdREt3#*bmnQu(t}3-OeEfR1hsxB(z!6A{_v(_k%ibWlwqbfNFylNwDO)=899{QZIWYt+Lpm1{_5ry` z$k|VWomfOO9EGS5%F%}+azw-q=EoSK zNC&$0am?ySJp&;xM;I;6Wf)y9P392;FIl0C>)A=aWgCs#y3@6uv5Y_sdZ|KgLO&&g zdBQwMU&}C1$&fNsCPRmf4XWTrs$whQ%#WjwpJ0%*Vq_~Q@@B3WxeUCB_49{V*ZeV^ zhWwOT9>g!%_ky8P_VQ&hDSKUC+O>M?#CFphVQAI3zL4M1Ji%mu;jI<1lBB1= zYpG(0>UxL5&OSemmA0bO7~qR-S=85;>_xUqGSZztabvOUe-G>lZF(3M+my865anoZ z8FR*I!w#17&NkyF;rtp>e(77&@Y6N+{bKywQ&a!bHTC^M`?2#2xPZwIc=8vX)3Y!A z+E3o(HyptSAM@Hz^jHJ)k+o?1kyFLp3=Z7f)!OOwLXs}BN7ZRjFBU`8-E%I^Gx%jb zp9e(E4Q;oNRm2Y2 zk7})#lqg0nqA8Bi&@A;$Hxc11Y-}#^ zt92FkGu--#{}&iCdxpJi)mu(&GcC9z{=Z7bTqrcoppHQX~9+h|vWdq7Y7 zS&joK^spbGdYqFu?`4nZUl_4WZuAkB7Lx;Jc!BQd*Ag0T^aetEQq@baxpzmCinpl#VpXUNRwO2vI z#mdOQ$jHDH2#t))OiiL-DnP1z5<^lW6v{J8G88f@3sQ@UQqzFa-l>)4`9&$kVAFi_ zQ*av;5aJr4=T@4Nla!d32RtfRBP1iUSOJI%Qqq7X>nMb!78L`XqF}6NXkut;33Pz5 zu92RZuA!cZfu51ELW~B;0X~^!sR}@68pdcUWEUisWE7_+DwILBn&}zq83SDvP?Voi znhdf8VmQnUpj2>aQdVkm3D~_MnI$!PHAplu>$a_)PSPY zl+5H3V6Ym3&S6y04_Am*FjO!G;#i;)GpkY+jE#WmJY5u`ZIXByq&yTpCwS_*mKdcX|dK4n>&xIS#`QEc2ExdDO?i0B4?4q%8($B5~axg6)sB! 
zyi}Y5ggX5e24%KzeEw^;ak7ukVu6!BA5WV(sD=={jT`{jjE?;Q5_BWYo=YL~Yx@l0FoaxX&mpyg4O z?MHH!?z>*wpt{4eE!yVyn^;w&hG)y}2v=|xTa-ucZFKpxY{t!F#_Ot#Qs=yOsk%^| zV%NU;%Zua7Lb^qS^Ls;AM_j+_{z2qkcQ5w~QMCe{sxMFdjkUE-+6RkD?$NH26y^5+ zVx;W1=hNLwE1Qoy-tM1cC-#r~{Zr?cA9Pbs-M_TvUoEfZfA#$g!bR3TnAuHX6pV(z mXb6mkz-S1Jh5&6sfUCG9v8bd1*a+h?GBo2-Rdw}u;{pJ9SkU4C literal 0 HcmV?d00001 From cdcda0b86d518eb3c4ee2b579accd06c8c6878a4 Mon Sep 17 00:00:00 2001 From: DanielYang Date: Mon, 23 May 2022 15:09:59 +0800 Subject: [PATCH 013/127] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 2ade8a69c..2ecc2a02e 100644 --- a/README.md +++ b/README.md @@ -186,7 +186,7 @@ Via the easy-to-use, efficient, flexible and scalable implementation, our vision - Scan the QR code below with your Wechat (reply【语音】after your friend's application is approved), you can access to official technical exchange group. Look forward to your participation.

## Installation From 65224b14e3816d5a355ba8589a6fb8a7c33aab05 Mon Sep 17 00:00:00 2001 From: DanielYang Date: Mon, 23 May 2022 15:24:17 +0800 Subject: [PATCH 014/127] Update README_cn.md --- README_cn.md | 49 +++++++++++++++++++------------------------------ 1 file changed, 19 insertions(+), 30 deletions(-) diff --git a/README_cn.md b/README_cn.md index f5ba93629..fcb9b7c6a 100644 --- a/README_cn.md +++ b/README_cn.md @@ -17,20 +17,6 @@

- - - ------------------------------------------------------------------------------------- -

快速开始 @@ -41,6 +27,11 @@

+------------------------------------------------------------------------------------ + + + + -- 👑 2022.05.13: PaddleSpeech 发布 [PP-ASR](./docs/source/asr/PPASR_cn.md)、[PP-TTS](./docs/source/tts/PPTTS_cn.md)、[PP-VPR](docs/source/vpr/PPVPR_cn.md) +- 👑 2022.05.13: PaddleSpeech 发布 [PP-ASR](./docs/source/asr/PPASR_cn.md)流式语音识别系统、[PP-TTS](./docs/source/tts/PPTTS_cn.md)流式语音合成系统、[PP-VPR](docs/source/vpr/PPVPR_cn.md)全链路声纹识别系统 - 👏🏻 2022.05.06: PaddleSpeech Streaming Server 上线! 覆盖了语音识别(标点恢复、时间戳),和语音合成。 - 👏🏻 2022.05.06: PaddleSpeech Server 上线! 覆盖了声音分类、语音识别、语音合成、声纹识别,标点恢复。 - 👏🏻 2022.03.28: PaddleSpeech CLI 覆盖声音分类、语音识别、语音翻译(英译中)、语音合成,声纹验证。 - 🤗 2021.12.14: PaddleSpeech [ASR](https://huggingface.co/spaces/KPatrick/PaddleSpeechASR) and [TTS](https://huggingface.co/spaces/KPatrick/PaddleSpeechTTS) Demos on Hugging Face Spaces are available! -### 🔥 热门活动 - -- 2021.12.21~12.24 - - 4 日直播课: 深度解读 PaddleSpeech 语音技术! - **直播回放与课件资料: https://aistudio.baidu.com/aistudio/education/group/info/25130** + ### 🔥 加入技术交流群获取入群福利 - -### 技术交流群 -微信扫描二维码(好友申请通过后回复【语音】)加入官方交流群,获得更高效的问题答疑,与各行各业开发者充分交流,期待您的加入。 + - 3 日直播课链接: 深度解读 PP-TTS、PP-ASR、PP-VPR三项核心语音系统关键技术 + - 20G 学习大礼包:视频课程、前沿论文与学习资料 + +微信扫描二维码关注公众号,点击“马上报名”填写问卷加入官方交流群,获得更高效的问题答疑,与各行各业开发者充分交流,期待您的加入。
- +
+ + ## 安装 我们强烈建议用户在 **Linux** 环境下,*3.7* 以上版本的 *python* 上安装 PaddleSpeech。 目前为止,**Linux** 支持声音分类、语音识别、语音合成和语音翻译四种功能,**Mac OSX、 Windows** 下暂不支持语音翻译功能。 想了解具体安装细节,可以参考[安装文档](./docs/source/install_cn.md)。 - + ## 快速开始 安装完成后,开发者可以通过命令行快速开始,改变 `--input` 可以尝试用自己的音频或文本测试。 @@ -257,7 +246,7 @@ paddlespeech asr --input ./zh.wav | paddlespeech text --task punc 更多命令行命令请参考 [demos](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/demos) > Note: 如果需要训练或者微调,请查看[语音识别](./docs/source/asr/quick_start.md), [语音合成](./docs/source/tts/quick_start.md)。 - + ## 快速使用服务 安装完成后,开发者可以通过命令行快速使用服务。 @@ -283,7 +272,7 @@ paddlespeech_client cls --server_ip 127.0.0.1 --port 8090 --input input.wav 更多服务相关的命令行使用信息,请参考 [demos](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/demos/speech_server) - + ## 快速使用流式服务 开发者可以尝试[流式ASR](./demos/streaming_asr_server/README.md)和 [流式TTS](./demos/streaming_tts_server/README.md)服务. @@ -314,8 +303,7 @@ paddlespeech_client tts_online --server_ip 127.0.0.1 --port 8092 --protocol http 更多信息参看: [流式 ASR](./demos/streaming_asr_server/README.md) 和 [流式 TTS](./demos/streaming_tts_server/README.md) - - + ## 模型列表 PaddleSpeech 支持很多主流的模型,并提供了预训练模型,详情请见[模型列表](./docs/source/released_model.md)。 @@ -587,6 +575,7 @@ PaddleSpeech 的 **语音合成** 主要包含三个模块:文本前端、声 + ## 教程文档 对于 PaddleSpeech 的所关注的任务,以下指南有助于帮助开发者快速入门,了解语音相关核心思想。 From c65e03eb8c57d17cc3b6004d29b584d512c864fc Mon Sep 17 00:00:00 2001 From: DanielYang Date: Mon, 23 May 2022 15:25:27 +0800 Subject: [PATCH 015/127] Update README_cn.md --- README_cn.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/README_cn.md b/README_cn.md index fcb9b7c6a..744b00fae 100644 --- a/README_cn.md +++ b/README_cn.md @@ -19,11 +19,11 @@

From e6ddb0cc6efe8d4706aac23009aa1a16c506bc3d Mon Sep 17 00:00:00 2001 From: DanielYang Date: Mon, 23 May 2022 15:37:37 +0800 Subject: [PATCH 016/127] Update README.md --- README.md | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) diff --git a/README.md b/README.md index 2ecc2a02e..af3a5c4e4 100644 --- a/README.md +++ b/README.md @@ -20,16 +20,14 @@

@@ -170,20 +168,9 @@ Via the easy-to-use, efficient, flexible and scalable implementation, our vision
 - 🤗 2021.12.14: [ASR](https://huggingface.co/spaces/KPatrick/PaddleSpeechASR) and [TTS](https://huggingface.co/spaces/KPatrick/PaddleSpeechTTS) Demos on Hugging Face Spaces are available!
 - 👏🏻 2021.12.10: `CLI` is available for `Audio Classification`, `Automatic Speech Recognition`, `Speech Translation (English to Chinese)` and `Text-to-Speech`.
 
-### 🔥 Hot Activities
-
-
-- 2021.12.21~12.24
-  - 4 Days Live Courses: Depth interpretation of PaddleSpeech!
-  - **Courses videos and related materials: https://aistudio.baidu.com/aistudio/education/group/info/25130**
 
 ### Community
-- Scan the QR code below with your Wechat (reply【语音】after your friend's application is approved), you can access to official technical exchange group. Look forward to your participation.
+- Scan the QR code below with your Wechat to get access to the official technical exchange group and the bonus (more than 20 GB of learning materials, such as papers, code and videos), plus the live links of the lessons. We look forward to your participation.
From ea71fddbdea8b9dd0eeff63612830fa613e593a4 Mon Sep 17 00:00:00 2001 From: huangyuxin Date: Mon, 23 May 2022 07:43:32 +0000 Subject: [PATCH 017/127] fix condition of wenetspeech --- paddlespeech/cli/asr/infer.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/paddlespeech/cli/asr/infer.py b/paddlespeech/cli/asr/infer.py index 8b10b6b65..2d74afa6d 100644 --- a/paddlespeech/cli/asr/infer.py +++ b/paddlespeech/cli/asr/infer.py @@ -181,7 +181,7 @@ class ASRExecutor(BaseExecutor): lm_url, os.path.dirname(self.config.decode.lang_model_path), lm_md5) - elif "conformer" in model_type or "transformer" in model_type or "wenetspeech" in model_type: + elif "conformer" in model_type or "transformer" in model_type: self.config.spm_model_prefix = os.path.join( self.res_path, self.config.spm_model_prefix) self.text_feature = TextFeaturizer( @@ -205,7 +205,7 @@ class ASRExecutor(BaseExecutor): self.model.set_state_dict(model_dict) # compute the max len limit - if "conformer" in model_type or "transformer" in model_type or "wenetspeech" in model_type: + if "conformer" in model_type or "transformer" in model_type: # in transformer like model, we may use the subsample rate cnn network subsample_rate = self.model.subsampling_rate() frame_shift_ms = self.config.preprocess_config.process[0][ @@ -242,7 +242,7 @@ class ASRExecutor(BaseExecutor): self._inputs["audio_len"] = audio_len logger.info(f"audio feat shape: {audio.shape}") - elif "conformer" in model_type or "transformer" in model_type or "wenetspeech" in model_type: + elif "conformer" in model_type or "transformer" in model_type: logger.info("get the preprocess conf") preprocess_conf = self.config.preprocess_config preprocess_args = {"train": False} From c1b512c58abf955ee65fcb559f28818c803bc0bb Mon Sep 17 00:00:00 2001 From: TianYuan Date: Mon, 23 May 2022 09:42:22 +0000 Subject: [PATCH 018/127] rm fluid in tts, test=tts --- paddlespeech/cli/download.py | 2 +- paddlespeech/server/download.py | 329 ------------------ paddlespeech/server/util.py | 2 +- .../speedyspeech/speedyspeech_updater.py | 15 +- paddlespeech/t2s/modules/losses.py | 38 +- paddlespeech/t2s/utils/profile.py | 34 -- paddlespeech/t2s/utils/timeline.py | 315 ----------------- 7 files changed, 13 insertions(+), 722 deletions(-) delete mode 100644 paddlespeech/server/download.py delete mode 100644 paddlespeech/t2s/utils/profile.py delete mode 100644 paddlespeech/t2s/utils/timeline.py diff --git a/paddlespeech/cli/download.py b/paddlespeech/cli/download.py index 0f09b6fad..ec7258747 100644 --- a/paddlespeech/cli/download.py +++ b/paddlespeech/cli/download.py @@ -86,7 +86,7 @@ def get_path_from_url(url, str: a local path to save downloaded models & weights & datasets. """ - from paddle.fluid.dygraph.parallel import ParallelEnv + from paddle.distributed import ParallelEnv assert _is_url(url), "downloading from {} not a url".format(url) # parse path after download to decompress under root_dir diff --git a/paddlespeech/server/download.py b/paddlespeech/server/download.py deleted file mode 100644 index ea943dd87..000000000 --- a/paddlespeech/server/download.py +++ /dev/null @@ -1,329 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import hashlib -import os -import os.path as osp -import shutil -import subprocess -import tarfile -import time -import zipfile - -import requests -from tqdm import tqdm - -from paddlespeech.cli.log import logger - -__all__ = ['get_path_from_url'] - -DOWNLOAD_RETRY_LIMIT = 3 - - -def _is_url(path): - """ - Whether path is URL. - Args: - path (string): URL string or not. - """ - return path.startswith('http://') or path.startswith('https://') - - -def _map_path(url, root_dir): - # parse path after download under root_dir - fname = osp.split(url)[-1] - fpath = fname - return osp.join(root_dir, fpath) - - -def _get_unique_endpoints(trainer_endpoints): - # Sorting is to avoid different environmental variables for each card - trainer_endpoints.sort() - ips = set() - unique_endpoints = set() - for endpoint in trainer_endpoints: - ip = endpoint.split(":")[0] - if ip in ips: - continue - ips.add(ip) - unique_endpoints.add(endpoint) - logger.info("unique_endpoints {}".format(unique_endpoints)) - return unique_endpoints - - -def get_path_from_url(url, - root_dir, - md5sum=None, - check_exist=True, - decompress=True, - method='get'): - """ Download from given url to root_dir. - if file or directory specified by url is exists under - root_dir, return the path directly, otherwise download - from url and decompress it, return the path. - Args: - url (str): download url - root_dir (str): root dir for downloading, it should be - WEIGHTS_HOME or DATASET_HOME - md5sum (str): md5 sum of download package - decompress (bool): decompress zip or tar file. Default is `True` - method (str): which download method to use. Support `wget` and `get`. Default is `get`. - Returns: - str: a local path to save downloaded models & weights & datasets. - """ - - from paddle.fluid.dygraph.parallel import ParallelEnv - - assert _is_url(url), "downloading from {} not a url".format(url) - # parse path after download to decompress under root_dir - fullpath = _map_path(url, root_dir) - # Mainly used to solve the problem of downloading data from different - # machines in the case of multiple machines. Different ips will download - # data, and the same ip will only download data once. 
- unique_endpoints = _get_unique_endpoints(ParallelEnv().trainer_endpoints[:]) - if osp.exists(fullpath) and check_exist and _md5check(fullpath, md5sum): - logger.info("Found {}".format(fullpath)) - else: - if ParallelEnv().current_endpoint in unique_endpoints: - fullpath = _download(url, root_dir, md5sum, method=method) - else: - while not os.path.exists(fullpath): - time.sleep(1) - - if ParallelEnv().current_endpoint in unique_endpoints: - if decompress and (tarfile.is_tarfile(fullpath) or - zipfile.is_zipfile(fullpath)): - fullpath = _decompress(fullpath) - - return fullpath - - -def _get_download(url, fullname): - # using requests.get method - fname = osp.basename(fullname) - try: - req = requests.get(url, stream=True) - except Exception as e: # requests.exceptions.ConnectionError - logger.info("Downloading {} from {} failed with exception {}".format( - fname, url, str(e))) - return False - - if req.status_code != 200: - raise RuntimeError("Downloading from {} failed with code " - "{}!".format(url, req.status_code)) - - # For protecting download interupted, download to - # tmp_fullname firstly, move tmp_fullname to fullname - # after download finished - tmp_fullname = fullname + "_tmp" - total_size = req.headers.get('content-length') - with open(tmp_fullname, 'wb') as f: - if total_size: - with tqdm(total=(int(total_size) + 1023) // 1024) as pbar: - for chunk in req.iter_content(chunk_size=1024): - f.write(chunk) - pbar.update(1) - else: - for chunk in req.iter_content(chunk_size=1024): - if chunk: - f.write(chunk) - shutil.move(tmp_fullname, fullname) - - return fullname - - -def _wget_download(url, fullname): - # using wget to download url - tmp_fullname = fullname + "_tmp" - # –user-agent - command = 'wget -O {} -t {} {}'.format(tmp_fullname, DOWNLOAD_RETRY_LIMIT, - url) - subprc = subprocess.Popen( - command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - _ = subprc.communicate() - - if subprc.returncode != 0: - raise RuntimeError( - '{} failed. Please make sure `wget` is installed or {} exists'. - format(command, url)) - - shutil.move(tmp_fullname, fullname) - - return fullname - - -_download_methods = { - 'get': _get_download, - 'wget': _wget_download, -} - - -def _download(url, path, md5sum=None, method='get'): - """ - Download from url, save to path. - url (str): download url - path (str): download to given path - md5sum (str): md5 sum of download package - method (str): which download method to use. Support `wget` and `get`. Default is `get`. - """ - assert method in _download_methods, 'make sure `{}` implemented'.format( - method) - - if not osp.exists(path): - os.makedirs(path) - - fname = osp.split(url)[-1] - fullname = osp.join(path, fname) - retry_cnt = 0 - - logger.info("Downloading {} from {}".format(fname, url)) - while not (osp.exists(fullname) and _md5check(fullname, md5sum)): - if retry_cnt < DOWNLOAD_RETRY_LIMIT: - retry_cnt += 1 - else: - raise RuntimeError("Download from {} failed. 
" - "Retry limit reached".format(url)) - - if not _download_methods[method](url, fullname): - time.sleep(1) - continue - - return fullname - - -def _md5check(fullname, md5sum=None): - if md5sum is None: - return True - - logger.info("File {} md5 checking...".format(fullname)) - md5 = hashlib.md5() - with open(fullname, 'rb') as f: - for chunk in iter(lambda: f.read(4096), b""): - md5.update(chunk) - calc_md5sum = md5.hexdigest() - - if calc_md5sum != md5sum: - logger.info("File {} md5 check failed, {}(calc) != " - "{}(base)".format(fullname, calc_md5sum, md5sum)) - return False - return True - - -def _decompress(fname): - """ - Decompress for zip and tar file - """ - logger.info("Decompressing {}...".format(fname)) - - # For protecting decompressing interupted, - # decompress to fpath_tmp directory firstly, if decompress - # successed, move decompress files to fpath and delete - # fpath_tmp and remove download compress file. - - if tarfile.is_tarfile(fname): - uncompressed_path = _uncompress_file_tar(fname) - elif zipfile.is_zipfile(fname): - uncompressed_path = _uncompress_file_zip(fname) - else: - raise TypeError("Unsupport compress file type {}".format(fname)) - - return uncompressed_path - - -def _uncompress_file_zip(filepath): - files = zipfile.ZipFile(filepath, 'r') - file_list = files.namelist() - - file_dir = os.path.dirname(filepath) - - if _is_a_single_file(file_list): - rootpath = file_list[0] - uncompressed_path = os.path.join(file_dir, rootpath) - - for item in file_list: - files.extract(item, file_dir) - - elif _is_a_single_dir(file_list): - rootpath = os.path.splitext(file_list[0])[0].split(os.sep)[0] - uncompressed_path = os.path.join(file_dir, rootpath) - - for item in file_list: - files.extract(item, file_dir) - - else: - rootpath = os.path.splitext(filepath)[0].split(os.sep)[-1] - uncompressed_path = os.path.join(file_dir, rootpath) - if not os.path.exists(uncompressed_path): - os.makedirs(uncompressed_path) - for item in file_list: - files.extract(item, os.path.join(file_dir, rootpath)) - - files.close() - - return uncompressed_path - - -def _uncompress_file_tar(filepath, mode="r:*"): - files = tarfile.open(filepath, mode) - file_list = files.getnames() - - file_dir = os.path.dirname(filepath) - - if _is_a_single_file(file_list): - rootpath = file_list[0] - uncompressed_path = os.path.join(file_dir, rootpath) - for item in file_list: - files.extract(item, file_dir) - elif _is_a_single_dir(file_list): - rootpath = os.path.splitext(file_list[0])[0].split(os.sep)[-1] - uncompressed_path = os.path.join(file_dir, rootpath) - for item in file_list: - files.extract(item, file_dir) - else: - rootpath = os.path.splitext(filepath)[0].split(os.sep)[-1] - uncompressed_path = os.path.join(file_dir, rootpath) - if not os.path.exists(uncompressed_path): - os.makedirs(uncompressed_path) - - for item in file_list: - files.extract(item, os.path.join(file_dir, rootpath)) - - files.close() - - return uncompressed_path - - -def _is_a_single_file(file_list): - if len(file_list) == 1 and file_list[0].find(os.sep) < -1: - return True - return False - - -def _is_a_single_dir(file_list): - new_file_list = [] - for file_path in file_list: - if '/' in file_path: - file_path = file_path.replace('/', os.sep) - elif '\\' in file_path: - file_path = file_path.replace('\\', os.sep) - new_file_list.append(file_path) - - file_name = new_file_list[0].split(os.sep)[0] - for i in range(1, len(new_file_list)): - if file_name != new_file_list[i].split(os.sep)[0]: - return False - return True diff --git 
a/paddlespeech/server/util.py b/paddlespeech/server/util.py index ae3e9c6aa..13f2ddf6e 100644 --- a/paddlespeech/server/util.py +++ b/paddlespeech/server/util.py @@ -29,9 +29,9 @@ import requests import yaml from paddle.framework import load -from . import download from .entry import client_commands from .entry import server_commands +from paddlespeech.cli import download try: from .. import __version__ except ImportError: diff --git a/paddlespeech/t2s/models/speedyspeech/speedyspeech_updater.py b/paddlespeech/t2s/models/speedyspeech/speedyspeech_updater.py index e30a3fe1a..b20fda1f7 100644 --- a/paddlespeech/t2s/models/speedyspeech/speedyspeech_updater.py +++ b/paddlespeech/t2s/models/speedyspeech/speedyspeech_updater.py @@ -16,7 +16,6 @@ from pathlib import Path import paddle from paddle import distributed as dist -from paddle.fluid.layers import huber_loss from paddle.io import DataLoader from paddle.nn import functional as F from paddle.nn import Layer @@ -78,8 +77,11 @@ class SpeedySpeechUpdater(StandardUpdater): target_durations.astype(predicted_durations.dtype), paddle.to_tensor([1.0])) duration_loss = weighted_mean( - huber_loss( - predicted_durations, paddle.log(target_durations), delta=1.0), + F.smooth_l1_loss( + predicted_durations, + paddle.log(target_durations), + delta=1.0, + reduction='none', ), text_mask, ) # ssim loss @@ -146,8 +148,11 @@ class SpeedySpeechEvaluator(StandardEvaluator): target_durations.astype(predicted_durations.dtype), paddle.to_tensor([1.0])) duration_loss = weighted_mean( - huber_loss( - predicted_durations, paddle.log(target_durations), delta=1.0), + F.smooth_l1_loss( + predicted_durations, + paddle.log(target_durations), + delta=1.0, + reduction='none', ), text_mask, ) # ssim loss diff --git a/paddlespeech/t2s/modules/losses.py b/paddlespeech/t2s/modules/losses.py index db31bcfbb..86dffbe91 100644 --- a/paddlespeech/t2s/modules/losses.py +++ b/paddlespeech/t2s/modules/losses.py @@ -17,7 +17,6 @@ import librosa import numpy as np import paddle from paddle import nn -from paddle.fluid.layers import sequence_mask from paddle.nn import functional as F from scipy import signal @@ -160,7 +159,7 @@ def sample_from_discretized_mix_logistic(y, log_scale_min=None): return x -# Loss for new Tacotron2 +# Loss for Tacotron2 class GuidedAttentionLoss(nn.Layer): """Guided attention loss function module. @@ -428,41 +427,6 @@ class Tacotron2Loss(nn.Layer): return l1_loss, mse_loss, bce_loss -# Loss for Tacotron2 -def attention_guide(dec_lens, enc_lens, N, T, g, dtype=None): - """Build that W matrix. shape(B, T_dec, T_enc) - W[i, n, t] = 1 - exp(-(n/dec_lens[i] - t/enc_lens[i])**2 / (2g**2)) - - See also: - Tachibana, Hideyuki, Katsuya Uenoyama, and Shunsuke Aihara. 2017. “Efficiently Trainable Text-to-Speech System Based on Deep Convolutional Networks with Guided Attention.” ArXiv:1710.08969 [Cs, Eess], October. http://arxiv.org/abs/1710.08969. 
- """ - dtype = dtype or paddle.get_default_dtype() - dec_pos = paddle.arange(0, N).astype(dtype) / dec_lens.unsqueeze( - -1) # n/N # shape(B, T_dec) - enc_pos = paddle.arange(0, T).astype(dtype) / enc_lens.unsqueeze( - -1) # t/T # shape(B, T_enc) - W = 1 - paddle.exp(-(dec_pos.unsqueeze(-1) - enc_pos.unsqueeze(1))**2 / - (2 * g**2)) - - dec_mask = sequence_mask(dec_lens, maxlen=N) - enc_mask = sequence_mask(enc_lens, maxlen=T) - mask = dec_mask.unsqueeze(-1) * enc_mask.unsqueeze(1) - mask = paddle.cast(mask, W.dtype) - - W *= mask - return W - - -def guided_attention_loss(attention_weight, dec_lens, enc_lens, g): - """Guided attention loss, masked to excluded padding parts.""" - _, N, T = attention_weight.shape - W = attention_guide(dec_lens, enc_lens, N, T, g, attention_weight.dtype) - - total_tokens = (dec_lens * enc_lens).astype(W.dtype) - loss = paddle.mean(paddle.sum(W * attention_weight, [1, 2]) / total_tokens) - return loss - - # Losses for GAN Vocoder def stft(x, fft_size, diff --git a/paddlespeech/t2s/utils/profile.py b/paddlespeech/t2s/utils/profile.py deleted file mode 100644 index 5f9b49526..000000000 --- a/paddlespeech/t2s/utils/profile.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from contextlib import contextmanager - -import paddle -from paddle.framework import core -from paddle.framework import CUDAPlace - - -def synchronize(): - """Trigger cuda synchronization for better timing.""" - place = paddle.fluid.framework._current_expected_place() - if isinstance(place, CUDAPlace): - paddle.fluid.core._cuda_synchronize(place) - - -@contextmanager -def nvtx_span(name): - try: - core.nvprof_nvtx_push(name) - yield - finally: - core.nvprof_nvtx_pop() diff --git a/paddlespeech/t2s/utils/timeline.py b/paddlespeech/t2s/utils/timeline.py deleted file mode 100644 index 0a5509dbe..000000000 --- a/paddlespeech/t2s/utils/timeline.py +++ /dev/null @@ -1,315 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import argparse -import json - -import paddle.fluid.proto.profiler.profiler_pb2 as profiler_pb2 -import six - -parser = argparse.ArgumentParser(description=__doc__) -parser.add_argument( - '--profile_path', - type=str, - default='', - help='Input profile file name. 
If there are multiple file, the format ' - 'should be trainer1=file1,trainer2=file2,ps=file3') -parser.add_argument( - '--timeline_path', type=str, default='', help='Output timeline file name.') -args = parser.parse_args() - - -class _ChromeTraceFormatter(object): - def __init__(self): - self._events = [] - self._metadata = [] - - def _create_event(self, ph, category, name, pid, tid, timestamp): - """Creates a new Chrome Trace event. - - For details of the file format, see: - https://github.com/catapult-project/catapult/blob/master/tracing/README.md - - Args: - ph: The type of event - usually a single character. - category: The event category as a string. - name: The event name as a string. - pid: Identifier of the process generating this event as an integer. - tid: Identifier of the thread generating this event as an integer. - timestamp: The timestamp of this event as a long integer. - - Returns: - A JSON compatible event object. - """ - event = {} - event['ph'] = ph - event['cat'] = category - event['name'] = name.replace("ParallelExecutor::Run/", "") - event['pid'] = pid - event['tid'] = tid - event['ts'] = timestamp - return event - - def emit_pid(self, name, pid): - """Adds a process metadata event to the trace. - - Args: - name: The process name as a string. - pid: Identifier of the process as an integer. - """ - event = {} - event['name'] = 'process_name' - event['ph'] = 'M' - event['pid'] = pid - event['args'] = {'name': name} - self._metadata.append(event) - - def emit_region(self, timestamp, duration, pid, tid, category, name, args): - """Adds a region event to the trace. - - Args: - timestamp: The start timestamp of this region as a long integer. - duration: The duration of this region as a long integer. - pid: Identifier of the process generating this event as an integer. - tid: Identifier of the thread generating this event as an integer. - category: The event category as a string. - name: The event name as a string. - args: A JSON-compatible dictionary of event arguments. - """ - event = self._create_event('X', category, name, pid, tid, timestamp) - event['dur'] = duration - event['args'] = args - self._events.append(event) - - def emit_counter(self, category, name, pid, timestamp, counter, value): - """Emits a record for a single counter. - - Args: - category: The event category as string - name: The event name as string - pid: Identifier of the process generating this event as integer - timestamp: The timestamps of this event as long integer - counter: Name of the counter as string - value: Value of the counter as integer - tid: Thread id of the allocation as integer - """ - event = self._create_event('C', category, name, pid, 0, timestamp) - event['args'] = {counter: value} - self._events.append(event) - - def format_to_string(self, pretty=False): - """Formats the chrome trace to a string. - - Args: - pretty: (Optional.) If True, produce human-readable JSON output. - - Returns: - A JSON-formatted string in Chrome Trace format. 
- """ - trace = {} - trace['traceEvents'] = self._metadata + self._events - if pretty: - return json.dumps(trace, indent=4, separators=(',', ': ')) - else: - return json.dumps(trace, separators=(',', ':')) - - -class Timeline(object): - def __init__(self, profile_dict): - self._profile_dict = profile_dict - self._pid = 0 - self._devices = dict() - self._mem_devices = dict() - self._chrome_trace = _ChromeTraceFormatter() - - def _allocate_pid(self): - cur_pid = self._pid - self._pid += 1 - return cur_pid - - def _allocate_pids(self): - for k, profile_pb in six.iteritems(self._profile_dict): - for event in profile_pb.events: - if event.type == profiler_pb2.Event.CPU: - if (k, event.device_id, "CPU") not in self._devices: - pid = self._allocate_pid() - self._devices[(k, event.device_id, "CPU")] = pid - # -1 device id represents CUDA API(RunTime) call.(e.g. cudaLaunch, cudaMemcpy) - if event.device_id == -1: - self._chrome_trace.emit_pid("%s:cuda_api" % k, pid) - else: - self._chrome_trace.emit_pid( - "%s:cpu:block:%d" % (k, event.device_id), pid) - elif event.type == profiler_pb2.Event.GPUKernel: - if (k, event.device_id, "GPUKernel") not in self._devices: - pid = self._allocate_pid() - self._devices[(k, event.device_id, "GPUKernel")] = pid - self._chrome_trace.emit_pid("%s:gpu:%d" % - (k, event.device_id), pid) - if not hasattr(profile_pb, "mem_events"): - continue - for mevent in profile_pb.mem_events: - if mevent.place == profiler_pb2.MemEvent.CUDAPlace: - if (k, mevent.device_id, "GPU") not in self._mem_devices: - pid = self._allocate_pid() - self._mem_devices[(k, mevent.device_id, "GPU")] = pid - self._chrome_trace.emit_pid( - "memory usage on %s:gpu:%d" % (k, mevent.device_id), - pid) - elif mevent.place == profiler_pb2.MemEvent.CPUPlace: - if (k, mevent.device_id, "CPU") not in self._mem_devices: - pid = self._allocate_pid() - self._mem_devices[(k, mevent.device_id, "CPU")] = pid - self._chrome_trace.emit_pid( - "memory usage on %s:cpu:%d" % (k, mevent.device_id), - pid) - elif mevent.place == profiler_pb2.MemEvent.CUDAPinnedPlace: - if (k, mevent.device_id, - "CUDAPinnedPlace") not in self._mem_devices: - pid = self._allocate_pid() - self._mem_devices[(k, mevent.device_id, - "CUDAPinnedPlace")] = pid - self._chrome_trace.emit_pid( - "memory usage on %s:cudapinnedplace:%d" % - (k, mevent.device_id), pid) - elif mevent.place == profiler_pb2.MemEvent.NPUPlace: - if (k, mevent.device_id, "NPU") not in self._mem_devices: - pid = self._allocate_pid() - self._mem_devices[(k, mevent.device_id, "NPU")] = pid - self._chrome_trace.emit_pid( - "memory usage on %s:npu:%d" % (k, mevent.device_id), - pid) - if (k, 0, "CPU") not in self._mem_devices: - pid = self._allocate_pid() - self._mem_devices[(k, 0, "CPU")] = pid - self._chrome_trace.emit_pid("memory usage on %s:cpu:%d" % - (k, 0), pid) - if (k, 0, "GPU") not in self._mem_devices: - pid = self._allocate_pid() - self._mem_devices[(k, 0, "GPU")] = pid - self._chrome_trace.emit_pid("memory usage on %s:gpu:%d" % - (k, 0), pid) - if (k, 0, "CUDAPinnedPlace") not in self._mem_devices: - pid = self._allocate_pid() - self._mem_devices[(k, 0, "CUDAPinnedPlace")] = pid - self._chrome_trace.emit_pid( - "memory usage on %s:cudapinnedplace:%d" % (k, 0), pid) - if (k, 0, "NPU") not in self._mem_devices: - pid = self._allocate_pid() - self._mem_devices[(k, 0, "NPU")] = pid - self._chrome_trace.emit_pid("memory usage on %s:npu:%d" % - (k, 0), pid) - - def _allocate_events(self): - for k, profile_pb in six.iteritems(self._profile_dict): - for event in 
profile_pb.events: - if event.type == profiler_pb2.Event.CPU: - type = "CPU" - elif event.type == profiler_pb2.Event.GPUKernel: - type = "GPUKernel" - pid = self._devices[(k, event.device_id, type)] - args = {'name': event.name} - if event.memcopy.bytes > 0: - args['mem_bytes'] = event.memcopy.bytes - if hasattr(event, "detail_info") and event.detail_info: - args['detail_info'] = event.detail_info - # TODO(panyx0718): Chrome tracing only handles ms. However, some - # ops takes micro-seconds. Hence, we keep the ns here. - self._chrome_trace.emit_region( - event.start_ns, (event.end_ns - event.start_ns) / 1.0, pid, - event.sub_device_id, 'Op', event.name, args) - - def _allocate_memory_event(self): - if not hasattr(profiler_pb2, "MemEvent"): - return - place_to_str = { - profiler_pb2.MemEvent.CPUPlace: "CPU", - profiler_pb2.MemEvent.CUDAPlace: "GPU", - profiler_pb2.MemEvent.CUDAPinnedPlace: "CUDAPinnedPlace", - profiler_pb2.MemEvent.NPUPlace: "NPU" - } - for k, profile_pb in six.iteritems(self._profile_dict): - mem_list = [] - end_profiler = 0 - for mevent in profile_pb.mem_events: - crt_info = dict() - crt_info['time'] = mevent.start_ns - crt_info['size'] = mevent.bytes - if mevent.place in place_to_str: - place = place_to_str[mevent.place] - else: - place = "UnDefine" - crt_info['place'] = place - pid = self._mem_devices[(k, mevent.device_id, place)] - crt_info['pid'] = pid - crt_info['thread_id'] = mevent.thread_id - crt_info['device_id'] = mevent.device_id - mem_list.append(crt_info) - crt_info = dict() - crt_info['place'] = place - crt_info['pid'] = pid - crt_info['thread_id'] = mevent.thread_id - crt_info['device_id'] = mevent.device_id - crt_info['time'] = mevent.end_ns - crt_info['size'] = -mevent.bytes - mem_list.append(crt_info) - end_profiler = max(end_profiler, crt_info['time']) - mem_list.sort(key=lambda tmp: (tmp.get('time', 0))) - i = 0 - total_size = 0 - while i < len(mem_list): - total_size += mem_list[i]['size'] - while i < len(mem_list) - 1 and mem_list[i]['time'] == mem_list[ - i + 1]['time']: - total_size += mem_list[i + 1]['size'] - i += 1 - - self._chrome_trace.emit_counter( - "Memory", "Memory", mem_list[i]['pid'], mem_list[i]['time'], - 0, total_size) - i += 1 - - def generate_chrome_trace(self): - self._allocate_pids() - self._allocate_events() - self._allocate_memory_event() - return self._chrome_trace.format_to_string() - - -profile_path = '/tmp/profile' -if args.profile_path: - profile_path = args.profile_path -timeline_path = '/tmp/timeline' -if args.timeline_path: - timeline_path = args.timeline_path - -profile_paths = profile_path.split(',') -profile_dict = dict() -if len(profile_paths) == 1: - with open(profile_path, 'rb') as f: - profile_s = f.read() - profile_pb = profiler_pb2.Profile() - profile_pb.ParseFromString(profile_s) - profile_dict['trainer'] = profile_pb -else: - for profile_path in profile_paths: - k, v = profile_path.split('=') - with open(v, 'rb') as f: - profile_s = f.read() - profile_pb = profiler_pb2.Profile() - profile_pb.ParseFromString(profile_s) - profile_dict[k] = profile_pb - -tl = Timeline(profile_dict) -with open(timeline_path, 'w') as f: - f.write(tl.generate_chrome_trace()) From 943272385a6b11fafa00e18ab13a30513e8ea9ec Mon Sep 17 00:00:00 2001 From: Hui Zhang Date: Mon, 23 May 2022 10:19:53 +0000 Subject: [PATCH 019/127] refactor asr online --- .../server/engine/asr/online/asr_engine.py | 581 ++++++------------ 1 file changed, 200 insertions(+), 381 deletions(-) diff --git a/paddlespeech/server/engine/asr/online/asr_engine.py 
b/paddlespeech/server/engine/asr/online/asr_engine.py index fd57a3d52..3b88cabb4 100644 --- a/paddlespeech/server/engine/asr/online/asr_engine.py +++ b/paddlespeech/server/engine/asr/online/asr_engine.py @@ -38,7 +38,7 @@ from paddlespeech.server.engine.base_engine import BaseEngine from paddlespeech.server.utils.audio_process import pcm2float from paddlespeech.server.utils.paddle_predictor import init_predictor -__all__ = ['ASREngine'] +__all__ = ['PaddleASRConnectionHanddler', 'ASRServerExecutor', 'ASREngine'] # ASR server connection process class @@ -67,7 +67,7 @@ class PaddleASRConnectionHanddler: # tokens to text self.text_feature = self.asr_engine.executor.text_feature - if "deepspeech2online" in self.model_type or "deepspeech2offline" in self.model_type: + if "deepspeech2" in self.model_type: from paddlespeech.s2t.io.collator import SpeechCollator self.am_predictor = self.asr_engine.executor.am_predictor @@ -89,8 +89,8 @@ class PaddleASRConnectionHanddler: cfg.decoding_method, cfg.lang_model_path, cfg.alpha, cfg.beta, cfg.beam_size, cfg.cutoff_prob, cfg.cutoff_top_n, cfg.num_proc_bsearch) - # frame window samples length and frame shift samples length + # frame window and frame shift, in samples unit self.win_length = int(self.model_config.window_ms / 1000 * self.sample_rate) self.n_shift = int(self.model_config.stride_ms / 1000 * @@ -109,16 +109,15 @@ class PaddleASRConnectionHanddler: self.preprocess_args = {"train": False} self.preprocessing = Transformation(self.preprocess_conf) - # frame window samples length and frame shift samples length + # frame window and frame shift, in samples unit self.win_length = self.preprocess_conf.process[0]['win_length'] self.n_shift = self.preprocess_conf.process[0]['n_shift'] + else: + raise ValueError(f"Not supported: {self.model_type}") def extract_feat(self, samples): - # we compute the elapsed time of first char occuring # and we record the start time at the first pcm sample arraving - # if self.first_char_occur_elapsed is not None: - # self.first_char_occur_elapsed = time.time() if "deepspeech2online" in self.model_type: # self.reamined_wav stores all the samples, @@ -154,28 +153,28 @@ class PaddleASRConnectionHanddler: spectrum = self.collate_fn_test._normalizer.apply(spectrum) # spectrum augment - audio = self.collate_fn_test.augmentation.transform_feature( + feat = self.collate_fn_test.augmentation.transform_feature( spectrum) - audio_len = audio.shape[0] - audio = paddle.to_tensor(audio, dtype='float32') - # audio_len = paddle.to_tensor(audio_len) - audio = paddle.unsqueeze(audio, axis=0) + # audio_len is frame num + frame_num = feat.shape[0] + feat = paddle.to_tensor(feat, dtype='float32') + feat = paddle.unsqueeze(feat, axis=0) if self.cached_feat is None: - self.cached_feat = audio + self.cached_feat = feat else: - assert (len(audio.shape) == 3) + assert (len(feat.shape) == 3) assert (len(self.cached_feat.shape) == 3) self.cached_feat = paddle.concat( - [self.cached_feat, audio], axis=1) + [self.cached_feat, feat], axis=1) # set the feat device if self.device is None: self.device = self.cached_feat.place - self.num_frames += audio_len - self.remained_wav = self.remained_wav[self.n_shift * audio_len:] + self.num_frames += frame_num + self.remained_wav = self.remained_wav[self.n_shift * frame_num:] logger.info( f"process the audio feature success, the connection feat shape: {self.cached_feat.shape}" @@ -183,25 +182,28 @@ class PaddleASRConnectionHanddler: logger.info( f"After extract feat, the connection remain the audio samples: 
{self.remained_wav.shape}" ) + elif "conformer_online" in self.model_type: logger.info("Online ASR extract the feat") samples = np.frombuffer(samples, dtype=np.int16) assert samples.ndim == 1 - logger.info(f"This package receive {samples.shape[0]} pcm data") self.num_samples += samples.shape[0] + logger.info(f"This package receive {samples.shape[0]} pcm data. Global samples:{self.num_samples}") # self.reamined_wav stores all the samples, # include the original remained_wav and this package samples if self.remained_wav is None: self.remained_wav = samples else: - assert self.remained_wav.ndim == 1 + assert self.remained_wav.ndim == 1 # (T,) self.remained_wav = np.concatenate([self.remained_wav, samples]) logger.info( - f"The connection remain the audio samples: {self.remained_wav.shape}" + f"The concatenation of remain and now audio samples length is: {self.remained_wav.shape}" ) + if len(self.remained_wav) < self.win_length: + # samples not enough for feature window return 0 # fbank @@ -209,11 +211,13 @@ class PaddleASRConnectionHanddler: **self.preprocess_args) x_chunk = paddle.to_tensor( x_chunk, dtype="float32").unsqueeze(axis=0) + + # feature cache if self.cached_feat is None: self.cached_feat = x_chunk else: - assert (len(x_chunk.shape) == 3) - assert (len(self.cached_feat.shape) == 3) + assert (len(x_chunk.shape) == 3) # (B,T,D) + assert (len(self.cached_feat.shape) == 3) # (B,T,D) self.cached_feat = paddle.concat( [self.cached_feat, x_chunk], axis=1) @@ -221,20 +225,30 @@ class PaddleASRConnectionHanddler: if self.device is None: self.device = self.cached_feat.place + # cur frame step num_frames = x_chunk.shape[1] + + # global frame step self.num_frames += num_frames + + # update remained wav self.remained_wav = self.remained_wav[self.n_shift * num_frames:] + logger.info( - f"process the audio feature success, the connection feat shape: {self.cached_feat.shape}" + f"process the audio feature success, the cached feat shape: {self.cached_feat.shape}" ) logger.info( - f"After extract feat, the connection remain the audio samples: {self.remained_wav.shape}" + f"After extract feat, the cached remain the audio samples: {self.remained_wav.shape}" ) - # logger.info(f"accumulate samples: {self.num_samples}") + logger.info(f"global samples: {self.num_samples}") + logger.info(f"global frames: {self.num_frames}") + else: + raise ValueError(f"not supported: {self.model_type}") + def reset(self): - if "deepspeech2online" in self.model_type or "deepspeech2offline" in self.model_type: + if "deepspeech2" in self.model_type: # for deepspeech2 self.chunk_state_h_box = copy.deepcopy( self.asr_engine.executor.chunk_state_h_box) @@ -242,35 +256,63 @@ class PaddleASRConnectionHanddler: self.asr_engine.executor.chunk_state_c_box) self.decoder.reset_decoder(batch_size=1) - # for conformer online + self.device = None + + ## common + + # global sample and frame step + self.num_samples = 0 + self.num_frames = 0 + + # cache for audio and feat + self.remained_wav = None + self.cached_feat = None + + + # partial/ending decoding results + self.result_transcripts = [''] + + ## conformer + + # cache for conformer online self.subsampling_cache = None self.elayers_output_cache = None self.conformer_cnn_cache = None self.encoder_out = None - self.cached_feat = None - self.remained_wav = None - self.offset = 0 - self.num_samples = 0 - self.device = None + # conformer decoding state + self.chunk_num = 0 # globa decoding chunk num + self.offset = 0 # global offset in decoding frame unit self.hyps = [] - self.num_frames = 
- self.chunk_num = 0 - self.global_frame_offset = 0 - self.result_transcripts = [''] + + # token timestamp result self.word_time_stamp = [] + + # one-best timestamps, taken from the path whose viterbi prob is larger. self.time_stamp = [] - self.first_char_occur_elapsed = None + def decode(self, is_finished=False): + """advance decoding + + Args: + is_finished (bool, optional): Is last frame or not. Defaults to False. + + Raises: + Exception: when the model is not supported. + + Returns: + None: nothing + """ if "deepspeech2online" in self.model_type: - # x_chunk 是特征数据 - decoding_chunk_size = 1 # decoding_chunk_size=1 in deepspeech2 model - context = 7 # context=7 in deepspeech2 model - subsampling = 4 # subsampling=4 in deepspeech2 model - stride = subsampling * decoding_chunk_size + decoding_chunk_size = 1 # decoding chunk size = 1, in decoding frame units + context = 7 # context=7, in audio frame unit + subsampling = 4 # subsampling=4, in audio frame unit + cached_feature_num = context - subsampling - # decoding window for model + # decoding window for model, in audio frame unit decoding_window = (decoding_chunk_size - 1) * subsampling + context + # decoding stride for model, in audio frame unit + stride = subsampling * decoding_chunk_size if self.cached_feat is None: logger.info("no audio feat, please input more pcm data") return num_frames = self.cached_feat.shape[1] logger.info( f"Required decoding window {decoding_window} frames, and the connection has {num_frames} frames" ) + # the cached feat must be larger than the decoding window if num_frames < decoding_window and not is_finished: logger.info( f"frame feat is less than {decoding_window}, please input more pcm data" ) return None, None if num_frames < context: logger.info( "flast {num_frames} is less than context {context} frames, and we cannot do model forward" ) return None, None + logger.info("start to do model forward") # num_frames - context + 1 ensure that current frame can get context window if is_finished: # if get the finished chunk, we need process the last context left_frames = context else: # we only process decoding_window frames for one chunk left_frames = decoding_window + end = None for cur in range(0, num_frames - left_frames + 1, stride): end = min(cur + decoding_window, num_frames) # extract the audio x_chunk = self.cached_feat[:, cur:end, :].numpy() x_chunk_lens = np.array([x_chunk.shape[1]]) trans_best = self.decode_one_chunk(x_chunk, x_chunk_lens) self.result_transcripts = [trans_best] + # update feat cache self.cached_feat = self.cached_feat[:, end - cached_feature_num:, :] + # return trans_best[0] elif "conformer" in self.model_type or "transformer" in self.model_type: try: logger.info( f"we will use the transformer like model : {self.model_type}" ) self.advance_decoding(is_finished) self.update_result() except Exception as e: logger.exception(e) else: raise Exception("invalid model name") + @paddle.no_grad() def decode_one_chunk(self, x_chunk, x_chunk_lens): - logger.info("start to decoce one chunk with deepspeech2 model") + """forward one chunk of frames + + Args: + x_chunk (np.ndarray): (B,T,D), audio frames. + x_chunk_lens ([type]): (B,), audio frame lens + + Returns: + logprob: posterior probability.
+ """ + logger.info("start to decoce one chunk for deepspeech2") input_names = self.am_predictor.get_input_names() audio_handle = self.am_predictor.get_input_handle(input_names[0]) audio_len_handle = self.am_predictor.get_input_handle(input_names[1]) @@ -365,24 +422,31 @@ class PaddleASRConnectionHanddler: self.decoder.next(output_chunk_probs, output_chunk_lens) trans_best, trans_beam = self.decoder.decode() - logger.info(f"decode one best result: {trans_best[0]}") + logger.info(f"decode one best result for deepspeech2: {trans_best[0]}") return trans_best[0] + @paddle.no_grad() def advance_decoding(self, is_finished=False): - logger.info("start to decode with advanced_decoding method") + logger.info("Conformer/Transformer: start to decode with advanced_decoding method") cfg = self.ctc_decode_config + + # cur chunk size, in decoding frame unit decoding_chunk_size = cfg.decoding_chunk_size + # using num of history chunks num_decoding_left_chunks = cfg.num_decoding_left_chunks - assert decoding_chunk_size > 0 + subsampling = self.model.encoder.embed.subsampling_rate context = self.model.encoder.embed.right_context + 1 - stride = subsampling * decoding_chunk_size - cached_feature_num = context - subsampling # processed chunk feature cached for next chunk - # decoding window for model + # processed chunk feature cached for next chunk + cached_feature_num = context - subsampling + # decoding stride, in audio frame unit + stride = subsampling * decoding_chunk_size + # decoding window, in audio frame unit decoding_window = (decoding_chunk_size - 1) * subsampling + context + if self.cached_feat is None: logger.info("no audio feat, please input more pcm data") return @@ -407,6 +471,7 @@ class PaddleASRConnectionHanddler: return None, None logger.info("start to do model forward") + # hist of chunks, in deocding frame unit required_cache_size = decoding_chunk_size * num_decoding_left_chunks outputs = [] @@ -423,8 +488,11 @@ class PaddleASRConnectionHanddler: for cur in range(0, num_frames - left_frames + 1, stride): end = min(cur + decoding_window, num_frames) + # global chunk_num self.chunk_num += 1 + # cur chunk chunk_xs = self.cached_feat[:, cur:end, :] + # forward chunk (y, self.subsampling_cache, self.elayers_output_cache, self.conformer_cnn_cache) = self.model.encoder.forward_chunk( chunk_xs, self.offset, required_cache_size, @@ -432,7 +500,7 @@ class PaddleASRConnectionHanddler: self.conformer_cnn_cache) outputs.append(y) - # update the offset + # update the global offset, in decoding frame unit self.offset += y.shape[1] ys = paddle.cat(outputs, 1) @@ -445,12 +513,15 @@ class PaddleASRConnectionHanddler: ctc_probs = self.model.ctc.log_softmax(ys) # (1, maxlen, vocab_size) ctc_probs = ctc_probs.squeeze(0) + # advance decoding self.searcher.search(ctc_probs, self.cached_feat.place) - + # get one best hyps self.hyps = self.searcher.get_one_best_hyps() + assert self.cached_feat.shape[0] == 1 assert end >= cached_feature_num + # advance cache of feat self.cached_feat = self.cached_feat[0, end - cached_feature_num:, :].unsqueeze(0) assert len( @@ -462,50 +533,79 @@ class PaddleASRConnectionHanddler: ) def update_result(self): + """Conformer/Transformer hyps to result. + """ logger.info("update the final result") hyps = self.hyps + + # output results and tokenids self.result_transcripts = [ self.text_feature.defeaturize(hyp) for hyp in hyps ] self.result_tokenids = [hyp for hyp in hyps] def get_result(self): + """return partial/ending asr result. + + Returns: + str: one best result of partial/ending. 
+        """
         if len(self.result_transcripts) > 0:
             return self.result_transcripts[0]
         else:
             return ''
 
     def get_word_time_stamp(self):
+        """return token timestamp result.
+
+        Returns:
+            list: list of {'w': token, 'bg': time, 'ed': time}
+        """
         return self.word_time_stamp
 
+    @paddle.no_grad()
     def rescoring(self):
-        if "deepspeech2online" in self.model_type or "deepspeech2offline" in self.model_type:
+        """Second-Pass Decoding,
+        only for conformer and transformer models.
+        """
+        if "deepspeech2" in self.model_type:
+            logger.info("deepspeech2 not support rescoring decoding.")
             return
-
-        logger.info("rescoring the final result")
+        if "attention_rescoring" != self.ctc_decode_config.decoding_method:
+            logger.info(f"decoding method not match: {self.ctc_decode_config.decoding_method}, need attention_rescoring")
             return
 
+        logger.info("rescoring the final result")
+
+        # last decoding for last audio
         self.searcher.finalize_search()
+        # update beam search results
         self.update_result()
 
         beam_size = self.ctc_decode_config.beam_size
         hyps = self.searcher.get_hyps()
         if hyps is None or len(hyps) == 0:
+            logger.info("No Hyps!")
             return
 
+        # rescore by decoder post probability
+        # assert len(hyps) == beam_size
+        # list of Tensor
         hyp_list = []
         for hyp in hyps:
            hyp_content = hyp[0]
            # Prevent the hyp is empty
            if len(hyp_content) == 0:
                hyp_content = (self.model.ctc.blank_id, )
+
            hyp_content = paddle.to_tensor(
                hyp_content, place=self.device, dtype=paddle.long)
            hyp_list.append(hyp_content)
-        hyps_pad = pad_sequence(hyp_list, True, self.model.ignore_id)
+
+        hyps_pad = pad_sequence(hyp_list, batch_first=True, padding_value=self.model.ignore_id)
         hyps_lens = paddle.to_tensor(
             [len(hyp[0]) for hyp in hyps], place=self.device,
             dtype=paddle.long)  # (beam_size,)
@@ -531,10 +631,12 @@ class PaddleASRConnectionHanddler:
             score = 0.0
             for j, w in enumerate(hyp[0]):
                 score += decoder_out[i][j][w]
+            # last decoder output token is `eos`, for the last decoder input token.
             score += decoder_out[i][len(hyp[0])][self.model.eos]
             # add ctc score (which in ln domain)
             score += hyp[1] * self.ctc_decode_config.ctc_weight
+
             if score > best_score:
                 best_score = score
                 best_index = i
@@ -542,47 +644,57 @@ class PaddleASRConnectionHanddler:
 
         # update the one best result
         # hyps stored the beam results and each fields is:
-        logger.info(f"best index: {best_index}")
+        logger.info(f"best hyp index: {best_index}")
         # logger.info(f'best result: {hyps[best_index]}')
         # the field of the hyps is:
+        ## asr results
         # hyps[0][0]: the sentence word-id in the vocab with a tuple
         # hyps[0][1]: the sentence decoding probability with all paths
+        ## timestamp
         # hyps[0][2]: viterbi_blank ending probability
-        # hyps[0][3]: viterbi_non_blank probability
+        # hyps[0][3]: viterbi_non_blank ending probability
         # hyps[0][4]: current_token_prob,
-        # hyps[0][5]: times_viterbi_blank,
-        # hyps[0][6]: times_titerbi_non_blank
+        # hyps[0][5]: times_viterbi_blank ending timestamp,
+        # hyps[0][6]: times_viterbi_non_blank ending timestamp.
self.hyps = [hyps[best_index][0]] + logger.info(f"best hyp ids: {self.hyps}") # update the hyps time stamp self.time_stamp = hyps[best_index][5] if hyps[best_index][2] > hyps[ best_index][3] else hyps[best_index][6] logger.info(f"time stamp: {self.time_stamp}") + # update one best result self.update_result() # update each word start and end time stamp - frame_shift_in_ms = self.model.encoder.embed.subsampling_rate * self.n_shift / self.sample_rate - logger.info(f"frame shift ms: {frame_shift_in_ms}") + # decoding frame to audio frame + frame_shift = self.model.encoder.embed.subsampling_rate + frame_shift_in_sec = frame_shift * (self.n_shift / self.sample_rate) + logger.info(f"frame shift sec: {frame_shift_in_sec}") + word_time_stamp = [] for idx, _ in enumerate(self.time_stamp): start = (self.time_stamp[idx - 1] + self.time_stamp[idx] ) / 2.0 if idx > 0 else 0 - start = start * frame_shift_in_ms + start = start * frame_shift_in_sec end = (self.time_stamp[idx] + self.time_stamp[idx + 1] ) / 2.0 if idx < len(self.time_stamp) - 1 else self.offset - end = end * frame_shift_in_ms + + end = end * frame_shift_in_sec word_time_stamp.append({ "w": self.result_transcripts[0][idx], "bg": start, "ed": end }) - # logger.info(f"{self.result_transcripts[0][idx]}, start: {start}, end: {end}") + # logger.info(f"{word_time_stamp[-1]}") + self.word_time_stamp = word_time_stamp logger.info(f"word time stamp: {self.word_time_stamp}") + class ASRServerExecutor(ASRExecutor): def __init__(self): super().__init__() @@ -610,6 +722,7 @@ class ASRServerExecutor(ASRExecutor): self.sample_rate = sample_rate sample_rate_str = '16k' if sample_rate == 16000 else '8k' tag = model_type + '-' + lang + '-' + sample_rate_str + if cfg_path is None or am_model is None or am_params is None: logger.info(f"Load the pretrained model, tag = {tag}") res_path = self._get_pretrained_path(tag) # wenetspeech_zh @@ -628,7 +741,7 @@ class ASRServerExecutor(ASRExecutor): self.am_model = os.path.abspath(am_model) self.am_params = os.path.abspath(am_params) self.res_path = os.path.dirname( - os.path.dirname(os.path.abspath(self.cfg_path))) + os.path.dirname(os.path.abspath(self.cfg_path))) logger.info(self.cfg_path) logger.info(self.am_model) @@ -639,7 +752,7 @@ class ASRServerExecutor(ASRExecutor): self.config.merge_from_file(self.cfg_path) with UpdateConfig(self.config): - if "deepspeech2online" in model_type or "deepspeech2offline" in model_type: + if "deepspeech2" in model_type: from paddlespeech.s2t.io.collator import SpeechCollator self.vocab = self.config.vocab_filepath self.config.decode.lang_model_path = os.path.join( @@ -655,6 +768,7 @@ class ASRServerExecutor(ASRExecutor): self.download_lm( lm_url, os.path.dirname(self.config.decode.lang_model_path), lm_md5) + elif "conformer" in model_type or "transformer" in model_type: logger.info("start to create the stream conformer asr engine") if self.config.spm_model_prefix: @@ -682,7 +796,8 @@ class ASRServerExecutor(ASRExecutor): ], f"we only support ctc_prefix_beam_search and attention_rescoring dedoding method, current decoding method is {self.config.decode.decoding_method}" else: raise Exception("wrong type") - if "deepspeech2online" in model_type or "deepspeech2offline" in model_type: + + if "deepspeech2" in model_type: # AM predictor logger.info("ASR engine start to init the am predictor") self.am_predictor_conf = am_predictor_conf @@ -719,6 +834,7 @@ class ASRServerExecutor(ASRExecutor): self.chunk_state_c_box = np.zeros( (self.config.num_rnn_layers, 1, self.config.rnn_layer_size), 
dtype=float32) + elif "conformer" in model_type or "transformer" in model_type: model_name = model_type[:model_type.rindex( '_')] # model_type: {model_name}_{dataset} @@ -737,277 +853,14 @@ class ASRServerExecutor(ASRExecutor): # update the ctc decoding self.searcher = CTCPrefixBeamSearch(self.config.decode) self.transformer_decode_reset() - - return True - - def reset_decoder_and_chunk(self): - """reset decoder and chunk state for an new audio - """ - if "deepspeech2online" in self.model_type or "deepspeech2offline" in self.model_type: - self.decoder.reset_decoder(batch_size=1) - # init state box, for new audio request - self.chunk_state_h_box = np.zeros( - (self.config.num_rnn_layers, 1, self.config.rnn_layer_size), - dtype=float32) - self.chunk_state_c_box = np.zeros( - (self.config.num_rnn_layers, 1, self.config.rnn_layer_size), - dtype=float32) - elif "conformer" in self.model_type or "transformer" in self.model_type: - self.transformer_decode_reset() - - def decode_one_chunk(self, x_chunk, x_chunk_lens, model_type: str): - """decode one chunk - - Args: - x_chunk (numpy.array): shape[B, T, D] - x_chunk_lens (numpy.array): shape[B] - model_type (str): online model type - - Returns: - str: one best result - """ - logger.info("start to decoce chunk by chunk") - if "deepspeech2online" in model_type: - input_names = self.am_predictor.get_input_names() - audio_handle = self.am_predictor.get_input_handle(input_names[0]) - audio_len_handle = self.am_predictor.get_input_handle( - input_names[1]) - h_box_handle = self.am_predictor.get_input_handle(input_names[2]) - c_box_handle = self.am_predictor.get_input_handle(input_names[3]) - - audio_handle.reshape(x_chunk.shape) - audio_handle.copy_from_cpu(x_chunk) - - audio_len_handle.reshape(x_chunk_lens.shape) - audio_len_handle.copy_from_cpu(x_chunk_lens) - - h_box_handle.reshape(self.chunk_state_h_box.shape) - h_box_handle.copy_from_cpu(self.chunk_state_h_box) - - c_box_handle.reshape(self.chunk_state_c_box.shape) - c_box_handle.copy_from_cpu(self.chunk_state_c_box) - - output_names = self.am_predictor.get_output_names() - output_handle = self.am_predictor.get_output_handle(output_names[0]) - output_lens_handle = self.am_predictor.get_output_handle( - output_names[1]) - output_state_h_handle = self.am_predictor.get_output_handle( - output_names[2]) - output_state_c_handle = self.am_predictor.get_output_handle( - output_names[3]) - - self.am_predictor.run() - - output_chunk_probs = output_handle.copy_to_cpu() - output_chunk_lens = output_lens_handle.copy_to_cpu() - self.chunk_state_h_box = output_state_h_handle.copy_to_cpu() - self.chunk_state_c_box = output_state_c_handle.copy_to_cpu() - - self.decoder.next(output_chunk_probs, output_chunk_lens) - trans_best, trans_beam = self.decoder.decode() - logger.info(f"decode one best result: {trans_best[0]}") - return trans_best[0] - - elif "conformer" in model_type or "transformer" in model_type: - try: - logger.info( - f"we will use the transformer like model : {self.model_type}" - ) - self.advanced_decoding(x_chunk, x_chunk_lens) - self.update_result() - - return self.result_transcripts[0] - except Exception as e: - logger.exception(e) else: - raise Exception("invalid model name") - - def advanced_decoding(self, xs: paddle.Tensor, x_chunk_lens): - logger.info("start to decode with advanced_decoding method") - encoder_out, encoder_mask = self.encoder_forward(xs) - ctc_probs = self.model.ctc.log_softmax( - encoder_out) # (1, maxlen, vocab_size) - ctc_probs = ctc_probs.squeeze(0) - 
self.searcher.search(ctc_probs, xs.place) - # update the one best result - self.hyps = self.searcher.get_one_best_hyps() - - # now we supprot ctc_prefix_beam_search and attention_rescoring - if "attention_rescoring" in self.config.decode.decoding_method: - self.rescoring(encoder_out, xs.place) - - def encoder_forward(self, xs): - logger.info("get the model out from the feat") - cfg = self.config.decode - decoding_chunk_size = cfg.decoding_chunk_size - num_decoding_left_chunks = cfg.num_decoding_left_chunks - - assert decoding_chunk_size > 0 - subsampling = self.model.encoder.embed.subsampling_rate - context = self.model.encoder.embed.right_context + 1 - stride = subsampling * decoding_chunk_size - - # decoding window for model - decoding_window = (decoding_chunk_size - 1) * subsampling + context - num_frames = xs.shape[1] - required_cache_size = decoding_chunk_size * num_decoding_left_chunks - - logger.info("start to do model forward") - outputs = [] - - # num_frames - context + 1 ensure that current frame can get context window - for cur in range(0, num_frames - context + 1, stride): - end = min(cur + decoding_window, num_frames) - chunk_xs = xs[:, cur:end, :] - (y, self.subsampling_cache, self.elayers_output_cache, - self.conformer_cnn_cache) = self.model.encoder.forward_chunk( - chunk_xs, self.offset, required_cache_size, - self.subsampling_cache, self.elayers_output_cache, - self.conformer_cnn_cache) - outputs.append(y) - self.offset += y.shape[1] - - ys = paddle.cat(outputs, 1) - masks = paddle.ones([1, ys.shape[1]], dtype=paddle.bool) - masks = masks.unsqueeze(1) - return ys, masks - - def rescoring(self, encoder_out, device): - logger.info("start to rescoring the hyps") - beam_size = self.config.decode.beam_size - hyps = self.searcher.get_hyps() - assert len(hyps) == beam_size - - hyp_list = [] - for hyp in hyps: - hyp_content = hyp[0] - # Prevent the hyp is empty - if len(hyp_content) == 0: - hyp_content = (self.model.ctc.blank_id, ) - hyp_content = paddle.to_tensor( - hyp_content, place=device, dtype=paddle.long) - hyp_list.append(hyp_content) - hyps_pad = pad_sequence(hyp_list, True, self.model.ignore_id) - hyps_lens = paddle.to_tensor( - [len(hyp[0]) for hyp in hyps], place=device, - dtype=paddle.long) # (beam_size,) - hyps_pad, _ = add_sos_eos(hyps_pad, self.model.sos, self.model.eos, - self.model.ignore_id) - hyps_lens = hyps_lens + 1 # Add at begining - - encoder_out = encoder_out.repeat(beam_size, 1, 1) - encoder_mask = paddle.ones( - (beam_size, 1, encoder_out.shape[1]), dtype=paddle.bool) - decoder_out, _ = self.model.decoder( - encoder_out, encoder_mask, hyps_pad, - hyps_lens) # (beam_size, max_hyps_len, vocab_size) - # ctc score in ln domain - decoder_out = paddle.nn.functional.log_softmax(decoder_out, axis=-1) - decoder_out = decoder_out.numpy() - - # Only use decoder score for rescoring - best_score = -float('inf') - best_index = 0 - # hyps is List[(Text=List[int], Score=float)], len(hyps)=beam_size - for i, hyp in enumerate(hyps): - score = 0.0 - for j, w in enumerate(hyp[0]): - score += decoder_out[i][j][w] - # last decoder output token is `eos`, for laste decoder input token. 
- score += decoder_out[i][len(hyp[0])][self.model.eos] - # add ctc score (which in ln domain) - score += hyp[1] * self.config.decode.ctc_weight - if score > best_score: - best_score = score - best_index = i - - # update the one best result - self.hyps = [hyps[best_index][0]] - return hyps[best_index][0] - - def transformer_decode_reset(self): - self.subsampling_cache = None - self.elayers_output_cache = None - self.conformer_cnn_cache = None - self.offset = 0 - # decoding reset - self.searcher.reset() - - def update_result(self): - logger.info("update the final result") - hyps = self.hyps - self.result_transcripts = [ - self.text_feature.defeaturize(hyp) for hyp in hyps - ] - self.result_tokenids = [hyp for hyp in hyps] - - def extract_feat(self, samples, sample_rate): - """extract feat - - Args: - samples (numpy.array): numpy.float32 - sample_rate (int): sample rate - - Returns: - x_chunk (numpy.array): shape[B, T, D] - x_chunk_lens (numpy.array): shape[B] - """ - - if "deepspeech2online" in self.model_type: - # pcm16 -> pcm 32 - samples = pcm2float(samples) - # read audio - speech_segment = SpeechSegment.from_pcm( - samples, sample_rate, transcript=" ") - # audio augment - self.collate_fn_test.augmentation.transform_audio(speech_segment) - - # extract speech feature - spectrum, transcript_part = self.collate_fn_test._speech_featurizer.featurize( - speech_segment, self.collate_fn_test.keep_transcription_text) - # CMVN spectrum - if self.collate_fn_test._normalizer: - spectrum = self.collate_fn_test._normalizer.apply(spectrum) - - # spectrum augment - audio = self.collate_fn_test.augmentation.transform_feature( - spectrum) - - audio_len = audio.shape[0] - audio = paddle.to_tensor(audio, dtype='float32') - # audio_len = paddle.to_tensor(audio_len) - audio = paddle.unsqueeze(audio, axis=0) - - x_chunk = audio.numpy() - x_chunk_lens = np.array([audio_len]) - - return x_chunk, x_chunk_lens - elif "conformer_online" in self.model_type: - - if sample_rate != self.sample_rate: - logger.info(f"audio sample rate {sample_rate} is not match," - "the model sample_rate is {self.sample_rate}") - logger.info(f"ASR Engine use the {self.model_type} to process") - logger.info("Create the preprocess instance") - preprocess_conf = self.config.preprocess_config - preprocess_args = {"train": False} - preprocessing = Transformation(preprocess_conf) - - logger.info("Read the audio file") - logger.info(f"audio shape: {samples.shape}") - # fbank - x_chunk = preprocessing(samples, **preprocess_args) - x_chunk_lens = paddle.to_tensor(x_chunk.shape[0]) - x_chunk = paddle.to_tensor( - x_chunk, dtype="float32").unsqueeze(axis=0) - logger.info( - f"process the audio feature success, feat shape: {x_chunk.shape}" - ) - return x_chunk, x_chunk_lens + raise ValueError(f"Not support: {model_type}") + + return True class ASREngine(BaseEngine): - """ASR server engine + """ASR server resource Args: metaclass: Defaults to Singleton. 
@@ -1015,7 +868,7 @@ class ASREngine(BaseEngine):
 
     def __init__(self):
         super(ASREngine, self).__init__()
-        logger.info("create the online asr engine instance")
+        logger.info("create the online asr engine resource instance")
 
     def init(self, config: dict) -> bool:
         """init engine resource
@@ -1026,17 +879,12 @@ class ASREngine(BaseEngine):
         Returns:
             bool: init failed or success
         """
-        self.input = None
-        self.output = ""
-        self.executor = ASRServerExecutor()
         self.config = config
+        self.executor = ASRServerExecutor()
+
         try:
-            if self.config.get("device", None):
-                self.device = self.config.device
-            else:
-                self.device = paddle.get_device()
-            logger.info(f"paddlespeech_server set the device: {self.device}")
-            paddle.set_device(self.device)
+            self.device = self.config.get("device", paddle.get_device())
+            paddle.set_device(self.device)
         except BaseException as e:
             logger.error(
                 f"Set device failed, please check if device '{self.device}' is already used and the parameter 'device' in the yaml file"
@@ -1045,6 +893,8 @@ class ASREngine(BaseEngine):
                 "If all GPU or XPU is used, you can set the server to 'cpu'")
             sys.exit(-1)
 
+        logger.info(f"paddlespeech_server set the device: {self.device}")
+
         if not self.executor._init_from_path(
                 model_type=self.config.model_type,
                 am_model=self.config.am_model,
@@ -1062,42 +912,11 @@ class ASREngine(BaseEngine):
         logger.info("Initialize ASR server engine successfully.")
         return True
 
-    def preprocess(self,
-                   samples,
-                   sample_rate,
-                   model_type="deepspeech2online_aishell-zh-16k"):
-        """preprocess
-
-        Args:
-            samples (numpy.array): numpy.float32
-            sample_rate (int): sample rate
-
-        Returns:
-            x_chunk (numpy.array): shape[B, T, D]
-            x_chunk_lens (numpy.array): shape[B]
-        """
-        # if "deepspeech" in model_type:
-        x_chunk, x_chunk_lens = self.executor.extract_feat(samples, sample_rate)
-        return x_chunk, x_chunk_lens
+    def preprocess(self, *args, **kwargs):
+        raise NotImplementedError("Online ASR engine does not use this.")
 
-    def run(self, x_chunk, x_chunk_lens, decoder_chunk_size=1):
-        """run online engine
-
-        Args:
-            x_chunk (numpy.array): shape[B, T, D]
-            x_chunk_lens (numpy.array): shape[B]
-            decoder_chunk_size(int)
-        """
-        self.output = self.executor.decode_one_chunk(x_chunk, x_chunk_lens,
-                                                     self.config.model_type)
+    def run(self, *args, **kwargs):
+        raise NotImplementedError("Online ASR engine does not use this.")
 
     def postprocess(self):
-        """postprocess
-        """
-        return self.output
-
-    def reset(self):
-        """reset engine decoder and inference state
-        """
-        self.executor.reset_decoder_and_chunk()
-        self.output = ""
+        raise NotImplementedError("Online ASR engine does not use this.")
 

From 9eb6565978f1b3432bf87a1e5c6a9c76aec1ebef Mon Sep 17 00:00:00 2001
From: TianYuan
Date: Mon, 23 May 2022 11:39:55 +0000
Subject: [PATCH 020/127] add aistudio courses' link, test=doc

---
 README.md    |  5 ++---
 README_cn.md | 15 +--------------
 2 files changed, 3 insertions(+), 17 deletions(-)

diff --git a/README.md b/README.md
index af3a5c4e4..ed60b8d69 100644
--- a/README.md
+++ b/README.md
@@ -26,12 +26,11 @@
       | Quick Start Streaming Server
       | Documents
       | Models List
-      | 
+      | AIStudio Courses
- - +------------------------------------------------------------------------------------ **PaddleSpeech** is an open-source toolkit on [PaddlePaddle](https://github.com/PaddlePaddle/Paddle) platform for a variety of critical tasks in speech and audio, with the state-of-art and influential models. diff --git a/README_cn.md b/README_cn.md index 744b00fae..3604f4748 100644 --- a/README_cn.md +++ b/README_cn.md @@ -24,25 +24,12 @@ | 快速使用流式服务 | 教程文档 | 模型列表 + | AIStudio 课程 ------------------------------------------------------------------------------------ - - - - - - - - **PaddleSpeech** 是基于飞桨 [PaddlePaddle](https://github.com/PaddlePaddle/Paddle) 的语音方向的开源模型库,用于语音和音频中的各种关键任务的开发,包含大量基于深度学习前沿和有影响力的模型,一些典型的应用示例如下: ##### 语音识别 From 7e8e5c1a4353ba5292f13a31483a0ea3b73b9eb0 Mon Sep 17 00:00:00 2001 From: TianYuan Date: Mon, 23 May 2022 12:10:37 +0000 Subject: [PATCH 021/127] update README, test=doc --- README.md | 5 +---- README_cn.md | 28 +++++++++++----------------- 2 files changed, 12 insertions(+), 21 deletions(-) diff --git a/README.md b/README.md index ed60b8d69..a43e21bd2 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,4 @@ ([简体中文](./README_cn.md)|English) - - -

@@ -21,7 +18,7 @@

- | Quick Start + Quick Start | Quick Start Server | Quick Start Streaming Server | Documents diff --git a/README_cn.md b/README_cn.md index 3604f4748..ed5c6a90d 100644 --- a/README_cn.md +++ b/README_cn.md @@ -18,13 +18,14 @@

@@ -156,11 +157,7 @@ ### 近期更新 - - -- 👑 2022.05.13: PaddleSpeech 发布 [PP-ASR](./docs/source/asr/PPASR_cn.md)流式语音识别系统、[PP-TTS](./docs/source/tts/PPTTS_cn.md)流式语音合成系统、[PP-VPR](docs/source/vpr/PPVPR_cn.md)全链路声纹识别系统 +- 👑 2022.05.13: PaddleSpeech 发布 [PP-ASR](./docs/source/asr/PPASR_cn.md) 流式语音识别系统、[PP-TTS](./docs/source/tts/PPTTS_cn.md) 流式语音合成系统、[PP-VPR](docs/source/vpr/PPVPR_cn.md) 全链路声纹识别系统 - 👏🏻 2022.05.06: PaddleSpeech Streaming Server 上线! 覆盖了语音识别(标点恢复、时间戳),和语音合成。 - 👏🏻 2022.05.06: PaddleSpeech Server 上线! 覆盖了声音分类、语音识别、语音合成、声纹识别,标点恢复。 - 👏🏻 2022.03.28: PaddleSpeech CLI 覆盖声音分类、语音识别、语音翻译(英译中)、语音合成,声纹验证。 @@ -169,7 +166,7 @@ ### 🔥 加入技术交流群获取入群福利 - - 3 日直播课链接: 深度解读 PP-TTS、PP-ASR、PP-VPR三项核心语音系统关键技术 + - 3 日直播课链接: 深度解读 PP-TTS、PP-ASR、PP-VPR 三项核心语音系统关键技术 - 20G 学习大礼包:视频课程、前沿论文与学习资料 微信扫描二维码关注公众号,点击“马上报名”填写问卷加入官方交流群,获得更高效的问题答疑,与各行各业开发者充分交流,期待您的加入。 @@ -178,9 +175,6 @@

- - - ## 安装 我们强烈建议用户在 **Linux** 环境下,*3.7* 以上版本的 *python* 上安装 PaddleSpeech。 @@ -262,27 +256,27 @@ paddlespeech_client cls --server_ip 127.0.0.1 --port 8090 --input input.wav ## 快速使用流式服务 -开发者可以尝试[流式ASR](./demos/streaming_asr_server/README.md)和 [流式TTS](./demos/streaming_tts_server/README.md)服务. +开发者可以尝试 [流式 ASR](./demos/streaming_asr_server/README.md) 和 [流式 TTS](./demos/streaming_tts_server/README.md) 服务. -**启动流式ASR服务** +**启动流式 ASR 服务** ``` paddlespeech_server start --config_file ./demos/streaming_asr_server/conf/application.yaml ``` -**访问流式ASR服务** +**访问流式 ASR 服务** ``` paddlespeech_client asr_online --server_ip 127.0.0.1 --port 8090 --input input_16k.wav ``` -**启动流式TTS服务** +**启动流式 TTS 服务** ``` paddlespeech_server start --config_file ./demos/streaming_tts_server/conf/tts_online_application.yaml ``` -**访问流式TTS服务** +**访问流式 TTS 服务** ``` paddlespeech_client tts_online --server_ip 127.0.0.1 --port 8092 --protocol http --input "您好,欢迎使用百度飞桨语音合成服务。" --output output.wav @@ -644,7 +638,7 @@ PaddleSpeech 的 **语音合成** 主要包含三个模块:文本前端、声 ## 参与 PaddleSpeech 的开发 -热烈欢迎您在[Discussions](https://github.com/PaddlePaddle/PaddleSpeech/discussions) 中提交问题,并在[Issues](https://github.com/PaddlePaddle/PaddleSpeech/issues) 中指出发现的 bug。此外,我们非常希望您参与到 PaddleSpeech 的开发中! +热烈欢迎您在 [Discussions](https://github.com/PaddlePaddle/PaddleSpeech/discussions) 中提交问题,并在 [Issues](https://github.com/PaddlePaddle/PaddleSpeech/issues) 中指出发现的 bug。此外,我们非常希望您参与到 PaddleSpeech 的开发中! ### 贡献者

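A note before the documentation patches that follow: the second-pass `rescoring()` added to `PaddleASRConnectionHanddler` in PATCH 019 above reduces to a small score fusion — for each CTC beam hypothesis, sum the attention decoder's log-probabilities over the hypothesis tokens, add the log-probability of emitting `eos` after the last token, and interpolate with the CTC search score. The sketch below distills just that fusion with plain numpy; the names and shapes are illustrative stand-ins (`decoder_out`: `(beam, max_len + 1, vocab)` log-softmax, `hyps`: `(token_ids, ctc_score)` pairs), not the engine's real API.

```python
import numpy as np

def fuse_scores(decoder_out, hyps, eos_id, ctc_weight=0.5):
    """Pick the hypothesis with the best fused decoder + CTC score."""
    best_score, best_index = -float("inf"), 0
    for i, (tokens, ctc_score) in enumerate(hyps):
        # decoder log-prob of each hypothesis token (teacher forcing)
        score = sum(decoder_out[i][j][w] for j, w in enumerate(tokens))
        # the decoder must also predict <eos> after the last token
        score += decoder_out[i][len(tokens)][eos_id]
        # interpolate with the CTC prefix-beam-search score (log domain)
        score += ctc_weight * ctc_score
        if score > best_score:
            best_score, best_index = score, i
    return best_index

# toy run: beam of 2 hypotheses over a 4-token vocab, eos id 3
rng = np.random.default_rng(0)
decoder_out = np.log(rng.dirichlet(np.ones(4), size=(2, 3)))
print(fuse_scores(decoder_out, [((1, 2), -1.0), ((2,), -0.5)], eos_id=3))
```

The returned index selects `hyps[best_index]`, whose token tuple and viterbi timestamps the patch then carries into `update_result()` and the word-level time stamps.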


From 9735109bdd6b3cb7d492f36eec88963ac3efcbc5 Mon Sep 17 00:00:00 2001
From: Jackwaterveg <87408988+Jackwaterveg@users.noreply.github.com>
Date: Mon, 23 May 2022 20:26:41 +0800
Subject: [PATCH 022/127] Update install_cn.md, test=doc

---
 docs/source/install_cn.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/source/install_cn.md b/docs/source/install_cn.md
index 55fef93d5..5a967f404 100644
--- a/docs/source/install_cn.md
+++ b/docs/source/install_cn.md
@@ -3,7 +3,7 @@
 `PaddleSpeech` 有三种安装方法。根据安装的难易程度,这三种方法可以分为 **简单**, **中等** 和 **困难**.
 | 方式 | 功能 | 支持系统 |
 | :--- | :----------------------------------------------------------- | :------------------ |
-| 简单 | (1) 使用 PaddleSpeech 的命令行功能.<br> (2) 在 Aistudio上体验 PaddleSpeech. | Linux, Mac(不支持M1芯片),Windows |
+| 简单 | (1) 使用 PaddleSpeech 的命令行功能.<br> (2) 在 Aistudio上体验 PaddleSpeech. | Linux, Mac(不支持M1芯片),Windows (安装详情查看[#1195](https://github.com/PaddlePaddle/PaddleSpeech/discussions/1195)) |
 | 中等 | 支持 PaddleSpeech 主要功能,比如使用已有 examples 中的模型和使用 PaddleSpeech 来训练自己的模型. | Linux |
 | 困难 | 支持 PaddleSpeech 的各项功能,包含结合kaldi使用 join ctc decoder 方式解码,训练语言模型,使用强制对齐等。并且你更能成为一名开发者! | Ubuntu |
 ## 先决条件

From dc2fa2e974fad9d73645e907aac9a706532efa70 Mon Sep 17 00:00:00 2001
From: Jackwaterveg <87408988+Jackwaterveg@users.noreply.github.com>
Date: Mon, 23 May 2022 20:29:22 +0800
Subject: [PATCH 023/127] test=doc

---
 docs/source/install.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/source/install.md b/docs/source/install.md
index 43cc784cc..e3ea74b27 100644
--- a/docs/source/install.md
+++ b/docs/source/install.md
@@ -4,7 +4,7 @@ There are 3 ways to use `PaddleSpeech`. According to the degree of difficulty, t
 
 | Way | Function | Support|
 |:---- |:----------------------------------------------------------- |:----|
-| Easy | (1) Use command-line functions of PaddleSpeech.<br> (2) Experience PaddleSpeech on Ai Studio. | Linux, Mac(not support M1 chip),Windows |
+| Easy | (1) Use command-line functions of PaddleSpeech.<br> (2) Experience PaddleSpeech on Ai Studio. | Linux, Mac(not support M1 chip),Windows (For more information about installation, see [#1195](https://github.com/PaddlePaddle/PaddleSpeech/discussions/1195)) |
 | Medium | Support major functions ,such as using the` ready-made `examples and using PaddleSpeech to train your model. | Linux |
 | Hard | Support full function of Paddlespeech, including using join ctc decoder with kaldi, training n-gram language model, Montreal-Forced-Aligner, and so on. And you are more able to be a developer! | Ubuntu |
 

From 327509951f0d864b7e50e482ad357283a8973ae0 Mon Sep 17 00:00:00 2001
From: TianYuan
Date: Tue, 24 May 2022 03:17:16 +0000
Subject: [PATCH 024/127] rm unused comment, test=tts

---
 paddlespeech/t2s/models/vits/flow.py              | 3 ---
 paddlespeech/t2s/models/vits/residual_coupling.py | 2 --
 2 files changed, 5 deletions(-)

diff --git a/paddlespeech/t2s/models/vits/flow.py b/paddlespeech/t2s/models/vits/flow.py
index 8726748e5..3c8f89356 100644
--- a/paddlespeech/t2s/models/vits/flow.py
+++ b/paddlespeech/t2s/models/vits/flow.py
@@ -252,9 +252,6 @@ class ConvFlow(nn.Layer):
             self.half_channels * (bins * 3 - 1),
             1, )
 
-        # self.proj.weight.data.zero_()
-        # self.proj.bias.data.zero_()
-
         weight = paddle.zeros(paddle.shape(self.proj.weight))
 
         self.proj.weight = paddle.create_parameter(
diff --git a/paddlespeech/t2s/models/vits/residual_coupling.py b/paddlespeech/t2s/models/vits/residual_coupling.py
index 8671462d8..c18beedd0 100644
--- a/paddlespeech/t2s/models/vits/residual_coupling.py
+++ b/paddlespeech/t2s/models/vits/residual_coupling.py
@@ -186,8 +186,6 @@ class ResidualAffineCouplingLayer(nn.Layer):
             hidden_channels,
             self.half_channels * 2,
             1, )
-        # self.proj.weight.data.zero_()
-        # self.proj.bias.data.zero_()
 
         weight = paddle.zeros(paddle.shape(self.proj.weight))
 

From c15278ed8063f23c04e74c355028d1434929b1ee Mon Sep 17 00:00:00 2001
From: Hui Zhang
Date: Tue, 24 May 2022 15:52:06 +0800
Subject: [PATCH 025/127] format

---
 demos/audio_searching/src/operations/load.py  |  5 +-
 demos/custom_streaming_asr/README.md          |  2 +-
 .../streaming_asr_server/websocket_client.py  |  2 -
 docs/source/asr/PPASR_cn.md                   |  2 -
 paddlespeech/cli/utils.py                     |  2 +-
 paddlespeech/s2t/io/sampler.py                |  2 +-
 paddlespeech/server/engine/acs/__init__.py    | 13 +++++
 .../server/engine/acs/python/__init__.py      | 13 +++++
 .../server/engine/asr/online/asr_engine.py    | 54 +++++++++----------
 paddlespeech/server/restful/api.py            |  2 +-
 paddlespeech/server/utils/audio_handler.py    |  2 +-
 paddlespeech/server/utils/buffer.py           |  7 +--
 .../t2s/exps/speedyspeech/synthesize_e2e.py   |  5 +-
 paddlespeech/t2s/exps/speedyspeech/train.py   |  5 +-
 .../t2s/modules/transformer/repeat.py         |  2 +-
 setup.py                                      |  8 +--
 tests/unit/cli/aishell_test_prepare.py        |  4 +-
 17 files changed, 78 insertions(+), 52 deletions(-)

diff --git a/demos/audio_searching/src/operations/load.py b/demos/audio_searching/src/operations/load.py
index d1ea00576..0d9edb784 100644
--- a/demos/audio_searching/src/operations/load.py
+++ b/demos/audio_searching/src/operations/load.py
@@ -26,9 +26,8 @@ def get_audios(path):
     """
     supported_formats = [".wav", ".mp3", ".ogg", ".flac", ".m4a"]
     return [
-        item
-        for sublist in [[os.path.join(dir, file) for file in files]
-                        for dir, _, files in list(os.walk(path))]
+        item for sublist in [[os.path.join(dir, file) for file in files]
+                             for dir, _, files in list(os.walk(path))]
         for item in sublist if os.path.splitext(item)[1] in supported_formats
     ]
 
diff --git a/demos/custom_streaming_asr/README.md b/demos/custom_streaming_asr/README.md
index 
aa28d502f..d8cd37a7e 100644 --- a/demos/custom_streaming_asr/README.md +++ b/demos/custom_streaming_asr/README.md @@ -62,4 +62,4 @@ I0513 10:58:13.884493 41768 feature_cache.h:52] set finished I0513 10:58:24.247171 41768 paddle_nnet.h:76] Tensor neml: 10240 I0513 10:58:24.247249 41768 paddle_nnet.h:76] Tensor neml: 10240 LOG ([5.5.544~2-f21d7]:main():decoder/recognizer_test_main.cc:90) the result of case_10 is 五月十二日二十二点三十六分加班打车回家四十一元 -``` \ No newline at end of file +``` diff --git a/demos/streaming_asr_server/websocket_client.py b/demos/streaming_asr_server/websocket_client.py index 8a4fe330a..8e1f19a58 100644 --- a/demos/streaming_asr_server/websocket_client.py +++ b/demos/streaming_asr_server/websocket_client.py @@ -13,9 +13,7 @@ # limitations under the License. #!/usr/bin/python # -*- coding: UTF-8 -*- - # script for calc RTF: grep -rn RTF log.txt | awk '{print $NF}' | awk -F "=" '{sum += $NF} END {print "all time",sum, "audio num", NR, "RTF", sum/NR}' - import argparse import asyncio import codecs diff --git a/docs/source/asr/PPASR_cn.md b/docs/source/asr/PPASR_cn.md index 82b1c1d37..2e3f1cd97 100644 --- a/docs/source/asr/PPASR_cn.md +++ b/docs/source/asr/PPASR_cn.md @@ -92,5 +92,3 @@ server 的 demo: [streaming_asr_server](https://github.com/PaddlePaddle/Paddle ## 4. 快速开始 关于如果使用 PP-ASR,可以看这里的 [install](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/docs/source/install_cn.md),其中提供了 **简单**、**中等**、**困难** 三种安装方式。如果想体验 paddlespeech 的推理功能,可以用 **简单** 安装方式。 - - diff --git a/paddlespeech/cli/utils.py b/paddlespeech/cli/utils.py index 82d40c8bc..e7b499f72 100644 --- a/paddlespeech/cli/utils.py +++ b/paddlespeech/cli/utils.py @@ -24,11 +24,11 @@ from typing import Any from typing import Dict import paddle +import paddleaudio import requests import yaml from paddle.framework import load -import paddleaudio from . import download from .entry import commands try: diff --git a/paddlespeech/s2t/io/sampler.py b/paddlespeech/s2t/io/sampler.py index 89752bb9f..ac55af123 100644 --- a/paddlespeech/s2t/io/sampler.py +++ b/paddlespeech/s2t/io/sampler.py @@ -51,7 +51,7 @@ def _batch_shuffle(indices, batch_size, epoch, clipped=False): """ rng = np.random.RandomState(epoch) shift_len = rng.randint(0, batch_size - 1) - batch_indices = list(zip(*[iter(indices[shift_len:])] * batch_size)) + batch_indices = list(zip(* [iter(indices[shift_len:])] * batch_size)) rng.shuffle(batch_indices) batch_indices = [item for batch in batch_indices for item in batch] assert clipped is False diff --git a/paddlespeech/server/engine/acs/__init__.py b/paddlespeech/server/engine/acs/__init__.py index e69de29bb..97043fd7b 100644 --- a/paddlespeech/server/engine/acs/__init__.py +++ b/paddlespeech/server/engine/acs/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
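One idiom in the `sampler.py` hunk of this format patch deserves a gloss, since the reflow makes it even harder to read: `zip(*[iter(seq)] * batch_size)` builds `batch_size` references to a single shared iterator, so `zip` pulls consecutive items into non-overlapping batches and silently drops any incomplete tail. A self-contained illustration (the variable names are mine, not the patch's):

```python
indices = list(range(10))
batch_size = 3

# one iterator, referenced three times: zip() draws from it round-robin,
# so each tuple holds the next `batch_size` consecutive indices
it = iter(indices)
batches = list(zip(*[it] * batch_size))
print(batches)  # [(0, 1, 2), (3, 4, 5), (6, 7, 8)] -- index 9 is clipped
```

The surrounding `_batch_shuffle` code then shuffles these tuples and flattens them back into a single index list.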
diff --git a/paddlespeech/server/engine/acs/python/__init__.py b/paddlespeech/server/engine/acs/python/__init__.py index e69de29bb..97043fd7b 100644 --- a/paddlespeech/server/engine/acs/python/__init__.py +++ b/paddlespeech/server/engine/acs/python/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/paddlespeech/server/engine/asr/online/asr_engine.py b/paddlespeech/server/engine/asr/online/asr_engine.py index 3b88cabb4..70bfcfb66 100644 --- a/paddlespeech/server/engine/asr/online/asr_engine.py +++ b/paddlespeech/server/engine/asr/online/asr_engine.py @@ -153,8 +153,7 @@ class PaddleASRConnectionHanddler: spectrum = self.collate_fn_test._normalizer.apply(spectrum) # spectrum augment - feat = self.collate_fn_test.augmentation.transform_feature( - spectrum) + feat = self.collate_fn_test.augmentation.transform_feature(spectrum) # audio_len is frame num frame_num = feat.shape[0] @@ -189,14 +188,16 @@ class PaddleASRConnectionHanddler: assert samples.ndim == 1 self.num_samples += samples.shape[0] - logger.info(f"This package receive {samples.shape[0]} pcm data. Global samples:{self.num_samples}") + logger.info( + f"This package receive {samples.shape[0]} pcm data. 
Global samples:{self.num_samples}" + ) # self.reamined_wav stores all the samples, # include the original remained_wav and this package samples if self.remained_wav is None: self.remained_wav = samples else: - assert self.remained_wav.ndim == 1 # (T,) + assert self.remained_wav.ndim == 1 # (T,) self.remained_wav = np.concatenate([self.remained_wav, samples]) logger.info( f"The concatenation of remain and now audio samples length is: {self.remained_wav.shape}" @@ -216,8 +217,8 @@ class PaddleASRConnectionHanddler: if self.cached_feat is None: self.cached_feat = x_chunk else: - assert (len(x_chunk.shape) == 3) # (B,T,D) - assert (len(self.cached_feat.shape) == 3) # (B,T,D) + assert (len(x_chunk.shape) == 3) # (B,T,D) + assert (len(self.cached_feat.shape) == 3) # (B,T,D) self.cached_feat = paddle.concat( [self.cached_feat, x_chunk], axis=1) @@ -234,18 +235,16 @@ class PaddleASRConnectionHanddler: # update remained wav self.remained_wav = self.remained_wav[self.n_shift * num_frames:] - logger.info( f"process the audio feature success, the cached feat shape: {self.cached_feat.shape}" ) logger.info( f"After extract feat, the cached remain the audio samples: {self.remained_wav.shape}" ) - logger.info(f"global samples: {self.num_samples}") + logger.info(f"global samples: {self.num_samples}") logger.info(f"global frames: {self.num_frames}") else: - raise ValueError(f"not supported: {self.model_type}") - + raise ValueError(f"not supported: {self.model_type}") def reset(self): if "deepspeech2" in self.model_type: @@ -263,12 +262,11 @@ class PaddleASRConnectionHanddler: # global sample and frame step self.num_samples = 0 self.num_frames = 0 - + # cache for audio and feat self.remained_wav = None self.cached_feat = None - # partial/ending decoding results self.result_transcripts = [''] @@ -280,17 +278,16 @@ class PaddleASRConnectionHanddler: self.conformer_cnn_cache = None self.encoder_out = None # conformer decoding state - self.chunk_num = 0 # globa decoding chunk num - self.offset = 0 # global offset in decoding frame unit + self.chunk_num = 0 # globa decoding chunk num + self.offset = 0 # global offset in decoding frame unit self.hyps = [] - + # token timestamp result self.word_time_stamp = [] # one best timestamp viterbi prob is large. self.time_stamp = [] - def decode(self, is_finished=False): """advance decoding @@ -307,7 +304,7 @@ class PaddleASRConnectionHanddler: decoding_chunk_size = 1 # decoding chunk size = 1. 
int decoding frame unit context = 7 # context=7, in audio frame unit subsampling = 4 # subsampling=4, in audio frame unit - + cached_feature_num = context - subsampling # decoding window for model, in audio frame unit decoding_window = (decoding_chunk_size - 1) * subsampling + context @@ -373,7 +370,6 @@ class PaddleASRConnectionHanddler: else: raise Exception("invalid model name") - @paddle.no_grad() def decode_one_chunk(self, x_chunk, x_chunk_lens): """forward one chunk frames @@ -425,10 +421,11 @@ class PaddleASRConnectionHanddler: logger.info(f"decode one best result for deepspeech2: {trans_best[0]}") return trans_best[0] - @paddle.no_grad() def advance_decoding(self, is_finished=False): - logger.info("Conformer/Transformer: start to decode with advanced_decoding method") + logger.info( + "Conformer/Transformer: start to decode with advanced_decoding method" + ) cfg = self.ctc_decode_config # cur chunk size, in decoding frame unit @@ -563,7 +560,6 @@ class PaddleASRConnectionHanddler: """ return self.word_time_stamp - @paddle.no_grad() def rescoring(self): """Second-Pass Decoding, @@ -572,9 +568,11 @@ class PaddleASRConnectionHanddler: if "deepspeech2" in self.model_type: logger.info("deepspeech2 not support rescoring decoding.") return - + if "attention_rescoring" != self.ctc_decode_config.decoding_method: - logger.info(f"decoding method not match: {self.ctc_decode_config.decoding_method}, need attention_rescoring") + logger.info( + f"decoding method not match: {self.ctc_decode_config.decoding_method}, need attention_rescoring" + ) return logger.info("rescoring the final result") @@ -605,7 +603,8 @@ class PaddleASRConnectionHanddler: hyp_content, place=self.device, dtype=paddle.long) hyp_list.append(hyp_content) - hyps_pad = pad_sequence(hyp_list, batch_first=True, padding_value=self.model.ignore_id) + hyps_pad = pad_sequence( + hyp_list, batch_first=True, padding_value=self.model.ignore_id) hyps_lens = paddle.to_tensor( [len(hyp[0]) for hyp in hyps], place=self.device, dtype=paddle.long) # (beam_size,) @@ -689,12 +688,11 @@ class PaddleASRConnectionHanddler: "ed": end }) # logger.info(f"{word_time_stamp[-1]}") - + self.word_time_stamp = word_time_stamp logger.info(f"word time stamp: {self.word_time_stamp}") - class ASRServerExecutor(ASRExecutor): def __init__(self): super().__init__() @@ -741,7 +739,7 @@ class ASRServerExecutor(ASRExecutor): self.am_model = os.path.abspath(am_model) self.am_params = os.path.abspath(am_params) self.res_path = os.path.dirname( - os.path.dirname(os.path.abspath(self.cfg_path))) + os.path.dirname(os.path.abspath(self.cfg_path))) logger.info(self.cfg_path) logger.info(self.am_model) @@ -855,7 +853,7 @@ class ASRServerExecutor(ASRExecutor): self.transformer_decode_reset() else: raise ValueError(f"Not support: {model_type}") - + return True diff --git a/paddlespeech/server/restful/api.py b/paddlespeech/server/restful/api.py index 1c2dd2814..9722c2614 100644 --- a/paddlespeech/server/restful/api.py +++ b/paddlespeech/server/restful/api.py @@ -17,12 +17,12 @@ from typing import List from fastapi import APIRouter from paddlespeech.cli.log import logger +from paddlespeech.server.restful.acs_api import router as acs_router from paddlespeech.server.restful.asr_api import router as asr_router from paddlespeech.server.restful.cls_api import router as cls_router from paddlespeech.server.restful.text_api import router as text_router from paddlespeech.server.restful.tts_api import router as tts_router from paddlespeech.server.restful.vector_api import router as 
vec_router -from paddlespeech.server.restful.acs_api import router as acs_router _router = APIRouter() diff --git a/paddlespeech/server/utils/audio_handler.py b/paddlespeech/server/utils/audio_handler.py index baa7b9343..e3d90d469 100644 --- a/paddlespeech/server/utils/audio_handler.py +++ b/paddlespeech/server/utils/audio_handler.py @@ -248,7 +248,7 @@ class ASRHttpHandler: } res = requests.post(url=self.url, data=json.dumps(data)) - + return res.json() diff --git a/paddlespeech/server/utils/buffer.py b/paddlespeech/server/utils/buffer.py index f56db752d..20cd3cf62 100644 --- a/paddlespeech/server/utils/buffer.py +++ b/paddlespeech/server/utils/buffer.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. + class Frame(object): """Represents a "frame" of audio data.""" @@ -45,7 +46,7 @@ class ChunkBuffer(object): self.shift_ms = shift_ms self.sample_rate = sample_rate self.sample_width = sample_width # int16 = 2; float32 = 4 - + self.window_sec = float((self.window_n - 1) * self.shift_ms + self.window_ms) / 1000.0 self.shift_sec = float(self.shift_n * self.shift_ms / 1000.0) @@ -77,8 +78,8 @@ class ChunkBuffer(object): offset = 0 while offset + self.window_bytes <= len(audio): - yield Frame(audio[offset:offset + self.window_bytes], self.timestamp, - self.window_sec) + yield Frame(audio[offset:offset + self.window_bytes], + self.timestamp, self.window_sec) self.timestamp += self.shift_sec offset += self.shift_bytes diff --git a/paddlespeech/t2s/exps/speedyspeech/synthesize_e2e.py b/paddlespeech/t2s/exps/speedyspeech/synthesize_e2e.py index 252ac9326..644ec250d 100644 --- a/paddlespeech/t2s/exps/speedyspeech/synthesize_e2e.py +++ b/paddlespeech/t2s/exps/speedyspeech/synthesize_e2e.py @@ -176,7 +176,10 @@ def main(): parser.add_argument( "--ngpu", type=int, default=1, help="if ngpu == 0, use cpu or xpu.") parser.add_argument( - "--nxpu", type=int, default=0, help="if nxpu == 0 and ngpu == 0, use cpu.") + "--nxpu", + type=int, + default=0, + help="if nxpu == 0 and ngpu == 0, use cpu.") args, _ = parser.parse_known_args() diff --git a/paddlespeech/t2s/exps/speedyspeech/train.py b/paddlespeech/t2s/exps/speedyspeech/train.py index d4cfe3488..7b422e64f 100644 --- a/paddlespeech/t2s/exps/speedyspeech/train.py +++ b/paddlespeech/t2s/exps/speedyspeech/train.py @@ -188,7 +188,10 @@ def main(): parser.add_argument("--dev-metadata", type=str, help="dev data.") parser.add_argument("--output-dir", type=str, help="output dir.") parser.add_argument( - "--nxpu", type=int, default=0, help="if nxpu == 0 and ngpu == 0, use cpu.") + "--nxpu", + type=int, + default=0, + help="if nxpu == 0 and ngpu == 0, use cpu.") parser.add_argument( "--ngpu", type=int, default=1, help="if ngpu == 0, use cpu or xpu") diff --git a/paddlespeech/t2s/modules/transformer/repeat.py b/paddlespeech/t2s/modules/transformer/repeat.py index 2073a78b9..1e946adf7 100644 --- a/paddlespeech/t2s/modules/transformer/repeat.py +++ b/paddlespeech/t2s/modules/transformer/repeat.py @@ -36,4 +36,4 @@ def repeat(N, fn): Returns: MultiSequential: Repeated model instance. 
""" - return MultiSequential(*[fn(n) for n in range(N)]) + return MultiSequential(* [fn(n) for n in range(N)]) diff --git a/setup.py b/setup.py index ad353d42b..657de6c5f 100644 --- a/setup.py +++ b/setup.py @@ -98,7 +98,6 @@ requirements = { } - def check_call(cmd: str, shell=False, executable=None): try: sp.check_call( @@ -112,12 +111,13 @@ def check_call(cmd: str, shell=False, executable=None): file=sys.stderr) raise e + def check_output(cmd: str, shell=False): try: out_bytes = sp.check_output(cmd.split()) except sp.CalledProcessError as e: - out_bytes = e.output # Output generated before error - code = e.returncode # Return code + out_bytes = e.output # Output generated before error + code = e.returncode # Return code print( f"{__file__}:{inspect.currentframe().f_lineno}: CMD: {cmd}, Error:", out_bytes, @@ -146,6 +146,7 @@ def _remove(files: str): for f in files: f.unlink() + ################################# Install ################################## @@ -308,6 +309,5 @@ setup_info = dict( ] }) - with version_info(): setup(**setup_info) diff --git a/tests/unit/cli/aishell_test_prepare.py b/tests/unit/cli/aishell_test_prepare.py index 5088d7a48..ed542c571 100644 --- a/tests/unit/cli/aishell_test_prepare.py +++ b/tests/unit/cli/aishell_test_prepare.py @@ -20,7 +20,6 @@ of each audio file in the data set. """ import argparse import codecs -import json import os from pathlib import Path @@ -89,7 +88,7 @@ def create_manifest(data_dir, manifest_path_prefix): duration = float(len(audio_data) / samplerate) text = transcript_dict[audio_id] json_lines.append(audio_path) - reference_lines.append(str(total_num+1) + "\t" + text) + reference_lines.append(str(total_num + 1) + "\t" + text) total_sec += duration total_text += len(text) @@ -106,6 +105,7 @@ def create_manifest(data_dir, manifest_path_prefix): manifest_dir = os.path.dirname(manifest_path_prefix) + def prepare_dataset(url, md5sum, target_dir, manifest_path=None): """Download, unpack and create manifest file.""" data_dir = os.path.join(target_dir, 'data_aishell') From daadec0c63975ee7bdb0893411325d57c4534935 Mon Sep 17 00:00:00 2001 From: Yang Zhou Date: Tue, 24 May 2022 16:23:52 +0800 Subject: [PATCH 026/127] add custom asr script --- demos/custom_streaming_asr/README.md | 4 +- demos/custom_streaming_asr/README_cn.md | 2 + speechx/CMakeLists.txt | 12 +- speechx/examples/custom_asr/README.md | 32 ++ .../local/compile_lexicon_token_fst.sh | 89 +++++ .../custom_asr/local/mk_slot_graph.sh | 74 ++++ .../custom_asr/local/mk_tlg_with_slot.sh | 61 +++ .../custom_asr/local/train_lm_with_slot.sh | 55 +++ speechx/examples/custom_asr/path.sh | 17 + speechx/examples/custom_asr/run.sh | 88 ++++ speechx/examples/custom_asr/utils | 1 + speechx/speechx/kaldi/CMakeLists.txt | 4 + speechx/speechx/kaldi/fstbin/CMakeLists.txt | 15 + .../kaldi}/fstbin/fstaddselfloops.cc | 0 .../kaldi}/fstbin/fstdeterminizestar.cc | 0 .../kaldi}/fstbin/fstisstochastic.cc | 0 .../kaldi}/fstbin/fstminimizeencoded.cc | 0 .../kaldi}/fstbin/fsttablecompose.cc | 0 speechx/speechx/kaldi/fstext/CMakeLists.txt | 2 +- speechx/speechx/kaldi/lm/CMakeLists.txt | 6 + speechx/speechx/kaldi/lm/arpa-file-parser.cc | 281 +++++++++++++ speechx/speechx/kaldi/lm/arpa-file-parser.h | 146 +++++++ speechx/speechx/kaldi/lm/arpa-lm-compiler.cc | 377 ++++++++++++++++++ speechx/speechx/kaldi/lm/arpa-lm-compiler.h | 65 +++ .../kaldi}/lmbin/CMakeLists.txt | 3 +- .../kaldi}/lmbin/arpa2fst.cc | 0 26 files changed, 1328 insertions(+), 6 deletions(-) create mode 100644 speechx/examples/custom_asr/README.md 
 create mode 100755 speechx/examples/custom_asr/local/compile_lexicon_token_fst.sh
 create mode 100755 speechx/examples/custom_asr/local/mk_slot_graph.sh
 create mode 100755 speechx/examples/custom_asr/local/mk_tlg_with_slot.sh
 create mode 100755 speechx/examples/custom_asr/local/train_lm_with_slot.sh
 create mode 100644 speechx/examples/custom_asr/path.sh
 create mode 100644 speechx/examples/custom_asr/run.sh
 create mode 120000 speechx/examples/custom_asr/utils
 create mode 100644 speechx/speechx/kaldi/fstbin/CMakeLists.txt
 rename speechx/{tools => speechx/kaldi}/fstbin/fstaddselfloops.cc (100%)
 rename speechx/{tools => speechx/kaldi}/fstbin/fstdeterminizestar.cc (100%)
 rename speechx/{tools => speechx/kaldi}/fstbin/fstisstochastic.cc (100%)
 rename speechx/{tools => speechx/kaldi}/fstbin/fstminimizeencoded.cc (100%)
 rename speechx/{tools => speechx/kaldi}/fstbin/fsttablecompose.cc (100%)
 create mode 100644 speechx/speechx/kaldi/lm/CMakeLists.txt
 create mode 100644 speechx/speechx/kaldi/lm/arpa-file-parser.cc
 create mode 100644 speechx/speechx/kaldi/lm/arpa-file-parser.h
 create mode 100644 speechx/speechx/kaldi/lm/arpa-lm-compiler.cc
 create mode 100644 speechx/speechx/kaldi/lm/arpa-lm-compiler.h
 rename speechx/{tools => speechx/kaldi}/lmbin/CMakeLists.txt (64%)
 rename speechx/{tools => speechx/kaldi}/lmbin/arpa2fst.cc (100%)

diff --git a/demos/custom_streaming_asr/README.md b/demos/custom_streaming_asr/README.md
index aa28d502f..74af59a77 100644
--- a/demos/custom_streaming_asr/README.md
+++ b/demos/custom_streaming_asr/README.md
@@ -7,6 +7,8 @@
 
 In some cases, we need to recognize the specific rare words with high accuracy. this demo is customized for expense account, which need to recognize rare address.
 
+The scripts are in PaddleSpeech/speechx/examples/custom_asr.
+
 * G with slot: 打车到 "address_slot"。
 ![](https://ai-studio-static-online.cdn.bcebos.com/28d9ef132a7f47a895a65ae9e5c4f55b8f472c9f3dd24be8a2e66e0b88b173a4)
 
@@ -62,4 +64,4 @@ I0513 10:58:13.884493 41768 feature_cache.h:52] set finished
 I0513 10:58:24.247171 41768 paddle_nnet.h:76] Tensor neml: 10240
 I0513 10:58:24.247249 41768 paddle_nnet.h:76] Tensor neml: 10240
 LOG ([5.5.544~2-f21d7]:main():decoder/recognizer_test_main.cc:90) the result of case_10 is 五月十二日二十二点三十六分加班打车回家四十一元
-```
\ No newline at end of file
+```
diff --git a/demos/custom_streaming_asr/README_cn.md b/demos/custom_streaming_asr/README_cn.md
index ffbf682fb..5c0f7e89c 100644
--- a/demos/custom_streaming_asr/README_cn.md
+++ b/demos/custom_streaming_asr/README_cn.md
@@ -6,6 +6,8 @@
 
 这个 demo 是打车报销单的场景识别,需要识别一些稀有的地名,可以通过如下操作实现。
 
+相关脚本:PaddleSpeech/speechx/examples/custom_asr
+
 * G with slot: 打车到 "address_slot"。
 ![](https://ai-studio-static-online.cdn.bcebos.com/28d9ef132a7f47a895a65ae9e5c4f55b8f472c9f3dd24be8a2e66e0b88b173a4)
 
diff --git a/speechx/CMakeLists.txt b/speechx/CMakeLists.txt
index 98d9e6374..db5c3cc6f 100644
--- a/speechx/CMakeLists.txt
+++ b/speechx/CMakeLists.txt
@@ -57,7 +57,7 @@ include(gtest)
 include(absl)
 
 # libsndfile
-include(libsndfile)
+#include(libsndfile)
 
 # boost
 # include(boost) # not work
@@ -73,9 +73,17 @@ find_package(Eigen3 REQUIRED)
 # Kenlm
 include(kenlm)
 add_dependencies(kenlm eigen boost)
+#set(kenlm_install_dir $(fc_patch)/kenlm-build)
+#link_directories(${Kenlm_install_dir}/lib)
+#include_directories(${fc_patch}/kenlm-src)
 
 #openblas
-include(openblas)
+#include(openblas)
+set(OpenBLAS_INSTALL_PREFIX ${fc_patch}/openblas-install)
+link_directories(${OpenBLAS_INSTALL_PREFIX}/lib)
+include_directories(${OpenBLAS_INSTALL_PREFIX}/include)
+
+
 # openfst
 include(openfst)
 
diff --git a/speechx/examples/custom_asr/README.md b/speechx/examples/custom_asr/README.md
new file mode 100644
index 000000000..bfc071cb9
--- /dev/null
+++ b/speechx/examples/custom_asr/README.md
@@ -0,0 +1,32 @@
+# Customized Automatic Speech Recognition
+
+## Introduction
+These scripts are a tutorial that shows how to make your own decoding graph.
+
+e.g.:
+* G with slot: 打车到 "address_slot"。
+![](https://ai-studio-static-online.cdn.bcebos.com/28d9ef132a7f47a895a65ae9e5c4f55b8f472c9f3dd24be8a2e66e0b88b173a4)
+
+* this is the address slot WFST; you can add the addresses that you want to recognize.
+![](https://ai-studio-static-online.cdn.bcebos.com/47c89100ef8c465bac733605ffc53d76abefba33d62f4d818d351f8cea3c8fe2)
+
+* after the replace operation, G = fstreplace(G_with_slot, address_slot), we get the customized graph.
+![](https://ai-studio-static-online.cdn.bcebos.com/60a3095293044f10b73039ab10c7950d139a6717580a44a3ba878c6e74de402b)
+
+These operations are in the scripts; please check them out. We will launch more detailed scripts.
+
+## How to run
+
+```
+bash run.sh
+```
+
+## Results
+
+### CTC WFST
+
+```
+Overall -> 1.23 % N=1134 C=1126 S=6 D=2 I=6
+Mandarin -> 1.24 % N=1132 C=1124 S=6 D=2 I=6
+English -> 0.00 % N=2 C=2 S=0 D=0 I=0
+```
\ No newline at end of file
diff --git a/speechx/examples/custom_asr/local/compile_lexicon_token_fst.sh b/speechx/examples/custom_asr/local/compile_lexicon_token_fst.sh
new file mode 100755
index 000000000..8411f7ed6
--- /dev/null
+++ b/speechx/examples/custom_asr/local/compile_lexicon_token_fst.sh
@@ -0,0 +1,89 @@
+#!/bin/bash
+# Copyright 2015 Yajie Miao (Carnegie Mellon University)
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
+# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
+# MERCHANTABLITY OR NON-INFRINGEMENT.
+# See the Apache 2 License for the specific language governing permissions and
+# limitations under the License.
+
+# This script compiles the lexicon and CTC tokens into FSTs. FST compiling slightly differs between the
+# phoneme and character-based lexicons.
+set -eo pipefail
+. utils/parse_options.sh
+
+if [ $# -ne 3 ]; then
+  echo "usage: utils/fst/compile_lexicon_token_fst.sh <dict-src-dir> <tmp-dir> <lang-dir>"
+  echo "e.g.: utils/fst/compile_lexicon_token_fst.sh data/local/dict data/local/lang_tmp data/lang"
+  echo "<dict-src-dir> should contain the following files:"
+  echo "lexicon.txt lexicon_numbers.txt units.txt"
+  echo "options: "
+  exit 1;
+fi
+
+srcdir=$1
+tmpdir=$2
+dir=$3
+mkdir -p $dir $tmpdir
+
+[ -f path.sh ] && . ./path.sh
+
+cp $srcdir/units.txt $dir
+
+# Add probabilities to lexicon entries. There is in fact no point of doing this here since all the entries have 1.0.
+# But utils/make_lexicon_fst.pl requires a probabilistic version, so we just leave it as it is.
+perl -ape 's/(\S+\s+)(.+)/${1}1.0\t$2/;' < $srcdir/lexicon.txt > $tmpdir/lexiconp.txt || exit 1;
+
+# Add disambiguation symbols to the lexicon. This is necessary for determinizing the composition of L.fst and G.fst.
+# Without these symbols, determinization will fail.
+# default first disambiguation is #1
+ndisambig=`utils/fst/add_lex_disambig.pl $tmpdir/lexiconp.txt $tmpdir/lexiconp_disambig.txt`
+# add #0 (#0 reserved for symbol in grammar).
+ndisambig=$[$ndisambig+1];
+
+( for n in `seq 0 $ndisambig`; do echo '#'$n; done ) > $tmpdir/disambig.list
+
+# Get the full list of CTC tokens used in FST. These tokens include <eps>, the blank <blank>,
+# the actual model unit, and the disambiguation symbols.
+cat $srcdir/units.txt | awk '{print $1}' > $tmpdir/units.list
+(echo '<eps>';) | cat - $tmpdir/units.list $tmpdir/disambig.list | awk '{print $1 " " (NR-1)}' > $dir/tokens.txt
+
+# ctc_token_fst_corrected is too big and too slow for character based chinese modeling,
+# so here just use simple ctc_token_fst
+utils/fst/ctc_token_fst.py --token_file $dir/tokens.txt | \
+  fstcompile --isymbols=$dir/tokens.txt --osymbols=$dir/tokens.txt --keep_isymbols=false --keep_osymbols=false | \
+  fstarcsort --sort_type=olabel > $dir/T.fst || exit 1;
+
+# Encode the words with indices. Will be used in lexicon and language model FST compiling.
+cat $tmpdir/lexiconp.txt | awk '{print $1}' | sort | awk '
+  BEGIN {
+    print "<eps> 0";
+  }
+  {
+    printf("%s %d\n", $1, NR);
+  }
+  END {
+    printf("#0 %d\n", NR+1);
+    printf("<s> %d\n", NR+2);
+    printf("</s> %d\n", NR+3);
+    printf("ROOT %d\n", NR+4);
+  }' > $dir/words.txt || exit 1;
+
+# Now compile the lexicon FST. Depending on the size of your lexicon, it may take some time.
+token_disambig_symbol=`grep \#0 $dir/tokens.txt | awk '{print $2}'`
+word_disambig_symbol=`grep \#0 $dir/words.txt | awk '{print $2}'`
+
+utils/fst/make_lexicon_fst.pl --pron-probs $tmpdir/lexiconp_disambig.txt 0 "sil" '#'$ndisambig | \
+  fstcompile --isymbols=$dir/tokens.txt --osymbols=$dir/words.txt \
+  --keep_isymbols=false --keep_osymbols=false | \
+  fstaddselfloops "echo $token_disambig_symbol |" "echo $word_disambig_symbol |" | \
+  fstarcsort --sort_type=olabel > $dir/L.fst || exit 1;
+
+echo "Lexicon and Token FSTs compiling succeeded"
diff --git a/speechx/examples/custom_asr/local/mk_slot_graph.sh b/speechx/examples/custom_asr/local/mk_slot_graph.sh
new file mode 100755
index 000000000..8298a5d09
--- /dev/null
+++ b/speechx/examples/custom_asr/local/mk_slot_graph.sh
@@ -0,0 +1,74 @@
+#!/bin/bash
+
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+graph_slot=$1
+dir=$2
+
+[ -f path.sh ] && . ./path.sh
+
+sym=$dir/../lang/words.txt
+cat > $dir/address_slot.txt <<EOF
+0 5 上海 上海
+0 5 北京 北京
+0 5 合肥 合肥
+5 1 南站 南站
+0 6 立水 立水
+6 1 桥 桥
+0 7 青岛 青岛
+7 1 站 站
+1
+EOF
+
+fstcompile --isymbols=$sym --osymbols=$sym $dir/address_slot.txt $dir/address_slot.fst
+fstcompile --isymbols=$sym --osymbols=$sym $graph_slot/time_slot.txt $dir/time_slot.fst
+fstcompile --isymbols=$sym --osymbols=$sym $graph_slot/date_slot.txt $dir/date_slot.fst
+fstcompile --isymbols=$sym --osymbols=$sym $graph_slot/money_slot.txt $dir/money_slot.fst
+fstcompile --isymbols=$sym --osymbols=$sym $graph_slot/year_slot.txt $dir/year_slot.fst
diff --git a/speechx/examples/custom_asr/local/mk_tlg_with_slot.sh b/speechx/examples/custom_asr/local/mk_tlg_with_slot.sh
new file mode 100755
index 000000000..a5569f400
--- /dev/null
+++ b/speechx/examples/custom_asr/local/mk_tlg_with_slot.sh
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
diff --git a/speechx/examples/custom_asr/local/mk_tlg_with_slot.sh b/speechx/examples/custom_asr/local/mk_tlg_with_slot.sh
new file mode 100755
index 000000000..a5569f400
--- /dev/null
+++ b/speechx/examples/custom_asr/local/mk_tlg_with_slot.sh
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+lm=$1
+lang=$2
+tgt_lang=$3
+
+unset GREP_OPTIONS
+
+sym=$lang/words.txt
+arpa_lm=$lm/lm.arpa
+# Compose the language model to FST
+cat $arpa_lm | \
+    grep -v '<s> <s>' | \
+    grep -v '</s> <s>' | \
+    grep -v '</s> </s>' | \
+    grep -v -i '<unk>' | \
+    grep -v -i '<spoken_noise>' | \
+    arpa2fst --read-symbol-table=$sym --keep-symbols=true - | fstprint | \
+    utils/fst/eps2disambig.pl | utils/fst/s2eps.pl | fstcompile --isymbols=$sym \
+    --osymbols=$sym --keep_isymbols=false --keep_osymbols=false | \
+    fstrmepsilon | fstarcsort --sort_type=ilabel > $tgt_lang/G_with_slot.fst
+
+root_label=`grep ROOT $sym | awk '{print $2}'`
+address_slot_label=`grep \<ADDRESS_SLOT\> $sym | awk '{print $2}'`
+time_slot_label=`grep \<TIME_SLOT\> $sym | awk '{print $2}'`
+date_slot_label=`grep \<DATE_SLOT\> $sym | awk '{print $2}'`
+money_slot_label=`grep \<MONEY_SLOT\> $sym | awk '{print $2}'`
+year_slot_label=`grep \<YEAR_SLOT\> $sym | awk '{print $2}'`
+
+fstisstochastic $tgt_lang/G_with_slot.fst
+
+fstreplace --epsilon_on_replace $tgt_lang/G_with_slot.fst \
+    $root_label $tgt_lang/address_slot.fst $address_slot_label \
+    $tgt_lang/date_slot.fst $date_slot_label \
+    $tgt_lang/money_slot.fst $money_slot_label \
+    $tgt_lang/time_slot.fst $time_slot_label \
+    $tgt_lang/year_slot.fst $year_slot_label $tgt_lang/G.fst
+
+fstisstochastic $tgt_lang/G.fst
+
+# Compose the token, lexicon and language-model FST into the final decoding graph
+fsttablecompose $lang/L.fst $tgt_lang/G.fst | fstdeterminizestar --use-log=true | \
+    fstminimizeencoded | fstarcsort --sort_type=ilabel > $tgt_lang/LG.fst || exit 1;
+fsttablecompose $lang/T.fst $tgt_lang/LG.fst > $tgt_lang/TLG.fst || exit 1;
+rm $tgt_lang/LG.fst
+
+echo "Composing decoding graph TLG.fst succeeded"
\ No newline at end of file
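A note on the two `fstisstochastic` calls in the script above: they are diagnostics, not gates. The tool prints the minimum and maximum log-weight deviation from stochasticity and exits non-zero when the FST is not stochastic. A sketch of how the result can be surfaced explicitly (an assumption-laden example, using only the `fstisstochastic` binary this patch builds):

```bash
# fstisstochastic prints two numbers (min/max weight deviation) and exits
# non-zero for a non-stochastic FST; after fstreplace this is expected,
# so report it instead of failing the whole graph build under `set -e`.
if fstisstochastic $tgt_lang/G.fst; then
    echo "G.fst is (log-)stochastic"
else
    echo "warning: G.fst is not stochastic (common after slot replacement)"
fi
```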
diff --git a/speechx/examples/custom_asr/local/train_lm_with_slot.sh b/speechx/examples/custom_asr/local/train_lm_with_slot.sh
new file mode 100755
index 000000000..3f557ec39
--- /dev/null
+++ b/speechx/examples/custom_asr/local/train_lm_with_slot.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+# To be run from one directory above this script.
+. ./path.sh
+src=ds2_graph_with_slot
+text=$src/train_text
+lexicon=$src/local/dict/lexicon.txt
+
+dir=$src/local/lm
+mkdir -p $dir
+
+for f in "$text" "$lexicon"; do
+  [ ! -f $f ] && echo "$0: No such file $f" && exit 1;
+done
+
+# Check SRILM tools
+if ! which ngram-count > /dev/null; then
+  pushd $MAIN_ROOT/tools
+  make srilm.done
+  popd
+fi
+
+# This script takes no arguments. It assumes you have already run
+# the data preparation. It takes as input the files
+# data/local/lm/text
+# data/local/dict/lexicon.txt
+
+
+cleantext=$dir/text.no_oov
+
+cat $text | awk -v lex=$lexicon 'BEGIN{while((getline<lex) >0){ seen[$1]=1; } }
+  {for(n=1; n<=NF;n++) {  if (seen[$n]) { printf("%s ", $n); } else {printf("<SPOKEN_NOISE> ");} } printf("\n");}' \
+  > $cleantext || exit 1;
+
+cat $cleantext | awk '{for(n=2;n<=NF;n++) print $n; }' | sort | uniq -c | \
+  sort -nr > $dir/word.counts || exit 1;
+# Get counts from acoustic training transcripts, and add one-count
+# for each word in the lexicon (but not silence, we don't want it
+# in the LM-- we'll add it optionally later).
+cat $cleantext | awk '{for(n=2;n<=NF;n++) print $n; }' | \
+  cat - <(grep -w -v '!SIL' $lexicon | awk '{print $1}') | \
+  sort | uniq -c | sort -nr > $dir/unigram.counts || exit 1;
+
+# filter the words which are not in the text
+cat $dir/unigram.counts | awk '$1>1{print $0}' | awk '{print $2}' | cat - <(echo "<s>"; echo "</s>" ) > $dir/wordlist
+
+# kaldi_lm results
+mkdir -p $dir
+cat $cleantext | awk '{for(n=2;n<=NF;n++){ printf $n; if(n<NF) printf " "; else print ""; }}' > $dir/train
+
+ngram-count -text $dir/train -order 3 -limit-vocab -vocab $dir/wordlist -unk \
+  -map-unk "<UNK>" -gt3max 0 -gt2max 0 -gt1max 0 -lm $dir/lm.arpa
+
+#ngram-count -text $dir/train -order 3 -limit-vocab -vocab $dir/wordlist -unk \
+#  -map-unk "<UNK>" -lm $dir/lm2.arpa
\ No newline at end of file
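For readers unfamiliar with the SRILM invocation above: `-gt1max 0 -gt2max 0 -gt3max 0` disable the Good-Turing discounting cutoffs so that the tiny slot-augmented corpus is not over-pruned. A minimal end-to-end check of the resulting LM (a sketch under the assumption that SRILM's `ngram` tool is on PATH, which the path.sh below arranges; the test sentence is illustrative):

```bash
# Perplexity of a tiny held-out line; a finite ppl confirms the ARPA file is
# well-formed and that the <ADDRESS_SLOT> word survived vocabulary limiting.
echo "打车到 <ADDRESS_SLOT>" > $dir/heldout.txt
ngram -order 3 -lm $dir/lm.arpa -ppl $dir/heldout.txt
```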
+ echo " 一" >> $dir/local/dict/lexicon.txt + echo " 一" >> $dir/local/dict/lexicon.txt + echo " 一" >> $dir/local/dict/lexicon.txt + echo " 一" >> $dir/local/dict/lexicon.txt + echo " 一" >> $dir/local/dict/lexicon.txt +fi + +if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then + # train lm + lm=$dir/local/lm + mkdir -p $lm + # this script is different with the common lm training script + local/train_lm_with_slot.sh +fi + +if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then + # make T & L + local/compile_lexicon_token_fst.sh $dir/local/dict $dir/local/tmp $dir/local/lang + mkdir -p $dir/local/lang_test + # make slot graph + local/mk_slot_graph.sh $resource/graph $dir/local/lang_test + # make TLG + local/mk_tlg_with_slot.sh $dir/local/lm $dir/local/lang $dir/local/lang_test || exit 1; + mv $dir/local/lang_test/TLG.fst $dir/local/lang/ +fi + +if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then + # test TLG + model_dir=$PWD/resource/model + cmvn=$data/cmvn.ark + wav_scp=$data/wav.scp + graph=$dir/local/lang + + recognizer_test_main \ + --wav_rspecifier=scp:$wav_scp \ + --cmvn_file=$cmvn \ + --streaming_chunk=30 \ + --use_fbank=true \ + --model_path=$model_dir/avg_10.jit.pdmodel \ + --param_path=$model_dir/avg_10.jit.pdiparams \ + --model_cache_shapes="5-1-2048,5-1-2048" \ + --model_output_names=softmax_0.tmp_0,tmp_5,concat_0.tmp_0,concat_1.tmp_0 \ + --word_symbol_table=$graph/words.txt \ + --graph_path=$graph/TLG.fst --max_active=7500 \ + --acoustic_scale=12 \ + --result_wspecifier=ark,t:./result_run.txt + + # the data/wav.trans is the label. + utils/compute-wer.py --char=1 --v=1 data/wav.trans result_run.txt > wer_run + tail -n 7 wer_run +fi diff --git a/speechx/examples/custom_asr/utils b/speechx/examples/custom_asr/utils new file mode 120000 index 000000000..973afe674 --- /dev/null +++ b/speechx/examples/custom_asr/utils @@ -0,0 +1 @@ +../../../utils \ No newline at end of file diff --git a/speechx/speechx/kaldi/CMakeLists.txt b/speechx/speechx/kaldi/CMakeLists.txt index 6f7398cd1..ce6b43f63 100644 --- a/speechx/speechx/kaldi/CMakeLists.txt +++ b/speechx/speechx/kaldi/CMakeLists.txt @@ -7,3 +7,7 @@ add_subdirectory(matrix) add_subdirectory(lat) add_subdirectory(fstext) add_subdirectory(decoder) +add_subdirectory(lm) + +add_subdirectory(fstbin) +add_subdirectory(lmbin) \ No newline at end of file diff --git a/speechx/speechx/kaldi/fstbin/CMakeLists.txt b/speechx/speechx/kaldi/fstbin/CMakeLists.txt new file mode 100644 index 000000000..05d0501f3 --- /dev/null +++ b/speechx/speechx/kaldi/fstbin/CMakeLists.txt @@ -0,0 +1,15 @@ +cmake_minimum_required(VERSION 3.14 FATAL_ERROR) + +set(BINS +fstaddselfloops +fstisstochastic +fstminimizeencoded +fstdeterminizestar +fsttablecompose +) + +foreach(binary IN LISTS BINS) + add_executable(${binary} ${CMAKE_CURRENT_SOURCE_DIR}/${binary}.cc) + target_include_directories(${binary} PRIVATE ${SPEECHX_ROOT} ${SPEECHX_ROOT}/kaldi) + target_link_libraries(${binary} PUBLIC kaldi-fstext glog gflags fst dl) +endforeach() diff --git a/speechx/tools/fstbin/fstaddselfloops.cc b/speechx/speechx/kaldi/fstbin/fstaddselfloops.cc similarity index 100% rename from speechx/tools/fstbin/fstaddselfloops.cc rename to speechx/speechx/kaldi/fstbin/fstaddselfloops.cc diff --git a/speechx/tools/fstbin/fstdeterminizestar.cc b/speechx/speechx/kaldi/fstbin/fstdeterminizestar.cc similarity index 100% rename from speechx/tools/fstbin/fstdeterminizestar.cc rename to speechx/speechx/kaldi/fstbin/fstdeterminizestar.cc diff --git a/speechx/tools/fstbin/fstisstochastic.cc 
diff --git a/speechx/tools/fstbin/fstisstochastic.cc b/speechx/speechx/kaldi/fstbin/fstisstochastic.cc
similarity index 100%
rename from speechx/tools/fstbin/fstisstochastic.cc
rename to speechx/speechx/kaldi/fstbin/fstisstochastic.cc
diff --git a/speechx/tools/fstbin/fstminimizeencoded.cc b/speechx/speechx/kaldi/fstbin/fstminimizeencoded.cc
similarity index 100%
rename from speechx/tools/fstbin/fstminimizeencoded.cc
rename to speechx/speechx/kaldi/fstbin/fstminimizeencoded.cc
diff --git a/speechx/tools/fstbin/fsttablecompose.cc b/speechx/speechx/kaldi/fstbin/fsttablecompose.cc
similarity index 100%
rename from speechx/tools/fstbin/fsttablecompose.cc
rename to speechx/speechx/kaldi/fstbin/fsttablecompose.cc
diff --git a/speechx/speechx/kaldi/fstext/CMakeLists.txt b/speechx/speechx/kaldi/fstext/CMakeLists.txt
index af91fd985..465d9dba7 100644
--- a/speechx/speechx/kaldi/fstext/CMakeLists.txt
+++ b/speechx/speechx/kaldi/fstext/CMakeLists.txt
@@ -1,5 +1,5 @@
 add_library(kaldi-fstext
-kaldi-fst-io.cc
+  kaldi-fst-io.cc
 )
 
 target_link_libraries(kaldi-fstext PUBLIC kaldi-util)
diff --git a/speechx/speechx/kaldi/lm/CMakeLists.txt b/speechx/speechx/kaldi/lm/CMakeLists.txt
new file mode 100644
index 000000000..75c1567e7
--- /dev/null
+++ b/speechx/speechx/kaldi/lm/CMakeLists.txt
@@ -0,0 +1,6 @@
+
+add_library(kaldi-lm
+  arpa-file-parser.cc
+  arpa-lm-compiler.cc
+)
+target_link_libraries(kaldi-lm PUBLIC kaldi-util)
\ No newline at end of file
diff --git a/speechx/speechx/kaldi/lm/arpa-file-parser.cc b/speechx/speechx/kaldi/lm/arpa-file-parser.cc
new file mode 100644
index 000000000..81b63ed13
--- /dev/null
+++ b/speechx/speechx/kaldi/lm/arpa-file-parser.cc
@@ -0,0 +1,281 @@
+// lm/arpa-file-parser.cc
+
+// Copyright 2014  Guoguo Chen
+// Copyright 2016  Smart Action Company LLC (kkm)
+
+// See ../../COPYING for clarification regarding multiple authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//  http://www.apache.org/licenses/LICENSE-2.0
+//
+// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
+// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
+// MERCHANTABLITY OR NON-INFRINGEMENT.
+// See the Apache 2 License for the specific language governing permissions and
+// limitations under the License.
+
+#include <fst/fstlib.h>
+
+#include <sstream>
+
+#include "base/kaldi-error.h"
+#include "base/kaldi-math.h"
+#include "lm/arpa-file-parser.h"
+#include "util/text-utils.h"
+
+namespace kaldi {
+
+ArpaFileParser::ArpaFileParser(const ArpaParseOptions& options,
+                               fst::SymbolTable* symbols)
+    : options_(options), symbols_(symbols),
+      line_number_(0), warning_count_(0) {
+}
+
+ArpaFileParser::~ArpaFileParser() {
+}
+
+void TrimTrailingWhitespace(std::string *str) {
+  str->erase(str->find_last_not_of(" \n\r\t") + 1);
+}
Given:" + << " BOS=" << options_.bos_symbol + << " EOS=" << options_.eos_symbol; + if (symbols_ != NULL && + options_.oov_handling == ArpaParseOptions::kReplaceWithUnk && + (options_.unk_symbol <= 0 || + options_.unk_symbol == options_.bos_symbol || + options_.unk_symbol == options_.eos_symbol)) + KALDI_ERR << "When symbol table is given and OOV mode is kReplaceWithUnk, " + << "UNK symbol is required, must not be epsilon, and " + << "differ from both BOS and EOS symbols. Given:" + << " UNK=" << options_.unk_symbol + << " BOS=" << options_.bos_symbol + << " EOS=" << options_.eos_symbol; + if (symbols_ != NULL && symbols_->Find(options_.bos_symbol).empty()) + KALDI_ERR << "BOS symbol must exist in symbol table"; + if (symbols_ != NULL && symbols_->Find(options_.eos_symbol).empty()) + KALDI_ERR << "EOS symbol must exist in symbol table"; + if (symbols_ != NULL && options_.unk_symbol > 0 && + symbols_->Find(options_.unk_symbol).empty()) + KALDI_ERR << "UNK symbol must exist in symbol table"; + + ngram_counts_.clear(); + line_number_ = 0; + warning_count_ = 0; + current_line_.clear(); + +#define PARSE_ERR KALDI_ERR << LineReference() << ": " + + // Give derived class an opportunity to prepare its state. + ReadStarted(); + + // Processes "\data\" section. + bool keyword_found = false; + while (++line_number_, getline(is, current_line_) && !is.eof()) { + if (current_line_.find_first_not_of(" \t\n\r") == std::string::npos) { + continue; + } + + TrimTrailingWhitespace(¤t_line_); + + // Continue skipping lines until the \data\ marker alone on a line is found. + if (!keyword_found) { + if (current_line_ == "\\data\\") { + KALDI_LOG << "Reading \\data\\ section."; + keyword_found = true; + } + continue; + } + + if (current_line_[0] == '\\') break; + + // Enters "\data\" section, and looks for patterns like "ngram 1=1000", + // which means there are 1000 unigrams. + std::size_t equal_symbol_pos = current_line_.find("="); + if (equal_symbol_pos != std::string::npos) + // Guaranteed spaces around the "=". + current_line_.replace(equal_symbol_pos, 1, " = "); + std::vector col; + SplitStringToVector(current_line_, " \t", true, &col); + if (col.size() == 4 && col[0] == "ngram" && col[2] == "=") { + int32 order, ngram_count = 0; + if (!ConvertStringToInteger(col[1], &order) || + !ConvertStringToInteger(col[3], &ngram_count)) { + PARSE_ERR << "cannot parse ngram count"; + } + if (ngram_counts_.size() <= order) { + ngram_counts_.resize(order); + } + ngram_counts_[order - 1] = ngram_count; + } else { + KALDI_WARN << LineReference() + << ": uninterpretable line in \\data\\ section"; + } + } + + if (ngram_counts_.size() == 0) + PARSE_ERR << "\\data\\ section missing or empty."; + + // Signal that grammar order and n-gram counts are known. + HeaderAvailable(); + + NGram ngram; + ngram.words.reserve(ngram_counts_.size()); + + // Processes "\N-grams:" section. + for (int32 cur_order = 1; cur_order <= ngram_counts_.size(); ++cur_order) { + // Skips n-grams with zero count. + if (ngram_counts_[cur_order - 1] == 0) + KALDI_WARN << "Zero ngram count in ngram order " << cur_order + << "(look for 'ngram " << cur_order << "=0' in the \\data\\ " + << " section). There is possibly a problem with the file."; + + // Must be looking at a \k-grams: directive at this point. 
+    std::ostringstream keyword;
+    keyword << "\\" << cur_order << "-grams:";
+    if (current_line_ != keyword.str()) {
+      PARSE_ERR << "invalid directive, expecting '" << keyword.str() << "'";
+    }
+    KALDI_LOG << "Reading " << current_line_ << " section.";
+
+    int32 ngram_count = 0;
+    while (++line_number_, getline(is, current_line_) && !is.eof()) {
+      if (current_line_.find_first_not_of(" \n\t\r") == std::string::npos) {
+        continue;
+      }
+      if (current_line_[0] == '\\') {
+        TrimTrailingWhitespace(&current_line_);
+        std::ostringstream next_keyword;
+        next_keyword << "\\" << cur_order + 1 << "-grams:";
+        if ((current_line_ != next_keyword.str()) &&
+            (current_line_ != "\\end\\")) {
+          if (ShouldWarn()) {
+            KALDI_WARN << "ignoring possible directive '" << current_line_
+                       << "' expecting '" << next_keyword.str() << "'";
+
+            if (warning_count_ > 0 &&
+                warning_count_ > static_cast<uint32>(options_.max_warnings)) {
+              KALDI_WARN << "Of " << warning_count_ << " parse warnings, "
+                         << options_.max_warnings << " were reported. "
+                         << "Run program with --max-arpa-warnings=-1 "
+                         << "to see all warnings";
+            }
+          }
+        } else {
+          break;
+        }
+      }
+
+      std::vector<std::string> col;
+      SplitStringToVector(current_line_, " \t", true, &col);
+
+      if (col.size() < 1 + cur_order ||
+          col.size() > 2 + cur_order ||
+          (cur_order == ngram_counts_.size() && col.size() != 1 + cur_order)) {
+        PARSE_ERR << "Invalid n-gram data line";
+      }
+      ++ngram_count;
+
+      // Parse out n-gram logprob and, if present, backoff weight.
+      if (!ConvertStringToReal(col[0], &ngram.logprob)) {
+        PARSE_ERR << "invalid n-gram logprob '" << col[0] << "'";
+      }
+      ngram.backoff = 0.0;
+      if (col.size() > cur_order + 1) {
+        if (!ConvertStringToReal(col[cur_order + 1], &ngram.backoff))
+          PARSE_ERR << "invalid backoff weight '" << col[cur_order + 1] << "'";
+      }
+      // Convert to natural log.
+      ngram.logprob *= M_LN10;
+      ngram.backoff *= M_LN10;
+
+      ngram.words.resize(cur_order);
+      bool skip_ngram = false;
+      for (int32 index = 0; !skip_ngram && index < cur_order; ++index) {
+        int32 word;
+        if (symbols_) {
+          // Symbol table provided, so symbol labels are expected.
+          if (options_.oov_handling == ArpaParseOptions::kAddToSymbols) {
+            word = symbols_->AddSymbol(col[1 + index]);
+          } else {
+            word = symbols_->Find(col[1 + index]);
+            if (word == -1) {  // fst::kNoSymbol
+              switch (options_.oov_handling) {
+                case ArpaParseOptions::kReplaceWithUnk:
+                  word = options_.unk_symbol;
+                  break;
+                case ArpaParseOptions::kSkipNGram:
+                  if (ShouldWarn())
+                    KALDI_WARN << LineReference() << " skipped: word '"
+                               << col[1 + index] << "' not in symbol table";
+                  skip_ngram = true;
+                  break;
+                default:
+                  PARSE_ERR << "word '" << col[1 + index]
+                            << "' not in symbol table";
+              }
+            }
+          }
+        } else {
+          // Symbols not provided, LM file should contain integers.
+          if (!ConvertStringToInteger(col[1 + index], &word) || word < 0) {
+            PARSE_ERR << "invalid symbol '" << col[1 + index] << "'";
+          }
+        }
+        // Whichever way we got it, an epsilon is invalid.
+        if (word == 0) {
+          PARSE_ERR << "epsilon symbol '" << col[1 + index]
+                    << "' is illegal in ARPA LM";
+        }
+        ngram.words[index] = word;
+      }
+      if (!skip_ngram) {
+        ConsumeNGram(ngram);
+      }
+    }
+    if (ngram_count > ngram_counts_[cur_order - 1]) {
+      PARSE_ERR << "header said there would be " << ngram_counts_[cur_order - 1]
+                << " n-grams of order " << cur_order
+                << ", but we saw more already.";
+    }
+  }
+
+  if (current_line_ != "\\end\\") {
+    PARSE_ERR << "invalid or unexpected directive line, expecting \\end\\";
+  }
+
+  if (warning_count_ > 0 &&
+      warning_count_ > static_cast<uint32>(options_.max_warnings)) {
+    KALDI_WARN << "Of " << warning_count_ << " parse warnings, "
+               << options_.max_warnings << " were reported. Run program with "
+               << "--max-arpa-warnings=-1 to see all warnings";
+  }
+
+  current_line_.clear();
+  ReadComplete();
+
+#undef PARSE_ERR
+}
+
+std::string ArpaFileParser::LineReference() const {
+  std::ostringstream ss;
+  ss << "line " << line_number_ << " [" << current_line_ << "]";
+  return ss.str();
+}
+
+bool ArpaFileParser::ShouldWarn() {
+  return (warning_count_ != -1) &&
+         (++warning_count_ <= static_cast<uint32>(options_.max_warnings));
+}
+
+}  // namespace kaldi
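For readers new to the format this parser reads: an ARPA LM is a plain-text file with a `\data\` header of n-gram counts followed by `\N-grams:` sections of `logprob word... [backoff]` rows. A minimal file and a compiler invocation might look like this (a sketch; `arpa2fst` is the binary this patch moves into `kaldi/lmbin`, and the toy counts and weights are made up):

```bash
cat > toy.arpa <<EOF
\data\
ngram 1=4
ngram 2=2

\1-grams:
-0.60206 </s>
-99 <s> -0.30103
-0.60206 上海 -0.30103
-0.60206 北京 -0.30103

\2-grams:
-0.30103 <s> 上海
-0.30103 上海 </s>

\end\
EOF
# Compile to a grammar FST over an existing words.txt symbol table
# (the table must already contain <s>, </s>, 上海 and 北京).
arpa2fst --read-symbol-table=words.txt --keep-symbols=true toy.arpa G.fst
```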
diff --git a/speechx/speechx/kaldi/lm/arpa-file-parser.h b/speechx/speechx/kaldi/lm/arpa-file-parser.h
new file mode 100644
index 000000000..99ffba029
--- /dev/null
+++ b/speechx/speechx/kaldi/lm/arpa-file-parser.h
@@ -0,0 +1,146 @@
+// lm/arpa-file-parser.h
+
+// Copyright 2014  Guoguo Chen
+// Copyright 2016  Smart Action Company LLC (kkm)
+
+// See ../../COPYING for clarification regarding multiple authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//  http://www.apache.org/licenses/LICENSE-2.0
+//
+// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
+// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
+// MERCHANTABLITY OR NON-INFRINGEMENT.
+// See the Apache 2 License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef KALDI_LM_ARPA_FILE_PARSER_H_
+#define KALDI_LM_ARPA_FILE_PARSER_H_
+
+#include <fst/fst-decl.h>
+
+#include <string>
+#include <vector>
+
+#include "base/kaldi-types.h"
+#include "util/options-itf.h"
+
+namespace kaldi {
+
+/**
+  Options that control ArpaFileParser
+*/
+struct ArpaParseOptions {
+  enum OovHandling {
+    kRaiseError,      ///< Abort on OOV words
+    kAddToSymbols,    ///< Add novel words to the symbol table.
+    kReplaceWithUnk,  ///< Replace OOV words with <unk>.
+    kSkipNGram        ///< Skip n-gram with OOV word and continue.
+  };
+
+  ArpaParseOptions():
+      bos_symbol(-1), eos_symbol(-1), unk_symbol(-1),
+      oov_handling(kRaiseError), max_warnings(30) { }
+
+  void Register(OptionsItf *opts) {
+    // Registering only the max_warnings count, since other options are
+    // treated differently by client programs: some want integer symbols,
+    // while other are passed words in their command line.
+    opts->Register("max-arpa-warnings", &max_warnings,
+                   "Maximum warnings to report on ARPA parsing, "
+                   "0 to disable, -1 to show all");
+  }
+
+  int32 bos_symbol;  ///< Symbol for <s>. Required, non-epsilon.
+  int32 eos_symbol;  ///< Symbol for </s>. Required, non-epsilon.
+  int32 unk_symbol;  ///< Symbol for <unk>. Required for kReplaceWithUnk.
+  OovHandling oov_handling;  ///< How to handle OOV words in the file.
+  int32 max_warnings;  ///< Maximum warnings to report; <0 means unlimited.
+};
+
+/**
+  A parsed n-gram from ARPA LM file.
+*/
+struct NGram {
+  NGram() : logprob(0.0), backoff(0.0) { }
+  std::vector<int32> words;  ///< Symbols in left to right order.
+  float logprob;             ///< Log-prob of the n-gram.
+  float backoff;             ///< log-backoff weight of the n-gram.
+                             ///< Defaults to zero if not specified.
+};
+
+/**
+  ArpaFileParser is an abstract base class for ARPA LM file conversion.
+
+  See ConstArpaLmBuilder and ArpaLmCompiler for usage examples.
+*/
+class ArpaFileParser {
+ public:
+  /// Constructs the parser with the given options and optional symbol table.
+  /// If symbol table is provided, then the file should contain text n-grams,
+  /// and the words are mapped to symbols through it. bos_symbol and
+  /// eos_symbol in the options structure must be valid symbols in the table,
+  /// and so must be unk_symbol if provided. The table is not owned by the
+  /// parser, but may be augmented, if oov_handling is set to kAddToSymbols.
+  /// If symbol table is a null pointer, the file should contain integer
+  /// symbol values, and oov_handling has no effect. bos_symbol and eos_symbol
+  /// must be valid symbols still.
+  ArpaFileParser(const ArpaParseOptions& options, fst::SymbolTable* symbols);
+  virtual ~ArpaFileParser();
+
+  /// Read ARPA LM file from a stream.
+  void Read(std::istream &is);
+
+  /// Parser options.
+  const ArpaParseOptions& Options() const { return options_; }
+
+ protected:
+  /// Override called before reading starts. This is the point to prepare
+  /// any state in the derived class.
+  virtual void ReadStarted() { }
+
+  /// Override function called to signal that ARPA header with the expected
+  /// number of n-grams has been read, and ngram_counts() is now valid.
+  virtual void HeaderAvailable() { }
+
+  /// Pure override that must be implemented to process current n-gram. The
+  /// n-grams are sent in the file order, which guarantees that all
+  /// (k-1)-grams are processed before the first k-gram is.
+  virtual void ConsumeNGram(const NGram&) = 0;
+
+  /// Override function called after the last n-gram has been consumed.
+  virtual void ReadComplete() { }
+
+  /// Read-only access to symbol table. Not owned, do not make public.
+  const fst::SymbolTable* Symbols() const { return symbols_; }
+
+  /// Inside ConsumeNGram(), provides the current line number.
+  int32 LineNumber() const { return line_number_; }
+
+  /// Inside ConsumeNGram(), returns a formatted reference to the line being
+  /// compiled, to print out as part of diagnostics.
+  std::string LineReference() const;
+
+  /// Increments warning count, and returns true if a warning should be
+  /// printed or false if the count has exceeded the set maximum.
+  bool ShouldWarn();
+
+  /// N-gram counts. Valid from the point when HeaderAvailable() is called.
+  const std::vector<int32>& NgramCounts() const { return ngram_counts_; }
+
+ private:
+  ArpaParseOptions options_;
+  fst::SymbolTable* symbols_;  // the pointer is not owned here.
+  int32 line_number_;
+  uint32 warning_count_;
+  std::string current_line_;
+  std::vector<int32> ngram_counts_;
+};
+
+}  // namespace kaldi
+
+#endif  // KALDI_LM_ARPA_FILE_PARSER_H_
diff --git a/speechx/speechx/kaldi/lm/arpa-lm-compiler.cc b/speechx/speechx/kaldi/lm/arpa-lm-compiler.cc
new file mode 100644
index 000000000..47bd20d47
--- /dev/null
+++ b/speechx/speechx/kaldi/lm/arpa-lm-compiler.cc
@@ -0,0 +1,377 @@
+// lm/arpa-lm-compiler.cc
+
+// Copyright 2009-2011 Gilles Boulianne
+// Copyright 2016 Smart Action LLC (kkm)
+// Copyright 2017 Xiaohui Zhang
+
+// See ../../COPYING for clarification regarding multiple authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//  http://www.apache.org/licenses/LICENSE-2.0
+//
+// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
+// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
+// MERCHANTABLITY OR NON-INFRINGEMENT.
+// See the Apache 2 License for the specific language governing permissions and
+// limitations under the License.
+
+#include <algorithm>
+#include <limits>
+#include <sstream>
+#include <utility>
+
+#include "base/kaldi-math.h"
+#include "lm/arpa-lm-compiler.h"
+#include "util/stl-utils.h"
+#include "util/text-utils.h"
+#include "fstext/remove-eps-local.h"
+
+namespace kaldi {
+
+class ArpaLmCompilerImplInterface {
+ public:
+  virtual ~ArpaLmCompilerImplInterface() { }
+  virtual void ConsumeNGram(const NGram& ngram, bool is_highest) = 0;
+};
+
+namespace {
+
+typedef int32 StateId;
+typedef int32 Symbol;
+
+// GeneralHistKey can represent state history in an arbitrarily large n
+// n-gram model with symbol ids fitting int32.
+class GeneralHistKey {
+ public:
+  // Construct key from begin and end iterators.
+  template <class InputIt>
+  GeneralHistKey(InputIt begin, InputIt end) : vector_(begin, end) { }
+  // Construct empty history key.
+  GeneralHistKey() : vector_() { }
+  // Return tails of the key as a GeneralHistKey. The tails of an n-gram
+  // w[1..n] is the sequence w[2..n] (and the heads is w[1..n-1], but the
+  // key class does not need this operation).
+  GeneralHistKey Tails() const {
+    return GeneralHistKey(vector_.begin() + 1, vector_.end());
+  }
+  // Keys are equal if represent same state.
+  friend bool operator==(const GeneralHistKey& a, const GeneralHistKey& b) {
+    return a.vector_ == b.vector_;
+  }
+  // Public typename HashType for hashing.
+  struct HashType : public std::unary_function<GeneralHistKey, size_t> {
+    size_t operator()(const GeneralHistKey& key) const {
+      return VectorHasher<Symbol>().operator()(key.vector_);
+    }
+  };
+
+ private:
+  std::vector<Symbol> vector_;
+};
+
+// OptimizedHistKey combines 3 21-bit symbol ID values into one 64-bit
+// machine word, allowing significant memory reduction and some runtime
+// benefit over GeneralHistKey. Since 3 symbols are enough to track history
+// in a 4-gram model, this optimized key is used for smaller models with up
+// to 4-gram and symbol values up to 2^21-1.
+//
+// See GeneralHistKey for interface requirements of a key class.
+class OptimizedHistKey {
+ public:
+  enum {
+    kShift = 21,  // 21 * 3 = 63 bits for data.
+    kMaxData = (1 << kShift) - 1
+  };
+  template <class InputIt>
+  OptimizedHistKey(InputIt begin, InputIt end) : data_(0) {
+    for (uint32 shift = 0; begin != end; ++begin, shift += kShift) {
+      data_ |= static_cast<uint64>(*begin) << shift;
+    }
+  }
+  OptimizedHistKey() : data_(0) { }
+  OptimizedHistKey Tails() const {
+    return OptimizedHistKey(data_ >> kShift);
+  }
+  friend bool operator==(const OptimizedHistKey& a, const OptimizedHistKey& b) {
+    return a.data_ == b.data_;
+  }
+  struct HashType : public std::unary_function<OptimizedHistKey, size_t> {
+    size_t operator()(const OptimizedHistKey& key) const { return key.data_; }
+  };
+
+ private:
+  explicit OptimizedHistKey(uint64 data) : data_(data) { }
+  uint64 data_;
+};
+
+}  // namespace
+
+template <class HistKey>
+class ArpaLmCompilerImpl : public ArpaLmCompilerImplInterface {
+ public:
+  ArpaLmCompilerImpl(ArpaLmCompiler* parent, fst::StdVectorFst* fst,
+                     Symbol sub_eps);
+
+  virtual void ConsumeNGram(const NGram &ngram, bool is_highest);
+
+ private:
+  StateId AddStateWithBackoff(HistKey key, float backoff);
+  void CreateBackoff(HistKey key, StateId state, float weight);
+
+  ArpaLmCompiler *parent_;  // Not owned.
+  fst::StdVectorFst* fst_;  // Not owned.
+  Symbol bos_symbol_;
+  Symbol eos_symbol_;
+  Symbol sub_eps_;
+
+  StateId eos_state_;
+  typedef unordered_map<HistKey, StateId,
+                        typename HistKey::HashType> HistoryMap;
+  HistoryMap history_;
+};
+
+template <class HistKey>
+ArpaLmCompilerImpl<HistKey>::ArpaLmCompilerImpl(
+    ArpaLmCompiler* parent, fst::StdVectorFst* fst, Symbol sub_eps)
+    : parent_(parent), fst_(fst), bos_symbol_(parent->Options().bos_symbol),
+      eos_symbol_(parent->Options().eos_symbol), sub_eps_(sub_eps) {
+  // The algorithm maintains state per history. The 0-gram is a special state
+  // for empty history. All unigrams (including BOS) backoff into this state.
+  StateId zerogram = fst_->AddState();
+  history_[HistKey()] = zerogram;
+
+  // Also, if </s> is not treated as epsilon, create a common end state for
+  // all transitions accepting the </s>, since they do not back off. This small
+  // optimization saves about 2% states in an average grammar.
+  if (sub_eps_ == 0) {
+    eos_state_ = fst_->AddState();
+    fst_->SetFinal(eos_state_, 0);
+  }
+}
+
+template <class HistKey>
+void ArpaLmCompilerImpl<HistKey>::ConsumeNGram(const NGram &ngram,
+                                               bool is_highest) {
+  // Generally, we do the following. Suppose we are adding an n-gram "A B
+  // C". Then find the node for "A B", add a new node for "A B C", and connect
+  // them with the arc accepting "C" with the specified weight. Also, add a
+  // backoff arc from the new "A B C" node to its backoff state "B C".
+  //
+  // Two notable exceptions are the highest order n-grams, and final n-grams.
+  //
+  // When adding a highest order n-gram (e. g., our "A B C" is in a 3-gram LM),
+  // the following optimization is performed. There is no point adding a node
+  // for "A B C" with a "C" arc from "A B", since there will be no other
+  // arcs ingoing to this node, and an epsilon backoff arc into the backoff
+  // model "B C", with the weight of \bar{1}. To save a node, create an arc
+  // accepting "C" directly from "A B" to "B C". This saves as many nodes
+  // as there are the highest order n-grams, which is typically about half
+  // the size of a large 3-gram model.
+  //
+  // Indeed, this does not apply to n-grams ending in EOS, since they do not
+  // back off. These are special, as they do not have a back-off state, and
+  // the node for "(..anything..)</s>" is always final. These are handled
+  // in one of the two possible ways. If symbols <s> and </s> are being
+  // replaced by epsilons, neither node nor arc is created, and the logprob
+  // of the n-gram is applied to its source node as final weight. If <s> and
+  // </s> are preserved, then a special final node for </s> is allocated and
+  // used as the destination of the "</s>" acceptor arc.
+  HistKey heads(ngram.words.begin(), ngram.words.end() - 1);
+  typename HistoryMap::iterator source_it = history_.find(heads);
+  if (source_it == history_.end()) {
+    // There was no "A B", therefore the probability of "A B C" is zero.
+    // Print a warning and discard current n-gram.
+    if (parent_->ShouldWarn())
+      KALDI_WARN << parent_->LineReference()
+                 << " skipped: no parent (n-1)-gram exists";
+    return;
+  }
+
+  StateId source = source_it->second;
+  StateId dest;
+  Symbol sym = ngram.words.back();
+  float weight = -ngram.logprob;
+  if (sym == sub_eps_ || sym == 0) {
+    KALDI_ERR << "<eps> or disambiguation symbol " << sym
+              << " found in the ARPA file.";
+  }
+  if (sym == eos_symbol_) {
+    if (sub_eps_ == 0) {
+      // Keep </s> as a real symbol when not substituting.
+      dest = eos_state_;
+    } else {
+      // Treat </s> as if it was epsilon: mark source final, with the weight
+      // of the n-gram.
+      fst_->SetFinal(source, weight);
+      return;
+    }
+  } else {
+    // For the highest order n-gram, this may find an existing state, for
+    // non-highest, will create one (unless there are duplicate n-grams
+    // in the grammar, which cannot be reliably detected if highest order,
+    // so we better do not do that at all).
+    dest = AddStateWithBackoff(
+        HistKey(ngram.words.begin() + (is_highest ? 1 : 0),
+                ngram.words.end()),
+        -ngram.backoff);
+  }
+
+  if (sym == bos_symbol_) {
+    weight = 0;  // Accepting <s> is always free.
+    if (sub_eps_ == 0) {
+      // <s> is as a real symbol, only accepted in the start state.
+      source = fst_->AddState();
+      fst_->SetStart(source);
+    } else {
+      // The new state for <s> unigram history *is* the start state.
+      fst_->SetStart(dest);
+      return;
+    }
+  }
+
+  // Add arc from source to dest, whichever way it was found.
+  fst_->AddArc(source, fst::StdArc(sym, sym, weight, dest));
+  return;
+}
+
+// Find or create a new state for n-gram defined by key, and ensure it has a
+// backoff transition. The key is either the current n-gram for all but
+// highest orders, or the tails of the n-gram for the highest order. The
+// latter arises from the chain-collapsing optimization described above.
+template <class HistKey>
+StateId ArpaLmCompilerImpl<HistKey>::AddStateWithBackoff(HistKey key,
+                                                         float backoff) {
+  typename HistoryMap::iterator dest_it = history_.find(key);
+  if (dest_it != history_.end()) {
+    // Found an existing state in the history map. Invariant: if the state in
+    // the map, then its backoff arc is in the FST. We are done.
+    return dest_it->second;
+  }
+  // Otherwise create a new state and its backoff arc, and register in the map.
+  StateId dest = fst_->AddState();
+  history_[key] = dest;
+  CreateBackoff(key.Tails(), dest, backoff);
+  return dest;
+}
+
+// Create a backoff arc for a state. Key is a backoff destination that may or
+// may not exist. When the destination is not found, naturally fall back to
+// the lower order model, and all the way down until one is found (since the
+// 0-gram model is always present, the search is guaranteed to terminate).
+template <class HistKey>
+inline void ArpaLmCompilerImpl<HistKey>::CreateBackoff(
+    HistKey key, StateId state, float weight) {
+  typename HistoryMap::iterator dest_it = history_.find(key);
+  while (dest_it == history_.end()) {
+    key = key.Tails();
+    dest_it = history_.find(key);
+  }
+
+  // The arc should transduce either <eps> or #0 to <eps>, depending on the
+  // epsilon substitution mode. This is the only case when input and output
+  // label may differ.
+  fst_->AddArc(state, fst::StdArc(sub_eps_, 0, weight, dest_it->second));
+}
+
+ArpaLmCompiler::~ArpaLmCompiler() {
+  if (impl_ != NULL)
+    delete impl_;
+}
+
+void ArpaLmCompiler::HeaderAvailable() {
+  KALDI_ASSERT(impl_ == NULL);
+  // Use optimized implementation if the grammar is 4-gram or less, and the
+  // maximum attained symbol id will fit into the optimized range.
+  int64 max_symbol = 0;
+  if (Symbols() != NULL)
+    max_symbol = Symbols()->AvailableKey() - 1;
+  // If augmenting the symbol table, assume the worst case when all words in
+  // the model being read are novel.
+  if (Options().oov_handling == ArpaParseOptions::kAddToSymbols)
+    max_symbol += NgramCounts()[0];
+
+  if (NgramCounts().size() <= 4 && max_symbol < OptimizedHistKey::kMaxData) {
+    impl_ = new ArpaLmCompilerImpl<OptimizedHistKey>(this, &fst_, sub_eps_);
+  } else {
+    impl_ = new ArpaLmCompilerImpl<GeneralHistKey>(this, &fst_, sub_eps_);
+    KALDI_LOG << "Reverting to slower state tracking because model is large: "
+              << NgramCounts().size() << "-gram with symbols up to "
+              << max_symbol;
+  }
+}
+
+void ArpaLmCompiler::ConsumeNGram(const NGram &ngram) {
+  // <s> is invalid in tails, </s> in heads of an n-gram.
+  for (int i = 0; i < ngram.words.size(); ++i) {
+    if ((i > 0 && ngram.words[i] == Options().bos_symbol) ||
+        (i + 1 < ngram.words.size()
+         && ngram.words[i] == Options().eos_symbol)) {
+      if (ShouldWarn())
+        KALDI_WARN << LineReference()
+                   << " skipped: n-gram has invalid BOS/EOS placement";
+      return;
+    }
+  }
+
+  bool is_highest = ngram.words.size() == NgramCounts().size();
+  impl_->ConsumeNGram(ngram, is_highest);
+}
+
+void ArpaLmCompiler::RemoveRedundantStates() {
+  fst::StdArc::Label backoff_symbol = sub_eps_;
+  if (backoff_symbol == 0) {
+    // The method of removing redundant states implemented in this function
+    // leads to slow determinization of L o G when people use the older style of
+    // usage of arpa2fst where the --disambig-symbol option was not specified.
+    // The issue seems to be that it creates a non-deterministic FST, while G is
+    // supposed to be deterministic. By 'return'ing below, we just disable this
+    // method if people were using an older script. This method isn't really
+    // that consequential anyway, and people will move to the newer-style
+    // scripts (see current utils/format_lm.sh), so this isn't much of a
+    // problem.
+    return;
+  }
+
+  fst::StdArc::StateId num_states = fst_.NumStates();
+
+  // Replace the #0 symbols on the input of arcs out of redundant states
+  // (states that are not final and have only a backoff arc leaving them)
+  // with <eps>.
+  for (fst::StdArc::StateId state = 0; state < num_states; state++) {
+    if (fst_.NumArcs(state) == 1 && fst_.Final(state) == fst::TropicalWeight::Zero()) {
+      fst::MutableArcIterator<fst::StdVectorFst> iter(&fst_, state);
+      fst::StdArc arc = iter.Value();
+      if (arc.ilabel == backoff_symbol) {
+        arc.ilabel = 0;
+        iter.SetValue(arc);
+      }
+    }
+  }
+
+  // we could call fst::RemoveEps, and it would have the same effect in normal
+  // cases, where backoff_symbol != 0 and there are no epsilons in unexpected
+  // places, but RemoveEpsLocal is a bit safer in case something weird is going
+  // on; it guarantees not to blow up the FST.
+  fst::RemoveEpsLocal(&fst_);
+  KALDI_LOG << "Reduced num-states from " << num_states << " to "
+            << fst_.NumStates();
+}
+
+void ArpaLmCompiler::Check() const {
+  if (fst_.Start() == fst::kNoStateId) {
+    KALDI_ERR << "Arpa file did not contain the beginning-of-sentence symbol "
+              << Symbols()->Find(Options().bos_symbol) << ".";
+  }
+}
+
+void ArpaLmCompiler::ReadComplete() {
+  fst_.SetInputSymbols(Symbols());
+  fst_.SetOutputSymbols(Symbols());
+  RemoveRedundantStates();
+  Check();
+}
+
+}  // namespace kaldi
diff --git a/speechx/speechx/kaldi/lm/arpa-lm-compiler.h b/speechx/speechx/kaldi/lm/arpa-lm-compiler.h
new file mode 100644
index 000000000..67a18273f
--- /dev/null
+++ b/speechx/speechx/kaldi/lm/arpa-lm-compiler.h
@@ -0,0 +1,65 @@
+// lm/arpa-lm-compiler.h
+
+// Copyright 2009-2011 Gilles Boulianne
+// Copyright 2016 Smart Action LLC (kkm)
+
+// See ../../COPYING for clarification regarding multiple authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//  http://www.apache.org/licenses/LICENSE-2.0
+//
+// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
+// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
+// MERCHANTABLITY OR NON-INFRINGEMENT.
+// See the Apache 2 License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef KALDI_LM_ARPA_LM_COMPILER_H_
+#define KALDI_LM_ARPA_LM_COMPILER_H_
+
+#include <fst/fstlib.h>
+
+#include "lm/arpa-file-parser.h"
+
+namespace kaldi {
+
+class ArpaLmCompilerImplInterface;
+
+class ArpaLmCompiler : public ArpaFileParser {
+ public:
+  ArpaLmCompiler(const ArpaParseOptions& options, int sub_eps,
+                 fst::SymbolTable* symbols)
+      : ArpaFileParser(options, symbols),
+        sub_eps_(sub_eps), impl_(NULL) {
+  }
+  ~ArpaLmCompiler();
+
+  const fst::StdVectorFst& Fst() const { return fst_; }
+  fst::StdVectorFst* MutableFst() { return &fst_; }
+
+ protected:
+  // ArpaFileParser overrides.
+  virtual void HeaderAvailable();
+  virtual void ConsumeNGram(const NGram& ngram);
+  virtual void ReadComplete();
+
+ private:
+  // this function removes states that only have a backoff arc coming
+  // out of them.
+  void RemoveRedundantStates();
+  void Check() const;
+
+  int sub_eps_;
+  ArpaLmCompilerImplInterface* impl_;  // Owned.
+  fst::StdVectorFst fst_;
+  template <class HistKey> friend class ArpaLmCompilerImpl;
+};
+
+}  // namespace kaldi
+
+#endif  // KALDI_LM_ARPA_LM_COMPILER_H_
diff --git a/speechx/tools/lmbin/CMakeLists.txt b/speechx/speechx/kaldi/lmbin/CMakeLists.txt
similarity index 64%
rename from speechx/tools/lmbin/CMakeLists.txt
rename to speechx/speechx/kaldi/lmbin/CMakeLists.txt
index 277e20776..2b0932f7d 100644
--- a/speechx/tools/lmbin/CMakeLists.txt
+++ b/speechx/speechx/kaldi/lmbin/CMakeLists.txt
@@ -1,5 +1,4 @@
-cmake_minimum_required(VERSION 3.14 FATAL_ERROR)
 
 add_executable(arpa2fst ${CMAKE_CURRENT_SOURCE_DIR}/arpa2fst.cc)
 target_include_directories(arpa2fst PRIVATE ${SPEECHX_ROOT} ${SPEECHX_ROOT}/kaldi)
-target_link_libraries(arpa2fst )
+target_link_libraries(arpa2fst PUBLIC kaldi-lm glog gflags fst)
diff --git a/speechx/tools/lmbin/arpa2fst.cc b/speechx/speechx/kaldi/lmbin/arpa2fst.cc
similarity index 100%
rename from speechx/tools/lmbin/arpa2fst.cc
rename to speechx/speechx/kaldi/lmbin/arpa2fst.cc

From 80068354d454c3682afd2ce824a1f2e368ed6002 Mon Sep 17 00:00:00 2001
From: Yang Zhou
Date: Tue, 24 May 2022 16:29:35 +0800
Subject: [PATCH 027/127] fix cmakelist

---
 speechx/CMakeLists.txt | 12 ++----------
 1 file changed, 2 insertions(+), 10 deletions(-)

diff --git a/speechx/CMakeLists.txt b/speechx/CMakeLists.txt
index db5c3cc6f..98d9e6374 100644
--- a/speechx/CMakeLists.txt
+++ b/speechx/CMakeLists.txt
@@ -57,7 +57,7 @@ include(gtest)
 include(absl)
 
 # libsndfile
-#include(libsndfile)
+include(libsndfile)
 
 # boost
 # include(boost) # not work
@@ -73,17 +73,9 @@ find_package(Eigen3 REQUIRED)
 # Kenlm
 include(kenlm)
 add_dependencies(kenlm eigen boost)
-#set(kenlm_install_dir $(fc_patch)/kenlm-build)
-#link_directories(${Kenlm_install_dir}/lib)
-#include_directories(${fc_patch}/kenlm-src)
 
 #openblas
-#include(openblas)
-set(OpenBLAS_INSTALL_PREFIX ${fc_patch}/openblas-install)
-link_directories(${OpenBLAS_INSTALL_PREFIX}/lib)
-include_directories(${OpenBLAS_INSTALL_PREFIX}/include)
-
-
+include(openblas)
 
 # openfst
 include(openfst)

From fef3e5f15fe57f9537bafd57dd7185056b77dd6d Mon Sep 17 00:00:00 2001
From: Yang Zhou
Date: Tue, 24 May 2022 17:03:01 +0800
Subject: [PATCH 028/127] fix

---
 demos/custom_streaming_asr/README.md    | 3 ++-
 demos/custom_streaming_asr/README_cn.md | 2 +-
 speechx/examples/custom_asr/README.md   | 6 +++---
 3 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/demos/custom_streaming_asr/README.md b/demos/custom_streaming_asr/README.md
index 74af59a77..da86e90ab 100644
--- a/demos/custom_streaming_asr/README.md
+++ b/demos/custom_streaming_asr/README.md
@@ -3,11 +3,12 @@
 # Customized Auto Speech Recognition
 
 ## introduction
+
 In some cases, we need to recognize specific rare words with high accuracy, e.g. address recognition in navigation apps. Customized ASR can solve these issues.
 
 This demo is customized for expense-account scenarios, which need to recognize rare addresses.
 
-the scripts are in PaddleSpeech/speechx/examples/custom_asr.
+The scripts are in https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/speechx/examples/custom_asr
 
 * G with slot: 打车到 "address_slot"。
 ![](https://ai-studio-static-online.cdn.bcebos.com/28d9ef132a7f47a895a65ae9e5c4f55b8f472c9f3dd24be8a2e66e0b88b173a4)
diff --git a/demos/custom_streaming_asr/README_cn.md b/demos/custom_streaming_asr/README_cn.md
index 5c0f7e89c..f9981a6ae 100644
--- a/demos/custom_streaming_asr/README_cn.md
+++ b/demos/custom_streaming_asr/README_cn.md
@@ -6,7 +6,7 @@
 这个 demo 是打车报销单的场景识别,需要识别一些稀有的地名,可以通过如下操作实现。
 
-相关脚本:PaddleSpeech/speechx/examples/custom_asr
+相关脚本:https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/speechx/examples/custom_asr
 
 * G with slot: 打车到 "address_slot"。
 ![](https://ai-studio-static-online.cdn.bcebos.com/28d9ef132a7f47a895a65ae9e5c4f55b8f472c9f3dd24be8a2e66e0b88b173a4)
diff --git a/speechx/examples/custom_asr/README.md b/speechx/examples/custom_asr/README.md
index bfc071cb9..5ffa21b50 100644
--- a/speechx/examples/custom_asr/README.md
+++ b/speechx/examples/custom_asr/README.md
@@ -1,7 +1,7 @@
 # customized Auto Speech Recognition
 
 ## introduction
-those scripts are tutorials to show you how make your own decoding graph.
+These scripts are tutorials that show you how to build your own decoding graph.
 
 eg:
 * G with slot: 打车到 "address_slot"。
@@ -13,7 +13,7 @@ eg:
 * after the replace operation, G = fstreplace(G_with_slot, address_slot), we will get the customized graph.
 ![](https://ai-studio-static-online.cdn.bcebos.com/60a3095293044f10b73039ab10c7950d139a6717580a44a3ba878c6e74de402b)
 
-those operations are in the scripts, please check out. we will lanuch more detail scripts.
+These operations are in the scripts; please check them out. We will launch more detailed scripts.
 
 ## How to run
 
@@ -29,4 +29,4 @@ bash run.sh
 Overall -> 1.23 % N=1134 C=1126 S=6 D=2 I=6
 Mandarin -> 1.24 % N=1132 C=1124 S=6 D=2 I=6
 English -> 0.00 % N=2 C=2 S=0 D=0 I=0
-```
\ No newline at end of file
+```
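The `fstreplace` operation that the README above refers to has the shape below (a sketch that restates the invocation from `mk_tlg_with_slot.sh` earlier in this series; the labels are the integer ids of ROOT and the slot nonterminals looked up in words.txt):

```bash
# Replace each slot nonterminal in the root grammar with its slot FST.
# The first (fst, label) pair names the root; every following pair maps a
# nonterminal label to the sub-FST spliced in at arcs carrying that label.
fstreplace --epsilon_on_replace G_with_slot.fst $root_label \
    address_slot.fst $address_slot_label G.fst
```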

From 1cdd41bd03488b38c6082c766bf819b6bc94f61c Mon Sep 17 00:00:00 2001
From: huangyuxin
Date: Tue, 24 May 2022 09:46:12 +0000
Subject: [PATCH 029/127] fix pad_sequence, test=asr

---
 paddlespeech/s2t/utils/tensor_utils.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/paddlespeech/s2t/utils/tensor_utils.py b/paddlespeech/s2t/utils/tensor_utils.py
index 0dbaa0b6b..e105253c2 100644
--- a/paddlespeech/s2t/utils/tensor_utils.py
+++ b/paddlespeech/s2t/utils/tensor_utils.py
@@ -82,7 +82,7 @@ def pad_sequence(sequences: List[paddle.Tensor],
     max_size = sequences[0].size()
     # (TODO Hui Zhang): slice not support `end==start`
     # trailing_dims = max_size[1:]
-    trailing_dims = max_size[1:] if max_size.ndim >= 2 else ()
+    trailing_dims = tuple(max_size[1:].numpy().tolist()) if sequences[0].ndim >= 2 else ()
     max_len = max([s.shape[0] for s in sequences])
     if batch_first:
         out_dims = (len(sequences), max_len) + trailing_dims
@@ -99,7 +99,7 @@ def pad_sequence(sequences: List[paddle.Tensor],
         if batch_first:
             # TODO (Hui Zhang): set_value op not support `end==start`
             # TODO (Hui Zhang): set_value op not support int16
-            # TODO (Hui Zhang): set_varbase 2 rank not support [0,0,...] 
+            # TODO (Hui Zhang): set_varbase 2 rank not support [0,0,...]
            # out_tensor[i, :length, ...] = tensor
             if length != 0:
                 out_tensor[i, :length] = tensor
@@ -145,7 +145,7 @@ def add_sos_eos(ys_pad: paddle.Tensor, sos: int, eos: int,
          [ 4,  5,  6, 11, -1, -1],
          [ 7,  8,  9, 11, -1, -1]])
     """
-    # TODO(Hui Zhang): using comment code, 
+    # TODO(Hui Zhang): using comment code,
     #_sos = paddle.to_tensor(
     #    [sos], dtype=paddle.long, stop_gradient=True, place=ys_pad.place)
     #_eos = paddle.to_tensor(

From 7a88e3f4e45a6b884b13b4f4c8492f4ebe54a667 Mon Sep 17 00:00:00 2001
From: TianYuan
Date: Tue, 24 May 2022 09:41:08 +0000
Subject: [PATCH 030/127] update readme, test=doc

---
 examples/aishell3/tts3/README.md | 11 ++---------
 examples/aishell3/vc0/README.md  | 11 ++---------
 examples/aishell3/vc1/README.md  | 11 ++---------
 examples/aishell3/voc1/README.md | 11 ++---------
 examples/aishell3/voc5/README.md | 10 +---------
 examples/csmsc/tts0/README.md    |  2 +-
 examples/csmsc/tts2/README.md    |  2 +-
 examples/csmsc/tts3/README.md    |  2 +-
 examples/csmsc/tts3/README_cn.md |  2 +-
 examples/csmsc/voc1/README.md    |  2 +-
 examples/csmsc/voc3/README.md    |  2 +-
 examples/csmsc/voc4/README.md    |  2 +-
 examples/csmsc/voc5/README.md    |  2 +-
 examples/csmsc/voc6/README.md    |  2 +-
 examples/ljspeech/tts0/README.md |  2 +-
 examples/ljspeech/tts1/README.md | 11 ++++------
 examples/ljspeech/tts3/README.md |  2 +-
 examples/ljspeech/voc0/README.md |  8 ++------
 examples/ljspeech/voc1/README.md |  2 +-
 examples/ljspeech/voc5/README.md |  2 +-
 examples/vctk/tts3/README.md     |  2 +-
 examples/vctk/voc1/README.md     |  2 +-
 examples/vctk/voc5/README.md     |  2 +-
 23 files changed, 31 insertions(+), 74 deletions(-)

diff --git a/examples/aishell3/tts3/README.md b/examples/aishell3/tts3/README.md
index d02ad1b63..9c505679c 100644
--- a/examples/aishell3/tts3/README.md
+++ b/examples/aishell3/tts3/README.md
@@ -6,15 +6,8 @@ AISHELL-3 is a large-scale and high-fidelity multi-speaker Mandarin speech corpu
 We use AISHELL-3 to train a multi-speaker fastspeech2 model here.
 ## Dataset
 ### Download and Extract
-Download AISHELL-3.
-```bash
-wget https://www.openslr.org/resources/93/data_aishell3.tgz
-```
-Extract AISHELL-3.
-```bash
-mkdir data_aishell3
-tar zxvf data_aishell3.tgz -C data_aishell3
-```
+Download AISHELL-3 from its [Official Website](http://www.aishelltech.com/aishell_3) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/data_aishell3`.
+
 ### Get MFA Result and Extract
 We use [MFA2.x](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) to get durations for aishell3_fastspeech2.
 You can download from here [aishell3_alignment_tone.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/AISHELL-3/with_tone/aishell3_alignment_tone.tar.gz), or train your MFA model reference to [mfa example](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/other/mfa) (use MFA1.x now) of our repo.
diff --git a/examples/aishell3/vc0/README.md b/examples/aishell3/vc0/README.md
index 925663ab1..d64f961ad 100644
--- a/examples/aishell3/vc0/README.md
+++ b/examples/aishell3/vc0/README.md
@@ -6,15 +6,8 @@ This example contains code used to train a [Tacotron2](https://arxiv.org/abs/171
 
 ## Dataset
 ### Download and Extract
-Download AISHELL-3.
-```bash
-wget https://www.openslr.org/resources/93/data_aishell3.tgz
-```
-Extract AISHELL-3.
-```bash
-mkdir data_aishell3
-tar zxvf data_aishell3.tgz -C data_aishell3
-```
+Download AISHELL-3 from its [Official Website](http://www.aishelltech.com/aishell_3) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/data_aishell3`.
+
 ### Get MFA Result and Extract
 We use [MFA2.x](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) to get phonemes for Tacotron2, the durations of MFA are not needed here.
 You can download from here [aishell3_alignment_tone.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/AISHELL-3/with_tone/aishell3_alignment_tone.tar.gz), or train your MFA model reference to [mfa example](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/other/mfa) (use MFA1.x now) of our repo.
diff --git a/examples/aishell3/vc1/README.md b/examples/aishell3/vc1/README.md
index 8ab0f9c8c..aab525103 100644
--- a/examples/aishell3/vc1/README.md
+++ b/examples/aishell3/vc1/README.md
@@ -6,15 +6,8 @@ This example contains code used to train a [FastSpeech2](https://arxiv.org/abs/2
 
 ## Dataset
 ### Download and Extract
-Download AISHELL-3.
-```bash
-wget https://www.openslr.org/resources/93/data_aishell3.tgz
-```
-Extract AISHELL-3.
-```bash
-mkdir data_aishell3
-tar zxvf data_aishell3.tgz -C data_aishell3
-```
+Download AISHELL-3 from its [Official Website](http://www.aishelltech.com/aishell_3) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/data_aishell3`.
+
 ### Get MFA Result and Extract
 We use [MFA2.x](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) to get durations for aishell3_fastspeech2.
 You can download from here [aishell3_alignment_tone.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/AISHELL-3/with_tone/aishell3_alignment_tone.tar.gz), or train your MFA model reference to [mfa example](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/other/mfa) (use MFA1.x now) of our repo.
diff --git a/examples/aishell3/voc1/README.md b/examples/aishell3/voc1/README.md
index eb30e7c40..e9e012d29 100644
--- a/examples/aishell3/voc1/README.md
+++ b/examples/aishell3/voc1/README.md
@@ -4,15 +4,8 @@ This example contains code used to train a [parallel wavegan](http://arxiv.org/a
 AISHELL-3 is a large-scale and high-fidelity multi-speaker Mandarin speech corpus that could be used to train multi-speaker Text-to-Speech (TTS) systems.
 ## Dataset
 ### Download and Extract
-Download AISHELL-3.
-```bash
-wget https://www.openslr.org/resources/93/data_aishell3.tgz
-```
-Extract AISHELL-3.
-```bash
-mkdir data_aishell3
-tar zxvf data_aishell3.tgz -C data_aishell3
-```
+Download AISHELL-3 from its [Official Website](http://www.aishelltech.com/aishell_3) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/data_aishell3`.
+
 ### Get MFA Result and Extract
 We use [MFA2.x](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) to get durations for aishell3_fastspeech2.
 You can download from here [aishell3_alignment_tone.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/AISHELL-3/with_tone/aishell3_alignment_tone.tar.gz), or train your MFA model reference to [mfa example](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/other/mfa) (use MFA1.x now) of our repo.
diff --git a/examples/aishell3/voc5/README.md b/examples/aishell3/voc5/README.md
index c957c4a3a..84bcd78ef 100644
--- a/examples/aishell3/voc5/README.md
+++ b/examples/aishell3/voc5/README.md
@@ -4,15 +4,7 @@ This example contains code used to train a [HiFiGAN](https://arxiv.org/abs/2010.
 AISHELL-3 is a large-scale and high-fidelity multi-speaker Mandarin speech corpus that could be used to train multi-speaker Text-to-Speech (TTS) systems.
 ## Dataset
 ### Download and Extract
-Download AISHELL-3.
-```bash
-wget https://www.openslr.org/resources/93/data_aishell3.tgz
-```
-Extract AISHELL-3.
-```bash
-mkdir data_aishell3
-tar zxvf data_aishell3.tgz -C data_aishell3
-```
+Download AISHELL-3 from its [Official Website](http://www.aishelltech.com/aishell_3) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/data_aishell3`.
 ### Get MFA Result and Extract
 We use [MFA2.x](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) to get durations for aishell3_fastspeech2.
 You can download from here [aishell3_alignment_tone.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/AISHELL-3/with_tone/aishell3_alignment_tone.tar.gz), or train your MFA model reference to [mfa example](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/other/mfa) (use MFA1.x now) of our repo.
diff --git a/examples/csmsc/tts0/README.md b/examples/csmsc/tts0/README.md
index 01376bd61..d62c90117 100644
--- a/examples/csmsc/tts0/README.md
+++ b/examples/csmsc/tts0/README.md
@@ -3,7 +3,7 @@ This example contains code used to train a [Tacotron2](https://arxiv.org/abs/171
 
 ## Dataset
 ### Download and Extract
-Download CSMSC from it's [Official Website](https://test.data-baker.com/data/index/source).
+Download CSMSC from its [Official Website](https://test.data-baker.com/data/index/TNtts/) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/BZNSYP`.
 
 ### Get MFA Result and Extract
 We use [MFA](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) to get phonemes for Tacotron2, the durations of MFA are not needed here.
diff --git a/examples/csmsc/tts2/README.md b/examples/csmsc/tts2/README.md
index 081d85848..1bcfb383f 100644
--- a/examples/csmsc/tts2/README.md
+++ b/examples/csmsc/tts2/README.md
@@ -3,7 +3,7 @@ This example contains code used to train a [SpeedySpeech](http://arxiv.org/abs/2
 
 ## Dataset
 ### Download and Extract
-Download CSMSC from it's [Official Website](https://test.data-baker.com/data/index/source).
+Download CSMSC from its [Official Website](https://test.data-baker.com/data/index/TNtts/) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/BZNSYP`.
 
 ### Get MFA Result and Extract
 We use [MFA](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) to get durations for SPEEDYSPEECH.
diff --git a/examples/csmsc/tts3/README.md b/examples/csmsc/tts3/README.md
index c734199b4..1f7dfa0fd 100644
--- a/examples/csmsc/tts3/README.md
+++ b/examples/csmsc/tts3/README.md
@@ -4,7 +4,7 @@ This example contains code used to train a [Fastspeech2](https://arxiv.org/abs/2
 
 ## Dataset
 ### Download and Extract
-Download CSMSC from it's [Official Website](https://test.data-baker.com/data/index/source).
+Download CSMSC from its [Official Website](https://test.data-baker.com/data/index/TNtts/) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/BZNSYP`.
 
 ### Get MFA Result and Extract
 We use [MFA](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) to get durations for fastspeech2.
diff --git a/examples/csmsc/tts3/README_cn.md b/examples/csmsc/tts3/README_cn.md index 25931ecb1..f08ca724c 100644 --- a/examples/csmsc/tts3/README_cn.md +++ b/examples/csmsc/tts3/README_cn.md @@ -5,7 +5,7 @@ ## 数据集 ### 下载并解压 -从 [官方网站](https://test.data-baker.com/data/index/source) 下载数据集 +从 [官方网站](https://test.data-baker.com/data/index/TNtts/) 下载数据集 ### 获取MFA结果并解压 我们使用 [MFA](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) 去获得 fastspeech2 的音素持续时间。 diff --git a/examples/csmsc/voc1/README.md b/examples/csmsc/voc1/README.md index 77da5b185..d5bec1cd7 100644 --- a/examples/csmsc/voc1/README.md +++ b/examples/csmsc/voc1/README.md @@ -2,7 +2,7 @@ This example contains code used to train a [parallel wavegan](http://arxiv.org/abs/1910.11480) model with [Chinese Standard Mandarin Speech Corpus](https://www.data-baker.com/open_source.html). ## Dataset ### Download and Extract -Download CSMSC from the [official website](https://www.data-baker.com/data/index/source) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/BZNSYP`. +Download CSMSC from its [official website](https://test.data-baker.com/data/index/TNtts/) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/BZNSYP`. ### Get MFA Result and Extract We use [MFA](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) results to cut silence at the edge of audio. diff --git a/examples/csmsc/voc3/README.md b/examples/csmsc/voc3/README.md index 12adaf7f4..e188bcb35 100644 --- a/examples/csmsc/voc3/README.md +++ b/examples/csmsc/voc3/README.md @@ -2,7 +2,7 @@ This example contains code used to train a [Multi Band MelGAN](https://arxiv.org/abs/2005.05106) model with [Chinese Standard Mandarin Speech Corpus](https://www.data-baker.com/open_source.html). ## Dataset ### Download and Extract -Download CSMSC from the [official website](https://www.data-baker.com/data/index/source) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/BZNSYP`. +Download CSMSC from its [official website](https://test.data-baker.com/data/index/TNtts/) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/BZNSYP`. ### Get MFA Result and Extract We use [MFA](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) results to cut the silence at the edge of audio. diff --git a/examples/csmsc/voc4/README.md b/examples/csmsc/voc4/README.md index b7add3e57..19836134e 100644 --- a/examples/csmsc/voc4/README.md +++ b/examples/csmsc/voc4/README.md @@ -2,7 +2,7 @@ This example contains code used to train a [Style MelGAN](https://arxiv.org/abs/2011.01557) model with [Chinese Standard Mandarin Speech Corpus](https://www.data-baker.com/open_source.html). ## Dataset ### Download and Extract -Download CSMSC from the [official website](https://www.data-baker.com/data/index/source) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/BZNSYP`. +Download CSMSC from its [official website](https://test.data-baker.com/data/index/TNtts/) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/BZNSYP`. ### Get MFA Result and Extract We use [MFA](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) results to cut the silence at the edge of audio.
diff --git a/examples/csmsc/voc5/README.md b/examples/csmsc/voc5/README.md index 94f93b48b..4c38b5987 100644 --- a/examples/csmsc/voc5/README.md +++ b/examples/csmsc/voc5/README.md @@ -2,7 +2,7 @@ This example contains code used to train a [HiFiGAN](https://arxiv.org/abs/2010.05646) model with [Chinese Standard Mandarin Speech Corpus](https://www.data-baker.com/open_source.html). ## Dataset ### Download and Extract -Download CSMSC from the [official website](https://www.data-baker.com/data/index/source) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/BZNSYP`. +Download CSMSC from its [official website](https://test.data-baker.com/data/index/TNtts/) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/BZNSYP`. ### Get MFA Result and Extract We use [MFA](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) results to cut silence at the edge of audio. diff --git a/examples/csmsc/voc6/README.md b/examples/csmsc/voc6/README.md index 7dcf133bd..0e5ce6334 100644 --- a/examples/csmsc/voc6/README.md +++ b/examples/csmsc/voc6/README.md @@ -2,7 +2,7 @@ This example contains code used to train a [WaveRNN](https://arxiv.org/abs/1802.08435) model with [Chinese Standard Mandarin Speech Corpus](https://www.data-baker.com/open_source.html). ## Dataset ### Download and Extract -Download CSMSC from the [official website](https://www.data-baker.com/data/index/source) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/BZNSYP`. +Download CSMSC from its [official website](https://test.data-baker.com/data/index/TNtts/) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/BZNSYP`. ### Get MFA Result and Extract We use [MFA](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) results to cut silence at the edge of audio. diff --git a/examples/ljspeech/tts0/README.md b/examples/ljspeech/tts0/README.md index ba7ad6193..e3292957b 100644 --- a/examples/ljspeech/tts0/README.md +++ b/examples/ljspeech/tts0/README.md @@ -3,7 +3,7 @@ This example contains code used to train a [Tacotron2](https://arxiv.org/abs/171 ## Dataset ### Download and Extract -Download LJSpeech-1.1 from the [official website](https://keithito.com/LJ-Speech-Dataset/). +Download LJSpeech-1.1 from its [Official Website](https://keithito.com/LJ-Speech-Dataset/) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/LJSpeech-1.1`. ### Get MFA Result and Extract We use [MFA](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) to get phonemes for Tacotron2; the durations from MFA are not needed here. diff --git a/examples/ljspeech/tts1/README.md b/examples/ljspeech/tts1/README.md index 7f32522ac..9f82185ca 100644 --- a/examples/ljspeech/tts1/README.md +++ b/examples/ljspeech/tts1/README.md @@ -1,13 +1,10 @@ # TransformerTTS with LJSpeech ## Dataset -We experiment with the LJSpeech dataset. Download and unzip [LJSpeech](https://keithito.com/LJ-Speech-Dataset/). - -```bash -wget https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2 -tar xjvf LJSpeech-1.1.tar.bz2 -``` +### Download and Extract +Download LJSpeech-1.1 from its [Official Website](https://keithito.com/LJ-Speech-Dataset/) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/LJSpeech-1.1`. ## Get Started -Assume the path to the dataset is `~/datasets/LJSpeech-1.1`. +Assume LJSpeech-1.1 has been extracted to `~/datasets`, so the path to the dataset is `~/datasets/LJSpeech-1.1`.
+ Run the command below to 1. **source path**. 2. preprocess the dataset. diff --git a/examples/ljspeech/tts3/README.md b/examples/ljspeech/tts3/README.md index e028fa05d..8a666193f 100644 --- a/examples/ljspeech/tts3/README.md +++ b/examples/ljspeech/tts3/README.md @@ -3,7 +3,7 @@ This example contains code used to train a [Fastspeech2](https://arxiv.org/abs/2 ## Dataset ### Download and Extract -Download LJSpeech-1.1 from the [official website](https://keithito.com/LJ-Speech-Dataset/). +Download LJSpeech-1.1 from its [Official Website](https://keithito.com/LJ-Speech-Dataset/) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/LJSpeech-1.1`. ### Get MFA Result and Extract We use [MFA](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) to get durations for fastspeech2. diff --git a/examples/ljspeech/voc0/README.md b/examples/ljspeech/voc0/README.md index 41b08d57f..ae48a9a7f 100644 --- a/examples/ljspeech/voc0/README.md +++ b/examples/ljspeech/voc0/README.md @@ -1,11 +1,7 @@ # WaveFlow with LJSpeech ## Dataset -We experiment with the LJSpeech dataset. Download and unzip [LJSpeech](https://keithito.com/LJ-Speech-Dataset/). - -```bash -wget https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2 -tar xjvf LJSpeech-1.1.tar.bz2 -``` +### Download and Extract +Download LJSpeech-1.1 from its [Official Website](https://keithito.com/LJ-Speech-Dataset/) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/LJSpeech-1.1`. ## Get Started Assume the path to the dataset is `~/datasets/LJSpeech-1.1`. Assume the path to the Tacotron2 generated mels is `../tts0/output/test`. diff --git a/examples/ljspeech/voc1/README.md b/examples/ljspeech/voc1/README.md index 4513b2a05..491444261 100644 --- a/examples/ljspeech/voc1/README.md +++ b/examples/ljspeech/voc1/README.md @@ -2,7 +2,7 @@ This example contains code used to train a [parallel wavegan](http://arxiv.org/abs/1910.11480) model with [LJSpeech-1.1](https://keithito.com/LJ-Speech-Dataset/). ## Dataset ### Download and Extract -Download LJSpeech-1.1 from the [official website](https://keithito.com/LJ-Speech-Dataset/). +Download LJSpeech-1.1 from its [Official Website](https://keithito.com/LJ-Speech-Dataset/) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/LJSpeech-1.1`. ### Get MFA Result and Extract We use [MFA](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) results to cut the silence at the edge of audio. You can download it from here: [ljspeech_alignment.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/LJSpeech-1.1/ljspeech_alignment.tar.gz), or train your own MFA model by referring to the [mfa example](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/other/mfa) in our repo. diff --git a/examples/ljspeech/voc5/README.md b/examples/ljspeech/voc5/README.md index 9b31e2650..830515042 100644 --- a/examples/ljspeech/voc5/README.md +++ b/examples/ljspeech/voc5/README.md @@ -2,7 +2,7 @@ This example contains code used to train a [HiFiGAN](https://arxiv.org/abs/2010.05646) model with [LJSpeech-1.1](https://keithito.com/LJ-Speech-Dataset/). ## Dataset ### Download and Extract -Download LJSpeech-1.1 from the [official website](https://keithito.com/LJ-Speech-Dataset/). +Download LJSpeech-1.1 from its [Official Website](https://keithito.com/LJ-Speech-Dataset/) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/LJSpeech-1.1`.
### Get MFA Result and Extract We use [MFA](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) results to cut the silence at the edge of audio. You can download it from here: [ljspeech_alignment.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/LJSpeech-1.1/ljspeech_alignment.tar.gz), or train your own MFA model by referring to the [mfa example](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/other/mfa) in our repo. diff --git a/examples/vctk/tts3/README.md b/examples/vctk/tts3/README.md index f373ca6a3..886491087 100644 --- a/examples/vctk/tts3/README.md +++ b/examples/vctk/tts3/README.md @@ -3,7 +3,7 @@ This example contains code used to train a [Fastspeech2](https://arxiv.org/abs/2 ## Dataset ### Download and Extract the dataset -Download VCTK-0.92 from the [official website](https://datashare.ed.ac.uk/handle/10283/3443). +Download VCTK-0.92 from its [Official Website](https://datashare.ed.ac.uk/handle/10283/3443) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/VCTK-Corpus-0.92`. ### Get MFA Result and Extract We use [MFA](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) to get durations for fastspeech2. diff --git a/examples/vctk/voc1/README.md b/examples/vctk/voc1/README.md index 1c3016f88..45ba51013 100644 --- a/examples/vctk/voc1/README.md +++ b/examples/vctk/voc1/README.md @@ -3,7 +3,7 @@ This example contains code used to train a [parallel wavegan](http://arxiv.org/a ## Dataset ### Download and Extract -Download VCTK-0.92 from the [official website](https://datashare.ed.ac.uk/handle/10283/3443) and extract it to `~/datasets`. Then the dataset is in directory `~/datasets/VCTK-Corpus-0.92`. +Download VCTK-0.92 from its [Official Website](https://datashare.ed.ac.uk/handle/10283/3443) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/VCTK-Corpus-0.92`. ### Get MFA Result and Extract We use [MFA](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) results to cut the silence at the edge of audio. diff --git a/examples/vctk/voc5/README.md b/examples/vctk/voc5/README.md index 4eb25c02d..514af4679 100644 --- a/examples/vctk/voc5/README.md +++ b/examples/vctk/voc5/README.md @@ -3,7 +3,7 @@ This example contains code used to train a [HiFiGAN](https://arxiv.org/abs/2010. ## Dataset ### Download and Extract -Download VCTK-0.92 from the [official website](https://datashare.ed.ac.uk/handle/10283/3443) and extract it to `~/datasets`. Then the dataset is in directory `~/datasets/VCTK-Corpus-0.92`. +Download VCTK-0.92 from its [Official Website](https://datashare.ed.ac.uk/handle/10283/3443) and extract it to `~/datasets`. Then the dataset is in the directory `~/datasets/VCTK-Corpus-0.92`. ### Get MFA Result and Extract We use [MFA](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) results to cut the silence at the edge of audio.
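All of the README changes above converge on one convention: fetch each corpus yourself and unpack it under `~/datasets`. As a minimal sketch of what that looks like in practice, the commands below use AISHELL-3 and the OpenSLR mirror URL that appeared in the removed instructions; for the other corpora, substitute the archive and target directory (for example, `~/datasets/BZNSYP` for CSMSC or `~/datasets/LJSpeech-1.1` for LJSpeech).

```bash
# Minimal sketch of the download-and-extract convention the updated READMEs assume.
# The URL is the OpenSLR mirror referenced by the old instructions; the official
# websites linked above are the canonical sources.
mkdir -p ~/datasets/data_aishell3
wget -c https://www.openslr.org/resources/93/data_aishell3.tgz
tar zxvf data_aishell3.tgz -C ~/datasets/data_aishell3
# The aishell3 examples then expect the corpus at ~/datasets/data_aishell3
```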
From 1e91f7da354c61e7b12740240524e0ce0330e689 Mon Sep 17 00:00:00 2001 From: Zhangjingyu06 Date: Tue, 24 May 2022 10:10:38 +0000 Subject: [PATCH 031/127] deepspeech2 modify for kunlun --- paddlespeech/s2t/exps/deepspeech2/bin/export.py | 5 +++++ paddlespeech/s2t/exps/deepspeech2/bin/test.py | 5 +++++ paddlespeech/s2t/exps/deepspeech2/bin/test_export.py | 5 +++++ paddlespeech/s2t/exps/deepspeech2/bin/train.py | 5 +++++ paddlespeech/s2t/training/trainer.py | 8 +++++++- 5 files changed, 27 insertions(+), 1 deletion(-) diff --git a/paddlespeech/s2t/exps/deepspeech2/bin/export.py b/paddlespeech/s2t/exps/deepspeech2/bin/export.py index ee013d79e..ae43bf82c 100644 --- a/paddlespeech/s2t/exps/deepspeech2/bin/export.py +++ b/paddlespeech/s2t/exps/deepspeech2/bin/export.py @@ -37,6 +37,11 @@ if __name__ == "__main__": "--export_path", type=str, help="path of the jit model to save") parser.add_argument( "--model_type", type=str, default='offline', help="offline/online") + parser.add_argument( + '--nxpu', + type=int, + default=1, + help="if nxpu == 0 and ngpu == 0, use cpu.") args = parser.parse_args() print("model_type:{}".format(args.model_type)) print_arguments(args) diff --git a/paddlespeech/s2t/exps/deepspeech2/bin/test.py b/paddlespeech/s2t/exps/deepspeech2/bin/test.py index 388b380d1..f29f50832 100644 --- a/paddlespeech/s2t/exps/deepspeech2/bin/test.py +++ b/paddlespeech/s2t/exps/deepspeech2/bin/test.py @@ -37,6 +37,11 @@ if __name__ == "__main__": # save asr result to parser.add_argument( "--result_file", type=str, help="path of save the asr result") + parser.add_argument( + '--nxpu', + type=int, + default=1, + help="if nxpu == 0 and ngpu == 0, use cpu.") args = parser.parse_args() print_arguments(args, globals()) print("model_type:{}".format(args.model_type)) diff --git a/paddlespeech/s2t/exps/deepspeech2/bin/test_export.py b/paddlespeech/s2t/exps/deepspeech2/bin/test_export.py index 707eb9e1b..c136ddf29 100644 --- a/paddlespeech/s2t/exps/deepspeech2/bin/test_export.py +++ b/paddlespeech/s2t/exps/deepspeech2/bin/test_export.py @@ -40,6 +40,11 @@ if __name__ == "__main__": "--export_path", type=str, help="path of the jit model to save") parser.add_argument( "--model_type", type=str, default='offline', help='offline/online') + parser.add_argument( + '--nxpu', + type=int, + default=1, + help="if nxpu == 0 and ngpu == 0, use cpu.") parser.add_argument( "--enable-auto-log", action="store_true", help="use auto log") args = parser.parse_args() diff --git a/paddlespeech/s2t/exps/deepspeech2/bin/train.py b/paddlespeech/s2t/exps/deepspeech2/bin/train.py index e2c68d4be..cb4867ef2 100644 --- a/paddlespeech/s2t/exps/deepspeech2/bin/train.py +++ b/paddlespeech/s2t/exps/deepspeech2/bin/train.py @@ -33,6 +33,11 @@ if __name__ == "__main__": parser = default_argument_parser() parser.add_argument( "--model_type", type=str, default='offline', help='offline/online') + parser.add_argument( + '--nxpu', + type=int, + default=1, + help="if nxpu == 0 and ngpu == 0, use cpu.") args = parser.parse_args() print("model_type:{}".format(args.model_type)) print_arguments(args, globals()) diff --git a/paddlespeech/s2t/training/trainer.py b/paddlespeech/s2t/training/trainer.py index 84da251aa..d30556ca1 100644 --- a/paddlespeech/s2t/training/trainer.py +++ b/paddlespeech/s2t/training/trainer.py @@ -112,7 +112,13 @@ class Trainer(): logger.info(f"Rank: {self.rank}/{self.world_size}") # set device - paddle.set_device('gpu' if self.args.ngpu > 0 else 'cpu') + if self.args.ngpu == 0: + if self.args.nxpu == 0: + 
paddle.set_device('cpu') + else: + paddle.set_device('xpu') + elif self.args.ngpu > 0: + paddle.set_device("gpu") if self.parallel: self.init_parallel() From b0eaeccd670b53ac77acd044fa738dca92f20032 Mon Sep 17 00:00:00 2001 From: Zhangjingyu06 Date: Tue, 24 May 2022 11:58:49 +0000 Subject: [PATCH 032/127] deepspeech2 modify for kunlun --- paddlespeech/s2t/exps/deepspeech2/bin/export.py | 2 +- paddlespeech/s2t/exps/deepspeech2/bin/test.py | 2 +- paddlespeech/s2t/exps/deepspeech2/bin/test_export.py | 2 +- paddlespeech/s2t/exps/deepspeech2/bin/train.py | 2 +- paddlespeech/s2t/training/trainer.py | 3 +++ 5 files changed, 7 insertions(+), 4 deletions(-) diff --git a/paddlespeech/s2t/exps/deepspeech2/bin/export.py b/paddlespeech/s2t/exps/deepspeech2/bin/export.py index ae43bf82c..f1b84969d 100644 --- a/paddlespeech/s2t/exps/deepspeech2/bin/export.py +++ b/paddlespeech/s2t/exps/deepspeech2/bin/export.py @@ -40,7 +40,7 @@ if __name__ == "__main__": parser.add_argument( '--nxpu', type=int, - default=1, + default=0, help="if nxpu == 0 and ngpu == 0, use cpu.") args = parser.parse_args() print("model_type:{}".format(args.model_type)) diff --git a/paddlespeech/s2t/exps/deepspeech2/bin/test.py b/paddlespeech/s2t/exps/deepspeech2/bin/test.py index f29f50832..c91be411a 100644 --- a/paddlespeech/s2t/exps/deepspeech2/bin/test.py +++ b/paddlespeech/s2t/exps/deepspeech2/bin/test.py @@ -40,7 +40,7 @@ if __name__ == "__main__": parser.add_argument( '--nxpu', type=int, - default=1, + default=0, help="if nxpu == 0 and ngpu == 0, use cpu.") args = parser.parse_args() print_arguments(args, globals()) diff --git a/paddlespeech/s2t/exps/deepspeech2/bin/test_export.py b/paddlespeech/s2t/exps/deepspeech2/bin/test_export.py index c136ddf29..923c3db49 100644 --- a/paddlespeech/s2t/exps/deepspeech2/bin/test_export.py +++ b/paddlespeech/s2t/exps/deepspeech2/bin/test_export.py @@ -43,7 +43,7 @@ if __name__ == "__main__": parser.add_argument( '--nxpu', type=int, - default=1, + default=0, help="if nxpu == 0 and ngpu == 0, use cpu.") parser.add_argument( "--enable-auto-log", action="store_true", help="use auto log") diff --git a/paddlespeech/s2t/exps/deepspeech2/bin/train.py b/paddlespeech/s2t/exps/deepspeech2/bin/train.py index cb4867ef2..d80cd0cb2 100644 --- a/paddlespeech/s2t/exps/deepspeech2/bin/train.py +++ b/paddlespeech/s2t/exps/deepspeech2/bin/train.py @@ -36,7 +36,7 @@ if __name__ == "__main__": parser.add_argument( '--nxpu', type=int, - default=1, + default=0, help="if nxpu == 0 and ngpu == 0, use cpu.") args = parser.parse_args() print("model_type:{}".format(args.model_type)) diff --git a/paddlespeech/s2t/training/trainer.py b/paddlespeech/s2t/training/trainer.py index d30556ca1..a7eb9892d 100644 --- a/paddlespeech/s2t/training/trainer.py +++ b/paddlespeech/s2t/training/trainer.py @@ -119,6 +119,9 @@ class Trainer(): paddle.set_device('xpu') elif self.args.ngpu > 0: paddle.set_device("gpu") + else: + raise Exception("invalid device") + if self.parallel: self.init_parallel() From acb19cf4650858f84b7d73e164409432965573ce Mon Sep 17 00:00:00 2001 From: Zhangjingyu06 Date: Tue, 24 May 2022 12:31:05 +0000 Subject: [PATCH 033/127] deepspeech2 modify for kunlun --- paddlespeech/s2t/exps/deepspeech2/bin/export.py | 1 + paddlespeech/s2t/exps/deepspeech2/bin/test.py | 1 + paddlespeech/s2t/exps/deepspeech2/bin/test_export.py | 1 + paddlespeech/s2t/exps/deepspeech2/bin/train.py | 1 + 4 files changed, 4 insertions(+) diff --git a/paddlespeech/s2t/exps/deepspeech2/bin/export.py b/paddlespeech/s2t/exps/deepspeech2/bin/export.py 
index f1b84969d..62bf191df 100644 --- a/paddlespeech/s2t/exps/deepspeech2/bin/export.py +++ b/paddlespeech/s2t/exps/deepspeech2/bin/export.py @@ -41,6 +41,7 @@ if __name__ == "__main__": '--nxpu', type=int, default=0, + choices=[0, 1], help="if nxpu == 0 and ngpu == 0, use cpu.") args = parser.parse_args() print("model_type:{}".format(args.model_type)) diff --git a/paddlespeech/s2t/exps/deepspeech2/bin/test.py b/paddlespeech/s2t/exps/deepspeech2/bin/test.py index c91be411a..a7d99e02a 100644 --- a/paddlespeech/s2t/exps/deepspeech2/bin/test.py +++ b/paddlespeech/s2t/exps/deepspeech2/bin/test.py @@ -41,6 +41,7 @@ if __name__ == "__main__": '--nxpu', type=int, default=0, + choices=[0, 1], help="if nxpu == 0 and ngpu == 0, use cpu.") args = parser.parse_args() print_arguments(args, globals()) diff --git a/paddlespeech/s2t/exps/deepspeech2/bin/test_export.py b/paddlespeech/s2t/exps/deepspeech2/bin/test_export.py index 923c3db49..58815ad63 100644 --- a/paddlespeech/s2t/exps/deepspeech2/bin/test_export.py +++ b/paddlespeech/s2t/exps/deepspeech2/bin/test_export.py @@ -44,6 +44,7 @@ if __name__ == "__main__": '--nxpu', type=int, default=0, + choices=[0, 1], help="if nxpu == 0 and ngpu == 0, use cpu.") parser.add_argument( "--enable-auto-log", action="store_true", help="use auto log") diff --git a/paddlespeech/s2t/exps/deepspeech2/bin/train.py b/paddlespeech/s2t/exps/deepspeech2/bin/train.py index d80cd0cb2..3906b2fc6 100644 --- a/paddlespeech/s2t/exps/deepspeech2/bin/train.py +++ b/paddlespeech/s2t/exps/deepspeech2/bin/train.py @@ -37,6 +37,7 @@ if __name__ == "__main__": '--nxpu', type=int, default=0, + choices=[0, 1], help="if nxpu == 0 and ngpu == 0, use cpu.") args = parser.parse_args() print("model_type:{}".format(args.model_type)) From e1888f9ae6d239b8c28f9739f7fd2a0120caac9e Mon Sep 17 00:00:00 2001 From: huangyuxin Date: Tue, 24 May 2022 12:37:42 +0000 Subject: [PATCH 034/127] remove size,test=asr --- paddlespeech/s2t/__init__.py | 19 ------------- .../s2t/decoders/beam_search/beam_search.py | 10 +++---- paddlespeech/s2t/decoders/scorers/ctc.py | 4 +-- .../s2t/decoders/scorers/ctc_prefix_score.py | 27 +++++++++---------- paddlespeech/s2t/models/u2/u2.py | 2 +- paddlespeech/s2t/modules/decoder.py | 2 +- paddlespeech/s2t/modules/embedding.py | 4 +-- paddlespeech/s2t/utils/tensor_utils.py | 6 ++--- 8 files changed, 27 insertions(+), 47 deletions(-) diff --git a/paddlespeech/s2t/__init__.py b/paddlespeech/s2t/__init__.py index 2365071f3..7ec9e1aba 100644 --- a/paddlespeech/s2t/__init__.py +++ b/paddlespeech/s2t/__init__.py @@ -189,25 +189,6 @@ if not hasattr(paddle.Tensor, 'contiguous'): paddle.static.Variable.contiguous = contiguous -def size(xs: paddle.Tensor, *args: int) -> paddle.Tensor: - nargs = len(args) - assert (nargs <= 1) - s = paddle.shape(xs) - if nargs == 1: - return s[args[0]] - else: - return s - - -#`to_static` do not process `size` property, maybe some `paddle` api dependent on it. -logger.debug( - "override size of paddle.Tensor " - "(`to_static` do not process `size` property, maybe some `paddle` api dependent on it), remove this when fixed!" 
-) -paddle.Tensor.size = size -paddle.static.Variable.size = size - - def view(xs: paddle.Tensor, *args: int) -> paddle.Tensor: return xs.reshape(args) diff --git a/paddlespeech/s2t/decoders/beam_search/beam_search.py b/paddlespeech/s2t/decoders/beam_search/beam_search.py index f331cb1c9..5029e1577 100644 --- a/paddlespeech/s2t/decoders/beam_search/beam_search.py +++ b/paddlespeech/s2t/decoders/beam_search/beam_search.py @@ -194,7 +194,7 @@ class BeamSearch(paddle.nn.Layer): Args: hyp (Hypothesis): Hypothesis with prefix tokens to score - ids (paddle.Tensor): 1D tensor of new partial tokens to score, + ids (paddle.Tensor): 1D tensor of new partial tokens to score, len(ids) < n_vocab x (paddle.Tensor): Corresponding input feature, (T, D) @@ -224,14 +224,14 @@ class BeamSearch(paddle.nn.Layer): ids (paddle.Tensor): The partial token ids(Global) to compute topk. Returns: - Tuple[paddle.Tensor, paddle.Tensor]: + Tuple[paddle.Tensor, paddle.Tensor]: The topk full token ids and partial token ids. Their shapes are `(self.beam_size,)`. i.e. (global ids, global relative local ids). """ # no pre beam performed, `ids` equal to `weighted_scores` - if weighted_scores.size(0) == ids.size(0): + if weighted_scores.shape[0] == ids.shape[0]: top_ids = weighted_scores.topk( self.beam_size)[1] # index in n_vocab return top_ids, top_ids @@ -374,8 +374,8 @@ class BeamSearch(paddle.nn.Layer): elif maxlenratio < 0: maxlen = -1 * int(maxlenratio) else: - maxlen = max(1, int(maxlenratio * x.size(0))) - minlen = int(minlenratio * x.size(0)) + maxlen = max(1, int(maxlenratio * x.shape[0])) + minlen = int(minlenratio * x.shape[0]) logger.info("decoder input length: " + str(x.shape[0])) logger.info("max output length: " + str(maxlen)) logger.info("min output length: " + str(minlen)) diff --git a/paddlespeech/s2t/decoders/scorers/ctc.py b/paddlespeech/s2t/decoders/scorers/ctc.py index 81d8b0783..6f1d8c007 100644 --- a/paddlespeech/s2t/decoders/scorers/ctc.py +++ b/paddlespeech/s2t/decoders/scorers/ctc.py @@ -69,7 +69,7 @@ class CTCPrefixScorer(BatchPartialScorerInterface): return sc[i], st[i] else: # for CTCPrefixScorePD (need new_id > 0) r, log_psi, f_min, f_max, scoring_idmap = state - s = log_psi[i, new_id].expand(log_psi.size(1)) + s = log_psi[i, new_id].expand(log_psi.shape[1]) if scoring_idmap is not None: return r[:, :, i, scoring_idmap[i, new_id]], s, f_min, f_max else: @@ -107,7 +107,7 @@ class CTCPrefixScorer(BatchPartialScorerInterface): """ logp = self.ctc.log_softmax(x.unsqueeze(0)) # assuming batch_size = 1 - xlen = paddle.to_tensor([logp.size(1)]) + xlen = paddle.to_tensor([logp.shape[1]]) self.impl = CTCPrefixScorePD(logp, xlen, 0, self.eos) return None diff --git a/paddlespeech/s2t/decoders/scorers/ctc_prefix_score.py b/paddlespeech/s2t/decoders/scorers/ctc_prefix_score.py index 78b8fe36c..0e63a52a8 100644 --- a/paddlespeech/s2t/decoders/scorers/ctc_prefix_score.py +++ b/paddlespeech/s2t/decoders/scorers/ctc_prefix_score.py @@ -33,9 +33,9 @@ class CTCPrefixScorePD(): self.logzero = -10000000000.0 self.blank = blank self.eos = eos - self.batch = x.size(0) - self.input_length = x.size(1) - self.odim = x.size(2) + self.batch = x.shape[0] + self.input_length = x.shape[1] + self.odim = x.shape[2] self.dtype = x.dtype # Pad the rest of posteriors in the batch @@ -76,8 +76,7 @@ class CTCPrefixScorePD(): last_ids = [yi[-1] for yi in y] # last output label ids n_bh = len(last_ids) # batch * hyps n_hyps = n_bh // self.batch # assuming each utterance has the same # of hyps - self.scoring_num = scoring_ids.size( - 
-1) if scoring_ids is not None else 0 + self.scoring_num = scoring_ids.shape[-1] if scoring_ids is not None else 0 # prepare state info if state is None: r_prev = paddle.full( @@ -153,7 +152,7 @@ class CTCPrefixScorePD(): # compute forward probabilities log(r_t^n(h)) and log(r_t^b(h)) for t in range(start, end): - rp = r[t - 1] # (2 x BW x O') + rp = r[t - 1] # (2 x BW x O') rr = paddle.stack([rp[0], log_phi[t - 1], rp[0], rp[1]]).view( 2, 2, n_bh, snum) # (2,2,BW,O') r[t] = paddle.logsumexp(rr, 1) + x_[:, t] @@ -227,7 +226,7 @@ class CTCPrefixScorePD(): if self.x.shape[1] < x.shape[1]: # self.x (2,T,B,O); x (B,T,O) # Pad the rest of posteriors in the batch # TODO(takaaki-hori): need a better way without for-loops - xlens = [x.size(1)] + xlens = [x.shape[1]] for i, l in enumerate(xlens): if l < self.input_length: x[i, l:, :] = self.logzero @@ -237,7 +236,7 @@ class CTCPrefixScorePD(): xb = xn[:, :, self.blank].unsqueeze(2).expand(-1, -1, self.odim) self.x = paddle.stack([xn, xb]) # (2, T, B, O) self.x[:, :tmp_x.shape[1], :, :] = tmp_x - self.input_length = x.size(1) + self.input_length = x.shape[1] self.end_frames = paddle.to_tensor(xlens) - 1 def extend_state(self, state): @@ -318,16 +317,16 @@ class CTCPrefixScore(): r[0, 0] = xs[0] r[0, 1] = self.logzero else: - # Although the code does not exactly follow Algorithm 2, - # we don't have to change it because we can assume - # r_t(h)=0 for t < |h| in CTC forward computation + # Although the code does not exactly follow Algorithm 2, + # we don't have to change it because we can assume + # r_t(h)=0 for t < |h| in CTC forward computation # (Note: we assume here that index t starts with 0). # The purpose of this difference is to reduce the number of for-loops. # https://github.com/espnet/espnet/pull/3655 - # where we start to accumulate r_t(h) from t=|h| - # and iterate r_t(h) = (r_{t-1}(h) + ...) to T-1, + # where we start to accumulate r_t(h) from t=|h| + # and iterate r_t(h) = (r_{t-1}(h) + ...) to T-1, # avoiding accumulating zeros for t=1~|h|-1. - # Thus, we need to set r_{|h|-1}(h) = 0, + # Thus, we need to set r_{|h|-1}(h) = 0, # i.e., r[output_length-1] = logzero, for initialization. # This is just for reducing the computation. 
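# An illustrative walk-through of the initialization above (a sketch, not
# repository code): with output_length == 3, the recursion starts at t == 3,
# so r[2] is set to logzero as the log(0) base case; r[3] is then accumulated
# from r[2] and the frame-3 emission scores, which gives the same result as
# running the recursion from t = 1 with r_t(h) = 0 for every t < |h|.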
r[output_length - 1] = self.logzero diff --git a/paddlespeech/s2t/models/u2/u2.py b/paddlespeech/s2t/models/u2/u2.py index 530840d0f..e3f46b15a 100644 --- a/paddlespeech/s2t/models/u2/u2.py +++ b/paddlespeech/s2t/models/u2/u2.py @@ -775,7 +775,7 @@ class U2DecodeModel(U2BaseModel): """ self.eval() x = paddle.to_tensor(x).unsqueeze(0) - ilen = x.size(1) + ilen = x.shape[1] enc_output, _ = self._forward_encoder(x, ilen) return enc_output.squeeze(0) diff --git a/paddlespeech/s2t/modules/decoder.py b/paddlespeech/s2t/modules/decoder.py index 42ac119b4..ce78059c0 100644 --- a/paddlespeech/s2t/modules/decoder.py +++ b/paddlespeech/s2t/modules/decoder.py @@ -242,7 +242,7 @@ class TransformerDecoder(BatchScorerInterface, nn.Layer): ] # batch decoding - ys_mask = subsequent_mask(ys.size(-1)).unsqueeze(0) # (B,L,L) + ys_mask = subsequent_mask(ys.shape[-1]).unsqueeze(0) # (B,L,L) xs_mask = make_xs_mask(xs).unsqueeze(1) # (B,1,T) logp, states = self.forward_one_step( xs, xs_mask, ys, ys_mask, cache=batch_state) diff --git a/paddlespeech/s2t/modules/embedding.py b/paddlespeech/s2t/modules/embedding.py index 596f61b78..cc1fdffe2 100644 --- a/paddlespeech/s2t/modules/embedding.py +++ b/paddlespeech/s2t/modules/embedding.py @@ -115,7 +115,7 @@ class PositionalEncoding(nn.Layer, PositionalEncodingInterface): assert offset + x.shape[ 1] < self.max_len, "offset: {} + x.shape[1]: {} is larger than the max_len: {}".format( offset, x.shape[1], self.max_len) - #TODO(Hui Zhang): using T = x.size(1), __getitem__ not support Tensor + #TODO(Hui Zhang): using T = x.shape[1], __getitem__ not support Tensor pos_emb = self.pe[:, offset:offset + T] x = x * self.xscale + pos_emb return self.dropout(x), self.dropout(pos_emb) @@ -165,6 +165,6 @@ class RelPositionalEncoding(PositionalEncoding): 1] < self.max_len, "offset: {} + x.shape[1]: {} is larger than the max_len: {}".format( offset, x.shape[1], self.max_len) x = x * self.xscale - #TODO(Hui Zhang): using x.size(1), __getitem__ not support Tensor + #TODO(Hui Zhang): using x.shape[1], __getitem__ not support Tensor pos_emb = self.pe[:, offset:offset + x.shape[1]] return self.dropout(x), self.dropout(pos_emb) diff --git a/paddlespeech/s2t/utils/tensor_utils.py b/paddlespeech/s2t/utils/tensor_utils.py index e105253c2..ca8689569 100644 --- a/paddlespeech/s2t/utils/tensor_utils.py +++ b/paddlespeech/s2t/utils/tensor_utils.py @@ -58,8 +58,8 @@ def pad_sequence(sequences: List[paddle.Tensor], >>> a = paddle.ones(25, 300) >>> b = paddle.ones(22, 300) >>> c = paddle.ones(15, 300) - >>> pad_sequence([a, b, c]).size() - paddle.Tensor([25, 3, 300]) + >>> pad_sequence([a, b, c]).shape + [25, 3, 300] Note: This function returns a Tensor of size ``T x B x *`` or ``B x T x *`` @@ -79,7 +79,7 @@ def pad_sequence(sequences: List[paddle.Tensor], # assuming trailing dimensions and type of all the Tensors # in sequences are same and fetching those from sequences[0] - max_size = sequences[0].size() + max_size = sequences[0].shape # (TODO Hui Zhang): slice not supprot `end==start` # trailing_dims = max_size[1:] trailing_dims = tuple(max_size[1:].numpy().tolist()) if sequences[0].ndim >= 2 else () From 4c09927f61668952ee263cd178798b0ea5634760 Mon Sep 17 00:00:00 2001 From: huangyuxin Date: Tue, 24 May 2022 13:34:01 +0000 Subject: [PATCH 035/127] fix --- paddlespeech/s2t/__init__.py | 2 +- paddlespeech/s2t/models/lm/transformer.py | 4 ++-- paddlespeech/s2t/modules/encoder.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/paddlespeech/s2t/__init__.py 
b/paddlespeech/s2t/__init__.py index 7ec9e1aba..a2fce3057 100644 --- a/paddlespeech/s2t/__init__.py +++ b/paddlespeech/s2t/__init__.py @@ -200,7 +200,7 @@ if not hasattr(paddle.Tensor, 'view'): def view_as(xs: paddle.Tensor, ys: paddle.Tensor) -> paddle.Tensor: - return xs.reshape(ys.size()) + return xs.reshape(ys.shape) if not hasattr(paddle.Tensor, 'view_as'): diff --git a/paddlespeech/s2t/models/lm/transformer.py b/paddlespeech/s2t/models/lm/transformer.py index 85bd7c232..bb281168f 100644 --- a/paddlespeech/s2t/models/lm/transformer.py +++ b/paddlespeech/s2t/models/lm/transformer.py @@ -90,7 +90,7 @@ class TransformerLM(nn.Layer, LMInterface, BatchScorerInterface): def _target_mask(self, ys_in_pad): ys_mask = ys_in_pad != 0 - m = subsequent_mask(ys_mask.size(-1)).unsqueeze(0) + m = subsequent_mask(ys_mask.shape[-1]).unsqueeze(0) return ys_mask.unsqueeze(-2) & m def forward(self, x: paddle.Tensor, t: paddle.Tensor @@ -112,7 +112,7 @@ class TransformerLM(nn.Layer, LMInterface, BatchScorerInterface): in perplexity: p(t)^{-n} = exp(-log p(t) / n) """ - batch_size = x.size(0) + batch_size = x.shape[0] xm = x != 0 xlen = xm.sum(axis=1) if self.embed_drop is not None: diff --git a/paddlespeech/s2t/modules/encoder.py b/paddlespeech/s2t/modules/encoder.py index 669a12d65..7298c61f2 100644 --- a/paddlespeech/s2t/modules/encoder.py +++ b/paddlespeech/s2t/modules/encoder.py @@ -218,7 +218,7 @@ class BaseEncoder(nn.Layer): assert xs.shape[0] == 1 # batch size must be one # tmp_masks is just for interface compatibility # TODO(Hui Zhang): stride_slice not support bool tensor - # tmp_masks = paddle.ones([1, xs.size(1)], dtype=paddle.bool) + # tmp_masks = paddle.ones([1, xs.shape[1]], dtype=paddle.bool) tmp_masks = paddle.ones([1, xs.shape[1]], dtype=paddle.int32) tmp_masks = tmp_masks.unsqueeze(1) #[B=1, C=1, T] From a7037dc029b07ac65a13ca855b5d9b0193789782 Mon Sep 17 00:00:00 2001 From: qingen Date: Tue, 24 May 2022 22:10:30 +0800 Subject: [PATCH 036/127] [vec][doc] update der result, test=doc --- examples/ami/sd0/README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/examples/ami/sd0/README.md b/examples/ami/sd0/README.md index e9ecc2854..30f7a438d 100644 --- a/examples/ami/sd0/README.md +++ b/examples/ami/sd0/README.md @@ -26,4 +26,7 @@ Use the following command to run diarization on AMI corpus. ./run.sh --data_folder ./amicorpus --manual_annot_folder ./ami_public_manual_1.6.2 ``` -## Results (DER) coming soon! :) +## Best performance in terms of Diarization Error Rate (DER) + | System | Mic. |Orcl. (Dev)|Orcl. (Eval)| Est. (Dev) |Est.
(Eval)| + | --------|-------- | ---------|----------- | --------|-----------| + | ECAPA-TDNN + SC | HeadsetMix| 1.54 % | 3.07 %| 1.56 %| 3.28 % | From b23bde8ec5ff4ed3990f151246dfbb8c9dccf385 Mon Sep 17 00:00:00 2001 From: huangyuxin Date: Wed, 25 May 2022 03:30:48 +0000 Subject: [PATCH 037/127] tensor.shape => paddle.shape(tensor) --- paddlespeech/s2t/__init__.py | 2 +- paddlespeech/s2t/decoders/beam_search/beam_search.py | 10 +++++----- paddlespeech/s2t/decoders/scorers/ctc.py | 4 ++-- .../s2t/decoders/scorers/ctc_prefix_score.py | 12 ++++++------ paddlespeech/s2t/models/lm/transformer.py | 6 +++--- paddlespeech/s2t/models/u2/u2.py | 2 +- paddlespeech/s2t/modules/decoder.py | 2 +- paddlespeech/s2t/modules/embedding.py | 4 ++-- paddlespeech/s2t/modules/encoder.py | 2 +- paddlespeech/s2t/utils/tensor_utils.py | 4 ++-- 10 files changed, 24 insertions(+), 24 deletions(-) diff --git a/paddlespeech/s2t/__init__.py b/paddlespeech/s2t/__init__.py index a2fce3057..2da68435c 100644 --- a/paddlespeech/s2t/__init__.py +++ b/paddlespeech/s2t/__init__.py @@ -200,7 +200,7 @@ if not hasattr(paddle.Tensor, 'view'): def view_as(xs: paddle.Tensor, ys: paddle.Tensor) -> paddle.Tensor: - return xs.reshape(ys.shape) + return xs.reshape(paddle.shape(ys)) if not hasattr(paddle.Tensor, 'view_as'): diff --git a/paddlespeech/s2t/decoders/beam_search/beam_search.py b/paddlespeech/s2t/decoders/beam_search/beam_search.py index 5029e1577..f6a2b4b0a 100644 --- a/paddlespeech/s2t/decoders/beam_search/beam_search.py +++ b/paddlespeech/s2t/decoders/beam_search/beam_search.py @@ -231,7 +231,7 @@ class BeamSearch(paddle.nn.Layer): """ # no pre beam performed, `ids` equal to `weighted_scores` - if weighted_scores.shape[0] == ids.shape[0]: + if paddle.shape(weighted_scores)[0] == paddle.shape(ids)[0]: top_ids = weighted_scores.topk( self.beam_size)[1] # index in n_vocab return top_ids, top_ids @@ -370,13 +370,13 @@ class BeamSearch(paddle.nn.Layer): """ # set length bounds if maxlenratio == 0: - maxlen = x.shape[0] + maxlen = paddle.shape(x)[0] elif maxlenratio < 0: maxlen = -1 * int(maxlenratio) else: - maxlen = max(1, int(maxlenratio * x.shape[0])) - minlen = int(minlenratio * x.shape[0]) - logger.info("decoder input length: " + str(x.shape[0])) + maxlen = max(1, int(maxlenratio * paddle.shape(x)[0])) + minlen = int(minlenratio * paddle.shape(x)[0]) + logger.info("decoder input length: " + str(paddle.shape(x)[0])) logger.info("max output length: " + str(maxlen)) logger.info("min output length: " + str(minlen)) diff --git a/paddlespeech/s2t/decoders/scorers/ctc.py b/paddlespeech/s2t/decoders/scorers/ctc.py index 6f1d8c007..3c1d4cf80 100644 --- a/paddlespeech/s2t/decoders/scorers/ctc.py +++ b/paddlespeech/s2t/decoders/scorers/ctc.py @@ -69,7 +69,7 @@ class CTCPrefixScorer(BatchPartialScorerInterface): return sc[i], st[i] else: # for CTCPrefixScorePD (need new_id > 0) r, log_psi, f_min, f_max, scoring_idmap = state - s = log_psi[i, new_id].expand(log_psi.shape[1]) + s = log_psi[i, new_id].expand(paddle.shape(log_psi)[1]) if scoring_idmap is not None: return r[:, :, i, scoring_idmap[i, new_id]], s, f_min, f_max else: @@ -107,7 +107,7 @@ class CTCPrefixScorer(BatchPartialScorerInterface): """ logp = self.ctc.log_softmax(x.unsqueeze(0)) # assuming batch_size = 1 - xlen = paddle.to_tensor([logp.shape[1]]) + xlen = paddle.to_tensor([paddle.shape(logp)[1]]) self.impl = CTCPrefixScorePD(logp, xlen, 0, self.eos) return None diff --git a/paddlespeech/s2t/decoders/scorers/ctc_prefix_score.py 
b/paddlespeech/s2t/decoders/scorers/ctc_prefix_score.py index 0e63a52a8..d8ca5ccde 100644 --- a/paddlespeech/s2t/decoders/scorers/ctc_prefix_score.py +++ b/paddlespeech/s2t/decoders/scorers/ctc_prefix_score.py @@ -33,9 +33,9 @@ class CTCPrefixScorePD(): self.logzero = -10000000000.0 self.blank = blank self.eos = eos - self.batch = x.shape[0] - self.input_length = x.shape[1] - self.odim = x.shape[2] + self.batch = paddle.shape(x)[0] + self.input_length = paddle.shape(x)[1] + self.odim = paddle.shape(x)[2] self.dtype = x.dtype # Pad the rest of posteriors in the batch @@ -76,7 +76,7 @@ class CTCPrefixScorePD(): last_ids = [yi[-1] for yi in y] # last output label ids n_bh = len(last_ids) # batch * hyps n_hyps = n_bh // self.batch # assuming each utterance has the same # of hyps - self.scoring_num = scoring_ids.shape[-1] if scoring_ids is not None else 0 + self.scoring_num = paddle.shape(scoring_ids)[-1] if scoring_ids is not None else 0 # prepare state info if state is None: r_prev = paddle.full( @@ -226,7 +226,7 @@ class CTCPrefixScorePD(): if self.x.shape[1] < x.shape[1]: # self.x (2,T,B,O); x (B,T,O) # Pad the rest of posteriors in the batch # TODO(takaaki-hori): need a better way without for-loops - xlens = [x.shape[1]] + xlens = [paddle.shape(x)[1]] for i, l in enumerate(xlens): if l < self.input_length: x[i, l:, :] = self.logzero @@ -236,7 +236,7 @@ class CTCPrefixScorePD(): xb = xn[:, :, self.blank].unsqueeze(2).expand(-1, -1, self.odim) self.x = paddle.stack([xn, xb]) # (2, T, B, O) self.x[:, :tmp_x.shape[1], :, :] = tmp_x - self.input_length = x.shape[1] + self.input_length = paddle.shape(x)[1] self.end_frames = paddle.to_tensor(xlens) - 1 def extend_state(self, state): diff --git a/paddlespeech/s2t/models/lm/transformer.py b/paddlespeech/s2t/models/lm/transformer.py index bb281168f..d14f99563 100644 --- a/paddlespeech/s2t/models/lm/transformer.py +++ b/paddlespeech/s2t/models/lm/transformer.py @@ -90,7 +90,7 @@ class TransformerLM(nn.Layer, LMInterface, BatchScorerInterface): def _target_mask(self, ys_in_pad): ys_mask = ys_in_pad != 0 - m = subsequent_mask(ys_mask.shape[-1]).unsqueeze(0) + m = subsequent_mask(paddle.shape(ys_mask)[-1]).unsqueeze(0) return ys_mask.unsqueeze(-2) & m def forward(self, x: paddle.Tensor, t: paddle.Tensor @@ -112,7 +112,7 @@ class TransformerLM(nn.Layer, LMInterface, BatchScorerInterface): in perplexity: p(t)^{-n} = exp(-log p(t) / n) """ - batch_size = x.shape[0] + batch_size = paddle.shape(x)[0] xm = x != 0 xlen = xm.sum(axis=1) if self.embed_drop is not None: @@ -122,7 +122,7 @@ class TransformerLM(nn.Layer, LMInterface, BatchScorerInterface): h, _ = self.encoder(emb, xlen) y = self.decoder(h) loss = F.cross_entropy( - y.view(-1, y.shape[-1]), t.view(-1), reduction="none") + y.view(-1, paddle.shape(y)[-1]), t.view(-1), reduction="none") mask = xm.to(loss.dtype) logp = loss * mask.view(-1) nll = logp.view(batch_size, -1).sum(-1) diff --git a/paddlespeech/s2t/models/u2/u2.py b/paddlespeech/s2t/models/u2/u2.py index e3f46b15a..d5471369f 100644 --- a/paddlespeech/s2t/models/u2/u2.py +++ b/paddlespeech/s2t/models/u2/u2.py @@ -775,7 +775,7 @@ class U2DecodeModel(U2BaseModel): """ self.eval() x = paddle.to_tensor(x).unsqueeze(0) - ilen = x.shape[1] + ilen = paddle.shape(x)[1] enc_output, _ = self._forward_encoder(x, ilen) return enc_output.squeeze(0) diff --git a/paddlespeech/s2t/modules/decoder.py b/paddlespeech/s2t/modules/decoder.py index ce78059c0..ccc8482d5 100644 --- a/paddlespeech/s2t/modules/decoder.py +++ b/paddlespeech/s2t/modules/decoder.py @@
-242,7 +242,7 @@ class TransformerDecoder(BatchScorerInterface, nn.Layer): ] # batch decoding - ys_mask = subsequent_mask(ys.shape[-1]).unsqueeze(0) # (B,L,L) + ys_mask = subsequent_mask(paddle.shape(ys)[-1]).unsqueeze(0) # (B,L,L) xs_mask = make_xs_mask(xs).unsqueeze(1) # (B,1,T) logp, states = self.forward_one_step( xs, xs_mask, ys, ys_mask, cache=batch_state) diff --git a/paddlespeech/s2t/modules/embedding.py b/paddlespeech/s2t/modules/embedding.py index cc1fdffe2..51e558eb8 100644 --- a/paddlespeech/s2t/modules/embedding.py +++ b/paddlespeech/s2t/modules/embedding.py @@ -115,7 +115,7 @@ class PositionalEncoding(nn.Layer, PositionalEncodingInterface): assert offset + x.shape[ 1] < self.max_len, "offset: {} + x.shape[1]: {} is larger than the max_len: {}".format( offset, x.shape[1], self.max_len) - #TODO(Hui Zhang): using T = x.shape[1], __getitem__ not support Tensor + #TODO(Hui Zhang): using T = paddle.shape(x)[1], __getitem__ not support Tensor pos_emb = self.pe[:, offset:offset + T] x = x * self.xscale + pos_emb return self.dropout(x), self.dropout(pos_emb) @@ -165,6 +165,6 @@ class RelPositionalEncoding(PositionalEncoding): 1] < self.max_len, "offset: {} + x.shape[1]: {} is larger than the max_len: {}".format( offset, x.shape[1], self.max_len) x = x * self.xscale - #TODO(Hui Zhang): using x.shape[1], __getitem__ not support Tensor + #TODO(Hui Zhang): using paddle.shape(x)[1], __getitem__ not support Tensor pos_emb = self.pe[:, offset:offset + x.shape[1]] return self.dropout(x), self.dropout(pos_emb) diff --git a/paddlespeech/s2t/modules/encoder.py b/paddlespeech/s2t/modules/encoder.py index 7298c61f2..4d31acf1a 100644 --- a/paddlespeech/s2t/modules/encoder.py +++ b/paddlespeech/s2t/modules/encoder.py @@ -218,7 +218,7 @@ class BaseEncoder(nn.Layer): assert xs.shape[0] == 1 # batch size must be one # tmp_masks is just for interface compatibility # TODO(Hui Zhang): stride_slice not support bool tensor - # tmp_masks = paddle.ones([1, xs.shape[1]], dtype=paddle.bool) + # tmp_masks = paddle.ones([1, paddle.shape(xs)[1]], dtype=paddle.bool) tmp_masks = paddle.ones([1, xs.shape[1]], dtype=paddle.int32) tmp_masks = tmp_masks.unsqueeze(1) #[B=1, C=1, T] diff --git a/paddlespeech/s2t/utils/tensor_utils.py b/paddlespeech/s2t/utils/tensor_utils.py index ca8689569..bc557b130 100644 --- a/paddlespeech/s2t/utils/tensor_utils.py +++ b/paddlespeech/s2t/utils/tensor_utils.py @@ -59,7 +59,7 @@ def pad_sequence(sequences: List[paddle.Tensor], >>> b = paddle.ones(22, 300) >>> c = paddle.ones(15, 300) >>> pad_sequence([a, b, c]).shape - [25, 3, 300] + paddle.Tensor([25, 3, 300]) Note: This function returns a Tensor of size ``T x B x *`` or ``B x T x *`` @@ -79,7 +79,7 @@ def pad_sequence(sequences: List[paddle.Tensor], # assuming trailing dimensions and type of all the Tensors # in sequences are same and fetching those from sequences[0] - max_size = sequences[0].shape + max_size = paddle.shape(sequences[0]) # (TODO Hui Zhang): slice not supprot `end==start` # trailing_dims = max_size[1:] trailing_dims = tuple(max_size[1:].numpy().tolist()) if sequences[0].ndim >= 2 else () From 7afbdbefadd6b249e0560d21afa47c7a33a9ab6f Mon Sep 17 00:00:00 2001 From: xiongxinlei Date: Tue, 24 May 2022 21:29:36 +0800 Subject: [PATCH 038/127] update the vector model, test=doc --- demos/audio_content_search/requirements.txt | 1 + .../streaming_asr_server.py | 38 +++++ demos/speaker_verification/README.md | 159 +++++++++--------- demos/speaker_verification/README_cn.md | 158 ++++++++--------- docs/source/released_model.md | 2 
+- examples/voxceleb/sv0/README.md | 6 +- examples/voxceleb/sv0/RESULT.md | 1 + examples/voxceleb/sv0/conf/ecapa_tdnn.yaml | 8 + .../voxceleb/sv0/conf/ecapa_tdnn_small.yaml | 7 + paddlespeech/cli/vector/pretrained_models.py | 4 +- .../server/engine/acs/python/acs_engine.py | 56 ++++-- .../server/tests/vector/http_client.py | 59 +++++++ 12 files changed, 324 insertions(+), 175 deletions(-) create mode 100644 demos/audio_content_search/requirements.txt create mode 100644 demos/audio_content_search/streaming_asr_server.py create mode 100644 paddlespeech/server/tests/vector/http_client.py diff --git a/demos/audio_content_search/requirements.txt b/demos/audio_content_search/requirements.txt new file mode 100644 index 000000000..4126a4868 --- /dev/null +++ b/demos/audio_content_search/requirements.txt @@ -0,0 +1 @@ +websocket-client \ No newline at end of file diff --git a/demos/audio_content_search/streaming_asr_server.py b/demos/audio_content_search/streaming_asr_server.py new file mode 100644 index 000000000..011b009aa --- /dev/null +++ b/demos/audio_content_search/streaming_asr_server.py @@ -0,0 +1,38 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse + +from paddlespeech.cli.log import logger +from paddlespeech.server.bin.paddlespeech_server import ServerExecutor +if __name__ == "__main__": + parser = argparse.ArgumentParser( + prog='paddlespeech_server.start', add_help=True) + parser.add_argument( + "--config_file", + action="store", + help="yaml file of the app", + default=None, + required=True) + + parser.add_argument( + "--log_file", + action="store", + help="log file", + default="./log/paddlespeech.log") + logger.info("start to parse the args") + args = parser.parse_args() + + logger.info("start to launch the streaming asr server") + streaming_asr_server = ServerExecutor() + streaming_asr_server(config_file=args.config_file, log_file=args.log_file) diff --git a/demos/speaker_verification/README.md b/demos/speaker_verification/README.md index b6a1d9bcc..a7d0f819d 100644 --- a/demos/speaker_verification/README.md +++ b/demos/speaker_verification/README.md @@ -53,50 +53,49 @@ wget -c https://paddlespeech.bj.bcebos.com/vector/audio/85236145389.wav Output: ```bash - demo [ 1.4217498 5.626253 -5.342073 1.1773866 3.308055 - 1.756596 5.167894 10.80636 -3.8226728 -5.6141334 - 2.623845 -0.8072968 1.9635103 -7.3128724 0.01103897 - -9.723131 0.6619743 -6.976803 10.213478 7.494748 - 2.9105635 3.8949256 3.7999806 7.1061673 16.905321 - -7.1493764 8.733103 3.4230042 -4.831653 -11.403367 - 11.232214 7.1274667 -4.2828417 2.452362 -5.130748 - -18.177666 -2.6116815 -11.000337 -6.7314315 1.6564683 - 0.7618269 1.1253023 -2.083836 4.725744 -8.782597 - -3.539873 3.814236 5.1420674 2.162061 4.096431 - -6.4162116 12.747448 1.9429878 -15.152943 6.417416 - 16.097002 -9.716668 -1.9920526 -3.3649497 -1.871939 - 11.567354 3.69788 11.258265 7.442363 9.183411 - 4.5281515 -1.2417862 4.3959084 6.6727695 5.8898783 - 7.627124 -0.66919386 
-11.889693 -9.208865 -7.4274073 - -3.7776625 6.917234 -9.848748 -2.0944717 -5.135116 - 0.49563864 9.317534 -5.9141874 -1.8098574 -0.11738578 - -7.169265 -1.0578263 -5.7216787 -5.1173844 16.137651 - -4.473626 7.6624317 -0.55381083 9.631587 -6.4704556 - -8.548508 4.3716145 -0.79702514 4.478997 -2.9758704 - 3.272176 2.8382776 5.134597 -9.190781 -0.5657382 - -4.8745747 2.3165567 -5.984303 -2.1798875 0.35541576 - -0.31784213 9.493548 2.1144536 4.358092 -12.089823 - 8.451689 -7.925461 4.6242585 4.4289427 18.692003 - -2.6204622 -5.149185 -0.35821092 8.488551 4.981496 - -9.32683 -2.2544234 6.6417594 1.2119585 10.977129 - 16.555033 3.3238444 9.551863 -1.6676947 -0.79539716 - -8.605674 -0.47356385 2.6741948 -5.359179 -2.6673796 - 0.66607 15.443222 4.740594 -3.4725387 11.592567 - -2.054497 1.7361217 -8.265324 -9.30447 5.4068313 - -1.5180256 -7.746615 -6.089606 0.07112726 -0.34904733 - -8.649895 -9.998958 -2.564841 -0.53999114 2.601808 - -0.31927416 -1.8815292 -2.07215 -3.4105783 -8.2998085 - 1.483641 -15.365992 -8.288208 3.8847756 -3.4876456 - 7.3629923 0.4657332 3.132599 12.438889 -1.8337058 - 4.532936 2.7264361 10.145339 -6.521951 2.897153 - -3.3925855 5.079156 7.759716 4.677565 5.8457737 - 2.402413 7.7071047 3.9711342 -6.390043 6.1268735 - -3.7760346 -11.118123 ] + demo [ -1.3251206 7.8606825 -4.620626 0.3000721 2.2648535 + -1.1931441 3.0647137 7.673595 -6.0044727 -12.02426 + -1.9496069 3.1269536 1.618838 -7.6383104 -1.2299773 + -12.338331 2.1373026 -5.3957124 9.717328 5.6752305 + 3.7805123 3.0597172 3.429692 8.97601 13.174125 + -0.53132284 8.9424715 4.46511 -4.4262476 -9.726503 + 8.399328 7.2239175 -7.435854 2.9441683 -4.3430395 + -13.886965 -1.6346735 -10.9027405 -5.311245 3.8007221 + 3.8976038 -2.1230774 -2.3521194 4.151031 -7.4048667 + 0.13911647 2.4626107 4.9664545 0.9897574 5.4839754 + -3.3574002 10.1340065 -0.6120171 -10.403095 4.6007543 + 16.00935 -7.7836914 -4.1945305 -6.9368606 1.1789556 + 11.490801 4.2380238 9.550931 8.375046 7.5089145 + -0.65707296 -0.30051577 2.8406055 3.0828028 0.730817 + 6.148354 0.13766119 -13.424735 -7.7461405 -2.3227983 + -8.305252 2.9879124 -10.995229 0.15211068 -2.3820348 + -1.7984174 8.495629 -5.8522367 -3.755498 0.6989711 + -5.2702994 -2.6188622 -1.8828466 -4.64665 14.078544 + -0.5495333 10.579158 -3.2160501 9.349004 -4.381078 + -11.675817 -2.8630207 4.5721755 2.246612 -4.574342 + 1.8610188 2.3767874 5.6257877 -9.784078 0.64967257 + -1.4579505 0.4263264 -4.9211264 -2.454784 3.4869802 + -0.42654222 8.341269 1.356552 7.0966883 -13.102829 + 8.016734 -7.1159344 1.8699781 0.208721 14.699384 + -1.025278 -2.6107233 -2.5082312 8.427193 6.9138527 + -6.2912464 0.6157366 2.489688 -3.4668267 9.921763 + 11.200815 -0.1966403 7.4916005 -0.62312716 -0.25848144 + -9.947997 -0.9611041 1.1649219 -2.1907122 -1.5028487 + -0.51926106 15.165954 2.4649463 -0.9980445 7.4416637 + -2.0768049 3.5896823 -7.3055434 -7.5620847 4.323335 + 0.0804418 -6.56401 -2.3148053 -1.7642345 -2.4708817 + -7.675618 -9.548878 -1.0177554 0.16986446 2.5877135 + -1.8752296 -0.36614323 -6.0493784 -2.3965611 -5.9453387 + 0.9424033 -13.155974 -7.457801 0.14658108 -3.742797 + 5.8414927 -1.2872906 5.5694313 12.57059 1.0939219 + 2.2142086 1.9181576 6.9914207 -5.888139 3.1409824 + -2.003628 2.4434285 9.973139 5.03668 2.0051203 + 2.8615603 5.860224 2.9176188 -1.6311141 2.0292206 + -4.070415 -6.831437 ] ``` - Python API ```python - import paddle from paddlespeech.cli import VectorExecutor vector_executor = VectorExecutor() @@ -169,47 +168,47 @@ wget -c https://paddlespeech.bj.bcebos.com/vector/audio/85236145389.wav 
-3.7760346 -11.118123 ] # get the test embedding Test embedding Result: - [ -1.902964 2.0690894 -8.034194 3.5472693 0.18089125 - 6.9085927 1.4097427 -1.9487704 -10.021278 -0.20755845 - -8.04332 4.344489 2.3200977 -14.306299 5.184692 - -11.55602 -3.8497238 0.6444722 1.2833948 2.6766639 - 0.5878921 0.7946299 1.7207596 2.5791872 14.998469 - -1.3385371 15.031221 -0.8006958 1.99287 -9.52007 - 2.435466 4.003221 -4.33817 -4.898601 -5.304714 - -18.033886 10.790787 -12.784645 -5.641755 2.9761686 - -10.566622 1.4839455 6.152458 -5.7195854 2.8603241 - 6.112133 8.489869 5.5958056 1.2836679 -1.2293907 - 0.89927405 7.0288725 -2.854029 -0.9782962 5.8255906 - 14.905906 -5.025907 0.7866458 -4.2444224 -16.354029 - 10.521315 0.9604709 -3.3257897 7.144871 -13.592733 - -8.568869 -1.7953678 0.26313916 10.916714 -6.9374123 - 1.857403 -6.2746415 2.8154466 -7.2338667 -2.293357 - -0.05452765 5.4287076 5.0849075 -6.690375 -1.6183422 - 3.654291 0.94352573 -9.200294 -5.4749465 -3.5235846 - 1.3420814 4.240421 -2.772944 -2.8451524 16.311104 - 4.2969875 -1.762936 -12.5758915 8.595198 -0.8835239 - -1.5708797 1.568961 1.1413603 3.5032008 -0.45251232 - -6.786333 16.89443 5.3366146 -8.789056 0.6355629 - 3.2579517 -3.328322 7.5969577 0.66025066 -6.550468 - -9.148656 2.020372 -0.4615173 1.1965656 -3.8764873 - 11.6562195 -6.0750933 12.182899 3.2218833 0.81969476 - 5.570001 -3.8459578 -7.205299 7.9262037 -7.6611166 - -5.249467 -2.2671914 7.2658715 -13.298164 4.821147 - -2.7263982 11.691089 -3.8918593 -2.838112 -1.0336838 - -3.8034165 2.8536487 -5.60398 -1.1972581 1.3455094 - -3.4903061 2.2408795 5.5010734 -3.970756 11.99696 - -7.8858757 0.43160373 -5.5059714 4.3426995 16.322706 - 11.635366 0.72157705 -9.245714 -3.91465 -4.449838 - -1.5716927 7.713747 -2.2430465 -6.198303 -13.481864 - 2.8156567 -5.7812386 5.1456156 2.7289324 -14.505571 - 13.270688 3.448231 -7.0659585 4.5886116 -4.466099 - -0.296428 -11.463529 -2.6076477 14.110243 -6.9725137 - -1.9962958 2.7119343 19.391657 0.01961198 14.607133 - -1.6695905 -4.391516 1.3131028 -6.670972 -5.888604 - 12.0612335 5.9285784 3.3715196 1.492534 10.723728 - -0.95514804 -12.085431 ] + [ 2.5247195 5.119042 -4.335273 4.4583654 5.047907 + 3.5059214 1.6159848 0.49364898 -11.6899185 -3.1014526 + -5.6589785 -0.42684984 2.674276 -11.937654 6.2248464 + -10.776924 -5.694543 1.112041 1.5709964 1.0961034 + 1.3976512 2.324352 1.339981 5.279319 13.734659 + -2.5753925 13.651442 -2.2357535 5.1575427 -3.251567 + 1.4023279 6.1191974 -6.0845175 -1.3646189 -2.6789894 + -15.220778 9.779349 -9.411551 -6.388947 6.8313975 + -9.245996 0.31196198 2.5509644 -4.413065 6.1649427 + 6.793837 2.6328635 8.620976 3.4832475 0.52491665 + 2.9115407 5.8392377 0.6702376 -3.2726715 2.6694255 + 16.91701 -5.5811176 0.23362345 -4.5573606 -11.801059 + 14.728292 -0.5198082 -3.999922 7.0927105 -7.0459595 + -5.4389 -0.46420583 -5.1085467 10.376568 -8.889225 + -0.37705845 -1.659806 2.6731026 -7.1909504 1.4608804 + -2.163136 -0.17949677 4.0241547 0.11319201 0.601279 + 2.039692 3.1910992 -11.649526 -8.121584 -4.8707457 + 0.3851982 1.4231744 -2.3321972 0.99332285 14.121717 + 5.899413 0.7384519 -17.760096 10.555021 4.1366534 + -0.3391071 -0.20792882 3.208204 0.8847948 -8.721497 + -6.432868 13.006379 4.8956 -9.155822 -1.9441519 + 5.7815638 -2.066733 10.425042 -0.8802383 -2.4314315 + -9.869258 0.35095334 -5.3549943 2.1076174 -8.290468 + 8.4433365 -4.689333 9.334139 -2.172678 -3.0250976 + 8.394216 -3.2110903 -7.93868 2.3960824 -2.3213403 + -1.4963245 -3.476059 4.132903 -10.893354 4.362673 + -0.45456508 10.258634 -1.1655927 -6.7799754 
0.22885278 + -4.399287 2.333433 -4.84745 -4.2752337 -1.3577863 + -1.0685898 9.505196 7.3062205 0.08708266 12.927811 + -9.57974 1.3936648 -1.9444873 5.776769 15.251903 + 10.6118355 -1.4903594 -9.535318 -3.6553776 -1.6699586 + -0.5933151 7.600357 -4.8815503 -8.698617 -15.855757 + 0.25632986 -7.2235737 0.9506656 0.7128582 -9.051738 + 8.74869 -1.6426028 -6.5762258 2.506905 -6.7431564 + 5.129912 -12.189555 -3.6435068 12.068113 -6.0059533 + -2.3535995 2.9014351 22.3082 -1.5563312 13.193291 + 2.7583609 -7.468798 1.3407065 -4.599617 -6.2345777 + 10.7689295 7.137627 5.099476 0.3473359 9.647881 + -2.0484571 -5.8549366 ] # get the score between enroll and test - Eembeddings Score: 0.4292638301849365 + Eembeddings Score: 0.45332613587379456 ``` ### 4.Pretrained Models diff --git a/demos/speaker_verification/README_cn.md b/demos/speaker_verification/README_cn.md index 90bba38ac..04e1aeecd 100644 --- a/demos/speaker_verification/README_cn.md +++ b/demos/speaker_verification/README_cn.md @@ -51,45 +51,45 @@ wget -c https://paddlespeech.bj.bcebos.com/vector/audio/85236145389.wav 输出: ```bash - demo [ 1.4217498 5.626253 -5.342073 1.1773866 3.308055 - 1.756596 5.167894 10.80636 -3.8226728 -5.6141334 - 2.623845 -0.8072968 1.9635103 -7.3128724 0.01103897 - -9.723131 0.6619743 -6.976803 10.213478 7.494748 - 2.9105635 3.8949256 3.7999806 7.1061673 16.905321 - -7.1493764 8.733103 3.4230042 -4.831653 -11.403367 - 11.232214 7.1274667 -4.2828417 2.452362 -5.130748 - -18.177666 -2.6116815 -11.000337 -6.7314315 1.6564683 - 0.7618269 1.1253023 -2.083836 4.725744 -8.782597 - -3.539873 3.814236 5.1420674 2.162061 4.096431 - -6.4162116 12.747448 1.9429878 -15.152943 6.417416 - 16.097002 -9.716668 -1.9920526 -3.3649497 -1.871939 - 11.567354 3.69788 11.258265 7.442363 9.183411 - 4.5281515 -1.2417862 4.3959084 6.6727695 5.8898783 - 7.627124 -0.66919386 -11.889693 -9.208865 -7.4274073 - -3.7776625 6.917234 -9.848748 -2.0944717 -5.135116 - 0.49563864 9.317534 -5.9141874 -1.8098574 -0.11738578 - -7.169265 -1.0578263 -5.7216787 -5.1173844 16.137651 - -4.473626 7.6624317 -0.55381083 9.631587 -6.4704556 - -8.548508 4.3716145 -0.79702514 4.478997 -2.9758704 - 3.272176 2.8382776 5.134597 -9.190781 -0.5657382 - -4.8745747 2.3165567 -5.984303 -2.1798875 0.35541576 - -0.31784213 9.493548 2.1144536 4.358092 -12.089823 - 8.451689 -7.925461 4.6242585 4.4289427 18.692003 - -2.6204622 -5.149185 -0.35821092 8.488551 4.981496 - -9.32683 -2.2544234 6.6417594 1.2119585 10.977129 - 16.555033 3.3238444 9.551863 -1.6676947 -0.79539716 - -8.605674 -0.47356385 2.6741948 -5.359179 -2.6673796 - 0.66607 15.443222 4.740594 -3.4725387 11.592567 - -2.054497 1.7361217 -8.265324 -9.30447 5.4068313 - -1.5180256 -7.746615 -6.089606 0.07112726 -0.34904733 - -8.649895 -9.998958 -2.564841 -0.53999114 2.601808 - -0.31927416 -1.8815292 -2.07215 -3.4105783 -8.2998085 - 1.483641 -15.365992 -8.288208 3.8847756 -3.4876456 - 7.3629923 0.4657332 3.132599 12.438889 -1.8337058 - 4.532936 2.7264361 10.145339 -6.521951 2.897153 - -3.3925855 5.079156 7.759716 4.677565 5.8457737 - 2.402413 7.7071047 3.9711342 -6.390043 6.1268735 - -3.7760346 -11.118123 ] + [ -1.3251206 7.8606825 -4.620626 0.3000721 2.2648535 + -1.1931441 3.0647137 7.673595 -6.0044727 -12.02426 + -1.9496069 3.1269536 1.618838 -7.6383104 -1.2299773 + -12.338331 2.1373026 -5.3957124 9.717328 5.6752305 + 3.7805123 3.0597172 3.429692 8.97601 13.174125 + -0.53132284 8.9424715 4.46511 -4.4262476 -9.726503 + 8.399328 7.2239175 -7.435854 2.9441683 -4.3430395 + -13.886965 -1.6346735 -10.9027405 -5.311245 3.8007221 + 
3.8976038 -2.1230774 -2.3521194 4.151031 -7.4048667 + 0.13911647 2.4626107 4.9664545 0.9897574 5.4839754 + -3.3574002 10.1340065 -0.6120171 -10.403095 4.6007543 + 16.00935 -7.7836914 -4.1945305 -6.9368606 1.1789556 + 11.490801 4.2380238 9.550931 8.375046 7.5089145 + -0.65707296 -0.30051577 2.8406055 3.0828028 0.730817 + 6.148354 0.13766119 -13.424735 -7.7461405 -2.3227983 + -8.305252 2.9879124 -10.995229 0.15211068 -2.3820348 + -1.7984174 8.495629 -5.8522367 -3.755498 0.6989711 + -5.2702994 -2.6188622 -1.8828466 -4.64665 14.078544 + -0.5495333 10.579158 -3.2160501 9.349004 -4.381078 + -11.675817 -2.8630207 4.5721755 2.246612 -4.574342 + 1.8610188 2.3767874 5.6257877 -9.784078 0.64967257 + -1.4579505 0.4263264 -4.9211264 -2.454784 3.4869802 + -0.42654222 8.341269 1.356552 7.0966883 -13.102829 + 8.016734 -7.1159344 1.8699781 0.208721 14.699384 + -1.025278 -2.6107233 -2.5082312 8.427193 6.9138527 + -6.2912464 0.6157366 2.489688 -3.4668267 9.921763 + 11.200815 -0.1966403 7.4916005 -0.62312716 -0.25848144 + -9.947997 -0.9611041 1.1649219 -2.1907122 -1.5028487 + -0.51926106 15.165954 2.4649463 -0.9980445 7.4416637 + -2.0768049 3.5896823 -7.3055434 -7.5620847 4.323335 + 0.0804418 -6.56401 -2.3148053 -1.7642345 -2.4708817 + -7.675618 -9.548878 -1.0177554 0.16986446 2.5877135 + -1.8752296 -0.36614323 -6.0493784 -2.3965611 -5.9453387 + 0.9424033 -13.155974 -7.457801 0.14658108 -3.742797 + 5.8414927 -1.2872906 5.5694313 12.57059 1.0939219 + 2.2142086 1.9181576 6.9914207 -5.888139 3.1409824 + -2.003628 2.4434285 9.973139 5.03668 2.0051203 + 2.8615603 5.860224 2.9176188 -1.6311141 2.0292206 + -4.070415 -6.831437 ] ``` - Python API @@ -166,47 +166,47 @@ wget -c https://paddlespeech.bj.bcebos.com/vector/audio/85236145389.wav -3.7760346 -11.118123 ] # get the test embedding Test embedding Result: - [ -1.902964 2.0690894 -8.034194 3.5472693 0.18089125 - 6.9085927 1.4097427 -1.9487704 -10.021278 -0.20755845 - -8.04332 4.344489 2.3200977 -14.306299 5.184692 - -11.55602 -3.8497238 0.6444722 1.2833948 2.6766639 - 0.5878921 0.7946299 1.7207596 2.5791872 14.998469 - -1.3385371 15.031221 -0.8006958 1.99287 -9.52007 - 2.435466 4.003221 -4.33817 -4.898601 -5.304714 - -18.033886 10.790787 -12.784645 -5.641755 2.9761686 - -10.566622 1.4839455 6.152458 -5.7195854 2.8603241 - 6.112133 8.489869 5.5958056 1.2836679 -1.2293907 - 0.89927405 7.0288725 -2.854029 -0.9782962 5.8255906 - 14.905906 -5.025907 0.7866458 -4.2444224 -16.354029 - 10.521315 0.9604709 -3.3257897 7.144871 -13.592733 - -8.568869 -1.7953678 0.26313916 10.916714 -6.9374123 - 1.857403 -6.2746415 2.8154466 -7.2338667 -2.293357 - -0.05452765 5.4287076 5.0849075 -6.690375 -1.6183422 - 3.654291 0.94352573 -9.200294 -5.4749465 -3.5235846 - 1.3420814 4.240421 -2.772944 -2.8451524 16.311104 - 4.2969875 -1.762936 -12.5758915 8.595198 -0.8835239 - -1.5708797 1.568961 1.1413603 3.5032008 -0.45251232 - -6.786333 16.89443 5.3366146 -8.789056 0.6355629 - 3.2579517 -3.328322 7.5969577 0.66025066 -6.550468 - -9.148656 2.020372 -0.4615173 1.1965656 -3.8764873 - 11.6562195 -6.0750933 12.182899 3.2218833 0.81969476 - 5.570001 -3.8459578 -7.205299 7.9262037 -7.6611166 - -5.249467 -2.2671914 7.2658715 -13.298164 4.821147 - -2.7263982 11.691089 -3.8918593 -2.838112 -1.0336838 - -3.8034165 2.8536487 -5.60398 -1.1972581 1.3455094 - -3.4903061 2.2408795 5.5010734 -3.970756 11.99696 - -7.8858757 0.43160373 -5.5059714 4.3426995 16.322706 - 11.635366 0.72157705 -9.245714 -3.91465 -4.449838 - -1.5716927 7.713747 -2.2430465 -6.198303 -13.481864 - 2.8156567 -5.7812386 5.1456156 
2.7289324 -14.505571 - 13.270688 3.448231 -7.0659585 4.5886116 -4.466099 - -0.296428 -11.463529 -2.6076477 14.110243 -6.9725137 - -1.9962958 2.7119343 19.391657 0.01961198 14.607133 - -1.6695905 -4.391516 1.3131028 -6.670972 -5.888604 - 12.0612335 5.9285784 3.3715196 1.492534 10.723728 - -0.95514804 -12.085431 ] + [ 2.5247195 5.119042 -4.335273 4.4583654 5.047907 + 3.5059214 1.6159848 0.49364898 -11.6899185 -3.1014526 + -5.6589785 -0.42684984 2.674276 -11.937654 6.2248464 + -10.776924 -5.694543 1.112041 1.5709964 1.0961034 + 1.3976512 2.324352 1.339981 5.279319 13.734659 + -2.5753925 13.651442 -2.2357535 5.1575427 -3.251567 + 1.4023279 6.1191974 -6.0845175 -1.3646189 -2.6789894 + -15.220778 9.779349 -9.411551 -6.388947 6.8313975 + -9.245996 0.31196198 2.5509644 -4.413065 6.1649427 + 6.793837 2.6328635 8.620976 3.4832475 0.52491665 + 2.9115407 5.8392377 0.6702376 -3.2726715 2.6694255 + 16.91701 -5.5811176 0.23362345 -4.5573606 -11.801059 + 14.728292 -0.5198082 -3.999922 7.0927105 -7.0459595 + -5.4389 -0.46420583 -5.1085467 10.376568 -8.889225 + -0.37705845 -1.659806 2.6731026 -7.1909504 1.4608804 + -2.163136 -0.17949677 4.0241547 0.11319201 0.601279 + 2.039692 3.1910992 -11.649526 -8.121584 -4.8707457 + 0.3851982 1.4231744 -2.3321972 0.99332285 14.121717 + 5.899413 0.7384519 -17.760096 10.555021 4.1366534 + -0.3391071 -0.20792882 3.208204 0.8847948 -8.721497 + -6.432868 13.006379 4.8956 -9.155822 -1.9441519 + 5.7815638 -2.066733 10.425042 -0.8802383 -2.4314315 + -9.869258 0.35095334 -5.3549943 2.1076174 -8.290468 + 8.4433365 -4.689333 9.334139 -2.172678 -3.0250976 + 8.394216 -3.2110903 -7.93868 2.3960824 -2.3213403 + -1.4963245 -3.476059 4.132903 -10.893354 4.362673 + -0.45456508 10.258634 -1.1655927 -6.7799754 0.22885278 + -4.399287 2.333433 -4.84745 -4.2752337 -1.3577863 + -1.0685898 9.505196 7.3062205 0.08708266 12.927811 + -9.57974 1.3936648 -1.9444873 5.776769 15.251903 + 10.6118355 -1.4903594 -9.535318 -3.6553776 -1.6699586 + -0.5933151 7.600357 -4.8815503 -8.698617 -15.855757 + 0.25632986 -7.2235737 0.9506656 0.7128582 -9.051738 + 8.74869 -1.6426028 -6.5762258 2.506905 -6.7431564 + 5.129912 -12.189555 -3.6435068 12.068113 -6.0059533 + -2.3535995 2.9014351 22.3082 -1.5563312 13.193291 + 2.7583609 -7.468798 1.3407065 -4.599617 -6.2345777 + 10.7689295 7.137627 5.099476 0.3473359 9.647881 + -2.0484571 -5.8549366 ] # get the score between enroll and test - Eembeddings Score: 0.4292638301849365 + Eembeddings Score: 0.45332613587379456 ``` ### 4.预训练模型 diff --git a/docs/source/released_model.md b/docs/source/released_model.md index 74435ae1a..3231fecd4 100644 --- a/docs/source/released_model.md +++ b/docs/source/released_model.md @@ -82,7 +82,7 @@ PANN | ESC-50 |[pann-esc50](../../examples/esc50/cls0)|[esc50_cnn6.tar.gz](https Model Type | Dataset| Example Link | Pretrained Models | Static Models :-------------:| :------------:| :-----: | :-----: | :-----: -PANN | VoxCeleb| [voxceleb_ecapatdnn](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/voxceleb/sv0) | [ecapatdnn.tar.gz](https://paddlespeech.bj.bcebos.com/vector/voxceleb/sv0_ecapa_tdnn_voxceleb12_ckpt_0_2_0.tar.gz) | - +PANN | VoxCeleb| [voxceleb_ecapatdnn](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/voxceleb/sv0) | [ecapatdnn.tar.gz](https://paddlespeech.bj.bcebos.com/vector/voxceleb/sv0_ecapa_tdnn_voxceleb12_ckpt_0_2_1.tar.gz) | - ## Punctuation Restoration Models Model Type | Dataset| Example Link | Pretrained Models diff --git a/examples/voxceleb/sv0/README.md 
b/examples/voxceleb/sv0/README.md
index 418102b4f..26c95aca9 100644
--- a/examples/voxceleb/sv0/README.md
+++ b/examples/voxceleb/sv0/README.md
@@ -141,11 +141,11 @@ using the `tar` scripts to unpack the model and then you can use the script to t
 For example:
 ```
-wget https://paddlespeech.bj.bcebos.com/vector/voxceleb/sv0_ecapa_tdnn_voxceleb12_ckpt_0_2_0.tar.gz
-tar -xvf sv0_ecapa_tdnn_voxceleb12_ckpt_0_2_0.tar.gz
+wget https://paddlespeech.bj.bcebos.com/vector/voxceleb/sv0_ecapa_tdnn_voxceleb12_ckpt_0_2_1.tar.gz
+tar -xvf sv0_ecapa_tdnn_voxceleb12_ckpt_0_2_1.tar.gz
 source path.sh
 # If you have processed the data and get the manifest file, you can skip the following 2 steps
-CUDA_VISIBLE_DEVICES= bash ./local/test.sh ./data sv0_ecapa_tdnn_voxceleb12_ckpt_0_1_2/model/ conf/ecapa_tdnn.yaml
+CUDA_VISIBLE_DEVICES= bash ./local/test.sh ./data sv0_ecapa_tdnn_voxceleb12_ckpt_0_2_1/model/ conf/ecapa_tdnn.yaml
 ```
 
The performance of the released models are shown in [this](./RESULTS.md)
diff --git a/examples/voxceleb/sv0/RESULT.md b/examples/voxceleb/sv0/RESULT.md
index 3a3f67d09..a1d2a1812 100644
--- a/examples/voxceleb/sv0/RESULT.md
+++ b/examples/voxceleb/sv0/RESULT.md
@@ -5,3 +5,4 @@
 | Model | Number of Params | Release | Config | dim | Test set | Cosine | Cosine + S-Norm |
 | --- | --- | --- | --- | --- | --- | --- | ---- |
 | ECAPA-TDNN | 85M | 0.2.0 | conf/ecapa_tdnn.yaml |192 | test | 1.02 | 0.95 |
+| ECAPA-TDNN | 85M | 0.2.1 | conf/ecapa_tdnn.yaml | 192 | test | 0.8188 | 0.7815 |
diff --git a/examples/voxceleb/sv0/conf/ecapa_tdnn.yaml b/examples/voxceleb/sv0/conf/ecapa_tdnn.yaml
index 3e3a13072..b7b71d77d 100644
--- a/examples/voxceleb/sv0/conf/ecapa_tdnn.yaml
+++ b/examples/voxceleb/sv0/conf/ecapa_tdnn.yaml
@@ -59,3 +59,11 @@ global_embedding_norm: True
 embedding_mean_norm: True
 embedding_std_norm: False
 
+###########################################
+#                score-norm               #
+###########################################
+score_norm: s-norm
+cohort_size: 20000 # number of imposter utterances in the normalization cohort
+n_train_snts: 400000 # number of training utterances used to compute normalization stats
+
+
diff --git a/examples/voxceleb/sv0/conf/ecapa_tdnn_small.yaml b/examples/voxceleb/sv0/conf/ecapa_tdnn_small.yaml
index 5925e5730..40498c874 100644
--- a/examples/voxceleb/sv0/conf/ecapa_tdnn_small.yaml
+++ b/examples/voxceleb/sv0/conf/ecapa_tdnn_small.yaml
@@ -58,3 +58,10 @@ global_embedding_norm: True
 embedding_mean_norm: True
 embedding_std_norm: False
 
+###########################################
+#                score-norm               #
+###########################################
+score_norm: s-norm
+cohort_size: 20000 # number of imposter utterances in the normalization cohort
+n_train_snts: 400000 # number of training utterances used to compute normalization stats
+
diff --git a/paddlespeech/cli/vector/pretrained_models.py b/paddlespeech/cli/vector/pretrained_models.py
index 686a22d8f..4d1d3a048 100644
--- a/paddlespeech/cli/vector/pretrained_models.py
+++ b/paddlespeech/cli/vector/pretrained_models.py
@@ -19,9 +19,9 @@ pretrained_models = {
     # "paddlespeech vector --task spk --model ecapatdnn_voxceleb12-16k --sr 16000 --input ./input.wav"
     "ecapatdnn_voxceleb12-16k": {
         'url':
-        'https://paddlespeech.bj.bcebos.com/vector/voxceleb/sv0_ecapa_tdnn_voxceleb12_ckpt_0_2_0.tar.gz',
+        'https://paddlespeech.bj.bcebos.com/vector/voxceleb/sv0_ecapa_tdnn_voxceleb12_ckpt_0_2_1.tar.gz',
         'md5':
-        'cc33023c54ab346cd318408f43fcaf95',
+        '67c7ff8885d5246bd16e0f5ac1cba99f',
         'cfg_path':
         'conf/model.yaml',  # the yaml config path
         'ckpt_path':
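For readers unfamiliar with the `score_norm: s-norm` option added in the two configs above: s-norm symmetrically z-normalizes each raw cosine trial score against the scores that the enroll and test embeddings produce against an imposter cohort, which is what `cohort_size` and `n_train_snts` control. Below is a minimal NumPy sketch of the idea; the function name and shapes are illustrative assumptions, not the PaddleSpeech API:

```python
import numpy as np


def s_norm_score(enroll, test, cohort):
    """Symmetric score normalization (s-norm) sketch.

    enroll, test: (dim,) L2-normalized speaker embeddings.
    cohort: (cohort_size, dim) L2-normalized imposter embeddings,
            e.g. cohort_size = 20000 as configured above.
    With unit-norm vectors, a dot product is the cosine score.
    """
    raw = float(enroll @ test)
    enroll_vs_cohort = cohort @ enroll  # enroll side vs. every imposter
    test_vs_cohort = cohort @ test      # test side vs. every imposter
    # z-normalize the raw score with the statistics of both sides, then average
    z_enroll = (raw - enroll_vs_cohort.mean()) / enroll_vs_cohort.std()
    z_test = (raw - test_vs_cohort.mean()) / test_vs_cohort.std()
    return 0.5 * (z_enroll + z_test)
```

The actual scoring in the sv0 example may differ in detail (for instance, adaptive variants keep only the top-scoring cohort subset); the sketch only motivates the new config keys.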
diff --git a/paddlespeech/server/engine/acs/python/acs_engine.py b/paddlespeech/server/engine/acs/python/acs_engine.py
index 30deeeb50..d52852dcf 100644
--- a/paddlespeech/server/engine/acs/python/acs_engine.py
+++ b/paddlespeech/server/engine/acs/python/acs_engine.py
@@ -16,6 +16,7 @@ import json
 import os
 import re
 
+import numpy as np
 import paddle
 import soundfile
 import websocket
@@ -44,11 +45,7 @@ class ACSEngine(BaseEngine):
         logger.info("Init the acs engine")
         try:
             self.config = config
-            if self.config.device:
-                self.device = self.config.device
-            else:
-                self.device = paddle.get_device()
-
+            self.device = self.config.get("device", paddle.get_device())
             paddle.set_device(self.device)
             logger.info(f"ACS Engine set the device: {self.device}")
 
@@ -116,11 +113,17 @@ class ACSEngine(BaseEngine):
         logger.info("client receive msg={}".format(msg))
 
         # send the total audio data
-        samples, sample_rate = soundfile.read(audio_data, dtype='int16')
-        ws.send_binary(samples.tobytes())
-        msg = ws.recv()
-        msg = json.loads(msg)
-        logger.info(f"audio result: {msg}")
+        for chunk_data in self.read_wave(audio_data):
+            ws.send_binary(chunk_data.tobytes())
+            msg = ws.recv()
+            msg = json.loads(msg)
+            logger.info(f"audio result: {msg}")
+        # samples, sample_rate = soundfile.read(audio_data, dtype='int16')
+
+        # ws.send_binary(samples.tobytes())
+        # msg = ws.recv()
+        # msg = json.loads(msg)
+        # logger.info(f"audio result: {msg}")
 
         # 3. send chunk audio data to engine
         logger.info("send the end signal")
@@ -142,6 +145,39 @@ class ACSEngine(BaseEngine):
 
         return msg
 
+    def read_wave(self, audio_data: str):
+        """Read the audio from the given wav file path
+
+        Args:
+            audio_data (str): path of the wav file;
+                              we assume its sample rate matches the model
+
+        Yields:
+            numpy.array: the next small chunk of audio pcm data
+        """
+        samples, sample_rate = soundfile.read(audio_data, dtype='int16')
+        x_len = len(samples)
+        assert sample_rate == 16000
+
+        chunk_size = int(85 * sample_rate / 1000)  # 85 ms per chunk, i.e. 1360 samples at 16 kHz
+
+        if x_len % chunk_size != 0:
+            padding_len_x = chunk_size - x_len % chunk_size
+        else:
+            padding_len_x = 0
+
+        padding = np.zeros((padding_len_x), dtype=samples.dtype)
+        padded_x = np.concatenate([samples, padding], axis=0)
+
+        assert (x_len + padding_len_x) % chunk_size == 0
+        num_chunk = (x_len + padding_len_x) / chunk_size
+        num_chunk = int(num_chunk)
+        for i in range(0, num_chunk):
+            start = i * chunk_size
+            end = start + chunk_size
+            x_chunk = padded_x[start:end]
+            yield x_chunk
+
     def get_macthed_word(self, msg):
         """Get the matched info in msg
 
diff --git a/paddlespeech/server/tests/vector/http_client.py b/paddlespeech/server/tests/vector/http_client.py
new file mode 100644
index 000000000..49f2adf7c
--- /dev/null
+++ b/paddlespeech/server/tests/vector/http_client.py
@@ -0,0 +1,59 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the +import base64 +import json +import time + +import requests + + +def readwav2base64(wav_file): + """ + read wave file and covert to base64 string + """ + with open(wav_file, 'rb') as f: + base64_bytes = base64.b64encode(f.read()) + base64_string = base64_bytes.decode('utf-8') + return base64_string + + +def main(): + """ + main func + """ + url = "http://127.0.0.1:8090/paddlespeech/asr" + + # start Timestamp + time_start = time.time() + + test_audio_dir = "./16_audio.wav" + audio = readwav2base64(test_audio_dir) + + data = { + "audio": audio, + "audio_format": "wav", + "sample_rate": 16000, + "lang": "zh_cn", + } + + r = requests.post(url=url, data=json.dumps(data)) + + # ending Timestamp + time_end = time.time() + print('time cost', time_end - time_start, 's') + + print(r.json()) + + +if __name__ == "__main__": + main() From a83374a78755079c0990b02d0a1d60e198a5d167 Mon Sep 17 00:00:00 2001 From: xiongxinlei Date: Wed, 25 May 2022 12:20:44 +0800 Subject: [PATCH 039/127] update the vector readme, test=doc --- demos/speaker_verification/README.md | 78 +++++++++---------- demos/speaker_verification/README_cn.md | 78 +++++++++---------- demos/speech_server/README.md | 35 +++++---- demos/speech_server/README_cn.md | 37 ++++----- .../server/tests/vector/http_client.py | 59 -------------- 5 files changed, 115 insertions(+), 172 deletions(-) delete mode 100644 paddlespeech/server/tests/vector/http_client.py diff --git a/demos/speaker_verification/README.md b/demos/speaker_verification/README.md index a7d0f819d..63dc9294e 100644 --- a/demos/speaker_verification/README.md +++ b/demos/speaker_verification/README.md @@ -127,45 +127,45 @@ wget -c https://paddlespeech.bj.bcebos.com/vector/audio/85236145389.wav ```bash # Vector Result: Audio embedding Result: - [ 1.4217498 5.626253 -5.342073 1.1773866 3.308055 - 1.756596 5.167894 10.80636 -3.8226728 -5.6141334 - 2.623845 -0.8072968 1.9635103 -7.3128724 0.01103897 - -9.723131 0.6619743 -6.976803 10.213478 7.494748 - 2.9105635 3.8949256 3.7999806 7.1061673 16.905321 - -7.1493764 8.733103 3.4230042 -4.831653 -11.403367 - 11.232214 7.1274667 -4.2828417 2.452362 -5.130748 - -18.177666 -2.6116815 -11.000337 -6.7314315 1.6564683 - 0.7618269 1.1253023 -2.083836 4.725744 -8.782597 - -3.539873 3.814236 5.1420674 2.162061 4.096431 - -6.4162116 12.747448 1.9429878 -15.152943 6.417416 - 16.097002 -9.716668 -1.9920526 -3.3649497 -1.871939 - 11.567354 3.69788 11.258265 7.442363 9.183411 - 4.5281515 -1.2417862 4.3959084 6.6727695 5.8898783 - 7.627124 -0.66919386 -11.889693 -9.208865 -7.4274073 - -3.7776625 6.917234 -9.848748 -2.0944717 -5.135116 - 0.49563864 9.317534 -5.9141874 -1.8098574 -0.11738578 - -7.169265 -1.0578263 -5.7216787 -5.1173844 16.137651 - -4.473626 7.6624317 -0.55381083 9.631587 -6.4704556 - -8.548508 4.3716145 -0.79702514 4.478997 -2.9758704 - 3.272176 2.8382776 5.134597 -9.190781 -0.5657382 - -4.8745747 2.3165567 -5.984303 -2.1798875 0.35541576 - -0.31784213 9.493548 2.1144536 4.358092 -12.089823 - 8.451689 -7.925461 4.6242585 4.4289427 18.692003 - -2.6204622 -5.149185 -0.35821092 8.488551 4.981496 - -9.32683 -2.2544234 6.6417594 1.2119585 10.977129 - 16.555033 3.3238444 9.551863 -1.6676947 -0.79539716 - -8.605674 -0.47356385 2.6741948 -5.359179 -2.6673796 - 0.66607 15.443222 4.740594 -3.4725387 11.592567 - -2.054497 1.7361217 -8.265324 -9.30447 5.4068313 - -1.5180256 -7.746615 -6.089606 0.07112726 -0.34904733 - -8.649895 -9.998958 -2.564841 -0.53999114 2.601808 - -0.31927416 -1.8815292 -2.07215 -3.4105783 -8.2998085 - 
1.483641 -15.365992 -8.288208 3.8847756 -3.4876456 - 7.3629923 0.4657332 3.132599 12.438889 -1.8337058 - 4.532936 2.7264361 10.145339 -6.521951 2.897153 - -3.3925855 5.079156 7.759716 4.677565 5.8457737 - 2.402413 7.7071047 3.9711342 -6.390043 6.1268735 - -3.7760346 -11.118123 ] + [ -1.3251206 7.8606825 -4.620626 0.3000721 2.2648535 + -1.1931441 3.0647137 7.673595 -6.0044727 -12.02426 + -1.9496069 3.1269536 1.618838 -7.6383104 -1.2299773 + -12.338331 2.1373026 -5.3957124 9.717328 5.6752305 + 3.7805123 3.0597172 3.429692 8.97601 13.174125 + -0.53132284 8.9424715 4.46511 -4.4262476 -9.726503 + 8.399328 7.2239175 -7.435854 2.9441683 -4.3430395 + -13.886965 -1.6346735 -10.9027405 -5.311245 3.8007221 + 3.8976038 -2.1230774 -2.3521194 4.151031 -7.4048667 + 0.13911647 2.4626107 4.9664545 0.9897574 5.4839754 + -3.3574002 10.1340065 -0.6120171 -10.403095 4.6007543 + 16.00935 -7.7836914 -4.1945305 -6.9368606 1.1789556 + 11.490801 4.2380238 9.550931 8.375046 7.5089145 + -0.65707296 -0.30051577 2.8406055 3.0828028 0.730817 + 6.148354 0.13766119 -13.424735 -7.7461405 -2.3227983 + -8.305252 2.9879124 -10.995229 0.15211068 -2.3820348 + -1.7984174 8.495629 -5.8522367 -3.755498 0.6989711 + -5.2702994 -2.6188622 -1.8828466 -4.64665 14.078544 + -0.5495333 10.579158 -3.2160501 9.349004 -4.381078 + -11.675817 -2.8630207 4.5721755 2.246612 -4.574342 + 1.8610188 2.3767874 5.6257877 -9.784078 0.64967257 + -1.4579505 0.4263264 -4.9211264 -2.454784 3.4869802 + -0.42654222 8.341269 1.356552 7.0966883 -13.102829 + 8.016734 -7.1159344 1.8699781 0.208721 14.699384 + -1.025278 -2.6107233 -2.5082312 8.427193 6.9138527 + -6.2912464 0.6157366 2.489688 -3.4668267 9.921763 + 11.200815 -0.1966403 7.4916005 -0.62312716 -0.25848144 + -9.947997 -0.9611041 1.1649219 -2.1907122 -1.5028487 + -0.51926106 15.165954 2.4649463 -0.9980445 7.4416637 + -2.0768049 3.5896823 -7.3055434 -7.5620847 4.323335 + 0.0804418 -6.56401 -2.3148053 -1.7642345 -2.4708817 + -7.675618 -9.548878 -1.0177554 0.16986446 2.5877135 + -1.8752296 -0.36614323 -6.0493784 -2.3965611 -5.9453387 + 0.9424033 -13.155974 -7.457801 0.14658108 -3.742797 + 5.8414927 -1.2872906 5.5694313 12.57059 1.0939219 + 2.2142086 1.9181576 6.9914207 -5.888139 3.1409824 + -2.003628 2.4434285 9.973139 5.03668 2.0051203 + 2.8615603 5.860224 2.9176188 -1.6311141 2.0292206 + -4.070415 -6.831437 ] # get the test embedding Test embedding Result: [ 2.5247195 5.119042 -4.335273 4.4583654 5.047907 diff --git a/demos/speaker_verification/README_cn.md b/demos/speaker_verification/README_cn.md index 04e1aeecd..07eeac2ee 100644 --- a/demos/speaker_verification/README_cn.md +++ b/demos/speaker_verification/README_cn.md @@ -125,45 +125,45 @@ wget -c https://paddlespeech.bj.bcebos.com/vector/audio/85236145389.wav ```bash # Vector Result: Audio embedding Result: - [ 1.4217498 5.626253 -5.342073 1.1773866 3.308055 - 1.756596 5.167894 10.80636 -3.8226728 -5.6141334 - 2.623845 -0.8072968 1.9635103 -7.3128724 0.01103897 - -9.723131 0.6619743 -6.976803 10.213478 7.494748 - 2.9105635 3.8949256 3.7999806 7.1061673 16.905321 - -7.1493764 8.733103 3.4230042 -4.831653 -11.403367 - 11.232214 7.1274667 -4.2828417 2.452362 -5.130748 - -18.177666 -2.6116815 -11.000337 -6.7314315 1.6564683 - 0.7618269 1.1253023 -2.083836 4.725744 -8.782597 - -3.539873 3.814236 5.1420674 2.162061 4.096431 - -6.4162116 12.747448 1.9429878 -15.152943 6.417416 - 16.097002 -9.716668 -1.9920526 -3.3649497 -1.871939 - 11.567354 3.69788 11.258265 7.442363 9.183411 - 4.5281515 -1.2417862 4.3959084 6.6727695 5.8898783 - 7.627124 -0.66919386 
-11.889693 -9.208865 -7.4274073 - -3.7776625 6.917234 -9.848748 -2.0944717 -5.135116 - 0.49563864 9.317534 -5.9141874 -1.8098574 -0.11738578 - -7.169265 -1.0578263 -5.7216787 -5.1173844 16.137651 - -4.473626 7.6624317 -0.55381083 9.631587 -6.4704556 - -8.548508 4.3716145 -0.79702514 4.478997 -2.9758704 - 3.272176 2.8382776 5.134597 -9.190781 -0.5657382 - -4.8745747 2.3165567 -5.984303 -2.1798875 0.35541576 - -0.31784213 9.493548 2.1144536 4.358092 -12.089823 - 8.451689 -7.925461 4.6242585 4.4289427 18.692003 - -2.6204622 -5.149185 -0.35821092 8.488551 4.981496 - -9.32683 -2.2544234 6.6417594 1.2119585 10.977129 - 16.555033 3.3238444 9.551863 -1.6676947 -0.79539716 - -8.605674 -0.47356385 2.6741948 -5.359179 -2.6673796 - 0.66607 15.443222 4.740594 -3.4725387 11.592567 - -2.054497 1.7361217 -8.265324 -9.30447 5.4068313 - -1.5180256 -7.746615 -6.089606 0.07112726 -0.34904733 - -8.649895 -9.998958 -2.564841 -0.53999114 2.601808 - -0.31927416 -1.8815292 -2.07215 -3.4105783 -8.2998085 - 1.483641 -15.365992 -8.288208 3.8847756 -3.4876456 - 7.3629923 0.4657332 3.132599 12.438889 -1.8337058 - 4.532936 2.7264361 10.145339 -6.521951 2.897153 - -3.3925855 5.079156 7.759716 4.677565 5.8457737 - 2.402413 7.7071047 3.9711342 -6.390043 6.1268735 - -3.7760346 -11.118123 ] + [ -1.3251206 7.8606825 -4.620626 0.3000721 2.2648535 + -1.1931441 3.0647137 7.673595 -6.0044727 -12.02426 + -1.9496069 3.1269536 1.618838 -7.6383104 -1.2299773 + -12.338331 2.1373026 -5.3957124 9.717328 5.6752305 + 3.7805123 3.0597172 3.429692 8.97601 13.174125 + -0.53132284 8.9424715 4.46511 -4.4262476 -9.726503 + 8.399328 7.2239175 -7.435854 2.9441683 -4.3430395 + -13.886965 -1.6346735 -10.9027405 -5.311245 3.8007221 + 3.8976038 -2.1230774 -2.3521194 4.151031 -7.4048667 + 0.13911647 2.4626107 4.9664545 0.9897574 5.4839754 + -3.3574002 10.1340065 -0.6120171 -10.403095 4.6007543 + 16.00935 -7.7836914 -4.1945305 -6.9368606 1.1789556 + 11.490801 4.2380238 9.550931 8.375046 7.5089145 + -0.65707296 -0.30051577 2.8406055 3.0828028 0.730817 + 6.148354 0.13766119 -13.424735 -7.7461405 -2.3227983 + -8.305252 2.9879124 -10.995229 0.15211068 -2.3820348 + -1.7984174 8.495629 -5.8522367 -3.755498 0.6989711 + -5.2702994 -2.6188622 -1.8828466 -4.64665 14.078544 + -0.5495333 10.579158 -3.2160501 9.349004 -4.381078 + -11.675817 -2.8630207 4.5721755 2.246612 -4.574342 + 1.8610188 2.3767874 5.6257877 -9.784078 0.64967257 + -1.4579505 0.4263264 -4.9211264 -2.454784 3.4869802 + -0.42654222 8.341269 1.356552 7.0966883 -13.102829 + 8.016734 -7.1159344 1.8699781 0.208721 14.699384 + -1.025278 -2.6107233 -2.5082312 8.427193 6.9138527 + -6.2912464 0.6157366 2.489688 -3.4668267 9.921763 + 11.200815 -0.1966403 7.4916005 -0.62312716 -0.25848144 + -9.947997 -0.9611041 1.1649219 -2.1907122 -1.5028487 + -0.51926106 15.165954 2.4649463 -0.9980445 7.4416637 + -2.0768049 3.5896823 -7.3055434 -7.5620847 4.323335 + 0.0804418 -6.56401 -2.3148053 -1.7642345 -2.4708817 + -7.675618 -9.548878 -1.0177554 0.16986446 2.5877135 + -1.8752296 -0.36614323 -6.0493784 -2.3965611 -5.9453387 + 0.9424033 -13.155974 -7.457801 0.14658108 -3.742797 + 5.8414927 -1.2872906 5.5694313 12.57059 1.0939219 + 2.2142086 1.9181576 6.9914207 -5.888139 3.1409824 + -2.003628 2.4434285 9.973139 5.03668 2.0051203 + 2.8615603 5.860224 2.9176188 -1.6311141 2.0292206 + -4.070415 -6.831437 ] # get the test embedding Test embedding Result: [ 2.5247195 5.119042 -4.335273 4.4583654 5.047907 diff --git a/demos/speech_server/README.md b/demos/speech_server/README.md index a03a43dff..14a88f078 100644 --- 
a/demos/speech_server/README.md +++ b/demos/speech_server/README.md @@ -274,12 +274,12 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespee Output: ```bash - [2022-05-08 00:18:44,249] [ INFO] - vector http client start - [2022-05-08 00:18:44,250] [ INFO] - the input audio: 85236145389.wav - [2022-05-08 00:18:44,250] [ INFO] - endpoint: http://127.0.0.1:8090/paddlespeech/vector - [2022-05-08 00:18:44,250] [ INFO] - http://127.0.0.1:8590/paddlespeech/vector - [2022-05-08 00:18:44,406] [ INFO] - The vector: {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'vec': [1.421751856803894, 5.626245498657227, -5.342077255249023, 1.1773887872695923, 3.3080549240112305, 1.7565933465957642, 5.167886257171631, 10.806358337402344, -3.8226819038391113, -5.614140033721924, 2.6238479614257812, -0.8072972893714905, 1.9635076522827148, -7.312870025634766, 0.011035939678549767, -9.723129272460938, 0.6619706153869629, -6.976806163787842, 10.213476181030273, 7.494769096374512, 2.9105682373046875, 3.8949244022369385, 3.799983501434326, 7.106168746948242, 16.90532875061035, -7.149388313293457, 8.733108520507812, 3.423006296157837, -4.831653594970703, -11.403363227844238, 11.232224464416504, 7.127461910247803, -4.282842636108398, 2.452359437942505, -5.130749702453613, -18.17766761779785, -2.6116831302642822, -11.000344276428223, -6.731433391571045, 1.6564682722091675, 0.7618281245231628, 1.125300407409668, -2.0838370323181152, 4.725743293762207, -8.782588005065918, -3.5398752689361572, 3.8142364025115967, 5.142068862915039, 2.1620609760284424, 4.09643030166626, -6.416214942932129, 12.747446060180664, 1.9429892301559448, -15.15294361114502, 6.417416095733643, 16.09701156616211, -9.716667175292969, -1.9920575618743896, -3.36494779586792, -1.8719440698623657, 11.567351341247559, 3.6978814601898193, 11.258262634277344, 7.442368507385254, 9.183408737182617, 4.528149127960205, -1.2417854070663452, 4.395912170410156, 6.6727728843688965, 5.88988733291626, 7.627128601074219, -0.6691966652870178, -11.889698028564453, -9.20886516571045, -7.42740535736084, -3.777663230895996, 6.917238712310791, -9.848755836486816, -2.0944676399230957, -5.1351165771484375, 0.4956451654434204, 9.317537307739258, -5.914181232452393, -1.809860348701477, -0.11738915741443634, -7.1692705154418945, -1.057827353477478, -5.721670627593994, -5.117385387420654, 16.13765525817871, -4.473617076873779, 7.6624321937561035, -0.55381840467453, 9.631585121154785, -6.470459461212158, -8.548508644104004, 4.371616840362549, -0.7970245480537415, 4.4789886474609375, -2.975860834121704, 3.2721822261810303, 2.838287830352783, 5.134591102600098, -9.19079875946045, -0.5657302737236023, -4.8745832443237305, 2.3165574073791504, -5.984319686889648, -2.1798853874206543, 0.3554139733314514, -0.3178512752056122, 9.493552207946777, 2.1144471168518066, 4.358094692230225, -12.089824676513672, 8.451693534851074, -7.925466537475586, 4.624246597290039, 4.428936958312988, 18.69200897216797, -2.6204581260681152, -5.14918851852417, -0.3582090139389038, 8.488558769226074, 4.98148775100708, -9.326835632324219, -2.2544219493865967, 6.641760349273682, 1.2119598388671875, 10.977124214172363, 16.555034637451172, 3.3238420486450195, 9.551861763000488, -1.6676981449127197, -0.7953944206237793, -8.605667114257812, -0.4735655188560486, 2.674196243286133, -5.359177112579346, -2.66738224029541, 0.6660683155059814, 15.44322681427002, 4.740593433380127, -3.472534418106079, 11.592567443847656, -2.0544962882995605, 1.736127495765686, 
-8.265326499938965, -9.30447769165039, 5.406829833984375, -1.518022894859314, -7.746612548828125, -6.089611053466797, 0.07112743705511093, -0.3490503430366516, -8.64989185333252, -9.998957633972168, -2.564845085144043, -0.5399947762489319, 2.6018123626708984, -0.3192799389362335, -1.8815255165100098, -2.0721492767333984, -3.410574436187744, -8.29980754852295, 1.483638048171997, -15.365986824035645, -8.288211822509766, 3.884779930114746, -3.4876468181610107, 7.362999439239502, 0.4657334089279175, 3.1326050758361816, 12.438895225524902, -1.8337041139602661, 4.532927989959717, 2.7264339923858643, 10.14534854888916, -6.521963596343994, 2.897155523300171, -3.392582654953003, 5.079153060913086, 7.7597246170043945, 4.677570819854736, 5.845779895782471, 2.402411460876465, 7.7071051597595215, 3.9711380004882812, -6.39003849029541, 6.12687873840332, -3.776029348373413, -11.118121147155762]}} - [2022-05-08 00:18:44,406] [ INFO] - Response time 0.156481 s. + [2022-05-25 12:25:36,165] [ INFO] - vector http client start + [2022-05-25 12:25:36,165] [ INFO] - the input audio: 85236145389.wav + [2022-05-25 12:25:36,165] [ INFO] - endpoint: http://127.0.0.1:8790/paddlespeech/vector + [2022-05-25 12:25:36,166] [ INFO] - http://127.0.0.1:8790/paddlespeech/vector + [2022-05-25 12:25:36,324] [ INFO] - The vector: {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'vec': [-1.3251205682754517, 7.860682487487793, -4.620625972747803, 0.3000721037387848, 2.2648534774780273, -1.1931440830230713, 3.064713716506958, 7.673594951629639, -6.004472732543945, -12.024259567260742, -1.9496068954467773, 3.126953601837158, 1.6188379526138306, -7.638310432434082, -1.2299772500991821, -12.33833122253418, 2.1373026371002197, -5.395712375640869, 9.717328071594238, 5.675230503082275, 3.7805123329162598, 3.0597171783447266, 3.429692029953003, 8.9760103225708, 13.174124717712402, -0.5313228368759155, 8.942471504211426, 4.465109825134277, -4.426247596740723, -9.726503372192383, 8.399328231811523, 7.223917484283447, -7.435853958129883, 2.9441683292388916, -4.343039512634277, -13.886964797973633, -1.6346734762191772, -10.902740478515625, -5.311244964599609, 3.800722122192383, 3.897603750228882, -2.123077392578125, -2.3521194458007812, 4.151031017303467, -7.404866695404053, 0.13911646604537964, 2.4626107215881348, 4.96645450592041, 0.9897574186325073, 5.483975410461426, -3.3574001789093018, 10.13400650024414, -0.6120170950889587, -10.403095245361328, 4.600754261016846, 16.009349822998047, -7.78369140625, -4.194530487060547, -6.93686056137085, 1.1789555549621582, 11.490800857543945, 4.23802375793457, 9.550930976867676, 8.375045776367188, 7.508914470672607, -0.6570729613304138, -0.3005157709121704, 2.8406054973602295, 3.0828027725219727, 0.7308170199394226, 6.1483540534973145, 0.1376611888408661, -13.424735069274902, -7.746140480041504, -2.322798252105713, -8.305252075195312, 2.98791241645813, -10.99522876739502, 0.15211068093776703, -2.3820347785949707, -1.7984174489974976, 8.49562931060791, -5.852236747741699, -3.755497932434082, 0.6989710927009583, -5.270299434661865, -2.6188621520996094, -1.8828465938568115, -4.6466498374938965, 14.078543663024902, -0.5495333075523376, 10.579157829284668, -3.216050148010254, 9.349003791809082, -4.381077766418457, -11.675816535949707, -2.863020658493042, 4.5721755027771, 2.246612071990967, -4.574341773986816, 1.8610187768936157, 2.3767874240875244, 5.625787734985352, -9.784077644348145, 0.6496725678443909, -1.457950472831726, 0.4263263940811157, -4.921126365661621, 
-2.4547839164733887, 3.4869801998138428, -0.4265422224998474, 8.341268539428711, 1.356552004814148, 7.096688270568848, -13.102828979492188, 8.01673412322998, -7.115934371948242, 1.8699780702590942, 0.20872099697589874, 14.699383735656738, -1.0252779722213745, -2.6107232570648193, -2.5082311630249023, 8.427192687988281, 6.913852691650391, -6.29124641418457, 0.6157366037368774, 2.489687919616699, -3.4668266773223877, 9.92176342010498, 11.200815200805664, -0.19664029777050018, 7.491600513458252, -0.6231271624565125, -0.2584814429283142, -9.947997093200684, -0.9611040949821472, 1.1649218797683716, -2.1907122135162354, -1.502848744392395, -0.5192610621452332, 15.165953636169434, 2.4649462699890137, -0.998044490814209, 7.44166374206543, -2.0768048763275146, 3.5896823406219482, -7.305543422698975, -7.562084674835205, 4.32333517074585, 0.08044180274009705, -6.564010143280029, -2.314805269241333, -1.7642345428466797, -2.470881700515747, -7.6756181716918945, -9.548877716064453, -1.017755389213562, 0.1698644608259201, 2.5877134799957275, -1.8752295970916748, -0.36614322662353516, -6.049378395080566, -2.3965611457824707, -5.945338726043701, 0.9424033164978027, -13.155974388122559, -7.45780086517334, 0.14658108353614807, -3.7427968978881836, 5.841492652893066, -1.2872905731201172, 5.569431304931641, 12.570590019226074, 1.0939218997955322, 2.2142086029052734, 1.9181575775146484, 6.991420745849609, -5.888138771057129, 3.1409823894500732, -2.0036280155181885, 2.4434285163879395, 9.973138809204102, 5.036680221557617, 2.005120277404785, 2.861560344696045, 5.860223770141602, 2.917618751525879, -1.63111412525177, 2.0292205810546875, -4.070415019989014, -6.831437110900879]}} + [2022-05-25 12:25:36,324] [ INFO] - Response time 0.159053 s. ``` * Python API @@ -299,7 +299,7 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespee Output: ``` bash - {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'vec': [1.421751856803894, 5.626245498657227, -5.342077255249023, 1.1773887872695923, 3.3080549240112305, 1.7565933465957642, 5.167886257171631, 10.806358337402344, -3.8226819038391113, -5.614140033721924, 2.6238479614257812, -0.8072972893714905, 1.9635076522827148, -7.312870025634766, 0.011035939678549767, -9.723129272460938, 0.6619706153869629, -6.976806163787842, 10.213476181030273, 7.494769096374512, 2.9105682373046875, 3.8949244022369385, 3.799983501434326, 7.106168746948242, 16.90532875061035, -7.149388313293457, 8.733108520507812, 3.423006296157837, -4.831653594970703, -11.403363227844238, 11.232224464416504, 7.127461910247803, -4.282842636108398, 2.452359437942505, -5.130749702453613, -18.17766761779785, -2.6116831302642822, -11.000344276428223, -6.731433391571045, 1.6564682722091675, 0.7618281245231628, 1.125300407409668, -2.0838370323181152, 4.725743293762207, -8.782588005065918, -3.5398752689361572, 3.8142364025115967, 5.142068862915039, 2.1620609760284424, 4.09643030166626, -6.416214942932129, 12.747446060180664, 1.9429892301559448, -15.15294361114502, 6.417416095733643, 16.09701156616211, -9.716667175292969, -1.9920575618743896, -3.36494779586792, -1.8719440698623657, 11.567351341247559, 3.6978814601898193, 11.258262634277344, 7.442368507385254, 9.183408737182617, 4.528149127960205, -1.2417854070663452, 4.395912170410156, 6.6727728843688965, 5.88988733291626, 7.627128601074219, -0.6691966652870178, -11.889698028564453, -9.20886516571045, -7.42740535736084, -3.777663230895996, 6.917238712310791, -9.848755836486816, -2.0944676399230957, 
-5.1351165771484375, 0.4956451654434204, 9.317537307739258, -5.914181232452393, -1.809860348701477, -0.11738915741443634, -7.1692705154418945, -1.057827353477478, -5.721670627593994, -5.117385387420654, 16.13765525817871, -4.473617076873779, 7.6624321937561035, -0.55381840467453, 9.631585121154785, -6.470459461212158, -8.548508644104004, 4.371616840362549, -0.7970245480537415, 4.4789886474609375, -2.975860834121704, 3.2721822261810303, 2.838287830352783, 5.134591102600098, -9.19079875946045, -0.5657302737236023, -4.8745832443237305, 2.3165574073791504, -5.984319686889648, -2.1798853874206543, 0.3554139733314514, -0.3178512752056122, 9.493552207946777, 2.1144471168518066, 4.358094692230225, -12.089824676513672, 8.451693534851074, -7.925466537475586, 4.624246597290039, 4.428936958312988, 18.69200897216797, -2.6204581260681152, -5.14918851852417, -0.3582090139389038, 8.488558769226074, 4.98148775100708, -9.326835632324219, -2.2544219493865967, 6.641760349273682, 1.2119598388671875, 10.977124214172363, 16.555034637451172, 3.3238420486450195, 9.551861763000488, -1.6676981449127197, -0.7953944206237793, -8.605667114257812, -0.4735655188560486, 2.674196243286133, -5.359177112579346, -2.66738224029541, 0.6660683155059814, 15.44322681427002, 4.740593433380127, -3.472534418106079, 11.592567443847656, -2.0544962882995605, 1.736127495765686, -8.265326499938965, -9.30447769165039, 5.406829833984375, -1.518022894859314, -7.746612548828125, -6.089611053466797, 0.07112743705511093, -0.3490503430366516, -8.64989185333252, -9.998957633972168, -2.564845085144043, -0.5399947762489319, 2.6018123626708984, -0.3192799389362335, -1.8815255165100098, -2.0721492767333984, -3.410574436187744, -8.29980754852295, 1.483638048171997, -15.365986824035645, -8.288211822509766, 3.884779930114746, -3.4876468181610107, 7.362999439239502, 0.4657334089279175, 3.1326050758361816, 12.438895225524902, -1.8337041139602661, 4.532927989959717, 2.7264339923858643, 10.14534854888916, -6.521963596343994, 2.897155523300171, -3.392582654953003, 5.079153060913086, 7.7597246170043945, 4.677570819854736, 5.845779895782471, 2.402411460876465, 7.7071051597595215, 3.9711380004882812, -6.39003849029541, 6.12687873840332, -3.776029348373413, -11.118121147155762]}} + {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'vec': [-1.3251205682754517, 7.860682487487793, -4.620625972747803, 0.3000721037387848, 2.2648534774780273, -1.1931440830230713, 3.064713716506958, 7.673594951629639, -6.004472732543945, -12.024259567260742, -1.9496068954467773, 3.126953601837158, 1.6188379526138306, -7.638310432434082, -1.2299772500991821, -12.33833122253418, 2.1373026371002197, -5.395712375640869, 9.717328071594238, 5.675230503082275, 3.7805123329162598, 3.0597171783447266, 3.429692029953003, 8.9760103225708, 13.174124717712402, -0.5313228368759155, 8.942471504211426, 4.465109825134277, -4.426247596740723, -9.726503372192383, 8.399328231811523, 7.223917484283447, -7.435853958129883, 2.9441683292388916, -4.343039512634277, -13.886964797973633, -1.6346734762191772, -10.902740478515625, -5.311244964599609, 3.800722122192383, 3.897603750228882, -2.123077392578125, -2.3521194458007812, 4.151031017303467, -7.404866695404053, 0.13911646604537964, 2.4626107215881348, 4.96645450592041, 0.9897574186325073, 5.483975410461426, -3.3574001789093018, 10.13400650024414, -0.6120170950889587, -10.403095245361328, 4.600754261016846, 16.009349822998047, -7.78369140625, -4.194530487060547, -6.93686056137085, 1.1789555549621582, 11.490800857543945, 
4.23802375793457, 9.550930976867676, 8.375045776367188, 7.508914470672607, -0.6570729613304138, -0.3005157709121704, 2.8406054973602295, 3.0828027725219727, 0.7308170199394226, 6.1483540534973145, 0.1376611888408661, -13.424735069274902, -7.746140480041504, -2.322798252105713, -8.305252075195312, 2.98791241645813, -10.99522876739502, 0.15211068093776703, -2.3820347785949707, -1.7984174489974976, 8.49562931060791, -5.852236747741699, -3.755497932434082, 0.6989710927009583, -5.270299434661865, -2.6188621520996094, -1.8828465938568115, -4.6466498374938965, 14.078543663024902, -0.5495333075523376, 10.579157829284668, -3.216050148010254, 9.349003791809082, -4.381077766418457, -11.675816535949707, -2.863020658493042, 4.5721755027771, 2.246612071990967, -4.574341773986816, 1.8610187768936157, 2.3767874240875244, 5.625787734985352, -9.784077644348145, 0.6496725678443909, -1.457950472831726, 0.4263263940811157, -4.921126365661621, -2.4547839164733887, 3.4869801998138428, -0.4265422224998474, 8.341268539428711, 1.356552004814148, 7.096688270568848, -13.102828979492188, 8.01673412322998, -7.115934371948242, 1.8699780702590942, 0.20872099697589874, 14.699383735656738, -1.0252779722213745, -2.6107232570648193, -2.5082311630249023, 8.427192687988281, 6.913852691650391, -6.29124641418457, 0.6157366037368774, 2.489687919616699, -3.4668266773223877, 9.92176342010498, 11.200815200805664, -0.19664029777050018, 7.491600513458252, -0.6231271624565125, -0.2584814429283142, -9.947997093200684, -0.9611040949821472, 1.1649218797683716, -2.1907122135162354, -1.502848744392395, -0.5192610621452332, 15.165953636169434, 2.4649462699890137, -0.998044490814209, 7.44166374206543, -2.0768048763275146, 3.5896823406219482, -7.305543422698975, -7.562084674835205, 4.32333517074585, 0.08044180274009705, -6.564010143280029, -2.314805269241333, -1.7642345428466797, -2.470881700515747, -7.6756181716918945, -9.548877716064453, -1.017755389213562, 0.1698644608259201, 2.5877134799957275, -1.8752295970916748, -0.36614322662353516, -6.049378395080566, -2.3965611457824707, -5.945338726043701, 0.9424033164978027, -13.155974388122559, -7.45780086517334, 0.14658108353614807, -3.7427968978881836, 5.841492652893066, -1.2872905731201172, 5.569431304931641, 12.570590019226074, 1.0939218997955322, 2.2142086029052734, 1.9181575775146484, 6.991420745849609, -5.888138771057129, 3.1409823894500732, -2.0036280155181885, 2.4434285163879395, 9.973138809204102, 5.036680221557617, 2.005120277404785, 2.861560344696045, 5.860223770141602, 2.917618751525879, -1.63111412525177, 2.0292205810546875, -4.070415019989014, -6.831437110900879]}} ``` #### 7.2 Get the score between speaker audio embedding @@ -331,12 +331,12 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespee Output: ``` bash - [2022-05-09 10:28:40,556] [ INFO] - vector score http client start - [2022-05-09 10:28:40,556] [ INFO] - enroll audio: 85236145389.wav, test audio: 123456789.wav - [2022-05-09 10:28:40,556] [ INFO] - endpoint: http://127.0.0.1:8090/paddlespeech/vector/score - [2022-05-09 10:28:40,731] [ INFO] - The vector score is: {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'score': 0.4292638897895813}} - [2022-05-09 10:28:40,731] [ INFO] - The vector: None - [2022-05-09 10:28:40,731] [ INFO] - Response time 0.175514 s. 
+ [2022-05-25 12:33:24,527] [ INFO] - vector score http client start + [2022-05-25 12:33:24,527] [ INFO] - enroll audio: 85236145389.wav, test audio: 123456789.wav + [2022-05-25 12:33:24,528] [ INFO] - endpoint: http://127.0.0.1:8790/paddlespeech/vector/score + [2022-05-25 12:33:24,695] [ INFO] - The vector score is: {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'score': 0.45332613587379456}} + [2022-05-25 12:33:24,696] [ INFO] - The vector: {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'score': 0.45332613587379456}} + [2022-05-25 12:33:24,696] [ INFO] - Response time 0.168271 s. ``` * Python API @@ -358,10 +358,11 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespee Output: ``` bash - [2022-05-09 10:34:54,769] [ INFO] - vector score http client start - [2022-05-09 10:34:54,771] [ INFO] - enroll audio: 85236145389.wav, test audio: 123456789.wav - [2022-05-09 10:34:54,771] [ INFO] - endpoint: http://127.0.0.1:8090/paddlespeech/vector/score - [2022-05-09 10:34:55,026] [ INFO] - The vector score is: {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'score': 0.4292638897895813}} + [2022-05-25 12:30:14,143] [ INFO] - vector score http client start + [2022-05-25 12:30:14,143] [ INFO] - enroll audio: 85236145389.wav, test audio: 123456789.wav + [2022-05-25 12:30:14,143] [ INFO] - endpoint: http://127.0.0.1:8790/paddlespeech/vector/score + [2022-05-25 12:30:14,363] [ INFO] - The vector score is: {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'score': 0.45332613587379456}} + {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'score': 0.45332613587379456}} ``` ### 8. 
Punctuation prediction diff --git a/demos/speech_server/README_cn.md b/demos/speech_server/README_cn.md index 4895b182b..8c95a989b 100644 --- a/demos/speech_server/README_cn.md +++ b/demos/speech_server/README_cn.md @@ -277,12 +277,12 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespee 输出: ``` bash - [2022-05-08 00:18:44,249] [ INFO] - vector http client start - [2022-05-08 00:18:44,250] [ INFO] - the input audio: 85236145389.wav - [2022-05-08 00:18:44,250] [ INFO] - endpoint: http://127.0.0.1:8090/paddlespeech/vector - [2022-05-08 00:18:44,250] [ INFO] - http://127.0.0.1:8590/paddlespeech/vector - [2022-05-08 00:18:44,406] [ INFO] - The vector: {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'vec': [1.421751856803894, 5.626245498657227, -5.342077255249023, 1.1773887872695923, 3.3080549240112305, 1.7565933465957642, 5.167886257171631, 10.806358337402344, -3.8226819038391113, -5.614140033721924, 2.6238479614257812, -0.8072972893714905, 1.9635076522827148, -7.312870025634766, 0.011035939678549767, -9.723129272460938, 0.6619706153869629, -6.976806163787842, 10.213476181030273, 7.494769096374512, 2.9105682373046875, 3.8949244022369385, 3.799983501434326, 7.106168746948242, 16.90532875061035, -7.149388313293457, 8.733108520507812, 3.423006296157837, -4.831653594970703, -11.403363227844238, 11.232224464416504, 7.127461910247803, -4.282842636108398, 2.452359437942505, -5.130749702453613, -18.17766761779785, -2.6116831302642822, -11.000344276428223, -6.731433391571045, 1.6564682722091675, 0.7618281245231628, 1.125300407409668, -2.0838370323181152, 4.725743293762207, -8.782588005065918, -3.5398752689361572, 3.8142364025115967, 5.142068862915039, 2.1620609760284424, 4.09643030166626, -6.416214942932129, 12.747446060180664, 1.9429892301559448, -15.15294361114502, 6.417416095733643, 16.09701156616211, -9.716667175292969, -1.9920575618743896, -3.36494779586792, -1.8719440698623657, 11.567351341247559, 3.6978814601898193, 11.258262634277344, 7.442368507385254, 9.183408737182617, 4.528149127960205, -1.2417854070663452, 4.395912170410156, 6.6727728843688965, 5.88988733291626, 7.627128601074219, -0.6691966652870178, -11.889698028564453, -9.20886516571045, -7.42740535736084, -3.777663230895996, 6.917238712310791, -9.848755836486816, -2.0944676399230957, -5.1351165771484375, 0.4956451654434204, 9.317537307739258, -5.914181232452393, -1.809860348701477, -0.11738915741443634, -7.1692705154418945, -1.057827353477478, -5.721670627593994, -5.117385387420654, 16.13765525817871, -4.473617076873779, 7.6624321937561035, -0.55381840467453, 9.631585121154785, -6.470459461212158, -8.548508644104004, 4.371616840362549, -0.7970245480537415, 4.4789886474609375, -2.975860834121704, 3.2721822261810303, 2.838287830352783, 5.134591102600098, -9.19079875946045, -0.5657302737236023, -4.8745832443237305, 2.3165574073791504, -5.984319686889648, -2.1798853874206543, 0.3554139733314514, -0.3178512752056122, 9.493552207946777, 2.1144471168518066, 4.358094692230225, -12.089824676513672, 8.451693534851074, -7.925466537475586, 4.624246597290039, 4.428936958312988, 18.69200897216797, -2.6204581260681152, -5.14918851852417, -0.3582090139389038, 8.488558769226074, 4.98148775100708, -9.326835632324219, -2.2544219493865967, 6.641760349273682, 1.2119598388671875, 10.977124214172363, 16.555034637451172, 3.3238420486450195, 9.551861763000488, -1.6676981449127197, -0.7953944206237793, -8.605667114257812, -0.4735655188560486, 2.674196243286133, -5.359177112579346, -2.66738224029541, 
0.6660683155059814, 15.44322681427002, 4.740593433380127, -3.472534418106079, 11.592567443847656, -2.0544962882995605, 1.736127495765686, -8.265326499938965, -9.30447769165039, 5.406829833984375, -1.518022894859314, -7.746612548828125, -6.089611053466797, 0.07112743705511093, -0.3490503430366516, -8.64989185333252, -9.998957633972168, -2.564845085144043, -0.5399947762489319, 2.6018123626708984, -0.3192799389362335, -1.8815255165100098, -2.0721492767333984, -3.410574436187744, -8.29980754852295, 1.483638048171997, -15.365986824035645, -8.288211822509766, 3.884779930114746, -3.4876468181610107, 7.362999439239502, 0.4657334089279175, 3.1326050758361816, 12.438895225524902, -1.8337041139602661, 4.532927989959717, 2.7264339923858643, 10.14534854888916, -6.521963596343994, 2.897155523300171, -3.392582654953003, 5.079153060913086, 7.7597246170043945, 4.677570819854736, 5.845779895782471, 2.402411460876465, 7.7071051597595215, 3.9711380004882812, -6.39003849029541, 6.12687873840332, -3.776029348373413, -11.118121147155762]}} - [2022-05-08 00:18:44,406] [ INFO] - Response time 0.156481 s. + [2022-05-25 12:25:36,165] [ INFO] - vector http client start + [2022-05-25 12:25:36,165] [ INFO] - the input audio: 85236145389.wav + [2022-05-25 12:25:36,165] [ INFO] - endpoint: http://127.0.0.1:8790/paddlespeech/vector + [2022-05-25 12:25:36,166] [ INFO] - http://127.0.0.1:8790/paddlespeech/vector + [2022-05-25 12:25:36,324] [ INFO] - The vector: {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'vec': [-1.3251205682754517, 7.860682487487793, -4.620625972747803, 0.3000721037387848, 2.2648534774780273, -1.1931440830230713, 3.064713716506958, 7.673594951629639, -6.004472732543945, -12.024259567260742, -1.9496068954467773, 3.126953601837158, 1.6188379526138306, -7.638310432434082, -1.2299772500991821, -12.33833122253418, 2.1373026371002197, -5.395712375640869, 9.717328071594238, 5.675230503082275, 3.7805123329162598, 3.0597171783447266, 3.429692029953003, 8.9760103225708, 13.174124717712402, -0.5313228368759155, 8.942471504211426, 4.465109825134277, -4.426247596740723, -9.726503372192383, 8.399328231811523, 7.223917484283447, -7.435853958129883, 2.9441683292388916, -4.343039512634277, -13.886964797973633, -1.6346734762191772, -10.902740478515625, -5.311244964599609, 3.800722122192383, 3.897603750228882, -2.123077392578125, -2.3521194458007812, 4.151031017303467, -7.404866695404053, 0.13911646604537964, 2.4626107215881348, 4.96645450592041, 0.9897574186325073, 5.483975410461426, -3.3574001789093018, 10.13400650024414, -0.6120170950889587, -10.403095245361328, 4.600754261016846, 16.009349822998047, -7.78369140625, -4.194530487060547, -6.93686056137085, 1.1789555549621582, 11.490800857543945, 4.23802375793457, 9.550930976867676, 8.375045776367188, 7.508914470672607, -0.6570729613304138, -0.3005157709121704, 2.8406054973602295, 3.0828027725219727, 0.7308170199394226, 6.1483540534973145, 0.1376611888408661, -13.424735069274902, -7.746140480041504, -2.322798252105713, -8.305252075195312, 2.98791241645813, -10.99522876739502, 0.15211068093776703, -2.3820347785949707, -1.7984174489974976, 8.49562931060791, -5.852236747741699, -3.755497932434082, 0.6989710927009583, -5.270299434661865, -2.6188621520996094, -1.8828465938568115, -4.6466498374938965, 14.078543663024902, -0.5495333075523376, 10.579157829284668, -3.216050148010254, 9.349003791809082, -4.381077766418457, -11.675816535949707, -2.863020658493042, 4.5721755027771, 2.246612071990967, -4.574341773986816, 1.8610187768936157, 
2.3767874240875244, 5.625787734985352, -9.784077644348145, 0.6496725678443909, -1.457950472831726, 0.4263263940811157, -4.921126365661621, -2.4547839164733887, 3.4869801998138428, -0.4265422224998474, 8.341268539428711, 1.356552004814148, 7.096688270568848, -13.102828979492188, 8.01673412322998, -7.115934371948242, 1.8699780702590942, 0.20872099697589874, 14.699383735656738, -1.0252779722213745, -2.6107232570648193, -2.5082311630249023, 8.427192687988281, 6.913852691650391, -6.29124641418457, 0.6157366037368774, 2.489687919616699, -3.4668266773223877, 9.92176342010498, 11.200815200805664, -0.19664029777050018, 7.491600513458252, -0.6231271624565125, -0.2584814429283142, -9.947997093200684, -0.9611040949821472, 1.1649218797683716, -2.1907122135162354, -1.502848744392395, -0.5192610621452332, 15.165953636169434, 2.4649462699890137, -0.998044490814209, 7.44166374206543, -2.0768048763275146, 3.5896823406219482, -7.305543422698975, -7.562084674835205, 4.32333517074585, 0.08044180274009705, -6.564010143280029, -2.314805269241333, -1.7642345428466797, -2.470881700515747, -7.6756181716918945, -9.548877716064453, -1.017755389213562, 0.1698644608259201, 2.5877134799957275, -1.8752295970916748, -0.36614322662353516, -6.049378395080566, -2.3965611457824707, -5.945338726043701, 0.9424033164978027, -13.155974388122559, -7.45780086517334, 0.14658108353614807, -3.7427968978881836, 5.841492652893066, -1.2872905731201172, 5.569431304931641, 12.570590019226074, 1.0939218997955322, 2.2142086029052734, 1.9181575775146484, 6.991420745849609, -5.888138771057129, 3.1409823894500732, -2.0036280155181885, 2.4434285163879395, 9.973138809204102, 5.036680221557617, 2.005120277404785, 2.861560344696045, 5.860223770141602, 2.917618751525879, -1.63111412525177, 2.0292205810546875, -4.070415019989014, -6.831437110900879]}} + [2022-05-25 12:25:36,324] [ INFO] - Response time 0.159053 s. 
``` * Python API @@ -302,7 +302,7 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespee 输出: ``` bash - {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'vec': [1.421751856803894, 5.626245498657227, -5.342077255249023, 1.1773887872695923, 3.3080549240112305, 1.7565933465957642, 5.167886257171631, 10.806358337402344, -3.8226819038391113, -5.614140033721924, 2.6238479614257812, -0.8072972893714905, 1.9635076522827148, -7.312870025634766, 0.011035939678549767, -9.723129272460938, 0.6619706153869629, -6.976806163787842, 10.213476181030273, 7.494769096374512, 2.9105682373046875, 3.8949244022369385, 3.799983501434326, 7.106168746948242, 16.90532875061035, -7.149388313293457, 8.733108520507812, 3.423006296157837, -4.831653594970703, -11.403363227844238, 11.232224464416504, 7.127461910247803, -4.282842636108398, 2.452359437942505, -5.130749702453613, -18.17766761779785, -2.6116831302642822, -11.000344276428223, -6.731433391571045, 1.6564682722091675, 0.7618281245231628, 1.125300407409668, -2.0838370323181152, 4.725743293762207, -8.782588005065918, -3.5398752689361572, 3.8142364025115967, 5.142068862915039, 2.1620609760284424, 4.09643030166626, -6.416214942932129, 12.747446060180664, 1.9429892301559448, -15.15294361114502, 6.417416095733643, 16.09701156616211, -9.716667175292969, -1.9920575618743896, -3.36494779586792, -1.8719440698623657, 11.567351341247559, 3.6978814601898193, 11.258262634277344, 7.442368507385254, 9.183408737182617, 4.528149127960205, -1.2417854070663452, 4.395912170410156, 6.6727728843688965, 5.88988733291626, 7.627128601074219, -0.6691966652870178, -11.889698028564453, -9.20886516571045, -7.42740535736084, -3.777663230895996, 6.917238712310791, -9.848755836486816, -2.0944676399230957, -5.1351165771484375, 0.4956451654434204, 9.317537307739258, -5.914181232452393, -1.809860348701477, -0.11738915741443634, -7.1692705154418945, -1.057827353477478, -5.721670627593994, -5.117385387420654, 16.13765525817871, -4.473617076873779, 7.6624321937561035, -0.55381840467453, 9.631585121154785, -6.470459461212158, -8.548508644104004, 4.371616840362549, -0.7970245480537415, 4.4789886474609375, -2.975860834121704, 3.2721822261810303, 2.838287830352783, 5.134591102600098, -9.19079875946045, -0.5657302737236023, -4.8745832443237305, 2.3165574073791504, -5.984319686889648, -2.1798853874206543, 0.3554139733314514, -0.3178512752056122, 9.493552207946777, 2.1144471168518066, 4.358094692230225, -12.089824676513672, 8.451693534851074, -7.925466537475586, 4.624246597290039, 4.428936958312988, 18.69200897216797, -2.6204581260681152, -5.14918851852417, -0.3582090139389038, 8.488558769226074, 4.98148775100708, -9.326835632324219, -2.2544219493865967, 6.641760349273682, 1.2119598388671875, 10.977124214172363, 16.555034637451172, 3.3238420486450195, 9.551861763000488, -1.6676981449127197, -0.7953944206237793, -8.605667114257812, -0.4735655188560486, 2.674196243286133, -5.359177112579346, -2.66738224029541, 0.6660683155059814, 15.44322681427002, 4.740593433380127, -3.472534418106079, 11.592567443847656, -2.0544962882995605, 1.736127495765686, -8.265326499938965, -9.30447769165039, 5.406829833984375, -1.518022894859314, -7.746612548828125, -6.089611053466797, 0.07112743705511093, -0.3490503430366516, -8.64989185333252, -9.998957633972168, -2.564845085144043, -0.5399947762489319, 2.6018123626708984, -0.3192799389362335, -1.8815255165100098, -2.0721492767333984, -3.410574436187744, -8.29980754852295, 1.483638048171997, -15.365986824035645, 
-8.288211822509766, 3.884779930114746, -3.4876468181610107, 7.362999439239502, 0.4657334089279175, 3.1326050758361816, 12.438895225524902, -1.8337041139602661, 4.532927989959717, 2.7264339923858643, 10.14534854888916, -6.521963596343994, 2.897155523300171, -3.392582654953003, 5.079153060913086, 7.7597246170043945, 4.677570819854736, 5.845779895782471, 2.402411460876465, 7.7071051597595215, 3.9711380004882812, -6.39003849029541, 6.12687873840332, -3.776029348373413, -11.118121147155762]}} + {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'vec': [-1.3251205682754517, 7.860682487487793, -4.620625972747803, 0.3000721037387848, 2.2648534774780273, -1.1931440830230713, 3.064713716506958, 7.673594951629639, -6.004472732543945, -12.024259567260742, -1.9496068954467773, 3.126953601837158, 1.6188379526138306, -7.638310432434082, -1.2299772500991821, -12.33833122253418, 2.1373026371002197, -5.395712375640869, 9.717328071594238, 5.675230503082275, 3.7805123329162598, 3.0597171783447266, 3.429692029953003, 8.9760103225708, 13.174124717712402, -0.5313228368759155, 8.942471504211426, 4.465109825134277, -4.426247596740723, -9.726503372192383, 8.399328231811523, 7.223917484283447, -7.435853958129883, 2.9441683292388916, -4.343039512634277, -13.886964797973633, -1.6346734762191772, -10.902740478515625, -5.311244964599609, 3.800722122192383, 3.897603750228882, -2.123077392578125, -2.3521194458007812, 4.151031017303467, -7.404866695404053, 0.13911646604537964, 2.4626107215881348, 4.96645450592041, 0.9897574186325073, 5.483975410461426, -3.3574001789093018, 10.13400650024414, -0.6120170950889587, -10.403095245361328, 4.600754261016846, 16.009349822998047, -7.78369140625, -4.194530487060547, -6.93686056137085, 1.1789555549621582, 11.490800857543945, 4.23802375793457, 9.550930976867676, 8.375045776367188, 7.508914470672607, -0.6570729613304138, -0.3005157709121704, 2.8406054973602295, 3.0828027725219727, 0.7308170199394226, 6.1483540534973145, 0.1376611888408661, -13.424735069274902, -7.746140480041504, -2.322798252105713, -8.305252075195312, 2.98791241645813, -10.99522876739502, 0.15211068093776703, -2.3820347785949707, -1.7984174489974976, 8.49562931060791, -5.852236747741699, -3.755497932434082, 0.6989710927009583, -5.270299434661865, -2.6188621520996094, -1.8828465938568115, -4.6466498374938965, 14.078543663024902, -0.5495333075523376, 10.579157829284668, -3.216050148010254, 9.349003791809082, -4.381077766418457, -11.675816535949707, -2.863020658493042, 4.5721755027771, 2.246612071990967, -4.574341773986816, 1.8610187768936157, 2.3767874240875244, 5.625787734985352, -9.784077644348145, 0.6496725678443909, -1.457950472831726, 0.4263263940811157, -4.921126365661621, -2.4547839164733887, 3.4869801998138428, -0.4265422224998474, 8.341268539428711, 1.356552004814148, 7.096688270568848, -13.102828979492188, 8.01673412322998, -7.115934371948242, 1.8699780702590942, 0.20872099697589874, 14.699383735656738, -1.0252779722213745, -2.6107232570648193, -2.5082311630249023, 8.427192687988281, 6.913852691650391, -6.29124641418457, 0.6157366037368774, 2.489687919616699, -3.4668266773223877, 9.92176342010498, 11.200815200805664, -0.19664029777050018, 7.491600513458252, -0.6231271624565125, -0.2584814429283142, -9.947997093200684, -0.9611040949821472, 1.1649218797683716, -2.1907122135162354, -1.502848744392395, -0.5192610621452332, 15.165953636169434, 2.4649462699890137, -0.998044490814209, 7.44166374206543, -2.0768048763275146, 3.5896823406219482, -7.305543422698975, -7.562084674835205, 
4.32333517074585, 0.08044180274009705, -6.564010143280029, -2.314805269241333, -1.7642345428466797, -2.470881700515747, -7.6756181716918945, -9.548877716064453, -1.017755389213562, 0.1698644608259201, 2.5877134799957275, -1.8752295970916748, -0.36614322662353516, -6.049378395080566, -2.3965611457824707, -5.945338726043701, 0.9424033164978027, -13.155974388122559, -7.45780086517334, 0.14658108353614807, -3.7427968978881836, 5.841492652893066, -1.2872905731201172, 5.569431304931641, 12.570590019226074, 1.0939218997955322, 2.2142086029052734, 1.9181575775146484, 6.991420745849609, -5.888138771057129, 3.1409823894500732, -2.0036280155181885, 2.4434285163879395, 9.973138809204102, 5.036680221557617, 2.005120277404785, 2.861560344696045, 5.860223770141602, 2.917618751525879, -1.63111412525177, 2.0292205810546875, -4.070415019989014, -6.831437110900879]}} ``` #### 7.2 音频声纹打分 @@ -333,12 +333,12 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespee 输出: ``` bash - [2022-05-09 10:28:40,556] [ INFO] - vector score http client start - [2022-05-09 10:28:40,556] [ INFO] - enroll audio: 85236145389.wav, test audio: 123456789.wav - [2022-05-09 10:28:40,556] [ INFO] - endpoint: http://127.0.0.1:8090/paddlespeech/vector/score - [2022-05-09 10:28:40,731] [ INFO] - The vector score is: {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'score': 0.4292638897895813}} - [2022-05-09 10:28:40,731] [ INFO] - The vector: None - [2022-05-09 10:28:40,731] [ INFO] - Response time 0.175514 s. + [2022-05-25 12:33:24,527] [ INFO] - vector score http client start + [2022-05-25 12:33:24,527] [ INFO] - enroll audio: 85236145389.wav, test audio: 123456789.wav + [2022-05-25 12:33:24,528] [ INFO] - endpoint: http://127.0.0.1:8790/paddlespeech/vector/score + [2022-05-25 12:33:24,695] [ INFO] - The vector score is: {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'score': 0.45332613587379456}} + [2022-05-25 12:33:24,696] [ INFO] - The vector: {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'score': 0.45332613587379456}} + [2022-05-25 12:33:24,696] [ INFO] - Response time 0.168271 s. 
``` * Python API @@ -352,7 +352,7 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespee enroll_audio="85236145389.wav", test_audio="123456789.wav", server_ip="127.0.0.1", - port=8090, + port=8790, task="score") print(res) ``` @@ -360,10 +360,11 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespee 输出: ``` bash - [2022-05-09 10:34:54,769] [ INFO] - vector score http client start - [2022-05-09 10:34:54,771] [ INFO] - enroll audio: 85236145389.wav, test audio: 123456789.wav - [2022-05-09 10:34:54,771] [ INFO] - endpoint: http://127.0.0.1:8590/paddlespeech/vector/score - [2022-05-09 10:34:55,026] [ INFO] - The vector score is: {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'score': 0.4292638897895813}} + [2022-05-25 12:30:14,143] [ INFO] - vector score http client start + [2022-05-25 12:30:14,143] [ INFO] - enroll audio: 85236145389.wav, test audio: 123456789.wav + [2022-05-25 12:30:14,143] [ INFO] - endpoint: http://127.0.0.1:8790/paddlespeech/vector/score + [2022-05-25 12:30:14,363] [ INFO] - The vector score is: {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'score': 0.45332613587379456}} + {'success': True, 'code': 200, 'message': {'description': 'success'}, 'result': {'score': 0.45332613587379456}} ``` diff --git a/paddlespeech/server/tests/vector/http_client.py b/paddlespeech/server/tests/vector/http_client.py deleted file mode 100644 index 49f2adf7c..000000000 --- a/paddlespeech/server/tests/vector/http_client.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the -import base64 -import json -import time - -import requests - - -def readwav2base64(wav_file): - """ - read wave file and covert to base64 string - """ - with open(wav_file, 'rb') as f: - base64_bytes = base64.b64encode(f.read()) - base64_string = base64_bytes.decode('utf-8') - return base64_string - - -def main(): - """ - main func - """ - url = "http://127.0.0.1:8090/paddlespeech/asr" - - # start Timestamp - time_start = time.time() - - test_audio_dir = "./16_audio.wav" - audio = readwav2base64(test_audio_dir) - - data = { - "audio": audio, - "audio_format": "wav", - "sample_rate": 16000, - "lang": "zh_cn", - } - - r = requests.post(url=url, data=json.dumps(data)) - - # ending Timestamp - time_end = time.time() - print('time cost', time_end - time_start, 's') - - print(r.json()) - - -if __name__ == "__main__": - main() From a5605978fad194e6877f6d74349c3d8ee2d89e54 Mon Sep 17 00:00:00 2001 From: xiongxinlei Date: Wed, 25 May 2022 13:07:56 +0800 Subject: [PATCH 040/127] update the acs note, test=doc --- paddlespeech/server/bin/paddlespeech_client.py | 1 + paddlespeech/server/engine/acs/python/acs_engine.py | 6 ------ 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/paddlespeech/server/bin/paddlespeech_client.py b/paddlespeech/server/bin/paddlespeech_client.py index 74e7ce3fe..fb521b309 100644 --- a/paddlespeech/server/bin/paddlespeech_client.py +++ b/paddlespeech/server/bin/paddlespeech_client.py @@ -752,6 +752,7 @@ class VectorClientExecutor(BaseExecutor): res = handler.run(enroll_audio, test_audio, audio_format, sample_rate) logger.info(f"The vector score is: {res}") + return res else: logger.error(f"Sorry, we have not support such task {task}") diff --git a/paddlespeech/server/engine/acs/python/acs_engine.py b/paddlespeech/server/engine/acs/python/acs_engine.py index d52852dcf..3eb47e86d 100644 --- a/paddlespeech/server/engine/acs/python/acs_engine.py +++ b/paddlespeech/server/engine/acs/python/acs_engine.py @@ -118,12 +118,6 @@ class ACSEngine(BaseEngine): msg = ws.recv() msg = json.loads(msg) logger.info(f"audio result: {msg}") - # samples, sample_rate = soundfile.read(audio_data, dtype='int16') - - # ws.send_binary(samples.tobytes()) - # msg = ws.recv() - # msg = json.loads(msg) - # logger.info(f"audio result: {msg}") # 3. 
send chunk audio data to engine logger.info("send the end signal") From be8a78a9d1a70e771f841adb5453e91ca19d5966 Mon Sep 17 00:00:00 2001 From: xiongxinlei Date: Wed, 25 May 2022 14:22:38 +0800 Subject: [PATCH 041/127] fix the vector model type error, test=doc --- demos/audio_content_search/conf/acs_application.yaml | 1 + docs/source/released_model.md | 2 +- paddlespeech/server/engine/acs/python/acs_engine.py | 7 +++++-- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/demos/audio_content_search/conf/acs_application.yaml b/demos/audio_content_search/conf/acs_application.yaml index d3c5e3039..dbddd06fb 100644 --- a/demos/audio_content_search/conf/acs_application.yaml +++ b/demos/audio_content_search/conf/acs_application.yaml @@ -28,6 +28,7 @@ acs_python: word_list: "./conf/words.txt" sample_rate: 16000 device: 'cpu' # set 'gpu:id' or 'cpu' + ping_timeout: 100 # seconds diff --git a/docs/source/released_model.md b/docs/source/released_model.md index 3231fecd4..67e7b62e1 100644 --- a/docs/source/released_model.md +++ b/docs/source/released_model.md @@ -82,7 +82,7 @@ PANN | ESC-50 |[pann-esc50](../../examples/esc50/cls0)|[esc50_cnn6.tar.gz](https Model Type | Dataset| Example Link | Pretrained Models | Static Models :-------------:| :------------:| :-----: | :-----: | :-----: -PANN | VoxCeleb| [voxceleb_ecapatdnn](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/voxceleb/sv0) | [ecapatdnn.tar.gz](https://paddlespeech.bj.bcebos.com/vector/voxceleb/sv0_ecapa_tdnn_voxceleb12_ckpt_0_2_1.tar.gz) | - +ECAPA-TDNN | VoxCeleb| [voxceleb_ecapatdnn](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/voxceleb/sv0) | [ecapatdnn.tar.gz](https://paddlespeech.bj.bcebos.com/vector/voxceleb/sv0_ecapa_tdnn_voxceleb12_ckpt_0_2_1.tar.gz) | - ## Punctuation Restoration Models Model Type | Dataset| Example Link | Pretrained Models diff --git a/paddlespeech/server/engine/acs/python/acs_engine.py b/paddlespeech/server/engine/acs/python/acs_engine.py index 3eb47e86d..930101ac9 100644 --- a/paddlespeech/server/engine/acs/python/acs_engine.py +++ b/paddlespeech/server/engine/acs/python/acs_engine.py @@ -46,6 +46,9 @@ class ACSEngine(BaseEngine): try: self.config = config self.device = self.config.get("device", paddle.get_device()) + + # websocket default ping timeout is 20 seconds + self.ping_timeout = self.config.get("ping_timeout", 20) paddle.set_device(self.device) logger.info(f"ACS Engine set the device: {self.device}") @@ -97,8 +100,8 @@ class ACSEngine(BaseEngine): logger.error("No asr server, please input valid ip and port") return "" ws = websocket.WebSocket() - ws.connect(self.url) - # with websocket.WebSocket.connect(self.url) as ws: + logger.info(f"set the ping timeout: {self.ping_timeout} seconds") + ws.connect(self.url, ping_timeout=self.ping_timeout) audio_info = json.dumps( { "name": "test.wav", From 07c0d7d7cc265ba191b002e6e62a40dccb1f55ff Mon Sep 17 00:00:00 2001 From: xiongxinlei Date: Wed, 25 May 2022 14:25:43 +0800 Subject: [PATCH 042/127] remove old vector model info, test=doc --- demos/audio_content_search/README.md | 7 ++++++- demos/audio_content_search/README_cn.md | 6 +++++- demos/speech_server/README_cn.md | 2 +- examples/voxceleb/sv0/RESULT.md | 1 - 4 files changed, 12 insertions(+), 4 deletions(-) diff --git a/demos/audio_content_search/README.md b/demos/audio_content_search/README.md index d73d6a59d..4428bf389 100644 --- a/demos/audio_content_search/README.md +++ b/demos/audio_content_search/README.md @@ -16,7 +16,12 @@ see 
[installation](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/doc You can choose one way from medium and hard to install paddlespeech. -The dependency refers to the requirements.txt +The dependencies are listed in requirements.txt; install them as follows: + +``` +pip install -r requirements.txt +``` + ### 2. Prepare Input File The input of this demo should be a WAV file(`.wav`), and the sample rate must be the same as the model. diff --git a/demos/audio_content_search/README_cn.md b/demos/audio_content_search/README_cn.md index c74af4cf1..6f51c4cf2 100644 --- a/demos/audio_content_search/README_cn.md +++ b/demos/audio_content_search/README_cn.md @@ -16,7 +16,11 @@ 请看[安装文档](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/docs/source/install_cn.md)。 你可以从 medium,hard 两种方式中选择一种方式安装。 -依赖参见 requirements.txt +依赖参见 requirements.txt,安装依赖如下: + +``` +pip install -r requirements.txt +``` ### 2. 准备输入 这个 demo 的输入应该是一个 WAV 文件(`.wav`),并且采样率必须与模型的采样率相同。 diff --git a/demos/speech_server/README_cn.md b/demos/speech_server/README_cn.md index 8c95a989b..29629b7e8 100644 --- a/demos/speech_server/README_cn.md +++ b/demos/speech_server/README_cn.md @@ -352,7 +352,7 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespee enroll_audio="85236145389.wav", test_audio="123456789.wav", server_ip="127.0.0.1", - port=8790, + port=8090, task="score") print(res) ``` diff --git a/examples/voxceleb/sv0/RESULT.md b/examples/voxceleb/sv0/RESULT.md index a1d2a1812..56ee887c6 100644 --- a/examples/voxceleb/sv0/RESULT.md +++ b/examples/voxceleb/sv0/RESULT.md @@ -4,5 +4,4 @@ | Model | Number of Params | Release | Config | dim | Test set | Cosine | Cosine + S-Norm | | --- | --- | --- | --- | --- | --- | --- | ---- | -| ECAPA-TDNN | 85M | 0.2.0 | conf/ecapa_tdnn.yaml |192 | test | 1.02 | 0.95 | | ECAPA-TDNN | 85M | 0.2.1 | conf/ecapa_tdnn.yaml | 192 | test | 0.8188 | 0.7815| From 6f7917b7f2b489b8341aeda2c8ff318975b84f78 Mon Sep 17 00:00:00 2001 From: Hui Zhang Date: Wed, 25 May 2022 09:25:17 +0000 Subject: [PATCH 043/127] fix streaming asr --- .../conf/ws_conformer_application.yaml | 2 +- ...plication.yaml => ws_ds2_application.yaml} | 0 .../server/engine/asr/online/asr_engine.py | 53 ++++--------------- 3 files changed, 12 insertions(+), 43 deletions(-) rename demos/streaming_asr_server/conf/{ws_application.yaml => ws_ds2_application.yaml} (100%) diff --git a/demos/streaming_asr_server/conf/ws_conformer_application.yaml b/demos/streaming_asr_server/conf/ws_conformer_application.yaml index 2affde073..6a10741bd 100644 --- a/demos/streaming_asr_server/conf/ws_conformer_application.yaml +++ b/demos/streaming_asr_server/conf/ws_conformer_application.yaml @@ -4,7 +4,7 @@ # SERVER SETTING # ################################################################################# host: 0.0.0.0 -port: 8090 +port: 8091 # The task format in the engin_list is: _ # task choices = ['asr_online'] diff --git a/demos/streaming_asr_server/conf/ws_application.yaml b/demos/streaming_asr_server/conf/ws_ds2_application.yaml similarity index 100% rename from demos/streaming_asr_server/conf/ws_application.yaml rename to demos/streaming_asr_server/conf/ws_ds2_application.yaml diff --git a/paddlespeech/server/engine/asr/online/asr_engine.py b/paddlespeech/server/engine/asr/online/asr_engine.py index 70bfcfb66..d7bd458f8 100644 --- a/paddlespeech/server/engine/asr/online/asr_engine.py +++ b/paddlespeech/server/engine/asr/online/asr_engine.py @@ -53,7 +53,7 @@ class PaddleASRConnectionHanddler: logger.info(
"create an paddle asr connection handler to process the websocket connection" ) - self.config = asr_engine.config + self.config = asr_engine.config # server config self.model_config = asr_engine.executor.config self.asr_engine = asr_engine @@ -249,10 +249,13 @@ class PaddleASRConnectionHanddler: def reset(self): if "deepspeech2" in self.model_type: # for deepspeech2 - self.chunk_state_h_box = copy.deepcopy( - self.asr_engine.executor.chunk_state_h_box) - self.chunk_state_c_box = copy.deepcopy( - self.asr_engine.executor.chunk_state_c_box) + # init state + self.chunk_state_h_box = np.zeros( + (self.model_config .num_rnn_layers, 1, self.model_config.rnn_layer_size), + dtype=float32) + self.chunk_state_c_box = np.zeros( + (self.model_config.num_rnn_layers, 1, self.model_config.rnn_layer_size), + dtype=float32) self.decoder.reset_decoder(batch_size=1) self.device = None @@ -803,36 +806,6 @@ class ASRServerExecutor(ASRExecutor): model_file=self.am_model, params_file=self.am_params, predictor_conf=self.am_predictor_conf) - - # decoder - logger.info("ASR engine start to create the ctc decoder instance") - self.decoder = CTCDecoder( - odim=self.config.output_dim, # is in vocab - enc_n_units=self.config.rnn_layer_size * 2, - blank_id=self.config.blank_id, - dropout_rate=0.0, - reduction=True, # sum - batch_average=True, # sum / batch_size - grad_norm_type=self.config.get('ctc_grad_norm_type', None)) - - # init decoder - logger.info("ASR engine start to init the ctc decoder") - cfg = self.config.decode - decode_batch_size = 1 # for online - self.decoder.init_decoder( - decode_batch_size, self.text_feature.vocab_list, - cfg.decoding_method, cfg.lang_model_path, cfg.alpha, cfg.beta, - cfg.beam_size, cfg.cutoff_prob, cfg.cutoff_top_n, - cfg.num_proc_bsearch) - - # init state box - self.chunk_state_h_box = np.zeros( - (self.config.num_rnn_layers, 1, self.config.rnn_layer_size), - dtype=float32) - self.chunk_state_c_box = np.zeros( - (self.config.num_rnn_layers, 1, self.config.rnn_layer_size), - dtype=float32) - elif "conformer" in model_type or "transformer" in model_type: model_name = model_type[:model_type.rindex( '_')] # model_type: {model_name}_{dataset} @@ -847,15 +820,11 @@ class ASRServerExecutor(ASRExecutor): model_dict = paddle.load(self.am_model) self.model.set_state_dict(model_dict) logger.info("create the transformer like model success") - - # update the ctc decoding - self.searcher = CTCPrefixBeamSearch(self.config.decode) - self.transformer_decode_reset() else: raise ValueError(f"Not support: {model_type}") return True - + class ASREngine(BaseEngine): """ASR server resource @@ -881,8 +850,8 @@ class ASREngine(BaseEngine): self.executor = ASRServerExecutor() try: - default_dev = paddle.get_device() - paddle.set_device(self.config.get("device", default_dev)) + self.device = self.config.get("device", paddle.get_device()) + paddle.set_device(self.device) except BaseException as e: logger.error( f"Set device failed, please check if device '{self.device}' is already used and the parameter 'device' in the yaml file" From f9f014d159e28efa788f4d241794420716d369ad Mon Sep 17 00:00:00 2001 From: TianYuan Date: Wed, 25 May 2022 10:39:28 +0000 Subject: [PATCH 044/127] add VITS readme, test=tts --- examples/aishell3/tts3/README.md | 30 ++-- examples/aishell3/voc1/README.md | 2 +- examples/aishell3/voc5/README.md | 21 +-- examples/csmsc/tts0/README.md | 30 ++-- examples/csmsc/tts2/README.md | 30 ++-- examples/csmsc/tts3/README.md | 31 ++-- examples/csmsc/tts3/README_cn.md | 30 ++-- 
examples/csmsc/vits/README.md | 146 ++++++++++++++++++ examples/csmsc/voc1/README.md | 2 +- examples/csmsc/voc3/README.md | 2 +- examples/csmsc/voc4/README.md | 2 +- examples/csmsc/voc5/README.md | 2 +- examples/csmsc/voc6/README.md | 2 +- examples/ljspeech/tts0/README.md | 30 ++-- examples/ljspeech/tts1/README.md | 2 +- examples/ljspeech/tts3/README.md | 30 ++-- examples/ljspeech/voc1/README.md | 2 +- examples/ljspeech/voc5/README.md | 21 +-- examples/vctk/tts3/README.md | 30 ++-- examples/vctk/voc1/README.md | 2 +- examples/vctk/voc5/README.md | 21 +-- .../t2s/exps/gan_vocoder/hifigan/train.py | 3 +- .../gan_vocoder/multi_band_melgan/train.py | 2 +- .../gan_vocoder/parallelwave_gan/train.py | 2 +- .../exps/gan_vocoder/style_melgan/train.py | 3 +- .../t2s/exps/transformer_tts/train.py | 2 +- paddlespeech/t2s/exps/vits/train.py | 5 +- paddlespeech/t2s/exps/wavernn/train.py | 3 +- 28 files changed, 285 insertions(+), 203 deletions(-) diff --git a/examples/aishell3/tts3/README.md b/examples/aishell3/tts3/README.md index d02ad1b63..93ce62c96 100644 --- a/examples/aishell3/tts3/README.md +++ b/examples/aishell3/tts3/README.md @@ -120,12 +120,12 @@ CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_p ``` ```text usage: synthesize.py [-h] - [--am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk}] + [--am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech,tacotron2_aishell3}] [--am_config AM_CONFIG] [--am_ckpt AM_CKPT] [--am_stat AM_STAT] [--phones_dict PHONES_DICT] [--tones_dict TONES_DICT] [--speaker_dict SPEAKER_DICT] [--voice-cloning VOICE_CLONING] - [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc}] + [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,wavernn_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,style_melgan_csmsc}] [--voc_config VOC_CONFIG] [--voc_ckpt VOC_CKPT] [--voc_stat VOC_STAT] [--ngpu NGPU] [--test_metadata TEST_METADATA] [--output_dir OUTPUT_DIR] @@ -134,11 +134,10 @@ Synthesize with acoustic model & vocoder optional arguments: -h, --help show this help message and exit - --am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk} + --am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech,tacotron2_aishell3} Choose acoustic model type of tts task. --am_config AM_CONFIG - Config of acoustic model. Use deault config when it is - None. + Config of acoustic model. --am_ckpt AM_CKPT Checkpoint file of acoustic model. --am_stat AM_STAT mean and standard deviation used to normalize spectrogram when training acoustic model. @@ -150,10 +149,10 @@ optional arguments: speaker id map file. --voice-cloning VOICE_CLONING whether training voice cloning model. - --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc} + --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,wavernn_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,style_melgan_csmsc} Choose vocoder type of tts task. --voc_config VOC_CONFIG - Config of voc. Use deault config when it is None. + Config of voc. --voc_ckpt VOC_CKPT Checkpoint file of voc. --voc_stat VOC_STAT mean and standard deviation used to normalize spectrogram when training voc. 
@@ -169,12 +168,12 @@ CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh ${conf_path} ${train_outp ``` ```text usage: synthesize_e2e.py [-h] - [--am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk}] + [--am {speedyspeech_csmsc,speedyspeech_aishell3,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech}] [--am_config AM_CONFIG] [--am_ckpt AM_CKPT] [--am_stat AM_STAT] [--phones_dict PHONES_DICT] [--tones_dict TONES_DICT] [--speaker_dict SPEAKER_DICT] [--spk_id SPK_ID] - [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc}] + [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,style_melgan_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,wavernn_csmsc}] [--voc_config VOC_CONFIG] [--voc_ckpt VOC_CKPT] [--voc_stat VOC_STAT] [--lang LANG] [--inference_dir INFERENCE_DIR] [--ngpu NGPU] @@ -184,11 +183,10 @@ Synthesize with acoustic model & vocoder optional arguments: -h, --help show this help message and exit - --am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk} + --am {speedyspeech_csmsc,speedyspeech_aishell3,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech} Choose acoustic model type of tts task. --am_config AM_CONFIG - Config of acoustic model. Use deault config when it is - None. + Config of acoustic model. --am_ckpt AM_CKPT Checkpoint file of acoustic model. --am_stat AM_STAT mean and standard deviation used to normalize spectrogram when training acoustic model. @@ -199,10 +197,10 @@ optional arguments: --speaker_dict SPEAKER_DICT speaker id map file. --spk_id SPK_ID spk id for multi speaker acoustic model - --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc} + --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,style_melgan_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,wavernn_csmsc} Choose vocoder type of tts task. --voc_config VOC_CONFIG - Config of voc. Use deault config when it is None. + Config of voc. --voc_ckpt VOC_CKPT Checkpoint file of voc. --voc_stat VOC_STAT mean and standard deviation used to normalize spectrogram when training voc. @@ -215,9 +213,9 @@ optional arguments: output dir. ``` 1. `--am` is acoustic model type with the format {model_name}_{dataset} -2. `--am_config`, `--am_checkpoint`, `--am_stat`, `--phones_dict` `--speaker_dict` are arguments for acoustic model, which correspond to the 5 files in the fastspeech2 pretrained model. +2. `--am_config`, `--am_ckpt`, `--am_stat`, `--phones_dict` `--speaker_dict` are arguments for acoustic model, which correspond to the 5 files in the fastspeech2 pretrained model. 3. `--voc` is vocoder type with the format {model_name}_{dataset} -4. `--voc_config`, `--voc_checkpoint`, `--voc_stat` are arguments for vocoder, which correspond to the 3 files in the parallel wavegan pretrained model. +4. `--voc_config`, `--voc_ckpt`, `--voc_stat` are arguments for vocoder, which correspond to the 3 files in the parallel wavegan pretrained model. 5. `--lang` is the model language, which can be `zh` or `en`. 6. `--test_metadata` should be the metadata file in the normalized subfolder of `test` in the `dump` folder. 7. `--text` is the text file, which contains sentences to synthesize. 
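To make the calling convention above concrete, here is a minimal sketch of a multi-speaker `synthesize_e2e.py` invocation, assuming the recipes' usual `${BIN_DIR}` layout; every `path/to/...` value is an illustrative placeholder, not a file shipped with these patches:

```bash
# Hedged sketch only: the flag names come from the usage message above;
# all paths are placeholders for wherever you unpacked the pretrained models.
python3 ${BIN_DIR}/../synthesize_e2e.py \
    --am=fastspeech2_aishell3 \
    --am_config=path/to/am/default.yaml \
    --am_ckpt=path/to/am/snapshot_iter_xxx.pdz \
    --am_stat=path/to/am/speech_stats.npy \
    --phones_dict=path/to/am/phone_id_map.txt \
    --speaker_dict=path/to/am/speaker_id_map.txt \
    --spk_id=0 \
    --voc=pwgan_aishell3 \
    --voc_config=path/to/voc/default.yaml \
    --voc_ckpt=path/to/voc/snapshot_iter_xxx.pdz \
    --voc_stat=path/to/voc/feats_stats.npy \
    --lang=zh \
    --text=sentences.txt \
    --output_dir=output
```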
diff --git a/examples/aishell3/voc1/README.md b/examples/aishell3/voc1/README.md index eb30e7c40..503f8a19d 100644 --- a/examples/aishell3/voc1/README.md +++ b/examples/aishell3/voc1/README.md @@ -75,7 +75,7 @@ Train a ParallelWaveGAN model. optional arguments: -h, --help show this help message and exit - --config CONFIG config file to overwrite default config. + --config CONFIG ParallelWaveGAN config file. --train-metadata TRAIN_METADATA training data. --dev-metadata DEV_METADATA diff --git a/examples/aishell3/voc5/README.md b/examples/aishell3/voc5/README.md index c957c4a3a..f8f28f409 100644 --- a/examples/aishell3/voc5/README.md +++ b/examples/aishell3/voc5/README.md @@ -67,15 +67,13 @@ Here's the complete help message. ```text usage: train.py [-h] [--config CONFIG] [--train-metadata TRAIN_METADATA] [--dev-metadata DEV_METADATA] [--output-dir OUTPUT_DIR] - [--ngpu NGPU] [--batch-size BATCH_SIZE] [--max-iter MAX_ITER] - [--run-benchmark RUN_BENCHMARK] - [--profiler_options PROFILER_OPTIONS] + [--ngpu NGPU] -Train a ParallelWaveGAN model. +Train a HiFiGAN model. optional arguments: -h, --help show this help message and exit - --config CONFIG config file to overwrite default config. + --config CONFIG HiFiGAN config file. --train-metadata TRAIN_METADATA training data. --dev-metadata DEV_METADATA @@ -83,19 +81,6 @@ optional arguments: --output-dir OUTPUT_DIR output dir. --ngpu NGPU if ngpu == 0, use cpu. - -benchmark: - arguments related to benchmark. - - --batch-size BATCH_SIZE - batch size. - --max-iter MAX_ITER train max steps. - --run-benchmark RUN_BENCHMARK - runing benchmark or not, if True, use the --batch-size - and --max-iter. - --profiler_options PROFILER_OPTIONS - The option of profiler, which should be in format - "key1=value1;key2=value2;key3=value3". ``` 1. `--config` is a config file in yaml format to overwrite the default config, which can be found at `conf/default.yaml`. diff --git a/examples/csmsc/tts0/README.md b/examples/csmsc/tts0/README.md index 01376bd61..a337c7d45 100644 --- a/examples/csmsc/tts0/README.md +++ b/examples/csmsc/tts0/README.md @@ -103,12 +103,12 @@ CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_p ``` ```text usage: synthesize.py [-h] - [--am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc}] + [--am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech,tacotron2_aishell3}] [--am_config AM_CONFIG] [--am_ckpt AM_CKPT] [--am_stat AM_STAT] [--phones_dict PHONES_DICT] [--tones_dict TONES_DICT] [--speaker_dict SPEAKER_DICT] [--voice-cloning VOICE_CLONING] - [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc}] + [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,wavernn_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,style_melgan_csmsc}] [--voc_config VOC_CONFIG] [--voc_ckpt VOC_CKPT] [--voc_stat VOC_STAT] [--ngpu NGPU] [--test_metadata TEST_METADATA] [--output_dir OUTPUT_DIR] @@ -117,11 +117,10 @@ Synthesize with acoustic model & vocoder optional arguments: -h, --help show this help message and exit - --am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc} + --am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech,tacotron2_aishell3} Choose acoustic model type of tts task. 
--am_config AM_CONFIG - Config of acoustic model. Use deault config when it is - None. + Config of acoustic model. --am_ckpt AM_CKPT Checkpoint file of acoustic model. --am_stat AM_STAT mean and standard deviation used to normalize spectrogram when training acoustic model. @@ -133,10 +132,10 @@ optional arguments: speaker id map file. --voice-cloning VOICE_CLONING whether training voice cloning model. - --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc} + --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,wavernn_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,style_melgan_csmsc} Choose vocoder type of tts task. --voc_config VOC_CONFIG - Config of voc. Use deault config when it is None. + Config of voc. --voc_ckpt VOC_CKPT Checkpoint file of voc. --voc_stat VOC_STAT mean and standard deviation used to normalize spectrogram when training voc. @@ -152,12 +151,12 @@ CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh ${conf_path} ${train_outp ``` ```text usage: synthesize_e2e.py [-h] - [--am {speedyspeech_csmsc,speedyspeech_aishell3,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc}] + [--am {speedyspeech_csmsc,speedyspeech_aishell3,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech}] [--am_config AM_CONFIG] [--am_ckpt AM_CKPT] [--am_stat AM_STAT] [--phones_dict PHONES_DICT] [--tones_dict TONES_DICT] [--speaker_dict SPEAKER_DICT] [--spk_id SPK_ID] - [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,style_melgan_csmsc,hifigan_csmsc}] + [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,style_melgan_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,wavernn_csmsc}] [--voc_config VOC_CONFIG] [--voc_ckpt VOC_CKPT] [--voc_stat VOC_STAT] [--lang LANG] [--inference_dir INFERENCE_DIR] [--ngpu NGPU] @@ -167,11 +166,10 @@ Synthesize with acoustic model & vocoder optional arguments: -h, --help show this help message and exit - --am {speedyspeech_csmsc,speedyspeech_aishell3,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc} + --am {speedyspeech_csmsc,speedyspeech_aishell3,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech} Choose acoustic model type of tts task. --am_config AM_CONFIG - Config of acoustic model. Use deault config when it is - None. + Config of acoustic model. --am_ckpt AM_CKPT Checkpoint file of acoustic model. --am_stat AM_STAT mean and standard deviation used to normalize spectrogram when training acoustic model. @@ -182,10 +180,10 @@ optional arguments: --speaker_dict SPEAKER_DICT speaker id map file. --spk_id SPK_ID spk id for multi speaker acoustic model - --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,style_melgan_csmsc,hifigan_csmsc} + --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,style_melgan_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,wavernn_csmsc} Choose vocoder type of tts task. --voc_config VOC_CONFIG - Config of voc. Use deault config when it is None. + Config of voc. --voc_ckpt VOC_CKPT Checkpoint file of voc. --voc_stat VOC_STAT mean and standard deviation used to normalize spectrogram when training voc. @@ -198,9 +196,9 @@ optional arguments: output dir. ``` 1. `--am` is acoustic model type with the format {model_name}_{dataset} -2. 
`--am_config`, `--am_checkpoint`, `--am_stat` and `--phones_dict` are arguments for acoustic model, which correspond to the 4 files in the Tacotron2 pretrained model. +2. `--am_config`, `--am_ckpt`, `--am_stat` and `--phones_dict` are arguments for acoustic model, which correspond to the 4 files in the Tacotron2 pretrained model. 3. `--voc` is vocoder type with the format {model_name}_{dataset} -4. `--voc_config`, `--voc_checkpoint`, `--voc_stat` are arguments for vocoder, which correspond to the 3 files in the parallel wavegan pretrained model. +4. `--voc_config`, `--voc_ckpt`, `--voc_stat` are arguments for vocoder, which correspond to the 3 files in the parallel wavegan pretrained model. 5. `--lang` is the model language, which can be `zh` or `en`. 6. `--test_metadata` should be the metadata file in the normalized subfolder of `test` in the `dump` folder. 7. `--text` is the text file, which contains sentences to synthesize. diff --git a/examples/csmsc/tts2/README.md b/examples/csmsc/tts2/README.md index 081d85848..553a370c9 100644 --- a/examples/csmsc/tts2/README.md +++ b/examples/csmsc/tts2/README.md @@ -109,12 +109,12 @@ CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_p ``` ```text usage: synthesize.py [-h] - [--am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk}] + [--am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech,tacotron2_aishell3}] [--am_config AM_CONFIG] [--am_ckpt AM_CKPT] [--am_stat AM_STAT] [--phones_dict PHONES_DICT] [--tones_dict TONES_DICT] [--speaker_dict SPEAKER_DICT] [--voice-cloning VOICE_CLONING] - [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc}] + [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,wavernn_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,style_melgan_csmsc}] [--voc_config VOC_CONFIG] [--voc_ckpt VOC_CKPT] [--voc_stat VOC_STAT] [--ngpu NGPU] [--test_metadata TEST_METADATA] [--output_dir OUTPUT_DIR] @@ -123,11 +123,10 @@ Synthesize with acoustic model & vocoder optional arguments: -h, --help show this help message and exit - --am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk} + --am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech,tacotron2_aishell3} Choose acoustic model type of tts task. --am_config AM_CONFIG - Config of acoustic model. Use deault config when it is - None. + Config of acoustic model. --am_ckpt AM_CKPT Checkpoint file of acoustic model. --am_stat AM_STAT mean and standard deviation used to normalize spectrogram when training acoustic model. @@ -139,10 +138,10 @@ optional arguments: speaker id map file. --voice-cloning VOICE_CLONING whether training voice cloning model. - --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc} + --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,wavernn_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,style_melgan_csmsc} Choose vocoder type of tts task. --voc_config VOC_CONFIG - Config of voc. Use deault config when it is None. + Config of voc. --voc_ckpt VOC_CKPT Checkpoint file of voc. --voc_stat VOC_STAT mean and standard deviation used to normalize spectrogram when training voc. 
@@ -158,12 +157,12 @@ CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh ${conf_path} ${train_outp ``` ```text usage: synthesize_e2e.py [-h] - [--am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk}] + [--am {speedyspeech_csmsc,speedyspeech_aishell3,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech}] [--am_config AM_CONFIG] [--am_ckpt AM_CKPT] [--am_stat AM_STAT] [--phones_dict PHONES_DICT] [--tones_dict TONES_DICT] [--speaker_dict SPEAKER_DICT] [--spk_id SPK_ID] - [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc}] + [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,style_melgan_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,wavernn_csmsc}] [--voc_config VOC_CONFIG] [--voc_ckpt VOC_CKPT] [--voc_stat VOC_STAT] [--lang LANG] [--inference_dir INFERENCE_DIR] [--ngpu NGPU] @@ -173,11 +172,10 @@ Synthesize with acoustic model & vocoder optional arguments: -h, --help show this help message and exit - --am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk} + --am {speedyspeech_csmsc,speedyspeech_aishell3,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech} Choose acoustic model type of tts task. --am_config AM_CONFIG - Config of acoustic model. Use deault config when it is - None. + Config of acoustic model. --am_ckpt AM_CKPT Checkpoint file of acoustic model. --am_stat AM_STAT mean and standard deviation used to normalize spectrogram when training acoustic model. @@ -188,10 +186,10 @@ optional arguments: --speaker_dict SPEAKER_DICT speaker id map file. --spk_id SPK_ID spk id for multi speaker acoustic model - --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc} + --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,style_melgan_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,wavernn_csmsc} Choose vocoder type of tts task. --voc_config VOC_CONFIG - Config of voc. Use deault config when it is None. + Config of voc. --voc_ckpt VOC_CKPT Checkpoint file of voc. --voc_stat VOC_STAT mean and standard deviation used to normalize spectrogram when training voc. @@ -204,9 +202,9 @@ optional arguments: output dir. ``` 1. `--am` is acoustic model type with the format {model_name}_{dataset} -2. `--am_config`, `--am_checkpoint`, `--am_stat`, `--phones_dict` and `--tones_dict` are arguments for acoustic model, which correspond to the 5 files in the speedyspeech pretrained model. +2. `--am_config`, `--am_ckpt`, `--am_stat`, `--phones_dict` and `--tones_dict` are arguments for acoustic model, which correspond to the 5 files in the speedyspeech pretrained model. 3. `--voc` is vocoder type with the format {model_name}_{dataset} -4. `--voc_config`, `--voc_checkpoint`, `--voc_stat` are arguments for vocoder, which correspond to the 3 files in the parallel wavegan pretrained model. +4. `--voc_config`, `--voc_ckpt`, `--voc_stat` are arguments for vocoder, which correspond to the 3 files in the parallel wavegan pretrained model. 5. `--lang` is the model language, which can be `zh` or `en`. 6. `--test_metadata` should be the metadata file in the normalized subfolder of `test` in the `dump` folder. 7. `--text` is the text file, which contains sentences to synthesize. 
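For contrast with the end-to-end entry point, here is a hedged sketch of the offline `synthesize.py` path, which reads pre-extracted features via `--test_metadata` instead of raw text; the metadata path follows the `dump` layout these recipes produce, and all model paths are placeholders:

```bash
# Sketch under assumptions: dump/test/norm/metadata.jsonl is produced by
# the preprocessing stage; every path/to/... value is a placeholder.
python3 ${BIN_DIR}/../synthesize.py \
    --am=speedyspeech_csmsc \
    --am_config=path/to/am/default.yaml \
    --am_ckpt=path/to/am/snapshot_iter_xxx.pdz \
    --am_stat=path/to/am/feats_stats.npy \
    --phones_dict=path/to/am/phone_id_map.txt \
    --tones_dict=path/to/am/tone_id_map.txt \
    --voc=pwgan_csmsc \
    --voc_config=path/to/voc/default.yaml \
    --voc_ckpt=path/to/voc/snapshot_iter_xxx.pdz \
    --voc_stat=path/to/voc/feats_stats.npy \
    --test_metadata=dump/test/norm/metadata.jsonl \
    --output_dir=output/test
```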
diff --git a/examples/csmsc/tts3/README.md b/examples/csmsc/tts3/README.md index c734199b4..be18de7d6 100644 --- a/examples/csmsc/tts3/README.md +++ b/examples/csmsc/tts3/README.md @@ -111,12 +111,12 @@ CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_p ``` ```text usage: synthesize.py [-h] - [--am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk}] + [--am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech,tacotron2_aishell3}] [--am_config AM_CONFIG] [--am_ckpt AM_CKPT] [--am_stat AM_STAT] [--phones_dict PHONES_DICT] [--tones_dict TONES_DICT] [--speaker_dict SPEAKER_DICT] [--voice-cloning VOICE_CLONING] - [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc}] + [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,wavernn_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,style_melgan_csmsc}] [--voc_config VOC_CONFIG] [--voc_ckpt VOC_CKPT] [--voc_stat VOC_STAT] [--ngpu NGPU] [--test_metadata TEST_METADATA] [--output_dir OUTPUT_DIR] @@ -125,11 +125,10 @@ Synthesize with acoustic model & vocoder optional arguments: -h, --help show this help message and exit - --am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk} + --am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech,tacotron2_aishell3} Choose acoustic model type of tts task. --am_config AM_CONFIG - Config of acoustic model. Use deault config when it is - None. + Config of acoustic model. --am_ckpt AM_CKPT Checkpoint file of acoustic model. --am_stat AM_STAT mean and standard deviation used to normalize spectrogram when training acoustic model. @@ -141,10 +140,10 @@ optional arguments: speaker id map file. --voice-cloning VOICE_CLONING whether training voice cloning model. - --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc} + --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,wavernn_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,style_melgan_csmsc} Choose vocoder type of tts task. --voc_config VOC_CONFIG - Config of voc. Use deault config when it is None. + Config of voc. --voc_ckpt VOC_CKPT Checkpoint file of voc. --voc_stat VOC_STAT mean and standard deviation used to normalize spectrogram when training voc. 
@@ -160,12 +159,12 @@ CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh ${conf_path} ${train_outp ``` ```text usage: synthesize_e2e.py [-h] - [--am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk}] + [--am {speedyspeech_csmsc,speedyspeech_aishell3,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech}] [--am_config AM_CONFIG] [--am_ckpt AM_CKPT] [--am_stat AM_STAT] [--phones_dict PHONES_DICT] [--tones_dict TONES_DICT] [--speaker_dict SPEAKER_DICT] [--spk_id SPK_ID] - [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc}] + [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,style_melgan_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,wavernn_csmsc}] [--voc_config VOC_CONFIG] [--voc_ckpt VOC_CKPT] [--voc_stat VOC_STAT] [--lang LANG] [--inference_dir INFERENCE_DIR] [--ngpu NGPU] @@ -175,11 +174,10 @@ Synthesize with acoustic model & vocoder optional arguments: -h, --help show this help message and exit - --am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk} + --am {speedyspeech_csmsc,speedyspeech_aishell3,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech} Choose acoustic model type of tts task. --am_config AM_CONFIG - Config of acoustic model. Use deault config when it is - None. + Config of acoustic model. --am_ckpt AM_CKPT Checkpoint file of acoustic model. --am_stat AM_STAT mean and standard deviation used to normalize spectrogram when training acoustic model. @@ -190,10 +188,10 @@ optional arguments: --speaker_dict SPEAKER_DICT speaker id map file. --spk_id SPK_ID spk id for multi speaker acoustic model - --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc} + --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,style_melgan_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,wavernn_csmsc} Choose vocoder type of tts task. --voc_config VOC_CONFIG - Config of voc. Use deault config when it is None. + Config of voc. --voc_ckpt VOC_CKPT Checkpoint file of voc. --voc_stat VOC_STAT mean and standard deviation used to normalize spectrogram when training voc. @@ -204,11 +202,12 @@ optional arguments: --text TEXT text to synthesize, a 'utt_id sentence' pair per line. --output_dir OUTPUT_DIR output dir. + ``` 1. `--am` is acoustic model type with the format {model_name}_{dataset} -2. `--am_config`, `--am_checkpoint`, `--am_stat` and `--phones_dict` are arguments for acoustic model, which correspond to the 4 files in the fastspeech2 pretrained model. +2. `--am_config`, `--am_ckpt`, `--am_stat` and `--phones_dict` are arguments for acoustic model, which correspond to the 4 files in the fastspeech2 pretrained model. 3. `--voc` is vocoder type with the format {model_name}_{dataset} -4. `--voc_config`, `--voc_checkpoint`, `--voc_stat` are arguments for vocoder, which correspond to the 3 files in the parallel wavegan pretrained model. +4. `--voc_config`, `--voc_ckpt`, `--voc_stat` are arguments for vocoder, which correspond to the 3 files in the parallel wavegan pretrained model. 5. `--lang` is the model language, which can be `zh` or `en`. 6. `--test_metadata` should be the metadata file in the normalized subfolder of `test` in the `dump` folder. 7. `--text` is the text file, which contains sentences to synthesize. 
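The `utt_id sentence` format required by `--text` (note 7 above) is easy to get wrong, so here is a minimal sketch of a valid input file; the ids and sentences are invented for illustration:

```bash
# Each line is "utt_id sentence"; the utt_id names the synthesized wav file.
# Both sentences below are made-up placeholders.
cat > sentences.txt <<'EOF'
001 欢迎使用飞桨语音合成系统。
002 今天天气真不错。
EOF
```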
diff --git a/examples/csmsc/tts3/README_cn.md b/examples/csmsc/tts3/README_cn.md index 25931ecb1..a88615134 100644 --- a/examples/csmsc/tts3/README_cn.md +++ b/examples/csmsc/tts3/README_cn.md @@ -117,12 +117,12 @@ CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_p ``` ```text usage: synthesize.py [-h] - [--am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk}] + [--am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech,tacotron2_aishell3}] [--am_config AM_CONFIG] [--am_ckpt AM_CKPT] [--am_stat AM_STAT] [--phones_dict PHONES_DICT] [--tones_dict TONES_DICT] [--speaker_dict SPEAKER_DICT] [--voice-cloning VOICE_CLONING] - [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc}] + [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,wavernn_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,style_melgan_csmsc}] [--voc_config VOC_CONFIG] [--voc_ckpt VOC_CKPT] [--voc_stat VOC_STAT] [--ngpu NGPU] [--test_metadata TEST_METADATA] [--output_dir OUTPUT_DIR] @@ -131,11 +131,10 @@ Synthesize with acoustic model & vocoder optional arguments: -h, --help show this help message and exit - --am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk} + --am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech,tacotron2_aishell3} Choose acoustic model type of tts task. --am_config AM_CONFIG - Config of acoustic model. Use deault config when it is - None. + Config of acoustic model. --am_ckpt AM_CKPT Checkpoint file of acoustic model. --am_stat AM_STAT mean and standard deviation used to normalize spectrogram when training acoustic model. @@ -147,10 +146,10 @@ optional arguments: speaker id map file. --voice-cloning VOICE_CLONING whether training voice cloning model. - --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc} + --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,wavernn_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,style_melgan_csmsc} Choose vocoder type of tts task. --voc_config VOC_CONFIG - Config of voc. Use deault config when it is None. + Config of voc. --voc_ckpt VOC_CKPT Checkpoint file of voc. --voc_stat VOC_STAT mean and standard deviation used to normalize spectrogram when training voc. 
@@ -167,12 +166,12 @@ CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh ${conf_path} ${train_outp ``` ```text usage: synthesize_e2e.py [-h] - [--am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk}] + [--am {speedyspeech_csmsc,speedyspeech_aishell3,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech}] [--am_config AM_CONFIG] [--am_ckpt AM_CKPT] [--am_stat AM_STAT] [--phones_dict PHONES_DICT] [--tones_dict TONES_DICT] [--speaker_dict SPEAKER_DICT] [--spk_id SPK_ID] - [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc}] + [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,style_melgan_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,wavernn_csmsc}] [--voc_config VOC_CONFIG] [--voc_ckpt VOC_CKPT] [--voc_stat VOC_STAT] [--lang LANG] [--inference_dir INFERENCE_DIR] [--ngpu NGPU] @@ -182,11 +181,10 @@ Synthesize with acoustic model & vocoder optional arguments: -h, --help show this help message and exit - --am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk} + --am {speedyspeech_csmsc,speedyspeech_aishell3,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech} Choose acoustic model type of tts task. --am_config AM_CONFIG - Config of acoustic model. Use deault config when it is - None. + Config of acoustic model. --am_ckpt AM_CKPT Checkpoint file of acoustic model. --am_stat AM_STAT mean and standard deviation used to normalize spectrogram when training acoustic model. @@ -197,10 +195,10 @@ optional arguments: --speaker_dict SPEAKER_DICT speaker id map file. --spk_id SPK_ID spk id for multi speaker acoustic model - --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc} + --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,style_melgan_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,wavernn_csmsc} Choose vocoder type of tts task. --voc_config VOC_CONFIG - Config of voc. Use deault config when it is None. + Config of voc. --voc_ckpt VOC_CKPT Checkpoint file of voc. --voc_stat VOC_STAT mean and standard deviation used to normalize spectrogram when training voc. @@ -213,9 +211,9 @@ optional arguments: output dir. ``` 1. `--am` 声学模型格式是否符合 {model_name}_{dataset} -2. `--am_config`, `--am_checkpoint`, `--am_stat` 和 `--phones_dict` 是声学模型的参数,对应于 fastspeech2 预训练模型中的 4 个文件。 +2. `--am_config`, `--am_ckpt`, `--am_stat` 和 `--phones_dict` 是声学模型的参数,对应于 fastspeech2 预训练模型中的 4 个文件。 3. `--voc` 声码器(vocoder)格式是否符合 {model_name}_{dataset} -4. `--voc_config`, `--voc_checkpoint`, `--voc_stat` 是声码器的参数,对应于 parallel wavegan 预训练模型中的 3 个文件。 +4. `--voc_config`, `--voc_ckpt`, `--voc_stat` 是声码器的参数,对应于 parallel wavegan 预训练模型中的 3 个文件。 5. `--lang` 对应模型的语言可以是 `zh` 或 `en` 。 6. `--test_metadata` 应为 `dump` 文件夹中 `test` 下的规范化元数据文件。 7. `--text` 是文本文件,其中包含要合成的句子。 diff --git a/examples/csmsc/vits/README.md b/examples/csmsc/vits/README.md index e69de29bb..0c16840a0 100644 --- a/examples/csmsc/vits/README.md +++ b/examples/csmsc/vits/README.md @@ -0,0 +1,146 @@ +# VITS with CSMSC +This example contains code used to train a [VITS](https://arxiv.org/abs/2106.06103) model with [Chinese Standard Mandarin Speech Corpus](https://www.data-baker.com/open_source.html). + +## Dataset +### Download and Extract +Download CSMSC from its [Official Website](https://test.data-baker.com/data/index/source).
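A minimal sketch of unpacking the corpus to the location the rest of this README assumes; the archive name and format are assumptions, since the site serves the download only after registration:

```bash
# Assumption: the corpus downloads as a rar archive named BZNSYP.rar.
# The recipe below expects the extracted data at ~/datasets/BZNSYP.
mkdir -p ~/datasets
unrar x BZNSYP.rar ~/datasets/
```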
+ +### Get MFA Result and Extract +We use [MFA](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) to get phonemes for VITS; the durations from MFA are not needed here. +You can download it from here: [baker_alignment_tone.tar.gz](https://paddlespeech.bj.bcebos.com/MFA/BZNSYP/with_tone/baker_alignment_tone.tar.gz), or train your own MFA model following the [mfa example](https://github.com/PaddlePaddle/PaddleSpeech/tree/develop/examples/other/mfa) in our repo. + +## Get Started +Assume the path to the dataset is `~/datasets/BZNSYP`. +Assume the path to the MFA result of CSMSC is `./baker_alignment_tone`. +Run the command below to +1. **source path**. +2. preprocess the dataset. +3. train the model. +4. synthesize wavs. + - synthesize waveform from `metadata.jsonl`. + - synthesize waveform from a text file. + +```bash +./run.sh +``` +You can choose a range of stages you want to run, or set `stage` equal to `stop-stage` to use only one stage. For example, running the following command will only preprocess the dataset. +```bash +./run.sh --stage 0 --stop-stage 0 +``` +### Data Preprocessing +```bash +./local/preprocess.sh ${conf_path} +``` +When it is done, a `dump` folder is created in the current directory. The structure of the dump folder is listed below. + +```text +dump +├── dev +│   ├── norm +│   └── raw +├── phone_id_map.txt +├── speaker_id_map.txt +├── test +│   ├── norm +│   └── raw +└── train + ├── feats_stats.npy + ├── norm + └── raw +``` +The dataset is split into 3 parts, namely `train`, `dev`, and `test`, each of which contains a `norm` and `raw` subfolder. The raw folder contains the wave and linear spectrogram of each utterance, while the norm folder contains normalized ones. The statistics used to normalize features are computed from the training set, which is located in `dump/train/feats_stats.npy`. + +Also, there is a `metadata.jsonl` in each subfolder. It is a table-like file that contains phones, text_lengths, feats, feats_lengths, the path of linear spectrogram features, the path of raw waves, speaker, and the id of each utterance. + +### Model Training +```bash +CUDA_VISIBLE_DEVICES=${gpus} ./local/train.sh ${conf_path} ${train_output_path} +``` +`./local/train.sh` calls `${BIN_DIR}/train.py`. +Here's the complete help message. +```text +usage: train.py [-h] [--config CONFIG] [--train-metadata TRAIN_METADATA] + [--dev-metadata DEV_METADATA] [--output-dir OUTPUT_DIR] + [--ngpu NGPU] [--phones-dict PHONES_DICT] + +Train a VITS model. + +optional arguments: + -h, --help show this help message and exit + --config CONFIG config file to overwrite default config. + --train-metadata TRAIN_METADATA + training data. + --dev-metadata DEV_METADATA + dev data. + --output-dir OUTPUT_DIR + output dir. + --ngpu NGPU if ngpu == 0, use cpu. + --phones-dict PHONES_DICT + phone vocabulary file. +``` +1. `--config` is a config file in yaml format to overwrite the default config, which can be found at `conf/default.yaml`. +2. `--train-metadata` and `--dev-metadata` should be the metadata file in the normalized subfolder of `train` and `dev` in the `dump` folder. +3. `--output-dir` is the directory to save the results of the experiment. Checkpoints are saved in `checkpoints/` inside this directory. +4. `--ngpu` is the number of gpus to use, if ngpu == 0, use cpu. +5. `--phones-dict` is the path of the phone vocabulary file. + +### Synthesizing + +`./local/synthesize.sh` calls `${BIN_DIR}/synthesize.py`, which can synthesize waveform from `metadata.jsonl`.
+ +```bash +CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_path} ${ckpt_name} +``` +```text +usage: synthesize.py [-h] [--config CONFIG] [--ckpt CKPT] + [--phones_dict PHONES_DICT] [--ngpu NGPU] + [--test_metadata TEST_METADATA] [--output_dir OUTPUT_DIR] + +Synthesize with VITS + +optional arguments: + -h, --help show this help message and exit + --config CONFIG Config of VITS. + --ckpt CKPT Checkpoint file of VITS. + --phones_dict PHONES_DICT + phone vocabulary file. + --ngpu NGPU if ngpu == 0, use cpu. + --test_metadata TEST_METADATA + test metadata. + --output_dir OUTPUT_DIR + output dir. +``` +`./local/synthesize_e2e.sh` calls `${BIN_DIR}/synthesize_e2e.py`, which can synthesize waveform from a text file. +```bash +CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh ${conf_path} ${train_output_path} ${ckpt_name} +``` +```text +usage: synthesize_e2e.py [-h] [--config CONFIG] [--ckpt CKPT] + [--phones_dict PHONES_DICT] [--lang LANG] + [--inference_dir INFERENCE_DIR] [--ngpu NGPU] + [--text TEXT] [--output_dir OUTPUT_DIR] + +Synthesize with VITS + +optional arguments: + -h, --help show this help message and exit + --config CONFIG Config of VITS. + --ckpt CKPT Checkpoint file of VITS. + --phones_dict PHONES_DICT + phone vocabulary file. + --lang LANG Choose model language. zh or en + --inference_dir INFERENCE_DIR + dir to save inference models + --ngpu NGPU if ngpu == 0, use cpu. + --text TEXT text to synthesize, a 'utt_id sentence' pair per line. + --output_dir OUTPUT_DIR + output dir. +``` +1. `--config`, `--ckpt`, and `--phones_dict` are arguments for the model, which correspond to the 3 files in the VITS pretrained model. +2. `--lang` is the model language, which can be `zh` or `en`. +3. `--test_metadata` should be the metadata file in the normalized subfolder of `test` in the `dump` folder. +4. `--text` is the text file, which contains sentences to synthesize. +5. `--output_dir` is the directory to save synthesized audio files. +6. `--ngpu` is the number of gpus to use, if ngpu == 0, use cpu. + +## Pretrained Model diff --git a/examples/csmsc/voc1/README.md b/examples/csmsc/voc1/README.md index 77da5b185..d19fe8497 100644 --- a/examples/csmsc/voc1/README.md +++ b/examples/csmsc/voc1/README.md @@ -65,7 +65,7 @@ Train a ParallelWaveGAN model. optional arguments: -h, --help show this help message and exit - --config CONFIG config file to overwrite default config. + --config CONFIG ParallelWaveGAN config file. --train-metadata TRAIN_METADATA training data. --dev-metadata DEV_METADATA diff --git a/examples/csmsc/voc3/README.md b/examples/csmsc/voc3/README.md index 12adaf7f4..eb7710362 100644 --- a/examples/csmsc/voc3/README.md +++ b/examples/csmsc/voc3/README.md @@ -63,7 +63,7 @@ Train a Multi-Band MelGAN model. optional arguments: -h, --help show this help message and exit - --config CONFIG config file to overwrite default config. + --config CONFIG Multi-Band MelGAN config file. --train-metadata TRAIN_METADATA training data. --dev-metadata DEV_METADATA diff --git a/examples/csmsc/voc4/README.md b/examples/csmsc/voc4/README.md index b7add3e57..d9e86a88d 100644 --- a/examples/csmsc/voc4/README.md +++ b/examples/csmsc/voc4/README.md @@ -63,7 +63,7 @@ Train a Style MelGAN model. optional arguments: -h, --help show this help message and exit - --config CONFIG config file to overwrite default config. + --config CONFIG Style MelGAN config file. --train-metadata TRAIN_METADATA training data.
--dev-metadata DEV_METADATA diff --git a/examples/csmsc/voc5/README.md b/examples/csmsc/voc5/README.md index 94f93b48b..e044a0c74 100644 --- a/examples/csmsc/voc5/README.md +++ b/examples/csmsc/voc5/README.md @@ -63,7 +63,7 @@ Train a HiFiGAN model. optional arguments: -h, --help show this help message and exit - --config CONFIG config file to overwrite default config. + --config CONFIG HiFiGAN config file. --train-metadata TRAIN_METADATA training data. --dev-metadata DEV_METADATA diff --git a/examples/csmsc/voc6/README.md b/examples/csmsc/voc6/README.md index 7dcf133bd..f1a5ec3bb 100644 --- a/examples/csmsc/voc6/README.md +++ b/examples/csmsc/voc6/README.md @@ -63,7 +63,7 @@ Train a WaveRNN model. optional arguments: -h, --help show this help message and exit - --config CONFIG config file to overwrite default config. + --config CONFIG WaveRNN config file. --train-metadata TRAIN_METADATA training data. --dev-metadata DEV_METADATA diff --git a/examples/ljspeech/tts0/README.md b/examples/ljspeech/tts0/README.md index ba7ad6193..581f7930f 100644 --- a/examples/ljspeech/tts0/README.md +++ b/examples/ljspeech/tts0/README.md @@ -103,12 +103,12 @@ CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_p ``` ```text usage: synthesize.py [-h] - [--am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc}] + [--am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech,tacotron2_aishell3}] [--am_config AM_CONFIG] [--am_ckpt AM_CKPT] [--am_stat AM_STAT] [--phones_dict PHONES_DICT] [--tones_dict TONES_DICT] [--speaker_dict SPEAKER_DICT] [--voice-cloning VOICE_CLONING] - [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc}] + [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,wavernn_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,style_melgan_csmsc}] [--voc_config VOC_CONFIG] [--voc_ckpt VOC_CKPT] [--voc_stat VOC_STAT] [--ngpu NGPU] [--test_metadata TEST_METADATA] [--output_dir OUTPUT_DIR] @@ -117,11 +117,10 @@ Synthesize with acoustic model & vocoder optional arguments: -h, --help show this help message and exit - --am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc} + --am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech,tacotron2_aishell3} Choose acoustic model type of tts task. --am_config AM_CONFIG - Config of acoustic model. Use deault config when it is - None. + Config of acoustic model. --am_ckpt AM_CKPT Checkpoint file of acoustic model. --am_stat AM_STAT mean and standard deviation used to normalize spectrogram when training acoustic model. @@ -133,10 +132,10 @@ optional arguments: speaker id map file. --voice-cloning VOICE_CLONING whether training voice cloning model. - --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc} + --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,wavernn_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,style_melgan_csmsc} Choose vocoder type of tts task. --voc_config VOC_CONFIG - Config of voc. Use deault config when it is None. + Config of voc. --voc_ckpt VOC_CKPT Checkpoint file of voc. --voc_stat VOC_STAT mean and standard deviation used to normalize spectrogram when training voc. 
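An aside on the `--am_stat`/`--voc_stat` arguments shown in this help text: each file stores the mean and standard deviation computed on the training set, used to normalize the spectrogram features during training and to denormalize them at synthesis time. A conceptual sketch in Python, with a stand-in array and an assumed two-row layout, since the exact file format is not spelled out here:

```python
import numpy as np

# Stand-in for np.load("speech_stats.npy"); the two-row layout
# (row 0 = mean, row 1 = std) is an assumption for illustration.
stats = np.stack([np.zeros(80), np.ones(80)])
mean, std = stats[0], stats[1]

spectrogram = np.random.randn(100, 80)      # toy (frames, dims) features
normalized = (spectrogram - mean) / std     # applied before training
denormalized = normalized * std + mean      # inverse, applied at synthesis
```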
@@ -152,12 +151,12 @@ CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh ${conf_path} ${train_outp ``` ```text usage: synthesize_e2e.py [-h] - [--am {speedyspeech_csmsc,speedyspeech_aishell3,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc}] + [--am {speedyspeech_csmsc,speedyspeech_aishell3,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech}] [--am_config AM_CONFIG] [--am_ckpt AM_CKPT] [--am_stat AM_STAT] [--phones_dict PHONES_DICT] [--tones_dict TONES_DICT] [--speaker_dict SPEAKER_DICT] [--spk_id SPK_ID] - [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,style_melgan_csmsc,hifigan_csmsc}] + [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,style_melgan_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,wavernn_csmsc}] [--voc_config VOC_CONFIG] [--voc_ckpt VOC_CKPT] [--voc_stat VOC_STAT] [--lang LANG] [--inference_dir INFERENCE_DIR] [--ngpu NGPU] @@ -167,11 +166,10 @@ Synthesize with acoustic model & vocoder optional arguments: -h, --help show this help message and exit - --am {speedyspeech_csmsc,speedyspeech_aishell3,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc} + --am {speedyspeech_csmsc,speedyspeech_aishell3,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech} Choose acoustic model type of tts task. --am_config AM_CONFIG - Config of acoustic model. Use deault config when it is - None. + Config of acoustic model. --am_ckpt AM_CKPT Checkpoint file of acoustic model. --am_stat AM_STAT mean and standard deviation used to normalize spectrogram when training acoustic model. @@ -182,10 +180,10 @@ optional arguments: --speaker_dict SPEAKER_DICT speaker id map file. --spk_id SPK_ID spk id for multi speaker acoustic model - --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,style_melgan_csmsc,hifigan_csmsc} + --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,style_melgan_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,wavernn_csmsc} Choose vocoder type of tts task. --voc_config VOC_CONFIG - Config of voc. Use deault config when it is None. + Config of voc. --voc_ckpt VOC_CKPT Checkpoint file of voc. --voc_stat VOC_STAT mean and standard deviation used to normalize spectrogram when training voc. @@ -198,9 +196,9 @@ optional arguments: output dir. ``` 1. `--am` is acoustic model type with the format {model_name}_{dataset} -2. `--am_config`, `--am_checkpoint`, `--am_stat` and `--phones_dict` are arguments for acoustic model, which correspond to the 4 files in the Tacotron2 pretrained model. +2. `--am_config`, `--am_ckpt`, `--am_stat` and `--phones_dict` are arguments for acoustic model, which correspond to the 4 files in the Tacotron2 pretrained model. 3. `--voc` is vocoder type with the format {model_name}_{dataset} -4. `--voc_config`, `--voc_checkpoint`, `--voc_stat` are arguments for vocoder, which correspond to the 3 files in the parallel wavegan pretrained model. +4. `--voc_config`, `--voc_ckpt`, `--voc_stat` are arguments for vocoder, which correspond to the 3 files in the parallel wavegan pretrained model. 5. `--lang` is the model language, which can be `zh` or `en`. 6. `--test_metadata` should be the metadata file in the normalized subfolder of `test` in the `dump` folder. 7. 
`--text` is the text file, which contains sentences to synthesize. diff --git a/examples/ljspeech/tts1/README.md b/examples/ljspeech/tts1/README.md index 7f32522ac..f85991cba 100644 --- a/examples/ljspeech/tts1/README.md +++ b/examples/ljspeech/tts1/README.md @@ -61,7 +61,7 @@ Train a TransformerTTS model with LJSpeech TTS dataset. optional arguments: -h, --help show this help message and exit - --config CONFIG config file to overwrite default config. + --config CONFIG TransformerTTS config file. --train-metadata TRAIN_METADATA training data. --dev-metadata DEV_METADATA diff --git a/examples/ljspeech/tts3/README.md b/examples/ljspeech/tts3/README.md index e028fa05d..a6724083d 100644 --- a/examples/ljspeech/tts3/README.md +++ b/examples/ljspeech/tts3/README.md @@ -109,12 +109,12 @@ CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_p ``` ``text usage: synthesize.py [-h] - [--am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk}] + [--am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech,tacotron2_aishell3}] [--am_config AM_CONFIG] [--am_ckpt AM_CKPT] [--am_stat AM_STAT] [--phones_dict PHONES_DICT] [--tones_dict TONES_DICT] [--speaker_dict SPEAKER_DICT] [--voice-cloning VOICE_CLONING] - [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc}] + [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,wavernn_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,style_melgan_csmsc}] [--voc_config VOC_CONFIG] [--voc_ckpt VOC_CKPT] [--voc_stat VOC_STAT] [--ngpu NGPU] [--test_metadata TEST_METADATA] [--output_dir OUTPUT_DIR] @@ -123,11 +123,10 @@ Synthesize with acoustic model & vocoder optional arguments: -h, --help show this help message and exit - --am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk} + --am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech,tacotron2_aishell3} Choose acoustic model type of tts task. --am_config AM_CONFIG - Config of acoustic model. Use deault config when it is - None. + Config of acoustic model. --am_ckpt AM_CKPT Checkpoint file of acoustic model. --am_stat AM_STAT mean and standard deviation used to normalize spectrogram when training acoustic model. @@ -139,10 +138,10 @@ optional arguments: speaker id map file. --voice-cloning VOICE_CLONING whether training voice cloning model. - --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc} + --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,wavernn_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,style_melgan_csmsc} Choose vocoder type of tts task. --voc_config VOC_CONFIG - Config of voc. Use deault config when it is None. + Config of voc. --voc_ckpt VOC_CKPT Checkpoint file of voc. --voc_stat VOC_STAT mean and standard deviation used to normalize spectrogram when training voc. 
@@ -158,12 +157,12 @@ CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh ${conf_path} ${train_outp ``` ```text usage: synthesize_e2e.py [-h] - [--am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk}] + [--am {speedyspeech_csmsc,speedyspeech_aishell3,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech}] [--am_config AM_CONFIG] [--am_ckpt AM_CKPT] [--am_stat AM_STAT] [--phones_dict PHONES_DICT] [--tones_dict TONES_DICT] [--speaker_dict SPEAKER_DICT] [--spk_id SPK_ID] - [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc}] + [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,style_melgan_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,wavernn_csmsc}] [--voc_config VOC_CONFIG] [--voc_ckpt VOC_CKPT] [--voc_stat VOC_STAT] [--lang LANG] [--inference_dir INFERENCE_DIR] [--ngpu NGPU] @@ -173,11 +172,10 @@ Synthesize with acoustic model & vocoder optional arguments: -h, --help show this help message and exit - --am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk} + --am {speedyspeech_csmsc,speedyspeech_aishell3,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech} Choose acoustic model type of tts task. --am_config AM_CONFIG - Config of acoustic model. Use deault config when it is - None. + Config of acoustic model. --am_ckpt AM_CKPT Checkpoint file of acoustic model. --am_stat AM_STAT mean and standard deviation used to normalize spectrogram when training acoustic model. @@ -188,10 +186,10 @@ optional arguments: --speaker_dict SPEAKER_DICT speaker id map file. --spk_id SPK_ID spk id for multi speaker acoustic model - --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc} + --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,style_melgan_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,wavernn_csmsc} Choose vocoder type of tts task. --voc_config VOC_CONFIG - Config of voc. Use deault config when it is None. + Config of voc. --voc_ckpt VOC_CKPT Checkpoint file of voc. --voc_stat VOC_STAT mean and standard deviation used to normalize spectrogram when training voc. @@ -204,9 +202,9 @@ optional arguments: output dir. ``` 1. `--am` is acoustic model type with the format {model_name}_{dataset} -2. `--am_config`, `--am_checkpoint`, `--am_stat` and `--phones_dict` are arguments for acoustic model, which correspond to the 4 files in the fastspeech2 pretrained model. +2. `--am_config`, `--am_ckpt`, `--am_stat` and `--phones_dict` are arguments for acoustic model, which correspond to the 4 files in the fastspeech2 pretrained model. 3. `--voc` is vocoder type with the format {model_name}_{dataset} -4. `--voc_config`, `--voc_checkpoint`, `--voc_stat` are arguments for vocoder, which correspond to the 3 files in the parallel wavegan pretrained model. +4. `--voc_config`, `--voc_ckpt`, `--voc_stat` are arguments for vocoder, which correspond to the 3 files in the parallel wavegan pretrained model. 5. `--lang` is the model language, which can be `zh` or `en`. 6. `--test_metadata` should be the metadata file in the normalized subfolder of `test` in the `dump` folder. 7. `--text` is the text file, which contains sentences to synthesize. 
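A note on the `--text` file from item 7 above: it is plain text with one `utt_id sentence` pair per line, as the help message states. A tiny hypothetical example (ids and sentences invented):

```text
001 The quick brown fox jumps over the lazy dog.
002 Text to speech converts written sentences into audio.
```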
diff --git a/examples/ljspeech/voc1/README.md b/examples/ljspeech/voc1/README.md index 4513b2a05..6fd6cbe24 100644 --- a/examples/ljspeech/voc1/README.md +++ b/examples/ljspeech/voc1/README.md @@ -65,7 +65,7 @@ Train a ParallelWaveGAN model. optional arguments: -h, --help show this help message and exit - --config CONFIG config file to overwrite default config. + --config CONFIG ParallelWaveGAN config file. --train-metadata TRAIN_METADATA training data. --dev-metadata DEV_METADATA diff --git a/examples/ljspeech/voc5/README.md b/examples/ljspeech/voc5/README.md index 9b31e2650..afc1bb8be 100644 --- a/examples/ljspeech/voc5/README.md +++ b/examples/ljspeech/voc5/README.md @@ -57,15 +57,13 @@ Here's the complete help message. ```text usage: train.py [-h] [--config CONFIG] [--train-metadata TRAIN_METADATA] [--dev-metadata DEV_METADATA] [--output-dir OUTPUT_DIR] - [--ngpu NGPU] [--batch-size BATCH_SIZE] [--max-iter MAX_ITER] - [--run-benchmark RUN_BENCHMARK] - [--profiler_options PROFILER_OPTIONS] + [--ngpu NGPU] -Train a ParallelWaveGAN model. +Train a HiFiGAN model. optional arguments: -h, --help show this help message and exit - --config CONFIG config file to overwrite default config. + --config CONFIG HiFiGAN config file. --train-metadata TRAIN_METADATA training data. --dev-metadata DEV_METADATA @@ -73,19 +71,6 @@ optional arguments: --output-dir OUTPUT_DIR output dir. --ngpu NGPU if ngpu == 0, use cpu. - -benchmark: - arguments related to benchmark. - - --batch-size BATCH_SIZE - batch size. - --max-iter MAX_ITER train max steps. - --run-benchmark RUN_BENCHMARK - runing benchmark or not, if True, use the --batch-size - and --max-iter. - --profiler_options PROFILER_OPTIONS - The option of profiler, which should be in format - "key1=value1;key2=value2;key3=value3". ``` 1. `--config` is a config file in yaml format to overwrite the default config, which can be found at `conf/default.yaml`. diff --git a/examples/vctk/tts3/README.md b/examples/vctk/tts3/README.md index f373ca6a3..379f5c0fd 100644 --- a/examples/vctk/tts3/README.md +++ b/examples/vctk/tts3/README.md @@ -112,12 +112,12 @@ CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize.sh ${conf_path} ${train_output_p ``` ```text usage: synthesize.py [-h] - [--am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk}] + [--am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech,tacotron2_aishell3}] [--am_config AM_CONFIG] [--am_ckpt AM_CKPT] [--am_stat AM_STAT] [--phones_dict PHONES_DICT] [--tones_dict TONES_DICT] [--speaker_dict SPEAKER_DICT] [--voice-cloning VOICE_CLONING] - [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc}] + [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,wavernn_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,style_melgan_csmsc}] [--voc_config VOC_CONFIG] [--voc_ckpt VOC_CKPT] [--voc_stat VOC_STAT] [--ngpu NGPU] [--test_metadata TEST_METADATA] [--output_dir OUTPUT_DIR] @@ -126,11 +126,10 @@ Synthesize with acoustic model & vocoder optional arguments: -h, --help show this help message and exit - --am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk} + --am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech,tacotron2_aishell3} Choose acoustic model type of tts task. 
--am_config AM_CONFIG - Config of acoustic model. Use deault config when it is - None. + Config of acoustic model. --am_ckpt AM_CKPT Checkpoint file of acoustic model. --am_stat AM_STAT mean and standard deviation used to normalize spectrogram when training acoustic model. @@ -142,10 +141,10 @@ optional arguments: speaker id map file. --voice-cloning VOICE_CLONING whether training voice cloning model. - --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc} + --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,wavernn_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,style_melgan_csmsc} Choose vocoder type of tts task. --voc_config VOC_CONFIG - Config of voc. Use deault config when it is None. + Config of voc. --voc_ckpt VOC_CKPT Checkpoint file of voc. --voc_stat VOC_STAT mean and standard deviation used to normalize spectrogram when training voc. @@ -161,12 +160,12 @@ CUDA_VISIBLE_DEVICES=${gpus} ./local/synthesize_e2e.sh ${conf_path} ${train_outp ``` ```text usage: synthesize_e2e.py [-h] - [--am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk}] + [--am {speedyspeech_csmsc,speedyspeech_aishell3,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech}] [--am_config AM_CONFIG] [--am_ckpt AM_CKPT] [--am_stat AM_STAT] [--phones_dict PHONES_DICT] [--tones_dict TONES_DICT] [--speaker_dict SPEAKER_DICT] [--spk_id SPK_ID] - [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc}] + [--voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,style_melgan_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,wavernn_csmsc}] [--voc_config VOC_CONFIG] [--voc_ckpt VOC_CKPT] [--voc_stat VOC_STAT] [--lang LANG] [--inference_dir INFERENCE_DIR] [--ngpu NGPU] @@ -176,11 +175,10 @@ Synthesize with acoustic model & vocoder optional arguments: -h, --help show this help message and exit - --am {speedyspeech_csmsc,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk} + --am {speedyspeech_csmsc,speedyspeech_aishell3,fastspeech2_csmsc,fastspeech2_ljspeech,fastspeech2_aishell3,fastspeech2_vctk,tacotron2_csmsc,tacotron2_ljspeech} Choose acoustic model type of tts task. --am_config AM_CONFIG - Config of acoustic model. Use deault config when it is - None. + Config of acoustic model. --am_ckpt AM_CKPT Checkpoint file of acoustic model. --am_stat AM_STAT mean and standard deviation used to normalize spectrogram when training acoustic model. @@ -191,10 +189,10 @@ optional arguments: --speaker_dict SPEAKER_DICT speaker id map file. --spk_id SPK_ID spk id for multi speaker acoustic model - --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc} + --voc {pwgan_csmsc,pwgan_ljspeech,pwgan_aishell3,pwgan_vctk,mb_melgan_csmsc,style_melgan_csmsc,hifigan_csmsc,hifigan_ljspeech,hifigan_aishell3,hifigan_vctk,wavernn_csmsc} Choose vocoder type of tts task. --voc_config VOC_CONFIG - Config of voc. Use deault config when it is None. + Config of voc. --voc_ckpt VOC_CKPT Checkpoint file of voc. --voc_stat VOC_STAT mean and standard deviation used to normalize spectrogram when training voc. @@ -207,9 +205,9 @@ optional arguments: output dir. ``` 1. `--am` is acoustic model type with the format {model_name}_{dataset} -2. 
`--am_config`, `--am_checkpoint`, `--am_stat`, `--phones_dict` `--speaker_dict` are arguments for acoustic model, which correspond to the 5 files in the fastspeech2 pretrained model. +2. `--am_config`, `--am_ckpt`, `--am_stat`, `--phones_dict` `--speaker_dict` are arguments for acoustic model, which correspond to the 5 files in the fastspeech2 pretrained model. 3. `--voc` is vocoder type with the format {model_name}_{dataset} -4. `--voc_config`, `--voc_checkpoint`, `--voc_stat` are arguments for vocoder, which correspond to the 3 files in the parallel wavegan pretrained model. +4. `--voc_config`, `--voc_ckpt`, `--voc_stat` are arguments for vocoder, which correspond to the 3 files in the parallel wavegan pretrained model. 5. `--lang` is the model language, which can be `zh` or `en`. 6. `--test_metadata` should be the metadata file in the normalized subfolder of `test` in the `dump` folder. 7. `--text` is the text file, which contains sentences to synthesize. diff --git a/examples/vctk/voc1/README.md b/examples/vctk/voc1/README.md index 1c3016f88..c4c40d1d0 100644 --- a/examples/vctk/voc1/README.md +++ b/examples/vctk/voc1/README.md @@ -70,7 +70,7 @@ Train a ParallelWaveGAN model. optional arguments: -h, --help show this help message and exit - --config CONFIG config file to overwrite default config. + --config CONFIG ParallelWaveGAN config file. --train-metadata TRAIN_METADATA training data. --dev-metadata DEV_METADATA diff --git a/examples/vctk/voc5/README.md b/examples/vctk/voc5/README.md index 4eb25c02d..c53d46325 100644 --- a/examples/vctk/voc5/README.md +++ b/examples/vctk/voc5/README.md @@ -62,15 +62,13 @@ Here's the complete help message. ```text usage: train.py [-h] [--config CONFIG] [--train-metadata TRAIN_METADATA] [--dev-metadata DEV_METADATA] [--output-dir OUTPUT_DIR] - [--ngpu NGPU] [--batch-size BATCH_SIZE] [--max-iter MAX_ITER] - [--run-benchmark RUN_BENCHMARK] - [--profiler_options PROFILER_OPTIONS] + [--ngpu NGPU] -Train a ParallelWaveGAN model. +Train a HiFiGAN model. optional arguments: -h, --help show this help message and exit - --config CONFIG config file to overwrite default config. + --config CONFIG HiFiGAN config file. --train-metadata TRAIN_METADATA training data. --dev-metadata DEV_METADATA @@ -78,19 +76,6 @@ optional arguments: --output-dir OUTPUT_DIR output dir. --ngpu NGPU if ngpu == 0, use cpu. - -benchmark: - arguments related to benchmark. - - --batch-size BATCH_SIZE - batch size. - --max-iter MAX_ITER train max steps. - --run-benchmark RUN_BENCHMARK - runing benchmark or not, if True, use the --batch-size - and --max-iter. - --profiler_options PROFILER_OPTIONS - The option of profiler, which should be in format - "key1=value1;key2=value2;key3=value3". ``` 1. `--config` is a config file in yaml format to overwrite the default config, which can be found at `conf/default.yaml`. 
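To make the override mechanism concrete: the yaml file passed via `--config` replaces matching keys of `conf/default.yaml`, so it only needs the keys that should differ. A minimal hypothetical override (key names are illustrative; the authoritative set is whatever `conf/default.yaml` defines):

```yaml
# override only what should differ from conf/default.yaml
batch_size: 8            # e.g. to fit a smaller GPU
train_max_steps: 100000  # e.g. to stop earlier than the default
```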
diff --git a/paddlespeech/t2s/exps/gan_vocoder/hifigan/train.py b/paddlespeech/t2s/exps/gan_vocoder/hifigan/train.py index c70821e78..4c733dc9b 100644 --- a/paddlespeech/t2s/exps/gan_vocoder/hifigan/train.py +++ b/paddlespeech/t2s/exps/gan_vocoder/hifigan/train.py @@ -243,8 +243,7 @@ def main(): # parse args and config and redirect to train_sp parser = argparse.ArgumentParser(description="Train a HiFiGAN model.") - parser.add_argument( - "--config", type=str, help="config file to overwrite default config.") + parser.add_argument("--config", type=str, help="HiFiGAN config file.") parser.add_argument("--train-metadata", type=str, help="training data.") parser.add_argument("--dev-metadata", type=str, help="dev data.") parser.add_argument("--output-dir", type=str, help="output dir.") diff --git a/paddlespeech/t2s/exps/gan_vocoder/multi_band_melgan/train.py b/paddlespeech/t2s/exps/gan_vocoder/multi_band_melgan/train.py index 27ffded63..3b3ebb478 100644 --- a/paddlespeech/t2s/exps/gan_vocoder/multi_band_melgan/train.py +++ b/paddlespeech/t2s/exps/gan_vocoder/multi_band_melgan/train.py @@ -233,7 +233,7 @@ def main(): parser = argparse.ArgumentParser( description="Train a Multi-Band MelGAN model.") parser.add_argument( - "--config", type=str, help="config file to overwrite default config.") + "--config", type=str, help="Multi-Band MelGAN config file.") parser.add_argument("--train-metadata", type=str, help="training data.") parser.add_argument("--dev-metadata", type=str, help="dev data.") parser.add_argument("--output-dir", type=str, help="output dir.") diff --git a/paddlespeech/t2s/exps/gan_vocoder/parallelwave_gan/train.py b/paddlespeech/t2s/exps/gan_vocoder/parallelwave_gan/train.py index 92de7a2c4..b26407028 100644 --- a/paddlespeech/t2s/exps/gan_vocoder/parallelwave_gan/train.py +++ b/paddlespeech/t2s/exps/gan_vocoder/parallelwave_gan/train.py @@ -208,7 +208,7 @@ def main(): parser = argparse.ArgumentParser( description="Train a ParallelWaveGAN model.") parser.add_argument( - "--config", type=str, help="config file to overwrite default config.") + "--config", type=str, help="ParallelWaveGAN config file.") parser.add_argument("--train-metadata", type=str, help="training data.") parser.add_argument("--dev-metadata", type=str, help="dev data.") parser.add_argument("--output-dir", type=str, help="output dir.") diff --git a/paddlespeech/t2s/exps/gan_vocoder/style_melgan/train.py b/paddlespeech/t2s/exps/gan_vocoder/style_melgan/train.py index be3ba7425..a87cc7a18 100644 --- a/paddlespeech/t2s/exps/gan_vocoder/style_melgan/train.py +++ b/paddlespeech/t2s/exps/gan_vocoder/style_melgan/train.py @@ -224,8 +224,7 @@ def main(): # parse args and config and redirect to train_sp parser = argparse.ArgumentParser(description="Train a Style MelGAN model.") - parser.add_argument( - "--config", type=str, help="config file to overwrite default config.") + parser.add_argument("--config", type=str, help="Style MelGAN config file.") parser.add_argument("--train-metadata", type=str, help="training data.") parser.add_argument("--dev-metadata", type=str, help="dev data.") parser.add_argument("--output-dir", type=str, help="output dir.") diff --git a/paddlespeech/t2s/exps/transformer_tts/train.py b/paddlespeech/t2s/exps/transformer_tts/train.py index 45ecb269b..da48b6b99 100644 --- a/paddlespeech/t2s/exps/transformer_tts/train.py +++ b/paddlespeech/t2s/exps/transformer_tts/train.py @@ -160,7 +160,7 @@ def main(): parser = argparse.ArgumentParser(description="Train a TransformerTTS " "model with LJSpeech TTS dataset.") 
parser.add_argument( - "--config", type=str, help="config file to overwrite default config.") + "--config", type=str, help="TransformerTTS config file.") parser.add_argument("--train-metadata", type=str, help="training data.") parser.add_argument("--dev-metadata", type=str, help="dev data.") parser.add_argument("--output-dir", type=str, help="output dir.") diff --git a/paddlespeech/t2s/exps/vits/train.py b/paddlespeech/t2s/exps/vits/train.py index b921f92af..dbda8b717 100644 --- a/paddlespeech/t2s/exps/vits/train.py +++ b/paddlespeech/t2s/exps/vits/train.py @@ -226,9 +226,8 @@ def train_sp(args, config): def main(): # parse args and config and redirect to train_sp - parser = argparse.ArgumentParser(description="Train a HiFiGAN model.") - parser.add_argument( - "--config", type=str, help="config file to overwrite default config.") + parser = argparse.ArgumentParser(description="Train a VITS model.") + parser.add_argument("--config", type=str, help="VITS config file") parser.add_argument("--train-metadata", type=str, help="training data.") parser.add_argument("--dev-metadata", type=str, help="dev data.") parser.add_argument("--output-dir", type=str, help="output dir.") diff --git a/paddlespeech/t2s/exps/wavernn/train.py b/paddlespeech/t2s/exps/wavernn/train.py index 8661d311d..cf24ea268 100644 --- a/paddlespeech/t2s/exps/wavernn/train.py +++ b/paddlespeech/t2s/exps/wavernn/train.py @@ -180,8 +180,7 @@ def main(): # parse args and config and redirect to train_sp parser = argparse.ArgumentParser(description="Train a WaveRNN model.") - parser.add_argument( - "--config", type=str, help="config file to overwrite default config.") + parser.add_argument("--config", type=str, help="WaveRNN config file.") parser.add_argument("--train-metadata", type=str, help="training data.") parser.add_argument("--dev-metadata", type=str, help="dev data.") parser.add_argument("--output-dir", type=str, help="output dir.") From 537aff9704c5c61e8f5bc334486599996279fa82 Mon Sep 17 00:00:00 2001 From: Yang Zhou Date: Wed, 25 May 2022 20:54:11 +0800 Subject: [PATCH 045/127] refactor example dir & add aishell build TLG --- speechx/examples/dev/CMakeLists.txt | 3 - speechx/examples/dev/glog/CMakeLists.txt | 8 - speechx/examples/dev/glog/README.md | 25 --- .../dev/glog/glog_logtostderr_test.cc | 25 --- speechx/examples/dev/glog/glog_test.cc | 23 --- speechx/examples/dev/glog/path.sh | 15 -- speechx/examples/dev/glog/run.sh | 22 --- speechx/examples/ds2_ol/aishell/README.md | 37 ++++ .../aishell}/local/aishell_train_lms.sh | 0 .../aishell}/local/text_to_lexicon.py | 0 speechx/examples/ds2_ol/aishell/path.sh | 12 +- .../examples/ds2_ol/aishell/run_build_tlg.sh | 141 +++++++++++++ speechx/examples/ds2_ol/aishell/run_fbank.sh | 1 - speechx/examples/ngram/.gitignore | 2 - speechx/examples/ngram/en/README.md | 0 speechx/examples/ngram/zh/README.md | 101 ---------- speechx/examples/ngram/zh/local/split_data.sh | 30 --- speechx/examples/ngram/zh/path.sh | 12 -- speechx/examples/ngram/zh/run.sh | 68 ------- speechx/examples/ngram/zh/utils | 1 - speechx/examples/wfst/.gitignore | 1 - speechx/examples/wfst/README.md | 186 ------------------ speechx/examples/wfst/path.sh | 19 -- speechx/examples/wfst/run.sh | 29 --- speechx/examples/wfst/utils | 1 - 25 files changed, 189 insertions(+), 573 deletions(-) delete mode 100644 speechx/examples/dev/CMakeLists.txt delete mode 100644 speechx/examples/dev/glog/CMakeLists.txt delete mode 100644 speechx/examples/dev/glog/README.md delete mode 100644 
speechx/examples/dev/glog/glog_logtostderr_test.cc delete mode 100644 speechx/examples/dev/glog/glog_test.cc delete mode 100644 speechx/examples/dev/glog/path.sh delete mode 100755 speechx/examples/dev/glog/run.sh rename speechx/examples/{ngram/zh => ds2_ol/aishell}/local/aishell_train_lms.sh (100%) rename speechx/examples/{ngram/zh => ds2_ol/aishell}/local/text_to_lexicon.py (100%) create mode 100755 speechx/examples/ds2_ol/aishell/run_build_tlg.sh delete mode 100644 speechx/examples/ngram/.gitignore delete mode 100644 speechx/examples/ngram/en/README.md delete mode 100644 speechx/examples/ngram/zh/README.md delete mode 100755 speechx/examples/ngram/zh/local/split_data.sh delete mode 100644 speechx/examples/ngram/zh/path.sh delete mode 100755 speechx/examples/ngram/zh/run.sh delete mode 120000 speechx/examples/ngram/zh/utils delete mode 100644 speechx/examples/wfst/.gitignore delete mode 100644 speechx/examples/wfst/README.md delete mode 100644 speechx/examples/wfst/path.sh delete mode 100755 speechx/examples/wfst/run.sh delete mode 120000 speechx/examples/wfst/utils diff --git a/speechx/examples/dev/CMakeLists.txt b/speechx/examples/dev/CMakeLists.txt deleted file mode 100644 index c8445fb82..000000000 --- a/speechx/examples/dev/CMakeLists.txt +++ /dev/null @@ -1,3 +0,0 @@ -cmake_minimum_required(VERSION 3.14 FATAL_ERROR) - -add_subdirectory(glog) diff --git a/speechx/examples/dev/glog/CMakeLists.txt b/speechx/examples/dev/glog/CMakeLists.txt deleted file mode 100644 index b4b0e6358..000000000 --- a/speechx/examples/dev/glog/CMakeLists.txt +++ /dev/null @@ -1,8 +0,0 @@ -cmake_minimum_required(VERSION 3.14 FATAL_ERROR) - -add_executable(glog_test ${CMAKE_CURRENT_SOURCE_DIR}/glog_test.cc) -target_link_libraries(glog_test glog) - - -add_executable(glog_logtostderr_test ${CMAKE_CURRENT_SOURCE_DIR}/glog_logtostderr_test.cc) -target_link_libraries(glog_logtostderr_test glog) \ No newline at end of file diff --git a/speechx/examples/dev/glog/README.md b/speechx/examples/dev/glog/README.md deleted file mode 100644 index 996e192e9..000000000 --- a/speechx/examples/dev/glog/README.md +++ /dev/null @@ -1,25 +0,0 @@ -# [GLOG](https://rpg.ifi.uzh.ch/docs/glog.html) - -Unless otherwise specified, glog writes to the filename `/tmp/...log... 
--3.828829 -0.1600094 --6.157292 - -==> data/local/lm/text <== -BAC009S0002W0122 而 对 楼市 成交 抑制 作用 最 大 的 限 购 -BAC009S0002W0123 也 成为 地方 政府 的 眼中 钉 -BAC009S0002W0124 自 六月 底 呼和浩特 市 率先 宣布 取消 限 购 后 -BAC009S0002W0125 各地 政府 便 纷纷 跟进 -BAC009S0002W0126 仅 一 个 多 月 的 时间 里 -BAC009S0002W0127 除了 北京 上海 广州 深圳 四 个 一 线 城市 和 三亚 之外 -BAC009S0002W0128 四十六 个 限 购 城市 当中 -BAC009S0002W0129 四十一 个 已 正式 取消 或 变相 放松 了 限 购 -BAC009S0002W0130 财政 金融 政策 紧随 其后 而来 -BAC009S0002W0131 显示 出 了 极 强 的 威力 - -==> data/local/lm/text.no_oov <== - 而 对 楼市 成交 抑制 作用 最 大 的 限 购 - 也 成为 地方 政府 的 眼中 钉 - 自 六月 底 呼和浩特 市 率先 宣布 取消 限 购 后 - 各地 政府 便 纷纷 跟进 - 仅 一 个 多 月 的 时间 里 - 除了 北京 上海 广州 深圳 四 个 一 线 城市 和 三亚 之外 - 四十六 个 限 购 城市 当中 - 四十一 个 已 正式 取消 或 变相 放松 了 限 购 - 财政 ���融 政策 紧随 其后 而来 - 显示 出 了 极 强 的 威力 - -==> data/local/lm/train <== -汉莎 不 得 不 通过 这样 的 方式 寻求 新 的 发展 点 -并 计划 朝云 计算 方面 发展 -汉莎 的 基础 设施 部门 拥有 一千四百 名 员工 -媒体 就 曾 披露 这笔 交易 -虽然 双方 已经 正式 签署 了 外包 协议 -但是 这笔 交易 还 需要 得到 反 垄断 部门 的 批准 -陈 黎明 一九八九 年 获得 美国 康乃尔 大学 硕士 学位 -并 于 二零零三 年 顺利 完成 美国 哈佛 商学 院 高级 管理 课程 -曾 在 多家 国际 公司 任职 -拥有 业务 开发 商务 及 企业 治理 - -==> data/local/lm/unigram.counts <== - 57487 的 - 13099 在 - 11862 一 - 11397 了 - 10998 不 - 9913 是 - 7952 有 - 6250 和 - 6152 个 - 5422 将 - -==> data/local/lm/word.counts <== - 57486 的 - 13098 在 - 11861 一 - 11396 了 - 10997 不 - 9912 是 - 7951 有 - 6249 和 - 6151 个 - 5421 将 - -==> data/local/lm/wordlist <== -的 -在 -一 -了 -不 -是 -有 -和 -个 -将 -``` - -## Output - -``` -fstaddselfloops 'echo 4234 |' 'echo 123660 |' -Lexicon and Token FSTs compiling succeeded -arpa2fst --read-symbol-table=data/lang_test/words.txt --keep-symbols=true - -LOG (arpa2fst[5.5.0~1-5a37]:Read():arpa-file-parser.cc:94) Reading \data\ section. -LOG (arpa2fst[5.5.0~1-5a37]:Read():arpa-file-parser.cc:149) Reading \1-grams: section. -LOG (arpa2fst[5.5.0~1-5a37]:Read():arpa-file-parser.cc:149) Reading \2-grams: section. -LOG (arpa2fst[5.5.0~1-5a37]:Read():arpa-file-parser.cc:149) Reading \3-grams: section. -Checking how stochastic G is (the first of these numbers should be small): -fstisstochastic data/lang_test/G.fst -0 -1.14386 -fsttablecompose data/lang_test/L.fst data/lang_test/G.fst -fstminimizeencoded -fstdeterminizestar --use-log=true -fsttablecompose data/lang_test/T.fst data/lang_test/LG.fst -Composing decoding graph TLG.fst succeeded -Aishell build TLG done. -``` - -``` -data/ -├── lang_test -│ ├── G.fst -│ ├── L.fst -│ ├── LG.fst -│ ├── T.fst -│ ├── TLG.fst -│ ├── tokens.txt -│ ├── units.txt -│ └── words.txt -└── local - ├── lang - │ ├── L.fst - │ ├── T.fst - │ ├── tokens.txt - │ ├── units.txt - │ └── words.txt - └── tmp - ├── disambig.list - ├── lexiconp_disambig.txt - ├── lexiconp.txt - └── units.list -``` diff --git a/speechx/examples/wfst/path.sh b/speechx/examples/wfst/path.sh deleted file mode 100644 index a07c1297d..000000000 --- a/speechx/examples/wfst/path.sh +++ /dev/null @@ -1,19 +0,0 @@ -# This contains the locations of binarys build required for running the examples. - -MAIN_ROOT=`realpath $PWD/../../../` -SPEECHX_ROOT=`realpath $MAIN_ROOT/speechx` - -export LC_AL=C - -# srilm -export LIBLBFGS=${MAIN_ROOT}/tools/liblbfgs-1.10 -export LD_LIBRARY_PATH=${LD_LIBRARY_PATH:-}:${LIBLBFGS}/lib/.libs -export SRILM=${MAIN_ROOT}/tools/srilm -export PATH=${PATH}:${SRILM}/bin:${SRILM}/bin/i686-m64 - -# Kaldi -export KALDI_ROOT=${MAIN_ROOT}/tools/kaldi -[ -f $KALDI_ROOT/tools/env.sh ] && . $KALDI_ROOT/tools/env.sh -export PATH=$PWD/utils/:$KALDI_ROOT/tools/openfst/bin:$PWD:$PATH -[ ! 
-f $KALDI_ROOT/tools/config/common_path.sh ] && echo >&2 "The standard file $KALDI_ROOT/tools/config/common_path.sh is not present, can not using Kaldi!"
-[ -f $KALDI_ROOT/tools/config/common_path.sh ] && . $KALDI_ROOT/tools/config/common_path.sh
diff --git a/speechx/examples/wfst/run.sh b/speechx/examples/wfst/run.sh
deleted file mode 100755
index 1354646af..000000000
--- a/speechx/examples/wfst/run.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/bash
-set -eo pipefail
-
-. path.sh
-
-stage=-1
-stop_stage=100
-
-. utils/parse_options.sh
-
-if ! which fstprint ; then
-    pushd $MAIN_ROOT/tools
-    make kaldi.done
-    popd
-fi
-
-if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
-    # build T & L
-    # utils/fst/compile_lexicon_token_fst.sh
-    utils/fst/compile_lexicon_token_fst.sh \
-        data/local/dict data/local/tmp data/local/lang
-
-    # build G & LG & TLG
-    # utils/fst/make_tlg.sh
-    utils/fst/make_tlg.sh data/local/lm data/local/lang data/lang_test || exit 1;
-fi
-
-echo "build TLG done."
-exit 0
diff --git a/speechx/examples/wfst/utils b/speechx/examples/wfst/utils
deleted file mode 120000
index 256f914ab..000000000
--- a/speechx/examples/wfst/utils
+++ /dev/null
@@ -1 +0,0 @@
-../../../utils/
\ No newline at end of file

From f852514a3ef31b32f583c17bc282e4e0db809719 Mon Sep 17 00:00:00 2001
From: Yang Zhou
Date: Wed, 25 May 2022 21:25:01 +0800
Subject: [PATCH 046/127] mv text_to_lexicon.py to utils

---
 .../examples/ds2_ol/aishell/run_build_tlg.sh |  2 +-
 utils/text_to_lexicon.py                     | 37 +++++++++++++++++++
 2 files changed, 38 insertions(+), 1 deletion(-)
 create mode 100755 utils/text_to_lexicon.py

diff --git a/speechx/examples/ds2_ol/aishell/run_build_tlg.sh b/speechx/examples/ds2_ol/aishell/run_build_tlg.sh
index 68a31de4f..4394ac5a0 100755
--- a/speechx/examples/ds2_ol/aishell/run_build_tlg.sh
+++ b/speechx/examples/ds2_ol/aishell/run_build_tlg.sh
@@ -55,7 +55,7 @@ if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
     cp $unit data/local/dict/units.txt
 
     if [ ! -f $lexicon ];then
-        local/text_to_lexicon.py --has_key true --text $text --lexicon $lexicon
+        utils/text_to_lexicon.py --has_key true --text $text --lexicon $lexicon
         echo "Generate $lexicon from $text"
     fi
 
diff --git a/utils/text_to_lexicon.py b/utils/text_to_lexicon.py
new file mode 100755
index 000000000..ba5ab60ac
--- /dev/null
+++ b/utils/text_to_lexicon.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python3
+import argparse
+from collections import Counter
+
+
+def main(args):
+    counter = Counter()
+    with open(args.text, 'r') as fin, open(args.lexicon, 'w') as fout:
+        for line in fin:
+            line = line.strip()
+            if args.has_key:
+                utt, text = line.split(maxsplit=1)
+                words = text.split()
+            else:
+                words = line.split()
+
+            counter.update(words)
+
+        for word in counter:
+            val = " ".join(list(word))
+            fout.write(f"{word}\t{val}\n")
+            fout.flush()
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        description='text(line:utt1 中国 人) to lexicon(line:中国 中 国).')
+    parser.add_argument(
+        '--has_key', default=True, help='whether each text line starts with an utterance key (note: any non-empty value is treated as true)')
+    parser.add_argument(
+        '--text', required=True, help='text path. line: utt1 中国 人 or 中国 人')
+    parser.add_argument(
+        '--lexicon', required=True, help='lexicon path. line:中国 中 国')
+    args = parser.parse_args()
+    print(args)
+
+    main(args)

From 6c57c2bf8e3568ab5518731de113d075467aeb9a Mon Sep 17 00:00:00 2001
From: KP <109694228@qq.com>
Date: Wed, 25 May 2022 21:32:14 +0800
Subject: [PATCH 047/127] Dynamic cli commands registration.
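The pattern, in sketch form: the command table stores each executor as a dotted import-path string, and the class is only imported when its command is actually run, so `import paddlespeech.cli` stays cheap. A minimal, simplified illustration of the idea (names invented; the real code below keeps a nested command tree and resolves the string in `entry.py`):

```python
import importlib

_registry = {}  # command name -> import-path string, later the resolved class


def register(name: str, cls_path: str):
    # e.g. register("asr", "paddlespeech.cli.asr.ASRExecutor")
    _registry[name] = cls_path


def resolve(name: str):
    entry = _registry[name]
    if isinstance(entry, str):  # not imported yet: import on first use
        module_path, cls_name = entry.rsplit('.', 1)
        entry = getattr(importlib.import_module(module_path), cls_name)
        _registry[name] = entry  # cache the class for subsequent calls
    return entry
```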
--- paddlespeech/cli/__init__.py | 7 ------- paddlespeech/cli/asr/infer.py | 3 --- paddlespeech/cli/base_commands.py | 18 ++++++++++++++++++ paddlespeech/cli/cls/infer.py | 5 +---- paddlespeech/cli/entry.py | 5 +++++ paddlespeech/cli/st/infer.py | 3 --- paddlespeech/cli/text/infer.py | 2 -- paddlespeech/cli/tts/infer.py | 3 --- paddlespeech/cli/utils.py | 11 +++++++++++ paddlespeech/cli/vector/infer.py | 6 +----- 10 files changed, 36 insertions(+), 27 deletions(-) diff --git a/paddlespeech/cli/__init__.py b/paddlespeech/cli/__init__.py index ddf0359bc..ca6993f2b 100644 --- a/paddlespeech/cli/__init__.py +++ b/paddlespeech/cli/__init__.py @@ -13,14 +13,7 @@ # limitations under the License. import _locale -from .asr import ASRExecutor from .base_commands import BaseCommand from .base_commands import HelpCommand -from .cls import CLSExecutor -from .st import STExecutor -from .stats import StatsExecutor -from .text import TextExecutor -from .tts import TTSExecutor -from .vector import VectorExecutor _locale._getdefaultlocale = (lambda *args: ['en_US', 'utf8']) diff --git a/paddlespeech/cli/asr/infer.py b/paddlespeech/cli/asr/infer.py index 2d74afa6d..09e8202fd 100644 --- a/paddlespeech/cli/asr/infer.py +++ b/paddlespeech/cli/asr/infer.py @@ -29,7 +29,6 @@ from yacs.config import CfgNode from ..download import get_path_from_url from ..executor import BaseExecutor from ..log import logger -from ..utils import cli_register from ..utils import CLI_TIMER from ..utils import MODEL_HOME from ..utils import stats_wrapper @@ -45,8 +44,6 @@ __all__ = ['ASRExecutor'] @timer_register -@cli_register( - name='paddlespeech.asr', description='Speech to text infer command.') class ASRExecutor(BaseExecutor): def __init__(self): super().__init__() diff --git a/paddlespeech/cli/base_commands.py b/paddlespeech/cli/base_commands.py index 0a26b1203..4d4d2cc69 100644 --- a/paddlespeech/cli/base_commands.py +++ b/paddlespeech/cli/base_commands.py @@ -15,6 +15,7 @@ from typing import List from .entry import commands from .utils import cli_register +from .utils import explicit_command_register from .utils import get_command __all__ = [ @@ -73,3 +74,20 @@ class VersionCommand: print(msg) return True + + +# Dynamic import when running specific command +_commands = { + 'asr': ['Speech to text infer command.', 'ASRExecutor'], + 'cls': ['Audio classification infer command.', 'CLSExecutor'], + 'st': ['Speech translation infer command.', 'STExecutor'], + 'text': ['Text command.', 'TextExecutor'], + 'tts': ['Text to Speech infer command.', 'TTSExecutor'], + 'vector': ['Speech to vector embedding infer command.', 'VectorExecutor'], +} + +for com, info in _commands.items(): + explicit_command_register( + name='paddlespeech.{}'.format(com), + description=info[0], + cls='paddlespeech.cli.{}.{}'.format(com, info[1])) diff --git a/paddlespeech/cli/cls/infer.py b/paddlespeech/cli/cls/infer.py index 40072d997..3d807b60b 100644 --- a/paddlespeech/cli/cls/infer.py +++ b/paddlespeech/cli/cls/infer.py @@ -27,7 +27,6 @@ from paddlespeech.utils.dynamic_import import dynamic_import from ..executor import BaseExecutor from ..log import logger -from ..utils import cli_register from ..utils import stats_wrapper from .pretrained_models import model_alias from .pretrained_models import pretrained_models @@ -36,8 +35,6 @@ from .pretrained_models import pretrained_models __all__ = ['CLSExecutor'] -@cli_register( - name='paddlespeech.cls', description='Audio classification infer command.') class CLSExecutor(BaseExecutor): def __init__(self): 
super().__init__() @@ -246,4 +243,4 @@ class CLSExecutor(BaseExecutor): self.infer() res = self.postprocess(topk) # Retrieve result of cls. - return res \ No newline at end of file + return res diff --git a/paddlespeech/cli/entry.py b/paddlespeech/cli/entry.py index 32123ece7..e0c306d62 100644 --- a/paddlespeech/cli/entry.py +++ b/paddlespeech/cli/entry.py @@ -34,6 +34,11 @@ def _execute(): # The method 'execute' of a command instance returns 'True' for a success # while 'False' for a failure. Here converts this result into a exit status # in bash: 0 for a success and 1 for a failure. + if not callable(com['_entry']): + i = com['_entry'].rindex('.') + module, cls = com['_entry'][:i], com['_entry'][i + 1:] + exec("from {} import {}".format(module, cls)) + com['_entry'] = locals()[cls] status = 0 if com['_entry']().execute(sys.argv[idx:]) else 1 return status diff --git a/paddlespeech/cli/st/infer.py b/paddlespeech/cli/st/infer.py index 4f210fbe6..ae188b349 100644 --- a/paddlespeech/cli/st/infer.py +++ b/paddlespeech/cli/st/infer.py @@ -28,7 +28,6 @@ from yacs.config import CfgNode from ..executor import BaseExecutor from ..log import logger -from ..utils import cli_register from ..utils import download_and_decompress from ..utils import MODEL_HOME from ..utils import stats_wrapper @@ -42,8 +41,6 @@ from paddlespeech.utils.dynamic_import import dynamic_import __all__ = ["STExecutor"] -@cli_register( - name="paddlespeech.st", description="Speech translation infer command.") class STExecutor(BaseExecutor): def __init__(self): super().__init__() diff --git a/paddlespeech/cli/text/infer.py b/paddlespeech/cli/text/infer.py index 97f3bbe21..be5b5a10d 100644 --- a/paddlespeech/cli/text/infer.py +++ b/paddlespeech/cli/text/infer.py @@ -23,7 +23,6 @@ import paddle from ..executor import BaseExecutor from ..log import logger -from ..utils import cli_register from ..utils import stats_wrapper from .pretrained_models import model_alias from .pretrained_models import pretrained_models @@ -33,7 +32,6 @@ from paddlespeech.utils.dynamic_import import dynamic_import __all__ = ['TextExecutor'] -@cli_register(name='paddlespeech.text', description='Text infer command.') class TextExecutor(BaseExecutor): def __init__(self): super().__init__() diff --git a/paddlespeech/cli/tts/infer.py b/paddlespeech/cli/tts/infer.py index efab9cb25..5fa9b3ed0 100644 --- a/paddlespeech/cli/tts/infer.py +++ b/paddlespeech/cli/tts/infer.py @@ -28,7 +28,6 @@ from yacs.config import CfgNode from ..executor import BaseExecutor from ..log import logger -from ..utils import cli_register from ..utils import stats_wrapper from .pretrained_models import model_alias from .pretrained_models import pretrained_models @@ -40,8 +39,6 @@ from paddlespeech.utils.dynamic_import import dynamic_import __all__ = ['TTSExecutor'] -@cli_register( - name='paddlespeech.tts', description='Text to Speech infer command.') class TTSExecutor(BaseExecutor): def __init__(self): super().__init__() diff --git a/paddlespeech/cli/utils.py b/paddlespeech/cli/utils.py index e7b499f72..128767e62 100644 --- a/paddlespeech/cli/utils.py +++ b/paddlespeech/cli/utils.py @@ -41,6 +41,7 @@ requests.adapters.DEFAULT_RETRIES = 3 __all__ = [ 'timer_register', 'cli_register', + 'explicit_command_register', 'get_command', 'download_and_decompress', 'load_state_dict_from_url', @@ -70,6 +71,16 @@ def cli_register(name: str, description: str='') -> Any: return _warpper +def explicit_command_register(name: str, description: str='', cls: str=''): + items = name.split('.') + com = 
commands + for item in items: + com = com[item] + com['_entry'] = cls + if description: + com['_description'] = description + + def get_command(name: str) -> Any: items = name.split('.') com = commands diff --git a/paddlespeech/cli/vector/infer.py b/paddlespeech/cli/vector/infer.py index cc664369f..07fb73a4c 100644 --- a/paddlespeech/cli/vector/infer.py +++ b/paddlespeech/cli/vector/infer.py @@ -28,7 +28,6 @@ from yacs.config import CfgNode from ..executor import BaseExecutor from ..log import logger -from ..utils import cli_register from ..utils import stats_wrapper from .pretrained_models import model_alias from .pretrained_models import pretrained_models @@ -37,9 +36,6 @@ from paddlespeech.vector.io.batch import feature_normalize from paddlespeech.vector.modules.sid_model import SpeakerIdetification -@cli_register( - name="paddlespeech.vector", - description="Speech to vector embedding infer command.") class VectorExecutor(BaseExecutor): def __init__(self): super().__init__() @@ -476,4 +472,4 @@ class VectorExecutor(BaseExecutor): else: logger.info("The audio file format is right") - return True \ No newline at end of file + return True From 27a5de1af7852a70526673495250cf3ae0bc6b86 Mon Sep 17 00:00:00 2001 From: Hui Zhang Date: Thu, 26 May 2022 10:35:08 +0800 Subject: [PATCH 048/127] Update README.md --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index a43e21bd2..c9d4796c8 100644 --- a/README.md +++ b/README.md @@ -24,6 +24,8 @@ | Documents | Models List | AIStudio Courses + | Paper + | Gitee From fe3474729de6dd0720dd1f848eb92a480f485843 Mon Sep 17 00:00:00 2001 From: Hui Zhang Date: Thu, 26 May 2022 10:36:05 +0800 Subject: [PATCH 049/127] Update README_cn.md --- README_cn.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README_cn.md b/README_cn.md index ed5c6a90d..c751b061d 100644 --- a/README_cn.md +++ b/README_cn.md @@ -25,6 +25,8 @@ | 教程文档 | 模型列表 | AIStudio 课程 + | 论文 + | Gitee From 780da806d75f8e07ba62ec47e16a2b5cfa636ac7 Mon Sep 17 00:00:00 2001 From: huangyuxin Date: Thu, 26 May 2022 03:46:01 +0000 Subject: [PATCH 050/127] fix test_cli, test=doc --- tests/unit/cli/test_cli.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/unit/cli/test_cli.sh b/tests/unit/cli/test_cli.sh index e1f1853f6..e0ebd1412 100755 --- a/tests/unit/cli/test_cli.sh +++ b/tests/unit/cli/test_cli.sh @@ -25,7 +25,7 @@ paddlespeech asr --model deepspeech2offline_librispeech --lang en --input ./en.w # long audio restriction { wget -c https://paddlespeech.bj.bcebos.com/datasets/single_wav/zh/test_long_audio_01.wav -paddlespeech asr --input test_long_audio_01.wav +paddlespeech asr --model deepspeech2online_wenetspeech --input test_long_audio_01.wav -y if [ $? 
-ne 255 ]; then echo -e "\e[1;31mTime restriction not passed\e[0m" exit 1 @@ -54,7 +54,7 @@ paddlespeech tts --am tacotron2_ljspeech --voc pwgan_ljspeech --lang en --input # Speech Translation (only support linux) paddlespeech st --input ./en.wav -# Speaker Verification +# Speaker Verification wget -c https://paddlespeech.bj.bcebos.com/vector/audio/85236145389.wav paddlespeech vector --task spk --input 85236145389.wav @@ -65,7 +65,7 @@ echo -e "demo1 85236145389.wav \n demo2 85236145389.wav" > vec.job paddlespeech vector --task spk --input vec.job echo -e "demo3 85236145389.wav \n demo4 85236145389.wav" | paddlespeech vector --task spk -rm 85236145389.wav +rm 85236145389.wav rm vec.job # shell pipeline From 49dadc8044ace30a12782775dc1a8c659a5b30e7 Mon Sep 17 00:00:00 2001 From: KP <109694228@qq.com> Date: Thu, 26 May 2022 13:32:26 +0800 Subject: [PATCH 051/127] Update usage and doc of cli executor. --- demos/audio_searching/src/encode.py | 2 +- demos/audio_tagging/README.md | 2 +- demos/audio_tagging/README_cn.md | 2 +- demos/automatic_video_subtitiles/README.md | 3 ++- demos/automatic_video_subtitiles/README_cn.md | 3 ++- demos/automatic_video_subtitiles/recognize.py | 4 ++-- demos/punctuation_restoration/README.md | 2 +- demos/punctuation_restoration/README_cn.md | 2 +- demos/speaker_verification/README.md | 2 +- demos/speaker_verification/README_cn.md | 2 +- demos/speech_recognition/README.md | 2 +- demos/speech_recognition/README_cn.md | 2 +- demos/speech_translation/README.md | 2 +- demos/speech_translation/README_cn.md | 2 +- demos/text_to_speech/README.md | 2 +- demos/text_to_speech/README_cn.md | 2 +- 16 files changed, 19 insertions(+), 17 deletions(-) diff --git a/demos/audio_searching/src/encode.py b/demos/audio_searching/src/encode.py index c89a11c1f..f6bcb00ad 100644 --- a/demos/audio_searching/src/encode.py +++ b/demos/audio_searching/src/encode.py @@ -14,7 +14,7 @@ import numpy as np from logs import LOGGER -from paddlespeech.cli import VectorExecutor +from paddlespeech.cli.vector import VectorExecutor vector_executor = VectorExecutor() diff --git a/demos/audio_tagging/README.md b/demos/audio_tagging/README.md index 9d4af0be6..fc4a334ea 100644 --- a/demos/audio_tagging/README.md +++ b/demos/audio_tagging/README.md @@ -57,7 +57,7 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/cat.wav https://paddlespe - Python API ```python import paddle - from paddlespeech.cli import CLSExecutor + from paddlespeech.cli.cls import CLSExecutor cls_executor = CLSExecutor() result = cls_executor( diff --git a/demos/audio_tagging/README_cn.md b/demos/audio_tagging/README_cn.md index 79f87bf8c..36b5d8aaf 100644 --- a/demos/audio_tagging/README_cn.md +++ b/demos/audio_tagging/README_cn.md @@ -57,7 +57,7 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/cat.wav https://paddlespe - Python API ```python import paddle - from paddlespeech.cli import CLSExecutor + from paddlespeech.cli.cls import CLSExecutor cls_executor = CLSExecutor() result = cls_executor( diff --git a/demos/automatic_video_subtitiles/README.md b/demos/automatic_video_subtitiles/README.md index db6da40db..b815425ec 100644 --- a/demos/automatic_video_subtitiles/README.md +++ b/demos/automatic_video_subtitiles/README.md @@ -28,7 +28,8 @@ ffmpeg -i subtitle_demo1.mp4 -ac 1 -ar 16000 -vn input.wav - Python API ```python import paddle - from paddlespeech.cli import ASRExecutor, TextExecutor + from paddlespeech.cli.asr import ASRExecutor + from paddlespeech.cli.text import TextExecutor asr_executor = ASRExecutor() 
text_executor = TextExecutor() diff --git a/demos/automatic_video_subtitiles/README_cn.md b/demos/automatic_video_subtitiles/README_cn.md index fc7b2cf6a..990ff6dbd 100644 --- a/demos/automatic_video_subtitiles/README_cn.md +++ b/demos/automatic_video_subtitiles/README_cn.md @@ -23,7 +23,8 @@ ffmpeg -i subtitle_demo1.mp4 -ac 1 -ar 16000 -vn input.wav - Python API ```python import paddle - from paddlespeech.cli import ASRExecutor, TextExecutor + from paddlespeech.cli.asr import ASRExecutor + from paddlespeech.cli.text import TextExecutor asr_executor = ASRExecutor() text_executor = TextExecutor() diff --git a/demos/automatic_video_subtitiles/recognize.py b/demos/automatic_video_subtitiles/recognize.py index 72e3c3a85..304599d19 100644 --- a/demos/automatic_video_subtitiles/recognize.py +++ b/demos/automatic_video_subtitiles/recognize.py @@ -16,8 +16,8 @@ import os import paddle -from paddlespeech.cli import ASRExecutor -from paddlespeech.cli import TextExecutor +from paddlespeech.cli.asr import ASRExecutor +from paddlespeech.cli.text import TextExecutor # yapf: disable parser = argparse.ArgumentParser(__doc__) diff --git a/demos/punctuation_restoration/README.md b/demos/punctuation_restoration/README.md index 518d437dc..458ab92f9 100644 --- a/demos/punctuation_restoration/README.md +++ b/demos/punctuation_restoration/README.md @@ -42,7 +42,7 @@ The input of this demo should be a text of the specific language that can be pas - Python API ```python import paddle - from paddlespeech.cli import TextExecutor + from paddlespeech.cli.text import TextExecutor text_executor = TextExecutor() result = text_executor( diff --git a/demos/punctuation_restoration/README_cn.md b/demos/punctuation_restoration/README_cn.md index 9d4be8bf0..f25acdadb 100644 --- a/demos/punctuation_restoration/README_cn.md +++ b/demos/punctuation_restoration/README_cn.md @@ -44,7 +44,7 @@ - Python API ```python import paddle - from paddlespeech.cli import TextExecutor + from paddlespeech.cli.text import TextExecutor text_executor = TextExecutor() result = text_executor( diff --git a/demos/speaker_verification/README.md b/demos/speaker_verification/README.md index 63dc9294e..900b5ae40 100644 --- a/demos/speaker_verification/README.md +++ b/demos/speaker_verification/README.md @@ -96,7 +96,7 @@ wget -c https://paddlespeech.bj.bcebos.com/vector/audio/85236145389.wav - Python API ```python - from paddlespeech.cli import VectorExecutor + from paddlespeech.cli.vector import VectorExecutor vector_executor = VectorExecutor() audio_emb = vector_executor( diff --git a/demos/speaker_verification/README_cn.md b/demos/speaker_verification/README_cn.md index 07eeac2ee..f6afa86ac 100644 --- a/demos/speaker_verification/README_cn.md +++ b/demos/speaker_verification/README_cn.md @@ -95,7 +95,7 @@ wget -c https://paddlespeech.bj.bcebos.com/vector/audio/85236145389.wav - Python API ```python import paddle - from paddlespeech.cli import VectorExecutor + from paddlespeech.cli.vector import VectorExecutor vector_executor = VectorExecutor() audio_emb = vector_executor( diff --git a/demos/speech_recognition/README.md b/demos/speech_recognition/README.md index 6493e8e61..c815a88af 100644 --- a/demos/speech_recognition/README.md +++ b/demos/speech_recognition/README.md @@ -58,7 +58,7 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespee - Python API ```python import paddle - from paddlespeech.cli import ASRExecutor + from paddlespeech.cli.asr import ASRExecutor asr_executor = ASRExecutor() text = asr_executor( diff --git 
a/demos/speech_recognition/README_cn.md b/demos/speech_recognition/README_cn.md index 8d631d89c..13aa9f277 100644 --- a/demos/speech_recognition/README_cn.md +++ b/demos/speech_recognition/README_cn.md @@ -56,7 +56,7 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespee - Python API ```python import paddle - from paddlespeech.cli import ASRExecutor + from paddlespeech.cli.asr import ASRExecutor asr_executor = ASRExecutor() text = asr_executor( diff --git a/demos/speech_translation/README.md b/demos/speech_translation/README.md index f675a4eda..00a9c7932 100644 --- a/demos/speech_translation/README.md +++ b/demos/speech_translation/README.md @@ -47,7 +47,7 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespee - Python API ```python import paddle - from paddlespeech.cli import STExecutor + from paddlespeech.cli.st import STExecutor st_executor = STExecutor() text = st_executor( diff --git a/demos/speech_translation/README_cn.md b/demos/speech_translation/README_cn.md index bad9b392f..5119bf9f4 100644 --- a/demos/speech_translation/README_cn.md +++ b/demos/speech_translation/README_cn.md @@ -47,7 +47,7 @@ wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespee - Python API ```python import paddle - from paddlespeech.cli import STExecutor + from paddlespeech.cli.st import STExecutor st_executor = STExecutor() text = st_executor( diff --git a/demos/text_to_speech/README.md b/demos/text_to_speech/README.md index 2df72a82d..389847a12 100644 --- a/demos/text_to_speech/README.md +++ b/demos/text_to_speech/README.md @@ -77,7 +77,7 @@ The input of this demo should be a text of the specific language that can be pas - Python API ```python import paddle - from paddlespeech.cli import TTSExecutor + from paddlespeech.cli.tts import TTSExecutor tts_executor = TTSExecutor() wav_file = tts_executor( diff --git a/demos/text_to_speech/README_cn.md b/demos/text_to_speech/README_cn.md index 7e02b9624..f967d3d4d 100644 --- a/demos/text_to_speech/README_cn.md +++ b/demos/text_to_speech/README_cn.md @@ -80,7 +80,7 @@ - Python API ```python import paddle - from paddlespeech.cli import TTSExecutor + from paddlespeech.cli.tts import TTSExecutor tts_executor = TTSExecutor() wav_file = tts_executor( From 418cc37ffb43773d24c486069a3b7e346bd8e5ae Mon Sep 17 00:00:00 2001 From: Yang Zhou Date: Thu, 26 May 2022 22:47:30 +0800 Subject: [PATCH 052/127] refactor file org & rename binary files --- speechx/examples/codelab/README.md | 8 + speechx/examples/codelab/decoder/.gitignore | 2 + speechx/examples/codelab/decoder/README.md | 12 ++ speechx/examples/codelab/decoder/path.sh | 14 ++ speechx/examples/codelab/decoder/run.sh | 78 +++++++ speechx/examples/codelab/decoder/valgrind.sh | 26 +++ speechx/examples/codelab/feat/README.md | 7 + speechx/examples/codelab/feat/path.sh | 14 ++ speechx/examples/codelab/feat/run.sh | 57 +++++ speechx/examples/codelab/feat/valgrind.sh | 24 +++ speechx/examples/codelab/nnet/.gitignore | 2 + speechx/examples/codelab/nnet/README.md | 3 + speechx/examples/codelab/nnet/path.sh | 14 ++ speechx/examples/codelab/nnet/run.sh | 29 +++ speechx/examples/codelab/nnet/valgrind.sh | 21 ++ speechx/examples/custom_asr/run.sh | 8 +- .../ds2_ol/aishell/local/text_to_lexicon.py | 37 ---- speechx/examples/ds2_ol/aishell/path.sh | 8 +- speechx/examples/ds2_ol/aishell/run.sh | 12 +- speechx/examples/ds2_ol/aishell/run_fbank.sh | 12 +- .../examples/ds2_ol/websocket/CMakeLists.txt | 9 - speechx/examples/ds2_ol/websocket/path.sh | 8 +- 
From 418cc37ffb43773d24c486069a3b7e346bd8e5ae Mon Sep 17 00:00:00 2001
From: Yang Zhou
Date: Thu, 26 May 2022 22:47:30 +0800
Subject: [PATCH 052/127] refactor file org & rename binary files

---
 speechx/examples/codelab/README.md | 8 +
 speechx/examples/codelab/decoder/.gitignore | 2 +
 speechx/examples/codelab/decoder/README.md | 12 ++
 speechx/examples/codelab/decoder/path.sh | 14 ++
 speechx/examples/codelab/decoder/run.sh | 78 +++++++
 speechx/examples/codelab/decoder/valgrind.sh | 26 +++
 speechx/examples/codelab/feat/README.md | 7 +
 speechx/examples/codelab/feat/path.sh | 14 ++
 speechx/examples/codelab/feat/run.sh | 57 +++++
 speechx/examples/codelab/feat/valgrind.sh | 24 +++
 speechx/examples/codelab/nnet/.gitignore | 2 +
 speechx/examples/codelab/nnet/README.md | 3 +
 speechx/examples/codelab/nnet/path.sh | 14 ++
 speechx/examples/codelab/nnet/run.sh | 29 +++
 speechx/examples/codelab/nnet/valgrind.sh | 21 ++
 speechx/examples/custom_asr/run.sh | 8 +-
 .../ds2_ol/aishell/local/text_to_lexicon.py | 37 ----
 speechx/examples/ds2_ol/aishell/path.sh | 8 +-
 speechx/examples/ds2_ol/aishell/run.sh | 12 +-
 speechx/examples/ds2_ol/aishell/run_fbank.sh | 12 +-
 .../examples/ds2_ol/websocket/CMakeLists.txt | 9 -
 speechx/examples/ds2_ol/websocket/path.sh | 8 +-
 speechx/speechx/CMakeLists.txt | 6 +
 .../codelab}/CMakeLists.txt | 4 +-
 speechx/speechx/codelab/README.md | 7 +
 speechx/speechx/codelab/glog/CMakeLists.txt | 8 +
 speechx/speechx/codelab/glog/README.md | 38 ++++
 .../codelab/glog/glog_logtostderr_main.cc | 25 +++
 speechx/speechx/codelab/glog/glog_main.cc | 23 ++
 speechx/speechx/codelab/nnet/CMakeLists.txt | 6 +
 .../codelab/nnet/ds2_model_test_main.cc | 203 ++++++++++++++++++
 speechx/speechx/decoder/CMakeLists.txt | 13 ++
 .../ctc_prefix_beam_search_decoder_main.cc | 167 ++++++++++++++
 .../decoder/nnet_logprob_decoder_main.cc | 74 +++++++
 speechx/speechx/decoder/recognizer_main.cc | 99 +++++++++
 speechx/speechx/decoder/tlg_decoder_main.cc | 169 +++++++++++++++
 speechx/speechx/frontend/audio/CMakeLists.txt | 15 +-
 .../frontend/audio/cmvn_json2kaldi_main.cc | 85 ++++++++
 .../frontend/audio/compute_fbank_main.cc | 143 ++++++++++++
 .../audio/compute_linear_spectrogram_main.cc | 145 +++++++++++++
 speechx/speechx/websocket/CMakeLists.txt | 8 +
 .../websocket/websocket_client_main.cc | 0
 .../websocket/websocket_server_main.cc | 0
 43 files changed, 1570 insertions(+), 73 deletions(-)
 create mode 100644 speechx/examples/codelab/README.md
 create mode 100644 speechx/examples/codelab/decoder/.gitignore
 create mode 100644 speechx/examples/codelab/decoder/README.md
 create mode 100644 speechx/examples/codelab/decoder/path.sh
 create mode 100755 speechx/examples/codelab/decoder/run.sh
 create mode 100755 speechx/examples/codelab/decoder/valgrind.sh
 create mode 100644 speechx/examples/codelab/feat/README.md
 create mode 100644 speechx/examples/codelab/feat/path.sh
 create mode 100755 speechx/examples/codelab/feat/run.sh
 create mode 100755 speechx/examples/codelab/feat/valgrind.sh
 create mode 100644 speechx/examples/codelab/nnet/.gitignore
 create mode 100644 speechx/examples/codelab/nnet/README.md
 create mode 100644 speechx/examples/codelab/nnet/path.sh
 create mode 100755 speechx/examples/codelab/nnet/run.sh
 create mode 100755 speechx/examples/codelab/nnet/valgrind.sh
 delete mode 100755 speechx/examples/ds2_ol/aishell/local/text_to_lexicon.py
 delete mode 100644 speechx/examples/ds2_ol/websocket/CMakeLists.txt
 rename speechx/{examples => speechx/codelab}/CMakeLists.txt (52%)
 create mode 100644 speechx/speechx/codelab/README.md
 create mode 100644 speechx/speechx/codelab/glog/CMakeLists.txt
 create mode 100644 speechx/speechx/codelab/glog/README.md
 create mode 100644 speechx/speechx/codelab/glog/glog_logtostderr_main.cc
 create mode 100644 speechx/speechx/codelab/glog/glog_main.cc
 create mode 100644 speechx/speechx/codelab/nnet/CMakeLists.txt
 create mode 100644 speechx/speechx/codelab/nnet/ds2_model_test_main.cc
 create mode 100644 speechx/speechx/decoder/ctc_prefix_beam_search_decoder_main.cc
 create mode 100644 speechx/speechx/decoder/nnet_logprob_decoder_main.cc
 create mode 100644 speechx/speechx/decoder/recognizer_main.cc
 create mode 100644 speechx/speechx/decoder/tlg_decoder_main.cc
 create mode 100644 speechx/speechx/frontend/audio/cmvn_json2kaldi_main.cc
 create mode 100644 speechx/speechx/frontend/audio/compute_fbank_main.cc
 create mode 100644 speechx/speechx/frontend/audio/compute_linear_spectrogram_main.cc
 rename speechx/{examples/ds2_ol => speechx}/websocket/websocket_client_main.cc (100%)
 rename speechx/{examples/ds2_ol => speechx}/websocket/websocket_server_main.cc (100%)

diff --git a/speechx/examples/codelab/README.md b/speechx/examples/codelab/README.md
new file mode 100644
index 000000000..f89184de9
--- /dev/null
+++ b/speechx/examples/codelab/README.md
@@ -0,0 +1,8 @@
+# Codelab
+
+## Introduction
+
+> The below is for developing and offline testing. Do not run it unless you know what it is.
+* nnet
+* feat
+* decoder
diff --git a/speechx/examples/codelab/decoder/.gitignore b/speechx/examples/codelab/decoder/.gitignore
new file mode 100644
index 000000000..bbd86a25b
--- /dev/null
+++ b/speechx/examples/codelab/decoder/.gitignore
@@ -0,0 +1,2 @@
+data
+exp
diff --git a/speechx/examples/codelab/decoder/README.md b/speechx/examples/codelab/decoder/README.md
new file mode 100644
index 000000000..ead3b8e13
--- /dev/null
+++ b/speechx/examples/codelab/decoder/README.md
@@ -0,0 +1,12 @@
+# ASR Decoder
+
+ASR decoder test bins. We use these bins to test the CTC beam search decoder and the WFST decoder.
+
+* decoder_test_main.cc
+feeds nnet output log-probabilities and tests only the decoder.
+
+* offline_decoder_sliding_chunk_main.cc
+feeds streaming audio features and decodes in a streaming manner.
+
+* offline_wfst_decoder_main.cc
+feeds streaming audio features and decodes with a WFST in a streaming manner.
diff --git a/speechx/examples/codelab/decoder/path.sh b/speechx/examples/codelab/decoder/path.sh
new file mode 100644
index 000000000..9d2291743
--- /dev/null
+++ b/speechx/examples/codelab/decoder/path.sh
@@ -0,0 +1,14 @@
+# This contains the locations of the binaries built for running the examples.
+
+SPEECHX_ROOT=$PWD/../../../
+SPEECHX_BUILD=$SPEECHX_ROOT/build/speechx
+
+SPEECHX_TOOLS=$SPEECHX_ROOT/tools
+TOOLS_BIN=$SPEECHX_TOOLS/valgrind/install/bin
+
+[ -d $SPEECHX_BUILD ] || { echo "Error: 'build/speechx' directory not found. Please ensure that the project is built successfully"; }
+
+export LC_ALL=C
+
+SPEECHX_BIN=$SPEECHX_ROOT/build/speechx/decoder:$SPEECHX_ROOT/build/speechx/frontend/audio
+export PATH=$PATH:$SPEECHX_BIN:$TOOLS_BIN
diff --git a/speechx/examples/codelab/decoder/run.sh b/speechx/examples/codelab/decoder/run.sh
new file mode 100755
index 000000000..a911eb033
--- /dev/null
+++ b/speechx/examples/codelab/decoder/run.sh
@@ -0,0 +1,78 @@
+#!/bin/bash
+set +x
+set -e
+
+. path.sh
+
+# 1. compile
+if [ ! -d ${SPEECHX_BUILD} ]; then
+    pushd ${SPEECHX_ROOT}
+    bash build.sh
+    popd
+fi
+
+# input
+mkdir -p data
+data=$PWD/data
+ckpt_dir=$data/model
+model_dir=$ckpt_dir/exp/deepspeech2_online/checkpoints/
+vocb_dir=$ckpt_dir/data/lang_char/
+
+lm=$data/zh_giga.no_cna_cmn.prune01244.klm
+
+# output
+exp_dir=./exp
+mkdir -p $exp_dir
+
+# 2. download model
+if [[ ! -f data/model/asr0_deepspeech2_online_aishell_ckpt_0.2.0.model.tar.gz ]]; then
+    mkdir -p data/model
+    pushd data/model
+    wget -c https://paddlespeech.bj.bcebos.com/s2t/aishell/asr0/asr0_deepspeech2_online_aishell_ckpt_0.2.0.model.tar.gz
+    tar xzfv asr0_deepspeech2_online_aishell_ckpt_0.2.0.model.tar.gz
+    popd
+fi
+
+# produce wav scp
+if [ ! -f data/wav.scp ]; then
+    pushd data
+    wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav
+    echo "utt1 " $PWD/zh.wav > wav.scp
+    popd
+fi
+
+# download lm
+if [ ! -f $lm ]; then
+    pushd data
+    wget -c https://deepspeech.bj.bcebos.com/zh_lm/zh_giga.no_cna_cmn.prune01244.klm
+    popd
+fi
+
+feat_wspecifier=$exp_dir/feats.ark
+cmvn=$exp_dir/cmvn.ark
+
+export GLOG_logtostderr=1
+
+# dump json cmvn to kaldi
+cmvn_json2kaldi_main \
+    --json_file $ckpt_dir/data/mean_std.json \
+    --cmvn_write_path $cmvn \
+    --binary=false
+echo "convert json cmvn to kaldi ark."
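+# optional sanity check: with --binary=false the stats are a text-format
+# Kaldi ark, so the file can be inspected directly, e.g.:
+#   head -n 2 $cmvn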
+
+
+# generate linear feature as streaming
+compute_linear_spectrogram_main \
+    --wav_rspecifier=scp:$data/wav.scp \
+    --feature_wspecifier=ark,t:$feat_wspecifier \
+    --cmvn_file=$cmvn
+echo "compute linear spectrogram feature."
+
+# run ctc beam search decoder as streaming
+ctc_prefix_beam_search_decoder_main \
+    --result_wspecifier=ark,t:$exp_dir/result.txt \
+    --feature_rspecifier=ark:$feat_wspecifier \
+    --model_path=$model_dir/avg_1.jit.pdmodel \
+    --param_path=$model_dir/avg_1.jit.pdiparams \
+    --dict_file=$vocb_dir/vocab.txt \
+    --lm_path=$lm
diff --git a/speechx/examples/codelab/decoder/valgrind.sh b/speechx/examples/codelab/decoder/valgrind.sh
new file mode 100755
index 000000000..14efe0ba4
--- /dev/null
+++ b/speechx/examples/codelab/decoder/valgrind.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+# this script is for memory check, so please run ./run.sh first.
+
+set +x
+set -e
+
+. ./path.sh
+
+if [ ! -d ${SPEECHX_TOOLS}/valgrind/install ]; then
+    echo "please install valgrind in the speechx tools dir."
+    exit 1
+fi
+
+model_dir=../paddle_asr_model
+feat_wspecifier=./feats.ark
+cmvn=./cmvn.ark
+
+valgrind --tool=memcheck --track-origins=yes --leak-check=full --show-leak-kinds=all \
+    offline_decoder_main \
+    --feature_respecifier=ark:$feat_wspecifier \
+    --model_path=$model_dir/avg_1.jit.pdmodel \
+    --param_path=$model_dir/avg_1.jit.pdparams \
+    --dict_file=$model_dir/vocab.txt \
+    --lm_path=$model_dir/avg_1.jit.klm
+
diff --git a/speechx/examples/codelab/feat/README.md b/speechx/examples/codelab/feat/README.md
new file mode 100644
index 000000000..e59e02bf9
--- /dev/null
+++ b/speechx/examples/codelab/feat/README.md
@@ -0,0 +1,7 @@
+# Deepspeech2 Streaming Audio Feature
+
+ASR audio feature test bins. We use these bins to test linear/fbank/mfcc ASR features in a streaming manner.
+
+* compute_linear_spectrogram_main.cc
+
+computes the linear spectrogram, without dB normalization, in a streaming manner.
diff --git a/speechx/examples/codelab/feat/path.sh b/speechx/examples/codelab/feat/path.sh
new file mode 100644
index 000000000..3b89d01e9
--- /dev/null
+++ b/speechx/examples/codelab/feat/path.sh
@@ -0,0 +1,14 @@
+# This contains the locations of the binaries built for running the examples.
+
+SPEECHX_ROOT=$PWD/../../../
+SPEECHX_EXAMPLES=$SPEECHX_ROOT/build/examples
+
+SPEECHX_TOOLS=$SPEECHX_ROOT/tools
+TOOLS_BIN=$SPEECHX_TOOLS/valgrind/install/bin
+
+[ -d $SPEECHX_EXAMPLES ] || { echo "Error: 'build/examples' directory not found. Please ensure that the project is built successfully"; }
+
+export LC_ALL=C
+
+SPEECHX_BIN=$SPEECHX_ROOT/build/speechx/decoder:$SPEECHX_ROOT/build/speechx/frontend/audio
+export PATH=$PATH:$SPEECHX_BIN:$TOOLS_BIN
diff --git a/speechx/examples/codelab/feat/run.sh b/speechx/examples/codelab/feat/run.sh
new file mode 100755
index 000000000..1fa37f981
--- /dev/null
+++ b/speechx/examples/codelab/feat/run.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+set +x
+set -e
+
+. ./path.sh
+
+# 1. compile
+if [ ! -d ${SPEECHX_EXAMPLES} ]; then
+    pushd ${SPEECHX_ROOT}
+    bash build.sh
+    popd
+fi
+
+# 2. download model
+if [ ! -e data/model/asr0_deepspeech2_online_aishell_ckpt_0.2.0.model.tar.gz ]; then
+    mkdir -p data/model
+    pushd data/model
+    wget -c https://paddlespeech.bj.bcebos.com/s2t/aishell/asr0/asr0_deepspeech2_online_aishell_ckpt_0.2.0.model.tar.gz
+    tar xzfv asr0_deepspeech2_online_aishell_ckpt_0.2.0.model.tar.gz
+    popd
+fi
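+# note: the tarball is expected to unpack under data/model/, providing
+# data/mean_std.json with the cmvn stats consumed by the steps below;
+# adjust the paths if the checkpoint layout differs.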
+
+# produce wav scp
+if [ ! -f data/wav.scp ]; then
+    mkdir -p data
+    pushd data
+    wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav
+    echo "utt1 " $PWD/zh.wav > wav.scp
+    popd
+fi
+
+
+# input
+data_dir=./data
+exp_dir=./exp
+model_dir=$data_dir/model/
+
+mkdir -p $exp_dir
+
+
+# 3. run feat
+export GLOG_logtostderr=1
+
+cmvn_json2kaldi_main \
+    --json_file $model_dir/data/mean_std.json \
+    --cmvn_write_path $exp_dir/cmvn.ark \
+    --binary=false
+echo "convert json cmvn to kaldi ark."
+
+
+compute_linear_spectrogram_main \
+    --wav_rspecifier=scp:$data_dir/wav.scp \
+    --feature_wspecifier=ark,t:$exp_dir/feats.ark \
+    --cmvn_file=$exp_dir/cmvn.ark
+echo "compute linear spectrogram feature."
+
+
diff --git a/speechx/examples/codelab/feat/valgrind.sh b/speechx/examples/codelab/feat/valgrind.sh
new file mode 100755
index 000000000..ea50fdc23
--- /dev/null
+++ b/speechx/examples/codelab/feat/valgrind.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+# this script is for memory check, so please run ./run.sh first.
+
+set +x
+set -e
+
+. ./path.sh
+
+if [ ! -d ${SPEECHX_TOOLS}/valgrind/install ]; then
+    echo "please install valgrind in the speechx tools dir."
+    exit 1
+fi
+
+model_dir=../paddle_asr_model
+feat_wspecifier=./feats.ark
+cmvn=./cmvn.ark
+
+valgrind --tool=memcheck --track-origins=yes --leak-check=full --show-leak-kinds=all \
+    compute_linear_spectrogram_main \
+    --wav_rspecifier=scp:$model_dir/wav.scp \
+    --feature_wspecifier=ark,t:$feat_wspecifier \
+    --cmvn_write_path=$cmvn
+
diff --git a/speechx/examples/codelab/nnet/.gitignore b/speechx/examples/codelab/nnet/.gitignore
new file mode 100644
index 000000000..bbd86a25b
--- /dev/null
+++ b/speechx/examples/codelab/nnet/.gitignore
@@ -0,0 +1,2 @@
+data
+exp
diff --git a/speechx/examples/codelab/nnet/README.md b/speechx/examples/codelab/nnet/README.md
new file mode 100644
index 000000000..772a58f0e
--- /dev/null
+++ b/speechx/examples/codelab/nnet/README.md
@@ -0,0 +1,3 @@
+# Deepspeech2 Streaming NNet Test
+
+Used for the ds2 streaming nnet inference test.
diff --git a/speechx/examples/codelab/nnet/path.sh b/speechx/examples/codelab/nnet/path.sh
new file mode 100644
index 000000000..7d395d648
--- /dev/null
+++ b/speechx/examples/codelab/nnet/path.sh
@@ -0,0 +1,14 @@
+# This contains the locations of the binaries built for running the examples.
+
+SPEECHX_ROOT=$PWD/../../../
+SPEECHX_BUILD=$SPEECHX_ROOT/build/speechx
+
+SPEECHX_TOOLS=$SPEECHX_ROOT/tools
+TOOLS_BIN=$SPEECHX_TOOLS/valgrind/install/bin
+
+[ -d $SPEECHX_BUILD ] || { echo "Error: 'build/speechx' directory not found. Please ensure that the project is built successfully"; }
+
+export LC_ALL=C
+
+SPEECHX_BIN=$SPEECHX_BUILD/codelab/nnet
+export PATH=$PATH:$SPEECHX_BIN:$TOOLS_BIN
diff --git a/speechx/examples/codelab/nnet/run.sh b/speechx/examples/codelab/nnet/run.sh
new file mode 100755
index 000000000..842499ba2
--- /dev/null
+++ b/speechx/examples/codelab/nnet/run.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+set +x
+set -e
+
+. path.sh
+
+# 1. compile
+if [ ! -d ${SPEECHX_BUILD} ]; then
+    pushd ${SPEECHX_ROOT}
+    bash build.sh
+    popd
+fi
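+# note: ds2_model_test_main below exercises only the DeepSpeech2 nnet forward
+# pass of the downloaded checkpoint; see the codelab decoder example for
+# end-to-end decoding.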
+
+# 2. download model
+if [ ! -f data/model/asr0_deepspeech2_online_aishell_ckpt_0.2.0.model.tar.gz ]; then
+    mkdir -p data/model
+    pushd data/model
+    wget -c https://paddlespeech.bj.bcebos.com/s2t/aishell/asr0/asr0_deepspeech2_online_aishell_ckpt_0.2.0.model.tar.gz
+    tar xzfv asr0_deepspeech2_online_aishell_ckpt_0.2.0.model.tar.gz
+    popd
+fi
+
+ckpt_dir=./data/model
+model_dir=$ckpt_dir/exp/deepspeech2_online/checkpoints/
+
+ds2_model_test_main \
+    --model_path=$model_dir/avg_1.jit.pdmodel \
+    --param_path=$model_dir/avg_1.jit.pdiparams
+
diff --git a/speechx/examples/codelab/nnet/valgrind.sh b/speechx/examples/codelab/nnet/valgrind.sh
new file mode 100755
index 000000000..a5aab6637
--- /dev/null
+++ b/speechx/examples/codelab/nnet/valgrind.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+# this script is for memory check, so please run ./run.sh first.
+
+set +x
+set -e
+
+. ./path.sh
+
+if [ ! -d ${SPEECHX_TOOLS}/valgrind/install ]; then
+    echo "please install valgrind in the speechx tools dir."
+    exit 1
+fi
+
+ckpt_dir=./data/model
+model_dir=$ckpt_dir/exp/deepspeech2_online/checkpoints/
+
+valgrind --tool=memcheck --track-origins=yes --leak-check=full --show-leak-kinds=all \
+    ds2_model_test_main \
+    --model_path=$model_dir/avg_1.jit.pdmodel \
+    --param_path=$model_dir/avg_1.jit.pdparams
diff --git a/speechx/examples/custom_asr/run.sh b/speechx/examples/custom_asr/run.sh
index 8d88000dc..dddcf9fd1 100644
--- a/speechx/examples/custom_asr/run.sh
+++ b/speechx/examples/custom_asr/run.sh
@@ -7,7 +7,7 @@ export GLOG_logtostderr=1
 . ./path.sh || exit 1;
 
 # ds2 means deepspeech2 (acoutic model type)
-dir=$PWD/ds2_graph_with_slot
+dir=$PWD/exp/ds2_graph_with_slot
 data=$PWD/data
 stage=0
 stop_stage=10
@@ -80,9 +80,9 @@ if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
         --word_symbol_table=$graph/words.txt \
         --graph_path=$graph/TLG.fst --max_active=7500 \
         --acoustic_scale=12 \
-        --result_wspecifier=ark,t:./result_run.txt
+        --result_wspecifier=ark,t:./exp/result_run.txt
 
     # the data/wav.trans is the label.
-    utils/compute-wer.py --char=1 --v=1 data/wav.trans result_run.txt > wer_run
-    tail -n 7 wer_run
+    utils/compute-wer.py --char=1 --v=1 data/wav.trans exp/result_run.txt > exp/wer_run
+    tail -n 7 exp/wer_run
 fi
diff --git a/speechx/examples/ds2_ol/aishell/local/text_to_lexicon.py b/speechx/examples/ds2_ol/aishell/local/text_to_lexicon.py
deleted file mode 100755
index ba5ab60ac..000000000
--- a/speechx/examples/ds2_ol/aishell/local/text_to_lexicon.py
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/env python3
-import argparse
-from collections import Counter
-
-
-def main(args):
-    counter = Counter()
-    with open(args.text, 'r') as fin, open(args.lexicon, 'w') as fout:
-        for line in fin:
-            line = line.strip()
-            if args.has_key:
-                utt, text = line.split(maxsplit=1)
-                words = text.split()
-            else:
-                words = line.split()
-
-            counter.update(words)
-
-        for word in counter:
-            val = " ".join(list(word))
-            fout.write(f"{word}\t{val}\n")
-            fout.flush()
-
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser(
-        description='text(line:utt1 中国 人) to lexicon(line:中国 中 国).')
-    parser.add_argument(
-        '--has_key', default=True, help='text path, with utt or not')
-    parser.add_argument(
-        '--text', required=True, help='text path. line: utt1 中国 人 or 中国 人')
-    parser.add_argument(
-        '--lexicon', required=True, help='lexicon path. line:中国 中 国')
-    args = parser.parse_args()
-    print(args)
-
-    main(args)
diff --git a/speechx/examples/ds2_ol/aishell/path.sh b/speechx/examples/ds2_ol/aishell/path.sh
index 1807a277a..69c78e746 100755
--- a/speechx/examples/ds2_ol/aishell/path.sh
+++ b/speechx/examples/ds2_ol/aishell/path.sh
@@ -1,13 +1,13 @@
 # This contains the locations of binarys build required for running the examples.
 
 MAIN_ROOT=`realpath $PWD/../../../../`
-SPEECHX_ROOT=$PWD/../../..
-SPEECHX_EXAMPLES=$SPEECHX_ROOT/build/examples
+SPEECHX_ROOT=$PWD/../../../
+SPEECHX_BUILD=$SPEECHX_ROOT/build/speechx
 
 SPEECHX_TOOLS=$SPEECHX_ROOT/tools
 TOOLS_BIN=$SPEECHX_TOOLS/valgrind/install/bin
 
-[ -d $SPEECHX_EXAMPLES ] || { echo "Error: 'build/examples' directory not found. please ensure that the project build successfully"; }
+[ -d $SPEECHX_BUILD ] || { echo "Error: 'build/speechx' directory not found. Please ensure that the project is built successfully"; }
 
 export LC_AL=C
 
@@ -20,5 +20,5 @@ export LIBLBFGS=${MAIN_ROOT}/tools/liblbfgs-1.10
 export LD_LIBRARY_PATH=${LD_LIBRARY_PATH:-}:${LIBLBFGS}/lib/.libs
 export SRILM=${MAIN_ROOT}/tools/srilm
 
-SPEECHX_BIN=$SPEECHX_EXAMPLES/ds2_ol/decoder:$SPEECHX_EXAMPLES/ds2_ol/feat:$SPEECHX_EXAMPLES/ds2_ol/websocket
+SPEECHX_BIN=$SPEECHX_BUILD/decoder:$SPEECHX_BUILD/frontend/audio:$SPEECHX_BUILD/websocket
 export PATH=$PATH:$SPEECHX_BIN:$TOOLS_BIN:${SRILM}/bin:${SRILM}/bin/i686-m64:$KALDI_DIR/lmbin:$KALDI_DIR/fstbin:$OPENFST_DIR/bin
diff --git a/speechx/examples/ds2_ol/aishell/run.sh b/speechx/examples/ds2_ol/aishell/run.sh
index 650cb1409..e1001e250 100755
--- a/speechx/examples/ds2_ol/aishell/run.sh
+++ b/speechx/examples/ds2_ol/aishell/run.sh
@@ -69,12 +69,12 @@ export GLOG_logtostderr=1
 cmvn=$data/cmvn.ark
 if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
     # 3. gen linear feat
-    cmvn-json2kaldi --json_file=$ckpt_dir/data/mean_std.json --cmvn_write_path=$cmvn
+    cmvn_json2kaldi_main --json_file=$ckpt_dir/data/mean_std.json --cmvn_write_path=$cmvn
 
     ./local/split_data.sh $data $data/$aishell_wav_scp $aishell_wav_scp $nj
 
     utils/run.pl JOB=1:$nj $data/split${nj}/JOB/feat.log \
-    linear-spectrogram-wo-db-norm-ol \
+    compute_linear_spectrogram_main \
         --wav_rspecifier=scp:$data/split${nj}/JOB/${aishell_wav_scp} \
         --feature_wspecifier=ark,scp:$data/split${nj}/JOB/feat.ark,$data/split${nj}/JOB/feat.scp \
         --cmvn_file=$cmvn \
@@ -85,7 +85,7 @@ fi
 if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
     # recognizer
     utils/run.pl JOB=1:$nj $data/split${nj}/JOB/recog.wolm.log \
-    ctc-prefix-beam-search-decoder-ol \
+    ctc_prefix_beam_search_decoder_main \
         --feature_rspecifier=scp:$data/split${nj}/JOB/feat.scp \
         --model_path=$model_dir/avg_1.jit.pdmodel \
         --param_path=$model_dir/avg_1.jit.pdiparams \
@@ -102,7 +102,7 @@ fi
 if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
     # decode with lm
     utils/run.pl JOB=1:$nj $data/split${nj}/JOB/recog.lm.log \
-    ctc-prefix-beam-search-decoder-ol \
+    ctc_prefix_beam_search_decoder_main \
         --feature_rspecifier=scp:$data/split${nj}/JOB/feat.scp \
         --model_path=$model_dir/avg_1.jit.pdmodel \
         --param_path=$model_dir/avg_1.jit.pdiparams \
@@ -132,7 +132,7 @@ fi
 if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
     # TLG decoder
     utils/run.pl JOB=1:$nj $data/split${nj}/JOB/recog.wfst.log \
-    wfst-decoder-ol \
+    tlg_decoder_main \
        --feature_rspecifier=scp:$data/split${nj}/JOB/feat.scp \
        --model_path=$model_dir/avg_1.jit.pdmodel \
        --param_path=$model_dir/avg_1.jit.pdiparams \
@@ -151,7 +151,7 @@ fi
 if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
     # TLG decoder
     utils/run.pl JOB=1:$nj $data/split${nj}/JOB/recognizer.log \
-    recognizer_test_main \
+    recognizer_main \
        --wav_rspecifier=scp:$data/split${nj}/JOB/${aishell_wav_scp} \
        --cmvn_file=$cmvn \
        --model_path=$model_dir/avg_1.jit.pdmodel \
diff --git a/speechx/examples/ds2_ol/aishell/run_fbank.sh b/speechx/examples/ds2_ol/aishell/run_fbank.sh
index 483fbfdfe..130f5a8c4 100755
--- a/speechx/examples/ds2_ol/aishell/run_fbank.sh
+++ b/speechx/examples/ds2_ol/aishell/run_fbank.sh
@@ -69,7 +69,7 @@ export GLOG_logtostderr=1
 cmvn=$data/cmvn_fbank.ark
 if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
     # 3. gen linear feat
-    cmvn-json2kaldi --json_file=$ckpt_dir/data/mean_std.json --cmvn_write_path=$cmvn --binary=false
+    cmvn_json2kaldi_main --json_file=$ckpt_dir/data/mean_std.json --cmvn_write_path=$cmvn --binary=false
 
     ./local/split_data.sh $data $data/$aishell_wav_scp $aishell_wav_scp $nj
 
@@ -84,7 +84,7 @@ fi
 if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
     # recognizer
     utils/run.pl JOB=1:$nj $data/split${nj}/JOB/recog.fbank.wolm.log \
-    ctc-prefix-beam-search-decoder-ol \
+    ctc_prefix_beam_search_decoder_main \
        --feature_rspecifier=scp:$data/split${nj}/JOB/fbank_feat.scp \
        --model_path=$model_dir/avg_5.jit.pdmodel \
        --param_path=$model_dir/avg_5.jit.pdiparams \
@@ -100,12 +100,12 @@ fi
 if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
     # decode with lm
     utils/run.pl JOB=1:$nj $data/split${nj}/JOB/recog.fbank.lm.log \
-    ctc-prefix-beam-search-decoder-ol \
+    ctc_prefix_beam_search_decoder_main \
        --feature_rspecifier=scp:$data/split${nj}/JOB/fbank_feat.scp \
        --model_path=$model_dir/avg_5.jit.pdmodel \
        --param_path=$model_dir/avg_5.jit.pdiparams \
        --model_output_names=softmax_0.tmp_0,tmp_5,concat_0.tmp_0,concat_1.tmp_0 \
-       --model_cache_shapes="5-1-2048,5-1-2048" \
+       --model_cache_shapes="5-1-2048,5-1-2048" \
        --dict_file=$vocb_dir/vocab.txt \
        --lm_path=$lm \
        --result_wspecifier=ark,t:$data/split${nj}/JOB/fbank_result_lm
@@ -129,13 +129,13 @@ fi
 if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
     # TLG decoder
     utils/run.pl JOB=1:$nj $data/split${nj}/JOB/recog.fbank.wfst.log \
-    wfst-decoder-ol \
+    tlg_decoder_main \
        --feature_rspecifier=scp:$data/split${nj}/JOB/fbank_feat.scp \
        --model_path=$model_dir/avg_5.jit.pdmodel \
        --param_path=$model_dir/avg_5.jit.pdiparams \
        --word_symbol_table=$wfst/words.txt \
        --model_output_names=softmax_0.tmp_0,tmp_5,concat_0.tmp_0,concat_1.tmp_0 \
-       --model_cache_shapes="5-1-2048,5-1-2048" \
+       --model_cache_shapes="5-1-2048,5-1-2048" \
        --graph_path=$wfst/TLG.fst --max_active=7500 \
        --acoustic_scale=1.2 \
        --result_wspecifier=ark,t:$data/split${nj}/JOB/result_tlg
diff --git a/speechx/examples/ds2_ol/websocket/CMakeLists.txt b/speechx/examples/ds2_ol/websocket/CMakeLists.txt
deleted file mode 100644
index ed542aad0..000000000
--- a/speechx/examples/ds2_ol/websocket/CMakeLists.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-cmake_minimum_required(VERSION 3.14 FATAL_ERROR)
-
-add_executable(websocket_server_main ${CMAKE_CURRENT_SOURCE_DIR}/websocket_server_main.cc)
-target_include_directories(websocket_server_main PRIVATE ${SPEECHX_ROOT} ${SPEECHX_ROOT}/kaldi)
-target_link_libraries(websocket_server_main PUBLIC frontend kaldi-feat-common nnet decoder fst utils gflags glog kaldi-base kaldi-matrix kaldi-util kaldi-decoder websocket ${DEPS})
-
-add_executable(websocket_client_main ${CMAKE_CURRENT_SOURCE_DIR}/websocket_client_main.cc)
-target_include_directories(websocket_client_main PRIVATE ${SPEECHX_ROOT} ${SPEECHX_ROOT}/kaldi)
-target_link_libraries(websocket_client_main PUBLIC frontend kaldi-feat-common nnet decoder fst utils gflags glog kaldi-base kaldi-matrix kaldi-util kaldi-decoder websocket ${DEPS})
\ No newline at end of file
diff --git a/speechx/examples/ds2_ol/websocket/path.sh b/speechx/examples/ds2_ol/websocket/path.sh
index d66b5dcce..3ad032031 100755
--- a/speechx/examples/ds2_ol/websocket/path.sh
+++ b/speechx/examples/ds2_ol/websocket/path.sh
@@ -1,14 +1,14 @@
 # This contains the locations of binarys build required for running the examples.
 
-SPEECHX_ROOT=$PWD/../../..
-SPEECHX_EXAMPLES=$SPEECHX_ROOT/build/examples
+SPEECHX_ROOT=$PWD/../../../
+SPEECHX_BUILD=$SPEECHX_ROOT/build/speechx
 
 SPEECHX_TOOLS=$SPEECHX_ROOT/tools
 TOOLS_BIN=$SPEECHX_TOOLS/valgrind/install/bin
 
-[ -d $SPEECHX_EXAMPLES ] || { echo "Error: 'build/examples' directory not found. please ensure that the project build successfully"; }
+[ -d $SPEECHX_BUILD ] || { echo "Error: 'build/speechx' directory not found. Please ensure that the project is built successfully"; }
 
 export LC_AL=C
 
-SPEECHX_BIN=$SPEECHX_EXAMPLES/ds2_ol/websocket:$SPEECHX_EXAMPLES/ds2_ol/feat
+SPEECHX_BIN=$SPEECHX_BUILD/websocket
 export PATH=$PATH:$SPEECHX_BIN:$TOOLS_BIN
diff --git a/speechx/speechx/CMakeLists.txt b/speechx/speechx/CMakeLists.txt
index b4da095d8..a9a8a398d 100644
--- a/speechx/speechx/CMakeLists.txt
+++ b/speechx/speechx/CMakeLists.txt
@@ -37,3 +37,9 @@ ${CMAKE_CURRENT_SOURCE_DIR}
 ${CMAKE_CURRENT_SOURCE_DIR}/websocket
 )
 add_subdirectory(websocket)
+
+include_directories(
+${CMAKE_CURRENT_SOURCE_DIR}
+${CMAKE_CURRENT_SOURCE_DIR}/codelab
+)
+add_subdirectory(codelab)
diff --git a/speechx/examples/CMakeLists.txt b/speechx/speechx/codelab/CMakeLists.txt
similarity index 52%
rename from speechx/examples/CMakeLists.txt
rename to speechx/speechx/codelab/CMakeLists.txt
index 3c274a20a..950432637 100644
--- a/speechx/examples/CMakeLists.txt
+++ b/speechx/speechx/codelab/CMakeLists.txt
@@ -1,4 +1,4 @@
 cmake_minimum_required(VERSION 3.14 FATAL_ERROR)
 
-add_subdirectory(ds2_ol)
-add_subdirectory(dev)
\ No newline at end of file
+add_subdirectory(glog)
+add_subdirectory(nnet)
diff --git a/speechx/speechx/codelab/README.md b/speechx/speechx/codelab/README.md
new file mode 100644
index 000000000..aee60de67
--- /dev/null
+++ b/speechx/speechx/codelab/README.md
@@ -0,0 +1,7 @@
+
+## For Developers
+
+> Reminder: for developers only.
+
+* codelab - for speechx developers, used for tests.
+
diff --git a/speechx/speechx/codelab/glog/CMakeLists.txt b/speechx/speechx/codelab/glog/CMakeLists.txt
new file mode 100644
index 000000000..08a98641f
--- /dev/null
+++ b/speechx/speechx/codelab/glog/CMakeLists.txt
@@ -0,0 +1,8 @@
+cmake_minimum_required(VERSION 3.14 FATAL_ERROR)
+
+add_executable(glog_main ${CMAKE_CURRENT_SOURCE_DIR}/glog_main.cc)
+target_link_libraries(glog_main glog)
+
+
+add_executable(glog_logtostderr_main ${CMAKE_CURRENT_SOURCE_DIR}/glog_logtostderr_main.cc)
+target_link_libraries(glog_logtostderr_main glog)
diff --git a/speechx/speechx/codelab/glog/README.md b/speechx/speechx/codelab/glog/README.md
new file mode 100644
index 000000000..3282c920d
--- /dev/null
+++ b/speechx/speechx/codelab/glog/README.md
@@ -0,0 +1,38 @@
+# [GLOG](https://rpg.ifi.uzh.ch/docs/glog.html)
+
+Unless otherwise specified, glog writes to the filename `/tmp/...log...