add server test, test=doc

pull/1530/head
lym0302 2 years ago
parent 85d4a31e04
commit e50c1b3b1d

@ -10,12 +10,13 @@ This demo is an implementation of starting the voice service and accessing the s
### 1. Installation
See [installation](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/docs/source/install.md).
It is recommended to use **paddlepaddle 2.2.1** or above.
You can choose one of three installation methods (easy, medium, or hard) to install PaddleSpeech.
### 2. Prepare config File
The configuration files include the service-related configuration file and the model configurations for the speech tasks the service contains. They are all under the `conf` folder.
**Note: The configuration of `engine_backend` in `application.yaml` represents all speech tasks included in the started service.**
If the service you start should include only certain speech tasks, comment out the tasks you do not need. For example, if you only want the speech recognition (ASR) service, you can comment out the speech synthesis (TTS) entry, as in the following example:
```bash
engine_backend:
    asr: 'conf/asr/asr.yaml'
    #tts: 'conf/tts/tts.yaml'
```
**Note: The configuration file of `engine_backend` in `application.yaml` needs to match the configuration type of `engine_type`.**
When the configuration file of `engine_backend` is `XXX.yaml`, the configuration type of `engine_type` needs to be set to `python`; when the configuration file of `engine_backend` is `XXX_pd.yaml`, the configuration type of `engine_type` needs to be set to `inference`.
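For example, a matched pair in `application.yaml` for the paddle inference ASR engine would look like this (paths follow the `application.yaml` added in this commit):
```bash
engine_backend:
    asr: 'conf/asr/asr_pd.yaml'
engine_type:
    asr: 'inference'
```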
The input of the ASR client demo should be a WAV file (`.wav`), and its sample rate must match the model's.
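If you are not sure about a file's sample rate, a quick check (assuming the `sox` package, which provides `soxi`, is installed):
```bash
soxi -r zh.wav   # prints the sample rate, e.g. 16000
```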

@ -10,8 +10,10 @@
### 1. Installation
See the [installation documentation](https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/docs/source/install.md).
It is recommended to use **paddlepaddle 2.2.1** or above.
You can choose one of three installation methods (easy, medium, or hard) to install PaddleSpeech.
### 2. Prepare config File
The configuration files include the service-related configuration file and the model configurations for the speech tasks the service contains. They are all under the `conf` folder.
**Note: The configuration of `engine_backend` in `application.yaml` represents all speech tasks included in the started service.**

@ -3,7 +3,7 @@
##################################################################
# SERVER SETTING #
##################################################################
host: 127.0.0.1
port: 8090
##################################################################

@ -3,7 +3,7 @@
##################################################################
# SERVER SETTING #
##################################################################
host: 127.0.0.1
port: 8090
##################################################################
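Before starting the service, you can confirm that nothing is already listening on the configured port; this is the same `lsof` probe the test script below uses:
```bash
lsof -i :8090
```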

@ -0,0 +1,114 @@
#!/usr/bin/python
import argparse
import os
import yaml

def change_speech_yaml(yaml_name: str, device: str):
    """Change the device setting in a speech task config file.

    Args:
        yaml_name (str): asr, asr_pd, tts or tts_pd
        device (str): 'cpu' or 'gpu'
    """
if "asr" in yaml_name:
dirpath = "./conf/asr/"
elif 'tts' in yaml_name:
dirpath = "./conf/tts/"
yamlfile = dirpath + yaml_name + ".yaml"
tmp_yamlfile = dirpath + yaml_name + "_tmp.yaml"
os.system("cp %s %s" % (yamlfile, tmp_yamlfile))
with open(tmp_yamlfile) as f, open(yamlfile, "w+", encoding="utf-8") as fw:
y = yaml.safe_load(f)
if device == 'cpu':
print("Set device: cpu")
if yaml_name == 'asr':
y['device'] = 'cpu'
elif yaml_name == 'asr_pd':
y['am_predictor_conf']['device'] = 'cpu'
elif yaml_name == 'tts':
y['device'] = 'cpu'
elif yaml_name == 'tts_pd':
y['am_predictor_conf']['device'] = 'cpu'
y['voc_predictor_conf']['device'] = 'cpu'
elif device == 'gpu':
print("Set device: gpu")
if yaml_name == 'asr':
y['device'] = 'gpu:0'
elif yaml_name == 'asr_pd':
y['am_predictor_conf']['device'] = 'gpu:0'
elif yaml_name == 'tts':
y['device'] = 'gpu:0'
elif yaml_name == 'tts_pd':
y['am_predictor_conf']['device'] = 'gpu:0'
y['voc_predictor_conf']['device'] = 'gpu:0'
else:
print("Please set correct device: cpu or gpu.")
print("The content of '%s': " % (yamlfile))
print(yaml.dump(y, default_flow_style=False, sort_keys=False))
yaml.dump(y, fw, allow_unicode=True)
os.system("rm %s" % (tmp_yamlfile))
print("Change %s successfully." % (yamlfile))

def change_app_yaml(task: str, engine_type: str):
    """Change the engine type and corresponding configuration file of a speech task in application.yaml

    Args:
        task (str): asr or tts
        engine_type (str): python or inference
    """
yamlfile = "./conf/application.yaml"
tmp_yamlfile = "./conf/application_tmp.yaml"
os.system("cp %s %s" % (yamlfile, tmp_yamlfile))
with open(tmp_yamlfile) as f, open(yamlfile, "w+", encoding="utf-8") as fw:
y = yaml.safe_load(f)
y['engine_type'][task] = engine_type
path_list = ["./conf/", task, "/", task]
if engine_type == 'python':
path_list.append(".yaml")
elif engine_type == 'inference':
path_list.append("_pd.yaml")
y['engine_backend'][task] = ''.join(path_list)
print("The content of './conf/application.yaml': ")
print(yaml.dump(y, default_flow_style=False, sort_keys=False))
yaml.dump(y, fw, allow_unicode=True)
os.system("rm %s" % (tmp_yamlfile))
print("Change %s successfully." % (yamlfile))

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--change_task',
        type=str,
        default=None,
        help='Change task',
        choices=[
            'app-asr-python',
            'app-asr-inference',
            'app-tts-python',
            'app-tts-inference',
            'speech-asr-cpu',
            'speech-asr-gpu',
            'speech-asr_pd-cpu',
            'speech-asr_pd-gpu',
            'speech-tts-cpu',
            'speech-tts-gpu',
            'speech-tts_pd-cpu',
            'speech-tts_pd-gpu',
        ],
        required=True)
    args = parser.parse_args()

    types = args.change_task.split("-")
    if types[0] == "app":
        change_app_yaml(types[1], types[2])
    elif types[0] == "speech":
        change_speech_yaml(types[1], types[2])
    else:
        print("Invalid change task, please check --change_task.")

@ -0,0 +1,27 @@
# This is the parameter configuration file for PaddleSpeech Serving.
##################################################################
# SERVER SETTING #
##################################################################
host: 127.0.0.1
port: 8090
##################################################################
# CONFIG FILE #
##################################################################
# add engine backend type (Options: asr, tts) and config file here.
# Adding a speech task to engine_backend means the service will start it.
engine_backend:
    asr: 'conf/asr/asr.yaml'
    tts: 'conf/tts/tts.yaml'

# The engine_type of a speech task must match the config file of that task.
# E.g: The engine_type of asr is 'python', the engine_backend of asr is 'XX/asr.yaml'
# E.g: The engine_type of asr is 'inference', the engine_backend of asr is 'XX/asr_pd.yaml'
#
# add engine type (Options: python, inference)
engine_type:
    asr: 'python'
    tts: 'python'
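With this config in place, the service is started the same way the test script below does it:
```bash
paddlespeech_server start --config_file ./conf/application.yaml
```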

@ -0,0 +1,8 @@
model: 'conformer_wenetspeech'
lang: 'zh'
sample_rate: 16000
cfg_path: # [optional]
ckpt_path: # [optional]
decode_method: 'attention_rescoring'
force_yes: True
device: # set 'gpu:id' or 'cpu'
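Once the server is running, the ASR engine configured here can be exercised with the client call used in the test script (host and port taken from `application.yaml` above):
```bash
paddlespeech_client asr --server_ip 127.0.0.1 --port 8090 --input ./zh.wav
```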

@ -0,0 +1,26 @@
# This is the parameter configuration file for ASR server.
# These are the static models that support paddle inference.
##################################################################
# ACOUSTIC MODEL SETTING #
# am choices=['deepspeech2offline_aishell'] TODO
##################################################################
model_type: 'deepspeech2offline_aishell'
am_model: # the pdmodel file of am static model [optional]
am_params: # the pdiparams file of am static model [optional]
lang: 'zh'
sample_rate: 16000
cfg_path:
decode_method:
force_yes: True
am_predictor_conf:
    device: # set 'gpu:id' or 'cpu'
    switch_ir_optim: True
    glog_info: False # True -> print glog
    summary: True # False -> do not show predictor config
##################################################################
# OTHERS #
##################################################################
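To point the service at this paddle inference config, the helper script rewrites `application.yaml`:
```bash
python change_yaml.py --change_task app-asr-inference
```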

@ -0,0 +1,32 @@
# This is the parameter configuration file for TTS server.
##################################################################
# ACOUSTIC MODEL SETTING #
# am choices=['speedyspeech_csmsc', 'fastspeech2_csmsc',
# 'fastspeech2_ljspeech', 'fastspeech2_aishell3',
# 'fastspeech2_vctk']
##################################################################
am: 'fastspeech2_csmsc'
am_config:
am_ckpt:
am_stat:
phones_dict:
tones_dict:
speaker_dict:
spk_id: 0
##################################################################
# VOCODER SETTING #
# voc choices=['pwgan_csmsc', 'pwgan_ljspeech', 'pwgan_aishell3',
# 'pwgan_vctk', 'mb_melgan_csmsc']
##################################################################
voc: 'pwgan_csmsc'
voc_config:
voc_ckpt:
voc_stat:
##################################################################
# OTHERS #
##################################################################
lang: 'zh'
device: # set 'gpu:id' or 'cpu'
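The TTS engine configured here is driven the same way in the test script:
```bash
paddlespeech_client tts --server_ip 127.0.0.1 --port 8090 --input "您好,欢迎使用百度飞桨语音合成服务。" --output output.wav
```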

@ -0,0 +1,42 @@
# This is the parameter configuration file for TTS server.
# These are the static models that support paddle inference.
##################################################################
# ACOUSTIC MODEL SETTING #
# am choices=['speedyspeech_csmsc', 'fastspeech2_csmsc']
##################################################################
am: 'fastspeech2_csmsc'
am_model: # the pdmodel file of your am static model (XX.pdmodel)
am_params: # the pdiparams file of your am static model (XX.pdiparams)
am_sample_rate: 24000
phones_dict:
tones_dict:
speaker_dict:
spk_id: 0
am_predictor_conf:
    device: # set 'gpu:id' or 'cpu'
    switch_ir_optim: True
    glog_info: False # True -> print glog
    summary: True # False -> do not show predictor config
##################################################################
# VOCODER SETTING #
# voc choices=['pwgan_csmsc', 'mb_melgan_csmsc','hifigan_csmsc']
##################################################################
voc: 'pwgan_csmsc'
voc_model: # the pdmodel file of your vocoder static model (XX.pdmodel)
voc_params: # the pdiparams file of your vocoder static model (XX.pdiparams)
voc_sample_rate: 24000
voc_predictor_conf:
    device: # set 'gpu:id' or 'cpu'
    switch_ir_optim: True
    glog_info: False # True -> print glog
    summary: True # False -> do not show predictor config
##################################################################
# OTHERS #
##################################################################
lang: 'zh'
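When the test script moves the inference engines to CPU, it edits the predictor-conf device in these `*_pd.yaml` files via the helper:
```bash
python change_yaml.py --change_task speech-asr_pd-cpu
python change_yaml.py --change_task speech-tts_pd-cpu
```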

@ -0,0 +1,184 @@
#!/bin/bash
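# Integration test for the speech server: start the service under each
# combination of engine type (python / inference) and device (gpu / cpu),
# send ASR and TTS client requests, and record results in log/test_result.log.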
StartService(){
    # start the service in the background and remember its pid
    paddlespeech_server start --config_file $config_file 1>>log/server.log 2>>log/server.log.wf &
    echo $! > pid

    start_num=$(cat log/server.log.wf | grep "INFO: Uvicorn running on http://" -c)
    flag="normal"
    while [[ $start_num -lt $target_start_num && $flag == "normal" ]]
    do
        start_num=$(cat log/server.log.wf | grep "INFO: Uvicorn running on http://" -c)
        # if the service failed to start, stop waiting
        if [ $(cat log/server.log.wf | grep -i "error" -c) -gt $error_time ];then
            echo "Service failed to start." | tee -a ./log/test_result.log
            error_time=$(cat log/server.log.wf | grep -i "error" -c)
            flag="abnormal"
        fi
    done
}
ClientTest(){
    # test the asr client twice
    paddlespeech_client asr --server_ip $server_ip --port $port --input ./zh.wav
    ((test_times+=1))
    paddlespeech_client asr --server_ip $server_ip --port $port --input ./zh.wav
    ((test_times+=1))

    # test the tts client twice
    paddlespeech_client tts --server_ip $server_ip --port $port --input "您好,欢迎使用百度飞桨语音合成服务。" --output output.wav
    ((test_times+=1))
    paddlespeech_client tts --server_ip $server_ip --port $port --input "您好,欢迎使用百度飞桨语音合成服务。" --output output.wav
    ((test_times+=1))
}
GetTestResult() {
    # determine whether this round succeeded by counting "200 OK" responses
    response_success_time=$(cat log/server.log | grep "200 OK" -c)
    if (( $response_success_time == $test_times )) ; then
        echo "Testing succeeded. The service configuration is: asr engine type: $1; tts engine type: $1; device: $2." | tee -a ./log/test_result.log
    else
        echo "Testing failed. The service configuration is: asr engine type: $1; tts engine type: $1; device: $2." | tee -a ./log/test_result.log
    fi
    test_times=$response_success_time
}
mkdir -p log
rm -rf log/server.log.wf
rm -rf log/server.log
rm -rf log/test_result.log
config_file=./conf/application.yaml
server_ip=$(cat $config_file | grep "host" | awk -F " " '{print $2}')
port=$(cat $config_file | awk '/port:/ {print $2}')
echo "Service ip: $server_ip" | tee ./log/test_result.log
echo "Service port: $port" | tee -a ./log/test_result.log

# check whether a process is already listening on $port
pid=`lsof -i :"$port" | grep -v "PID" | awk '{print $2}'`
if [ "$pid" != "" ]; then
    echo "The port: $port is occupied, please use another port."
    exit
fi

# download test audios for the ASR client
wget -c https://paddlespeech.bj.bcebos.com/PaddleAudio/zh.wav https://paddlespeech.bj.bcebos.com/PaddleAudio/en.wav

target_start_num=0 # expected number of successful service starts
test_times=0       # number of client requests sent
error_time=0       # number of errors counted in server.log.wf
# start server: asr engine type: python; tts engine type: python; device: gpu
echo "Start the service: asr engine type: python; tts engine type: python; device: gpu" | tee -a ./log/test_result.log
((target_start_num+=1))
StartService
if [[ $start_num -eq $target_start_num && $flag == "normal" ]]; then
    echo "Service started successfully." | tee -a ./log/test_result.log
    ClientTest
    echo "This round of testing is over." | tee -a ./log/test_result.log
    GetTestResult python gpu
else
    echo "Service failed to start, no client test."
    target_start_num=$start_num
fi
kill -9 `cat pid`
rm -rf pid
sleep 2s
echo "**************************************************************************************" | tee -a ./log/test_result.log
# start server: asr engine type: python; tts engine type: python; device: cpu
python change_yaml.py --change_task speech-asr-cpu # change asr.yaml device: cpu
python change_yaml.py --change_task speech-tts-cpu # change tts.yaml device: cpu
echo "Start the service: asr engine type: python; tts engine type: python; device: cpu" | tee -a ./log/test_result.log
((target_start_num+=1))
StartService
if [[ $start_num -eq $target_start_num && $flag == "normal" ]]; then
    echo "Service started successfully." | tee -a ./log/test_result.log
    ClientTest
    echo "This round of testing is over." | tee -a ./log/test_result.log
    GetTestResult python cpu
else
    echo "Service failed to start, no client test."
    target_start_num=$start_num
fi
kill -9 `cat pid`
rm -rf pid
sleep 2s
echo "**************************************************************************************" | tee -a ./log/test_result.log
# start server: asr engine type: inference; tts engine type: inference; device: gpu
python change_yaml.py --change_task app-asr-inference # change application.yaml, asr engine_type: inference; asr engine_backend: asr_pd.yaml
python change_yaml.py --change_task app-tts-inference # change application.yaml, tts engine_type: inference; tts engine_backend: tts_pd.yaml
echo "Start the service: asr engine type: inference; tts engine type: inference; device: gpu" | tee -a ./log/test_result.log
((target_start_num+=1))
StartService
if [[ $start_num -eq $target_start_num && $flag == "normal" ]]; then
    echo "Service started successfully." | tee -a ./log/test_result.log
    ClientTest
    echo "This round of testing is over." | tee -a ./log/test_result.log
    GetTestResult inference gpu
else
    echo "Service failed to start, no client test."
    target_start_num=$start_num
fi
kill -9 `cat pid`
rm -rf pid
sleep 2s
echo "**************************************************************************************" | tee -a ./log/test_result.log
# start server: asr engine type: inference; tts engine type: inference; device: cpu
python change_yaml.py --change_task speech-asr_pd-cpu # change asr_pd.yaml device: cpu
python change_yaml.py --change_task speech-tts_pd-cpu # change tts_pd.yaml device: cpu
echo "start the service: asr engine type: inference; tts engine type: inference; device: cpu" | tee -a ./log/test_result.log
((target_start_num+=1))
StartService
if [[ $start_num -eq $target_start_num && $flag == "normal" ]]; then
    echo "Service started successfully." | tee -a ./log/test_result.log
    ClientTest
    echo "This round of testing is over." | tee -a ./log/test_result.log
    GetTestResult inference cpu
else
    echo "Service failed to start, no client test."
    target_start_num=$start_num
fi
kill -9 `cat pid`
rm -rf pid
sleep 2s
echo "**************************************************************************************" | tee -a ./log/test_result.log
echo "All tests completed." | tee -a ./log/test_result.log
# show all the test results
echo "***************** Here are all the test results ********************"
cat ./log/test_result.log
# restore conf to match demos/speech_server
cp ../../../demos/speech_server/conf/ ./ -rf