parent
f2e64beadc
commit
59a78f2a46
@ -0,0 +1,2 @@
|
||||
exp
|
||||
|
@ -0,0 +1,40 @@
|
||||
#!/usr/bin/env python3
|
||||
import argparse
|
||||
|
||||
if __name__ == '__main__':
|
||||
parser = argparse.ArgumentParser(prog=__doc__)
|
||||
parser.add_argument(
|
||||
'--logfile', type=str, required=True, help='ws client log file')
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
rtfs = []
|
||||
with open(args.logfile, 'r') as f:
|
||||
for line in f:
|
||||
if 'RTF=' in line:
|
||||
# udio duration: 6.126, elapsed time: 3.471978187561035, RTF=0.5667610492264177
|
||||
line = line.strip()
|
||||
beg = line.index("audio")
|
||||
line = line[beg:]
|
||||
|
||||
items = line.split(',')
|
||||
vals = []
|
||||
for elem in items:
|
||||
if "RTF=" in elem:
|
||||
continue
|
||||
_, val = elem.split(":")
|
||||
vals.append(eval(val))
|
||||
keys = ['T', 'P']
|
||||
meta = dict(zip(keys, vals))
|
||||
|
||||
rtfs.append(meta)
|
||||
|
||||
T = 0.0
|
||||
P = 0.0
|
||||
n = 0
|
||||
for m in rtfs:
|
||||
n += 1
|
||||
T += m['T']
|
||||
P += m['P']
|
||||
|
||||
print(f"RTF: {P/T}, utts: {n}")
|
@ -0,0 +1,21 @@
|
||||
#!/bin/bash

# Compute the RTF (real-time factor) of the websocket ASR server over a
# wav.scp file, by running the client and summarizing its log.
# wav_scp can be generated from `speechx/examples/ds2_ol/aishell`.

if [ $# -ne 1 ]; then
    # -ne: numeric comparison (the original used the string operator !=).
    echo "usage: $0 wav_scp"
    # exit status must be 0-255; `exit -1` is non-portable (was wrapped to 255).
    exit 1
fi

scp=$1

exp=exp
mkdir -p "$exp"

# Run the client over all wavs; capture stdout+stderr for RTF parsing.
python3 local/websocket_client.py --server_ip 127.0.0.1 --port 8090 --wavscp "$scp" &> "$exp/log.rsl"

# Summarize RTF from the captured log.
python3 local/rtf_from_log.py --logfile "$exp/log.rsl"
|
||||
|
||||
|
||||
|
@ -0,0 +1,45 @@
|
||||
#!/usr/bin/env python3
|
||||
import argparse
|
||||
|
||||
import onnxruntime as ort
|
||||
|
||||
# onnxruntime optimizer.
|
||||
# https://onnxruntime.ai/docs/performance/graph-optimizations.html
|
||||
# https://onnxruntime.ai/docs/api/python/api_summary.html#api
|
||||
|
||||
|
||||
def parse_arguments():
    """Parse CLI arguments for the ONNX graph-optimization tool.

    Returns:
        argparse.Namespace with model_in, opt_level, model_out, debug.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_in', required=True, type=str, help='Path to onnx model.')
    parser.add_argument(
        '--opt_level',
        required=True,
        type=int,
        default=0,
        choices=[0, 1, 2],
        # Fixed copy-pasted help text (previously "Path to onnx model.").
        help='Graph optimization level: 0=basic, 1=extended, 2=all.')
    parser.add_argument(
        '--model_out', required=True, help='path to save the optimized model.')
    # store_true: previously `--debug` demanded a value and any non-empty
    # string (even "False") parsed as truthy.
    parser.add_argument(
        '--debug',
        action='store_true',
        default=False,
        help='output debug info.')
    return parser.parse_args()
|
||||
|
||||
|
||||
if __name__ == '__main__':
    args = parse_arguments()

    sess_options = ort.SessionOptions()

    # Translate the CLI level (0/1/2) into the ORT graph-optimization enum.
    print(f"opt level: {args.opt_level}")
    if args.opt_level == 0:
        level = ort.GraphOptimizationLevel.ORT_ENABLE_BASIC
    elif args.opt_level == 1:
        level = ort.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
    else:
        level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
    sess_options.graph_optimization_level = level

    # Setting this path makes ORT serialize the optimized graph to disk.
    sess_options.optimized_model_filepath = args.model_out

    # Creating the session triggers optimization (and thus the model dump).
    session = ort.InferenceSession(args.model_in, sess_options)
Loading…
Reference in new issue