From 5ad2f6f7fbee39b8f64a02c848a768b5bcae7ab6 Mon Sep 17 00:00:00 2001
From: Hui Zhang
Date: Wed, 8 Jun 2022 11:48:03 +0000
Subject: [PATCH 01/10] pdmodel prune and infershape

---
 speechx/examples/ds2_ol/onnx/.gitignore       |   1 +
 speechx/examples/ds2_ol/onnx/local/netron.sh  |  12 ++
 .../examples/ds2_ol/onnx/local/onnx_opt.sh    |   4 +
 .../ds2_ol/onnx/local/pd_infer_shape.py       | 109 ++++++++++++
 .../ds2_ol/onnx/local/pd_prune_model.py       | 156 ++++++++++++++++++
 speechx/examples/ds2_ol/onnx/local/prune.sh   |  22 +++
 speechx/examples/ds2_ol/onnx/local/tonnx.sh   |  23 +++
 speechx/examples/ds2_ol/onnx/path.sh          |  14 ++
 speechx/examples/ds2_ol/onnx/run.sh           |  45 +++++
 speechx/examples/ds2_ol/onnx/utils            |   1 +
 10 files changed, 387 insertions(+)
 create mode 100644 speechx/examples/ds2_ol/onnx/.gitignore
 create mode 100755 speechx/examples/ds2_ol/onnx/local/netron.sh
 create mode 100755 speechx/examples/ds2_ol/onnx/local/onnx_opt.sh
 create mode 100644 speechx/examples/ds2_ol/onnx/local/pd_infer_shape.py
 create mode 100755 speechx/examples/ds2_ol/onnx/local/pd_prune_model.py
 create mode 100755 speechx/examples/ds2_ol/onnx/local/prune.sh
 create mode 100755 speechx/examples/ds2_ol/onnx/local/tonnx.sh
 create mode 100755 speechx/examples/ds2_ol/onnx/path.sh
 create mode 100755 speechx/examples/ds2_ol/onnx/run.sh
 create mode 120000 speechx/examples/ds2_ol/onnx/utils

diff --git a/speechx/examples/ds2_ol/onnx/.gitignore b/speechx/examples/ds2_ol/onnx/.gitignore
new file mode 100644
index 00000000..1269488f
--- /dev/null
+++ b/speechx/examples/ds2_ol/onnx/.gitignore
@@ -0,0 +1 @@
+data
diff --git a/speechx/examples/ds2_ol/onnx/local/netron.sh b/speechx/examples/ds2_ol/onnx/local/netron.sh
new file mode 100755
index 00000000..73e089ed
--- /dev/null
+++ b/speechx/examples/ds2_ol/onnx/local/netron.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+if [ $# != 1 ];then
+    echo "usage: $0 model_path"
+    exit 1
+fi
+
+
+file=$1
+
+pip install netron
+netron -p 8082 --host $(hostname -i) $file
\ No newline at end of file
diff --git a/speechx/examples/ds2_ol/onnx/local/onnx_opt.sh b/speechx/examples/ds2_ol/onnx/local/onnx_opt.sh
new file mode 100755
index 00000000..dd8fbd20
--- /dev/null
+++ b/speechx/examples/ds2_ol/onnx/local/onnx_opt.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+# onnxsim is the CLI entry point installed by the onnx-simplifier package
+onnxsim $1 $2
\ No newline at end of file
diff --git a/speechx/examples/ds2_ol/onnx/local/pd_infer_shape.py b/speechx/examples/ds2_ol/onnx/local/pd_infer_shape.py
new file mode 100644
index 00000000..318131f9
--- /dev/null
+++ b/speechx/examples/ds2_ol/onnx/local/pd_infer_shape.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python3 -W ignore::DeprecationWarning
+# https://github.com/jiangjiajun/PaddleUtils/blob/main/paddle/README.md#2-%E4%BF%AE%E6%94%B9paddle%E6%A8%A1%E5%9E%8B%E8%BE%93%E5%85%A5shape
+import argparse
+
+
+def process_old_ops_desc(program):
+    """Set the matmul op's head_number attr to 1 if it does not exist.
+
+    Args:
+        program (Program): the program to patch in place.
+    """
+    for i in range(len(program.blocks[0].ops)):
+        if program.blocks[0].ops[i].type == "matmul":
+            if not program.blocks[0].ops[i].has_attr("head_number"):
+                program.blocks[0].ops[i]._set_attr("head_number", 1)
+
+
+def infer_shape(program, input_shape_dict):
+    # model version, e.g. 2002002
+    model_version = program.desc._version()
+    # paddle version, e.g. 2.2.2
+    paddle_version = paddle.__version__
+    major_ver = model_version // 1000000
+    minor_ver = (model_version - major_ver * 1000000) // 1000
+    patch_ver = model_version - major_ver * 1000000 - minor_ver * 1000
+    model_version = "{}.{}.{}".format(major_ver, minor_ver, patch_ver)
+    if model_version != paddle_version:
+        print(
+            f"[WARNING] The model was saved by paddlepaddle v{model_version}, but the installed paddlepaddle is v{paddle_version}. This mismatch may cause errors; it is recommended to reinstall the paddlepaddle version that matches the model."
+        )
+
+    OP_WITHOUT_KERNEL_SET = {
+        'feed', 'fetch', 'recurrent', 'go', 'rnn_memory_helper_grad',
+        'conditional_block', 'while', 'send', 'recv', 'listen_and_serv',
+        'fl_listen_and_serv', 'ncclInit', 'select', 'checkpoint_notify',
+        'gen_bkcl_id', 'c_gen_bkcl_id', 'gen_nccl_id', 'c_gen_nccl_id',
+        'c_comm_init', 'c_sync_calc_stream', 'c_sync_comm_stream',
+        'queue_generator', 'dequeue', 'enqueue', 'heter_listen_and_serv',
+        'c_wait_comm', 'c_wait_compute', 'c_gen_hccl_id', 'c_comm_init_hccl',
+        'copy_cross_scope'
+    }
+
+    for k, v in input_shape_dict.items():
+        program.blocks[0].var(k).desc.set_shape(v)
+
+    for i in range(len(program.blocks)):
+        for j in range(len(program.blocks[i].ops)):
+            # ops without a kernel cannot infer shape
+            if program.blocks[i].ops[j].type in OP_WITHOUT_KERNEL_SET:
+                print(f"not infer: {program.blocks[i].ops[j].type} op")
+                continue
+            print(f"infer: {program.blocks[i].ops[j].type} op")
+            program.blocks[i].ops[j].desc.infer_shape(program.blocks[i].desc)
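+
+# worked example of the version arithmetic in infer_shape() above:
+#   2002002 // 1000000 = 2; (2002002 - 2000000) // 1000 = 2;
+#   2002002 - 2000000 - 2000 = 2  ->  "2.2.2"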
+
+
+def parse_arguments():
+    # python pd_infer_shape.py --model_dir data/exp/deepspeech2_online/checkpoints \
+    #     --model_filename avg_1.jit.pdmodel \
+    #     --params_filename avg_1.jit.pdiparams \
+    #     --save_dir . \
+    #     --input_shape_dict="{'audio_chunk':[1,-1,161], 'audio_chunk_lens':[1], 'chunk_state_c_box':[5, 1, 1024], 'chunk_state_h_box':[5,1,1024]}"
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        '--model_dir',
+        required=True,
+        help='Path of the directory containing the input model.')
+    parser.add_argument(
+        '--model_filename', required=True, help='model.pdmodel.')
+    parser.add_argument(
+        '--params_filename', required=True, help='model.pdiparams.')
+    parser.add_argument(
+        '--save_dir',
+        required=True,
+        help='Directory to save the exported model.')
+    parser.add_argument(
+        '--input_shape_dict', required=True, help="The new shape information.")
+    return parser.parse_args()
+
+
+if __name__ == '__main__':
+    args = parse_arguments()
+
+    import paddle
+    paddle.enable_static()
+    import paddle.fluid as fluid
+
+    input_shape_dict_str = args.input_shape_dict
+    # literal_eval is safer than eval for parsing the shape dict from argv
+    import ast
+    input_shape_dict = ast.literal_eval(input_shape_dict_str)
+
+    print("Start to load paddle model...")
+    exe = fluid.Executor(fluid.CPUPlace())
+
+    prog, ipts, outs = fluid.io.load_inference_model(
+        args.model_dir,
+        exe,
+        model_filename=args.model_filename,
+        params_filename=args.params_filename)
+
+    process_old_ops_desc(prog)
+    infer_shape(prog, input_shape_dict)
+
+    fluid.io.save_inference_model(
+        args.save_dir,
+        ipts,
+        outs,
+        exe,
+        prog,
+        model_filename=args.model_filename,
+        params_filename=args.params_filename)
diff --git a/speechx/examples/ds2_ol/onnx/local/pd_prune_model.py b/speechx/examples/ds2_ol/onnx/local/pd_prune_model.py
new file mode 100755
index 00000000..d723c7ce
--- /dev/null
+++ b/speechx/examples/ds2_ol/onnx/local/pd_prune_model.py
@@ -0,0 +1,156 @@
+#!/usr/bin/env python3 -W ignore::DeprecationWarning
+# https://github.com/jiangjiajun/PaddleUtils/blob/main/paddle/README.md#1-%E8%A3%81%E5%89%AApaddle%E6%A8%A1%E5%9E%8B
+import argparse
+import sys
+from typing import List
+
+
+def prepend_feed_ops(program,
+                     feed_target_names: List[str],
+                     feed_holder_name='feed'):
+    import paddle.fluid.core as core
+    if len(feed_target_names) == 0:
+        return
+
+    global_block = program.global_block()
+    feed_var = global_block.create_var(
+        name=feed_holder_name,
+        type=core.VarDesc.VarType.FEED_MINIBATCH,
+        persistable=True, )
+
+    for i, name in enumerate(feed_target_names, 0):
+        if not global_block.has_var(name):
+            print(
+                f"The input[{i}]: '{name}' doesn't exist in the pruned inference program, so it will be ignored in the newly saved model."
+            )
+            continue
+
+        out = global_block.var(name)
+        global_block._prepend_op(
+            type='feed',
+            inputs={'X': [feed_var]},
+            outputs={'Out': [out]},
+            attrs={'col': i}, )
+
+
+def append_fetch_ops(program,
+                     fetch_target_names: List[str],
+                     fetch_holder_name='fetch'):
+    """Add fetch ops to the program in place.
+
+    Args:
+        program (Program): inference program
+        fetch_target_names (List[str]): fetch target names
+        fetch_holder_name (str, optional): fetch op name. Defaults to 'fetch'.
+    """
+    import paddle.fluid.core as core
+    global_block = program.global_block()
+    fetch_var = global_block.create_var(
+        name=fetch_holder_name,
+        type=core.VarDesc.VarType.FETCH_LIST,
+        persistable=True, )
+
+    print(f"the len of fetch_target_names: {len(fetch_target_names)}")
+
+    for i, name in enumerate(fetch_target_names):
+        global_block.append_op(
+            type='fetch',
+            inputs={'X': [name]},
+            outputs={'Out': [fetch_var]},
+            attrs={'col': i}, )
+
+
+def insert_fetch(program,
+                 fetch_target_names: List[str],
+                 fetch_holder_name='fetch'):
+    """Replace the program's fetch ops in place with new ones.
+
+    Args:
+        program (Program): inference program
+        fetch_target_names (List[str]): fetch target names
+        fetch_holder_name (str, optional): fetch op name. Defaults to 'fetch'.
+    """
+    global_block = program.global_block()
+
+    # remove the old fetch ops
+    need_to_remove_op_index = []
+    for i, op in enumerate(global_block.ops):
+        if op.type == 'fetch':
+            need_to_remove_op_index.append(i)
+
+    for index in reversed(need_to_remove_op_index):
+        global_block._remove_op(index)
+
+    program.desc.flush()
+
+    # append the new fetch ops
+    append_fetch_ops(program, fetch_target_names, fetch_holder_name)
+ ) + sys.exit(-1) + + import paddle + paddle.enable_static() + # hack prepend_feed_ops + paddle.fluid.io.prepend_feed_ops = prepend_feed_ops + + import paddle.fluid as fluid + + print("start to load paddle model") + exe = fluid.Executor(fluid.CPUPlace()) + prog, ipts, outs = fluid.io.load_inference_model( + args.model_dir, + exe, + model_filename=args.model_filename, + params_filename=args.params_filename) + + print("start to load insert fetch op") + new_outputs = [] + insert_fetch(prog, args.output_names) + for out_name in args.output_names: + new_outputs.append(prog.global_block().var(out_name)) + + # not equal to paddle.static.save_inference_model + fluid.io.save_inference_model( + args.save_dir, + ipts, + new_outputs, + exe, + prog, + model_filename=args.model_filename, + params_filename=args.params_filename) + + if args.debug: + for op in prog.global_block().ops: + print(op) diff --git a/speechx/examples/ds2_ol/onnx/local/prune.sh b/speechx/examples/ds2_ol/onnx/local/prune.sh new file mode 100755 index 00000000..ee5f6b5f --- /dev/null +++ b/speechx/examples/ds2_ol/onnx/local/prune.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +set -e + +if [ $# != 5 ]; then + echo "usage: $0 model_dir model_filename param_filename outputs_names save_dir" + exit 1 +fi + +dir=$1 +model=$2 +param=$3 +outputs=$4 +save_dir=$5 + + +python local/pd_prune_model.py \ + --model_dir $dir \ + --model_filename $model \ + --params_filename $param \ + --output_names $outputs \ + --save_dir $save_dir \ No newline at end of file diff --git a/speechx/examples/ds2_ol/onnx/local/tonnx.sh b/speechx/examples/ds2_ol/onnx/local/tonnx.sh new file mode 100755 index 00000000..a57b84f6 --- /dev/null +++ b/speechx/examples/ds2_ol/onnx/local/tonnx.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +if [ $# != 4 ];then + echo "usage: $0 model_dir model_name param_name onnx_output_name" + exit 1 +fi + +dir=$1 +model=$2 +param=$3 +output=$4 + +pip install paddle2onnx + +# https://github.com/PaddlePaddle/Paddle2ONNX#%E5%91%BD%E4%BB%A4%E8%A1%8C%E8%BD%AC%E6%8D%A2 +paddle2onnx --model_dir $dir \ + --model_filename $model \ + --params_filename $param \ + --save_file $output \ + --enable_dev_version True \ + --opset_version 9 \ + --enable_onnx_checker True \ + \ No newline at end of file diff --git a/speechx/examples/ds2_ol/onnx/path.sh b/speechx/examples/ds2_ol/onnx/path.sh new file mode 100755 index 00000000..97d48737 --- /dev/null +++ b/speechx/examples/ds2_ol/onnx/path.sh @@ -0,0 +1,14 @@ +# This contains the locations of binarys build required for running the examples. + +MAIN_ROOT=`realpath $PWD/../../../../` +SPEECHX_ROOT=$PWD/../../../ +SPEECHX_BUILD=$SPEECHX_ROOT/build/speechx + +SPEECHX_TOOLS=$SPEECHX_ROOT/tools +TOOLS_BIN=$SPEECHX_TOOLS/valgrind/install/bin + +[ -d $SPEECHX_BUILD ] || { echo "Error: 'build/speechx' directory not found. please ensure that the project build successfully"; } + +export LC_AL=C + +export PATH=$PATH:$TOOLS_BIN diff --git a/speechx/examples/ds2_ol/onnx/run.sh b/speechx/examples/ds2_ol/onnx/run.sh new file mode 100755 index 00000000..37d4f7f7 --- /dev/null +++ b/speechx/examples/ds2_ol/onnx/run.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +set -e + +. path.sh + +stage=0 +stop_stage=100 + +. 
+
+. utils/parse_options.sh
+
+data=data
+
+if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ];then
+    test -f $data/asr0_deepspeech2_online_wenetspeech_ckpt_1.0.0a.model.tar.gz || wget -c https://paddlespeech.bj.bcebos.com/s2t/wenetspeech/asr0/asr0_deepspeech2_online_wenetspeech_ckpt_1.0.0a.model.tar.gz -P $data
+
+    pushd $data
+    tar zxvf asr0_deepspeech2_online_wenetspeech_ckpt_1.0.0a.model.tar.gz
+    popd
+fi
+
+dir=$data/exp/deepspeech2_online/checkpoints
+model=avg_1.jit.pdmodel
+param=avg_1.jit.pdiparams
+
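+# pruned output tensors below (roles assumed from the model's input names):
+# the ctc softmax probabilities, the output lens, and the two recurrent
+# state tensors fed back between chunks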
+output_names=softmax_0.tmp_0,tmp_5,concat_0.tmp_0,concat_1.tmp_0
+if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ];then
+    mkdir -p $data/prune
+
+    # prune the model according to output_names.
+    ./local/prune.sh $dir $model $param $output_names $data/prune
+fi
+
+input_shape_dict="{'audio_chunk':[1,-1,161], 'audio_chunk_lens':[1], 'chunk_state_c_box':[5, 1, 1024], 'chunk_state_h_box':[5,1,1024]}"
+if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ];then
+    mkdir -p $data/shape
+
+    python3 local/pd_infer_shape.py \
+        --model_dir $dir \
+        --model_filename $model \
+        --params_filename $param \
+        --save_dir $data/shape \
+        --input_shape_dict=${input_shape_dict}
+fi
+
diff --git a/speechx/examples/ds2_ol/onnx/utils b/speechx/examples/ds2_ol/onnx/utils
new file mode 120000
index 00000000..c2519a9d
--- /dev/null
+++ b/speechx/examples/ds2_ol/onnx/utils
@@ -0,0 +1 @@
+../../../../utils/
\ No newline at end of file

From 28c1794b9b15e94a75c58b627eb2d155afccdf8b Mon Sep 17 00:00:00 2001
From: Hui Zhang
Date: Wed, 8 Jun 2022 11:48:17 +0000
Subject: [PATCH 02/10] format

---
 paddlespeech/server/engine/asr/online/asr_engine.py | 2 +-
 paddlespeech/server/ws/asr_api.py                   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/paddlespeech/server/engine/asr/online/asr_engine.py b/paddlespeech/server/engine/asr/online/asr_engine.py
index 0c5e4586..ec19aaf1 100644
--- a/paddlespeech/server/engine/asr/online/asr_engine.py
+++ b/paddlespeech/server/engine/asr/online/asr_engine.py
@@ -794,7 +794,7 @@ class ASRServerExecutor(ASRExecutor):
         # update num_decoding_left_chunks
         if num_decoding_left_chunks:
             self.config.decode.num_decoding_left_chunks = num_decoding_left_chunks
-        assert self.config.decode.num_decoding_left_chunks == -1 or self.config.decode.num_decoding_left_chunks >= 0, f"num_decoding_left_chunks should be -1 or >=0"
+        assert self.config.decode.num_decoding_left_chunks == -1 or self.config.decode.num_decoding_left_chunks >= 0, "num_decoding_left_chunks should be -1 or >=0"
         # we only support the ctc_prefix_beam_search and attention_rescoring decoding methods
         # Generally we set the decoding_method to attention_rescoring
         if self.config.decode.decoding_method not in [
diff --git a/paddlespeech/server/ws/asr_api.py b/paddlespeech/server/ws/asr_api.py
index 23609b41..ae1c8831 100644
--- a/paddlespeech/server/ws/asr_api.py
+++ b/paddlespeech/server/ws/asr_api.py
@@ -92,7 +92,7 @@ async def websocket_endpoint(websocket: WebSocket):
             else:
                 resp = {"status": "ok", "message": "no valid json data"}
             await websocket.send_json(resp)
-
+
         elif "bytes" in message:
             # bytes for the pcm data
             message = message["bytes"]

From 6477b6f3e6d68199050d0a318f80a27c9c75c72f Mon Sep 17 00:00:00 2001
From: Hui Zhang
Date: Wed, 8 Jun 2022 12:42:57 +0000
Subject: [PATCH 03/10] onnx rename and prune

---
 .../ds2_ol/onnx/local/onnx_prune_model.py     | 127 ++++++++++++++++++
 .../ds2_ol/onnx/local/onnx_rename_model.py    | 110 +++++++++++++++
 .../ds2_ol/onnx/local/pd_infer_shape.py       |   0
 speechx/examples/ds2_ol/onnx/local/prune.sh   |   1 +
 speechx/examples/ds2_ol/onnx/local/tonnx.sh   |   2 +
 speechx/examples/ds2_ol/onnx/run.sh           |  14 +-
 6 files changed, 250 insertions(+), 4 deletions(-)
 create mode 100644 speechx/examples/ds2_ol/onnx/local/onnx_prune_model.py
 create mode 100755 speechx/examples/ds2_ol/onnx/local/onnx_rename_model.py
 mode change 100644 => 100755 speechx/examples/ds2_ol/onnx/local/pd_infer_shape.py

diff --git a/speechx/examples/ds2_ol/onnx/local/onnx_prune_model.py b/speechx/examples/ds2_ol/onnx/local/onnx_prune_model.py
new file mode 100644
index 00000000..a5148edd
--- /dev/null
+++ b/speechx/examples/ds2_ol/onnx/local/onnx_prune_model.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python3 -W ignore::DeprecationWarning
+import argparse
+import copy
+import sys
+
+import onnx
+
+
+def parse_arguments():
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        '--model',
+        required=True,
+        help='Path of the input onnx model.')
+    parser.add_argument(
+        '--output_names',
+        required=True,
+        nargs='+',
+        help='The outputs of the pruned model.')
+    parser.add_argument(
+        '--save_file', required=True, help='Path to save the new onnx model.')
+    return parser.parse_args()
+
+
+if __name__ == '__main__':
+    args = parse_arguments()
+
+    if len(set(args.output_names)) < len(args.output_names):
+        print(
+            "[ERROR] There's a duplicate name in --output_names, which is not allowed."
+        )
+        sys.exit(-1)
+
+    model = onnx.load(args.model)
+
+    # collect all node outputs and graph output
+    output_tensor_names = set()
+    for node in model.graph.node:
+        for out in node.output:
+            # may contain model output
+            output_tensor_names.add(out)
+
+    # for out in model.graph.output:
+    #     output_tensor_names.add(out.name)
+
+    for output_name in args.output_names:
+        if output_name not in output_tensor_names:
+            print(
+                "[ERROR] Cannot find output tensor name '{}' in onnx model graph.".
+                format(output_name))
+            sys.exit(-1)
+
+    output_node_indices = set()  # nodes that produce the requested outputs
+    output_to_node = dict()  # map from tensor name to producer node index
+    for i, node in enumerate(model.graph.node):
+        for out in node.output:
+            output_to_node[out] = i
+            if out in args.output_names:
+                output_node_indices.add(i)
+
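+    # The traversal below is a reverse walk over the graph: starting from
+    # the nodes that produce the requested outputs, repeatedly mark the
+    # producer of every input tensor as reserved until only graph inputs
+    # remain; any node never reached is dead once the new outputs are set.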
+    # from the outputs, find all ancestor nodes
+    reserved_node_indices = copy.deepcopy(
+        output_node_indices)  # nodes to keep
+    reserved_inputs = set()  # model inputs to keep
+    new_output_node_indices = copy.deepcopy(output_node_indices)
+
+    while len(new_output_node_indices) > 0:
+        output_node_indices = copy.deepcopy(new_output_node_indices)
+
+        new_output_node_indices = set()
+
+        for out_node_idx in output_node_indices:
+            # backtrace to the parent nodes
+            for ipt in model.graph.node[out_node_idx].input:
+                if ipt in output_to_node:
+                    reserved_node_indices.add(output_to_node[ipt])
+                    new_output_node_indices.add(output_to_node[ipt])
+                else:
+                    reserved_inputs.add(ipt)
+
+    num_inputs = len(model.graph.input)
+    num_outputs = len(model.graph.output)
+    num_nodes = len(model.graph.node)
+    print(
+        f"old graph has {num_inputs} inputs, {num_outputs} outputs, {num_nodes} nodes"
+    )
+    print(f"{len(reserved_node_indices)} nodes to keep.")
+
+    # delete the nodes not to keep
+    for idx in range(num_nodes - 1, -1, -1):
+        if idx not in reserved_node_indices:
+            del model.graph.node[idx]
+
+    # delete the graph inputs not to keep
+    for idx in range(num_inputs - 1, -1, -1):
+        if model.graph.input[idx].name not in reserved_inputs:
+            del model.graph.input[idx]
+
+    # delete the old graph outputs
+    for i in range(num_outputs):
+        del model.graph.output[0]
+
+    # add the user-requested outputs as the new graph outputs
+    for out in args.output_names:
+        model.graph.output.extend([onnx.ValueInfoProto(name=out)])
+
+    # infer shape
+    try:
+        from onnx_infer_shape import SymbolicShapeInference
+        model = SymbolicShapeInference.infer_shapes(
+            model,
+            int_max=2**31 - 1,
+            auto_merge=True,
+            guess_output_rank=False,
+            verbose=1)
+    except Exception as e:
+        print(f"skip infer shape step: {e}")
+
+    # check onnx model
+    onnx.checker.check_model(model)
+    # save onnx model
+    onnx.save(model, args.save_file)
+    print("[Finished] The new model saved in {}.".format(args.save_file))
+    print("[DEBUG INFO] The inputs of new model: {}".format(
+        [x.name for x in model.graph.input]))
+    print("[DEBUG INFO] The outputs of new model: {}".format(
+        [x.name for x in model.graph.output]))
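+
+# usage sketch:
+#   python local/onnx_prune_model.py --model model.onnx \
+#       --output_names softmax_0.tmp_0 tmp_5 --save_file pruned.onnx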
diff --git a/speechx/examples/ds2_ol/onnx/local/onnx_rename_model.py b/speechx/examples/ds2_ol/onnx/local/onnx_rename_model.py
new file mode 100755
index 00000000..f508c0a3
--- /dev/null
+++ b/speechx/examples/ds2_ol/onnx/local/onnx_rename_model.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python3 -W ignore::DeprecationWarning
+import argparse
+import sys
+
+import onnx
+
+
+def parse_arguments():
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        '--model',
+        required=True,
+        help='Path of the input onnx model.')
+    parser.add_argument(
+        '--origin_names',
+        required=True,
+        nargs='+',
+        help='The original names you want to modify.')
+    parser.add_argument(
+        '--new_names',
+        required=True,
+        nargs='+',
+        help='The new names to change to; the number of new_names should match the number of origin_names.'
+    )
+    parser.add_argument(
+        '--save_file', required=True, help='Path to save the new onnx model.')
+    return parser.parse_args()
+
+
+if __name__ == '__main__':
+    args = parse_arguments()
+
+    if len(set(args.origin_names)) < len(args.origin_names):
+        print(
+            "[ERROR] There's a duplicate name in --origin_names, which is not allowed."
+        )
+        sys.exit(-1)
+
+    if len(set(args.new_names)) < len(args.new_names):
+        print(
+            "[ERROR] There's a duplicate name in --new_names, which is not allowed."
+        )
+        sys.exit(-1)
+
+    if len(args.new_names) != len(args.origin_names):
+        print(
+            "[ERROR] Number of --new_names must match the number of --origin_names."
+        )
+        sys.exit(-1)
+
+    model = onnx.load(args.model)
+
+    # collect the graph inputs and all node outputs
+    output_tensor_names = set()
+    for ipt in model.graph.input:
+        output_tensor_names.add(ipt.name)
+
+    for node in model.graph.node:
+        for out in node.output:
+            output_tensor_names.add(out)
+
+    for origin_name in args.origin_names:
+        if origin_name not in output_tensor_names:
+            print(
+                f"[ERROR] Cannot find tensor name '{origin_name}' in onnx model graph."
+            )
+            sys.exit(-1)
+
+    for new_name in args.new_names:
+        if new_name in output_tensor_names:
+            print(
+                f"[ERROR] The defined new_name '{new_name}' already exists in the onnx model, which is not allowed."
+            )
+            sys.exit(-1)
+
+    # rename the graph inputs
+    for i, ipt in enumerate(model.graph.input):
+        if ipt.name in args.origin_names:
+            idx = args.origin_names.index(ipt.name)
+            model.graph.input[i].name = args.new_names[idx]
+
+    # rename the node inputs and outputs
+    for i, node in enumerate(model.graph.node):
+        for j, ipt in enumerate(node.input):
+            if ipt in args.origin_names:
+                idx = args.origin_names.index(ipt)
+                model.graph.node[i].input[j] = args.new_names[idx]
+
+        for j, out in enumerate(node.output):
+            if out in args.origin_names:
+                idx = args.origin_names.index(out)
+                model.graph.node[i].output[j] = args.new_names[idx]
+
+    # rename the graph outputs
+    for i, out in enumerate(model.graph.output):
+        if out.name in args.origin_names:
+            idx = args.origin_names.index(out.name)
+            model.graph.output[i].name = args.new_names[idx]
+
+    # check onnx model
+    onnx.checker.check_model(model)
+
+    # save model
+    onnx.save(model, args.save_file)
+
+    print("[Finished] The new model saved in {}.".format(args.save_file))
+    print("[DEBUG INFO] The inputs of new model: {}".format(
+        [x.name for x in model.graph.input]))
+    print("[DEBUG INFO] The outputs of new model: {}".format(
+        [x.name for x in model.graph.output]))
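+
+# usage sketch (illustrative tensor names):
+#   python local/onnx_rename_model.py --model model.onnx \
+#       --origin_names fc_0.tmp_0 --new_names softmax --save_file renamed.onnx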
diff --git a/speechx/examples/ds2_ol/onnx/local/pd_infer_shape.py b/speechx/examples/ds2_ol/onnx/local/pd_infer_shape.py
old mode 100644
new mode 100755
diff --git a/speechx/examples/ds2_ol/onnx/local/prune.sh b/speechx/examples/ds2_ol/onnx/local/prune.sh
index ee5f6b5f..64636bcc 100755
--- a/speechx/examples/ds2_ol/onnx/local/prune.sh
+++ b/speechx/examples/ds2_ol/onnx/local/prune.sh
@@ -3,6 +3,7 @@
 set -e
 
 if [ $# != 5 ]; then
+    # local/prune.sh data/exp/deepspeech2_online/checkpoints avg_1.jit.pdmodel avg_1.jit.pdiparams softmax_0.tmp_0,tmp_5,concat_0.tmp_0,concat_1.tmp_0 $PWD
     echo "usage: $0 model_dir model_filename param_filename output_names save_dir"
     exit 1
 fi
diff --git a/speechx/examples/ds2_ol/onnx/local/tonnx.sh b/speechx/examples/ds2_ol/onnx/local/tonnx.sh
index a57b84f6..58f0d736 100755
--- a/speechx/examples/ds2_ol/onnx/local/tonnx.sh
+++ b/speechx/examples/ds2_ol/onnx/local/tonnx.sh
@@ -1,6 +1,7 @@
 #!/bin/bash
 
 if [ $# != 4 ];then
+    # local/tonnx.sh data/exp/deepspeech2_online/checkpoints avg_1.jit.pdmodel avg_1.jit.pdiparams exp/model.onnx
     echo "usage: $0 model_dir model_name param_name onnx_output_name"
     exit 1
 fi
@@ -11,6 +12,7 @@ param=$3
 output=$4
 
 pip install paddle2onnx
+pip install onnx
 
 # https://github.com/PaddlePaddle/Paddle2ONNX#%E5%91%BD%E4%BB%A4%E8%A1%8C%E8%BD%AC%E6%8D%A2
 paddle2onnx --model_dir $dir \
diff --git a/speechx/examples/ds2_ol/onnx/run.sh b/speechx/examples/ds2_ol/onnx/run.sh
index 37d4f7f7..a9f7681c 100755
--- a/speechx/examples/ds2_ol/onnx/run.sh
+++ b/speechx/examples/ds2_ol/onnx/run.sh
@@ -10,6 +10,9 @@ stop_stage=100
 . utils/parse_options.sh
 
 data=data
+exp=exp
+
+mkdir -p $data $exp
 
 if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ];then
     test -f $data/asr0_deepspeech2_online_wenetspeech_ckpt_1.0.0a.model.tar.gz || wget -c https://paddlespeech.bj.bcebos.com/s2t/wenetspeech/asr0/asr0_deepspeech2_online_wenetspeech_ckpt_1.0.0a.model.tar.gz -P $data
@@ -25,21 +28,24 @@ param=avg_1.jit.pdiparams
 
 output_names=softmax_0.tmp_0,tmp_5,concat_0.tmp_0,concat_1.tmp_0
 if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ];then
-    mkdir -p $data/prune
+    mkdir -p $exp/prune
 
     # prune the model according to output_names.
-    ./local/prune.sh $dir $model $param $output_names $data/prune
+    ./local/prune.sh $dir $model $param $output_names $exp/prune
 fi
 
 input_shape_dict="{'audio_chunk':[1,-1,161], 'audio_chunk_lens':[1], 'chunk_state_c_box':[5, 1, 1024], 'chunk_state_h_box':[5,1,1024]}"
 if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ];then
-    mkdir -p $data/shape
+    mkdir -p $exp/shape
 
     python3 local/pd_infer_shape.py \
         --model_dir $dir \
         --model_filename $model \
         --params_filename $param \
-        --save_dir $data/shape \
+        --save_dir $exp/shape \
         --input_shape_dict=${input_shape_dict}
 fi
+
+if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ];then
+    ./local/tonnx.sh $dir $model $param $exp/model.onnx
+fi
\ No newline at end of file

From c90be85398411d074bc01f7db5004f34e1b61caf Mon Sep 17 00:00:00 2001
From: Hui Zhang
Date: Fri, 10 Jun 2022 03:52:42 +0000
Subject: [PATCH 04/10] to onnx

---
 .gitignore                                    |    3 +
 speechx/examples/ds2_ol/onnx/.gitignore       |    1 +
 .../examples/ds2_ol/onnx/local/onnx_clone.sh  |    6 +
 .../ds2_ol/onnx/local/onnx_infer_shape.py     | 2517 +++++++++++++++++
 speechx/examples/ds2_ol/onnx/local/tonnx.sh   |    2 +-
 speechx/examples/ds2_ol/onnx/run.sh           |    4 +-
 6 files changed, 2531 insertions(+), 2 deletions(-)
 create mode 100644 speechx/examples/ds2_ol/onnx/local/onnx_clone.sh
 create mode 100644 speechx/examples/ds2_ol/onnx/local/onnx_infer_shape.py

diff --git a/.gitignore b/.gitignore
index 7328b329..1ed76375 100644
--- a/.gitignore
+++ b/.gitignore
@@ -39,6 +39,9 @@ tools/env.sh
 tools/openfst-1.8.1/
 tools/libsndfile/
 tools/python-soundfile/
+tools/onnx
+tools/onnxruntime
+tools/Paddle2ONNX
 
 speechx/fc_patch/
 
diff --git a/speechx/examples/ds2_ol/onnx/.gitignore b/speechx/examples/ds2_ol/onnx/.gitignore
index 1269488f..328c8678 100644
--- a/speechx/examples/ds2_ol/onnx/.gitignore
+++ b/speechx/examples/ds2_ol/onnx/.gitignore
@@ -1 +1,2 @@
 data
+log
diff --git a/speechx/examples/ds2_ol/onnx/local/onnx_clone.sh b/speechx/examples/ds2_ol/onnx/local/onnx_clone.sh
new file mode 100644
index 00000000..0a472af4
--- /dev/null
+++ b/speechx/examples/ds2_ol/onnx/local/onnx_clone.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+# clone the upstream repos used as references for the onnx tooling
+git clone https://github.com/onnx/onnx.git
+git clone https://github.com/microsoft/onnxruntime.git
+git clone https://github.com/PaddlePaddle/Paddle2ONNX.git
\ No newline at end of file
diff --git a/speechx/examples/ds2_ol/onnx/local/onnx_infer_shape.py b/speechx/examples/ds2_ol/onnx/local/onnx_infer_shape.py
new file mode 100644
index 00000000..c5f83d3e
--- /dev/null
+++ b/speechx/examples/ds2_ol/onnx/local/onnx_infer_shape.py
@@ -0,0 +1,2517 @@
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
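+# NOTE: this file appears to be vendored from onnxruntime's
+# python/tools/symbolic_shape_infer.py; it propagates symbolic (named)
+# dimensions through ops that static onnx shape inference cannot handle.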
+import argparse +import logging + +import numpy as np +import onnx +import sympy +from onnx import helper +from onnx import numpy_helper +from onnx import shape_inference +from packaging import version +assert version.parse(onnx.__version__) >= version.parse("1.8.0") + +logger = logging.getLogger(__name__) + + +def get_attribute(node, attr_name, default_value=None): + found = [attr for attr in node.attribute if attr.name == attr_name] + if found: + return helper.get_attribute_value(found[0]) + return default_value + + +def get_dim_from_proto(dim): + return getattr(dim, dim.WhichOneof('value')) if type( + dim.WhichOneof('value')) == str else None + + +def is_sequence(type_proto): + cls_type = type_proto.WhichOneof('value') + assert cls_type in ['tensor_type', 'sequence_type'] + return cls_type == 'sequence_type' + + +def get_shape_from_type_proto(type_proto): + assert not is_sequence(type_proto) + if type_proto.tensor_type.HasField('shape'): + return [get_dim_from_proto(d) for d in type_proto.tensor_type.shape.dim] + else: + return None # note no shape is different from shape without dim (scalar) + + +def get_shape_from_value_info(vi): + cls_type = vi.type.WhichOneof('value') + if cls_type is None: + return None + if is_sequence(vi.type): + if 'tensor_type' == vi.type.sequence_type.elem_type.WhichOneof('value'): + return get_shape_from_type_proto(vi.type.sequence_type.elem_type) + else: + return None + else: + return get_shape_from_type_proto(vi.type) + + +def make_named_value_info(name): + vi = onnx.ValueInfoProto() + vi.name = name + return vi + + +def get_shape_from_sympy_shape(sympy_shape): + return [ + None if i is None else (int(i) if is_literal(i) else str(i)) + for i in sympy_shape + ] + + +def is_literal(dim): + return type(dim) in [int, np.int64, np.int32, sympy.Integer] or (hasattr( + dim, 'is_number') and dim.is_number) + + +def handle_negative_axis(axis, rank): + assert axis < rank and axis >= -rank + return axis if axis >= 0 else rank + axis + + +def get_opset(mp, domain=None): + domain = domain or ['', 'onnx', 'ai.onnx'] + if type(domain) != list: + domain = [domain] + for opset in mp.opset_import: + if opset.domain in domain: + return opset.version + + return None + + +def as_scalar(x): + if type(x) == list: + assert len(x) == 1 + return x[0] + elif type(x) == np.ndarray: + return x.item() + else: + return x + + +def as_list(x, keep_none): + if type(x) == list: + return x + elif type(x) == np.ndarray: + return list(x) + elif keep_none and x is None: + return None + else: + return [x] + + +def sympy_reduce_product(x): + if type(x) == list: + value = sympy.Integer(1) + for v in x: + value = value * v + else: + value = x + return value + + +class SymbolicShapeInference: + def __init__(self, + int_max, + auto_merge, + guess_output_rank, + verbose, + prefix=''): + self.dispatcher_ = { + 'Add': + self._infer_symbolic_compute_ops, + 'ArrayFeatureExtractor': + self._infer_ArrayFeatureExtractor, + 'AveragePool': + self._infer_Pool, + 'BatchNormalization': + self._infer_BatchNormalization, + 'Cast': + self._infer_Cast, + 'CategoryMapper': + self._infer_CategoryMapper, + 'Compress': + self._infer_Compress, + 'Concat': + self._infer_Concat, + 'ConcatFromSequence': + self._infer_ConcatFromSequence, + 'Constant': + self._infer_Constant, + 'ConstantOfShape': + self._infer_ConstantOfShape, + 'Conv': + self._infer_Conv, + 'CumSum': + self._pass_on_shape_and_type, + 'Div': + self._infer_symbolic_compute_ops, + 'Einsum': + self._infer_Einsum, + 'Expand': + self._infer_Expand, + 'Equal': + 
self._infer_symbolic_compute_ops, + 'Floor': + self._infer_symbolic_compute_ops, + 'Gather': + self._infer_Gather, + 'GatherElements': + self._infer_GatherElements, + 'GatherND': + self._infer_GatherND, + 'Gelu': + self._pass_on_shape_and_type, + 'If': + self._infer_If, + 'Loop': + self._infer_Loop, + 'MatMul': + self._infer_MatMul, + 'MatMulInteger16': + self._infer_MatMulInteger, + 'MaxPool': + self._infer_Pool, + 'Max': + self._infer_symbolic_compute_ops, + 'Min': + self._infer_symbolic_compute_ops, + 'Mul': + self._infer_symbolic_compute_ops, + 'NonMaxSuppression': + self._infer_NonMaxSuppression, + 'NonZero': + self._infer_NonZero, + 'OneHot': + self._infer_OneHot, + 'Pad': + self._infer_Pad, + 'Range': + self._infer_Range, + 'Reciprocal': + self._pass_on_shape_and_type, + 'ReduceSum': + self._infer_ReduceSum, + 'ReduceProd': + self._infer_ReduceProd, + 'Reshape': + self._infer_Reshape, + 'Resize': + self._infer_Resize, + 'Round': + self._pass_on_shape_and_type, + 'Scan': + self._infer_Scan, + 'ScatterElements': + self._infer_ScatterElements, + 'SequenceAt': + self._infer_SequenceAt, + 'SequenceInsert': + self._infer_SequenceInsert, + 'Shape': + self._infer_Shape, + 'Size': + self._infer_Size, + 'Slice': + self._infer_Slice, + 'SoftmaxCrossEntropyLoss': + self._infer_SoftmaxCrossEntropyLoss, + 'SoftmaxCrossEntropyLossInternal': + self._infer_SoftmaxCrossEntropyLoss, + 'NegativeLogLikelihoodLossInternal': + self._infer_SoftmaxCrossEntropyLoss, + 'Split': + self._infer_Split, + 'SplitToSequence': + self._infer_SplitToSequence, + 'Squeeze': + self._infer_Squeeze, + 'Sub': + self._infer_symbolic_compute_ops, + 'Tile': + self._infer_Tile, + 'TopK': + self._infer_TopK, + 'Transpose': + self._infer_Transpose, + 'Unsqueeze': + self._infer_Unsqueeze, + 'Where': + self._infer_symbolic_compute_ops, + 'ZipMap': + self._infer_ZipMap, + 'Neg': + self._infer_symbolic_compute_ops, + # contrib ops: + 'Attention': + self._infer_Attention, + 'BiasGelu': + self._infer_BiasGelu, + 'EmbedLayerNormalization': + self._infer_EmbedLayerNormalization, + 'FastGelu': + self._infer_FastGelu, + 'Gelu': + self._infer_Gelu, + 'LayerNormalization': + self._infer_LayerNormalization, + 'LongformerAttention': + self._infer_LongformerAttention, + 'PythonOp': + self._infer_PythonOp, + 'SkipLayerNormalization': + self._infer_SkipLayerNormalization + } + self.aten_op_dispatcher_ = { + 'aten::embedding': self._infer_Gather, + 'aten::bitwise_or': self._infer_aten_bitwise_or, + 'aten::diagonal': self._infer_aten_diagonal, + 'aten::max_pool2d_with_indices': self._infer_aten_pool2d, + 'aten::multinomial': self._infer_aten_multinomial, + 'aten::unfold': self._infer_aten_unfold, + 'aten::argmax': self._infer_aten_argmax, + 'aten::avg_pool2d': self._infer_aten_pool2d, + 'aten::_adaptive_avg_pool2d': self._infer_aten_pool2d, + 'aten::binary_cross_entropy_with_logits': self._infer_aten_bce, + 'aten::numpy_T': self._infer_Transpose, + } + self.run_ = True + self.suggested_merge_ = {} + self.symbolic_dims_ = {} + self.input_symbols_ = {} + self.auto_merge_ = auto_merge + self.guess_output_rank_ = guess_output_rank + self.verbose_ = verbose + self.int_max_ = int_max + self.subgraph_id_ = 0 + self.prefix_ = prefix + + def _add_suggested_merge(self, symbols, apply=False): + assert all([(type(s) == str and s in self.symbolic_dims_) or + is_literal(s) for s in symbols]) + symbols = set(symbols) + for k, v in self.suggested_merge_.items(): + if k in symbols: + symbols.remove(k) + symbols.add(v) + map_to = None + # if there is literal, map to 
it first + for s in symbols: + if is_literal(s): + map_to = s + break + # when no literals, map to input symbolic dims, then existing symbolic dims + if map_to is None: + for s in symbols: + if s in self.input_symbols_: + map_to = s + break + if map_to is None: + for s in symbols: + if type(self.symbolic_dims_[s]) == sympy.Symbol: + map_to = s + break + # when nothing to map to, use the shorter one + if map_to is None: + if self.verbose_ > 0: + logger.warning( + 'Potential unsafe merge between symbolic expressions: ({})'. + format(','.join(symbols))) + symbols_list = list(symbols) + lens = [len(s) for s in symbols_list] + map_to = symbols_list[lens.index(min(lens))] + symbols.remove(map_to) + + for s in symbols: + if s == map_to: + continue + if is_literal(map_to) and is_literal(s): + assert int(map_to) == int(s) + self.suggested_merge_[s] = int(map_to) if is_literal( + map_to) else map_to + for k, v in self.suggested_merge_.items(): + if v == s: + self.suggested_merge_[k] = map_to + if apply and self.auto_merge_: + self._apply_suggested_merge() + + def _apply_suggested_merge(self, graph_input_only=False): + if not self.suggested_merge_: + return + for i in list(self.out_mp_.graph.input) + ( + [] if graph_input_only else list(self.out_mp_.graph.value_info)): + for d in i.type.tensor_type.shape.dim: + if d.dim_param in self.suggested_merge_: + v = self.suggested_merge_[d.dim_param] + if is_literal(v): + d.dim_value = int(v) + else: + d.dim_param = v + + def _preprocess(self, in_mp): + self.out_mp_ = onnx.ModelProto() + self.out_mp_.CopyFrom(in_mp) + self.graph_inputs_ = dict( + [(i.name, i) for i in list(self.out_mp_.graph.input)]) + self.initializers_ = dict( + [(i.name, i) for i in self.out_mp_.graph.initializer]) + self.known_vi_ = dict( + [(i.name, i) for i in list(self.out_mp_.graph.input)]) + self.known_vi_.update( + dict([(i.name, helper.make_tensor_value_info(i.name, i.data_type, + list(i.dims))) + for i in self.out_mp_.graph.initializer])) + + def _merge_symbols(self, dims): + if not all([type(d) == str for d in dims]): + if self.auto_merge_: + unique_dims = list(set(dims)) + is_int = [is_literal(d) for d in unique_dims] + assert sum( + is_int + ) <= 1 # if there are more than 1 unique ints, something is wrong + if sum(is_int) == 1: + int_dim = is_int.index(1) + if self.verbose_ > 0: + logger.debug('dim {} has been merged with value {}'. 
+ format(unique_dims[:int_dim] + unique_dims[ + int_dim + 1:], unique_dims[int_dim])) + self._check_merged_dims(unique_dims, allow_broadcast=False) + return unique_dims[int_dim] + else: + if self.verbose_ > 0: + logger.debug('dim {} has been mergd with dim {}'.format( + unique_dims[1:], unique_dims[0])) + return dims[0] + else: + return None + if all([d == dims[0] for d in dims]): + return dims[0] + merged = [ + self.suggested_merge_[d] if d in self.suggested_merge_ else d + for d in dims + ] + if all([d == merged[0] for d in merged]): + assert merged[0] in self.symbolic_dims_ + return merged[0] + else: + return None + + # broadcast from right to left, and merge symbolic dims if needed + def _broadcast_shapes(self, shape1, shape2): + new_shape = [] + rank1 = len(shape1) + rank2 = len(shape2) + new_rank = max(rank1, rank2) + for i in range(new_rank): + dim1 = shape1[rank1 - 1 - i] if i < rank1 else 1 + dim2 = shape2[rank2 - 1 - i] if i < rank2 else 1 + if dim1 == 1 or dim1 == dim2: + new_dim = dim2 + elif dim2 == 1: + new_dim = dim1 + else: + new_dim = self._merge_symbols([dim1, dim2]) + if not new_dim: + # warning about unsupported broadcast when not auto merge + # note that auto merge has the risk of incorrectly merge symbols while one of them being 1 + # for example, 'a' = 1, 'b' = 5 at runtime is valid broadcasting, but with auto merge 'a' == 'b' + if self.auto_merge_: + self._add_suggested_merge([dim1, dim2], apply=True) + else: + logger.warning('unsupported broadcast between ' + str( + dim1) + ' ' + str(dim2)) + new_shape = [new_dim] + new_shape + return new_shape + + def _get_shape(self, node, idx): + name = node.input[idx] + if name in self.known_vi_: + vi = self.known_vi_[name] + return get_shape_from_value_info(vi) + else: + assert name in self.initializers_ + return list(self.initializers_[name].dims) + + def _get_shape_rank(self, node, idx): + return len(self._get_shape(node, idx)) + + def _get_sympy_shape(self, node, idx): + sympy_shape = [] + for d in self._get_shape(node, idx): + if type(d) == str: + sympy_shape.append(self.symbolic_dims_[d] if d in + self.symbolic_dims_ else sympy.Symbol( + d, integer=True, nonnegative=True)) + else: + assert None != d + sympy_shape.append(d) + return sympy_shape + + def _get_value(self, node, idx): + name = node.input[idx] + assert name in self.sympy_data_ or name in self.initializers_ + return self.sympy_data_[ + name] if name in self.sympy_data_ else numpy_helper.to_array( + self.initializers_[name]) + + def _try_get_value(self, node, idx): + if idx >= len(node.input): + return None + name = node.input[idx] + if name in self.sympy_data_ or name in self.initializers_: + return self._get_value(node, idx) + return None + + def _update_computed_dims(self, new_sympy_shape): + for i, new_dim in enumerate(new_sympy_shape): + if not is_literal(new_dim) and not type(new_dim) == str: + str_dim = str(new_dim) + if str_dim in self.suggested_merge_: + if is_literal(self.suggested_merge_[str_dim]): + continue # no need to create dim for literals + new_sympy_shape[i] = self.symbolic_dims_[ + self.suggested_merge_[str_dim]] + else: + # add new_dim if it's a computational expression + if not str(new_dim) in self.symbolic_dims_: + self.symbolic_dims_[str(new_dim)] = new_dim + + def _onnx_infer_single_node(self, node): + # skip onnx shape inference for some ops, as they are handled in _infer_* + skip_infer = node.op_type in [ + 'If', 'Loop', 'Scan', 'SplitToSequence', 'ZipMap', \ + # contrib ops + + + + 'Attention', 'BiasGelu', \ + 
'EmbedLayerNormalization', \ + 'FastGelu', 'Gelu', 'LayerNormalization', \ + 'LongformerAttention', \ + 'SkipLayerNormalization', \ + 'PythonOp' + ] + + if not skip_infer: + # Only pass initializers that satisfy the following condition: + # (1) Operator need value of some input for shape inference. + # For example, Unsqueeze in opset 13 uses the axes input to calculate shape of output. + # (2) opset version >= 9. In older version, initializer is required in graph input by onnx spec. + # (3) The initializer is not in graph input. The means the node input is "constant" in inference. + initializers = [] + if (get_opset(self.out_mp_) >= 9) and node.op_type in ['Unsqueeze']: + initializers = [ + self.initializers_[name] for name in node.input + if (name in self.initializers_ and + name not in self.graph_inputs_) + ] + + # run single node inference with self.known_vi_ shapes + tmp_graph = helper.make_graph( + [node], 'tmp', [self.known_vi_[i] for i in node.input if i], + [make_named_value_info(i) for i in node.output], initializers) + + self.tmp_mp_.graph.CopyFrom(tmp_graph) + + self.tmp_mp_ = shape_inference.infer_shapes(self.tmp_mp_) + + for i_o in range(len(node.output)): + o = node.output[i_o] + vi = self.out_mp_.graph.value_info.add() + if not skip_infer: + vi.CopyFrom(self.tmp_mp_.graph.output[i_o]) + else: + vi.name = o + self.known_vi_[o] = vi + + def _onnx_infer_subgraph(self, + node, + subgraph, + use_node_input=True, + inc_subgraph_id=True): + if self.verbose_ > 2: + logger.debug( + 'Inferencing subgraph of node {} with output({}...): {}'.format( + node.name, node.output[0], node.op_type)) + # node inputs are not passed directly to the subgraph + # it's up to the node dispatcher to prepare subgraph input + # for example, with Scan/Loop, subgraph input shape would be trimmed from node input shape + # besides, inputs in subgraph could shadow implicit inputs + subgraph_inputs = set( + [i.name for i in list(subgraph.initializer) + list(subgraph.input)]) + subgraph_implicit_input = set([ + name for name in self.known_vi_.keys() + if not name in subgraph_inputs + ]) + tmp_graph = helper.make_graph( + list(subgraph.node), 'tmp', + list(subgraph.input) + + [self.known_vi_[i] for i in subgraph_implicit_input], + [make_named_value_info(i.name) for i in subgraph.output]) + tmp_graph.initializer.extend([ + i for i in self.out_mp_.graph.initializer + if i.name in subgraph_implicit_input + ]) + tmp_graph.initializer.extend(subgraph.initializer) + self.tmp_mp_.graph.CopyFrom(tmp_graph) + + symbolic_shape_inference = SymbolicShapeInference( + self.int_max_, + self.auto_merge_, + self.guess_output_rank_, + self.verbose_, + prefix=self.prefix_ + '_' + str(self.subgraph_id_)) + if inc_subgraph_id: + self.subgraph_id_ += 1 + + all_shapes_inferred = False + symbolic_shape_inference._preprocess(self.tmp_mp_) + symbolic_shape_inference.suggested_merge_ = self.suggested_merge_.copy() + while symbolic_shape_inference.run_: + all_shapes_inferred = symbolic_shape_inference._infer_impl( + self.sympy_data_.copy()) + symbolic_shape_inference._update_output_from_vi() + if use_node_input: + # if subgraph uses node input, it needs to update to merged dims + subgraph.ClearField('input') + subgraph.input.extend( + symbolic_shape_inference.out_mp_.graph.input[:len(node.input)]) + subgraph.ClearField('output') + subgraph.output.extend(symbolic_shape_inference.out_mp_.graph.output) + subgraph.ClearField('value_info') + subgraph.value_info.extend( + symbolic_shape_inference.out_mp_.graph.value_info) + 
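+        # copy the shape-inferred subgraph back so the parent graph sees
+        # the merged symbolic dims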
subgraph.ClearField('node') + subgraph.node.extend(symbolic_shape_inference.out_mp_.graph.node) + # for new symbolic dims from subgraph output, add to main graph symbolic dims + subgraph_shapes = [ + get_shape_from_value_info(o) + for o in symbolic_shape_inference.out_mp_.graph.output + ] + subgraph_new_symbolic_dims = set([ + d for s in subgraph_shapes if s for d in s + if type(d) == str and not d in self.symbolic_dims_ + ]) + new_dims = {} + for d in subgraph_new_symbolic_dims: + assert d in symbolic_shape_inference.symbolic_dims_ + new_dims[d] = symbolic_shape_inference.symbolic_dims_[d] + self.symbolic_dims_.update(new_dims) + return symbolic_shape_inference + + def _get_int_values(self, node, broadcast=False): + values = [self._try_get_value(node, i) for i in range(len(node.input))] + if all([v is not None for v in values]): + # some shape compute is in floating point, cast to int for sympy + for i, v in enumerate(values): + if type(v) != np.ndarray: + continue + if len(v.shape) > 1: + new_v = None # ignore value for rank > 1 + elif len(v.shape) == 0: + new_v = int(v.item()) + else: + assert len(v.shape) == 1 + new_v = [int(vv) for vv in v] + values[i] = new_v + values_len = [len(v) if type(v) == list else 0 for v in values] + max_len = max(values_len) + if max_len >= 1 and broadcast: + # broadcast + for i, v in enumerate(values): + if v is None: + continue # don't broadcast if value is unknown + if type(v) == list: + if len(v) < max_len: + values[i] = v * max_len + else: + assert len(v) == max_len + else: + values[i] = [v] * max_len + return values + + def _compute_on_sympy_data(self, node, op_func): + assert len(node.output) == 1 + values = self._get_int_values(node, broadcast=True) + if all([v is not None for v in values]): + is_list = [type(v) == list for v in values] + as_list = any(is_list) + if as_list: + self.sympy_data_[node.output[ + 0]] = [op_func(vs) for vs in zip(*values)] + else: + self.sympy_data_[node.output[0]] = op_func(values) + + def _pass_on_sympy_data(self, node): + assert len( + node. 
+ input) == 1 or node.op_type in ['Reshape', 'Unsqueeze', 'Squeeze'] + self._compute_on_sympy_data(node, lambda x: x[0]) + + def _pass_on_shape_and_type(self, node): + vi = self.known_vi_[node.output[0]] + vi.CopyFrom( + helper.make_tensor_value_info(node.output[0], self.known_vi_[ + node.input[0]].type.tensor_type.elem_type, + self._get_shape(node, 0))) + + def _new_symbolic_dim(self, prefix, dim): + new_dim = '{}_d{}'.format(prefix, dim) + if new_dim in self.suggested_merge_: + v = self.suggested_merge_[new_dim] + new_symbolic_dim = sympy.Integer(int(v)) if is_literal(v) else v + else: + new_symbolic_dim = sympy.Symbol( + new_dim, integer=True, nonnegative=True) + self.symbolic_dims_[new_dim] = new_symbolic_dim + return new_symbolic_dim + + def _new_symbolic_dim_from_output(self, node, out_idx=0, dim=0): + return self._new_symbolic_dim('{}{}_{}_o{}_'.format( + node.op_type, self.prefix_, + list(self.out_mp_.graph.node).index(node), out_idx), dim) + + def _new_symbolic_shape(self, rank, node, out_idx=0): + return [ + self._new_symbolic_dim_from_output(node, out_idx, i) + for i in range(rank) + ] + + def _compute_conv_pool_shape(self, node): + sympy_shape = self._get_sympy_shape(node, 0) + if len(node.input) > 1: + W_shape = self._get_sympy_shape(node, 1) + rank = len(W_shape) - 2 # number of spatial axes + kernel_shape = W_shape[-rank:] + sympy_shape[1] = W_shape[0] + else: + W_shape = None + kernel_shape = get_attribute(node, 'kernel_shape') + rank = len(kernel_shape) + + assert len(sympy_shape) == rank + 2 + + # only need to symbolic shape inference if input has symbolic dims in spatial axes + is_symbolic_dims = [not is_literal(i) for i in sympy_shape[-rank:]] + + if not any(is_symbolic_dims): + shape = get_shape_from_value_info(self.known_vi_[node.output[0]]) + if len(shape) > 0: + assert len(sympy_shape) == len(shape) + sympy_shape[-rank:] = [sympy.Integer(d) for d in shape[-rank:]] + return sympy_shape + + dilations = get_attribute(node, 'dilations', [1] * rank) + strides = get_attribute(node, 'strides', [1] * rank) + effective_kernel_shape = [(k - 1) * d + 1 + for k, d in zip(kernel_shape, dilations)] + pads = get_attribute(node, 'pads') + if pads is None: + pads = [0] * (2 * rank) + auto_pad = get_attribute(node, 'auto_pad', + b'NOTSET').decode('utf-8') + if auto_pad != 'VALID' and auto_pad != 'NOTSET': + try: + residual = [ + sympy.Mod(d, s) + for d, s in zip(sympy_shape[-rank:], strides) + ] + total_pads = [ + max(0, (k - s) if r == 0 else (k - r)) for k, s, r in + zip(effective_kernel_shape, strides, residual) + ] + except TypeError: # sympy may throw TypeError: cannot determine truth value of Relational + total_pads = [ + max(0, (k - s)) + for k, s in zip(effective_kernel_shape, strides) + ] # assuming no residual if sympy throws error + elif auto_pad == 'VALID': + total_pads = [] + else: + total_pads = [0] * rank + else: + assert len(pads) == 2 * rank + total_pads = [p1 + p2 for p1, p2 in zip(pads[:rank], pads[rank:])] + + ceil_mode = get_attribute(node, 'ceil_mode', 0) + for i in range(rank): + effective_input_size = sympy_shape[-rank + i] + if len(total_pads) > 0: + effective_input_size = effective_input_size + total_pads[i] + if ceil_mode: + strided_kernel_positions = sympy.ceiling( + (effective_input_size - effective_kernel_shape[i]) / + strides[i]) + else: + strided_kernel_positions = ( + effective_input_size - effective_kernel_shape[i] + ) // strides[i] + sympy_shape[-rank + i] = strided_kernel_positions + 1 + return sympy_shape + + def _check_merged_dims(self, dims, 
allow_broadcast=True): + if allow_broadcast: + dims = [d for d in dims if not (is_literal(d) and int(d) <= 1)] + if not all([d == dims[0] for d in dims]): + self._add_suggested_merge(dims, apply=True) + + def _compute_matmul_shape(self, node, output_dtype=None): + lhs_shape = self._get_shape(node, 0) + rhs_shape = self._get_shape(node, 1) + lhs_rank = len(lhs_shape) + rhs_rank = len(rhs_shape) + lhs_reduce_dim = 0 + rhs_reduce_dim = 0 + assert lhs_rank > 0 and rhs_rank > 0 + if lhs_rank == 1 and rhs_rank == 1: + new_shape = [] + elif lhs_rank == 1: + rhs_reduce_dim = -2 + new_shape = rhs_shape[:rhs_reduce_dim] + [rhs_shape[-1]] + elif rhs_rank == 1: + lhs_reduce_dim = -1 + new_shape = lhs_shape[:lhs_reduce_dim] + else: + lhs_reduce_dim = -1 + rhs_reduce_dim = -2 + new_shape = self._broadcast_shapes( + lhs_shape[:-2], + rhs_shape[:-2]) + [lhs_shape[-2]] + [rhs_shape[-1]] + # merge reduce dim + self._check_merged_dims( + [lhs_shape[lhs_reduce_dim], rhs_shape[rhs_reduce_dim]], + allow_broadcast=False) + if output_dtype is None: + # infer output_dtype from input type when not specified + output_dtype = self.known_vi_[node.input[ + 0]].type.tensor_type.elem_type + vi = self.known_vi_[node.output[0]] + vi.CopyFrom( + helper.make_tensor_value_info(node.output[0], output_dtype, + new_shape)) + + def _fuse_tensor_type(self, node, out_idx, dst_type, src_type): + ''' + update dst_tensor_type to be compatible with src_tensor_type when dimension mismatches + ''' + dst_tensor_type = dst_type.sequence_type.elem_type.tensor_type if is_sequence( + dst_type) else dst_type.tensor_type + src_tensor_type = src_type.sequence_type.elem_type.tensor_type if is_sequence( + src_type) else src_type.tensor_type + if dst_tensor_type.elem_type != src_tensor_type.elem_type: + node_id = node.name if node.name else node.op_type + raise ValueError( + f"For node {node_id}, dst_tensor_type.elem_type != src_tensor_type.elem_type: " + f"{onnx.onnx_pb.TensorProto.DataType.Name(dst_tensor_type.elem_type)} vs " + f"{onnx.onnx_pb.TensorProto.DataType.Name(src_tensor_type.elem_type)}" + ) + if dst_tensor_type.HasField('shape'): + for di, ds in enumerate( + zip(dst_tensor_type.shape.dim, src_tensor_type.shape.dim)): + if ds[0] != ds[1]: + # create a new symbolic dimension for node/out_idx/mismatch dim id in dst_tensor_type for tensor_type + # for sequence_type, clear the dimension + new_dim = onnx.TensorShapeProto.Dimension() + if not is_sequence(dst_type): + new_dim.dim_param = str( + self._new_symbolic_dim_from_output(node, out_idx, + di)) + dst_tensor_type.shape.dim[di].CopyFrom(new_dim) + else: + dst_tensor_type.CopyFrom(src_tensor_type) + + def _infer_ArrayFeatureExtractor(self, node): + data_shape = self._get_shape(node, 0) + indices_shape = self._get_shape(node, 1) + vi = self.known_vi_[node.output[0]] + vi.CopyFrom( + helper.make_tensor_value_info(node.output[0], self.known_vi_[ + node.input[0]].type.tensor_type.elem_type, data_shape[:-1] + + indices_shape)) + + def _infer_symbolic_compute_ops(self, node): + funcs = { + 'Add': + lambda l: l[0] + l[1], + 'Div': + lambda l: l[0] // l[1], # integer div in sympy + 'Equal': + lambda l: l[0] == l[1], + 'Floor': + lambda l: sympy.floor(l[0]), + 'Max': + lambda l: l[1] if is_literal(l[0]) and int(l[0]) < -self.int_max_ else (l[0] if is_literal(l[1]) and int(l[1]) < -self.int_max_ else sympy.Max(l[0], l[1])), + 'Min': + lambda l: l[1] if is_literal(l[0]) and int(l[0]) > self.int_max_ else (l[0] if is_literal(l[1]) and int(l[1]) > self.int_max_ else sympy.Min(l[0], l[1])), + 'Mul': + 
lambda l: l[0] * l[1], + 'Sub': + lambda l: l[0] - l[1], + 'Where': + lambda l: l[1] if l[0] else l[2], + 'Neg': + lambda l: -l[0] + } + assert node.op_type in funcs + self._compute_on_sympy_data(node, funcs[node.op_type]) + + def _infer_Cast(self, node): + self._pass_on_sympy_data(node) + + def _infer_CategoryMapper(self, node): + input_type = self.known_vi_[node.input[0]].type.tensor_type.elem_type + if input_type == onnx.TensorProto.STRING: + output_type = onnx.TensorProto.INT64 + else: + output_type = onnx.TensorProto.STRING + vi = self.known_vi_[node.output[0]] + vi.CopyFrom( + helper.make_tensor_value_info(node.output[0], output_type, + self._get_shape(node, 0))) + + def _infer_Compress(self, node): + input_shape = self._get_shape(node, 0) + # create a new symbolic dimension for Compress output + compress_len = str(self._new_symbolic_dim_from_output(node)) + axis = get_attribute(node, 'axis') + if axis == None: + # when axis is not specified, input is flattened before compress so output is 1D + output_shape = [compress_len] + else: + output_shape = input_shape + output_shape[handle_negative_axis(axis, len( + input_shape))] = compress_len + vi = self.known_vi_[node.output[0]] + vi.CopyFrom( + helper.make_tensor_value_info(node.output[0], self.known_vi_[ + node.input[0]].type.tensor_type.elem_type, output_shape)) + + def _infer_Concat(self, node): + if any([ + i in self.sympy_data_ or i in self.initializers_ + for i in node.input + ]): + values = self._get_int_values(node) + print("=======", values, node.name, get_attribute(node, 'axis')) + if all([v is not None for v in values]): + axis = get_attribute(node, 'axis') + if axis < 0: + axis = axis + len(values[0]) + assert 0 == axis + self.sympy_data_[node.output[0]] = [] + for i in range(len(node.input)): + value = values[i] + if type(value) == list: + self.sympy_data_[node.output[0]].extend(value) + else: + self.sympy_data_[node.output[0]].append(value) + + sympy_shape = self._get_sympy_shape(node, 0) + axis = handle_negative_axis( + get_attribute(node, 'axis'), len(sympy_shape)) + for i_idx in range(1, len(node.input)): + input_shape = self._get_sympy_shape(node, i_idx) + if input_shape: + sympy_shape[axis] = sympy_shape[axis] + input_shape[axis] + self._update_computed_dims(sympy_shape) + # merge symbolic dims for non-concat axes + for d in range(len(sympy_shape)): + if d == axis: + continue + dims = [ + self._get_shape(node, i_idx)[d] + for i_idx in range(len(node.input)) + if self._get_shape(node, i_idx) + ] + if all([d == dims[0] for d in dims]): + continue + merged = self._merge_symbols(dims) + if type(merged) == str: + sympy_shape[d] = self.symbolic_dims_[merged] if merged else None + else: + sympy_shape[d] = merged + vi = self.known_vi_[node.output[0]] + vi.CopyFrom( + helper.make_tensor_value_info( + node.output[0], self.known_vi_[node.input[0]].type.tensor_type. 
+ elem_type, get_shape_from_sympy_shape(sympy_shape))) + + def _infer_ConcatFromSequence(self, node): + seq_shape = self._get_shape(node, 0) + new_axis = 1 if get_attribute(node, 'new_axis') else 0 + axis = handle_negative_axis( + get_attribute(node, 'axis'), len(seq_shape) + new_axis) + concat_dim = str(self._new_symbolic_dim_from_output(node, 0, axis)) + new_shape = seq_shape + if new_axis: + new_shape = seq_shape[:axis] + [concat_dim] + seq_shape[axis:] + else: + new_shape[axis] = concat_dim + vi = self.known_vi_[node.output[0]] + vi.CopyFrom( + helper.make_tensor_value_info( + node.output[0], self.known_vi_[node.input[0]] + .type.sequence_type.elem_type.tensor_type.elem_type, new_shape)) + + def _infer_Constant(self, node): + t = get_attribute(node, 'value') + self.sympy_data_[node.output[0]] = numpy_helper.to_array(t) + + def _infer_ConstantOfShape(self, node): + sympy_shape = self._get_int_values(node)[0] + vi = self.known_vi_[node.output[0]] + if sympy_shape is not None: + if type(sympy_shape) != list: + sympy_shape = [sympy_shape] + self._update_computed_dims(sympy_shape) + # update sympy data if output type is int, and shape is known + if vi.type.tensor_type.elem_type == onnx.TensorProto.INT64 and all( + [is_literal(x) for x in sympy_shape]): + self.sympy_data_[node.output[0]] = np.ones( + [int(x) for x in sympy_shape], + dtype=np.int64) * numpy_helper.to_array( + get_attribute(node, 'value', 0)) + else: + # create new dynamic shape + # note input0 is a 1D vector of shape, the new symbolic shape has the rank of the shape vector length + sympy_shape = self._new_symbolic_shape( + self._get_shape(node, 0)[0], node) + + vi.CopyFrom( + helper.make_tensor_value_info( + node.output[0], vi.type.tensor_type.elem_type, + get_shape_from_sympy_shape(sympy_shape))) + + def _infer_Conv(self, node): + sympy_shape = self._compute_conv_pool_shape(node) + self._update_computed_dims(sympy_shape) + vi = self.known_vi_[node.output[0]] + vi.CopyFrom( + helper.make_tensor_value_info( + node.output[0], vi.type.tensor_type.elem_type, + get_shape_from_sympy_shape(sympy_shape))) + + def _infer_Einsum(self, node): + # ref:https://github.com/onnx/onnx/blob/623dfaa0151b2e4ce49779c3ec31cbd78c592b80/onnx/defs/math/defs.cc#L3275 + equation = get_attribute(node, 'equation') + equation = equation.replace(b' ', b'') + mid_index = equation.find(b'->') + left_equation = equation[:mid_index] if mid_index != -1 else equation + + num_operands = 0 + num_ellipsis = 0 + num_ellipsis_indices = 0 + + letter_to_dim = {} + + terms = left_equation.split(b',') + for term in terms: + ellipsis_index = term.find(b'...') + shape = self._get_shape(node, num_operands) + rank = len(shape) + if ellipsis_index != -1: + if num_ellipsis == 0: + num_ellipsis_indices = rank - len(term) + 3 + num_ellipsis = num_ellipsis + 1 + for i in range(1, rank + 1): + letter = term[-i] + if letter != 46: # letter != b'.' + dim = shape[-i] + if letter not in letter_to_dim.keys(): + letter_to_dim[letter] = dim + elif type(dim) != sympy.Symbol: + letter_to_dim[letter] = dim + num_operands = num_operands + 1 + + new_sympy_shape = [] + from collections import OrderedDict + num_letter_occurrences = OrderedDict() + if mid_index != -1: + right_equation = equation[mid_index + 2:] + right_ellipsis_index = right_equation.find(b'...') + if right_ellipsis_index != -1: + for i in range(num_ellipsis_indices): + new_sympy_shape.append(shape[i]) + for c in right_equation: + if c != 46: # c != b'.' 
+ new_sympy_shape.append(letter_to_dim[c]) + else: + for i in range(num_ellipsis_indices): + new_sympy_shape.append(shape[i]) + for c in left_equation: + if c != 44 and c != 46: # c != b',' and c != b'.': + if c in num_letter_occurrences: + num_letter_occurrences[c] = num_letter_occurrences[ + c] + 1 + else: + num_letter_occurrences[c] = 1 + for key, value in num_letter_occurrences.items(): + if value == 1: + new_sympy_shape.append(letter_to_dim[key]) + + output_dtype = self.known_vi_[node.input[0]].type.tensor_type.elem_type + vi = self.known_vi_[node.output[0]] + vi.CopyFrom( + helper.make_tensor_value_info(node.output[0], output_dtype, + new_sympy_shape)) + + def _infer_Expand(self, node): + expand_to_shape = as_list(self._try_get_value(node, 1), keep_none=True) + if expand_to_shape is not None: + # new_shape's dim can come from shape value + self._update_computed_dims(expand_to_shape) + shape = self._get_shape(node, 0) + new_shape = self._broadcast_shapes( + shape, get_shape_from_sympy_shape(expand_to_shape)) + vi = self.known_vi_[node.output[0]] + vi.CopyFrom( + helper.make_tensor_value_info(node.output[0], self.known_vi_[ + node.input[0]].type.tensor_type.elem_type, new_shape)) + + def _infer_Gather(self, node): + data_shape = self._get_shape(node, 0) + axis = handle_negative_axis( + get_attribute(node, 'axis', 0), len(data_shape)) + indices_shape = self._get_shape(node, 1) + vi = self.known_vi_[node.output[0]] + vi.CopyFrom( + helper.make_tensor_value_info(node.output[0], self.known_vi_[ + node.input[0]].type.tensor_type.elem_type, data_shape[:axis] + + indices_shape + data_shape[axis + + 1:])) + # for 1D input, do some sympy compute + if node.input[0] in self.sympy_data_ and len( + data_shape) == 1 and 0 == get_attribute(node, 'axis', 0): + idx = self._try_get_value(node, 1) + if idx is not None: + data = self.sympy_data_[node.input[0]] + if type(data) == list: + if type(idx) == np.ndarray and len(idx.shape) == 1: + self.sympy_data_[node.output[ + 0]] = [data[int(i)] for i in idx] + else: + self.sympy_data_[node.output[0]] = data[int(idx)] + else: + assert idx == 0 or idx == -1 + self.sympy_data_[node.output[0]] = data + + def _infer_GatherElements(self, node): + indices_shape = self._get_shape(node, 1) + vi = self.known_vi_[node.output[0]] + vi.CopyFrom( + helper.make_tensor_value_info(node.output[0], self.known_vi_[ + node.input[0]].type.tensor_type.elem_type, indices_shape)) + + def _infer_GatherND(self, node): + data_shape = self._get_shape(node, 0) + data_rank = len(data_shape) + indices_shape = self._get_shape(node, 1) + indices_rank = len(indices_shape) + last_index_dimension = indices_shape[-1] + assert is_literal( + last_index_dimension) and last_index_dimension <= data_rank + new_shape = indices_shape[:-1] + data_shape[last_index_dimension:] + vi = self.known_vi_[node.output[0]] + vi.CopyFrom( + helper.make_tensor_value_info(node.output[0], self.known_vi_[ + node.input[0]].type.tensor_type.elem_type, new_shape)) + + def _infer_If(self, node): + # special case for constant condition, in case there are mismatching shape from the non-executed branch + subgraphs = [ + get_attribute(node, 'then_branch'), get_attribute(node, + 'else_branch') + ] + cond = self._try_get_value(node, 0) + if cond is not None: + if as_scalar(cond) > 0: + subgraphs[1].CopyFrom(subgraphs[0]) + else: + subgraphs[0].CopyFrom(subgraphs[1]) + + for i_sub, subgraph in enumerate(subgraphs): + subgraph_infer = self._onnx_infer_subgraph( + node, subgraph, use_node_input=False) + for i_out in 
range(len(node.output)): + vi = self.known_vi_[node.output[i_out]] + if i_sub == 0: + vi.CopyFrom(subgraph.output[i_out]) + vi.name = node.output[i_out] + else: + self._fuse_tensor_type(node, i_out, vi.type, + subgraph.output[i_out].type) + + # pass on sympy data from subgraph, if cond is constant + if cond is not None and i_sub == (0 if as_scalar(cond) > 0 else + 1): + if subgraph.output[ + i_out].name in subgraph_infer.sympy_data_: + self.sympy_data_[vi.name] = subgraph_infer.sympy_data_[ + subgraph.output[i_out].name] + + def _infer_Loop(self, node): + subgraph = get_attribute(node, 'body') + assert len(subgraph.input) == len(node.input) + num_loop_carried = len( + node.input) - 2 # minus the length and initial loop condition + # when sequence_type is used as loop carried input + # needs to run subgraph infer twice if the tensor shape in sequence contains None + for i, si in enumerate(subgraph.input): + si_name = si.name + si.CopyFrom(self.known_vi_[node.input[i]]) + si.name = si_name + + self._onnx_infer_subgraph(node, subgraph) + + # check subgraph input/output for shape changes in loop carried variables + # for tensor_type, create new symbolic dim when changing, i.e., output = Concat(input, a) + # for sequence_type, propagate from output to input + need_second_infer = False + for i_out in range(1, num_loop_carried + 1): + so = subgraph.output[i_out] + so_shape = get_shape_from_value_info(so) + if is_sequence(so.type): + if so_shape and None in so_shape: + # copy shape from output to input + # note that loop input is [loop_len, cond, input_0, input_1, ...] + # while loop output is [cond, output_0, output_1, ...] + subgraph.input[i_out + + 1].type.sequence_type.elem_type.CopyFrom( + so.type.sequence_type.elem_type) + need_second_infer = True + else: + si = subgraph.input[i_out + 1] + si_shape = get_shape_from_value_info(si) + for di, dims in enumerate(zip(si_shape, so_shape)): + if dims[0] != dims[1]: + new_dim = onnx.TensorShapeProto.Dimension() + new_dim.dim_param = str( + self._new_symbolic_dim_from_output(node, i_out, di)) + si.type.tensor_type.shape.dim[di].CopyFrom(new_dim) + so.type.tensor_type.shape.dim[di].CopyFrom(new_dim) + need_second_infer = True + + if need_second_infer: + if self.verbose_ > 2: + logger.debug( + "Rerun Loop: {}({}...), because of sequence in loop carried variables". 
+ format(node.name, node.output[0])) + self._onnx_infer_subgraph(node, subgraph, inc_subgraph_id=False) + + # create a new symbolic dimension for iteration dependent dimension + loop_iter_dim = str(self._new_symbolic_dim_from_output(node)) + for i in range(len(node.output)): + vi = self.known_vi_[node.output[i]] + vi.CopyFrom(subgraph.output[ + i + + 1]) # first subgraph output is condition, not in node output + if i >= num_loop_carried: + assert not is_sequence( + vi.type) # TODO: handle loop accumulation in sequence_type + subgraph_vi_dim = subgraph.output[i + + 1].type.tensor_type.shape.dim + vi.type.tensor_type.shape.ClearField('dim') + vi_dim = vi.type.tensor_type.shape.dim + vi_dim.add().dim_param = loop_iter_dim + vi_dim.extend(list(subgraph_vi_dim)) + vi.name = node.output[i] + + def _infer_MatMul(self, node): + self._compute_matmul_shape(node) + + def _infer_MatMulInteger(self, node): + self._compute_matmul_shape(node, onnx.TensorProto.INT32) + + def _infer_NonMaxSuppression(self, node): + selected = str(self._new_symbolic_dim_from_output(node)) + vi = self.known_vi_[node.output[0]] + vi.CopyFrom( + helper.make_tensor_value_info(node.output[ + 0], onnx.TensorProto.INT64, [selected, 3])) + + def _infer_NonZero(self, node): + input_rank = self._get_shape_rank(node, 0) + # create a new symbolic dimension for NonZero output + nz_len = str(self._new_symbolic_dim_from_output(node, 0, 1)) + vi = self.known_vi_[node.output[0]] + vi.CopyFrom( + helper.make_tensor_value_info(node.output[ + 0], vi.type.tensor_type.elem_type, [input_rank, nz_len])) + + def _infer_OneHot(self, node): + sympy_shape = self._get_sympy_shape(node, 0) + depth = self._try_get_value(node, 1) + axis = get_attribute(node, 'axis', -1) + axis = handle_negative_axis(axis, len(sympy_shape) + 1) + new_shape = get_shape_from_sympy_shape(sympy_shape[:axis] + [ + self._new_symbolic_dim_from_output(node) + if not is_literal(depth) else depth + ] + sympy_shape[axis:]) + vi = self.known_vi_[node.output[0]] + vi.CopyFrom( + helper.make_tensor_value_info(node.output[0], self.known_vi_[ + node.input[2]].type.tensor_type.elem_type, new_shape)) + + def _infer_Pad(self, node): + if get_opset(self.out_mp_) <= 10: + pads = get_attribute(node, 'pads') + else: + pads = self._try_get_value(node, 1) + + sympy_shape = self._get_sympy_shape(node, 0) + rank = len(sympy_shape) + + if pads is not None: + assert len(pads) == 2 * rank + new_sympy_shape = [ + d + pad_up + pad_down for d, pad_up, pad_down in + zip(sympy_shape, pads[:rank], pads[rank:]) + ] + self._update_computed_dims(new_sympy_shape) + else: + # dynamic pads, create new symbolic dimensions + new_sympy_shape = self._new_symbolic_shape(rank, node) + output_tp = self.known_vi_[node.input[0]].type.tensor_type.elem_type + + vi = self.known_vi_[node.output[0]] + vi.CopyFrom( + helper.make_tensor_value_info(node.output[ + 0], output_tp, get_shape_from_sympy_shape(new_sympy_shape))) + + def _infer_Pool(self, node): + sympy_shape = self._compute_conv_pool_shape(node) + self._update_computed_dims(sympy_shape) + for o in node.output: + if not o: + continue + vi = self.known_vi_[o] + vi.CopyFrom( + helper.make_tensor_value_info(o, vi.type.tensor_type.elem_type, + get_shape_from_sympy_shape( + sympy_shape))) + + def _infer_aten_bitwise_or(self, node): + shape0 = self._get_shape(node, 0) + shape1 = self._get_shape(node, 1) + new_shape = self._broadcast_shapes(shape0, shape1) + t0 = self.known_vi_[node.input[0]] + vi = self.known_vi_[node.output[0]] + vi.CopyFrom( + 
helper.make_tensor_value_info(node.output[ + 0], t0.type.tensor_type.elem_type, new_shape)) + + def _infer_aten_diagonal(self, node): + sympy_shape = self._get_sympy_shape(node, 0) + rank = len(sympy_shape) + offset = self._try_get_value(node, 1) + dim1 = self._try_get_value(node, 2) + dim2 = self._try_get_value(node, 3) + + assert offset is not None and dim1 is not None and dim2 is not None + dim1 = handle_negative_axis(dim1, rank) + dim2 = handle_negative_axis(dim2, rank) + + new_shape = [] + for dim, val in enumerate(sympy_shape): + if dim not in [dim1, dim2]: + new_shape.append(val) + + shape1 = sympy_shape[dim1] + shape2 = sympy_shape[dim2] + if offset >= 0: + diag_shape = sympy.Max(0, sympy.Min(shape1, shape2 - offset)) + else: + diag_shape = sympy.Max(0, sympy.Min(shape1 + offset, shape2)) + new_shape.append(diag_shape) + + if node.output[0]: + vi = self.known_vi_[node.output[0]] + vi.CopyFrom( + helper.make_tensor_value_info(node.output[0], self.known_vi_[ + node.input[0]].type.tensor_type.elem_type, + get_shape_from_sympy_shape( + new_shape))) + + def _infer_aten_multinomial(self, node): + sympy_shape = self._get_sympy_shape(node, 0) + rank = len(sympy_shape) + assert rank in [1, 2] + num_samples = self._try_get_value(node, 1) + di = rank - 1 + last_dim = num_samples if num_samples else str( + self._new_symbolic_dim_from_output(node, 0, di)) + output_shape = sympy_shape[:-1] + [last_dim] + vi = self.known_vi_[node.output[0]] + vi.CopyFrom( + helper.make_tensor_value_info( + node.output[0], onnx.TensorProto.INT64, + get_shape_from_sympy_shape(output_shape))) + + def _infer_aten_pool2d(self, node): + sympy_shape = self._get_sympy_shape(node, 0) + assert len(sympy_shape) == 4 + sympy_shape[-2:] = [ + self._new_symbolic_dim_from_output(node, 0, i) for i in [2, 3] + ] + self._update_computed_dims(sympy_shape) + for i, o in enumerate(node.output): + if not o: + continue + vi = self.known_vi_[o] + elem_type = onnx.TensorProto.INT64 if i == 1 else self.known_vi_[ + node.input[0]].type.tensor_type.elem_type + vi.CopyFrom( + helper.make_tensor_value_info( + o, elem_type, get_shape_from_sympy_shape(sympy_shape))) + + def _infer_aten_unfold(self, node): + sympy_shape = self._get_sympy_shape(node, 0) + dimension = self._try_get_value(node, 1) + size = self._try_get_value(node, 2) + step = self._try_get_value(node, 3) + if dimension is not None and size is not None and step is not None: + assert dimension < len(sympy_shape) + sympy_shape[dimension] = (sympy_shape[dimension] - size) // step + 1 + sympy_shape.append(size) + else: + rank = len(sympy_shape) + sympy_shape = self._new_symbolic_shape(rank + 1, node) + self._update_computed_dims(sympy_shape) + if node.output[0]: + vi = self.known_vi_[node.output[0]] + vi.CopyFrom( + helper.make_tensor_value_info(node.output[0], self.known_vi_[ + node.input[0]].type.tensor_type.elem_type, + get_shape_from_sympy_shape( + sympy_shape))) + + def _infer_aten_argmax(self, node): + new_shape = None + if node.input[1] == '': + # The argmax of the flattened input is returned. 
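+ # i.e. the result is a scalar, so its inferred shape is the empty (rank-0) list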
+ new_shape = [] + else: + dim = self._try_get_value(node, 1) + keepdim = self._try_get_value(node, 2) + if keepdim is not None: + sympy_shape = self._get_sympy_shape(node, 0) + if dim is not None: + dim = handle_negative_axis(dim, len(sympy_shape)) + if keepdim: + sympy_shape[dim] = 1 + else: + del sympy_shape[dim] + else: + rank = len(sympy_shape) + sympy_shape = self._new_symbolic_shape(rank if keepdim else + rank - 1, node) + self._update_computed_dims(sympy_shape) + new_shape = get_shape_from_sympy_shape(sympy_shape) + if node.output[0] and new_shape is not None: + vi = self.known_vi_[node.output[0]] + vi.CopyFrom( + helper.make_tensor_value_info(node.output[ + 0], onnx.TensorProto.INT64, new_shape)) + + def _infer_aten_bce(self, node): + reduction = self._try_get_value(node, 4) + if reduction is None: + reduction = 1 + elem_type = self.known_vi_[node.input[0]].type.tensor_type.elem_type + vi = self.known_vi_[node.output[0]] + if reduction == 0: + vi.type.tensor_type.elem_type = elem_type + vi.type.tensor_type.shape.CopyFrom(onnx.TensorShapeProto()) + else: + vi.CopyFrom( + helper.make_tensor_value_info(vi.name, elem_type, + self._get_shape(node, 0))) + + def _infer_BatchNormalization(self, node): + self._propagate_shape_and_type(node) + + # this works for opsets < 14 and 14 since we check i < len(node.output) in the loop + for i in [1, 2, 3, 4]: + if i < len(node.output) and node.output[i] != "": + # all of these parameters have the same shape as the 1st input + self._propagate_shape_and_type( + node, input_index=1, output_index=i) + + def _infer_Range(self, node): + vi = self.known_vi_[node.output[0]] + input_data = self._get_int_values(node) + if all([i is not None for i in input_data]): + start = as_scalar(input_data[0]) + limit = as_scalar(input_data[1]) + delta = as_scalar(input_data[2]) + new_sympy_shape = [ + sympy.Max(sympy.ceiling((limit - start) / delta), 0) + ] + else: + new_sympy_shape = [self._new_symbolic_dim_from_output(node)] + self._update_computed_dims(new_sympy_shape) + vi.CopyFrom( + helper.make_tensor_value_info( + node.output[0], self.known_vi_[node.input[0]].type.tensor_type. 
+ elem_type, get_shape_from_sympy_shape(new_sympy_shape))) + + def _infer_ReduceSum(self, node): + keep_dims = get_attribute(node, 'keepdims', 1) + if get_opset(self.out_mp_) >= 13 and len(node.input) > 1: + # ReduceSum changes axes to input[1] in opset 13 + axes = self._try_get_value(node, 1) + vi = self.known_vi_[node.output[0]] + if axes is None: + assert keep_dims # can only handle keep_dims==True when axes is unknown, by generating new ranks + vi.CopyFrom( + helper.make_tensor_value_info( + node.output[0], self.known_vi_[node.input[ + 0]].type.tensor_type.elem_type, + get_shape_from_sympy_shape( + self._new_symbolic_shape( + self._get_shape_rank(node, 0), node)))) + else: + shape = self._get_shape(node, 0) + output_shape = [] + axes = [handle_negative_axis(a, len(shape)) for a in axes] + for i, d in enumerate(shape): + if i in axes: + if keep_dims: + output_shape.append(1) + else: + output_shape.append(d) + vi.CopyFrom( + helper.make_tensor_value_info(node.output[ + 0], self.known_vi_[node.input[ + 0]].type.tensor_type.elem_type, output_shape)) + + def _infer_ReduceProd(self, node): + axes = get_attribute(node, 'axes') + keep_dims = get_attribute(node, 'keepdims', 1) + if keep_dims == 0 and axes == [0]: + data = self._get_int_values(node)[0] + if data is not None: + self.sympy_data_[node.output[0]] = sympy_reduce_product(data) + + def _infer_Reshape(self, node): + shape_value = self._try_get_value(node, 1) + vi = self.known_vi_[node.output[0]] + if shape_value is None: + shape_shape = self._get_shape(node, 1) + assert len(shape_shape) == 1 + shape_rank = shape_shape[0] + assert is_literal(shape_rank) + vi.CopyFrom( + helper.make_tensor_value_info( + node.output[0], vi.type.tensor_type.elem_type, + get_shape_from_sympy_shape( + self._new_symbolic_shape(shape_rank, node)))) + else: + input_sympy_shape = self._get_sympy_shape(node, 0) + total = int(1) + for d in input_sympy_shape: + total = total * d + new_sympy_shape = [] + deferred_dim_idx = -1 + non_deferred_size = int(1) + for i, d in enumerate(shape_value): + if type(d) == sympy.Symbol: + new_sympy_shape.append(d) + elif d == 0: + new_sympy_shape.append(input_sympy_shape[i]) + non_deferred_size = non_deferred_size * input_sympy_shape[i] + else: + new_sympy_shape.append(d) + if d == -1: + deferred_dim_idx = i + elif d != 0: + non_deferred_size = non_deferred_size * d + + assert new_sympy_shape.count(-1) < 2 + if -1 in new_sympy_shape: + new_dim = total // non_deferred_size + new_sympy_shape[deferred_dim_idx] = new_dim + + self._update_computed_dims(new_sympy_shape) + vi.CopyFrom( + helper.make_tensor_value_info( + node.output[0], vi.type.tensor_type.elem_type, + get_shape_from_sympy_shape(new_sympy_shape))) + + self._pass_on_sympy_data(node) + + def _infer_Resize(self, node): + vi = self.known_vi_[node.output[0]] + input_sympy_shape = self._get_sympy_shape(node, 0) + if get_opset(self.out_mp_) <= 10: + scales = self._try_get_value(node, 1) + if scales is not None: + new_sympy_shape = [ + sympy.simplify(sympy.floor(d * s)) + for d, s in zip(input_sympy_shape, scales) + ] + self._update_computed_dims(new_sympy_shape) + vi.CopyFrom( + helper.make_tensor_value_info( + node.output[0], self.known_vi_[node.input[ + 0]].type.tensor_type.elem_type, + get_shape_from_sympy_shape(new_sympy_shape))) + else: + roi = self._try_get_value(node, 1) + scales = self._try_get_value(node, 2) + sizes = self._try_get_value(node, 3) + if sizes is not None: + new_sympy_shape = [ + sympy.simplify(sympy.floor(s)) for s in sizes + ] + 
self._update_computed_dims(new_sympy_shape) + elif scales is not None: + rank = len(scales) + if get_attribute(node, 'coordinate_transformation_mode' + ) == 'tf_crop_and_resize': + assert len(roi) == 2 * rank + roi_start = list(roi)[:rank] + roi_end = list(roi)[rank:] + else: + roi_start = [0] * rank + roi_end = [1] * rank + scales = list(scales) + new_sympy_shape = [ + sympy.simplify(sympy.floor(d * (end - start) * scale)) + for d, start, end, scale in + zip(input_sympy_shape, roi_start, roi_end, scales) + ] + self._update_computed_dims(new_sympy_shape) + else: + new_sympy_shape = self._new_symbolic_shape( + self._get_shape_rank(node, 0), node) + + vi.CopyFrom( + helper.make_tensor_value_info(node.output[0], self.known_vi_[ + node.input[0]].type.tensor_type.elem_type, + get_shape_from_sympy_shape( + new_sympy_shape))) + + def _infer_Scan(self, node): + subgraph = get_attribute(node, 'body') + num_scan_inputs = get_attribute(node, 'num_scan_inputs') + scan_input_axes = get_attribute(node, 'scan_input_axes', [0] * + num_scan_inputs) + num_scan_states = len(node.input) - num_scan_inputs + scan_input_axes = [ + handle_negative_axis( + ax, self._get_shape_rank(node, i + num_scan_states)) + for i, ax in enumerate(scan_input_axes) + ] + # We may have cases where the subgraph has optionial inputs that appear in both subgraph's input and initializer, + # but not in the node's input. In such cases, the input model might be invalid, but let's skip those optional inputs. + assert len(subgraph.input) >= len(node.input) + subgraph_inputs = subgraph.input[:len(node.input)] + for i, si in enumerate(subgraph_inputs): + subgraph_name = si.name + si.CopyFrom(self.known_vi_[node.input[i]]) + if i >= num_scan_states: + scan_input_dim = si.type.tensor_type.shape.dim + scan_input_dim.remove( + scan_input_dim[scan_input_axes[i - num_scan_states]]) + si.name = subgraph_name + self._onnx_infer_subgraph(node, subgraph) + num_scan_outputs = len(node.output) - num_scan_states + scan_output_axes = get_attribute(node, 'scan_output_axes', [0] * + num_scan_outputs) + scan_input_dim = get_shape_from_type_proto( + self.known_vi_[node.input[-1]].type)[scan_input_axes[-1]] + for i, o in enumerate(node.output): + vi = self.known_vi_[o] + if i >= num_scan_states: + shape = get_shape_from_type_proto(subgraph.output[i].type) + new_dim = handle_negative_axis( + scan_output_axes[i - num_scan_states], len(shape) + 1) + shape = shape[:new_dim] + [scan_input_dim] + shape[new_dim:] + vi.CopyFrom( + helper.make_tensor_value_info(o, subgraph.output[ + i].type.tensor_type.elem_type, shape)) + else: + vi.CopyFrom(subgraph.output[i]) + vi.name = o + + def _infer_ScatterElements(self, node): + data_shape = self._get_shape(node, 0) + vi = self.known_vi_[node.output[0]] + vi.CopyFrom( + helper.make_tensor_value_info(node.output[0], self.known_vi_[ + node.input[0]].type.tensor_type.elem_type, data_shape)) + + def _infer_SequenceAt(self, node): + # need to create new symbolic dimension if sequence shape has None: + seq_shape = self._get_shape(node, 0) + vi = self.known_vi_[node.output[0]] + if seq_shape is not None: + for di, d in enumerate(seq_shape): + if d is not None: + continue + new_dim = onnx.TensorShapeProto.Dimension() + new_dim.dim_param = str( + self._new_symbolic_dim_from_output(node, 0, di)) + vi.type.tensor_type.shape.dim[di].CopyFrom(new_dim) + + def _infer_SequenceInsert(self, node): + # workaround bug in onnx's shape inference + vi_seq = self.known_vi_[node.input[0]] + vi_tensor = self.known_vi_[node.input[1]] + vi_out_seq = 
self.known_vi_[node.output[0]] + vi_out_seq.CopyFrom(vi_seq) + vi_out_seq.name = node.output[0] + self._fuse_tensor_type(node, 0, vi_out_seq.type, vi_tensor.type) + + def _infer_Shape(self, node): + self.sympy_data_[node.output[0]] = self._get_sympy_shape(node, 0) + + def _infer_Size(self, node): + sympy_shape = self._get_sympy_shape(node, 0) + self.sympy_data_[node.output[0]] = sympy_reduce_product(sympy_shape) + self.known_vi_[node.output[0]].CopyFrom( + helper.make_tensor_value_info(node.output[0], + onnx.TensorProto.INT64, [])) + + def _infer_Slice(self, node): + def less_equal(x, y): + try: + return bool(x <= y) + except TypeError: + pass + try: + return bool(y >= x) + except TypeError: + pass + try: + return bool(-x >= -y) + except TypeError: + pass + try: + return bool(-y <= -x) + except TypeError: + # the last attempt; this may raise TypeError + return bool(y - x >= 0) + + def handle_negative_index(index, bound): + """ normalizes a negative index to be in [0, bound) """ + try: + if not less_equal(0, index): + if is_literal(index) and index <= -self.int_max_: + # this case is handled separately + return index + return bound + index + except TypeError: + logger.warning("Cannot determine if {} < 0".format(index)) + return index + + if get_opset(self.out_mp_) <= 9: + axes = get_attribute(node, 'axes') + starts = get_attribute(node, 'starts') + ends = get_attribute(node, 'ends') + if not axes: + axes = list(range(len(starts))) + steps = [1] * len(axes) + else: + starts = as_list(self._try_get_value(node, 1), keep_none=True) + ends = as_list(self._try_get_value(node, 2), keep_none=True) + axes = self._try_get_value(node, 3) + steps = self._try_get_value(node, 4) + if axes is None and not (starts is None and ends is None): + axes = list( + range(0, len(starts if starts is not None else ends))) + if steps is None and not (starts is None and ends is None): + steps = [1] * len(starts if starts is not None else ends) + axes = as_list(axes, keep_none=True) + steps = as_list(steps, keep_none=True) + + new_sympy_shape = self._get_sympy_shape(node, 0) + if starts is None or ends is None: + if axes is None: + for i in range(len(new_sympy_shape)): + new_sympy_shape[i] = self._new_symbolic_dim_from_output( + node, 0, i) + else: + new_sympy_shape = get_shape_from_sympy_shape(new_sympy_shape) + for i in axes: + new_sympy_shape[i] = self._new_symbolic_dim_from_output( + node, 0, i) + else: + for i, s, e, t in zip(axes, starts, ends, steps): + e = handle_negative_index(e, new_sympy_shape[i]) + if is_literal(e): + if e >= self.int_max_: + e = new_sympy_shape[i] + elif e <= -self.int_max_: + e = 0 if s > 0 else -1 + elif is_literal(new_sympy_shape[i]): + if e < 0: + e = max(0, e + new_sympy_shape[i]) + e = min(e, new_sympy_shape[i]) + else: + if e > 0: + e = sympy.Min( + e, new_sympy_shape[i] + ) if e > 1 else e #special case for slicing first to make computation easier + else: + if is_literal(new_sympy_shape[i]): + e = sympy.Min(e, new_sympy_shape[i]) + else: + try: + if not less_equal(e, new_sympy_shape[i]): + e = new_sympy_shape[i] + except Exception: + logger.warning( + 'Unable to determine if {} <= {}, treat as equal'. 
+ format(e, new_sympy_shape[i])) + e = new_sympy_shape[i] + + s = handle_negative_index(s, new_sympy_shape[i]) + if is_literal(new_sympy_shape[i]) and is_literal(s): + s = max(0, min(s, new_sympy_shape[i])) + + new_sympy_shape[i] = sympy.simplify( + (e - s + t + (-1 if t > 0 else 1)) // t) + + self._update_computed_dims(new_sympy_shape) + + vi = self.known_vi_[node.output[0]] + vi.CopyFrom( + helper.make_tensor_value_info( + node.output[0], vi.type.tensor_type.elem_type, + get_shape_from_sympy_shape(new_sympy_shape))) + + # handle sympy_data if needed, for slice in shape computation + if (node.input[0] in self.sympy_data_ and [0] == axes and + len(starts) == 1 and len(ends) == 1 and len(steps) == 1): + input_sympy_data = self.sympy_data_[node.input[0]] + if type(input_sympy_data) == list or ( + type(input_sympy_data) == np.array and + len(input_sympy_data.shape) == 1): + self.sympy_data_[node.output[0]] = input_sympy_data[starts[ + 0]:ends[0]:steps[0]] + + def _infer_SoftmaxCrossEntropyLoss(self, node): + vi = self.known_vi_[node.output[0]] + elem_type = self.known_vi_[node.input[0]].type.tensor_type.elem_type + vi.type.tensor_type.elem_type = elem_type + vi.type.tensor_type.shape.CopyFrom(onnx.TensorShapeProto()) + + if len(node.output) > 1: + data_shape = self._get_shape(node, 0) + vi = self.known_vi_[node.output[1]] + vi.CopyFrom( + helper.make_tensor_value_info(vi.name, elem_type, data_shape)) + + def _infer_Split_Common(self, node, make_value_info_func): + input_sympy_shape = self._get_sympy_shape(node, 0) + axis = handle_negative_axis( + get_attribute(node, 'axis', 0), len(input_sympy_shape)) + split = get_attribute(node, 'split') + if not split: + num_outputs = len(node.output) + split = [input_sympy_shape[axis] / sympy.Integer(num_outputs) + ] * num_outputs + self._update_computed_dims(split) + else: + split = [sympy.Integer(s) for s in split] + + for i_o in range(len(split)): + vi = self.known_vi_[node.output[i_o]] + vi.CopyFrom( + make_value_info_func(node.output[i_o], self.known_vi_[ + node.input[0]].type.tensor_type.elem_type, + get_shape_from_sympy_shape( + input_sympy_shape[:axis] + [ + split[i_o] + ] + input_sympy_shape[axis + 1:]))) + self.known_vi_[vi.name] = vi + + def _infer_Split(self, node): + self._infer_Split_Common(node, helper.make_tensor_value_info) + + def _infer_SplitToSequence(self, node): + self._infer_Split_Common(node, helper.make_sequence_value_info) + + def _infer_Squeeze(self, node): + input_shape = self._get_shape(node, 0) + op_set = get_opset(self.out_mp_) + + # Depending on op-version 'axes' are provided as attribute or via 2nd input + if op_set < 13: + axes = get_attribute(node, 'axes') + assert self._try_get_value(node, 1) is None + else: + axes = self._try_get_value(node, 1) + assert get_attribute(node, 'axes') is None + + if axes is None: + # No axes have been provided (neither via attribute nor via input). + # In this case the 'Shape' op should remove all axis with dimension 1. + # For symbolic dimensions we guess they are !=1. + output_shape = [s for s in input_shape if s != 1] + if self.verbose_ > 0: + symbolic_dimensions = [s for s in input_shape if type(s) != int] + if len(symbolic_dimensions) > 0: + logger.debug( + f"Symbolic dimensions in input shape of op: '{node.op_type}' node: '{node.name}'. 
" + + + f"Assuming the following dimensions are never equal to 1: {symbolic_dimensions}" + ) + else: + axes = [handle_negative_axis(a, len(input_shape)) for a in axes] + output_shape = [] + for i in range(len(input_shape)): + if i not in axes: + output_shape.append(input_shape[i]) + else: + assert input_shape[i] == 1 or type(input_shape[i]) != int + if self.verbose_ > 0 and type(input_shape[i]) != int: + logger.debug( + f"Symbolic dimensions in input shape of op: '{node.op_type}' node: '{node.name}'. " + + + f"Assuming the dimension '{input_shape[i]}' at index {i} of the input to be equal to 1." + ) + + vi = self.known_vi_[node.output[0]] + vi.CopyFrom( + helper.make_tensor_value_info(node.output[0], self.known_vi_[ + node.input[0]].type.tensor_type.elem_type, output_shape)) + self._pass_on_sympy_data(node) + + def _infer_Tile(self, node): + repeats_value = self._try_get_value(node, 1) + new_sympy_shape = [] + if repeats_value is not None: + input_sympy_shape = self._get_sympy_shape(node, 0) + for i, d in enumerate(input_sympy_shape): + new_dim = d * repeats_value[i] + new_sympy_shape.append(new_dim) + self._update_computed_dims(new_sympy_shape) + else: + new_sympy_shape = self._new_symbolic_shape( + self._get_shape_rank(node, 0), node) + vi = self.known_vi_[node.output[0]] + vi.CopyFrom( + helper.make_tensor_value_info( + node.output[0], vi.type.tensor_type.elem_type, + get_shape_from_sympy_shape(new_sympy_shape))) + + def _infer_TopK(self, node): + rank = self._get_shape_rank(node, 0) + axis = handle_negative_axis(get_attribute(node, 'axis', -1), rank) + new_shape = self._get_shape(node, 0) + + if get_opset(self.out_mp_) <= 9: + k = get_attribute(node, 'k') + else: + k = self._get_int_values(node)[1] + + if k == None: + k = self._new_symbolic_dim_from_output(node) + else: + k = as_scalar(k) + + if type(k) in [int, str]: + new_shape[axis] = k + else: + new_sympy_shape = self._get_sympy_shape(node, 0) + new_sympy_shape[axis] = k + self._update_computed_dims( + new_sympy_shape + ) # note that TopK dim could be computed in sympy_data, so need to update computed_dims when it enters shape + new_shape = get_shape_from_sympy_shape(new_sympy_shape) + + for i_o in range(len(node.output)): + vi = self.known_vi_[node.output[i_o]] + vi.CopyFrom( + helper.make_tensor_value_info(node.output[ + i_o], vi.type.tensor_type.elem_type, new_shape)) + + def _infer_Transpose(self, node): + if node.input[0] in self.sympy_data_: + data_shape = self._get_shape(node, 0) + perm = get_attribute(node, 'perm', + reversed(list(range(len(data_shape))))) + input_data = self.sympy_data_[node.input[0]] + self.sympy_data_[node.output[0]] = np.transpose( + np.array(input_data).reshape(*data_shape), + axes=tuple(perm)).flatten().tolist() + + def _infer_Unsqueeze(self, node): + input_shape = self._get_shape(node, 0) + op_set = get_opset(self.out_mp_) + + # Depending on op-version 'axes' are provided as attribute or via 2nd input + if op_set < 13: + axes = get_attribute(node, 'axes') + assert self._try_get_value(node, 1) is None + else: + axes = self._try_get_value(node, 1) + assert get_attribute(node, 'axes') is None + + output_rank = len(input_shape) + len(axes) + axes = [handle_negative_axis(a, output_rank) for a in axes] + + input_axis = 0 + output_shape = [] + for i in range(output_rank): + if i in axes: + output_shape.append(1) + else: + output_shape.append(input_shape[input_axis]) + input_axis += 1 + + vi = self.known_vi_[node.output[0]] + vi.CopyFrom( + helper.make_tensor_value_info(node.output[0], self.known_vi_[ + 
node.input[0]].type.tensor_type.elem_type, output_shape)) + + self._pass_on_sympy_data(node) + + def _infer_ZipMap(self, node): + map_key_type = None + if get_attribute(node, 'classlabels_int64s') is not None: + map_key_type = onnx.TensorProto.INT64 + elif get_attribute(node, 'classlabels_strings') is not None: + map_key_type = onnx.TensorProto.STRING + + assert map_key_type is not None + new_vi = onnx.ValueInfoProto() + new_vi.name = node.output[0] + new_vi.type.sequence_type.elem_type.map_type.value_type.tensor_type.elem_type = onnx.TensorProto.FLOAT + new_vi.type.sequence_type.elem_type.map_type.key_type = map_key_type + vi = self.known_vi_[node.output[0]] + vi.CopyFrom(new_vi) + + def _infer_Attention(self, node): + shape = self._get_shape(node, 0) + shape_bias = self._get_shape(node, 2) + assert len(shape) == 3 and len(shape_bias) == 1 + qkv_hidden_sizes_attr = get_attribute(node, 'qkv_hidden_sizes') + if qkv_hidden_sizes_attr is not None: + assert len(qkv_hidden_sizes_attr) == 3 + shape[2] = int(qkv_hidden_sizes_attr[2]) + else: + shape[2] = int(shape_bias[0] / 3) + output_dtype = self.known_vi_[node.input[0]].type.tensor_type.elem_type + vi = self.known_vi_[node.output[0]] + vi.CopyFrom( + helper.make_tensor_value_info(node.output[0], output_dtype, shape)) + + if len(node.output) > 1: + # input shape: (batch_size, sequence_length, hidden_size) + # past shape: (2, batch_size, num_heads, past_sequence_length, head_size) + # mask shape: (batch_size, total_sequence_length) or (batch_size, sequence_length, total_sequence_length) or (batch_size, 1, max_seq_len, max_seq_len) + # present shape: (2, batch_size, num_heads, total_sequence_length, head_size), where total_sequence_length=sequence_length+past_sequence_length + input_shape = self._get_shape(node, 0) + past_shape = self._get_shape(node, 4) + mask_shape = self._get_shape(node, 3) + if len(past_shape) == 5: + if len(mask_shape) in [2, 3]: + past_shape[3] = mask_shape[-1] + elif isinstance(input_shape[1], int) and isinstance( + past_shape[3], int): + past_shape[3] = input_shape[1] + past_shape[3] + else: + past_shape[3] = f"{past_shape[3]}+{input_shape[1]}" + vi = self.known_vi_[node.output[1]] + vi.CopyFrom( + helper.make_tensor_value_info(vi.name, output_dtype, + past_shape)) + + def _infer_BiasGelu(self, node): + self._propagate_shape_and_type(node) + + def _infer_FastGelu(self, node): + self._propagate_shape_and_type(node) + + def _infer_Gelu(self, node): + self._propagate_shape_and_type(node) + + def _infer_LayerNormalization(self, node): + self._propagate_shape_and_type(node) + + def _infer_LongformerAttention(self, node): + self._propagate_shape_and_type(node) + + def _infer_EmbedLayerNormalization(self, node): + input_ids_shape = self._get_shape(node, 0) + word_embedding_shape = self._get_shape(node, 2) + assert len(input_ids_shape) == 2 and len(word_embedding_shape) == 2 + output_shape = input_ids_shape + [word_embedding_shape[1]] + + word_embedding_dtype = self.known_vi_[node.input[ + 2]].type.tensor_type.elem_type + vi = self.known_vi_[node.output[0]] + vi.CopyFrom( + helper.make_tensor_value_info(node.output[0], word_embedding_dtype, + output_shape)) + + mask_index_shape = [input_ids_shape[0]] + vi = self.known_vi_[node.output[1]] + vi.CopyFrom( + helper.make_tensor_value_info(node.output[ + 1], onnx.TensorProto.INT32, mask_index_shape)) + + if len(node.output) > 2: + # Optional output of add before layer nomalization is done + # shape is same as the output + vi = self.known_vi_[node.output[2]] + vi.CopyFrom( + 
helper.make_tensor_value_info(node.output[ + 2], word_embedding_dtype, output_shape)) + + def _infer_SkipLayerNormalization(self, node): + self._propagate_shape_and_type(node) + + def _infer_PythonOp(self, node): + output_tensor_types = get_attribute(node, 'output_tensor_types') + assert output_tensor_types + output_tensor_ranks = get_attribute(node, 'output_tensor_ranks') + assert output_tensor_ranks + + # set the context output seperately. + # The first output is autograd's context. + vi = self.known_vi_[node.output[0]] + vi.CopyFrom( + helper.make_tensor_value_info(node.output[0], + onnx.TensorProto.INT64, [])) + + # Outputs after autograd's context are tensors. + # We assume their ranks are fixed for different model inputs. + for i in range(len(node.output) - 1): + # Process the i-th tensor outputs. + vi = self.known_vi_[node.output[i + 1]] + sympy_shape = self._new_symbolic_shape(output_tensor_ranks[i], node) + shape = get_shape_from_sympy_shape(sympy_shape) + value_info = helper.make_tensor_value_info( + node.output[i + 1], output_tensor_types[i], shape) + vi.CopyFrom(value_info) + + def _propagate_shape_and_type(self, node, input_index=0, output_index=0): + shape = self._get_shape(node, input_index) + output_dtype = self.known_vi_[node.input[ + input_index]].type.tensor_type.elem_type + vi = self.known_vi_[node.output[output_index]] + vi.CopyFrom( + helper.make_tensor_value_info(node.output[output_index], + output_dtype, shape)) + + def _is_none_dim(self, dim_value): + if type(dim_value) != str: + return False + if "unk__" not in dim_value: + return False + if dim_value in self.symbolic_dims_.keys(): + return False + return True + + def _is_shape_contains_none_dim(self, out_shape): + for out in out_shape: + if self._is_none_dim(out): + return out + return None + + def _infer_impl(self, start_sympy_data=None): + self.sympy_data_ = start_sympy_data or {} + self.out_mp_.graph.ClearField('value_info') + self._apply_suggested_merge(graph_input_only=True) + self.input_symbols_ = set() + for i in self.out_mp_.graph.input: + input_shape = get_shape_from_value_info(i) + if input_shape is None: + continue + + if is_sequence(i.type): + input_dims = i.type.sequence_type.elem_type.tensor_type.shape.dim + else: + input_dims = i.type.tensor_type.shape.dim + + for i_dim, dim in enumerate(input_shape): + if dim is None: + # some models use None for symbolic dim in input, replace it with a string + input_dims[i_dim].dim_param = str( + self._new_symbolic_dim(i.name, i_dim)) + + self.input_symbols_.update( + [d for d in input_shape if type(d) == str]) + + for s in self.input_symbols_: + if s in self.suggested_merge_: + s_merge = self.suggested_merge_[s] + assert s_merge in self.symbolic_dims_ + self.symbolic_dims_[s] = self.symbolic_dims_[s_merge] + else: + # Since inputs are not produced by other ops, we can assume positivity + self.symbolic_dims_[s] = sympy.Symbol( + s, integer=True, positive=True) + # create a temporary ModelProto for single node inference + # note that we remove initializer to have faster inference + # for tensor ops like Reshape/Tile/Expand that read initializer, we need to do sympy computation based inference anyways + self.tmp_mp_ = onnx.ModelProto() + self.tmp_mp_.CopyFrom(self.out_mp_) + self.tmp_mp_.graph.ClearField('initializer') + + # compute prerequesite for node for topological sort + # node with subgraphs may have dependency on implicit inputs, which will affect topological sort + prereq_for_node = { + } # map from node to all its inputs, including implicit ones in 
subgraph + + def get_prereq(node): + names = set(i for i in node.input if i) + subgraphs = [] + if 'If' == node.op_type: + subgraphs = [ + get_attribute(node, 'then_branch'), get_attribute( + node, 'else_branch') + ] + elif node.op_type in ['Loop', 'Scan']: + subgraphs = [get_attribute(node, 'body')] + for g in subgraphs: + g_outputs_and_initializers = {i.name for i in g.initializer} + g_prereq = set() + for n in g.node: + g_outputs_and_initializers.update(n.output) + for n in g.node: + g_prereq.update([ + i for i in get_prereq(n) + if i not in g_outputs_and_initializers + ]) + names.update(g_prereq) + # remove subgraph inputs from g_prereq since those are local-only + for i in g.input: + if i.name in names: + names.remove(i.name) + return names + + for n in self.tmp_mp_.graph.node: + prereq_for_node[n.output[0]] = get_prereq(n) + + # topological sort nodes, note there might be dead nodes so we check if all graph outputs are reached to terminate + sorted_nodes = [] + sorted_known_vi = set([ + i.name for i in list(self.out_mp_.graph.input) + + list(self.out_mp_.graph.initializer) + ]) + if any([o.name in sorted_known_vi for o in self.out_mp_.graph.output]): + # Loop/Scan will have some graph output in graph inputs, so don't do topological sort + sorted_nodes = self.out_mp_.graph.node + else: + while not all( + [o.name in sorted_known_vi for o in self.out_mp_.graph.output]): + old_sorted_nodes_len = len(sorted_nodes) + for node in self.out_mp_.graph.node: + if (node.output[0] not in sorted_known_vi) and all([ + i in sorted_known_vi + for i in prereq_for_node[node.output[0]] if i + ]): + sorted_known_vi.update(node.output) + sorted_nodes.append(node) + if old_sorted_nodes_len == len(sorted_nodes) and not all([ + o.name in sorted_known_vi + for o in self.out_mp_.graph.output + ]): + raise Exception('Invalid model with cyclic graph') + + for node in sorted_nodes: + assert all([i in self.known_vi_ for i in node.input if i]) + self._onnx_infer_single_node(node) + known_aten_op = False + if node.op_type in self.dispatcher_: + self.dispatcher_[node.op_type](node) + elif node.op_type in ['ConvTranspose']: + # onnx shape inference ops like ConvTranspose may have empty shape for symbolic input + # before adding symbolic compute for them + # mark the output type as UNDEFINED to allow guessing of rank + vi = self.known_vi_[node.output[0]] + if len(vi.type.tensor_type.shape.dim) == 0: + vi.type.tensor_type.elem_type = onnx.TensorProto.UNDEFINED + elif node.op_type == 'ATen' and node.domain == 'org.pytorch.aten': + for attr in node.attribute: + # TODO: Is overload_name needed? + if attr.name == 'operator': + aten_op_name = attr.s.decode('utf-8') if isinstance( + attr.s, bytes) else attr.s + if aten_op_name in self.aten_op_dispatcher_: + known_aten_op = True + self.aten_op_dispatcher_[aten_op_name](node) + break + + if self.verbose_ > 2: + logger.debug(node.op_type + ': ' + node.name) + for i, name in enumerate(node.input): + logger.debug(' Input {}: {} {}'.format( + i, name, 'initializer' + if name in self.initializers_ else '')) + + # onnx automatically merge dims with value, i.e. 
Mul(['aaa', 'bbb'], [1000, 1]) -> [1000, 'bbb'] + # symbolic shape inference needs to apply merge of 'aaa' -> 1000 in this case + if node.op_type in [ + 'Add', 'Sub', 'Mul', 'Div', 'MatMul', 'MatMulInteger', + 'MatMulInteger16', 'Where', 'Sum' + ]: + vi = self.known_vi_[node.output[0]] + out_rank = len(get_shape_from_type_proto(vi.type)) + in_shapes = [ + self._get_shape(node, i) for i in range(len(node.input)) + ] + for d in range(out_rank - (2 if node.op_type in [ + 'MatMul', 'MatMulInteger', 'MatMulInteger16' + ] else 0)): + in_dims = [ + s[len(s) - out_rank + d] for s in in_shapes + if len(s) + d >= out_rank + ] + if len(in_dims) > 1: + self._check_merged_dims(in_dims, allow_broadcast=True) + + for i_o in range(len(node.output)): + vi = self.known_vi_[node.output[i_o]] + out_type = vi.type + out_type_kind = out_type.WhichOneof('value') + + # do not process shape for non-tensors + if out_type_kind not in [ + 'tensor_type', 'sparse_tensor_type', None + ]: + if self.verbose_ > 2: + if out_type_kind == 'sequence_type': + seq_cls_type = out_type.sequence_type.elem_type.WhichOneof( + 'value') + if 'tensor_type' == seq_cls_type: + logger.debug(' {}: sequence of {} {}'.format( + node.output[i_o], + str(get_shape_from_value_info(vi)), + onnx.TensorProto.DataType.Name( + vi.type.sequence_type.elem_type. + tensor_type.elem_type))) + else: + logger.debug(' {}: sequence of {}'.format( + node.output[i_o], seq_cls_type)) + else: + logger.debug(' {}: {}'.format(node.output[i_o], + out_type_kind)) + continue + + out_shape = get_shape_from_value_info(vi) + out_type_undefined = out_type.tensor_type.elem_type == onnx.TensorProto.UNDEFINED + if self.verbose_ > 2: + logger.debug(' {}: {} {}'.format( + node.output[i_o], + str(out_shape), + onnx.TensorProto.DataType.Name( + vi.type.tensor_type.elem_type))) + if node.output[i_o] in self.sympy_data_: + logger.debug(' Sympy Data: ' + str(self.sympy_data_[ + node.output[i_o]])) + + # onnx >= 1.11.0, use unk__#index instead of None when the shape dim is uncertain + if (out_shape is not None and + (None in out_shape or + self._is_shape_contains_none_dim(out_shape)) + ) or out_type_undefined: + if self.auto_merge_: + if node.op_type in [ + 'Add', 'Sub', 'Mul', 'Div', 'MatMul', + 'MatMulInteger', 'MatMulInteger16', 'Concat', + 'Where', 'Sum', 'Equal', 'Less', 'Greater', + 'LessOrEqual', 'GreaterOrEqual' + ]: + shapes = [ + self._get_shape(node, i) for i in range( + len(node.input)) + ] + if node.op_type in [ + 'MatMul', 'MatMulInteger', 'MatMulInteger16' + ]: + if None in out_shape or self._is_shape_contains_none_dim( + out_shape): + if None in out_shape: + idx = out_shape.index(None) + else: + idx = out_shape.index( + self._is_shape_contains_none_dim( + out_shape)) + dim_idx = [ + len(s) - len(out_shape) + idx + for s in shapes + ] + # only support auto merge for MatMul for dim < rank-2 when rank > 2 + assert len( + shapes[0]) > 2 and dim_idx[0] < len( + shapes[0]) - 2 + assert len( + shapes[1]) > 2 and dim_idx[1] < len( + shapes[1]) - 2 + elif node.op_type == 'Expand': + # auto merge for cases like Expand([min(batch, 1), min(seq, 512)], [batch, seq]) + shapes = [ + self._get_shape(node, 0), self._get_value(node, + 1) + ] + else: + shapes = [] + + if shapes: + for idx in range(len(out_shape)): + if out_shape[ + idx] is not None and not self._is_none_dim( + out_shape[idx]): + continue + # note that the broadcasting rule aligns from right to left + # if a tensor has a lower rank (dim_idx[idx] < 0), it would automatically broadcast and need no merge + dim_idx = [ + 
len(s) - len(out_shape) + idx + for s in shapes + ] + if len(dim_idx) > 0: + self._add_suggested_merge([ + s[i] if is_literal(s[i]) else str(s[i]) + for s, i in zip(shapes, dim_idx) + if i >= 0 + ]) + self.run_ = True + else: + self.run_ = False + else: + self.run_ = False + + # create new dynamic dims for ops not handled by symbolic shape inference + if self.run_ == False and not node.op_type in self.dispatcher_ and not known_aten_op: + is_unknown_op = out_type_undefined and ( + out_shape is None or len(out_shape) == 0) + if is_unknown_op: + # unknown op to ONNX, maybe from higher opset or other domain + # only guess the output rank from input 0 when using guess_output_rank option + out_rank = self._get_shape_rank( + node, 0) if self.guess_output_rank_ else -1 + else: + # valid ONNX op, but not handled by symbolic shape inference, just assign dynamic shape + out_rank = len(out_shape) + + if out_rank >= 0: + new_shape = self._new_symbolic_shape(out_rank, node, + i_o) + if out_type_undefined: + # guess output data type from input vi if not defined + out_dtype = self.known_vi_[node.input[ + 0]].type.tensor_type.elem_type + else: + # otherwise, use original data type + out_dtype = vi.type.tensor_type.elem_type + vi.CopyFrom( + helper.make_tensor_value_info( + vi.name, out_dtype, + get_shape_from_sympy_shape(new_shape))) + + if self.verbose_ > 0: + if is_unknown_op: + logger.debug( + "Possible unknown op: {} node: {}, guessing {} shape". + format(node.op_type, node.name, + vi.name)) + if self.verbose_ > 2: + logger.debug(' {}: {} {}'.format( + node.output[i_o], + str(new_shape), + vi.type.tensor_type.elem_type)) + + self.run_ = True + continue # continue the inference after guess, no need to stop as no merge is needed + + if self.verbose_ > 0 or not self.auto_merge_ or out_type_undefined: + logger.debug( + 'Stopping at incomplete shape inference at ' + + node.op_type + ': ' + node.name) + logger.debug('node inputs:') + for i in node.input: + logger.debug(self.known_vi_[i]) + logger.debug('node outputs:') + for o in node.output: + logger.debug(self.known_vi_[o]) + if self.auto_merge_ and not out_type_undefined: + logger.debug('Merging: ' + str( + self.suggested_merge_)) + return False + + self.run_ = False + return True + + def _update_output_from_vi(self): + for output in self.out_mp_.graph.output: + if output.name in self.known_vi_: + output.CopyFrom(self.known_vi_[output.name]) + + @staticmethod + def infer_shapes(in_mp, + int_max=2**31 - 1, + auto_merge=False, + guess_output_rank=False, + verbose=0): + onnx_opset = get_opset(in_mp) + if (not onnx_opset) or onnx_opset < 7: + logger.warning('Only support models of onnx opset 7 and above.') + return None + symbolic_shape_inference = SymbolicShapeInference( + int_max, auto_merge, guess_output_rank, verbose) + all_shapes_inferred = False + symbolic_shape_inference._preprocess(in_mp) + while symbolic_shape_inference.run_: + all_shapes_inferred = symbolic_shape_inference._infer_impl() + symbolic_shape_inference._update_output_from_vi() + if not all_shapes_inferred: + raise Exception("Incomplete symbolic shape inference") + return symbolic_shape_inference.out_mp_ + + +def parse_arguments(): + parser = argparse.ArgumentParser() + parser.add_argument('--input', required=True, help='The input model file') + parser.add_argument('--output', help='The output model file') + parser.add_argument( + '--auto_merge', + help='Automatically merge symbolic dims when confliction happens', + action='store_true', + default=False) + parser.add_argument( + 
'--int_max', + help='maximum value for integer to be treated as boundless for ops like slice', + type=int, + default=2**31 - 1) + parser.add_argument( + '--guess_output_rank', + help='guess output rank to be the same as input 0 for unknown ops', + action='store_true', + default=False) + parser.add_argument( + '--verbose', + help='Prints detailed logs of inference, 0: turn off, 1: warnings, 3: detailed', + type=int, + default=0) + return parser.parse_args() + + +if __name__ == '__main__': + args = parse_arguments() + logger.info('input model: ' + args.input) + if args.output: + logger.info('output model ' + args.output) + logger.info('Doing symbolic shape inference...') + out_mp = SymbolicShapeInference.infer_shapes( + onnx.load(args.input), args.int_max, args.auto_merge, + args.guess_output_rank, args.verbose) + if args.output and out_mp: + onnx.save(out_mp, args.output) + logger.info('Done!') diff --git a/speechx/examples/ds2_ol/onnx/local/tonnx.sh b/speechx/examples/ds2_ol/onnx/local/tonnx.sh index 58f0d736..ffedf001 100755 --- a/speechx/examples/ds2_ol/onnx/local/tonnx.sh +++ b/speechx/examples/ds2_ol/onnx/local/tonnx.sh @@ -21,5 +21,5 @@ paddle2onnx --model_dir $dir \ --save_file $output \ --enable_dev_version True \ --opset_version 9 \ - --enable_onnx_checker True \ + --enable_onnx_checker True \ No newline at end of file diff --git a/speechx/examples/ds2_ol/onnx/run.sh b/speechx/examples/ds2_ol/onnx/run.sh index a9f7681c..07706749 100755 --- a/speechx/examples/ds2_ol/onnx/run.sh +++ b/speechx/examples/ds2_ol/onnx/run.sh @@ -28,6 +28,7 @@ param=avg_1.jit.pdiparams output_names=softmax_0.tmp_0,tmp_5,concat_0.tmp_0,concat_1.tmp_0 if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ];then + # prune model by outputs mkdir -p $exp/prune # prune model deps on output_names. @@ -36,8 +37,8 @@ fi input_shape_dict="{'audio_chunk':[1,-1,161], 'audio_chunk_lens':[1], 'chunk_state_c_box':[5, 1, 1024], 'chunk_state_h_box':[5,1,1024]}" if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ];then + # infer shape by new shape mkdir -p $exp/shape - python3 local/pd_infer_shape.py \ --model_dir $dir \ --model_filename $model \ @@ -47,5 +48,6 @@ if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ];then fi if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ];then + # to onnx ./local/tonnx.sh $dir $model $param $exp/model.onnx fi \ No newline at end of file From 17a96cd6ea1f72307f5a873be1caab3e67a8f5c3 Mon Sep 17 00:00:00 2001 From: Hui Zhang Date: Tue, 14 Jun 2022 11:02:55 +0000 Subject: [PATCH 05/10] pd ort infer check --- .../examples/ds2_ol/onnx/local/infer_check.py | 86 +++++++++++++++++++ speechx/examples/ds2_ol/onnx/run.sh | 7 +- 2 files changed, 92 insertions(+), 1 deletion(-) create mode 100755 speechx/examples/ds2_ol/onnx/local/infer_check.py diff --git a/speechx/examples/ds2_ol/onnx/local/infer_check.py b/speechx/examples/ds2_ol/onnx/local/infer_check.py new file mode 100755 index 00000000..4debf4d3 --- /dev/null +++ b/speechx/examples/ds2_ol/onnx/local/infer_check.py @@ -0,0 +1,86 @@ +#!/usr/bin/env python3 +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import numpy as np +import onnxruntime +import paddle +import os +import pickle + +def parse_args(): + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument( + '--input_file', + type=str, + default="static_ds2online_inputs.pickle", + help="ds2 input pickle file.", + ) + parser.add_argument( + '--model_dir', + type=str, + default=".", + help="paddle model dir." + ) + parser.add_argument( + '--onnx_model', + type=str, + default='./model.old.onnx', + help="onnx model." + ) + + return parser.parse_args() + + +if __name__ == '__main__': + FLAGS = parse_args() + + # input and output + with open(FLAGS.input_file, 'rb') as f: + iodict = pickle.load(f) + print(iodict.keys()) + + audio_chunk = iodict['audio_chunk'] + audio_chunk_lens = iodict['audio_chunk_lens'] + chunk_state_h_box = iodict['chunk_state_h_box'] + chunk_state_c_box = iodict['chunk_state_c_bos'] + + # paddle + model = paddle.jit.load(os.path.join(FLAGS.model_dir, "avg_1.jit")) + res_chunk, res_lens, chunk_state_h, chunk_state_c = model( + paddle.to_tensor(audio_chunk), + paddle.to_tensor(audio_chunk_lens), + paddle.to_tensor(chunk_state_h_box), + paddle.to_tensor(chunk_state_c_box), + ) + + # onnxruntime + options = onnxruntime.SessionOptions() + options.enable_profiling=True + sess = onnxruntime.InferenceSession(FLAGS.onnx_model, sess_options=options) + ort_res_chunk, ort_res_lens, ort_chunk_state_h, ort_chunk_state_c = sess.run( + ['softmax_0.tmp_0', 'tmp_5', 'concat_0.tmp_0', 'concat_1.tmp_0'], + {"audio_chunk": audio_chunk, + "audio_chunk_lens":audio_chunk_lens, + "chunk_state_h_box": chunk_state_h_box, + "chunk_state_c_box":chunk_state_c_box}) + + print(sess.end_profiling()) + + # assert paddle equal ort + print(np.allclose(ort_res_chunk, res_chunk, atol=1e-6)) + print(np.allclose(ort_res_lens, res_lens, atol=1e-6)) + print(np.allclose(ort_chunk_state_h, chunk_state_h, atol=1e-6)) + print(np.allclose(ort_chunk_state_c, chunk_state_c, atol=1e-6)) \ No newline at end of file diff --git a/speechx/examples/ds2_ol/onnx/run.sh b/speechx/examples/ds2_ol/onnx/run.sh index 07706749..b7c7e2fb 100755 --- a/speechx/examples/ds2_ol/onnx/run.sh +++ b/speechx/examples/ds2_ol/onnx/run.sh @@ -22,10 +22,12 @@ if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ];then popd fi + dir=$data/exp/deepspeech2_online/checkpoints model=avg_1.jit.pdmodel param=avg_1.jit.pdiparams + output_names=softmax_0.tmp_0,tmp_5,concat_0.tmp_0,concat_1.tmp_0 if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ];then # prune model by outputs @@ -47,7 +49,10 @@ if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ];then --input_shape_dict=${input_shape_dict} fi + if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ];then # to onnx ./local/tonnx.sh $dir $model $param $exp/model.onnx -fi \ No newline at end of file + ./local/infer_check.py --input_file 'static_ds2online_inputs.pickle' --model_dir $dir --onnx_model $exp/model.onnx +fi + From de6e4d0c20fb28e4cb9f10a699bc3d776c3cf5d0 Mon Sep 17 00:00:00 2001 From: Hui Zhang Date: Tue, 14 Jun 2022 11:31:12 +0000 Subject: [PATCH 06/10] more docstring --- speechx/examples/ds2_ol/onnx/README.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 speechx/examples/ds2_ol/onnx/README.md diff --git a/speechx/examples/ds2_ol/onnx/README.md b/speechx/examples/ds2_ol/onnx/README.md new file mode 100644 index 00000000..2dc0de3e --- /dev/null +++ b/speechx/examples/ds2_ol/onnx/README.md @@ -0,0 
+1,16 @@ +# DeepSpeech2 ONNX model + +1. convert ds2 model to ONNX, using Paddle2ONNX. +2. check that Paddle Inference and onnxruntime outputs are equal. + +## Usage + +``` +bash run.sh +``` + +## Outputs + +``` +ls exp/model.onnx +``` \ No newline at end of file From 7cd40e40be15f4a117605c9b5c0f5533df0f524e Mon Sep 17 00:00:00 2001 From: Hui Zhang Date: Tue, 14 Jun 2022 11:31:33 +0000 Subject: [PATCH 07/10] more docstring --- speechx/examples/ds2_ol/onnx/local/netron.sh | 2 ++ speechx/examples/ds2_ol/onnx/local/onnx_clone.sh | 1 + speechx/examples/ds2_ol/onnx/local/onnx_opt.sh | 2 +- speechx/examples/ds2_ol/onnx/local/onnx_prune_model.py | 4 +++- speechx/examples/ds2_ol/onnx/local/onnx_rename_model.py | 2 ++ speechx/examples/ds2_ol/onnx/local/pd_infer_shape.py | 1 + speechx/examples/ds2_ol/onnx/local/pd_prune_model.py | 1 + 7 files changed, 11 insertions(+), 2 deletions(-) diff --git a/speechx/examples/ds2_ol/onnx/local/netron.sh b/speechx/examples/ds2_ol/onnx/local/netron.sh index 73e089ed..6dd9a39c 100755 --- a/speechx/examples/ds2_ol/onnx/local/netron.sh +++ b/speechx/examples/ds2_ol/onnx/local/netron.sh @@ -1,5 +1,7 @@ #!/bin/bash +# show model + if [ $# != 1 ];then echo "usage: $0 model_path" exit 1 diff --git a/speechx/examples/ds2_ol/onnx/local/onnx_clone.sh b/speechx/examples/ds2_ol/onnx/local/onnx_clone.sh index 0a472af4..bce22dbc 100644 --- a/speechx/examples/ds2_ol/onnx/local/onnx_clone.sh +++ b/speechx/examples/ds2_ol/onnx/local/onnx_clone.sh @@ -1,6 +1,7 @@ #!/bin/bash +# clone onnx repos git clone https://github.com/onnx/onnx.git git clone https://github.com/microsoft/onnxruntime.git git clone https://github.com/PaddlePaddle/Paddle2ONNX.git \ No newline at end of file diff --git a/speechx/examples/ds2_ol/onnx/local/onnx_opt.sh b/speechx/examples/ds2_ol/onnx/local/onnx_opt.sh index dd8fbd20..470a9a31 100755 --- a/speechx/examples/ds2_ol/onnx/local/onnx_opt.sh +++ b/speechx/examples/ds2_ol/onnx/local/onnx_opt.sh @@ -1,4 +1,4 @@ #!/bin/bash - +# onnx optimizer onnx-simplifier \ No newline at end of file diff --git a/speechx/examples/ds2_ol/onnx/local/onnx_prune_model.py b/speechx/examples/ds2_ol/onnx/local/onnx_prune_model.py index a5148edd..f709b8f3 100644 --- a/speechx/examples/ds2_ol/onnx/local/onnx_prune_model.py +++ b/speechx/examples/ds2_ol/onnx/local/onnx_prune_model.py @@ -1,11 +1,13 @@ #!/usr/bin/env python3 -W ignore::DeprecationWarning + +# prune model by output names + import argparse import copy import sys import onnx - def parse_arguments(): parser = argparse.ArgumentParser() parser.add_argument( diff --git a/speechx/examples/ds2_ol/onnx/local/onnx_rename_model.py b/speechx/examples/ds2_ol/onnx/local/onnx_rename_model.py index f508c0a3..8724c84d 100755 --- a/speechx/examples/ds2_ol/onnx/local/onnx_rename_model.py +++ b/speechx/examples/ds2_ol/onnx/local/onnx_rename_model.py @@ -1,4 +1,6 @@ #!/usr/bin/env python3 -W ignore::DeprecationWarning + +# rename node to new names import argparse import sys diff --git a/speechx/examples/ds2_ol/onnx/local/pd_infer_shape.py b/speechx/examples/ds2_ol/onnx/local/pd_infer_shape.py index 318131f9..723137b8 100755 --- a/speechx/examples/ds2_ol/onnx/local/pd_infer_shape.py +++ b/speechx/examples/ds2_ol/onnx/local/pd_infer_shape.py @@ -2,6 +2,7 @@ # https://github.com/jiangjiajun/PaddleUtils/blob/main/paddle/README.md#2-%E4%BF%AE%E6%94%B9paddle%E6%A8%A1%E5%9E%8B%E8%BE%93%E5%85%A5shape import argparse +# paddle inference shape def process_old_ops_desc(program): """set matmul op head_number attr to 1 is not exist. 
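PATCH 07 above only adds one-line docstrings, so the pruning helper still describes itself tersely as "prune model by output names". As a rough, self-contained illustration of that idea (not the actual logic of `local/onnx_prune_model.py`), the stock `onnx.utils.Extractor` can cut a graph down to a chosen set of outputs; the model path here is a placeholder, and the kept output names are borrowed from `run.sh`:

```python
# Sketch: keep only the subgraph needed to compute the chosen outputs,
# via onnx.utils.Extractor. Paths and output names are placeholders,
# not the repo script's own implementation.
import onnx
import onnx.utils

model = onnx.load("model.onnx")  # placeholder path
keep_inputs = [i.name for i in model.graph.input]
keep_outputs = ["softmax_0.tmp_0", "tmp_5"]  # example names from run.sh

extractor = onnx.utils.Extractor(model)
pruned = extractor.extract_model(keep_inputs, keep_outputs)

onnx.checker.check_model(pruned)
onnx.save(pruned, "model.pruned.onnx")
```

Either way, the end state is the same as stage 1 of `run.sh`: a model whose graph outputs are exactly the names passed in `output_names`.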
diff --git a/speechx/examples/ds2_ol/onnx/local/pd_prune_model.py b/speechx/examples/ds2_ol/onnx/local/pd_prune_model.py index d723c7ce..78346651 100755 --- a/speechx/examples/ds2_ol/onnx/local/pd_prune_model.py +++ b/speechx/examples/ds2_ol/onnx/local/pd_prune_model.py @@ -4,6 +4,7 @@ import argparse import sys from typing import List +# paddle prune model. def prepend_feed_ops(program, feed_target_names: List[str], From 1c9eb445589cbee999c2b66d3d18bbc564057a86 Mon Sep 17 00:00:00 2001 From: Hui Zhang Date: Wed, 15 Jun 2022 03:55:24 +0000 Subject: [PATCH 08/10] onnx opt and check result --- .../examples/ds2_ol/onnx/local/infer_check.py | 8 +++++++- .../examples/ds2_ol/onnx/local/onnx_clone.sh | 0 .../ds2_ol/onnx/local/onnx_infer_shape.py | 0 speechx/examples/ds2_ol/onnx/local/onnx_opt.sh | 18 +++++++++++++++++- .../ds2_ol/onnx/local/onnx_prune_model.py | 0 speechx/examples/ds2_ol/onnx/run.sh | 9 +++++++++ 6 files changed, 33 insertions(+), 2 deletions(-) mode change 100644 => 100755 speechx/examples/ds2_ol/onnx/local/onnx_clone.sh mode change 100644 => 100755 speechx/examples/ds2_ol/onnx/local/onnx_infer_shape.py mode change 100644 => 100755 speechx/examples/ds2_ol/onnx/local/onnx_prune_model.py diff --git a/speechx/examples/ds2_ol/onnx/local/infer_check.py b/speechx/examples/ds2_ol/onnx/local/infer_check.py index 4debf4d3..5484b314 100755 --- a/speechx/examples/ds2_ol/onnx/local/infer_check.py +++ b/speechx/examples/ds2_ol/onnx/local/infer_check.py @@ -34,6 +34,12 @@ def parse_args(): default=".", help="paddle model dir." ) + parser.add_argument( + '--model_prefix', + type=str, + default="avg_1.jit", + help="paddle model prefix." + ) parser.add_argument( '--onnx_model', type=str, @@ -58,7 +64,7 @@ if __name__ == '__main__': chunk_state_c_box = iodict['chunk_state_c_bos'] # paddle - model = paddle.jit.load(os.path.join(FLAGS.model_dir, "avg_1.jit")) + model = paddle.jit.load(os.path.join(FLAGS.model_dir, FLAGS.model_prefix)) res_chunk, res_lens, chunk_state_h, chunk_state_c = model( paddle.to_tensor(audio_chunk), paddle.to_tensor(audio_chunk_lens), diff --git a/speechx/examples/ds2_ol/onnx/local/onnx_clone.sh b/speechx/examples/ds2_ol/onnx/local/onnx_clone.sh old mode 100644 new mode 100755 diff --git a/speechx/examples/ds2_ol/onnx/local/onnx_infer_shape.py b/speechx/examples/ds2_ol/onnx/local/onnx_infer_shape.py old mode 100644 new mode 100755 diff --git a/speechx/examples/ds2_ol/onnx/local/onnx_opt.sh b/speechx/examples/ds2_ol/onnx/local/onnx_opt.sh index 470a9a31..db039a6e 100755 --- a/speechx/examples/ds2_ol/onnx/local/onnx_opt.sh +++ b/speechx/examples/ds2_ol/onnx/local/onnx_opt.sh @@ -1,4 +1,20 @@ #!/bin/bash +set -e + +if [ $# != 3 ];then + # ./local/onnx_opt.sh model.old.onnx model.opt.onnx "audio_chunk:1,-1,161 audio_chunk_lens:1 chunk_state_c_box:5,1,1024 chunk_state_h_box:5,1,1024" + echo "usage: $0 onnx.model.in onnx.model.out input_shape " + exit 1 +fi + # onnx optimizer -onnx-simplifier \ No newline at end of file +pip install onnx-simplifier + +in=$1 +out=$2 +input_shape=$3 + +check_n=3 + +onnxsim $in $2 $check_n --dynamic-input-shape --input-shape $input_shape \ No newline at end of file diff --git a/speechx/examples/ds2_ol/onnx/local/onnx_prune_model.py b/speechx/examples/ds2_ol/onnx/local/onnx_prune_model.py old mode 100644 new mode 100755 diff --git a/speechx/examples/ds2_ol/onnx/run.sh b/speechx/examples/ds2_ol/onnx/run.sh index b7c7e2fb..9ed2aa80 100755 --- a/speechx/examples/ds2_ol/onnx/run.sh +++ b/speechx/examples/ds2_ol/onnx/run.sh @@ -53,6 +53,15 @@ fi if [ 
${stage} -le 3 ] && [ ${stop_stage} -ge 3 ];then # to onnx ./local/tonnx.sh $dir $model $param $exp/model.onnx + ./local/infer_check.py --input_file 'static_ds2online_inputs.pickle' --model_dir $dir --onnx_model $exp/model.onnx fi + +if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ] ;then + input_shape="audio_chunk:1,-1,161 audio_chunk_lens:1 chunk_state_c_box:5,1,1024 chunk_state_h_box:5,1,1024" + # simplifying onnx model + ./local/onnx_opt.sh $exp/model.onnx $exp/model.opt.onnx $input_shape + + ./local/infer_check.py --input_file 'static_ds2online_inputs.pickle' --model_dir $dir --onnx_model $exp/model.opt.onnx +fi \ No newline at end of file From 5a4e35b5436c96a154dc1e480a6878621a8735eb Mon Sep 17 00:00:00 2001 From: Hui Zhang Date: Wed, 15 Jun 2022 06:21:05 +0000 Subject: [PATCH 09/10] test with run.sh --- speechx/examples/ds2_ol/onnx/.gitignore | 1 + speechx/examples/ds2_ol/onnx/README.md | 31 ++++++++++++++++--- .../examples/ds2_ol/onnx/local/onnx_opt.sh | 2 +- speechx/examples/ds2_ol/onnx/run.sh | 19 +++++++++--- 4 files changed, 42 insertions(+), 11 deletions(-) diff --git a/speechx/examples/ds2_ol/onnx/.gitignore b/speechx/examples/ds2_ol/onnx/.gitignore index 328c8678..f862f73e 100644 --- a/speechx/examples/ds2_ol/onnx/.gitignore +++ b/speechx/examples/ds2_ol/onnx/.gitignore @@ -1,2 +1,3 @@ data log +exp diff --git a/speechx/examples/ds2_ol/onnx/README.md b/speechx/examples/ds2_ol/onnx/README.md index 2dc0de3e..bba5d610 100644 --- a/speechx/examples/ds2_ol/onnx/README.md +++ b/speechx/examples/ds2_ol/onnx/README.md @@ -1,7 +1,27 @@ # DeepSpeech2 ONNX model -1. convert ds2 model to ONNX, using Paddle2ONNX. +1. convert deepspeech2 model to ONNX, using Paddle2ONNX. 2. check that Paddle Inference and onnxruntime outputs are equal. +3. optimize the onnx model. +4. check that Paddle Inference and the optimized onnxruntime outputs are equal. + +Please make sure the [Paddle2ONNX](https://github.com/PaddlePaddle/Paddle2ONNX) and [onnx-simplifier](https://github.com/zh794390558/onnx-simplifier/tree/dyn_time_shape) versions are correct. + +The example is tested with these packages installed: +``` +paddle2onnx 0.9.8rc0 # develop af4354b4e9a61a93be6490640059a02a4499bc7a +paddleaudio 0.2.1 +paddlefsl 1.1.0 +paddlenlp 2.2.6 +paddlepaddle-gpu 2.2.2 +paddlespeech 0.0.0 # develop +paddlespeech-ctcdecoders 0.2.0 +paddlespeech-feat 0.1.0 +onnx 1.11.0 +onnx-simplifier 0.0.0 # https://github.com/zh794390558/onnx-simplifier/tree/dyn_time_shape +onnxoptimizer 0.2.7 +onnxruntime 1.11.0 +``` ## Usage @@ -9,8 +29,9 @@ bash run.sh ``` +For more details please see `run.sh`. + ## Outputs -``` -ls exp/model.onnx -``` \ No newline at end of file +The optimized onnx model is `exp/model.opt.onnx`. + +To show the graph, please use `local/netron.sh`. 
\ No newline at end of file diff --git a/speechx/examples/ds2_ol/onnx/local/onnx_opt.sh b/speechx/examples/ds2_ol/onnx/local/onnx_opt.sh index db039a6e..ce2f24e5 100755 --- a/speechx/examples/ds2_ol/onnx/local/onnx_opt.sh +++ b/speechx/examples/ds2_ol/onnx/local/onnx_opt.sh @@ -17,4 +17,4 @@ input_shape=$3 check_n=3 -onnxsim $in $2 $check_n --dynamic-input-shape --input-shape $input_shape \ No newline at end of file +onnxsim $in $out $check_n --dynamic-input-shape --input-shape $input_shape \ No newline at end of file diff --git a/speechx/examples/ds2_ol/onnx/run.sh b/speechx/examples/ds2_ol/onnx/run.sh index 9ed2aa80..dda5a57a 100755 --- a/speechx/examples/ds2_ol/onnx/run.sh +++ b/speechx/examples/ds2_ol/onnx/run.sh @@ -17,11 +17,18 @@ mkdir -p $data $exp if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ];then test -f $data/asr0_deepspeech2_online_wenetspeech_ckpt_1.0.0a.model.tar.gz || wget -c https://paddlespeech.bj.bcebos.com/s2t/wenetspeech/asr0/asr0_deepspeech2_online_wenetspeech_ckpt_1.0.0a.model.tar.gz -P $data + # wenetspeech ds2 model pushd $data tar zxvf asr0_deepspeech2_online_wenetspeech_ckpt_1.0.0a.model.tar.gz popd -fi + # ds2 model demo inputs + pushd $exp + wget -c http://paddlespeech.bj.bcebos.com/speechx/examples/ds2_ol/onnx/static_ds2online_inputs.pickle + popd + + +fi dir=$data/exp/deepspeech2_online/checkpoints model=avg_1.jit.pdmodel @@ -46,22 +53,24 @@ if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ];then --model_filename $model \ --params_filename $param \ --save_dir $exp/shape \ - --input_shape_dict=${input_shape_dict} + --input_shape_dict="${input_shape_dict}" fi +input_file=$exp/static_ds2online_inputs.pickle +test -e $input_file if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ];then # to onnx ./local/tonnx.sh $dir $model $param $exp/model.onnx - ./local/infer_check.py --input_file 'static_ds2online_inputs.pickle' --model_dir $dir --onnx_model $exp/model.onnx + ./local/infer_check.py --input_file $input_file --model_dir $dir --onnx_model $exp/model.onnx fi if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ] ;then input_shape="audio_chunk:1,-1,161 audio_chunk_lens:1 chunk_state_c_box:5,1,1024 chunk_state_h_box:5,1,1024" # simplifying onnx model - ./local/onnx_opt.sh $exp/model.onnx $exp/model.opt.onnx $input_shape + ./local/onnx_opt.sh $exp/model.onnx $exp/model.opt.onnx "$input_shape" - ./local/infer_check.py --input_file 'static_ds2online_inputs.pickle' --model_dir $dir --onnx_model $exp/model.opt.onnx + ./local/infer_check.py --input_file $input_file --model_dir $dir --onnx_model $exp/model.opt.onnx fi \ No newline at end of file From b472a148dcf5cce9c238082c9f5ffc39fb31f3ef Mon Sep 17 00:00:00 2001 From: Hui Zhang Date: Wed, 15 Jun 2022 06:26:05 +0000 Subject: [PATCH 10/10] format --- speechx/examples/ds2_ol/onnx/README.md | 2 +- .../examples/ds2_ol/onnx/local/infer_check.py | 40 ++++++++----------- .../ds2_ol/onnx/local/onnx_infer_shape.py | 24 +++++------ .../ds2_ol/onnx/local/onnx_prune_model.py | 3 +- .../ds2_ol/onnx/local/onnx_rename_model.py | 1 - .../ds2_ol/onnx/local/pd_infer_shape.py | 1 + .../ds2_ol/onnx/local/pd_prune_model.py | 1 + utils/zh_tn.py | 2 +- 8 files changed, 33 insertions(+), 41 deletions(-) diff --git a/speechx/examples/ds2_ol/onnx/README.md b/speechx/examples/ds2_ol/onnx/README.md index bba5d610..566a4597 100644 --- a/speechx/examples/ds2_ol/onnx/README.md +++ b/speechx/examples/ds2_ol/onnx/README.md @@ -34,4 +34,4 @@ For more details please see `run.sh`. ## Outputs The optimized onnx model is `exp/model.opt.onnx`. 
-To show the graph, please use `local/netron.sh`. \ No newline at end of file +To show the graph, please use `local/netron.sh`. diff --git a/speechx/examples/ds2_ol/onnx/local/infer_check.py b/speechx/examples/ds2_ol/onnx/local/infer_check.py index 5484b314..307a764c 100755 --- a/speechx/examples/ds2_ol/onnx/local/infer_check.py +++ b/speechx/examples/ds2_ol/onnx/local/infer_check.py @@ -12,13 +12,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import argparse +import os +import pickle + import numpy as np import onnxruntime import paddle -import os -import pickle + def parse_args(): parser = argparse.ArgumentParser(description=__doc__) @@ -26,26 +27,19 @@ def parse_args(): '--input_file', type=str, default="static_ds2online_inputs.pickle", - help="ds2 input pickle file.", - ) + help="ds2 input pickle file.", ) parser.add_argument( - '--model_dir', - type=str, - default=".", - help="paddle model dir." - ) + '--model_dir', type=str, default=".", help="paddle model dir.") parser.add_argument( '--model_prefix', type=str, default="avg_1.jit", - help="paddle model prefix." - ) + help="paddle model prefix.") parser.add_argument( '--onnx_model', type=str, default='./model.old.onnx', - help="onnx model." - ) + help="onnx model.") return parser.parse_args() @@ -69,19 +63,19 @@ if __name__ == '__main__': paddle.to_tensor(audio_chunk), paddle.to_tensor(audio_chunk_lens), paddle.to_tensor(chunk_state_h_box), - paddle.to_tensor(chunk_state_c_box), - ) + paddle.to_tensor(chunk_state_c_box), ) # onnxruntime options = onnxruntime.SessionOptions() - options.enable_profiling=True + options.enable_profiling = True sess = onnxruntime.InferenceSession(FLAGS.onnx_model, sess_options=options) ort_res_chunk, ort_res_lens, ort_chunk_state_h, ort_chunk_state_c = sess.run( - ['softmax_0.tmp_0', 'tmp_5', 'concat_0.tmp_0', 'concat_1.tmp_0'], - {"audio_chunk": audio_chunk, - "audio_chunk_lens":audio_chunk_lens, - "chunk_state_h_box": chunk_state_h_box, - "chunk_state_c_box":chunk_state_c_box}) + ['softmax_0.tmp_0', 'tmp_5', 'concat_0.tmp_0', 'concat_1.tmp_0'], { + "audio_chunk": audio_chunk, + "audio_chunk_lens": audio_chunk_lens, + "chunk_state_h_box": chunk_state_h_box, + "chunk_state_c_box": chunk_state_c_box + }) print(sess.end_profiling()) @@ -89,4 +83,4 @@ if __name__ == '__main__': print(np.allclose(ort_res_chunk, res_chunk, atol=1e-6)) print(np.allclose(ort_res_lens, res_lens, atol=1e-6)) print(np.allclose(ort_chunk_state_h, chunk_state_h, atol=1e-6)) - print(np.allclose(ort_chunk_state_c, chunk_state_c, atol=1e-6)) \ No newline at end of file + print(np.allclose(ort_chunk_state_c, chunk_state_c, atol=1e-6)) diff --git a/speechx/examples/ds2_ol/onnx/local/onnx_infer_shape.py b/speechx/examples/ds2_ol/onnx/local/onnx_infer_shape.py index c5f83d3e..c41e66b7 100755 --- a/speechx/examples/ds2_ol/onnx/local/onnx_infer_shape.py +++ b/speechx/examples/ds2_ol/onnx/local/onnx_infer_shape.py @@ -1,5 +1,6 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. 
+# flake8: noqa import argparse import logging @@ -491,9 +492,6 @@ class SymbolicShapeInference: skip_infer = node.op_type in [ 'If', 'Loop', 'Scan', 'SplitToSequence', 'ZipMap', \ # contrib ops - - - 'Attention', 'BiasGelu', \ 'EmbedLayerNormalization', \ 'FastGelu', 'Gelu', 'LayerNormalization', \ @@ -1605,8 +1603,8 @@ class SymbolicShapeInference: def _infer_Scan(self, node): subgraph = get_attribute(node, 'body') num_scan_inputs = get_attribute(node, 'num_scan_inputs') - scan_input_axes = get_attribute(node, 'scan_input_axes', [0] * - num_scan_inputs) + scan_input_axes = get_attribute(node, 'scan_input_axes', + [0] * num_scan_inputs) num_scan_states = len(node.input) - num_scan_inputs scan_input_axes = [ handle_negative_axis( @@ -1627,8 +1625,8 @@ class SymbolicShapeInference: si.name = subgraph_name self._onnx_infer_subgraph(node, subgraph) num_scan_outputs = len(node.output) - num_scan_states - scan_output_axes = get_attribute(node, 'scan_output_axes', [0] * - num_scan_outputs) + scan_output_axes = get_attribute(node, 'scan_output_axes', + [0] * num_scan_outputs) scan_input_dim = get_shape_from_type_proto( self.known_vi_[node.input[-1]].type)[scan_input_axes[-1]] for i, o in enumerate(node.output): @@ -1821,8 +1819,8 @@ class SymbolicShapeInference: split = get_attribute(node, 'split') if not split: num_outputs = len(node.output) - split = [input_sympy_shape[axis] / sympy.Integer(num_outputs) - ] * num_outputs + split = [input_sympy_shape[axis] / + sympy.Integer(num_outputs)] * num_outputs self._update_computed_dims(split) else: split = [sympy.Integer(s) for s in split] @@ -2174,8 +2172,8 @@ class SymbolicShapeInference: subgraphs = [] if 'If' == node.op_type: subgraphs = [ - get_attribute(node, 'then_branch'), get_attribute( - node, 'else_branch') + get_attribute(node, 'then_branch'), + get_attribute(node, 'else_branch') ] elif node.op_type in ['Loop', 'Scan']: subgraphs = [get_attribute(node, 'body')] @@ -2330,8 +2328,8 @@ class SymbolicShapeInference: 'LessOrEqual', 'GreaterOrEqual' ]: shapes = [ - self._get_shape(node, i) for i in range( - len(node.input)) + self._get_shape(node, i) + for i in range(len(node.input)) ] if node.op_type in [ 'MatMul', 'MatMulInteger', 'MatMulInteger16' diff --git a/speechx/examples/ds2_ol/onnx/local/onnx_prune_model.py b/speechx/examples/ds2_ol/onnx/local/onnx_prune_model.py index f709b8f3..5b85eef3 100755 --- a/speechx/examples/ds2_ol/onnx/local/onnx_prune_model.py +++ b/speechx/examples/ds2_ol/onnx/local/onnx_prune_model.py @@ -1,13 +1,12 @@ #!/usr/bin/env python3 -W ignore::DeprecationWarning - # prune model by output names - import argparse import copy import sys import onnx + def parse_arguments(): parser = argparse.ArgumentParser() parser.add_argument( diff --git a/speechx/examples/ds2_ol/onnx/local/onnx_rename_model.py b/speechx/examples/ds2_ol/onnx/local/onnx_rename_model.py index 8724c84d..fc00a82e 100755 --- a/speechx/examples/ds2_ol/onnx/local/onnx_rename_model.py +++ b/speechx/examples/ds2_ol/onnx/local/onnx_rename_model.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -W ignore::DeprecationWarning - # rename node to new names import argparse import sys diff --git a/speechx/examples/ds2_ol/onnx/local/pd_infer_shape.py b/speechx/examples/ds2_ol/onnx/local/pd_infer_shape.py index 723137b8..c6e693c6 100755 --- a/speechx/examples/ds2_ol/onnx/local/pd_infer_shape.py +++ b/speechx/examples/ds2_ol/onnx/local/pd_infer_shape.py @@ -4,6 +4,7 @@ import argparse # paddle inference shape + def process_old_ops_desc(program): """set matmul op head_number attr 
to 1 is not exist. diff --git a/speechx/examples/ds2_ol/onnx/local/pd_prune_model.py b/speechx/examples/ds2_ol/onnx/local/pd_prune_model.py index 78346651..5386a971 100755 --- a/speechx/examples/ds2_ol/onnx/local/pd_prune_model.py +++ b/speechx/examples/ds2_ol/onnx/local/pd_prune_model.py @@ -6,6 +6,7 @@ from typing import List # paddle prune model. + def prepend_feed_ops(program, feed_target_names: List[str], feed_holder_name='feed'): diff --git a/utils/zh_tn.py b/utils/zh_tn.py index 73bb8af2..6fee626b 100755 --- a/utils/zh_tn.py +++ b/utils/zh_tn.py @@ -747,7 +747,7 @@ def num2chn(number_string, previous_symbol, (CNU, type(None))): if next_symbol.power != 1 and ( (previous_symbol is None) or - (previous_symbol.power != 1)): + (previous_symbol.power != 1)): # noqa: E129 result_symbols[i] = liang # if big is True, '两' will not be used and `alt_two` has no impact on output
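PATCH 10 is formatting only, so the series ends with the pipeline complete: prune, infer shapes, export with Paddle2ONNX, simplify, and check. For a quick standalone smoke test of the simplified model outside `run.sh`, a sketch along these lines could be used; the feed shapes mirror the `input_shape` string in `run.sh`, with the dynamic time axis arbitrarily fixed at 16, while the dtypes and zero-initialized states are assumptions rather than values taken from the patches:

```python
# Smoke-test sketch: run the simplified model once on synthetic inputs.
# Shapes follow run.sh's input_shape hints; dtypes and zero states are
# assumed, not taken from the patches.
import numpy as np
import onnxruntime

sess = onnxruntime.InferenceSession(
    "exp/model.opt.onnx", providers=["CPUExecutionProvider"])
feeds = {
    "audio_chunk": np.random.randn(1, 16, 161).astype(np.float32),
    "audio_chunk_lens": np.array([16], dtype=np.int64),
    "chunk_state_h_box": np.zeros((5, 1, 1024), dtype=np.float32),
    "chunk_state_c_box": np.zeros((5, 1, 1024), dtype=np.float32),
}
outs = sess.run(
    ["softmax_0.tmp_0", "tmp_5", "concat_0.tmp_0", "concat_1.tmp_0"], feeds)
for name, value in zip(["probs", "lens", "state_h", "state_c"], outs):
    print(name, value.shape)
```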