parent
6dfe7273e6
commit
3cf1f1f0b5
@ -0,0 +1,30 @@
|
||||
#!/usr/bin/env python3
|
||||
import argparse
|
||||
import onnx
|
||||
from onnx import version_converter, helper
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Convert an ONNX model file to a target opset version and save it.
    #
    # BUG FIXED: the original passed prog=__doc__, but this module has no
    # docstring, so prog was None; use a description instead. The original
    # --target-opset help text was also a copy-paste of the model-path help.
    parser = argparse.ArgumentParser(
        description="Convert an ONNX model to a different opset version."
    )
    parser.add_argument(
        "--model-file", type=str, required=True,
        help='path/to/the/model.onnx.',
    )
    parser.add_argument(
        "--save-model", type=str, required=True,
        help='path/to/saved/model.onnx.',
    )
    # Models must be opset10 or higher to be quantized.
    parser.add_argument(
        "--target-opset", type=int, default=11,
        help='opset version to convert the model to (default: 11).',
    )

    args = parser.parse_args()

    print(f"to opset: {args.target_opset}")

    # Preprocessing: load the model to be converted.
    original_model = onnx.load(args.model_file)

    # A full list of supported adapters can be found here:
    # https://github.com/onnx/onnx/blob/main/onnx/version_converter.py#L21
    # Apply the version conversion on the original model.
    converted_model = version_converter.convert_version(
        original_model, args.target_opset
    )

    onnx.save(converted_model, args.save_model)
|
@ -0,0 +1,43 @@
|
||||
#!/usr/bin/env python3
|
||||
import argparse
|
||||
import onnx
|
||||
from onnxruntime.quantization import quantize_dynamic, QuantType
|
||||
|
||||
def quantize_onnx_model(onnx_model_path, quantized_model_path, nodes_to_exclude=None):
    """Dynamically quantize an ONNX model's weights to int8.

    Args:
        onnx_model_path: path to the input float ONNX model.
        quantized_model_path: path where the quantized model is written.
        nodes_to_exclude: optional list of node names to leave unquantized;
            ``None`` (the default) means exclude nothing.

    Side effects:
        Writes the quantized model file and prints progress messages.
    """
    print("Starting quantization...")
    # Fix the mutable-default-argument pitfall: default to a fresh list
    # per call instead of a shared module-level list. Also drop the
    # redundant function-local re-import of quantize_dynamic/QuantType —
    # both are already imported at file top.
    if nodes_to_exclude is None:
        nodes_to_exclude = []

    quantize_dynamic(
        onnx_model_path,
        quantized_model_path,
        weight_type=QuantType.QInt8,
        nodes_to_exclude=nodes_to_exclude,
    )

    print(f"Quantized model saved to: {quantized_model_path}")
|
||||
|
||||
|
||||
def main():
    """Parse command-line arguments and dynamically quantize an ONNX model.

    BUG FIXED: --model-out declared both required=True and a default
    ('model.quant.onnx'); a required argument's default is dead code, so the
    default was removed. --nodes-to-exclude was required=True even though
    excluding no nodes is a valid use; it is now optional (backward-compatible
    — existing invocations that pass it still work), and empty entries from
    ''.split(',') (which yields ['']) are filtered out.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model-in",
        type=str,
        required=True,
        help="ONNX model",
    )
    parser.add_argument(
        "--model-out",
        type=str,
        required=True,
        help="ONNX model",
    )
    parser.add_argument(
        "--nodes-to-exclude",
        type=str,
        default="",
        help="nodes to exclude. e.g. conv,linear.",
    )

    args = parser.parse_args()

    # Drop empty names so "" or trailing commas don't produce a spurious
    # '' entry in the exclusion list.
    nodes_to_exclude = [n for n in args.nodes_to_exclude.split(",") if n]
    quantize_onnx_model(args.model_in, args.model_out, nodes_to_exclude)
|
||||
|
||||
# Script entry point: run only when executed directly, not when imported.
if __name__ == "__main__":
    main()
|
Loading…
Reference in new issue