diff --git a/python/mxnet/contrib/onnx/__init__.py b/python/mxnet/contrib/onnx/__init__.py
index 9f27060d3d6f..30ac62bb1300 100644
--- a/python/mxnet/contrib/onnx/__init__.py
+++ b/python/mxnet/contrib/onnx/__init__.py
@@ -16,6 +16,38 @@
 # under the License.
 
 """Module for ONNX model format support for Apache MXNet."""
-from .onnx2mx.import_model import import_model, get_model_metadata
-from .onnx2mx.import_to_gluon import import_to_gluon
-from .mx2onnx.export_model import export_model
+from .onnx2mx.import_model import import_model as import_model_
+from .onnx2mx.import_model import get_model_metadata as get_model_metadata_
+from .onnx2mx.import_to_gluon import import_to_gluon as import_to_gluon_
+from ...onnx import export_model as export_model_
+
+def import_model(*args, **kwargs):
+    print('Calling mxnet.contrib.onnx.import_model...')
+    print('Please be advised that importing ONNX models into MXNet is going to be deprecated '
+          'in the upcoming MXNet v1.10 release. The following APIs will be deleted: '
+          'mxnet.contrib.onnx.import_model/get_model_metadata/import_to_gluon.')
+    return import_model_(*args, **kwargs)
+
+
+def get_model_metadata(*args, **kwargs):
+    print('Calling mxnet.contrib.onnx.get_model_metadata...')
+    print('Please be advised that importing ONNX models into MXNet is going to be deprecated '
+          'in the upcoming MXNet v1.10 release. The following APIs will be deleted: '
+          'mxnet.contrib.onnx.import_model/get_model_metadata/import_to_gluon.')
+    return get_model_metadata_(*args, **kwargs)
+
+
+def import_to_gluon(*args, **kwargs):
+    print('Calling mxnet.contrib.onnx.import_to_gluon...')
+    print('Please be advised that importing ONNX models into MXNet is going to be deprecated '
+          'in the upcoming MXNet v1.10 release. The following APIs will be deleted: '
+          'mxnet.contrib.onnx.import_model/get_model_metadata/import_to_gluon.')
+    return import_to_gluon_(*args, **kwargs)
+
+
+def export_model(*args, **kwargs):
+    print('Calling mxnet.contrib.onnx.export_model...')
+    print('Please be advised that the ONNX module has been moved to mxnet.onnx and '
+          'mxnet.onnx.export_model is the preferred path. The current path will be deprecated '
+          'in the upcoming MXNet v1.10 release.')
+    return export_model_(*args, **kwargs)
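The shims above keep every 1.x `mx.contrib.onnx` entry point working while steering callers to the new locations. A minimal sketch of what a caller sees during migration, with `model.onnx` as a hypothetical file name:

```python
import mxnet as mx

# Old entry point: still functional, but prints the deprecation notice first.
sym, arg_params, aux_params = mx.contrib.onnx.import_model('model.onnx')

# Export has moved wholesale: prefer mx.onnx.export_model over
# mx.contrib.onnx.export_model, which now merely forwards to it.
```

Because the wrappers forward `*args, **kwargs` unchanged, call sites only need to change the module path.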
diff --git a/python/mxnet/onnx/README.md b/python/mxnet/onnx/README.md
new file mode 100644
index 000000000000..e46a3328b57a
--- /dev/null
+++ b/python/mxnet/onnx/README.md
@@ -0,0 +1,37 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# ONNX Export Support for MXNet
+
+### Overview
+[ONNX](https://onnx.ai/), or Open Neural Network Exchange, is an open-source deep learning model format that acts as a framework-neutral graph representation between DL frameworks or between training and inference. With the ability to export models to the ONNX format, MXNet users can enjoy faster inference and a wider range of deployment device choices, including edge and mobile devices where MXNet installation may be constrained. Popular hardware-accelerated and/or cross-platform ONNX runtime frameworks include Nvidia [TensorRT](https://github.com/onnx/onnx-tensorrt), Microsoft [ONNXRuntime](https://github.com/microsoft/onnxruntime), Apple [CoreML](https://github.com/onnx/onnx-coreml), and [TVM](https://tvm.apache.org/docs/tutorials/frontend/from_onnx.html).
+
+### ONNX Versions Supported
+ONNX 1.7 -- Fully Supported
+ONNX 1.8 -- Work in Progress
+
+### Installation
+From the 1.9 release onward, the ONNX export module has become an official, built-in module in MXNet. You can access the module at `mxnet.onnx`.
+
+If you use an earlier MXNet version and do not want to upgrade, you can still enjoy the latest ONNX support by pulling the MXNet source code and building a wheel for just the mx2onnx module: run `cd python/mxnet/onnx`, then build the wheel with `python3 -m build`. You should find the wheel under `python/mxnet/onnx/dist/mx2onnx-0.0.0-py3-none-any.whl`; install it with `pip install mx2onnx-0.0.0-py3-none-any.whl`, after which the module is available via `import mx2onnx`.
+
+### APIs
+
+### Operator Support Matrix - ONNX 1.7
+
+### GluonCV Pretrained Model Support Matrix
+
+### GluonNLP Pretrained Model Support Matrix
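Per the installation notes in the README above, the standalone wheel exposes the converter as a top-level `mx2onnx` package rather than under `mxnet.onnx`. A minimal sketch, assuming hypothetical model file names and that the package re-exports `export_model` as the `mx2onnx/__init__.py` change below suggests:

```python
import numpy as np
import mx2onnx  # installed from the standalone wheel built with python3 -m build

# Same signature as mxnet.onnx.export_model; file names and shapes are illustrative.
mx2onnx.export_model('model-symbol.json', 'model-0000.params',
                     [(1, 3, 224, 224)], [np.float32], 'model.onnx')
```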
diff --git a/python/mxnet/onnx/__init__.py b/python/mxnet/onnx/__init__.py
new file mode 100644
index 000000000000..3caab21a6202
--- /dev/null
+++ b/python/mxnet/onnx/__init__.py
@@ -0,0 +1,21 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# coding: utf-8
+"""ONNX Export module"""
+
+from .mx2onnx import export_model
diff --git a/python/mxnet/contrib/onnx/mx2onnx/LICENSE b/python/mxnet/onnx/mx2onnx/LICENSE
similarity index 100%
rename from python/mxnet/contrib/onnx/mx2onnx/LICENSE
rename to python/mxnet/onnx/mx2onnx/LICENSE
diff --git a/python/mxnet/contrib/onnx/mx2onnx/__init__.py b/python/mxnet/onnx/mx2onnx/__init__.py
similarity index 94%
rename from python/mxnet/contrib/onnx/mx2onnx/__init__.py
rename to python/mxnet/onnx/mx2onnx/__init__.py
index 779ce86621d6..d8a6d5a50a31 100644
--- a/python/mxnet/contrib/onnx/mx2onnx/__init__.py
+++ b/python/mxnet/onnx/mx2onnx/__init__.py
@@ -18,6 +18,5 @@
 # coding: utf-8
 """ONNX Export module"""
 
-from . import export_model
-from . import export_onnx
+from ._export_model import export_model
 from . import _op_translations
diff --git a/python/mxnet/contrib/onnx/mx2onnx/_export_helper.py b/python/mxnet/onnx/mx2onnx/_export_helper.py
similarity index 100%
rename from python/mxnet/contrib/onnx/mx2onnx/_export_helper.py
rename to python/mxnet/onnx/mx2onnx/_export_helper.py
diff --git a/python/mxnet/contrib/onnx/mx2onnx/export_model.py b/python/mxnet/onnx/mx2onnx/_export_model.py
similarity index 98%
rename from python/mxnet/contrib/onnx/mx2onnx/export_model.py
rename to python/mxnet/onnx/mx2onnx/_export_model.py
index 1c50db56058c..d9be998ed24f 100644
--- a/python/mxnet/contrib/onnx/mx2onnx/export_model.py
+++ b/python/mxnet/onnx/mx2onnx/_export_model.py
@@ -22,9 +22,9 @@
 import logging
 import numpy as np
-from ....base import string_types
-from .... import symbol
-from .export_onnx import MXNetGraph
+from mxnet.base import string_types
+from mxnet import symbol
+from ._export_onnx import MXNetGraph
 from ._export_helper import load_module
diff --git a/python/mxnet/contrib/onnx/mx2onnx/export_onnx.py b/python/mxnet/onnx/mx2onnx/_export_onnx.py
similarity index 99%
rename from python/mxnet/contrib/onnx/mx2onnx/export_onnx.py
rename to python/mxnet/onnx/mx2onnx/_export_onnx.py
index 4cec6985a9c7..903b0cd1c51f 100644
--- a/python/mxnet/contrib/onnx/mx2onnx/export_onnx.py
+++ b/python/mxnet/onnx/mx2onnx/_export_onnx.py
@@ -50,7 +50,7 @@
 import logging
 import json
-from .... import ndarray as nd
+from mxnet import ndarray as nd
 
 class MXNetGraph(object):
diff --git a/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py b/python/mxnet/onnx/mx2onnx/_op_translations.py
similarity index 99%
rename from python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
rename to python/mxnet/onnx/mx2onnx/_op_translations.py
index eb91a3c5384a..ef65fa25a39b 100644
--- a/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
+++ b/python/mxnet/onnx/mx2onnx/_op_translations.py
@@ -56,7 +56,7 @@
 import re
 import logging
 import numpy as np
-from .export_onnx import MXNetGraph as mx_op
+from ._export_onnx import MXNetGraph as mx_op
 try:
     import onnx
 except ImportError:
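The renames above make the converter internals private (`_export_model.py`, `_export_onnx.py`, `_op_translations.py`) and move them out of `contrib`. For downstream code, the import paths map roughly as follows; the `MXNetGraph` line is the one the test backend below now relies on:

```python
# Old internal layout (pre-1.9):
#   from mxnet.contrib.onnx.mx2onnx.export_onnx import MXNetGraph
#   from mxnet.contrib.onnx.mx2onnx.export_model import export_model

# New layout introduced by this change:
from mxnet.onnx.mx2onnx._export_onnx import MXNetGraph  # internal graph converter
from mxnet.onnx import export_model                     # public entry point
```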
+ +""" +setup.py for mx2onnx +""" + +from setuptools import setup, find_packages + +setup( + name='mx2onnx', + version='0.0.0', + description='Module to convert MXNet models to the ONNX format', + author='', + author_email='', + url='https://github.com/apache/incubator-mxnet/tree/v1.x/python/mxnet/onnx', + install_requires=[ + 'onnx >= 1.7.0', + ], + classifiers=[ + 'Intended Audience :: Developers', + 'License :: OSI Approved :: Apache Software License', + 'Programming Language :: Python :: 3 :: Only', + ], + packages=find_packages(), + python_requires='>=3.6' +) diff --git a/tests/python-pytest/onnx/backend.py b/tests/python-pytest/onnx/backend.py index 6d8b1af6baff..d294b93b3cff 100644 --- a/tests/python-pytest/onnx/backend.py +++ b/tests/python-pytest/onnx/backend.py @@ -19,7 +19,7 @@ """MXNet/Gluon backend wrapper for onnx test infrastructure""" from mxnet.contrib.onnx.onnx2mx.import_onnx import GraphProto -from mxnet.contrib.onnx.mx2onnx.export_onnx import MXNetGraph +from mxnet.onnx.export_onnx import MXNetGraph import mxnet as mx import numpy as np diff --git a/tests/python-pytest/onnx/test_onnxruntime_cv.py b/tests/python-pytest/onnx/test_onnxruntime_cv.py index 4e455147dcdd..393bd70de265 100644 --- a/tests/python-pytest/onnx/test_onnxruntime_cv.py +++ b/tests/python-pytest/onnx/test_onnxruntime_cv.py @@ -50,15 +50,15 @@ def export(self): def export_onnx(self): onnx_file = self.modelpath + ".onnx" - mx.contrib.onnx.export_model(self.modelpath + "-symbol.json", self.modelpath + "-0000.params", - [self.input_shape], self.input_dtype, onnx_file) + mx.onnx.export_model(self.modelpath + "-symbol.json", self.modelpath + "-0000.params", + [self.input_shape], self.input_dtype, onnx_file) return onnx_file def export_onnx_dynamic(self, dynamic_input_shapes): onnx_file = self.modelpath + ".onnx" - mx.contrib.onnx.export_model(self.modelpath + "-symbol.json", self.modelpath + "-0000.params", - [self.input_shape], self.input_dtype, onnx_file, dynamic=True, - dynamic_input_shapes=dynamic_input_shapes) + mx.onnx.export_model(self.modelpath + "-symbol.json", self.modelpath + "-0000.params", + [self.input_shape], self.input_dtype, onnx_file, dynamic=True, + dynamic_input_shapes=dynamic_input_shapes) return onnx_file def predict(self, data): diff --git a/tests/python-pytest/onnx/test_onnxruntime_nlp.py b/tests/python-pytest/onnx/test_onnxruntime_nlp.py index ecd94df1e630..d2d5f58b6972 100644 --- a/tests/python-pytest/onnx/test_onnxruntime_nlp.py +++ b/tests/python-pytest/onnx/test_onnxruntime_nlp.py @@ -63,9 +63,9 @@ def test_roberta_inference_onnxruntime(tmp_path, model_name): params_file = "%s-0000.params" % prefix onnx_file = "%s.onnx" % prefix input_shapes = [(batch, seq_length), (batch,), (batch, num_masked_positions)] - converted_model_path = mx.contrib.onnx.export_model(sym_file, params_file, input_shapes, - [np.float32, np.float32, np.int32], - onnx_file, verbose=True) + converted_model_path = mx.onnx.export_model(sym_file, params_file, input_shapes, + [np.float32, np.float32, np.int32], + onnx_file, verbose=True) sess_options = onnxruntime.SessionOptions() sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL @@ -120,7 +120,7 @@ def test_bert_inference_onnxruntime(tmp_path, model): input_shapes = [(batch, seq_length), (batch, seq_length), (batch,)] input_types = [np.float32, np.float32, np.float32] - converted_model_path = mx.contrib.onnx.export_model(sym_file, params_file, input_shapes, input_types, onnx_file) + converted_model_path = 
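`export_onnx_dynamic` above threads `dynamic=True` and `dynamic_input_shapes` through to the converter so the exported graph accepts, for example, a variable batch size. A minimal sketch of that call, assuming hypothetical `net-symbol.json`/`net-0000.params` files; `None` marks a dimension as dynamic, mirroring the dynamic-shape BERT test further down:

```python
import numpy as np
import mxnet as mx

# Pin every dimension except the batch axis.
dynamic_input_shapes = [(None, 3, 224, 224)]

mx.onnx.export_model('net-symbol.json', 'net-0000.params',
                     [(1, 3, 224, 224)], np.float32, 'net.onnx',
                     dynamic=True, dynamic_input_shapes=dynamic_input_shapes)
```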
diff --git a/tests/python-pytest/onnx/test_onnxruntime_nlp.py b/tests/python-pytest/onnx/test_onnxruntime_nlp.py
index ecd94df1e630..d2d5f58b6972 100644
--- a/tests/python-pytest/onnx/test_onnxruntime_nlp.py
+++ b/tests/python-pytest/onnx/test_onnxruntime_nlp.py
@@ -63,9 +63,9 @@ def test_roberta_inference_onnxruntime(tmp_path, model_name):
     params_file = "%s-0000.params" % prefix
     onnx_file = "%s.onnx" % prefix
     input_shapes = [(batch, seq_length), (batch,), (batch, num_masked_positions)]
-    converted_model_path = mx.contrib.onnx.export_model(sym_file, params_file, input_shapes,
-                                                        [np.float32, np.float32, np.int32],
-                                                        onnx_file, verbose=True)
+    converted_model_path = mx.onnx.export_model(sym_file, params_file, input_shapes,
+                                                [np.float32, np.float32, np.int32],
+                                                onnx_file, verbose=True)
 
     sess_options = onnxruntime.SessionOptions()
     sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
@@ -120,7 +120,7 @@ def test_bert_inference_onnxruntime(tmp_path, model):
     input_shapes = [(batch, seq_length), (batch, seq_length), (batch,)]
     input_types = [np.float32, np.float32, np.float32]
 
-    converted_model_path = mx.contrib.onnx.export_model(sym_file, params_file, input_shapes, input_types, onnx_file)
+    converted_model_path = mx.onnx.export_model(sym_file, params_file, input_shapes, input_types, onnx_file)
 
     # create onnxruntime session using the generated onnx file
 
@@ -169,9 +169,9 @@ def test_distilbert_inference_onnxruntime(tmp_path, model_name):
     onnx_file = "%s.onnx" % prefix
     input_shapes = [(batch, seq_length), (batch,)]
-    converted_model_path = mx.contrib.onnx.export_model(sym_file, params_file, input_shapes,
-                                                        [np.float32, np.float32],
-                                                        onnx_file, verbose=True)
+    converted_model_path = mx.onnx.export_model(sym_file, params_file, input_shapes,
+                                                [np.float32, np.float32],
+                                                onnx_file, verbose=True)
 
     sess_options = onnxruntime.SessionOptions()
     sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
     sess = onnxruntime.InferenceSession(onnx_file, sess_options)
@@ -219,9 +219,9 @@ def test_standard_rnn_lstm_pretrained_inference_onnxruntime(tmp_path, model_name
     onnx_file = "%s.onnx" % prefix
     input_shapes = [(seq_length, batch), np.shape(begin_state[0]), np.shape(begin_state[1])]
-    converted_model_path = mx.contrib.onnx.export_model(sym_file, params_file, input_shapes,
-                                                        [np.float32, np.float32, np.float32],
-                                                        onnx_file, verbose=True)
+    converted_model_path = mx.onnx.export_model(sym_file, params_file, input_shapes,
+                                                [np.float32, np.float32, np.float32],
+                                                onnx_file, verbose=True)
 
     sess_options = onnxruntime.SessionOptions()
     sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
     sess = onnxruntime.InferenceSession(onnx_file, sess_options)
@@ -278,10 +278,10 @@ def test_dynamic_shape_bert_inference_onnxruntime(tmp_path, model):
     dynamic_input_shapes = [(None, seq_length), (None, seq_length), (None,)]
     input_shapes = [(batch, seq_length), (batch, seq_length), (batch,)]
     input_types = [np.float32, np.float32, np.float32]
-    converted_model_path = mx.contrib.onnx.export_model(sym_file, params_file, input_shapes,
-                                                        input_types, onnx_file,
-                                                        dynamic=True,
-                                                        dynamic_input_shapes=dynamic_input_shapes)
+    converted_model_path = mx.onnx.export_model(sym_file, params_file, input_shapes,
+                                                input_types, onnx_file,
+                                                dynamic=True,
+                                                dynamic_input_shapes=dynamic_input_shapes)
 
     # create onnxruntime session using the generated onnx file
     ses_opt = onnxruntime.SessionOptions()
@@ -345,8 +345,8 @@ def test_awd_rnn_lstm_pretrained_inference_onnxruntime(tmp_path, model_name, seq
                     np.shape(begin_state[2][0]), np.shape(begin_state[2][1])]
     input_types = [np.float32, np.float32, np.float32, np.float32, np.float32, np.float32,
                    np.float32]
-    converted_model_path = mx.contrib.onnx.export_model(sym_file, params_file, input_shapes,
-                                                        input_types, onnx_file, verbose=True)
+    converted_model_path = mx.onnx.export_model(sym_file, params_file, input_shapes,
+                                                input_types, onnx_file, verbose=True)
 
     sess_options = onnxruntime.SessionOptions()
     sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
@@ -408,8 +408,8 @@ def test_ernie_inference_onnxruntime(tmp_path, model_name):
     input_shapes = [(batch, seq_length), (batch, seq_length), (batch,)]
     input_types = [np.float32, np.float32, np.float32]
 
-    converted_model_path = mx.contrib.onnx.export_model(sym_file, params_file, input_shapes,
-                                                        input_types, onnx_file)
+    converted_model_path = mx.onnx.export_model(sym_file, params_file, input_shapes,
+                                                input_types, onnx_file)
 
     # create onnxruntime session using the generated onnx file
     ses_opt = onnxruntime.SessionOptions()
@@ -475,8 +475,8 @@ def export_to_onnx(prefix, input_shapes, input_types, **kwargs):
     sym_file = "%s-symbol.json" % prefix
     params_file = "%s-0000.params" % prefix
     onnx_file = "%s.onnx" % prefix
-    return mx.contrib.onnx.export_model(sym_file, params_file, input_shapes, input_types,
-                                        onnx_file, **kwargs)
+    return mx.onnx.export_model(sym_file, params_file, input_shapes, input_types,
+                                onnx_file, **kwargs)
 
 def onnx_runtime_predict(onnx_file, onnx_inputs):
     ses_opt = onnxruntime.SessionOptions()
@@ -650,8 +650,8 @@ def test_gpt_pretrained_inference_onnxruntime(tmp_path, model_params):
     input_shapes = [(batch, seq_length)]
     input_types = [np.float32]
 
-    converted_model_path = mx.contrib.onnx.export_model(sym_file, params_file, input_shapes,
-                                                        input_types, onnx_file)
+    converted_model_path = mx.onnx.export_model(sym_file, params_file, input_shapes,
+                                                input_types, onnx_file)
 
     ses_opt = onnxruntime.SessionOptions()
     ses_opt.log_severity_level = 3
diff --git a/tests/python-pytest/onnx/test_operators.py b/tests/python-pytest/onnx/test_operators.py
index 51170e671a43..b032fa7fc1bd 100644
--- a/tests/python-pytest/onnx/test_operators.py
+++ b/tests/python-pytest/onnx/test_operators.py
@@ -46,8 +46,8 @@ def export_to_onnx(model, model_name, inputs):
     sym_file = '{}-symbol.json'.format(model_path)
     params_file = '{}-0000.params'.format(model_path)
     onnx_file = '{}/{}.onnx'.format(tmp_path, model_name)
-    mx.contrib.onnx.export_model(sym_file, params_file, [inp.shape for inp in inputs],
-                                 [inp.dtype for inp in inputs], onnx_file)
+    mx.onnx.export_model(sym_file, params_file, [inp.shape for inp in inputs],
+                         [inp.dtype for inp in inputs], onnx_file)
     return onnx_file
 
 def onnx_rt(onnx_file, inputs):
diff --git a/tools/license_header.py b/tools/license_header.py
index a745ea20fc7d..71b28115b3e5 100755
--- a/tools/license_header.py
+++ b/tools/license_header.py
@@ -122,6 +122,10 @@
     # This file
     'tools/license_header.py',
 
+    # Dual-Licensed under Apache 2.0 and Nvidia BSD-3
+    'python/mxnet/onnx/mx2onnx/_export_onnx.py',
+    'python/mxnet/onnx/mx2onnx/_op_translations.py',
+
     # Github template
     '.github/ISSUE_TEMPLATE/bug_report.md',
     '.github/ISSUE_TEMPLATE/feature_request.md',
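Stepping back from the individual hunks: once a model is exported through any of the paths above, the tests consume it through the same ONNX Runtime pattern. A condensed sketch of that pattern, with the model file and input array as hypothetical stand-ins:

```python
import numpy as np
import onnxruntime

sess_options = onnxruntime.SessionOptions()
sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
sess = onnxruntime.InferenceSession('model.onnx', sess_options)

# Inputs are bound by the names recorded in the ONNX graph.
input_name = sess.get_inputs()[0].name
data = np.random.rand(1, 32).astype(np.float32)
outputs = sess.run(None, {input_name: data})
```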