This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

onnx export operators added #13641

Closed
wants to merge 8 commits into from
Changes from 4 commits
65 changes: 60 additions & 5 deletions python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
@@ -630,12 +630,20 @@ def convert_exp(node, **kwargs):
return create_basic_op_node('Exp', node, kwargs)

@mx_op.register("_copy")
- def convert_identity(node, **kwargs):
+ def convert_copy(node, **kwargs):
"""Map MXNet's _copy operator attributes to onnx's Identity operator
and return the created node.
"""
return create_basic_op_node('Identity', node, kwargs)

@mx_op.register("identity")
def convert_identity(node, **kwargs):
"""Map MXNet's identity operator attributes to onnx's ConstantFill operator
and return the created node.
"""
return create_basic_op_node('ConstantFill', node, kwargs)


@mx_op.register("InstanceNorm")
def convert_instancenorm(node, **kwargs):
"""Map MXNet's InstanceNorm operator attributes to onnx's InstanceNormalization operator
@@ -726,6 +734,32 @@ def convert_softmax_output(node, **kwargs):

return [softmax_node]

@mx_op.register("LogisticRegressionOutput")
def convert_logistic_regression_output(node, **kwargs):
"""Map MXNet's SoftmaxOutput operator attributes to onnx's Softmax operator
and return the created node.
"""
name = node["name"]
input1_idx = kwargs["index_lookup"][node["inputs"][0][0]]
input1 = kwargs["proc_nodes"][input1_idx]
sigmoid_node = onnx.helper.make_node(
"Sigmoid",
[input1.name],
[name],
name=name
)
return [sigmoid_node]

@mx_op.register("BlockGrad")
def convert_blockgrad(node, **kwargs):
""" Skip operator """
return create_basic_op_node('ConstantFill', node, kwargs)

@mx_op.register("MakeLoss")
def convert_makeloss(node, **kwargs):
""" Skip operator """
return create_basic_op_node('ConstantFill', node, kwargs)


@mx_op.register("Concat")
def convert_concat(node, **kwargs):
@@ -872,6 +906,7 @@ def convert_clip(node, **kwargs):
def scalar_op_helper(node, op_name, **kwargs):
"""Helper function for scalar arithmetic operations"""
name, input_nodes, attrs = get_inputs(node, kwargs)
from onnx import numpy_helper

input_type = kwargs["in_type"]
scalar_value = np.array([attrs.get("scalar", 1)],
@@ -884,13 +919,19 @@ def scalar_op_helper(node, op_name, **kwargs):
for i in initializer:
if i.name == input_nodes[0]:
if op_name == 'Mul':
- new_initializer = onnx.numpy_helper.to_array(i) * scalar_value[0]
+ new_initializer = numpy_helper.to_array(i) * scalar_value[0]
elif op_name == 'Sub':
- new_initializer = onnx.numpy_helper.to_array(i) - scalar_value[0]
+ if name.startswith("_rminusscalar"):
+     new_initializer = scalar_value[0] - numpy_helper.to_array(i)
+ else:
+     new_initializer = numpy_helper.to_array(i) - scalar_value[0]
elif op_name == 'Add':
- new_initializer = onnx.numpy_helper.to_array(i) + scalar_value[0]
+ new_initializer = numpy_helper.to_array(i) + scalar_value[0]
elif op_name == 'Div':
- new_initializer = onnx.numpy_helper.to_array(i) / scalar_value[0]
+ if name.startswith("_rdivscalar"):
+     new_initializer = scalar_value[0] / numpy_helper.to_array(i)
+ else:
+     new_initializer = numpy_helper.to_array(i) / scalar_value[0]
flag = False
break

@@ -956,6 +997,13 @@ def convert_minus_scalar(node, **kwargs):
"""
return scalar_op_helper(node, 'Sub', **kwargs)

@mx_op.register("_rminus_scalar")
def convert_rminus_scalar(node, **kwargs):
"""Map MXNet's _rminus_scalar operator attributes to onnx's Sub operator.
Creates a new node for the input scalar value, adds it to the initializer
and return multiple created nodes.
"""
return scalar_op_helper(node, 'Sub', **kwargs)

# Convert scalar value into node and pass it as input to mul_node
@mx_op.register("_plus_scalar")
@@ -975,6 +1023,13 @@ def convert_div_scalar(node, **kwargs):
"""
return scalar_op_helper(node, 'Div', **kwargs)

@mx_op.register("_rdiv_scalar")
def convert_rdiv_scalar(node, **kwargs):
"""Map MXNet's _rdiv_scalar operator attributes to onnx's Div operator.
Creates a new node for the input scalar value, adds it to the initializer
and return multiple created nodes.
"""
return scalar_op_helper(node, 'Div', **kwargs)

# Sorting and Searching
@mx_op.register("argmax")
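For context (illustration only, not part of the diff): the reversed scalar operators handled above are what MXNet emits when the scalar appears on the left of an expression. A minimal sketch, with hypothetical names (ipsym, rminus.onnx):

import numpy as np
import mxnet as mx
from mxnet.contrib import onnx as onnx_mxnet

ipsym = mx.sym.Variable("ipsym")
rminus = 2 - ipsym   # lowers to MXNet's _rminus_scalar
rdiv = 2 / ipsym     # lowers to MXNet's _rdiv_scalar

# With the converters above registered, either symbol can be exported.
model_path = onnx_mxnet.export_model(rminus, {}, [(2, 3)], np.float32, "rminus.onnx")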
2 changes: 1 addition & 1 deletion python/mxnet/contrib/onnx/onnx2mx/_translation_utils.py
@@ -158,7 +158,7 @@ def _fix_broadcast(op_name, inputs, broadcast_axis, proto_obj):
assert len(list(inputs)) == 2

input0_shape = get_input_shape(inputs[0], proto_obj)
- #creating reshape shape
+ # creating reshape shape
reshape_shape = list(len(input0_shape) * (1,))
reshape_shape[broadcast_axis] = -1
reshape_shape = tuple(reshape_shape)
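Illustration only (not part of the diff) of the reshape shape computed in _fix_broadcast, assuming a rank-3 first input:

input0_shape = (2, 3, 4)                          # shape of inputs[0]
broadcast_axis = 1
reshape_shape = list(len(input0_shape) * (1,))    # [1, 1, 1]
reshape_shape[broadcast_axis] = -1                # [1, -1, 1]
# the second operand is reshaped to (1, -1, 1) so it broadcasts along axis 1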
67 changes: 67 additions & 0 deletions tests/python-pytest/onnx/export/mxnet_export_test.py
@@ -327,6 +327,73 @@ def test_softmax():
# Comparing result of forward pass before using onnx export, import
npt.assert_almost_equal(result, softmax_out)

def test_logisticRegressionOutput():
input1 = np.random.rand(1000, 1000).astype("float32")
label1 = np.random.rand(1000, 1000)
input_nd = mx.nd.array(input1)
label_nd = mx.nd.array(label1)

ipsym = mx.sym.Variable("ipsym")
label = mx.sym.Variable('label')
sym = mx.sym.LogisticRegressionOutput(data=ipsym, label=label)
ex = sym.bind(ctx=mx.cpu(0), args={'ipsym': input_nd, 'label': label_nd})
ex.forward(is_train=True)
logistic_out = ex.outputs[0].asnumpy()

converted_model = onnx_mxnet.export_model(sym, {}, [(1000, 1000), (1000, 1000)], np.float32, "logisticop.onnx")

sym, arg_params, aux_params = onnx_mxnet.import_model(converted_model)
result = forward_pass(sym, arg_params, aux_params, ['ipsym'], input1)

# Comparing result of forward pass before using onnx export, import
npt.assert_almost_equal(result, logistic_out)


def _test_scalar_op(input1, outsym, np_out):
model = mx.mod.Module(symbol=outsym, data_names=['input1'], label_names=None)
model.bind(for_training=False, data_shapes=[('input1', np.shape(input1))], label_shapes=None)
model.init_params()

args, auxs = model.get_params()
params = {}
params.update(args)
params.update(auxs)

converted_model = onnx_mxnet.export_model(outsym, params, [np.shape(input1)], np.float32,
onnx_file_path=outsym.name+".onnx")

sym, arg_params, aux_params = onnx_mxnet.import_model(converted_model)
result = forward_pass(sym, arg_params, aux_params, ['input1'], input1)

npt.assert_almost_equal(result, np_out)

@with_seed()
def test_scalarops():
input1 = np.random.randint(1, 10, (2, 3)).astype("float32")
ipsym = mx.sym.Variable("input1")
operators = ['Add', 'Sub', 'rSub', 'Mul', 'Div', 'rDiv']
for op in operators:
if op == 'Add':
out = 2 + ipsym
np_out = np.add(2, input1)
_test_scalar_op(input1, out, np_out)
if op == "Sub":
out = ipsym - 2
np_out = np.subtract(input1, 2)
_test_scalar_op(input1, out, np_out)
if op == "rSub":
out = 2 - ipsym
np_out = np.subtract(2, input1)
_test_scalar_op(input1, out, np_out)
if op == "Mul":
out = 2 * ipsym
np_out = np.multiply(2, input1)
_test_scalar_op(input1, out, np_out)
if op == "Div":
np_out = input1/2
out = ipsym / 2
_test_scalar_op(input1, out, np_out)

@with_seed()
def test_comparison_ops():
"""Test greater, lesser, equal"""
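For reference (not part of the diff): assuming a standard MXNet source checkout, the new test cases can be run on their own with pytest, e.g.:

python -m pytest tests/python-pytest/onnx/export/mxnet_export_test.py -k "scalarops or logisticRegression" -v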