Skip to content

Commit

Permalink
ONNX export: Fully connected operator w/o bias, ReduceSum, Square (ap…
Browse files Browse the repository at this point in the history
…ache#12646)

* ONNX export: Fully connected operator with no bias

* ONNX export: Helper function to convert bool string attributes to int

* ONNX export: ReduceSum operator

* ONNX import/export: Make pow backward compatible

* ONNX export: Square operator
  • Loading branch information
vandanavk authored and piyushghai committed Oct 19, 2018
1 parent 42cc591 commit f788cf8
Show file tree
Hide file tree
Showing 4 changed files with 179 additions and 17 deletions.
157 changes: 144 additions & 13 deletions python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
Original file line number Diff line number Diff line change
Expand Up @@ -127,6 +127,14 @@ def convert_string_to_list(string_val):

return result_list

def get_boolean_attribute_value(attrs, attr_name):
    """Convert a stringified Boolean attribute to the 0/1 integer form ONNX expects.

    Looks up *attr_name* in the *attrs* dictionary; values of "True" or "1"
    map to 1, anything else — including a missing key — maps to 0.
    """
    value = attrs.get(attr_name, 0)
    if value in ("True", "1"):
        return 1
    return 0

@mx_op.register("null")
def convert_weights_and_inputs(node, **kwargs):
"""Helper function to convert weights and inputs.
Expand Down Expand Up @@ -214,17 +222,42 @@ def convert_fully_connected(node, **kwargs):
onnx = import_onnx_modules()
name = node["name"]
inputs = node["inputs"]
attrs = node["attrs"]
initializer = kwargs["initializer"]

no_bias = get_boolean_attribute_value(attrs, "no_bias")

input_node_id = kwargs["index_lookup"][inputs[0][0]]
weight_node_id = kwargs["index_lookup"][inputs[1][0]]
bias_node_id = kwargs["index_lookup"][inputs[2][0]]

proc_nodes = kwargs["proc_nodes"]
input_node = proc_nodes[input_node_id]
weights_node = proc_nodes[weight_node_id]
bias_node = proc_nodes[bias_node_id]

input_node = proc_nodes[input_node_id]
input_name = input_node.name

weights_node = proc_nodes[weight_node_id]
weights_name = weights_node.name
bias_name = bias_node.name

fcnode = []

if no_bias == 0:
bias_node_id = kwargs["index_lookup"][inputs[2][0]]
bias_node = proc_nodes[bias_node_id]
bias_name = bias_node.name
else:
data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('int64')]
bias_name = "bias" + str(kwargs["idx"])
tensor_node = onnx.helper.make_tensor_value_info(bias_name, data_type, (1,))
initializer.append(
onnx.helper.make_tensor(
name=bias_name,
data_type=data_type,
dims=(1,),
vals=[0],
raw=False,
)
)
fcnode.append(tensor_node)

node = onnx.helper.make_node(
"Gemm",
Expand All @@ -237,7 +270,9 @@ def convert_fully_connected(node, **kwargs):
name=name
)

return [node]
fcnode.append(node)

return fcnode


@mx_op.register("BatchNorm")
Expand Down Expand Up @@ -587,10 +622,8 @@ def convert_dot(node, **kwargs):
trans_a_node = None
trans_b_node = None

trans_a = 1 if ("transpose_a" in attrs) and \
attrs.get("transpose_a") in ["True", "1"] else 0
trans_b = 1 if ("transpose_b" in attrs) and \
attrs.get("transpose_b") in ["True", "1"] else 0
trans_a = get_boolean_attribute_value(attrs, "transpose_a")
trans_b = get_boolean_attribute_value(attrs, "transpose_b")

op_name = "transpose" + str(kwargs["idx"])
create_helper_trans_node(op_name, input_node_a, 'a')
Expand Down Expand Up @@ -732,8 +765,8 @@ def convert_pooling(node, **kwargs):
kernel = eval(attrs["kernel"])
pool_type = attrs["pool_type"]
stride = eval(attrs["stride"]) if attrs.get("stride") else None
global_pool = True if "global_pool" in attrs and\
attrs.get("global_pool") == "True" else False
global_pool = get_boolean_attribute_value(attrs, "global_pool")

node_inputs = node["inputs"]
input_node_idx = kwargs["index_lookup"][node_inputs[0][0]]
input_node = proc_nodes[input_node_idx]
Expand Down Expand Up @@ -2053,7 +2086,31 @@ def convert_power(node, **kwargs):
"Pow",
[input_node_a, input_node_b],
[name],
name=None
name=name
)
return [node]

@mx_op.register("broadcast_power")
def convert_broadcast_power(node, **kwargs):
    """Map MXNet's broadcast_power operator to ONNX's Pow operator
    and return the created node.
    """
    onnx = import_onnx_modules()
    name = node["name"]
    lookup = kwargs["index_lookup"]
    proc_nodes = kwargs["proc_nodes"]
    node_inputs = node["inputs"]

    # Resolve the ONNX names of the two operands (base, exponent).
    base_name = proc_nodes[lookup[node_inputs[0][0]]].name
    exponent_name = proc_nodes[lookup[node_inputs[1][0]]].name

    pow_node = onnx.helper.make_node(
        "Pow",
        [base_name, exponent_name],
        [name],
        name=name
    )
    return [pow_node]

Expand Down Expand Up @@ -2127,3 +2184,77 @@ def convert_spacetodepth(node, **kwargs):
name=name,
)
return [node]

@mx_op.register("square")
def convert_square(node, **kwargs):
    """Map MXNet's square operator to ONNX's Pow operator with a constant
    exponent of 2, and return the created nodes.

    Appends the exponent constant to the graph initializer and returns the
    corresponding value_info together with the Pow node.
    """
    onnx = import_onnx_modules()
    name = node["name"]
    proc_nodes = kwargs["proc_nodes"]
    inputs = node["inputs"]

    input_node_a_id = kwargs["index_lookup"][inputs[0][0]]
    input_node_a = proc_nodes[input_node_a_id].name

    initializer = kwargs["initializer"]
    # ONNX Pow (opset 7) only supports floating-point tensors and requires
    # both operands to share an element type; an int64 exponent violates the
    # operator's type constraint, so emit the constant 2 as float32 to match
    # the (float) data input.
    data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('float32')]

    power2_name = "square_tensor" + str(kwargs["idx"])
    tensor_node = onnx.helper.make_tensor_value_info(power2_name, data_type, (1,))
    initializer.append(
        onnx.helper.make_tensor(
            name=power2_name,
            data_type=data_type,
            dims=(1,),
            vals=[2],
            raw=False,
        )
    )

    node = onnx.helper.make_node(
        "Pow",
        [input_node_a, power2_name],
        [name],
        name=name
    )
    return [tensor_node, node]

@mx_op.register("sum")
def convert_sum(node, **kwargs):
    """Map MXNet's sum operator attributes to ONNX's ReduceSum operator
    and return the created node.

    Handles the optional ``axis`` (reduce over all axes when absent) and
    ``keepdims`` attributes.
    """
    onnx = import_onnx_modules()
    name = node["name"]
    proc_nodes = kwargs["proc_nodes"]
    inputs = node["inputs"]
    # A symbol created without explicit attributes has no "attrs" key in its
    # JSON node, so fall back to an empty dict instead of raising KeyError.
    attrs = node.get("attrs", {})

    mx_axis = attrs.get("axis", None)
    axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None

    keepdims = get_boolean_attribute_value(attrs, "keepdims")

    input_node_id = kwargs["index_lookup"][inputs[0][0]]
    input_node = proc_nodes[input_node_id].name

    # Only pass "axes" when the user specified one; omitting it makes
    # ReduceSum reduce over all dimensions, matching the MXNet default.
    node_attrs = {"keepdims": keepdims, "name": name}
    if axes:
        node_attrs["axes"] = axes

    node = onnx.helper.make_node(
        'ReduceSum',
        inputs=[input_node],
        outputs=[name],
        **node_attrs
    )
    return [node]
11 changes: 8 additions & 3 deletions python/mxnet/contrib/onnx/onnx2mx/_op_translations.py
Original file line number Diff line number Diff line change
Expand Up @@ -534,10 +534,15 @@ def squareroot(attrs, inputs, proto_obj):
def power(attrs, inputs, proto_obj):
    """Returns element-wise result of base element raised to powers from exp element."""
    new_attrs = translation_utils._fix_attribute_names(attrs, {'exponent': 'exp'})
    if 'broadcast' not in attrs:
        # No broadcast attribute (newer opsets): always broadcast.
        return symbol.broadcast_power(inputs[0], inputs[1]), new_attrs, inputs
    new_attrs = translation_utils._remove_attributes(new_attrs, ['broadcast'])
    if attrs['broadcast'] == 1:
        return 'broadcast_power', new_attrs, inputs
    # broadcast explicitly disabled: element-wise pow.
    return symbol.pow(inputs[0], inputs[1]), new_attrs, inputs

def exponent(attrs, inputs, proto_obj):
"""Elementwise exponent of input array."""
Expand Down
26 changes: 25 additions & 1 deletion tests/python-pytest/onnx/export/mxnet_export_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -214,6 +214,30 @@ def test_spacetodepth():

npt.assert_almost_equal(output[0], numpy_op)

@with_seed()
def test_square():
    """Round-trip a square symbol through ONNX export/import and compare
    the forward-pass result against numpy.square."""
    data = np.random.randint(1, 10, (2, 3)).astype("float32")

    input_sym = mx.sym.Variable("input1")
    square_sym = mx.sym.square(data=input_sym)
    model = mx.mod.Module(symbol=square_sym, data_names=['input1'], label_names=None)
    model.bind(for_training=False, data_shapes=[('input1', np.shape(data))],
               label_shapes=None)
    model.init_params()

    arg_params, aux_params = model.get_params()
    params = dict(arg_params)
    params.update(aux_params)

    converted_model = onnx_mxnet.export_model(square_sym, params, [np.shape(data)],
                                              np.float32, "square.onnx")

    sym, arg, aux = onnx_mxnet.import_model(converted_model)
    result = forward_pass(sym, arg, aux, ['input1'], data)

    npt.assert_almost_equal(result, np.square(data))

if __name__ == '__main__':
test_models("bvlc_googlenet", (1, 3, 224, 224), (1, 1000))
test_models("bvlc_reference_caffenet", (1, 3, 224, 224), (1, 1000))
Expand All @@ -224,4 +248,4 @@ def test_spacetodepth():
test_model_accuracy("inception_v1", (1, 3, 224, 224))
test_model_accuracy("inception_v2", (1, 3, 224, 224))

unittest.main()
unittest.main()
2 changes: 2 additions & 0 deletions tests/python-pytest/onnx/export/onnx_backend_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,8 @@
'test_reduce_max',
'test_reduce_mean',
'test_reduce_prod',
'test_reduce_sum_d',
'test_reduce_sum_keepdims_random',
'test_squeeze',
'test_softmax_example',
'test_softmax_large_number',
Expand Down

0 comments on commit f788cf8

Please sign in to comment.