10 changes: 8 additions & 2 deletions python/tvm/relay/frontend/onnx.py
@@ -3720,12 +3720,14 @@ def _impl_v10(cls, inputs, attr, params):
         if attr["auto_pad"] in ("SAME_UPPER", "SAME_LOWER"):
             # Warning: Convolution does not yet support dynamic shapes,
             # one will need to run dynamic_to_static on this model after import
+            zp = fold_constant(x_zero_point)
+            assert isinstance(zp, relay.Constant), "Zero point expected to be a constant"
             data = autopad(
                 data,
                 attr.get("strides", [1] * (ndim - 2)),
                 attr["kernel_shape"],
                 attr.get("dilations", [1] * (ndim - 2)),
-                pad_value=x_zero_point.data,
+                pad_value=zp.data,
                 mode=attr["auto_pad"],
             )
         elif attr["auto_pad"] == "VALID":
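
The fold added above matters because x_zero_point can reach the converter as an arbitrary Relay expression rather than a literal, and only a relay.Constant exposes the concrete .data that autopad needs for its pad value. A minimal sketch of the same idea using TVM's generic constant-folding pass (hypothetical values, outside the converter):

    import tvm
    from tvm import relay

    # Hypothetical stand-in for a zero point that arrives as a computation,
    # not a literal constant.
    zp_expr = relay.add(relay.const(1, "int8"), relay.const(2, "int8"))
    mod = tvm.IRModule.from_expr(relay.Function([], zp_expr))
    mod = relay.transform.FoldConstant()(mod)

    folded = mod["main"].body
    assert isinstance(folded, relay.Constant)  # .data is now a concrete value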
@@ -5133,7 +5135,7 @@ def _fix_outputs(self, op_name, outputs):
 
 
 def from_onnx(
-    model, shape=None, dtype="float32", opset=None, freeze_params=False, convert_config=None
+    model, shape=None, dtype="float32", opset=None, freeze_params=True, convert_config=None
 ):
     """Convert an ONNX model into an equivalent Relay Function.
 
@@ -5223,4 +5225,8 @@ def from_onnx(
     # Use the graph proto as a scope so that ops can access other nodes if needed.
     with g:
         mod, params = g.from_onnx(graph, opset)
+
+    if freeze_params:
+        mod = relay.transform.DynamicToStatic()(mod)
+
     return mod, params
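
From the caller's side, flipping the default means a plain import now embeds the graph initializers as constants and runs DynamicToStatic before the module is returned. A minimal sketch of both paths (the model path and input shape are hypothetical):

    import onnx
    from tvm import relay

    onnx_model = onnx.load("model.onnx")  # hypothetical path

    # freeze_params now defaults to True: initializers are folded into the
    # graph as constants and DynamicToStatic runs before the module returns.
    mod, params = relay.frontend.from_onnx(onnx_model, shape={"input": (1, 3, 224, 224)})

    # Passing freeze_params=False restores the previous behavior: initializers
    # stay as named parameters and no dynamic-to-static conversion is applied.
    mod, params = relay.frontend.from_onnx(onnx_model, freeze_params=False)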
31 changes: 4 additions & 27 deletions tests/python/frontend/onnx/test_forward.py
@@ -54,7 +54,6 @@ def get_tvm_output_with_vm(
     dev,
     opset=None,
     freeze_params=False,
-    convert_to_static=False,
     convert_config=None,
 ):
     """Generic function to execute and get tvm output with vm executor"""
@@ -69,9 +68,6 @@ def get_tvm_output_with_vm(
         convert_config=convert_config,
     )
 
-    if convert_to_static:
-        mod = relay.transform.DynamicToStatic()(mod)
-
     result = relay.create_executor("vm", mod=mod, device=dev, target=target).evaluate()(
         *input_data, **params
     )
@@ -154,7 +150,6 @@ def verify_with_ort_with_inputs(
     use_vm=False,
     opset=None,
     freeze_params=False,
-    convert_to_static=False,
     dtype="float32",
     rtol=1e-5,
     atol=1e-5,
@@ -174,7 +169,6 @@ def verify_with_ort_with_inputs(
             dev,
             opset=opset,
             freeze_params=freeze_params,
-            convert_to_static=convert_to_static,
             convert_config=convert_config,
         )
     else:
@@ -211,7 +205,6 @@ def verify_with_ort(
     use_vm=False,
     opset=None,
     freeze_params=False,
-    convert_to_static=False,
     dtype="float32",
     rtol=1e-5,
     atol=1e-5,
@@ -226,7 +219,6 @@ def verify_with_ort(
         use_vm=use_vm,
         opset=opset,
         freeze_params=freeze_params,
-        convert_to_static=convert_to_static,
         dtype=dtype,
         rtol=rtol,
         atol=atol,
@@ -2221,7 +2213,6 @@ def verify_prelu(x_shape, a_shape):
         [x_shape, a_shape],
         out_shape=[list(x_shape)],
         use_vm=True,
-        convert_to_static=True,
         target=target,
         dev=dev,
     )
@@ -2705,7 +2696,6 @@ def verify_conv(
         [x_shape, w_shape],
         [y_shape],
         use_vm=True,
-        convert_to_static=True,
         target=target,
         dev=dev,
     )
@@ -2859,9 +2849,7 @@ def verify_convtranspose_with_padding(
 
     model = helper.make_model(graph, producer_name="convtranspose_pad_test")
 
-    verify_with_ort(
-        model, [x_shape, w_shape], use_vm=True, convert_to_static=True, target=target, dev=dev
-    )
+    verify_with_ort(model, [x_shape, w_shape], use_vm=True, target=target, dev=dev)
 
 def verify_convtranspose(x_shape, w_shape, y_shape, p, group=1):
     node = onnx.helper.make_node(
@@ -3042,7 +3030,6 @@ def verify_pooling(x_shape, kernel_shape, strides, pads, out_shape, mode, auto_p
         [x_shape],
         [out_shape],
         use_vm=False,
-        convert_to_static=True,
         target=target,
         dev=dev,
     )
@@ -3156,7 +3143,6 @@ def verify_global_pooling(x_shape, mode):
         [x_shape],
         [out_shape],
         use_vm=False,
-        convert_to_static=True,
         target=target,
         dev=dev,
     )
@@ -3488,7 +3474,6 @@ def verify_lppool(x_shape, kernel_shape, p, strides, pads, out_shape, auto_pad="
         [x_shape],
         [out_shape],
         use_vm=True,
-        convert_to_static=True,
         target=target,
         dev=dev,
     )
@@ -3592,9 +3577,7 @@ def verify_global_lppool(x_shape, p, out_shape, target, dev):
     )
 
     model = helper.make_model(graph, producer_name="global_lppool_test")
-    verify_with_ort(
-        model, [x_shape], out_shape, use_vm=True, convert_to_static=True, target=target, dev=dev
-    )
+    verify_with_ort(model, [x_shape], out_shape, use_vm=True, target=target, dev=dev)
 
 
 @tvm.testing.parametrize_targets
@@ -4676,7 +4659,6 @@ def verify_tensor_loop(shapeless_output=False):
         input_vals,
         use_vm=True,
         freeze_params=True,
-        convert_to_static=True,
         opset=11,
         target=target,
         dev=dev,
@@ -5272,7 +5254,6 @@ def verify_embedding_bag(num_embedding, embedding_dim, data_shape, num_bags=None
         onnx_model,
         tvm_inputs,
         freeze_params=True,
-        convert_to_static=True,
         target=target,
         dev=dev,
     )
@@ -5316,9 +5297,7 @@ def verify_index_put(data_shape, indices, accumulate):
     onnx_model = _convert_to_onnx(model, dummy_data)
     torch_out = model(dummy_data)
 
-    tvm_out = get_tvm_output_with_vm(
-        onnx_model, tvm_inputs, target, dev, freeze_params=True, convert_to_static=True
-    )
+    tvm_out = get_tvm_output_with_vm(onnx_model, tvm_inputs, target, dev, freeze_params=True)
     tvm.testing.assert_allclose(torch_out.numpy(), tvm_out)
 
     shape = (3, 5)
@@ -5347,9 +5326,7 @@ def verify_index_put_slice(data_shape, value_shape, accumulate):
     onnx_model = _convert_to_onnx(model, dummy_data)
     torch_out = model(dummy_data)
 
-    tvm_out = get_tvm_output_with_vm(
-        onnx_model, tvm_inputs, target, dev, freeze_params=True, convert_to_static=True
-    )
+    tvm_out = get_tvm_output_with_vm(onnx_model, tvm_inputs, target, dev, freeze_params=True)
     tvm.testing.assert_allclose(torch_out.numpy(), tvm_out)
 
     verify_index_put_slice((3, 3), (2, 2), False)
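
With the flag gone, requesting freeze_params=True (directly or via a helper default) is all a test needs; the dynamic-to-static conversion happens inside from_onnx. A minimal sketch of a test body in the style of this file, assuming verify_with_ort is in scope and target/dev come from the parametrized fixture (the graph itself is hypothetical):

    from onnx import TensorProto, helper

    # A toy single-node graph, mirroring how the tests above build models.
    node = helper.make_node("Relu", inputs=["x"], outputs=["y"])
    graph = helper.make_graph(
        [node],
        "relu_test",
        inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, [3, 4])],
        outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, [3, 4])],
    )
    model = helper.make_model(graph, producer_name="relu_test")

    # freeze_params alone now covers what freeze_params + convert_to_static did.
    verify_with_ort(model, [(3, 4)], use_vm=True, freeze_params=True, target=target, dev=dev)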