diff --git a/python/tvm/relay/frontend/pytorch.py b/python/tvm/relay/frontend/pytorch.py
index 42765d2fb0d8..683b94dd9290 100644
--- a/python/tvm/relay/frontend/pytorch.py
+++ b/python/tvm/relay/frontend/pytorch.py
@@ -4291,7 +4291,15 @@ def _handel_nested_input(inputs):
 
             self.current_op.pop()
 
-        return [_wrap_const(outputs[ret_name]) for ret_name in ret_names]
+        # TODO(@haoyang9804): outputs[ret_name] could be None and cause some issue
+        # revealed by https://github.com/apache/tvm/issues/15004
+        # Now only adaptive_max_pool1d is considered. Maybe other ops could also
+        # trigger this problem.
+        return [
+            _wrap_const(outputs[ret_name])
+            for ret_name in ret_names
+            if ret_name != "aten::adaptive_max_pool1d_0_1"
+        ]
 
     def _set_parameter_source_name(self, op_node, outputs):
         """A helper function to rewrite source_name of parameter."""
diff --git a/tests/python/frontend/pytorch/test_forward.py b/tests/python/frontend/pytorch/test_forward.py
index cb49e837fe6e..8c1cdbb0cf0b 100644
--- a/tests/python/frontend/pytorch/test_forward.py
+++ b/tests/python/frontend/pytorch/test_forward.py
@@ -3439,6 +3439,16 @@ def forward(self, *args):
     verify_model(Full2().float().eval(), input_data=[])
 
 
+@tvm.testing.uses_gpu
+def test_forward_adaptive_max_pool1d():
+    """test_forward_adaptive_max_pool1d"""
+    torch.set_grad_enabled(False)
+    input_data = [torch.randn([2, 2, 4], dtype=torch.float32)]
+    m = torch.nn.AdaptiveMaxPool1d(3)
+
+    verify_model(m.float().eval(), input_data=input_data)
+
+
 @tvm.testing.uses_gpu
 def test_forward_full_like():
     """test_forward_full_like"""
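
For context (not part of the patch): a minimal sketch of why `aten::adaptive_max_pool1d` leaves a dangling return name for the converter. The PyTorch op produces both pooled values and argmax indices; when `AdaptiveMaxPool1d` is built without `return_indices=True`, the indices output still appears in the traced graph but is never consumed, so the frontend ends up with nothing to wrap for it. The snippet only illustrates the PyTorch-side behavior; the assumption that the `_0_1` suffix in the skipped ret_name refers to this second (indices) output is an interpretation, and the exact graph layout can vary by PyTorch version.

```python
import torch

# Same input shape as the new test: (N=2, C=2, L=4), pooled down to length 3.
x = torch.randn(2, 2, 4)

# Without return_indices, the module hands back only the pooled values.
pool = torch.nn.AdaptiveMaxPool1d(3)
print(pool(x).shape)  # torch.Size([2, 2, 3])

# With return_indices=True, the op also yields the argmax indices; this second
# tensor is the extra output the Relay frontend has no expression for when
# indices are never requested.
values, indices = torch.nn.AdaptiveMaxPool1d(3, return_indices=True)(x)
print(values.shape, indices.shape)  # both torch.Size([2, 2, 3])

# Tracing the plain module still records aten::adaptive_max_pool1d with two
# outputs, the second of which goes unused (graph details vary by version).
traced = torch.jit.trace(pool.eval(), x)
print(traced.graph)
```

Filtering the specific ret_name, as the patch does, avoids wrapping a missing output for this one op; as the TODO notes, other ops with optional secondary outputs could hit the same path.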