diff --git a/onnxruntime/core/providers/cpu/cpu_execution_provider.cc b/onnxruntime/core/providers/cpu/cpu_execution_provider.cc
index 0a387a92fbcb5..f992316cd3108 100644
--- a/onnxruntime/core/providers/cpu/cpu_execution_provider.cc
+++ b/onnxruntime/core/providers/cpu/cpu_execution_provider.cc
@@ -196,6 +196,7 @@ class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 9, Eye
 class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 9, float, IsNaN);
 class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 9, MLFloat16, IsNaN);
 class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 9, Erf);
+class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 9, MaxUnpool);
 class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 9, Sinh);
 class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 9, Cosh);
@@ -386,6 +387,7 @@ void RegisterOnnxOperatorKernels(std::function<void(KernelCreateInfo&&)> fn) {
   fn(BuildKernel());
   fn(BuildKernel());
   fn(BuildKernel());
+  fn(BuildKernel<ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 9, MaxUnpool)>());
   fn(BuildKernel());
   fn(BuildKernel());
 }
diff --git a/onnxruntime/core/providers/cpu/nn/Unpool.cc b/onnxruntime/core/providers/cpu/nn/Unpool.cc
new file mode 100644
index 0000000000000..391326dbb2d61
--- /dev/null
+++ b/onnxruntime/core/providers/cpu/nn/Unpool.cc
@@ -0,0 +1,200 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// Disable warning C4996: std::copy with raw pointers is used by SliceIterator.
+// std::copy_n is not an option for raw pointer destinations as used by gsl::copy.
+#ifdef _MSC_VER
+#pragma warning(disable : 4996)
+#endif
+#include "core/providers/cpu/nn/unpool.h"
+#include "core/providers/cpu/tensor/utils.h"
+#include <algorithm>
+#include <memory>
+
+using namespace ::onnxruntime::common;
+
+namespace onnxruntime {
+
+ONNX_CPU_OPERATOR_KERNEL(
+    MaxUnpool,
+    9,
+    KernelDefBuilder()
+        .TypeConstraint("T", DataTypeImpl::GetTensorType<float>())
+        .TypeConstraint("I", DataTypeImpl::GetTensorType<int64_t>())
+        .TypeConstraint("Y", DataTypeImpl::GetTensorType<float>()),
+    MaxUnpool);
+
+Status MaxUnpool::Compute(OpKernelContext* context) const {
+  // Get pooled values tensor
+  const Tensor* X = context->Input<Tensor>(0);
+  const TensorShape& X_shape = X->Shape();
+  const float* X_data = X->template Data<float>();
+
+  ORT_RETURN_IF_NOT(X_shape.NumDimensions() >= 3, "Input dimension cannot be less than 3.");
+
+  // Supported sizes check
+  size_t pooling_dims = X_shape.NumDimensions() - 2;
+  if (pooling_dims > 3) {
+    return Status(ONNXRUNTIME, INVALID_ARGUMENT, "Unsupported pooling size.");
+  }
+
+  // Get pooled index tensor
+  const Tensor* I = context->Input<Tensor>(1);
+  const TensorShape& I_shape = I->Shape();
+  const int64_t* I_data = I->template Data<int64_t>();
+
+  ORT_RETURN_IF_NOT(I_shape == X_shape, "Index tensor shape should be the same as that of the input data tensor to unpool.");
+
+  // Calculate output tensor shape from attributes
+  std::vector<int64_t> inferredOutputShape(X_shape.NumDimensions());
+
+  // Copy batch and channel dims
+  inferredOutputShape[0] = X_shape[0];
+  inferredOutputShape[1] = X_shape[1];
+
+  // For the feature dims, calculate by reversing the formula used for MaxPool
+  for (size_t dim = 0; dim < kernel_shape_.size(); ++dim) {
+    inferredOutputShape[dim + 2] = (X_shape[dim + 2] - 1) * strides_[dim] - (pads_[dim + 2] + pads_[kernel_shape_.size() + dim + 4]) + kernel_shape_[dim];
+  }
+
+  // If output_shape is provided, use it to infer additional padding.
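+  // The difference between the given output_shape and the inferred shape is treated as
+  // extra zero padding; its begin/end split per axis is recorded in inferredPads below.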
+  std::vector<int64_t> inferredPads;
+  std::vector<int64_t> givenOutputShape;
+  bool padsInferred = false;
+
+  if (num_inputs_ == 3) {
+    auto& tensor_shape = *context->Input<Tensor>(2);
+    ORT_RETURN_IF_NOT(tensor_shape.Shape().GetDims().size() == 1, "Shape must be 1 dimensional as its tensor data is a shape");
+
+    // Turn the shape tensor data into an actual shape
+    const int64_t* p_shape = tensor_shape.template Data<int64_t>();
+    std::vector<int64_t> shape{p_shape, p_shape + tensor_shape.Shape().Size()};
+    givenOutputShape = shape;
+
+    inferredPads.resize(inferredOutputShape.size() * 2, 0);
+
+    // Calculate whether the output shape has any padding over the inferred shape for the feature dims.
+    for (size_t dim = 2; dim < shape.size(); dim++) {
+      ORT_RETURN_IF_NOT(inferredOutputShape[dim] <= shape[dim], "Incorrect output shape");
+
+      int64_t inferredPad = shape[dim] - inferredOutputShape[dim];
+      ORT_RETURN_IF_NOT(inferredPad <= kernel_shape_[dim - 2], "Incorrect output shape");
+
+      if (inferredPad > 0) {
+        padsInferred = true;
+        if (inferredPad == kernel_shape_[dim - 2]) {
+          inferredPads[dim] = 1;
+          inferredPads[dim + inferredOutputShape.size()] = inferredPad - 1;
+        } else {
+          inferredPads[dim + inferredOutputShape.size()] = inferredPad;
+        }
+      }
+    }
+  }
+
+  // unpool
+  int64_t totalPooledElem = 1;
+  int64_t totalOutputElem = 1;
+
+  for (size_t dim = 0; dim < X_shape.NumDimensions(); dim++) {
+    totalPooledElem *= X_shape[dim];
+    totalOutputElem *= inferredOutputShape[dim];
+  }
+
+  // If there are no pads inferred from output_shape, simply create the new unpooled tensor
+  if (!padsInferred) {
+    TensorShape shape(inferredOutputShape);
+
+    Tensor* Y = context->Output(0, shape);
+    auto Y_data = Y->template MutableData<float>();
+    auto out = gsl::make_span(Y_data, Y->Shape().Size());
+    std::fill_n(out.data(), out.size(), 0.f);
+
+    for (int64_t curElem = 0; curElem < totalPooledElem; ++curElem) {
+      out[I_data[curElem]] = X_data[curElem];
+    }
+  } else {
+    // If the output shape has pads over the inferred dims, first
+    // create the tensor with the inferred dims and then add the padding.
+
+    // Generate tensor with inferred dims.
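+    // The temporary tensor is later walked with TensorAxisCounters/SliceIterator and copied
+    // row by row into the padded output, skipping over the inferred padding regions.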
+    TensorShape shape(inferredOutputShape);
+
+    AllocatorPtr alloc;
+    ORT_RETURN_IF_ERROR(context->GetTempSpaceAllocator(&alloc));
+    auto element_type = DataTypeImpl::GetType<float>();
+
+    void* buffer = alloc->Alloc(sizeof(float) * shape.Size());
+    std::unique_ptr<Tensor> p_tensor = std::make_unique<Tensor>(element_type,
+                                                                shape,
+                                                                buffer,
+                                                                alloc->Info(),
+                                                                alloc);
+
+    float* p = p_tensor->template MutableData<float>();
+
+    auto out = gsl::make_span(p, p_tensor->Shape().Size());
+    std::fill_n(out.data(), out.size(), 0.f);
+
+    for (int64_t curElem = 0; curElem < totalPooledElem; ++curElem) {
+      out[I_data[curElem]] = X_data[curElem];
+    }
+
+    std::vector<int64_t> output_dims(inferredOutputShape);
+    size_t dimension_count = output_dims.size();
+
+    std::vector<int64_t> input_starts;
+    std::vector<int64_t> input_extents;
+
+    // Calculate output dimensions
+    for (size_t i = 0; i < dimension_count; i++) {
+      input_starts.push_back(slices_[i]);
+      input_extents.push_back(output_dims[i] + slices_[i] + slices_[i + dimension_count]);
+      output_dims[i] += inferredPads[i] + inferredPads[i + dimension_count] + slices_[i] + slices_[i + dimension_count];
+    }
+
+    // Set up the output object
+    TensorShape output_shape(givenOutputShape);
+    Tensor* Y = context->Output(0, output_shape);
+    auto Y_data = Y->template MutableData<float>();
+
+    auto outData = gsl::make_span(Y_data, Y->Shape().Size());
+    std::fill_n(outData.data(), outData.size(), 0.f);
+
+    // Add padding
+    TensorPitches output_pitches(*Y);
+    size_t alignSkip = 0;  // Amount to skip to align to where the next input tensor data needs to be written
+
+    // Initial skip, sum up the begin padding on each axis
+    for (size_t i = 0; i < dimension_count; i++)
+      alignSkip += inferredPads[i] * output_pitches[i];
+
+    size_t inner_axis = dimension_count - 1;
+
+    TensorAxisCounters input_counters(*p_tensor);
+    SliceIterator<float> input(*p_tensor, input_starts, input_extents);
+
+    while (input_counters) {
+      Y_data += alignSkip;
+      {
+        Y_data = input.CopyInnermostAxis(Y_data);
+
+        int64_t prePad = inferredPads[inner_axis];
+        int64_t postPad = inferredPads[inner_axis + dimension_count];
+        Y_data += postPad;
+        alignSkip = prePad;
+      }
+      // Calculate the size of the next block of padding (skipping over the innermost axis since that's already done)
+      while (input_counters.Increment()) {
+        ptrdiff_t inner_pitch = output_pitches[input_counters.Axis()];
+        int64_t prePad = inferredPads[input_counters.Axis()];
+        int64_t postPad = inferredPads[input_counters.Axis() + dimension_count];
+        Y_data += inner_pitch * postPad;
+        alignSkip += inner_pitch * prePad;
+      }
+    }
+  }
+
+  return Status::OK();
+}
+
+}  // namespace onnxruntime
diff --git a/onnxruntime/core/providers/cpu/nn/unpool.h b/onnxruntime/core/providers/cpu/nn/unpool.h
new file mode 100644
index 0000000000000..012c1da30d140
--- /dev/null
+++ b/onnxruntime/core/providers/cpu/nn/unpool.h
@@ -0,0 +1,68 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#pragma once
+
+#include <vector>
+#include "core/common/common.h"
+#include "core/framework/op_kernel.h"
+#include "core/providers/cpu/nn/autopad_type.h"
+
+namespace onnxruntime {
+
+class MaxUnpool : public OpKernel {
+ public:
+  MaxUnpool(const OpKernelInfo& info) : OpKernel(info) {
+    ORT_ENFORCE(info.GetAttrs("kernel_shape", kernel_shape_).IsOK(),
+                "No kernel shape is set.");
+
+    num_inputs_ = OpKernel::Node().InputDefs().size();
+
+    if (num_inputs_ == 3 && !pads_.empty()) {
+      // ignore pads attribute value
+    }
+
+    // Set up defaults.
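+    // pads defaults to all zeros and strides defaults to all ones when the
+    // corresponding attribute is absent or empty.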
+ if (!info.GetAttrs("pads", pads_).IsOK() || pads_.empty()) { + pads_.resize(kernel_shape_.size() * 2, 0); + } + + if (!info.GetAttrs("strides", strides_).IsOK() || strides_.empty()) { + strides_.resize(kernel_shape_.size(), 1); + } + + for (size_t dim = 0; dim < kernel_shape_.size(); ++dim) { + ORT_ENFORCE(kernel_shape_[dim] > 0); + ORT_ENFORCE(pads_[dim] < kernel_shape_[dim] && pads_[dim + kernel_shape_.size()] < kernel_shape_[dim], + "Pad should be smaller than kernel."); + } + + ORT_ENFORCE(strides_.size() == kernel_shape_.size()); + + // Add 4 pad values (0) for batch and channel dimensions + pads_.insert(pads_.begin(), {0, 0}); + pads_.insert(pads_.begin() + 2 + kernel_shape_.size(), {0, 0}); + + // Separate out any negative pads_ into the slices_ array + slices_.resize(pads_.size(), 0); + for (size_t index = 0; index < pads_.size(); index++) { + if (pads_[index] < 0) { + slices_[index] = pads_[index]; + pads_[index] = 0; + } + } + } + + ~MaxUnpool() override{}; + + Status Compute(OpKernelContext* context) const override; + + private: + std::vector kernel_shape_; + std::vector pads_; + std::vector strides_; + std::vector slices_; // All of the negative padding values are separated out into slices_ + int64_t num_inputs_; +}; + +} // namespace onnxruntime diff --git a/onnxruntime/test/onnx/main.cc b/onnxruntime/test/onnx/main.cc index 966f674ed90d2..93a64012db583 100644 --- a/onnxruntime/test/onnx/main.cc +++ b/onnxruntime/test/onnx/main.cc @@ -309,8 +309,6 @@ int real_main(int argc, char* argv[]) { {"operator_rnn_single_layer", "disable reason"}, {"prelu_broadcast", "disable reason"}, {"prelu_example", "disable reason"}, - {"maxunpool_export_with_output_shape", "opset 9 not supported yet"}, - {"maxunpool_export_without_output_shape", "opset 9 not supported yet"}, {"upsample_nearest", "opset 9 not supported yet"}, {"onehot_with_axis", "opset 9 not supported yet"}, {"onehot_without_axis", "opset 9 not supported yet"}, // also has bug in current test re: output type. Spandan to fix. diff --git a/onnxruntime/test/providers/cpu/nn/unpool_op_test.cc b/onnxruntime/test/providers/cpu/nn/unpool_op_test.cc new file mode 100644 index 0000000000000..fd9b20ba9c3e9 --- /dev/null +++ b/onnxruntime/test/providers/cpu/nn/unpool_op_test.cc @@ -0,0 +1,382 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +#include "gtest/gtest.h" +#include "test/providers/provider_test_utils.h" + +using namespace std; +namespace onnxruntime { +namespace test { + +TEST(UnpoolTest, MaxUnPool1D) { + OpTester test("MaxUnpool", 9); + + test.AddAttribute("strides", std::vector{2}); + test.AddAttribute("kernel_shape", vector{2}); + + std::vector t_vals = {1, 2, 3, 4}; + std::vector t_dims = {1, 1, 4}; + + std::vector i_vals = {1, 3, 4, 6}; + std::vector i_dims = {1, 1, 4}; + + std::vector expected_dims = {1, 1, 8}; + std::vector expected_vals = {0, 1, 0, 2, 3, 0, 4, 0}; + + std::vector inputDims = {3}; + + test.AddInput("xT", t_dims, t_vals); + test.AddInput("xI", i_dims, i_vals); + test.AddInput("output_shape", inputDims, expected_dims); + + test.AddOutput("Y", expected_dims, expected_vals); + test.Run(); +} + +TEST(UnpoolTest, MaxUnPool2D) { + OpTester test("MaxUnpool", 9); + + test.AddAttribute("strides", std::vector{2, 2}); + test.AddAttribute("kernel_shape", std::vector{2, 2}); + + std::vector t_vals = {1, 2, 3, 4}; + std::vector t_dims = {1, 1, 2, 2}; + + std::vector i_vals = {1, 3, 4, 6}; + std::vector i_dims = {1, 1, 2, 2}; + + std::vector expected_dims = {1, 1, 4, 4}; + std::vector expected_vals = {0, 1, 0, 2, 3, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + + std::vector inputDims = {4}; + + test.AddInput("xT", t_dims, t_vals); + test.AddInput("xI", i_dims, i_vals); + test.AddInput("output_shape", inputDims, expected_dims); + + test.AddOutput("Y", expected_dims, expected_vals); + test.Run(); +} + +TEST(UnpoolTest, MaxUnPool3D) { + OpTester test("MaxUnpool", 9); + + test.AddAttribute("strides", std::vector{2, 2, 2}); + test.AddAttribute("kernel_shape", vector{2, 2, 2}); + + std::vector t_vals = {1, 2, 3, 4, 5, 6, 7, 8}; + std::vector t_dims = {1, 1, 2, 2, 2}; + + std::vector i_vals = {1, 3, 24, 30, 32, 38, 60, 62}; + std::vector i_dims = {1, 1, 2, 2, 2}; + + std::vector expected_dims = {1, 1, 4, 4, 4}; + std::vector expectedDims_Size = {5}; + + std::vector expected_vals = + { + //slice 1 + 0, 1, 0, 2, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + + // slice 2 + 0, 0, 0, 0, + 0, 0, 0, 0, + 3, 0, 0, 0, + 0, 0, 4, 0, + + //slice 3 + 5, 0, 0, 0, + 0, 0, 6, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + + // slice 4 + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 7, 0, 8, 0}; + + test.AddInput("xT", t_dims, t_vals); + test.AddInput("xI", i_dims, i_vals); + test.AddInput("output_shape", expectedDims_Size, expected_dims); + + test.AddOutput("Y", expected_dims, expected_vals); + test.Run(); +} + +TEST(UnpoolTest, MaxUnPool1D_Without_OutputShape) { + OpTester test("MaxUnpool", 9); + + test.AddAttribute("strides", std::vector{2}); + test.AddAttribute("kernel_shape", vector{2}); + + std::vector t_vals = {1, 2, 3, 4}; + std::vector t_dims = {1, 1, 4}; + + std::vector i_vals = {1, 3, 4, 6}; + std::vector i_dims = {1, 1, 4}; + + std::vector expected_dims = {1, 1, 8}; + std::vector expected_vals = {0, 1, 0, 2, 3, 0, 4, 0}; + + test.AddInput("xT", t_dims, t_vals); + test.AddInput("xI", i_dims, i_vals); + + test.AddOutput("Y", expected_dims, expected_vals); + test.Run(); +} + +TEST(UnpoolTest, MaxUnPool2D_Without_OutputShape) { + OpTester test("MaxUnpool", 9); + + test.AddAttribute("strides", std::vector{2, 2}); + test.AddAttribute("kernel_shape", vector{2, 2}); + + std::vector t_vals = {1, 2, 3, 4}; + std::vector t_dims = {1, 1, 2, 2}; + + std::vector i_vals = {1, 3, 4, 6}; + std::vector i_dims = {1, 1, 2, 2}; + + std::vector expected_dims = {1, 1, 4, 4}; + std::vector expected_vals = {0, 1, 0, 2, 3, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + + 
test.AddInput("xT", t_dims, t_vals); + test.AddInput("xI", i_dims, i_vals); + + test.AddOutput("Y", expected_dims, expected_vals); + test.Run(); +} + +TEST(UnpoolTest, MaxUnPool3D_Without_OutputShape) { + OpTester test("MaxUnpool", 9); + + test.AddAttribute("strides", std::vector{2, 2, 2}); + test.AddAttribute("kernel_shape", vector{2, 2, 2}); + + std::vector t_vals = {1, 2, 3, 4, 5, 6, 7, 8}; + std::vector t_dims = {1, 1, 2, 2, 2}; + + std::vector i_vals = {1, 3, 24, 30, 32, 38, 60, 62}; + std::vector i_dims = {1, 1, 2, 2, 2}; + + std::vector expected_dims = {1, 1, 4, 4, 4}; + + std::vector expected_vals = + { + //slice 1 + 0, 1, 0, 2, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + + // slice 2 + 0, 0, 0, 0, + 0, 0, 0, 0, + 3, 0, 0, 0, + 0, 0, 4, 0, + + //slice 3 + 5, 0, 0, 0, + 0, 0, 6, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + + // slice 4 + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + 7, 0, 8, 0}; + + test.AddInput("xT", t_dims, t_vals); + test.AddInput("xI", i_dims, i_vals); + + test.AddOutput("Y", expected_dims, expected_vals); + test.Run(); +} + +TEST(UnpoolTest, MaxUnPool1D_Padding) { + OpTester test("MaxUnpool", 9); + + test.AddAttribute("strides", std::vector{2}); + test.AddAttribute("kernel_shape", vector{2}); + test.AddAttribute("pads", vector{1, 0}); + + std::vector t_vals = {1, 2, 3, 4}; + std::vector t_dims = {1, 1, 4}; + + std::vector i_vals = {1, 3, 4, 6}; + std::vector i_dims = {1, 1, 4}; + + std::vector expected_dims = {1, 1, 7}; + std::vector expected_vals = {0, 1, 0, 2, 3, 0, 4}; + + test.AddInput("xT", t_dims, t_vals); + test.AddInput("xI", i_dims, i_vals); + + test.AddOutput("YP", expected_dims, expected_vals); + test.Run(); +} + +TEST(UnpoolTest, MaxUnPool2D_Padding) { + OpTester test("MaxUnpool", 9); + + test.AddAttribute("strides", std::vector{2, 2}); + test.AddAttribute("kernel_shape", vector{2, 2}); + test.AddAttribute("pads", vector{1, 1, 0, 0}); + + std::vector t_vals = {1, 2, 3, 4}; + std::vector t_dims = {1, 1, 2, 2}; + + std::vector i_vals = {1, 3, 4, 6}; + std::vector i_dims = {1, 1, 2, 2}; + + std::vector expected_dims = {1, 1, 3, 3}; + std::vector expected_vals = {0, 1, 0, 2, 3, 0, 4, 0, 0}; + + test.AddInput("xT", t_dims, t_vals); + test.AddInput("xI", i_dims, i_vals); + + test.AddOutput("Y", expected_dims, expected_vals); + test.Run(); +} + +TEST(UnpoolTest, MaxUnPool3D_Padding) { + OpTester test("MaxUnpool", 9); + + test.AddAttribute("strides", std::vector{2, 2, 2}); + test.AddAttribute("kernel_shape", vector{2, 2, 2}); + test.AddAttribute("pads", vector{0, 1, 1, 0, 0, 0}); + + std::vector t_vals = {1, 2, 3, 4}; + std::vector t_dims = {1, 1, 1, 2, 2}; + + std::vector i_vals = {1, 4, 8, 12}; + std::vector i_dims = {1, 1, 1, 2, 2}; + + std::vector expected_dims = {1, 1, 2, 3, 3}; + + std::vector expected_vals = { + 0, 1, 0, + 0, 2, 0, + 0, 0, 3, + 0, 0, 0, + 4, 0, 0, + 0, 0, 0}; + + test.AddInput("xT", t_dims, t_vals); + test.AddInput("xI", i_dims, i_vals); + + test.AddOutput("Y", expected_dims, expected_vals); + test.Run(); +} + +TEST(UnpoolTest, MaxUnPool1D_WithPaddedOutput) { + OpTester test("MaxUnpool", 9); + + test.AddAttribute("strides", std::vector{2}); + test.AddAttribute("kernel_shape", vector{2}); + + std::vector t_vals = {1, 2, 3, 4}; + std::vector t_dims = {1, 1, 4}; + + std::vector i_vals = {1, 3, 4, 6}; + std::vector i_dims = {1, 1, 4}; + + std::vector expected_dims = {1, 1, 10}; + std::vector expected_vals = {0, 0, 1, 0, 2, 3, 0, 4, 0, 0}; + + std::vector inputDims = {3}; + + test.AddInput("xT", t_dims, t_vals); + test.AddInput("xI", i_dims, i_vals); + 
test.AddInput("output_shape", inputDims, expected_dims); + + test.AddOutput("Y", expected_dims, expected_vals); + test.Run(); +} + +TEST(UnpoolTest, MaxUnPool2D_WithPaddedOutput) { + OpTester test("MaxUnpool", 9); + + test.AddAttribute("strides", std::vector{2, 2}); + test.AddAttribute("kernel_shape", std::vector{2, 2}); + + std::vector t_vals = {1, 2, 3, 4}; + std::vector t_dims = {1, 1, 2, 2}; + + std::vector i_vals = {1, 3, 8, 10}; + std::vector i_dims = {1, 1, 2, 2}; + + std::vector expected_dims = {1, 1, 5, 5}; + std::vector expected_vals = { + 0, 1, 0, 2, 0, + 0, 0, 0, 0, 0, + 3, 0, 4, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0}; + + std::vector inputDims = {4}; + + test.AddInput("xT", t_dims, t_vals); + test.AddInput("xI", i_dims, i_vals); + test.AddInput("output_shape", inputDims, expected_dims); + + test.AddOutput("Y", expected_dims, expected_vals); + test.Run(); +} + +TEST(UnpoolTest, MaxUnPool3D_WithPaddedOutput) { + OpTester test("MaxUnpool", 9); + + test.AddAttribute("strides", std::vector{2, 2, 2}); + test.AddAttribute("kernel_shape", vector{2, 2, 2}); + + std::vector t_vals = {1, 2, 3, 4, 5, 6, 7, 8}; + std::vector t_dims = {1, 1, 2, 2, 2}; + + std::vector i_vals = {1, 3, 24, 30, 32, 38, 60, 62}; + std::vector i_dims = {1, 1, 2, 2, 2}; + + std::vector expected_dims = {1, 1, 4, 4, 5}; + std::vector expectedDims_Size = {5}; + + std::vector expected_vals = + { + //slice 1 + 0, 1, 0, 2, 0, + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + + // slice 2 + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 3, 0, 0, 0, 0, + 0, 0, 4, 0, 0, + + //slice 3 + 5, 0, 0, 0, 0, + 0, 0, 6, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + + // slice 4 + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + 7, 0, 8, 0, 0}; + + test.AddInput("xT", t_dims, t_vals); + test.AddInput("xI", i_dims, i_vals); + test.AddInput("output_shape", expectedDims_Size, expected_dims); + + test.AddOutput("Y", expected_dims, expected_vals); + test.Run(); +} + +} // namespace test +} // namespace onnxruntime diff --git a/onnxruntime/test/python/onnx_backend_test_series.py b/onnxruntime/test/python/onnx_backend_test_series.py index 5410c4dbcacc5..ea6ea135bcd20 100644 --- a/onnxruntime/test/python/onnx_backend_test_series.py +++ b/onnxruntime/test/python/onnx_backend_test_series.py @@ -34,8 +34,6 @@ '|test_eyelike_with_dtype_cpu.*' '|test_eyelike_without_dtype_cpu.*' '|test_gru_seq_length_cpu.*' -'|test_maxunpool_export_with_output_shape_cpu.*' -'|test_maxunpool_export_without_output_shape_cpu.*' '|test_onehot_with_axis_cpu.*' '|test_onehot_without_axis_cpu.*' '|test_scan_sum_cpu.*'