diff --git a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
index 20346b4d1fe4b..1a9b361756086 100644
--- a/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
+++ b/mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -214,6 +214,16 @@ void mlir::tosa::printTypeOrAttr(OpAsmPrinter &p, Operation *op, TypeAttr type,
   }
 }
 
+//===----------------------------------------------------------------------===//
+// Tosa utilities.
+//===----------------------------------------------------------------------===//
+
+std::optional<int64_t> idivCheck(const int64_t lhs, const int64_t rhs) {
+  if (lhs % rhs != 0)
+    return std::nullopt;
+  return lhs / rhs;
+}
+
 //===----------------------------------------------------------------------===//
 // TOSA Operator Verifiers.
 //===----------------------------------------------------------------------===//
@@ -1621,13 +1631,6 @@ LogicalResult tosa::ResizeOp::verify() {
   const int64_t borderY = borderValues[0];
   const int64_t borderX = borderValues[1];
 
-  auto idivCheck = [](const int64_t lhs,
-                      const int64_t rhs) -> std::optional<int64_t> {
-    if (lhs % rhs != 0)
-      return std::nullopt;
-    return lhs / rhs;
-  };
-
   // Don't check with input height that could be broadcast (ih != 1)
   // since Linalg, a consumer of TOSA, expects broadcasting support
   // in resize to be available. Taking the cautious approach for now,
@@ -1967,6 +1970,97 @@ LogicalResult Conv2DOp::inferReturnTypeComponents(
 LogicalResult Conv2DOp::verify() {
   if (verifyConvOp(*this).failed() || verifyConvOpModes(*this).failed())
     return failure();
+
+  llvm::ArrayRef<int64_t> padding = getPad();
+  if (llvm::any_of(padding, [](int64_t p) { return p < 0; }))
+    return emitOpError("expect all padding values to be >= 0, got ") << padding;
+
+  llvm::ArrayRef<int64_t> strides = getStride();
+  if (llvm::any_of(strides, [](int64_t s) { return s < 1; }))
+    return emitOpError("expect all stride values to be >= 1, got ") << strides;
+
+  llvm::ArrayRef<int64_t> dilations = getDilation();
+  if (llvm::any_of(dilations, [](int64_t d) { return d < 1; }))
+    return emitOpError("expect all dilation values to be >= 1, got ")
+           << dilations;
+
+  const RankedTensorType outputType =
+      llvm::dyn_cast<RankedTensorType>(getOutput().getType());
+  if (!outputType)
+    // Skip following checks if output is not ranked
+    return success();
+
+  const RankedTensorType inputType =
+      llvm::dyn_cast<RankedTensorType>(getInput().getType());
+  const RankedTensorType weightType =
+      llvm::dyn_cast<RankedTensorType>(getWeight().getType());
+
+  if (inputType && weightType) {
+    const auto verifyOutputSize =
+        [this](const int64_t inputSize, const int64_t kernelSize,
+               const int64_t outputSize, const int64_t padBefore,
+               const int64_t padAfter, const int64_t stride,
+               const int64_t dilation, const llvm::StringRef dimName,
+               const llvm::StringRef dimAxis,
+               const llvm::StringRef padBeforeName,
+               const llvm::StringRef padAfterName) -> LogicalResult {
+      if (inputSize == ShapedType::kDynamic ||
+          kernelSize == ShapedType::kDynamic)
+        return success();
+
+      const std::optional<int64_t> calculatedOutSizeMinusOne = idivCheck(
+          inputSize - 1 + padBefore + padAfter - (kernelSize - 1) * dilation,
+          stride);
+      if (!calculatedOutSizeMinusOne.has_value())
+        return emitOpError("expected input_")
+               << dimName << " - 1 + pad_" << padBeforeName << " + pad_"
+               << padAfterName << " - (kernel_" << dimName
+               << " - 1) * dilation_" << dimAxis
+               << " to be wholly divisible by stride_" << dimAxis << ", got ("
+               << inputSize << " - 1 + " << padBefore << " + " << padAfter
+               << " - (" << kernelSize << " - 1) * " << dilation << ") / "
+               << stride;
+
+      const int64_t calculatedOutSize = calculatedOutSizeMinusOne.value() + 1;
+      if (outputSize != ShapedType::kDynamic && calculatedOutSize != outputSize)
+        return emitOpError("calculated output ")
+               << dimName << " did not match expected: "
+               << "calculated=" << calculatedOutSize
+               << ", expected=" << outputSize;
+
+      return success();
+    };
+
+    if (failed(verifyOutputSize(
+            inputType.getDimSize(1), weightType.getDimSize(1),
+            outputType.getDimSize(1), padding[0], padding[1], strides[0],
+            dilations[0], "height", "y", "top", "bottom")))
+      return failure();
+
+    if (failed(verifyOutputSize(
+            inputType.getDimSize(2), weightType.getDimSize(2),
+            outputType.getDimSize(2), padding[2], padding[3], strides[1],
+            dilations[1], "width", "x", "left", "right")))
+      return failure();
+  }
+
+  const RankedTensorType biasType =
+      llvm::dyn_cast<RankedTensorType>(getBias().getType());
+  if (!biasType)
+    // Skip following checks if bias is not ranked
+    return success();
+
+  const int64_t biasChannels = biasType.getDimSize(0);
+  const int64_t outputChannels = outputType.getDimSize(3);
+  if (biasChannels == ShapedType::kDynamic ||
+      outputChannels == ShapedType::kDynamic)
+    // Skip following checks if biasChannels or outputChannels is dynamic dim
+    return success();
+
+  if (biasChannels != outputChannels && biasChannels != 1)
+    return emitOpError(
+               "bias channels expected to be equal to output channels (")
+           << outputChannels << ") or 1, got " << biasChannels;
 
   return success();
 }
diff --git a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir
index 1f096ce177488..3f10ebbaedcca 100644
--- a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir
+++ b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir
@@ -464,16 +464,16 @@ func.func @conv2d_scalar_bias_f32(%input: tensor<1x49x42x27xf32>, %weights: tens
 
 // CHECK-LABEL: @conv2d_i8
 func.func @conv2d_i8(%input: tensor<1x49x42x27xi8>, %weights: tensor<28x1x1x27xi8>, %bias: tensor<28xi8>) -> () {
   // HWCF: %[[TRANSPOSE:.+]] = linalg.transpose ins(%arg1 : tensor<28x1x1x27xi8>) outs(%[[TRANSPOSEDINIT:.+]] : tensor<1x1x27x28xi8>) permutation = [1, 2, 3, 0]
-  // CHECK: %[[INIT:.+]] = tensor.empty() : tensor<1x45x40x28xi32>
-  // CHECK: %[[BROADCAST:.+]] = linalg.generic {indexing_maps = [#[[$MAP1]], #[[$MAP2]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg2 : tensor<28xi8>) outs(%[[INIT]] : tensor<1x45x40x28xi32>) {
+  // CHECK: %[[INIT:.+]] = tensor.empty() : tensor<1x49x42x28xi32>
+  // CHECK: %[[BROADCAST:.+]] = linalg.generic {indexing_maps = [#[[$MAP1]], #[[$MAP2]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg2 : tensor<28xi8>) outs(%[[INIT]] : tensor<1x49x42x28xi32>) {
   // CHECK: arith.extsi
   // CHECK: linalg.yield
-  // CHECK: } -> tensor<1x45x40x28xi32>
-  // CHECK: linalg.conv_2d_nhwc_fhwc_q {dilations = dense<[2, 1]> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} ins(%arg0, %arg1, %c0_i32, %c0_i32_0 : tensor<1x49x42x27xi8>, tensor<28x1x1x27xi8>, i32, i32) outs(%[[BROADCAST]] : tensor<1x45x40x28xi32>) -> tensor<1x45x40x28xi32>
-  // HWCF: linalg.conv_2d_nhwc_hwcf_q {dilations = dense<[2, 1]> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} ins(%arg0, %[[TRANSPOSE]], %c0_i32, %c0_i32_0 : tensor<1x49x42x27xi8>, tensor<1x1x27x28xi8>, i32, i32) outs(%{{[a-zA-Z0-9_]*}} : tensor<1x45x40x28xi32>) -> tensor<1x45x40x28xi32>
+  // CHECK: } -> tensor<1x49x42x28xi32>
+  // CHECK: linalg.conv_2d_nhwc_fhwc_q {dilations = dense<[2, 1]> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} ins(%arg0, %arg1, %c0_i32, %c0_i32_0 : tensor<1x49x42x27xi8>, tensor<28x1x1x27xi8>, i32, i32) outs(%[[BROADCAST]] : tensor<1x49x42x28xi32>) -> tensor<1x49x42x28xi32>
+  // HWCF: linalg.conv_2d_nhwc_hwcf_q {dilations = dense<[2, 1]> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} ins(%arg0, %[[TRANSPOSE]], %c0_i32, %c0_i32_0 : tensor<1x49x42x27xi8>, tensor<1x1x27x28xi8>, i32, i32) outs(%{{[a-zA-Z0-9_]*}} : tensor<1x49x42x28xi32>) -> tensor<1x49x42x28xi32>
   %zp = "tosa.const"() {value = dense<0> : tensor<1xi8>} : () -> tensor<1xi8>
-  %0 = tosa.conv2d %input, %weights, %bias, %zp, %zp {acc_type = i32, dilation = array<i64: 2, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>} : (tensor<1x49x42x27xi8>, tensor<28x1x1x27xi8>, tensor<28xi8>, tensor<1xi8>, tensor<1xi8>) -> tensor<1x45x40x28xi32>
+  %0 = tosa.conv2d %input, %weights, %bias, %zp, %zp {acc_type = i32, dilation = array<i64: 2, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>} : (tensor<1x49x42x27xi8>, tensor<28x1x1x27xi8>, tensor<28xi8>, tensor<1xi8>, tensor<1xi8>) -> tensor<1x49x42x28xi32>
   return
 }
diff --git a/mlir/test/Dialect/Tosa/canonicalize.mlir b/mlir/test/Dialect/Tosa/canonicalize.mlir
index ef1185e11b459..03d5bb5dae941 100644
--- a/mlir/test/Dialect/Tosa/canonicalize.mlir
+++ b/mlir/test/Dialect/Tosa/canonicalize.mlir
@@ -201,23 +201,23 @@ func.func @concat_fold_cast(%arg0: tensor<?x1xf32>) -> tensor<?x?xf32> {
 
 // -----
 
 // CHECK-LABEL: @conv2d_stride_2
-func.func @conv2d_stride_2(%arg0: tensor<4x10x10x2xf32>) -> tensor<4x10x10x3xf32> {
+func.func @conv2d_stride_2(%arg0: tensor<4x11x11x2xf32>) -> tensor<4x6x6x3xf32> {
   // CHECK: tosa.conv2d
   %weight = "tosa.const"() {value = dense<[[[[1.0, 1.0]]], [[[1.0, 1.0]]], [[[1.0, 1.0]]]]> : tensor<3x1x1x2xf32>} : ()-> tensor<3x1x1x2xf32>
   %bias = "tosa.const"() {value = dense<0.0> : tensor<3xf32>} : ()-> tensor<3xf32>
-  %0 = tosa.conv2d %arg0, %weight, %bias {acc_type = f32, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 2, 2>, dilation = array<i64: 1, 1>} : (tensor<4x10x10x2xf32>, tensor<3x1x1x2xf32>, tensor<3xf32>) -> tensor<4x10x10x3xf32>
-  return %0 : tensor<4x10x10x3xf32>
+  %0 = tosa.conv2d %arg0, %weight, %bias {acc_type = f32, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 2, 2>, dilation = array<i64: 1, 1>} : (tensor<4x11x11x2xf32>, tensor<3x1x1x2xf32>, tensor<3xf32>) -> tensor<4x6x6x3xf32>
+  return %0 : tensor<4x6x6x3xf32>
 }
 
 // -----
 
 // CHECK-LABEL: @conv2d_weight_2x2
-func.func @conv2d_weight_2x2(%arg0: tensor<4x10x10x1xf32>) -> tensor<4x10x10x1xf32> {
+func.func @conv2d_weight_2x2(%arg0: tensor<4x10x10x1xf32>) -> tensor<4x9x9x1xf32> {
   // CHECK: tosa.conv2d
   %weight = "tosa.const"() {value = dense<[[[[1.0], [1.0]], [[1.0], [1.0]]]]> : tensor<1x2x2x1xf32>} : ()-> tensor<1x2x2x1xf32>
   %bias = "tosa.const"() {value = dense<0.0> : tensor<1xf32>} : ()-> tensor<1xf32>
-  %0 = tosa.conv2d %arg0, %weight, %bias {acc_type = f32, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>, dilation = array<i64: 1, 1>} : (tensor<4x10x10x1xf32>, tensor<1x2x2x1xf32>, tensor<1xf32>) -> tensor<4x10x10x1xf32>
-  return %0 : tensor<4x10x10x1xf32>
+  %0 = tosa.conv2d %arg0, %weight, %bias {acc_type = f32, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>, dilation = array<i64: 1, 1>} : (tensor<4x10x10x1xf32>, tensor<1x2x2x1xf32>, tensor<1xf32>) -> tensor<4x9x9x1xf32>
+  return %0 : tensor<4x9x9x1xf32>
 }
 
 // -----
diff --git a/mlir/test/Dialect/Tosa/invalid.mlir b/mlir/test/Dialect/Tosa/invalid.mlir
index 9123f84ab25b8..8e66551d71692 100644
--- a/mlir/test/Dialect/Tosa/invalid.mlir
+++ b/mlir/test/Dialect/Tosa/invalid.mlir
@@ -1171,3 +1171,75 @@ func.func @broadcast_resize_bilinear_i8(%arg0 : tensor<3x1x1x7xi8>) -> tensor<3x
 
   return %resize : tensor<3x4x5x7xi32>
 }
+
+// -----
+
+func.func @test_conv2d_invalid_padding(%arg0: tensor<1x4x4x4xf32>, %arg1: tensor<8x1x1x4xf32>, %arg2: tensor<8xf32>, %arg3: tensor<1xf32>, %arg4: tensor<1xf32>) -> tensor<1x4x4x8xf32> {
+  // expected-error@+1 {{'tosa.conv2d' op expect all padding values to be >= 0, got 0, 0, -1, 0}}
+  %0 = tosa.conv2d %arg0, %arg1, %arg2, %arg3, %arg4 {acc_type = f32, dilation = array<i64: 1, 1>, pad = array<i64: 0, 0, -1, 0>, stride = array<i64: 1, 1>, local_bound = true}
+           : (tensor<1x4x4x4xf32>, tensor<8x1x1x4xf32>, tensor<8xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x4x4x8xf32>
+  return %0 : tensor<1x4x4x8xf32>
+}
+
+// -----
+
+func.func @test_conv2d_invalid_stride(%arg0: tensor<1x4x4x4xf32>, %arg1: tensor<8x1x1x4xf32>, %arg2: tensor<8xf32>, %arg3: tensor<1xf32>, %arg4: tensor<1xf32>) -> tensor<1x4x4x8xf32> {
+  // expected-error@+1 {{'tosa.conv2d' op expect all stride values to be >= 1, got 0, 1}}
+  %0 = tosa.conv2d %arg0, %arg1, %arg2, %arg3, %arg4 {acc_type = f32, dilation = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 0, 1>, local_bound = true}
+           : (tensor<1x4x4x4xf32>, tensor<8x1x1x4xf32>, tensor<8xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x4x4x8xf32>
+  return %0 : tensor<1x4x4x8xf32>
+}
+
+// -----
+
+func.func @test_conv2d_invalid_dilation(%arg0: tensor<1x4x4x4xf32>, %arg1: tensor<8x1x1x4xf32>, %arg2: tensor<8xf32>, %arg3: tensor<1xf32>, %arg4: tensor<1xf32>) -> tensor<1x4x4x8xf32> {
+  // expected-error@+1 {{'tosa.conv2d' op expect all dilation values to be >= 1, got 1, 0}}
+  %0 = tosa.conv2d %arg0, %arg1, %arg2, %arg3, %arg4 {acc_type = f32, dilation = array<i64: 1, 0>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>, local_bound = true}
+           : (tensor<1x4x4x4xf32>, tensor<8x1x1x4xf32>, tensor<8xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x4x4x8xf32>
+  return %0 : tensor<1x4x4x8xf32>
+}
+
+// -----
+
+func.func @test_conv2d_wholly_divisible_height(%arg0: tensor<1x4x4x4xf32>, %arg1: tensor<8x1x1x4xf32>, %arg2: tensor<8xf32>, %arg3: tensor<1xf32>, %arg4: tensor<1xf32>) -> tensor<1x4x4x8xf32> {
+  // expected-error@+1 {{'tosa.conv2d' op expected input_height - 1 + pad_top + pad_bottom - (kernel_height - 1) * dilation_y to be wholly divisible by stride_y, got (4 - 1 + 0 + 0 - (1 - 1) * 1) / 2}}
+  %0 = tosa.conv2d %arg0, %arg1, %arg2, %arg3, %arg4 {acc_type = f32, dilation = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 2, 1>, local_bound = true}
+           : (tensor<1x4x4x4xf32>, tensor<8x1x1x4xf32>, tensor<8xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x4x4x8xf32>
+  return %0 : tensor<1x4x4x8xf32>
+}
+
+// -----
+
+func.func @test_conv2d_wholly_divisible_width(%arg0: tensor<1x4x4x4xf32>, %arg1: tensor<8x1x1x4xf32>, %arg2: tensor<8xf32>, %arg3: tensor<1xf32>, %arg4: tensor<1xf32>) -> tensor<1x4x4x8xf32> {
+  // expected-error@+1 {{'tosa.conv2d' op expected input_width - 1 + pad_left + pad_right - (kernel_width - 1) * dilation_x to be wholly divisible by stride_x, got (4 - 1 + 0 + 0 - (1 - 1) * 1) / 2}}
+  %0 = tosa.conv2d %arg0, %arg1, %arg2, %arg3, %arg4 {acc_type = f32, dilation = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 2>, local_bound = true}
+           : (tensor<1x4x4x4xf32>, tensor<8x1x1x4xf32>, tensor<8xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x4x4x8xf32>
+  return %0 : tensor<1x4x4x8xf32>
+}
+
+// -----
+
+func.func @test_conv2d_unexpected_output_height(%arg0: tensor<1x4x4x4xf32>, %arg1: tensor<8x1x1x4xf32>, %arg2: tensor<8xf32>, %arg3: tensor<1xf32>, %arg4: tensor<1xf32>) -> tensor<1x6x4x8xf32> {
+  // expected-error@+1 {{'tosa.conv2d' op calculated output height did not match expected: calculated=4, expected=6}}
+  %0 = tosa.conv2d %arg0, %arg1, %arg2, %arg3, %arg4 {acc_type = f32, dilation = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>, local_bound = true}
+           : (tensor<1x4x4x4xf32>, tensor<8x1x1x4xf32>, tensor<8xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x6x4x8xf32>
+  return %0 : tensor<1x6x4x8xf32>
+}
+
+// -----
+
+func.func @test_conv2d_unexpected_output_width(%arg0: tensor<1x4x4x4xf32>, %arg1: tensor<8x1x1x4xf32>, %arg2: tensor<8xf32>, %arg3: tensor<1xf32>, %arg4: tensor<1xf32>) -> tensor<1x4x6x8xf32> {
+  // expected-error@+1 {{'tosa.conv2d' op calculated output width did not match expected: calculated=4, expected=6}}
+  %0 = tosa.conv2d %arg0, %arg1, %arg2, %arg3, %arg4 {acc_type = f32, dilation = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>, local_bound = true}
+           : (tensor<1x4x4x4xf32>, tensor<8x1x1x4xf32>, tensor<8xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x4x6x8xf32>
+  return %0 : tensor<1x4x6x8xf32>
+}
+
+// -----
+
+func.func @test_conv2d_invalid_bias_size(%arg0: tensor<1x4x4x4xf32>, %arg1: tensor<8x1x1x4xf32>, %arg2: tensor<7xf32>, %arg3: tensor<1xf32>, %arg4: tensor<1xf32>) -> tensor<1x4x4x8xf32> {
+  // expected-error@+1 {{'tosa.conv2d' op bias channels expected to be equal to output channels (8) or 1, got 7}}
+  %0 = tosa.conv2d %arg0, %arg1, %arg2, %arg3, %arg4 {acc_type = f32, dilation = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>, local_bound = true}
+           : (tensor<1x4x4x4xf32>, tensor<8x1x1x4xf32>, tensor<7xf32>, tensor<1xf32>, tensor<1xf32>) -> tensor<1x4x4x8xf32>
+  return %0 : tensor<1x4x4x8xf32>
+}
diff --git a/mlir/test/Dialect/Tosa/level_check.mlir b/mlir/test/Dialect/Tosa/level_check.mlir
index 2a6561fea67b5..2a119a87abc52 100644
--- a/mlir/test/Dialect/Tosa/level_check.mlir
+++ b/mlir/test/Dialect/Tosa/level_check.mlir
@@ -225,74 +225,74 @@ func.func @test_avgpool2d_pad_right(%arg0: tensor<1x32x32x8xf32>) -> tensor<1x32
 
 // -----
 
-func.func @test_conv2d_dilation_y(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<16x2x2x8xf32>, %arg2: tensor<16xf32>) -> tensor<1x32x32x16xf32> {
+func.func @test_conv2d_dilation_y(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<16x2x2x8xf32>, %arg2: tensor<16xf32>) -> tensor<*xf32> {
   // expected-error@+1 {{'tosa.conv2d' op failed level check: dilation_y * KH <= MAX_KERNEL}}
   %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {acc_type = f32, dilation = array<i64: 4097, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>} :
-            (tensor<1x32x32x8xf32>, tensor<16x2x2x8xf32>, tensor<16xf32>) -> tensor<1x32x32x16xf32>
-  return %0 : tensor<1x32x32x16xf32>
+            (tensor<1x32x32x8xf32>, tensor<16x2x2x8xf32>, tensor<16xf32>) -> tensor<*xf32>
+  return %0 : tensor<*xf32>
 }
 
 // -----
 
-func.func @test_conv2d_dilation_x(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<16x2x2x8xf32>, %arg2: tensor<16xf32>) -> tensor<1x32x32x16xf32> {
+func.func @test_conv2d_dilation_x(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<16x2x2x8xf32>, %arg2: tensor<16xf32>) -> tensor<*xf32> {
   // expected-error@+1 {{'tosa.conv2d' op failed level check: dilation_x * KW <= MAX_KERNEL}}
   %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {acc_type = f32, dilation = array<i64: 1, 4097>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>} :
-            (tensor<1x32x32x8xf32>, tensor<16x2x2x8xf32>, tensor<16xf32>) -> tensor<1x32x32x16xf32>
-  return %0 : tensor<1x32x32x16xf32>
+            (tensor<1x32x32x8xf32>, tensor<16x2x2x8xf32>, tensor<16xf32>) -> tensor<*xf32>
+  return %0 : tensor<*xf32>
 }
 
 // -----
 
-func.func @test_conv2d_pad_top(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<16x2x2x8xf32>, %arg2: tensor<16xf32>) -> tensor<1x32x32x16xf32> {
+func.func @test_conv2d_pad_top(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<16x2x2x8xf32>, %arg2: tensor<16xf32>) -> tensor<1x8225x32x16xf32> {
  // expected-error@+1 {{'tosa.conv2d' op failed level check: pad <= MAX_KERNEL}}
   %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {acc_type = f32, dilation = array<i64: 1, 1>, pad = array<i64: 8193, 1, 0, 1>, stride = array<i64: 1, 1>} :
-            (tensor<1x32x32x8xf32>, tensor<16x2x2x8xf32>, tensor<16xf32>) -> tensor<1x32x32x16xf32>
-  return %0 : tensor<1x32x32x16xf32>
+            (tensor<1x32x32x8xf32>, tensor<16x2x2x8xf32>, tensor<16xf32>) -> tensor<1x8225x32x16xf32>
+  return %0 : tensor<1x8225x32x16xf32>
 }
 
 // -----
 
-func.func @test_conv2d_pad_bottom(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<16x2x2x8xf32>, %arg2: tensor<16xf32>) -> tensor<1x32x32x16xf32> {
+func.func @test_conv2d_pad_bottom(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<16x2x2x8xf32>, %arg2: tensor<16xf32>) -> tensor<1x8224x32x16xf32> {
   // expected-error@+1 {{'tosa.conv2d' op failed level check: pad <= MAX_KERNEL}}
   %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {acc_type = f32, dilation = array<i64: 1, 1>, pad = array<i64: 0, 8193, 0, 1>, stride = array<i64: 1, 1>} :
-            (tensor<1x32x32x8xf32>, tensor<16x2x2x8xf32>, tensor<16xf32>) -> tensor<1x32x32x16xf32>
-  return %0 : tensor<1x32x32x16xf32>
+            (tensor<1x32x32x8xf32>, tensor<16x2x2x8xf32>, tensor<16xf32>) -> tensor<1x8224x32x16xf32>
+  return %0 : tensor<1x8224x32x16xf32>
 }
 
 // -----
 
-func.func @test_conv2d_pad_left(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<16x2x2x8xf32>, %arg2: tensor<16xf32>) -> tensor<1x32x32x16xf32> {
+func.func @test_conv2d_pad_left(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<16x2x2x8xf32>, %arg2: tensor<16xf32>) -> tensor<1x32x8225x16xf32> {
   // expected-error@+1 {{'tosa.conv2d' op failed level check: pad <= MAX_KERNEL}}
   %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {acc_type = f32, dilation = array<i64: 1, 1>, pad = array<i64: 0, 1, 8193, 1>, stride = array<i64: 1, 1>} :
-            (tensor<1x32x32x8xf32>, tensor<16x2x2x8xf32>, tensor<16xf32>) -> tensor<1x32x32x16xf32>
-  return %0 : tensor<1x32x32x16xf32>
+            (tensor<1x32x32x8xf32>, tensor<16x2x2x8xf32>, tensor<16xf32>) -> tensor<1x32x8225x16xf32>
+  return %0 : tensor<1x32x8225x16xf32>
 }
 
 // -----
 
-func.func @test_conv2d_pad_right(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<16x2x2x8xf32>, %arg2: tensor<16xf32>) -> tensor<1x32x32x16xf32> {
+func.func @test_conv2d_pad_right(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<16x2x2x8xf32>, %arg2: tensor<16xf32>) -> tensor<1x32x8224x16xf32> {
   // expected-error@+1 {{'tosa.conv2d' op failed level check: pad <= MAX_KERNEL}}
   %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {acc_type = f32, dilation = array<i64: 1, 1>, pad = array<i64: 0, 1, 0, 8193>, stride = array<i64: 1, 1>} :
-            (tensor<1x32x32x8xf32>, tensor<16x2x2x8xf32>, tensor<16xf32>) -> tensor<1x32x32x16xf32>
-  return %0 : tensor<1x32x32x16xf32>
+            (tensor<1x32x32x8xf32>, tensor<16x2x2x8xf32>, tensor<16xf32>) -> tensor<1x32x8224x16xf32>
+  return %0 : tensor<1x32x8224x16xf32>
 }
 
 // -----
 
-func.func @test_conv2d_stride_y(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<16x2x2x8xf32>, %arg2: tensor<16xf32>) -> tensor<1x32x32x16xf32> {
+func.func @test_conv2d_stride_y(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<16x2x2x8xf32>, %arg2: tensor<16xf32>) -> tensor<*xf32> {
   // expected-error@+1 {{'tosa.conv2d' op failed level check: stride <= MAX_STRIDE}}
   %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {acc_type = f32, dilation = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 8193, 1>} :
-            (tensor<1x32x32x8xf32>, tensor<16x2x2x8xf32>, tensor<16xf32>) -> tensor<1x32x32x16xf32>
-  return %0 : tensor<1x32x32x16xf32>
+            (tensor<1x32x32x8xf32>, tensor<16x2x2x8xf32>, tensor<16xf32>) -> tensor<*xf32>
+  return %0 : tensor<*xf32>
 }
 
 // -----
 
-func.func @test_conv2d_stride_x(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<16x2x2x8xf32>, %arg2: tensor<16xf32>) -> tensor<1x32x32x16xf32> {
+func.func @test_conv2d_stride_x(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<16x2x2x8xf32>, %arg2: tensor<16xf32>) -> tensor<*xf32> {
   // expected-error@+1 {{'tosa.conv2d' op failed level check: stride <= MAX_STRIDE}}
   %0 = "tosa.conv2d"(%arg0, %arg1, %arg2) {acc_type = f32, dilation = array<i64: 1, 1>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 8193>} :
-            (tensor<1x32x32x8xf32>, tensor<16x2x2x8xf32>, tensor<16xf32>) -> tensor<1x32x32x16xf32>
-  return %0 : tensor<1x32x32x16xf32>
+            (tensor<1x32x32x8xf32>, tensor<16x2x2x8xf32>, tensor<16xf32>) -> tensor<*xf32>
+  return %0 : tensor<*xf32>
 }
 
 // -----
diff --git a/mlir/test/Dialect/Tosa/quant-test.mlir b/mlir/test/Dialect/Tosa/quant-test.mlir
index ee6caf285a248..0ed55ce7a1a6b 100644
--- a/mlir/test/Dialect/Tosa/quant-test.mlir
+++ b/mlir/test/Dialect/Tosa/quant-test.mlir
@@ -10,11 +10,11 @@ func.func @test_build_qtype(%arg0 : tensor<16x1x1x8x!quant.uniform<u8<1:255>:f32
 
 // -----
 
 // CHECK-LABEL: test_build_mult_and_shift
-func.func @test_build_mult_and_shift(%arg0: tensor<1x32x32x8x!quant.uniform<i8:f32, 0.015684768557548523>>, %arg1 : tensor<16x1x1x8x!quant.uniform<i8<-127:127>:f32, 0.015680249780416489>>, %arg2 : tensor<16xi32>) -> tensor<1x32x32x16x!quant.uniform<i32:f32, 0.078431375324726104>> {
+func.func @test_build_mult_and_shift(%arg0: tensor<1x32x32x8x!quant.uniform<i8:f32, 0.015684768557548523>>, %arg1 : tensor<16x1x1x8x!quant.uniform<i8<-127:127>:f32, 0.015680249780416489>>, %arg2 : tensor<16xi32>) -> tensor<1x34x36x16x!quant.uniform<i32:f32, 0.078431375324726104>> {
   // CHECK: tosa.conv2d
   %input_zp = "tosa.const"() {value = dense<-1> : tensor<1xi8>} : () -> tensor<1xi8>
   %weight_zp = "tosa.const"() {value = dense<1> : tensor<1xi8>} : () -> tensor<1xi8>
-  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2, %input_zp, %weight_zp) {acc_type = i32, pad = array<i64: 1, 1, 2, 2>, dilation = array<i64: 2, 1>, stride = array<i64: 1, 1>} : (tensor<1x32x32x8x!quant.uniform<i8:f32, 0.015684768557548523>>, tensor<16x1x1x8x!quant.uniform<i8<-127:127>:f32, 0.015680249780416489>>, tensor<16xi32>, tensor<1xi8>, tensor<1xi8>) -> tensor<1x32x32x16x!quant.uniform<i32:f32, 0.078431375324726104>>
-  return %0 : tensor<1x32x32x16x!quant.uniform<i32:f32, 0.078431375324726104>>
+  %0 = "tosa.conv2d"(%arg0, %arg1, %arg2, %input_zp, %weight_zp) {acc_type = i32, pad = array<i64: 1, 1, 2, 2>, dilation = array<i64: 2, 1>, stride = array<i64: 1, 1>} : (tensor<1x32x32x8x!quant.uniform<i8:f32, 0.015684768557548523>>, tensor<16x1x1x8x!quant.uniform<i8<-127:127>:f32, 0.015680249780416489>>, tensor<16xi32>, tensor<1xi8>, tensor<1xi8>) -> tensor<1x34x36x16x!quant.uniform<i32:f32, 0.078431375324726104>>
+  return %0 : tensor<1x34x36x16x!quant.uniform<i32:f32, 0.078431375324726104>>
 }
diff --git a/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir b/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir
index 1821b78091aad..cb8bd461e5901 100644
--- a/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir
+++ b/mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir
@@ -770,9 +770,9 @@ func.func @conv2d_dilated(%input: tensor<2x12x14x3xf32>, %weights: tensor<5x3x6x
 
 // -----
 
 // CHECK-LABEL: @conv2d_strided
-func.func @conv2d_strided(%input: tensor<1x13x14x1xf32>, %weights: tensor<1x1x1x1xf32>, %bias: tensor<1xf32>) -> () {
-  // CHECK: -> tensor<1x5x7x1xf32>
-  %0 = tosa.conv2d %input, %weights, %bias {acc_type = f32, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 3, 2>, dilation = array<i64: 1, 1>} : (tensor<1x13x14x1xf32>, tensor<1x1x1x1xf32>, tensor<1xf32>) -> tensor<?x?x?x?xf32>
+func.func @conv2d_strided(%input: tensor<1x13x15x1xf32>, %weights: tensor<1x1x1x1xf32>, %bias: tensor<1xf32>) -> () {
+  // CHECK: -> tensor<1x5x8x1xf32>
+  %0 = tosa.conv2d %input, %weights, %bias {acc_type = f32, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 3, 2>, dilation = array<i64: 1, 1>} : (tensor<1x13x15x1xf32>, tensor<1x1x1x1xf32>, tensor<1xf32>) -> tensor<?x?x?x?xf32>
   return
 }