# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm.script import tir as T
from tvm.tir import Schedule
from tvm.tir.schedule.transform import tile_with_tensor_intrin
from tvm.tir.tensor_intrin.x86 import VNNI_DOT_16x4_INTRIN


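# These tests exercise tile_with_tensor_intrin, which splits a block's loops
# so that its innermost loop nest matches the description of a registered
# tensor intrinsic, here the x86 VNNI dot product (int32 += uint8 * int8,
# accumulated over a (16, 4) tile).

# Dense workload in a VNNI-friendly layout: the int8 weight is pre-packed as
# (N // 16, K // 4, 16, 4) so that each (16, 4) tile feeds one VNNI
# instruction.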
@tvm.script.ir_module
class DenseVNNIModule:
    @T.prim_func
    def main(
        placeholder: T.Buffer[(1024, 1024), "uint8"],
        placeholder_1: T.Buffer[(64, 256, 16, 4), "int8"],
        compute: T.Buffer[(1024, 1024), "int32"],
    ) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        with T.block("root"):
            T.reads()
            T.writes()
            for i0, i1, i2 in T.grid(1024, 1024, 1024):
                with T.block("compute"):
                    i, j, k = T.axis.remap("SSR", [i0, i1, i2])
                    T.reads(placeholder[i, k], placeholder_1[j // 16, k // 4, j % 16, k % 4])
                    T.writes(compute[i, j])
                    with T.init():
                        compute[i, j] = 0
                    compute[i, j] = compute[i, j] + T.cast(placeholder[i, k], "int32") * T.cast(
                        placeholder_1[j // 16, k // 4, j % 16, k % 4], "int32"
                    )


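# Expected output of tile_with_tensor_intrin on DenseVNNIModule: j is split
# by 16 and k by 4, so the two innermost loops line up with the intrinsic's
# (16, 4) description.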
@tvm.script.ir_module
class DenseVNNIModuleTiled:
    @T.prim_func
    def main(
        placeholder: T.Buffer[(1024, 1024), "uint8"],
        placeholder_1: T.Buffer[(64, 256, 16, 4), "int8"],
        compute: T.Buffer[(1024, 1024), "int32"],
    ) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        # body
        # with T.block("root")
        for i0, i1_0, i2_0, i1_1, i2_1 in T.grid(1024, 64, 256, 16, 4):
            with T.block("compute"):
                i = T.axis.spatial(1024, i0)
                j = T.axis.spatial(1024, i1_0 * 16 + i1_1)
                k = T.axis.reduce(1024, i2_0 * 4 + i2_1)
                T.reads(placeholder[i, k], placeholder_1[j // 16, k // 4, j % 16, k % 4])
                T.writes(compute[i, j])
                with T.init():
                    compute[i, j] = 0
                compute[i, j] = compute[i, j] + T.cast(placeholder[i, k], "int32") * T.cast(
                    placeholder_1[j // 16, k // 4, j % 16, k % 4], "int32"
                )


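# Conv2d in the NCHWc int8 layout. The kernel is pre-packed so that its two
# innermost axes form the (oc_block, ic_s_inner) = (16, 4) tile consumed by
# one VNNI dot-product instruction.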
@tvm.script.ir_module
class Conv2dNCHWcVNNIModule:
    @T.prim_func
    def main(
        placeholder: T.Buffer[(1, 4, 56, 56, 16), "uint8"],
        placeholder_1: T.Buffer[(16, 4, 1, 1, 4, 16, 4), "int8"],
        conv2d_NCHWc_int8: T.Buffer[(1, 16, 56, 56, 16), "int32"],
    ) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        for i0, i1, i2, i3, i4, i5, i6, i7, i8, i9 in T.grid(1, 16, 56, 56, 16, 1, 1, 4, 4, 4):
            with T.block("conv2d_NCHWc_int8"):
                (
                    n,
                    oc_chunk,
                    oh,
                    ow,
                    oc_block,
                    kh,
                    kw,
                    ic_outer,
                    ic_f_inner,
                    ic_s_inner,
                ) = T.axis.remap("SSSSSRRRRR", [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9])
                T.reads(
                    placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner],
                    placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner],
                )
                T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block])
                with T.init():
                    conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] = 0
                conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] = conv2d_NCHWc_int8[
                    n, oc_chunk, oh, ow, oc_block
                ] + T.cast(
                    placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner], "int32"
                ) * T.cast(
                    placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner],
                    "int32",
                )


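# Expected output of tile_with_tensor_intrin on Conv2dNCHWcVNNIModule: the
# oc_block and ic_s_inner loops are split by (16, 4); since their extents
# already match the intrinsic, the outer halves of the splits have extent 1.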
@tvm.script.ir_module
class Conv2dNCHWcVNNIModuleTiled:
    @T.prim_func
    def main(
        placeholder: T.Buffer[(1, 4, 56, 56, 16), "uint8"],
        placeholder_1: T.Buffer[(16, 4, 1, 1, 4, 16, 4), "int8"],
        conv2d_NCHWc_int8: T.Buffer[(1, 16, 56, 56, 16), "int32"],
    ) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        # body
        # with T.block("root")
        for i0, i1, i2, i3, i4_0, i5, i6, i7, i8, i9_0, i4_1, i9_1 in T.grid(
            1, 16, 56, 56, 1, 1, 1, 4, 4, 1, 16, 4
        ):
            with T.block("conv2d_NCHWc_int8"):
                n = T.axis.spatial(1, 0)
                oc_chunk, oh, ow, oc_block = T.axis.remap("SSSS", [i1, i2, i3, i4_1])
                kh = T.axis.reduce(1, 0)
                kw = T.axis.reduce(1, 0)
                ic_outer, ic_f_inner, ic_s_inner = T.axis.remap("RRR", [i7, i8, i9_1])
                T.reads(
                    placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner],
                    placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner],
                )
                T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block])
                with T.init():
                    conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] = 0
                conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] = conv2d_NCHWc_int8[
                    n, oc_chunk, oh, ow, oc_block
                ] + T.cast(
                    placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner], "int32"
                ) * T.cast(
                    placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner],
                    "int32",
                )


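# The loop returned by tile_with_tensor_intrin should be the outermost loop
# of the matched region, i.e. the 16-extent spatial loop produced by
# splitting j (i1_1 in DenseVNNIModuleTiled).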
def test_tile_with_tensor_intrin_dense_vnni():
    s = Schedule(DenseVNNIModule)
    block = s.get_block("compute")

    tiled_loop = tile_with_tensor_intrin(s, block, VNNI_DOT_16x4_INTRIN)

    _, _, _, i1_1, _ = s.get_loops(block)

    assert s.get(tiled_loop) == s.get(i1_1)
    tvm.ir.assert_structural_equal(s.mod, DenseVNNIModuleTiled)


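# Same check for the conv2d workload: tiling adds two loops (12 in total) and
# the returned loop is the second-to-last one, the 16-extent oc_block loop.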
def test_tile_with_tensor_intrin_conv2d_nchwc_vnni():
    s = Schedule(Conv2dNCHWcVNNIModule)
    block = s.get_block("conv2d_NCHWc_int8")

    tiled_loop = tile_with_tensor_intrin(s, block, VNNI_DOT_16x4_INTRIN)

    tiled_loops = s.get_loops(block)

    assert len(tiled_loops) == 12
    assert s.get(tiled_loop) == s.get(tiled_loops[-2])

    tvm.ir.assert_structural_equal(s.mod, Conv2dNCHWcVNNIModuleTiled)


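# A sketch of a possible follow-up (an assumption, not exercised by these
# tests): the loop returned by tile_with_tensor_intrin is the natural point
# at which to map the block onto the hardware instruction, e.g.
#
#     s.tensorize(tiled_loop, VNNI_DOT_16x4_INTRIN)
#
# after which s.mod would contain the VNNI implementation intrinsic in place
# of the scalar multiply-accumulate.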
if __name__ == "__main__":
    test_tile_with_tensor_intrin_dense_vnni()
    test_tile_with_tensor_intrin_conv2d_nchwc_vnni()