# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
| 18 | +"""Legalization functions for DLTensor inspection.""" |

import enum

from tvm.script import tir as T

from ...block_builder import BlockBuilder
from ...expr import Call, Expr
from .common import register_legalize


class TVMStructFieldKind(enum.IntEnum):
    """Equivalent to tvm::tir::builtin::TVMStructFieldKind

    This does not use `enum.auto()` to define the values, because
    `enum.auto()` starts from 1, and this must match the C++
    definition which starts from 0.
    """

    kArrAddr = 0
    kArrData = 1
    kArrShape = 2
    kArrStrides = 3
    kArrNDim = 4
    kArrTypeCode = 5
    kArrTypeBits = 6
    kArrTypeLanes = 7
    kArrByteOffset = 8
    kArrDeviceId = 9
    kArrDeviceType = 10
    kArrKindBound_ = 11
    kTVMValueContent = 12
    kTVMValueKindBound_ = 13


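# Each legalization below follows the same pattern: build a private, host-side
# PrimFunc that reads the requested DLTensor field via `T.tvm_struct_get`, add
# it to the module with `bb.add_func`, and replace the original
# `relax.inspect.*` call with a call of the resulting GlobalVar.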
@register_legalize("relax.inspect.tensor_stride_i")
def _tensor_stride_i(bb: BlockBuilder, call: Call) -> Expr:
    @T.prim_func(private=True)
    def _get_tensor_stride_i(dlpack_handle: T.handle, axis: T.int64) -> T.int64:
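        # "tir.is_host" marks this PrimFunc as host-side code (it dereferences
        # the DLTensor struct directly), and "tir.is_scheduled" tells later
        # scheduling passes to leave it as-is.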
        T.func_attr({"tir.is_host": T.bool(True), "tir.is_scheduled": T.bool(True)})
        assert T.int64(0) <= axis, "Specified axis may not be negative"
        ndim: T.int32 = T.tvm_struct_get(
            dlpack_handle, 0, int(TVMStructFieldKind.kArrNDim), "int32"
        )
        assert axis < T.Cast(
            "int64", ndim
        ), "Specified axis must be less than the tensor's dimensionality"
        stride_ptr: T.handle("int64") = T.tvm_struct_get(
            dlpack_handle, 0, int(TVMStructFieldKind.kArrStrides), "handle"
        )

        if T.isnullptr(stride_ptr):
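            # Per the DLPack convention, a NULL `strides` pointer means the
            # tensor is compact and row-major, so the stride along `axis` is
            # the product of the extents of all later axes.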
            shape_ptr: T.handle("int64") = T.tvm_struct_get(
                dlpack_handle, 0, int(TVMStructFieldKind.kArrShape), "handle"
            )
            shape = T.decl_buffer(ndim, "int64", data=shape_ptr)

            product = T.decl_buffer([], "int64")
            product[()] = 1

            # TODO(Lunderberg): Add a TIR lowering pass to allow
            # ranges to start somewhere other than zero.  This loop
            # could then iterate on `range(axis+1, ndim)`.
            for dim_offset in range(ndim - (axis + 1)):
                dim = dim_offset + (axis + 1)
                product[()] = product[()] * shape[dim]

            return product[()]
        else:
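            # Strides are stored explicitly.  DLPack strides are expressed in
            # elements rather than bytes, so the value can be returned as-is.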
            strides = T.decl_buffer(ndim, "int64", data=stride_ptr)
            stride: T.int64 = strides[axis]
            return stride

    gvar = bb.add_func(_get_tensor_stride_i, "_get_tensor_stride_i")
    return Call(gvar, call.args)


@register_legalize("relax.inspect.tensor_byte_offset")
def _tensor_byte_offset(bb: BlockBuilder, call: Call) -> Expr:
    @T.prim_func(private=True)
    def _get_tensor_byte_offset(dlpack_handle: T.handle) -> T.int64:
        T.func_attr({"tir.is_host": T.bool(True), "tir.is_scheduled": T.bool(True)})
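        # DLTensor::byte_offset is the offset of the tensor data from the
        # start of its backing allocation, in bytes.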
        byte_offset: T.uint64 = T.tvm_struct_get(
            dlpack_handle, 0, int(TVMStructFieldKind.kArrByteOffset), "uint64"
        )
        return byte_offset

    gvar = bb.add_func(_get_tensor_byte_offset, "_get_tensor_byte_offset")
    return Call(gvar, call.args)


@register_legalize("relax.inspect.tensor_elem_offset")
def _tensor_elem_offset(bb: BlockBuilder, call: Call) -> Expr:
    @T.prim_func(private=True)
    def _get_tensor_elem_offset(dlpack_handle: T.handle) -> T.int64:
        T.func_attr({"tir.is_host": T.bool(True), "tir.is_scheduled": T.bool(True)})
        byte_offset: T.uint64 = T.tvm_struct_get(
            dlpack_handle, 0, int(TVMStructFieldKind.kArrByteOffset), "uint64"
        )
        scalar_bits: T.uint8 = T.tvm_struct_get(
            dlpack_handle, 0, int(TVMStructFieldKind.kArrTypeBits), "uint8"
        )
        lanes: T.uint16 = T.tvm_struct_get(
            dlpack_handle, 0, int(TVMStructFieldKind.kArrTypeLanes), "uint16"
        )
        bytes_per_element = T.ceildiv(scalar_bits.astype("uint64") * lanes.astype("uint64"), 8)
        elem_offset = byte_offset // bytes_per_element
        return elem_offset

    gvar = bb.add_func(_get_tensor_elem_offset, "_get_tensor_elem_offset")
    return Call(gvar, call.args)
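

# Note on usage (an assumption based on the `register_legalize` hook, not
# something enforced in this file): these handlers run when a Relax module
# containing `relax.inspect.*` calls is legalized, for example via
#
#     mod = relax.transform.LegalizeOps()(mod)
#
# Each call site is then rewritten to the `Call(gvar, call.args)` expression
# returned above, invoking the corresponding host-side PrimFunc.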