Commit a229184

Merge branch 'main' into upstream_irmodule_parser_from_unity_pr_14487

2 parents 08a9187 + e51ba29

File tree: 64 files changed, +1994 −185 lines (diffs for a subset of the files are shown below)


docker/Dockerfile.ci_cpu

Lines changed: 3 additions & 0 deletions
@@ -47,6 +47,9 @@ RUN bash /install/ubuntu_install_python_package.sh
 COPY install/ubuntu1804_install_llvm.sh /install/ubuntu1804_install_llvm.sh
 RUN bash /install/ubuntu1804_install_llvm.sh
 
+COPY install/ubuntu_install_llvm_from_source.sh /install/ubuntu_install_llvm_from_source.sh
+RUN bash /install/ubuntu_install_llvm_from_source.sh 15.0.7 8b5fcb24b4128cf04df1b0b9410ce8b1a729cb3c544e6da885d234280dedeac6
+
 COPY install/ubuntu_install_dnnl.sh /install/ubuntu_install_dnnl.sh
 RUN bash /install/ubuntu_install_dnnl.sh
 
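The two positional arguments passed to ubuntu_install_llvm_from_source.sh are the LLVM version and what appears to be a SHA-256 checksum of the source tarball. A minimal sketch of the kind of verification such a script would perform (the function and the tarball name in the usage comment are assumptions for illustration, not taken from the script itself):

import hashlib

def verify_sha256(path, expected):
    """Return True if the file at `path` hashes to `expected` (hex SHA-256)."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == expected

# Hypothetical usage mirroring the Dockerfile arguments:
# verify_sha256("llvm-project-15.0.7.src.tar.xz",
#               "8b5fcb24b4128cf04df1b0b9410ce8b1a729cb3c544e6da885d234280dedeac6")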

docker/install/ubuntu_install_llvm_from_source.sh

Lines changed: 1 addition & 0 deletions
@@ -60,6 +60,7 @@ cmake \
     -DLLVM_ENABLE_ASSERTIONS=ON \
     -DLLVM_ENABLE_RTTI=ON \
     -DLLVM_ENABLE_OCAMLDOC=OFF \
+    -DLLVM_ENABLE_PROJECTS=mlir \
     -DLLVM_USE_INTEL_JITEVENTS=ON \
     -DLLVM_TEMPORARILY_ALLOW_OLD_TOOLCHAIN=ON \
     -DPYTHON_EXECUTABLE="$(cpython_path 3.7)/bin/python" \
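Adding -DLLVM_ENABLE_PROJECTS=mlir pulls the MLIR sub-project into the LLVM build, so MLIR libraries and tools are installed alongside LLVM. A quick sanity check one could run in the resulting image; mlir-opt is a standard MLIR binary, but its presence on PATH in this image is an assumption:

import shutil
import subprocess

# mlir-opt ships with an MLIR-enabled LLVM build; if it is on PATH,
# the LLVM_ENABLE_PROJECTS=mlir flag took effect.
tool = shutil.which("mlir-opt")
if tool is None:
    print("mlir-opt not found: MLIR was probably not built/installed")
else:
    print(subprocess.run([tool, "--version"], capture_output=True, text=True).stdout)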

include/tvm/relay/attrs/nn.h

Lines changed: 3 additions & 3 deletions
@@ -671,10 +671,10 @@ struct Conv1DTransposeAttrs : public tvm::AttrsNode<Conv1DTransposeAttrs> {
           "dimensions respectively. Convolution is applied on the"
           "'W' dimension.");
   TVM_ATTR_FIELD(kernel_layout)
-      .set_default("OIW")
+      .set_default("IOW")
       .describe(
-          "Dimension ordering of data and weight. Can be 'OIW', 'OIW16o16i', etc."
-          "'O', 'I', 'W' stands for num_filter, input_channel, and width"
+          "Dimension ordering of data and weight. Can be 'IOW', 'IOW16o16i', etc."
+          "'I', 'O', 'W' stands for input_channel, num_filter and width"
           "dimensions respectively.");
   TVM_ATTR_FIELD(out_layout)
       .set_default("")

include/tvm/tir/schedule/schedule.h

Lines changed: 16 additions & 0 deletions
@@ -292,6 +292,16 @@ class ScheduleNode : public runtime::Object {
    */
   virtual Array<BlockRV> GetConsumers(const BlockRV& block_rv) = 0;
   /******** Schedule: Transform loops ********/
+  /*!
+   * \brief Merge a list of loops into one. The loops under their LCA must:
+   * 1) be under the same scope,
+   * 2) carry no annotations or thread bindings,
+   * 3) start at 0 and have the same extent and nesting depth,
+   * 4) form, from each target loop up to the LCA, a chain in which the inner
+   *    loop is the only child of the outer loop.
+   * \param loop_rvs The loops to be merged
+   * \return The new loop after the merge
+   */
+  virtual LoopRV Merge(const Array<LoopRV>& loop_rvs) = 0;
   /*!
    * \brief Fuse a list of consecutive loops into one. It requires:
    * 1) The loops can't have annotations or thread bindings.
@@ -328,6 +338,12 @@ class ScheduleNode : public runtime::Object {
    * \param ordered_loop_rvs The loops in the new order
    */
   virtual void Reorder(const Array<LoopRV>& ordered_loop_rvs) = 0;
+  /*!
+   * \brief Reorder the itervars inside a block.
+   * \param block_rv The block to be transformed.
+   * \param new_order The new itervar order.
+   */
+  virtual void ReorderBlockIterVar(const BlockRV& block_rv, const Array<Integer> new_order) = 0;
   /*!
    * \brief Create a new unit loop on top of the specific block.
    * \param block_rv The block above which the new loop is created
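These primitives are normally mirrored on the Python tir.Schedule object. A minimal sketch of where they would be invoked; the Python spellings sch.merge and sch.reorder_block_iter_var are inferred from the C++ declarations above, not quoted from this commit:

import tvm
from tvm.script import tir as T


@tvm.script.ir_module
class Module:
    @T.prim_func
    def main(A: T.Buffer((128, 128), "float32"), B: T.Buffer((128, 128), "float32")):
        for i, j in T.grid(128, 128):
            with T.block("B"):
                vi, vj = T.axis.remap("SS", [i, j])
                B[vi, vj] = A[vi, vj] * 2.0


sch = tvm.tir.Schedule(Module)
block = sch.get_block("B")
i, j = sch.get_loops(block)
# Hypothetical Python spellings inferred from the C++ declarations (assumption):
# sch.merge(loop_a, loop_b)                  # merge same-extent loops from sibling subtrees
# sch.reorder_block_iter_var(block, [1, 0])  # swap the block's vi and vj itervars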

python/tvm/relay/frontend/keras.py

Lines changed: 8 additions & 4 deletions
@@ -282,6 +282,8 @@ def _convert_dense(
 
 
 def _convert_convolution1d(inexpr, keras_layer, etab, data_layout, input_shape=None):
+    is_deconv = type(keras_layer).__name__ == "Conv1DTranspose"
+
     if input_shape is None:
         input_shape = keras_layer.input_shape
     _check_data_format(keras_layer)
@@ -290,19 +292,21 @@ def _convert_convolution1d(inexpr, keras_layer, etab, data_layout, input_shape=None):
 
     if data_layout == "NWC":
         kernel_layout = "WIO"
+        if is_deconv:
+            kernel_layout = "WOI"
     else:
         kernel_layout = "OIW"
+        if is_deconv:
+            kernel_layout = "IOW"
         msg = (
             "Kernel layout with {} is not supported for operator Convolution1D "
             "in frontend Keras."
         )
         raise tvm.error.OpAttributeUnImplemented(msg.format(data_layout))
 
-    is_deconv = type(keras_layer).__name__ == "Conv1DTranspose"
-
     if is_deconv:
-        if kernel_layout == "OIW":
-            weight = weight.transpose([2, 0, 1])
+        if kernel_layout == "IOW":
+            weight = weight.transpose([2, 1, 0])
         kernel_w, n_filters, _ = weight.shape
     else:
         kernel_w, _, n_filters = weight.shape
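Keras stores a Conv1DTranspose kernel as (width, out_channels, in_channels), i.e. WOI, which is why the transpose here must reverse all three axes: the old [2, 0, 1] permutation turned WOI into (in, width, out), which matches neither OIW nor IOW. A short numpy check of the corrected permutation:

import numpy as np

kernel_w, out_channels, in_channels = 3, 8, 4

# Keras Conv1DTranspose kernel layout: (width, out, in) = WOI.
weight_woi = np.zeros((kernel_w, out_channels, in_channels), dtype="float32")

# transpose([2, 1, 0]) reverses the axes: WOI -> IOW, matching the
# relay kernel_layout used for the NCW data layout.
weight_iow = weight_woi.transpose([2, 1, 0])
assert weight_iow.shape == (in_channels, out_channels, kernel_w)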

python/tvm/relay/frontend/mxnet.py

Lines changed: 1 addition & 1 deletion
@@ -304,7 +304,7 @@ def _mx_conv1d_transpose(inputs, attrs):
     if data_layout != "NCW":
         raise tvm.error.OpAttributeInvalid('Only "NCW" data layout is supported for 1D Convolution')
     channel_axis = 1
-    kernel_layout = "OIW"
+    kernel_layout = "IOW"
     new_attrs = {}
     new_attrs["channels"] = attrs.get_int("num_filter")
     new_attrs["kernel_size"] = attrs.get_int_tuple("kernel")

python/tvm/relay/frontend/oneflow.py

Lines changed: 2 additions & 2 deletions
@@ -84,8 +84,8 @@ def get_node_info(node):
     shape = tuple(node.input_conf.blob_conf.shape.dim)
     # get data type
     dtype = node.input_conf.blob_conf.data_type
-    if dtype in list(FLOW_2_NP_DTYPE.keys()):
-        data_type = FLOW_2_NP_DTYPE[dtype]
+    if dtype in list(FLOW_2_STR_DTYPE.keys()):
+        data_type = FLOW_2_STR_DTYPE[dtype]
     else:
         raise IndexError("Please check the data type of your node: %s" % node.name)
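The fix swaps in FLOW_2_STR_DTYPE, a table mapping OneFlow's numeric data_type enum to TVM dtype strings. A sketch of the shape of such a table; the enum values below are illustrative assumptions, not quoted from oneflow.py:

# Hypothetical shape of the FLOW_2_STR_DTYPE table:
# OneFlow integer data_type enum -> TVM dtype string.
FLOW_2_STR_DTYPE = {
    2: "float32",  # enum values here are illustrative assumptions
    3: "float64",
    5: "int32",
    6: "int64",
}

dtype = 2
if dtype in FLOW_2_STR_DTYPE:  # membership test; list(...keys()) is unnecessary
    data_type = FLOW_2_STR_DTYPE[dtype]
    print(data_type)  # -> "float32"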

python/tvm/relay/frontend/pytorch.py

Lines changed: 3 additions & 0 deletions
@@ -1263,6 +1263,9 @@ def convolution(self, inputs, input_types):
         else:
             data_layout = "NCW"
             kernel_layout = "OIW"
+            if use_transpose:
+                # Transposed convolutions have IOW layout.
+                kernel_layout = "IOW"
 
         # Conv1d does not currently support grouped convolution so we convert it to conv2d
         is_grouped_conv1d = False
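The IOW choice matches how PyTorch lays out transposed-convolution weights: nn.ConvTranspose1d stores its weight as (in_channels, out_channels/groups, kernel_size), whereas nn.Conv1d uses (out_channels, in_channels/groups, kernel_size). A quick check, assuming torch is installed:

import torch

conv = torch.nn.Conv1d(in_channels=4, out_channels=8, kernel_size=3)
deconv = torch.nn.ConvTranspose1d(in_channels=4, out_channels=8, kernel_size=3)

print(conv.weight.shape)    # torch.Size([8, 4, 3])  -> OIW
print(deconv.weight.shape)  # torch.Size([4, 8, 3])  -> IOW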

python/tvm/relay/op/contrib/arm_compute_lib.py

Lines changed: 23 additions & 1 deletion
@@ -359,6 +359,10 @@ def qnn_conv2d(expr):
         kernel_typ = args[1].checked_type
         if len(kernel_typ.shape) != 4 or kernel_typ.dtype not in qnn_dtypes:
             return False
+        if is_per_channel_quantization(
+            zero_point=args[2], scale=args[4]
+        ) or is_per_channel_quantization(zero_point=args[3], scale=args[5]):
+            return False
         is_depthwise = is_depthwise_conv2d(
             data_typ.shape,
             attrs["data_layout"],
@@ -422,6 +426,10 @@ def qnn_dense(expr):
             return False
         if attrs.out_dtype != "int32":
             return False
+        if is_per_channel_quantization(
+            zero_point=args[2], scale=args[4]
+        ) or is_per_channel_quantization(zero_point=args[3], scale=args[5]):
+            return False
         return True
 
 
@@ -514,10 +522,24 @@ def qnn_add(expr):
     for typ in [args[0].checked_type, args[1].checked_type]:
         if typ.dtype not in ["int8", "uint8"]:
             return False
-
+    if (
+        is_per_channel_quantization(zero_point=args[3], scale=args[2])
+        or is_per_channel_quantization(zero_point=args[5], scale=args[4])
+        or is_per_channel_quantization(zero_point=args[7], scale=args[6])
+    ):
+        return False
     return True
 
 
+def is_per_channel_quantization(zero_point, scale):
+    """Check if the quantization is per-channel"""
+    for value in [zero_point, scale]:
+        shape = value.checked_type.shape
+        if len(shape) != 0 and shape[0] != 1:
+            return True
+    return False
+
+
 class OpAttrContext(object):
     """Temporarily changes the attr of an op."""
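The new helper treats a quantization parameter as per-channel whenever its zero point or scale is a non-scalar tensor with more than one element; such operators are then rejected, since per-channel quantization is not offloaded here. A stand-alone sketch of the same shape test, using bare tuples in place of relay expressions:

def is_per_channel(shape):
    """Mirror of the shape test in is_per_channel_quantization:
    anything that is not a scalar () or a one-element vector (1,)
    counts as per-channel."""
    return len(shape) != 0 and shape[0] != 1

assert not is_per_channel(())    # scalar zero point/scale -> per-tensor
assert not is_per_channel((1,))  # one-element vector      -> per-tensor
assert is_per_channel((64,))     # one value per channel   -> per-channel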

python/tvm/relay/op/contrib/ethosu.py

Lines changed: 12 additions & 3 deletions
@@ -1671,7 +1671,18 @@ def check_compatible_size(mode, method, upscale_size, ifm_size):
             return False
         if self.method not in ("nearest_neighbor", "linear"):
             return False
-        if self.coordinate_transformation_mode not in ("asymmetric", "align_corners"):
+        if self.coordinate_transformation_mode not in (
+            "asymmetric",
+            "align_corners",
+            "half_pixel",
+        ):
+            return False
+        if (
+            self.coordinate_transformation_mode == "half_pixel"
+            and self.rounding_method != "round_prefer_ceil"
+            or self.coordinate_transformation_mode != "half_pixel"
+            and self.rounding_method != ""
+        ):
             return False
         if not check_compatible_size(
             self.coordinate_transformation_mode,
@@ -1680,8 +1691,6 @@ def check_compatible_size(mode, method, upscale_size, ifm_size):
             self.ifm.shape[1:3],
         ):
             return False
-        if self.rounding_method != "":
-            return False
         if self.out_dtype and self.out_dtype != "int8":
             return False
         return True
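Because `and` binds tighter than `or` in Python, the new compound condition rejects exactly the cases where the rounding method does not match the coordinate-transformation mode: half_pixel requires round_prefer_ceil, every other mode requires an empty rounding method. A small sketch restating it as a positive check; the helper name is mine, not from the diff:

def rounding_method_ok(mode, rounding_method):
    """Positively-phrased equivalent of the rejection condition above."""
    if mode == "half_pixel":
        return rounding_method == "round_prefer_ceil"
    return rounding_method == ""

assert rounding_method_ok("half_pixel", "round_prefer_ceil")
assert rounding_method_ok("asymmetric", "")
assert not rounding_method_ok("align_corners", "round_prefer_ceil")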
