Skip to content

Commit 5d05eed

Browse files
author
Ashutosh Parkhi
committed
Added a few tests for scalar_to_tensor_constant pass
Change-Id: Ib916e4b29be124ccdf4c88936f10499b6642f141
1 parent ecef7bd commit 5d05eed

File tree

5 files changed

+189
-16
lines changed

5 files changed

+189
-16
lines changed

python/tvm/relay/op/contrib/cmsisnn.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -56,10 +56,8 @@ def partition_for_cmsisnn(mod, params=None, **opts):
5656
transform.MergeComposite(pattern_table()),
5757
transform.AnnotateTarget("cmsis-nn"),
5858
transform.PartitionGraph(),
59-
transform.InferType(),
6059
GenerateCMSISNNConstants(),
6160
ScalarToTensorConstants(),
62-
transform.InferType(),
6361
ExtractConstantsFromPartitionedFunction(),
6462
transform.InferType(),
6563
]

src/relay/backend/contrib/cmsisnn/extract_constants.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -229,7 +229,7 @@ transform::Pass ExtractConstantsFromPartitionedFunction() {
229229
runtime::TypedPackedFunc<IRModule(IRModule, transform::PassContext)> pass_func =
230230
[=](IRModule m, transform::PassContext pc) { return ExtractConstants(m); };
231231
return tvm::transform::CreateModulePass(pass_func, 0, "ExtractConstantsFromPartitionedFunction",
232-
{});
232+
{"InferType"});
233233
}
234234

235235
TVM_REGISTER_GLOBAL("relay.ext.cmsisnn.transform.ExtractConstantsFromPartitionedFunction")

src/relay/backend/contrib/cmsisnn/scalar_to_tensor_constant.cc

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -71,9 +71,6 @@ class ScalarToTensorConstantMutator : public MixedModeMutator {
7171
// in the binary op. This needs to be done only when one of the arguments is a scalar.
7272
if (auto* opnode = call->op.as<OpNode>()) {
7373
String op_name = opnode->name;
74-
if (op_name != "qnn.mul" && op_name != "qnn.add") {
75-
return final_call;
76-
}
7774
Array<Expr> new_args;
7875
for (uint32_t i = 0; i < call->args.size(); ++i) {
7976
Expr arg = call->args[i];
@@ -179,7 +176,7 @@ IRModule ScalarToTensorConstant(const IRModule& mod) {
179176
transform::Pass ScalarToTensorConstantPass() {
180177
runtime::TypedPackedFunc<IRModule(IRModule, transform::PassContext)> pass_func =
181178
[=](IRModule m, transform::PassContext pc) { return ScalarToTensorConstant(m); };
182-
return tvm::transform::CreateModulePass(pass_func, 0, "ScalarToTensorConstant", {});
179+
return tvm::transform::CreateModulePass(pass_func, 0, "ScalarToTensorConstant", {"InferType"});
183180
}
184181

185182
TVM_REGISTER_GLOBAL("relay.ext.cmsisnn.transform.ScalarToTensorConstants")

tests/python/contrib/test_cmsisnn/test_extract_constants.py

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -23,15 +23,6 @@
2323
import tvm
2424
from tvm import relay
2525

26-
from utils import (
27-
make_module,
28-
count_num_calls,
29-
get_range_for_dtype_str,
30-
get_same_padding,
31-
get_conv2d_qnn_params,
32-
make_qnn_relu,
33-
)
34-
3526
tvm._ffi._init_api("relay.ext.cmsisnn.transform", __name__)
3627

3728

Lines changed: 187 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,187 @@
1+
# Licensed to the Apache Software Foundation (ASF) under one
2+
# or more contributor license agreements. See the NOTICE file
3+
# distributed with this work for additional information
4+
# regarding copyright ownership. The ASF licenses this file
5+
# to you under the Apache License, Version 2.0 (the
6+
# "License"); you may not use this file except in compliance
7+
# with the License. You may obtain a copy of the License at
8+
#
9+
# http://www.apache.org/licenses/LICENSE-2.0
10+
#
11+
# Unless required by applicable law or agreed to in writing,
12+
# software distributed under the License is distributed on an
13+
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14+
# KIND, either express or implied. See the License for the
15+
# specific language governing permissions and limitations
16+
# under the License.
17+
18+
"""CMSIS-NN integration tests: scalar_to_tensor_constant pass"""
19+
import itertools
20+
import math
21+
import numpy as np
22+
import pytest
23+
import tvm
24+
from tvm import relay
25+
26+
tvm._ffi._init_api("relay.ext.cmsisnn.transform", __name__)
27+
28+
29+
class CheckFunctionsForConstants(tvm.relay.ExprVisitor):
    """Relay expression visitor that counts non-scalar constant arguments in Call nodes."""

    def __init__(self):
        super().__init__()
        # Running total of tensor (ndim > 0) Constant arguments seen in visited Calls.
        self.num_constants_ = 0

    def visit_call(self, call):
        # Recurse first so nested calls are also inspected.
        super().visit_call(call)
        self.num_constants_ += sum(
            1
            for arg in call.args
            if isinstance(arg, relay.Constant) and arg.data.numpy().ndim > 0
        )

    def check_num_constants(self, func):
        assert self.num_constants_ == 0, "Functions should not have constant arguments in Calls"
42+
43+
44+
def set_external_func_attr(func, compiler, ext_symbol):
    """Mark *func* as an externally compiled primitive handled by *compiler*."""
    attributes = {
        "Primitive": tvm.tir.IntImm("int32", 1),
        "Compiler": compiler,
        "global_symbol": ext_symbol,
    }
    for key, value in attributes.items():
        func = func.with_attr(key, value)
    return func
49+
50+
51+
def set_composite_func_attr(func, name):
    """Tag *func* as a composite function matching pattern *name*."""
    return func.with_attr("Composite", name)
54+
55+
56+
@tvm.testing.requires_cmsisnn
def test_single_scalar_position_0():
    """A scalar constant in argument position 0 must become a tensor constant."""
    # Composite function: scalar + (8, 8) tensor.
    x0 = relay.var("x0", shape=None)
    x1 = relay.var("x1", shape=(8, 8))
    composite_func = relay.Function([x0, x1], x0 + x1, relay.TensorType((8, 8), "float32"))
    composite_func = set_composite_func_attr(composite_func, "cmsis-nn.qnn_add")

    # External function calls the composite with a scalar constant at position 0.
    y0 = relay.expr.const(3, "float32")
    y1 = relay.var("y1", shape=(8, 8))
    composite_call = relay.Call(composite_func, [y0, y1])
    ext_func = relay.Function([y1], composite_call, relay.TensorType((8, 8), "float32"))

    ext_var = relay.GlobalVar("external_function")
    ext_func = set_external_func_attr(ext_func, "cmsis-nn", ext_var.name_hint)

    # Main function dispatches into the external function.
    x = relay.var("x", shape=(8, 8))
    main_func = relay.Function(
        [x], relay.Call(ext_var, [x]), relay.TensorType((8, 8), "float32")
    )
    main_var = relay.GlobalVar("main")

    mod = tvm.IRModule()
    mod[ext_var] = ext_func
    mod[main_var] = main_func

    mod = relay.transform.InferType()(mod)
    mod = ScalarToTensorConstants()(mod)

    visitor = CheckFunctionsForConstants()
    visitor.visit_call(mod[ext_var].body)
    assert visitor.num_constants_ == 1, "Scalar constant wasn't converted into tensor constant"
87+
88+
89+
@tvm.testing.requires_cmsisnn
def test_single_scalar_position_1():
    """A scalar constant in argument position 1 must become a tensor constant."""
    # Composite function: (8, 8) tensor + scalar.
    x0 = relay.var("x0", shape=(8, 8))
    x1 = relay.var("x1", shape=None)
    composite_func = relay.Function([x0, x1], x0 + x1, relay.TensorType((8, 8), "float32"))
    composite_func = set_composite_func_attr(composite_func, "cmsis-nn.qnn_add")

    # External function calls the composite with a scalar constant at position 1.
    y0 = relay.var("y0", shape=(8, 8))
    y1 = relay.expr.const(3, "float32")
    composite_call = relay.Call(composite_func, [y0, y1])
    ext_func = relay.Function([y0], composite_call, relay.TensorType((8, 8), "float32"))

    ext_var = relay.GlobalVar("external_function")
    ext_func = set_external_func_attr(ext_func, "cmsis-nn", ext_var.name_hint)

    # Main function dispatches into the external function.
    x = relay.var("x", shape=(8, 8))
    main_func = relay.Function(
        [x], relay.Call(ext_var, [x]), relay.TensorType((8, 8), "float32")
    )
    main_var = relay.GlobalVar("main")

    mod = tvm.IRModule()
    mod[ext_var] = ext_func
    mod[main_var] = main_func

    mod = relay.transform.InferType()(mod)
    mod = ScalarToTensorConstants()(mod)

    visitor = CheckFunctionsForConstants()
    visitor.visit_call(mod[ext_var].body)
    assert visitor.num_constants_ == 1, "Scalar constant wasn't converted into tensor constant"
120+
121+
122+
@tvm.testing.requires_cmsisnn
def test_two_scalars():
    """When both arguments are scalars, neither should be turned into a tensor constant."""
    x1 = relay.var("x1", shape=None)
    x2 = relay.var("x2", shape=None)
    z1 = x1 + x2
    lf = relay.Function([x1, x2], z1, relay.TensorType((), "float32"))
    lf = set_composite_func_attr(lf, "cmsis-nn.qnn_add")

    # Both composite-call arguments are scalar constants.
    y0 = relay.expr.const(5, "float32")
    y1 = relay.expr.const(3, "float32")
    c0 = relay.Call(lf, [y0, y1])
    ef = relay.Function([], c0, relay.TensorType((), "float32"))

    ev = relay.GlobalVar("external_function")
    ef = set_external_func_attr(ef, "cmsis-nn", ev.name_hint)
    c = relay.Call(ev, [])
    mf = relay.Function([], c, relay.TensorType((), "float32"))
    mv = relay.GlobalVar("main")

    mod = tvm.IRModule()
    mod[ev] = ef
    mod[mv] = mf

    mod = relay.transform.InferType()(mod)
    mod = ScalarToTensorConstants()(mod)
    check_for_constants = CheckFunctionsForConstants()
    check_for_constants.visit_call(mod[ev].body)
    # The pass must leave scalar-only ops alone: no tensor constants expected.
    assert (
        check_for_constants.num_constants_ == 0
    ), "Scalar constants should not have been converted into tensor constants"
152+
153+
154+
@tvm.testing.requires_cmsisnn
def test_two_tensor_constants():
    """Existing tensor constants must pass through the pass unchanged."""
    x0 = relay.var("x0", shape=(8, 8))
    x1 = relay.var("x1", shape=(8, 8))
    z1 = x0 + x1
    lf = relay.Function([x0, x1], z1, relay.TensorType((8, 8), "float32"))
    lf = set_composite_func_attr(lf, "cmsis-nn.qnn_add")

    # Both composite-call arguments are already (8, 8) tensor constants.
    y0 = relay.const(np.random.uniform(0, 1, (8, 8)).astype("float32"), "float32")
    y1 = relay.const(np.random.uniform(0, 1, (8, 8)).astype("float32"), "float32")
    c0 = relay.Call(lf, [y0, y1])
    ef = relay.Function([], c0, relay.TensorType((8, 8), "float32"))

    ev = relay.GlobalVar("external_function")
    ef = set_external_func_attr(ef, "cmsis-nn", ev.name_hint)
    c = relay.Call(ev, [])
    mf = relay.Function([], c, relay.TensorType((8, 8), "float32"))
    mv = relay.GlobalVar("main")

    mod = tvm.IRModule()
    mod[ev] = ef
    mod[mv] = mf

    mod = relay.transform.InferType()(mod)
    mod = ScalarToTensorConstants()(mod)
    check_for_constants = CheckFunctionsForConstants()
    check_for_constants.visit_call(mod[ev].body)
    # Both tensor constants must survive untouched.
    assert (
        check_for_constants.num_constants_ == 2
    ), "Tensor constants should not have been modified by the pass"
184+
185+
186+
if __name__ == "__main__":
    # `sys` is not imported at module scope; import it here so running this
    # file as a script does not raise NameError.
    import sys

    sys.exit(pytest.main([__file__] + sys.argv[1:]))

0 commit comments

Comments
 (0)