
Commit 4c608be

[Relay] Remove DynamicToStatic pass from graph runtime build (#10691)
Closes #10692. To solve this problem, we can either remove this pass from the `relay.build(...)` pipeline or run `DynamicToStatic` in both the VM and non-VM paths. I propose to remove it because (1) `DynamicToStatic` is normally meant to be applied right after model import, and (2) the only case where running `DynamicToStatic` during `relay.build(...)` helps is when the input is entirely static but a frontend fails to produce a static mod AND the user forgets to run `DynamicToStatic` after model import. I hope the latter case is rare; if not, that is something we should fix on the frontend side. We should avoid relying on the `DynamicToStatic` that runs during `relay.build(...)`, since not all use cases of TVM go through `relay.build(...)` (BYOC, for example).
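For reference, the recommended flow after this change looks roughly like the following: a minimal sketch assuming an ONNX model file and an input shape (both hypothetical); any frontend works the same way.

```python
import onnx
import tvm
from tvm import relay

# Hypothetical model path and input shape, for illustration only.
model = onnx.load("model.onnx")
mod, params = relay.frontend.from_onnx(model, shape={"input": (1, 3, 224, 224)})

# Run DynamicToStatic explicitly right after import, instead of relying on
# relay.build(...) to do it implicitly (it no longer does).
mod = relay.transform.DynamicToStatic()(mod)

lib = relay.build(mod, target="llvm", params=params)
```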

File tree

5 files changed (+16 lines, −19 lines)

python/tvm/relay/frontend/paddlepaddle.py

Lines changed: 1 addition & 1 deletion
@@ -1281,7 +1281,7 @@ def convert_prelu(g, op, block):
             shape = _op.strided_slice(shape_of(x), [0], [1])
         else:
             shape = _op.strided_slice(shape_of(x), [1], [2])
-        alpha = _op.broadcast_to(alpha, shape)
+        alpha = _op.broadcast_to(alpha, fold_constant(shape))
     out = _op.nn.prelu(x, alpha, axis)
     g.add_node(op.output("Out")[0], out)
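For context, `fold_constant` (from `tvm.relay.frontend.common`) collapses the shape expression into a constant when the input shape is static, so `broadcast_to` sees a static target shape. A minimal sketch, assuming a statically shaped input:

```python
from tvm import relay
from tvm.relay.frontend.common import fold_constant

# With a static input shape, strided_slice(shape_of(x)) folds to a constant,
# so broadcast_to no longer depends on a dynamic shape expression.
x = relay.var("x", shape=(1, 3, 32, 32), dtype="float32")
shape = relay.strided_slice(relay.shape_of(x), begin=[1], end=[2])
print(fold_constant(shape))  # a constant [3] rather than a shape_of expression
```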

python/tvm/relay/frontend/pytorch.py

Lines changed: 1 addition & 1 deletion
@@ -641,7 +641,7 @@ def full_impl(self, data, fill_value, dtype):
             tmp.append(_op.cast(_op.expand_dims(dim, axis=0), "int64"))
         size = _op.concatenate(tmp, axis=0)

-        out = _op.full(_expr.const(fill_value), size, dtype=dtype)
+        out = _op.full(_expr.const(fill_value, dtype=dtype), size, dtype=dtype)
         if need_reshape:
             out = _op.reshape(out, new_shape)
         return out
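The `full_impl` fix matters because `relay.const` infers a dtype from the Python value when none is given, which can disagree with the `dtype` passed to `full`. A standalone sketch of the inference behavior (not the frontend code itself):

```python
from tvm import relay

# Without an explicit dtype, relay.const infers one from the Python value
# (float -> "float32", int -> "int32"); passing dtype pins it to the
# requested type so full(...) sees a matching fill constant.
c_inferred = relay.const(1.0)
c_explicit = relay.const(1.0, dtype="float16")
print(c_inferred.data.dtype, c_explicit.data.dtype)  # float32 float16
```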

python/tvm/relay/frontend/tflite.py

Lines changed: 2 additions & 2 deletions
@@ -33,7 +33,7 @@
 from ..backend.name_transforms import sanitize_name
 from .common import ExprTable
 from .common import infer_shape as _infer_shape
-from .common import to_int_list
+from .common import to_int_list, shape_of
 from .tflite_flexbuffer import FlexBufferDecoder

 __all__ = ["from_tflite"]
@@ -846,7 +846,7 @@ def convert_shape(self, op):
         input_tensors = self.get_input_tensors(op)
         assert len(input_tensors) == 1, "input tensors length should be 1"

-        out = _op.shape_of(self.get_tensor_expr(input_tensors[0]))
+        out = shape_of(self.get_tensor_expr(input_tensors[0]))

         return out
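The switch from `_op.shape_of` to the frontend helper means statically known shapes become constants instead of dynamic `shape_of` calls. A rough sketch of the assumed behavior (my approximation, not the actual `common.shape_of` source):

```python
import tvm
from tvm import relay

def shape_of_sketch(x, dtype="int32"):
    # Return a constant when every dimension is static; otherwise fall back
    # to the dynamic shape_of op. Assumes x is a relay.var with a known type.
    dims = x.type_annotation.shape
    if all(isinstance(d, tvm.tir.IntImm) for d in dims):
        return relay.const([int(d) for d in dims], dtype=dtype)
    return relay.shape_of(x, dtype)

print(shape_of_sketch(relay.var("a", shape=(2, 3))))            # constant [2, 3]
print(shape_of_sketch(relay.var("b", shape=(relay.Any(), 3))))  # shape_of(%b)
```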

src/relay/backend/utils.cc

Lines changed: 1 addition & 11 deletions
@@ -226,16 +226,6 @@ Array<Pass> GetPassPrefix(bool is_homegeneous, bool is_vm) {
     // eta expand to support constructors in argument position
     pass_seqs.push_back(transform::EtaExpand(
         /* expand_constructor */ true, /* expand_global_var */ false));
-  } else {
-    // DynamicToStatic runs FoldConstant, which affects SimplifyExpr below.
-    // Task extraction uses the is_vm=true branch, meaning SimplifyExpr sees different
-    // inputs from the ones when invoked via relay.build(...).
-    // This causes workload lookups in ApplyHistoryBest to fail if the lookup depends on
-    // the structural hash of the input relay module (e.g. MetaScheduler).
-    // TODO(masahi): Either remove DynamicToStatic below or always run it
-
-    // Convert Dynamic ops to static versions
-    pass_seqs.push_back(transform::DynamicToStatic());
   }

   PackedFunc fskip = PackedFunc([](TVMArgs args, TVMRetValue* rv) {
@@ -252,12 +242,12 @@ Array<Pass> GetPassPrefix(bool is_homegeneous, bool is_vm) {
     *rv = false;
   });
   pass_seqs.push_back(transform::EliminateCommonSubexpr(fskip));
-  pass_seqs.push_back(transform::SimplifyExpr());
   pass_seqs.push_back(transform::CombineParallelConv2D(3));
   pass_seqs.push_back(transform::CombineParallelDense(3));
   pass_seqs.push_back(transform::CombineParallelBatchMatmul(3));
   pass_seqs.push_back(transform::FoldConstant());
   pass_seqs.push_back(transform::FoldScaleAxis());
+  pass_seqs.push_back(transform::SimplifyExpr());
   pass_seqs.push_back(transform::CanonicalizeCast());
   pass_seqs.push_back(transform::CanonicalizeOps());
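The removed comment is about keeping the two pipelines structurally identical: MetaSchedule looks up tuning records by the structural hash of the module, so any pass-ordering difference between task extraction (VM path) and `relay.build(...)` breaks the lookup. A small illustration of that hash sensitivity (hypothetical expressions, not the failing workload):

```python
import tvm
from tvm import relay

# Two functions that compute the same thing but differ structurally
# (e.g. before/after simplification) hash differently, so a cache keyed
# on structural_hash misses.
x = relay.var("x", shape=(4,), dtype="float32")
f_raw = relay.Function([x], x + relay.const(0.0))
f_simplified = relay.Function([x], x)
print(tvm.ir.structural_hash(f_raw) == tvm.ir.structural_hash(f_simplified))  # False
```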

tests/python/frontend/tensorflow/test_forward.py

Lines changed: 11 additions & 4 deletions
@@ -147,6 +147,7 @@ def run_tvm_graph(
         outputs=out_names,
         convert_config=convert_config,
     )
+
     dev = tvm.device(target, 0)
     if mode == "debug":
         inputs = []
@@ -2421,10 +2422,11 @@ def _test_sparse_to_dense(sparse_indices, sparse_values, default_value, output_s
     )
     oshape = tf.constant(output_shape, shape=output_shape.shape, dtype=str(output_shape.dtype))

+    # Output shape depends on a dynamic input, use VM.
    if default_value == None:
         output = tf.sparse_to_dense(indices, oshape, values)
         compare_tf_with_tvm(
-            [sparse_indices, sparse_values], ["indices:0", "values:0"], output.name
+            [sparse_indices, sparse_values], ["indices:0", "values:0"], output.name, mode="vm"
         )
     else:
         dv = tf.placeholder(shape=(), dtype=str(default_value.dtype), name="default_value")
@@ -2433,6 +2435,7 @@ def _test_sparse_to_dense(sparse_indices, sparse_values, default_value, output_s
             [sparse_indices, sparse_values, default_value],
             ["indices:0", "values:0", "default_value:0"],
             output.name,
+            mode="vm",
         )
@@ -2494,7 +2497,8 @@ def _test_sparse_to_dense_v2(indices, values, A_shape, dtype, default_value=None

     result = tf.sparse.to_dense(A_sp, default_value=default_value)

-    compare_tf_with_tvm([], [], result.name)
+    # The output shape depends on a dynamic input, use VM.
+    compare_tf_with_tvm([], [], result.name, mode="vm")


 def test_forward_sparse_to_dense_v2():
@@ -5572,7 +5576,7 @@ def _test_unique(n, dtype, is_dyn):
     if is_dyn:
         compare_tf_with_tvm(np_data, "in_data:0", ["Unique:0", "Unique:1"], mode="vm")
     else:
-        compare_tf_with_tvm(None, "", ["Unique:0", "Unique:1"])
+        compare_tf_with_tvm(np_data, "", ["Unique:0", "Unique:1"], mode="vm")


 def test_forward_unique():
@@ -5607,7 +5611,10 @@ def _test_unique_with_counts(n, dtype, is_dyn):
         )
     else:
         compare_tf_with_tvm(
-            None, "", ["UniqueWithCounts:0", "UniqueWithCounts:1", "UniqueWithCounts:2"]
+            np_data,
+            "",
+            ["UniqueWithCounts:0", "UniqueWithCounts:1", "UniqueWithCounts:2"],
+            mode="vm",
         )
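These tests switch to `mode="vm"` because ops whose output shape depends on input values can no longer be staticized inside `relay.build(...)`, and the graph executor requires static shapes. A minimal sketch of compiling such an op with the Relay VM, using `argwhere` (whose output shape is data dependent) rather than the test helpers above:

```python
import numpy as np
import tvm
from tvm import relay

# argwhere has a data-dependent output shape, so it must go through the
# Relay VM rather than the graph executor.
x = relay.var("x", shape=(8,), dtype="float32")
mod = tvm.IRModule.from_expr(relay.Function([x], relay.argwhere(x)))

with tvm.transform.PassContext(opt_level=3):
    vm_exec = relay.vm.compile(mod, target="llvm")

vm = tvm.runtime.vm.VirtualMachine(vm_exec, tvm.cpu())
print(vm.invoke("main", np.array([0, 1, 0, 2, 0, 0, 3, 0], dtype="float32")))
```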