@@ -280,9 +280,9 @@ def test_conv2d(remote, dtype, target, trials, executor_type):
         has_activation=composite[2],
     )
     outputs = _build_and_run_network(remote, func, params, inputs, target, executor_type)
-    out_rtol = 1e-1 if dtype == "float16" else 1e-5
+    out_tol = 1e-1 if dtype == "float16" else 1e-5
     tvm.testing.assert_allclose(
-        outputs[0].asnumpy(), outputs[1].asnumpy(), rtol=out_rtol, atol=out_rtol
+        outputs[0].asnumpy(), outputs[1].asnumpy(), rtol=out_tol, atol=out_tol
     )
     args = (shape, kernel_h, kernel_w, pad, stride, dilation, groups, dtype, out_channels)
     exp_codegen = _get_conv_expected_codegen(
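Note on the rename: the variable feeds both `rtol` and `atol` in the same call, so the neutral name `out_tol` is more accurate than `out_rtol`. For reference, `assert_allclose` follows NumPy's tolerance rule, `|actual - desired| <= atol + rtol * |desired|`; a minimal sketch of the check applied above (values illustrative, not from the patch):

```python
import numpy as np

# NumPy's closeness rule, which tvm.testing.assert_allclose follows:
# |actual - desired| <= atol + rtol * |desired|
actual, desired, tol = 1.05, 1.0, 1e-1
assert abs(actual - desired) <= tol + tol * abs(desired)

# The equivalent check via NumPy itself:
np.testing.assert_allclose(actual, desired, rtol=tol, atol=tol)
```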
@@ -373,9 +373,9 @@ def test_conv2d_transpose(remote, dtype, target, trials, executor_type):
     func = relay.Function([x, w], y)
     mod = IRModule.from_expr(func)
     outputs = _build_and_run_network(remote, mod, params, inputs, target, executor_type)
-    out_rtol = 1e-1 if dtype == "float16" else 1e-5
+    out_tol = 1e-1 if dtype == "float16" else 1e-5
     tvm.testing.assert_allclose(
-        outputs[0].asnumpy(), outputs[1].asnumpy(), rtol=out_rtol, atol=out_rtol
+        outputs[0].asnumpy(), outputs[1].asnumpy(), rtol=out_tol, atol=out_tol
     )
     args = (
         dshape,
@@ -425,9 +425,9 @@ def test_batchnorm(remote, dtype, target, trials, executor_type):
425425 "a" : input_arr ,
426426 }
427427 outputs = _build_and_run_network (remote , mod , params , inputs , target , executor_type )
428- out_rtol = 1e-3 if dtype == "float16" else 1e-5
428+ out_tol = 1e-3 if dtype == "float16" else 1e-5
429429 tvm .testing .assert_allclose (
430- outputs [0 ].asnumpy (), outputs [1 ].asnumpy (), rtol = out_rtol , atol = out_rtol
430+ outputs [0 ].asnumpy (), outputs [1 ].asnumpy (), rtol = out_tol , atol = out_tol
431431 )
432432 exp_codegen = [
433433 {
@@ -485,9 +485,9 @@ def test_concat(remote, dtype, target, trials, executor_type):
     func = relay.concatenate((a, b), axis=1)
 
     outputs = _build_and_run_network(remote, func, params, inputs, target, executor_type)
-    out_rtol = 1e-2 if dtype == "float16" else 1e-5
+    out_tol = 1e-2 if dtype == "float16" else 1e-5
     tvm.testing.assert_allclose(
-        outputs[0].asnumpy(), outputs[1].asnumpy(), rtol=out_rtol, atol=out_rtol
+        outputs[0].asnumpy(), outputs[1].asnumpy(), rtol=out_tol, atol=out_tol
     )
 
     exp_codegen = [
@@ -601,9 +601,9 @@ def test_pool(remote, dtype, target, trials, executor_type):
     func = relay.nn.avg_pool2d(a, pool_size=pool_size, strides=stride, padding=padding)
 
     outputs = _build_and_run_network(remote, func, params, inputs, target, executor_type)
-    out_rtol = 1e-2 if dtype == "float16" else 1e-5
+    out_tol = 1e-2 if dtype == "float16" else 1e-5
     tvm.testing.assert_allclose(
-        outputs[0].asnumpy(), outputs[1].asnumpy(), rtol=out_rtol, atol=out_rtol
+        outputs[0].asnumpy(), outputs[1].asnumpy(), rtol=out_tol, atol=out_tol
     )
     args = (input_shape, pool_size, stride, padding, pooling_type, dtype)
     exp_codegen = _get_pool_expected_codegen(*args)
@@ -690,9 +690,9 @@ def _get_model(x_shape, k_shape, has_bias=False):
     def _verify(out, params, inputs, exp_codegen):
         mod = IRModule.from_expr(out)
         outputs = _build_and_run_network(remote, mod, params, inputs, target, executor_type)
-        out_rtol = 1e-1 if dtype == "float16" else 1e-5
+        out_tol = 1e-1 if dtype == "float16" else 1e-5
         tvm.testing.assert_allclose(
-            outputs[0].asnumpy(), outputs[1].asnumpy(), rtol=out_rtol, atol=out_rtol
+            outputs[0].asnumpy(), outputs[1].asnumpy(), rtol=out_tol, atol=out_tol
         )
         verify_codegen(remote, mod, params, exp_codegen, target)
 
@@ -718,9 +718,9 @@ def _get_model(a_shape, b_shape, op_func):
     def _verify(out, params, inputs):
         mod = IRModule.from_expr(out)
         outputs = _build_and_run_network(remote, mod, params, inputs, target, executor_type)
-        out_rtol = 1e-2 if dtype == "float16" else 1e-5
+        out_tol = 1e-2 if dtype == "float16" else 1e-5
         tvm.testing.assert_allclose(
-            outputs[0].asnumpy(), outputs[1].asnumpy(), rtol=out_rtol, atol=out_rtol
+            outputs[0].asnumpy(), outputs[1].asnumpy(), rtol=out_tol, atol=out_tol
         )
         exp_codegen = [
             {
@@ -776,9 +776,9 @@ def _get_model(a_shape, op):
     def _verify(out, params, inputs):
         mod = IRModule.from_expr(out)
         outputs = _build_and_run_network(remote, mod, params, inputs, target, executor_type)
-        out_rtol = 1e-2 if dtype == "float16" else 1e-5
+        out_tol = 1e-2 if dtype == "float16" else 1e-5
         tvm.testing.assert_allclose(
-            outputs[0].asnumpy(), outputs[1].asnumpy(), rtol=out_rtol, atol=out_rtol
+            outputs[0].asnumpy(), outputs[1].asnumpy(), rtol=out_tol, atol=out_tol
         )
 
         exp_codegen = [
@@ -823,12 +823,11 @@ def _get_model(a_shape, block_size):
     def _verify(out, params, inputs):
         mod = IRModule.from_expr(out)
         outputs = _build_and_run_network(remote, mod, params, inputs, target, executor_type)
-        out_rtol = 1e-2 if dtype == "float16" else 1e-5
+        out_tol = 1e-2 if dtype == "float16" else 1e-5
         tvm.testing.assert_allclose(
-            outputs[0].asnumpy(), outputs[1].asnumpy(), rtol=out_rtol, atol=out_rtol
+            outputs[0].asnumpy(), outputs[1].asnumpy(), rtol=out_tol, atol=out_tol
         )
 
-        # Check to make sure these ops are offloaded to CLML instead of TVM.
         exp_codegen = [
             {
                 "attrs": {
@@ -877,12 +876,11 @@ def _get_model(a_shape, scale, align_corners):
     def _verify(out, params, inputs):
         mod = IRModule.from_expr(out)
         outputs = _build_and_run_network(remote, mod, params, inputs, target, executor_type)
-        out_rtol = 1e-2 if dtype == "float16" else 1e-5
+        out_tol = 1e-2 if dtype == "float16" else 1e-5
         tvm.testing.assert_allclose(
-            outputs[0].asnumpy(), outputs[1].asnumpy(), rtol=out_rtol, atol=out_rtol
+            outputs[0].asnumpy(), outputs[1].asnumpy(), rtol=out_tol, atol=out_tol
         )
 
-        # Check to make sure these ops are offloaded to CLML instead of TVM.
         exp_codegen = [
             {
                 "attrs": {
@@ -944,12 +942,11 @@ def _get_model(a_shape, b_shape, a_transpose, b_transpose):
     def _verify(out, params, inputs):
         mod = IRModule.from_expr(out)
         outputs = _build_and_run_network(remote, mod, params, inputs, target, executor_type)
-        out_rtol = 1e-1 if dtype == "float16" else 1e-5
+        out_tol = 1e-1 if dtype == "float16" else 1e-5
         tvm.testing.assert_allclose(
-            outputs[0].asnumpy(), outputs[1].asnumpy(), rtol=out_rtol, atol=out_rtol
+            outputs[0].asnumpy(), outputs[1].asnumpy(), rtol=out_tol, atol=out_tol
        )
 
-        # Check to make sure these ops are offloaded to CLML instead of TVM.
         exp_codegen = [
             {
                 "attrs": {
@@ -1026,20 +1023,30 @@ def _get_model(a_shape, axis):
         params = {}
         return out, params, inputs, axis
 
-    def _verify(out, params, inputs, axis):
+    def _verify(out, params, inputs, axis, out_tol):
         mod = IRModule.from_expr(out)
         outputs = _build_and_run_network(remote, mod, params, inputs, target, executor_type)
-        out_rtol = 1e-1 if dtype == "float16" else 1e-5
         tvm.testing.assert_allclose(
-            outputs[0].asnumpy(), outputs[1].asnumpy(), rtol=out_rtol, atol=out_rtol
+            outputs[0].asnumpy(), outputs[1].numpy(), rtol=out_tol, atol=out_tol
         )
         args = (inputs, dtype, outputs[0].shape, axis)
         exp_codegen = _get_softmax_exp_codegen(*args)
         verify_codegen(remote, mod, params, exp_codegen, target)
 
-    _verify(*(_get_model((1, 5), 1)))
-    _verify(*(_get_model((1, 1000), 1)))
-    _verify(*(_get_model((1, 3), 1)))
+    # 2D tensor test cases
+    _verify(*(_get_model((1, 5), 1)), 1e-3)
+    _verify(*(_get_model((1, 16), 1)), 1e-3)
+    _verify(*(_get_model((1, 1000), -1)), 1e-3)
+
+    # 4D tensor test cases, layout = NCHW
+    _verify(*(_get_model((1, 100, 64, 100), 1)), 1e-3)
+    _verify(*(_get_model((1, 64, 64, 64), 1)), 1e-3)
+    _verify(*(_get_model((1, 5, 3, 4), 1)), 1e-3)
+
+    # 4D tensor test cases, layout = NHWC
+    _verify(*(_get_model((1, 64, 100, 100), 3)), 1e-1)
+    _verify(*(_get_model((1, 100, 100, 100), 3)), 1e-1)
+    _verify(*(_get_model((1, 64, 5, 32), -1)), 1e-1)
 
 
 @pytest.mark.parametrize("dtype", ["float32", "float16"])
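The softmax cases now pass a per-case tolerance into `_verify` instead of deriving one from `dtype`; the NHWC-style cases with a large reduction axis get the looser `1e-1`, presumably because float16 error grows with the size of the axis being reduced (an inference, not stated in the patch). A minimal NumPy reference for softmax along an axis, useful when eyeballing these tolerances (an assumption about what `relay.nn.softmax` computes, not code from the patch):

```python
import numpy as np

def ref_softmax(x, axis):
    # Subtract the per-slice max first so exp() cannot overflow.
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

x = np.random.uniform(-1, 1, (1, 64, 5, 32)).astype("float32")
y = ref_softmax(x, axis=-1)
assert np.allclose(y.sum(axis=-1), 1.0, atol=1e-6)  # each slice sums to one
```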
@@ -1066,9 +1073,9 @@ def _verify(in_shape, scale_h, scale_w):
         )
         mod = IRModule.from_expr(func)
         outputs = _build_and_run_network(remote, mod, params, inputs, target, executor_type)
-        out_rtol = 1e-2 if dtype == "float16" else 1e-5
+        out_tol = 1e-2 if dtype == "float16" else 1e-5
         tvm.testing.assert_allclose(
-            outputs[0].asnumpy(), outputs[1].asnumpy(), rtol=out_rtol, atol=out_rtol
+            outputs[0].asnumpy(), outputs[1].asnumpy(), rtol=out_tol, atol=out_tol
         )
         exp_codegen = [
             {
@@ -1124,9 +1131,9 @@ def _verify(shape, newshape):
         params = {}
         mod = IRModule.from_expr(out)
         outputs = _build_and_run_network(remote, mod, params, inputs, target, executor_type)
-        out_rtol = 1e-3 if dtype == "float16" else 1e-5
+        out_tol = 1e-3 if dtype == "float16" else 1e-5
         tvm.testing.assert_allclose(
-            outputs[0].asnumpy(), outputs[1].asnumpy(), rtol=out_rtol, atol=out_rtol
+            outputs[0].asnumpy(), outputs[1].asnumpy(), rtol=out_tol, atol=out_tol
         )
         exp_codegen = [
             {
@@ -1223,9 +1230,9 @@ def test_pool_global(remote, dtype, target, executor_type, trials):
     func = relay.nn.global_avg_pool2d(a)
     mod = IRModule.from_expr(func)
     outputs = _build_and_run_network(remote, mod, params, inputs, target, executor_type)
-    out_rtol = 1e-3 if dtype == "float16" else 1e-5
+    out_tol = 1e-3 if dtype == "float16" else 1e-5
     tvm.testing.assert_allclose(
-        outputs[0].asnumpy(), outputs[1].asnumpy(), rtol=out_rtol, atol=out_rtol
+        outputs[0].asnumpy(), outputs[1].asnumpy(), rtol=out_tol, atol=out_tol
     )
     args = (input_shape, pooling_type, dtype, outputs[0].shape)
     exp_codegen = _get_pool_global_expected_codegen(*args)
@@ -1241,6 +1248,7 @@ def _get_model(a_shape):
         # Defined the test case with unary operator
         # Single batch_flatten op is failing in native OpenCL
         # Empty TVM mod in VM doesn't pick appropriate cross compiler
+        np.random.seed(0)
         out = relay.nn.relu(a)
         out = relay.nn.batch_flatten(out)
         inputs = {"a": tvm.nd.array(np.random.uniform(-1, 1, a_shape).astype(dtype))}
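Seeding the generator here makes the randomly drawn inputs identical on every run, so a tolerance failure reproduces deterministically instead of depending on the draw. A quick illustration of that determinism (not from the patch):

```python
import numpy as np

np.random.seed(0)
a = np.random.uniform(-1, 1, (1, 8)).astype("float16")
np.random.seed(0)
b = np.random.uniform(-1, 1, (1, 8)).astype("float16")
assert (a == b).all()  # same seed -> identical test inputs across runs
```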
@@ -1250,9 +1258,9 @@ def _get_model(a_shape):
     def _verify(out, params, inputs):
         mod = IRModule.from_expr(out)
         outputs = _build_and_run_network(remote, mod, params, inputs, target, executor_type)
-        out_rtol = 1e-3 if dtype == "float16" else 1e-5
+        out_tol = 1e-3 if dtype == "float16" else 1e-5
         tvm.testing.assert_allclose(
-            outputs[0].asnumpy(), outputs[1].asnumpy(), rtol=out_rtol, atol=out_rtol
+            outputs[0].asnumpy(), outputs[1].asnumpy(), rtol=out_tol, atol=out_tol
         )
         exp_codegen = [
             {