66 changes: 66 additions & 0 deletions tests/python/contrib/test_hexagon/pytest_util.py
@@ -59,6 +59,22 @@ def get_single_param_chunk(param_val, param_desc: Optional[str]):
val_str = "F"
need_prefix_separator = True

elif type(param_val) == TensorContentConstant:
val_str = f"const({param_val.elem_value})"
need_prefix_separator = True

elif type(param_val) == TensorContentDtypeMin:
val_str = "min"
need_prefix_separator = True

elif type(param_val) == TensorContentDtypeMax:
val_str = "max"
need_prefix_separator = True

elif type(param_val) == TensorContentRandom:
val_str = "random"
need_prefix_separator = True

else:
val_str = str(param_val)
need_prefix_separator = True
@@ -91,3 +107,53 @@ def get_multitest_ids(
get_test_id(*single_test_param_list, test_param_descs=param_descs)
for single_test_param_list in multitest_params_list
]


def get_numpy_dtype_info(np_dtype_name: str) -> Union[np.finfo, np.iinfo]:
"""
Return an appropriate 'np.iinfo' or 'np.finfo' object corresponding to
the specified dtype.
"""
np_dtype = np.dtype(np_dtype_name)
kind = np_dtype.kind

if kind == "f":
return np.finfo(np_dtype_name)
elif kind == "i":
return np.iinfo(np_dtype_name)
else:
raise TypeError(
f"np_dtype_name ({np_dtype_name}) must indicate some floating-point or integral data type"
)


TensorContentConstant = collections.namedtuple("TensorContentConstant", ["elem_value"])
TensorContentRandom = collections.namedtuple("TensorContentRandom", [])
TensorContentDtypeMin = collections.namedtuple("TensorContentDtypeMin", [])
TensorContentDtypeMax = collections.namedtuple("TensorContentDtypeMax", [])


def create_populated_numpy_ndarray(
input_shape: Union[list, tuple], dtype: str, input_tensor_populator
) -> np.ndarray:
"""
Create a numpy tensor with the specified shape, dtype, and content.
"""
itp = input_tensor_populator # just for brevity

if type(itp) == TensorContentConstant:
return np.full(tuple(input_shape), itp.elem_value, dtype=dtype)

elif type(itp) == TensorContentDtypeMin:
info = get_numpy_dtype_info(dtype)
return np.full(tuple(input_shape), info.min, dtype=dtype)

elif type(itp) == TensorContentDtypeMax:
info = get_numpy_dtype_info(dtype)
return np.full(tuple(input_shape), info.max, dtype=dtype)

elif type(itp) == TensorContentRandom:
return np.random.random(input_shape).astype(dtype)

else:
raise ValueError(f"Unexpected input_tensor_populator type: {type(itp)}")
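
Taken together, the new helpers let a test declare its input contents instead of hard-coding np.random. A minimal standalone sketch of how they compose (the absolute import path below is an assumption for illustration; within the suite they are reached through the relative import shown in test_avg_pool2d_slice.py further down):

import numpy as np

from tests.python.contrib.test_hexagon.pytest_util import (  # path assumed for illustration
    TensorContentConstant,
    TensorContentDtypeMax,
    TensorContentRandom,
    create_populated_numpy_ndarray,
)

# Every element set to the given constant.
const_arr = create_populated_numpy_ndarray([2, 3], "float16", TensorContentConstant(1.5))

# Every element set to the dtype's maximum (np.finfo("float16").max here).
max_arr = create_populated_numpy_ndarray([2, 3], "float16", TensorContentDtypeMax())

# Uniform samples from [0, 1), cast to the requested dtype.
rand_arr = create_populated_numpy_ndarray([2, 3], "float16", TensorContentRandom())

assert const_arr.shape == (2, 3) and const_arr.dtype == np.float16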
30 changes: 26 additions & 4 deletions tests/python/contrib/test_hexagon/topi/test_avg_pool2d_slice.py
@@ -18,7 +18,6 @@
import pytest
import numpy as np
from typing import *
import collections

from tvm import te
import tvm.testing
@@ -27,7 +26,14 @@
from tvm.contrib.hexagon.session import Session
import tvm.topi.hexagon.slice_ops as sl
from ..infrastructure import allocate_hexagon_array, transform_numpy
from ..pytest_util import get_multitest_ids
from ..pytest_util import (
get_multitest_ids,
create_populated_numpy_ndarray,
TensorContentConstant,
TensorContentRandom,
TensorContentDtypeMin,
TensorContentDtypeMax,
)


input_layout = tvm.testing.parameter(
@@ -36,8 +42,8 @@


@tvm.testing.fixture
def input_np(input_shape, dtype):
return np.random.random(input_shape).astype(dtype)
def input_np(input_shape, dtype: str, input_tensor_populator):
return create_populated_numpy_ndarray(input_shape, dtype, input_tensor_populator)


@tvm.testing.fixture
@@ -61,6 +67,7 @@ class TestAvgPool2dSlice:
"cnt_padded", # count_include_pad
"out_layout", # output_layout
None, # dtype
None, # input_tensor_populator
]

_multitest_params = [
@@ -74,6 +81,7 @@ class TestAvgPool2dSlice:
True,
"nhwc-8h2w32c2w-2d",
"float16",
TensorContentRandom(),
),
(
[1, 16, 16, 32],
@@ -85,6 +93,7 @@ class TestAvgPool2dSlice:
True,
"nhwc-8h2w32c2w-2d",
"float16",
TensorContentRandom(),
),
(
[1, 8, 8, 32],
@@ -96,6 +105,7 @@ class TestAvgPool2dSlice:
True,
"nhwc-8h2w32c2w-2d",
"float16",
TensorContentRandom(),
),
# Test non-one stride and dilation
(
@@ -108,6 +118,7 @@ class TestAvgPool2dSlice:
True,
"nhwc-8h2w32c2w-2d",
"float16",
TensorContentRandom(),
),
(
[1, 8, 8, 32],
@@ -119,6 +130,7 @@ class TestAvgPool2dSlice:
True,
"nhwc-8h2w32c2w-2d",
"float16",
TensorContentRandom(),
),
(
[1, 8, 8, 32],
@@ -130,6 +142,7 @@ class TestAvgPool2dSlice:
True,
"nhwc-8h2w32c2w-2d",
"float16",
TensorContentRandom(),
),
# Test non-zero padding
(
@@ -142,6 +155,7 @@ class TestAvgPool2dSlice:
True,
"nhwc-8h2w32c2w-2d",
"float16",
TensorContentRandom(),
),
(
[1, 8, 8, 32],
@@ -153,6 +167,7 @@ class TestAvgPool2dSlice:
True,
"nhwc-8h2w32c2w-2d",
"float16",
TensorContentRandom(),
),
(
[1, 8, 8, 32],
@@ -164,6 +179,7 @@ class TestAvgPool2dSlice:
True,
"nhwc-8h2w32c2w-2d",
"float16",
TensorContentRandom(),
),
(
[1, 8, 8, 32],
@@ -175,6 +191,7 @@ class TestAvgPool2dSlice:
True,
"nhwc-8h2w32c2w-2d",
"float16",
TensorContentRandom(),
),
# Test n11c-1024c-2d layout which will require input and output to have different layout
(
@@ -187,6 +204,7 @@ class TestAvgPool2dSlice:
True,
"n11c-1024c-2d",
"float16",
TensorContentRandom(),
),
(
[1, 1, 1, 2048],
@@ -198,6 +216,7 @@ class TestAvgPool2dSlice:
True,
"n11c-1024c-2d",
"float16",
TensorContentRandom(),
),
(
[1, 1, 1, 2048],
@@ -209,6 +228,7 @@ class TestAvgPool2dSlice:
True,
"n11c-1024c-2d",
"float16",
TensorContentRandom(),
),
(
[1, 1, 1, 2048],
@@ -220,6 +240,7 @@ class TestAvgPool2dSlice:
True,
"n11c-1024c-2d",
"float16",
TensorContentRandom(),
),
]

@@ -236,6 +257,7 @@ class TestAvgPool2dSlice:
count_include_pad,
output_layout,
dtype,
input_tensor_populator,
) = tvm.testing.parameters(*_multitest_params, ids=_param_ids)
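
With input_tensor_populator threaded through tvm.testing.parameters, pinning a case to a deterministic input becomes a one-field change per row. A hypothetical additional row using a constant fill might look like this (every value is chosen purely for illustration; the leading fields follow the order given in _param_descs):

    (
        [1, 8, 8, 32],  # out_shape
        [3, 3],  # kernel
        [1, 1],  # stride
        [1, 1],  # dilation
        [0, 0, 0, 0],  # padding
        False,  # ceil_mode
        True,  # count_include_pad
        "nhwc-8h2w32c2w-2d",  # out_layout
        "float16",  # dtype
        TensorContentConstant(1.0),  # input_tensor_populator: all elements are 1.0
    ),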

@tvm.testing.fixture