diff --git a/python/tvm/autotvm/tuner/__init__.py b/python/tvm/autotvm/tuner/__init__.py
index dd53862d7dd2..0f849ee73d57 100644
--- a/python/tvm/autotvm/tuner/__init__.py
+++ b/python/tvm/autotvm/tuner/__init__.py
@@ -22,10 +22,7 @@
 """
 from . import callback
-
-from .tuner import Tuner
-
-from .index_based_tuner import GridSearchTuner, RandomTuner
 from .ga_tuner import GATuner
+from .index_based_tuner import GridSearchTuner, RandomTuner
+from .tuner import Tuner
 from .xgboost_tuner import XGBTuner
-from .droplet_turner import DropletTuner
diff --git a/python/tvm/relax/dpl/pattern.py b/python/tvm/relax/dpl/pattern.py
index 4fb08c4635fe..e5670dee4b7e 100644
--- a/python/tvm/relax/dpl/pattern.py
+++ b/python/tvm/relax/dpl/pattern.py
@@ -26,7 +26,7 @@
 import tvm._ffi as tvm_ffi
 from tvm.ir.container import Array
 from tvm.ir.expr import PrimExpr
-from tvm.relay.op import get
+from tvm.ir.op import Op
 
 from ...ir import make_node
 from ...ir.base import Node
@@ -756,7 +756,7 @@ def is_op(op_name: str) -> ExprPattern:
     result: tvm.relax.dpl.ExprPattern
         The resulting ExprPattern
     """
-    op = get(op_name)
+    op = Op.get(op_name)
     return ExprPattern(op)
diff --git a/python/tvm/relay/analysis/sparse_conv2d.py b/python/tvm/relay/analysis/sparse_conv2d.py
index 1862ded831f6..043cff989a33 100644
--- a/python/tvm/relay/analysis/sparse_conv2d.py
+++ b/python/tvm/relay/analysis/sparse_conv2d.py
@@ -21,11 +21,12 @@
 to block sparse model
 """
 from collections import namedtuple
+
 import numpy as np
-import scipy.sparse as sp
+
 import tvm
 
-from . import _ffi_api
+from . import _ffi_api
 
 SparseAnalysisResult = namedtuple(
     "SparseAnalysisResult",
@@ -79,9 +80,11 @@ def process_params(
     """
     # pylint: disable=import-outside-toplevel
-    from tvm.auto_scheduler.search_task import (
+    import scipy.sparse as sp
+
+    from tvm.auto_scheduler.search_task import (  # lazily import to avoid recursive dependency
         register_task_input_buffer,
-    )  # lazily import to avoid recursive dependency
+    )
 
     memo = SparseAnalysisResult(weight_name=[], weight_shape=[])
     weight_names = _search_conv2d_op_weight(expr)
diff --git a/python/tvm/relay/analysis/sparse_dense.py b/python/tvm/relay/analysis/sparse_dense.py
index 3199360592fa..16a724813de8 100644
--- a/python/tvm/relay/analysis/sparse_dense.py
+++ b/python/tvm/relay/analysis/sparse_dense.py
@@ -21,11 +21,12 @@
 to block sparse model
 """
 from collections import namedtuple
+
 import numpy as np
-import scipy.sparse as sp
+
 import tvm
 
-from . import _ffi_api
+from . import _ffi_api
 
 SparseAnalysisResult = namedtuple(
     "SparseAnalysisResult",
@@ -75,9 +76,11 @@ def process_params(expr, params, block_size, sparsity_threshold):
     """
     # pylint: disable=import-outside-toplevel
-    from tvm.auto_scheduler.search_task import (
+    import scipy.sparse as sp
+
+    from tvm.auto_scheduler.search_task import (  # lazily import to avoid recursive dependency
         register_task_input_buffer,
-    )  # lazily import to avoid recursive dependency
+    )
 
     memo = SparseAnalysisResult(weight_name=[], weight_shape=[])
     weight_names = _search_dense_op_weight(expr)
diff --git a/python/tvm/relay/frontend/tensorflow_ops.py b/python/tvm/relay/frontend/tensorflow_ops.py
index e2c3a34252bf..14171afb3d44 100644
--- a/python/tvm/relay/frontend/tensorflow_ops.py
+++ b/python/tvm/relay/frontend/tensorflow_ops.py
@@ -1481,10 +1481,10 @@ def _impl(inputs, attr, params, mod):
 
 
 def _sparse_tensor_dense_add():
-    # Sparse utility from scipy
-    from scipy.sparse import csr_matrix
-
     def _impl(inputs, attr, params, mod):
+        # Sparse utility from scipy
+        from scipy.sparse import csr_matrix
+
         assert (
             len(inputs) == 4
         ), "There should be 4 input tensors [sparse_indices, sparse_values, sparse_shape, dense]."
diff --git a/python/tvm/relay/qnn/op/legalizations.py b/python/tvm/relay/qnn/op/legalizations.py
index ce45b2e75959..81df386fc297 100644
--- a/python/tvm/relay/qnn/op/legalizations.py
+++ b/python/tvm/relay/qnn/op/legalizations.py
@@ -17,7 +17,7 @@
 # pylint: disable=invalid-name, unused-argument
 """Backend QNN related feature registration"""
 import numpy as np
-from scipy import special
+
 import tvm
 from tvm import relay
 from tvm._ffi.base import TVMError
@@ -78,7 +78,6 @@ def hardswish_func(x):
 register_qnn_unary_op_legalize("qnn.sqrt", np.sqrt)
 register_qnn_unary_op_legalize("qnn.rsqrt", lambda arr: 1 / np.sqrt(arr))
 register_qnn_unary_op_legalize("qnn.exp", np.exp)
-register_qnn_unary_op_legalize("qnn.erf", special.erf)
 register_qnn_unary_op_legalize("qnn.sigmoid", lambda arr: 1 / (1 + np.exp(-arr)))
 register_qnn_unary_op_legalize("qnn.hardswish", hardswish_func)
 register_qnn_unary_op_legalize("qnn.tanh", np.tanh)
@@ -86,6 +85,22 @@ def hardswish_func(x):
 register_qnn_unary_op_legalize("qnn.abs", np.abs)
 
 
+@reg.register_qnn_legalize("qnn.erf")
+def _legalize_qnn_erf(attrs, inputs, types):
+    from scipy import special  # pylint: disable=import-outside-toplevel
+
+    return create_integer_lookup_op(
+        input_arg=inputs[0],
+        floating_point_func=special.erf,
+        in_scale=inputs[1],
+        in_zero_point=inputs[2],
+        out_scale=inputs[3],
+        out_zero_point=inputs[4],
+        in_dtype=types[0].dtype,
+        out_dtype=types[0].dtype,
+    )
+
+
 # Default to None. If overridden by target, this will not be run.
 # Generic QNN Conv2D legalization function.
 @tvm.target.generic_func
diff --git a/python/tvm/topi/arm_cpu/qnn_legalize.py b/python/tvm/topi/arm_cpu/qnn_legalize.py
index 2833fbce26f1..dae869fcb7ee 100644
--- a/python/tvm/topi/arm_cpu/qnn_legalize.py
+++ b/python/tvm/topi/arm_cpu/qnn_legalize.py
@@ -30,11 +30,12 @@
 """
 import numpy as np
-from scipy.signal import convolve2d
-from tvm.topi.utils import get_const_tuple
+
 from tvm import nd, relay
-from .qnn_alter_op import prev_ops_match, edit_attrs
+from tvm.topi.utils import get_const_tuple
+
 from ..nn import bias_add_legalize
+from .qnn_alter_op import edit_attrs, prev_ops_match
 
 
 def _compute_fixed_conv2d_outputs(requantize_op):
@@ -112,6 +113,7 @@ def _compute_fixed_depthwise_outputs(requantize_op, fixed_channel_inputs):
     an output channel index, and each value is the value that all entries in that output channel
     will have. If the block has no fixed output channels, this dictionary will be empty.
     """
+    from scipy.signal import convolve2d  # pylint: disable=import-outside-toplevel
 
     bias_add_op = requantize_op.args[0]
     depthwise_op = bias_add_op.args[0]
diff --git a/python/tvm/topi/cuda/sparse.py b/python/tvm/topi/cuda/sparse.py
index 921075601e5a..cd977fb1b868 100644
--- a/python/tvm/topi/cuda/sparse.py
+++ b/python/tvm/topi/cuda/sparse.py
@@ -17,13 +17,12 @@
 """Sparse operators"""
 import numpy as np
-import scipy.sparse as sp
 
 import tvm
 from tvm import relay, te
 
 from .. import nn
-from ..utils import traverse_inline, get_const_tuple, prod, get_const_int, ceil_div
+from ..utils import ceil_div, get_const_int, get_const_tuple, prod, traverse_inline
 from .transform import schedule_transpose_from_existing
 
 
@@ -358,6 +357,8 @@ def schedule_sparse_dense_padded(outs):
 
 def pad_sparse_matrix(matrix, blocksize):
     """Pad rows of sparse matrix matrix so that they are a multiple of blocksize."""
+    import scipy.sparse as sp  # pylint: disable=import-outside-toplevel
+
     assert isinstance(matrix, sp.bsr_matrix)
     new_entries = np.zeros(matrix.shape[0], dtype=matrix.indptr.dtype)
     bsr = matrix.blocksize[0]
@@ -394,6 +395,8 @@ def _alter_sparse_dense_layout(_attrs, inputs, _tinfos, _out_type):
     sparse_dense implementation for one that operates on a padded matrix. We also pad the
     matrix.
     """
+    import scipy.sparse as sp  # pylint: disable=import-outside-toplevel
+
     # TODO(ANSHUMAN87): Handle for sparse_lhs case too
     if (
         isinstance(inputs[1], relay.Constant)
diff --git a/python/tvm/topi/nn/conv3d_transpose.py b/python/tvm/topi/nn/conv3d_transpose.py
index 2d048f432f1b..92bcf6ecadc2 100644
--- a/python/tvm/topi/nn/conv3d_transpose.py
+++ b/python/tvm/topi/nn/conv3d_transpose.py
@@ -18,11 +18,11 @@
 """Transposed 3D convolution operators (sometimes called Deconvolution)."""
 import tvm
 from tvm import te
-from tvm import relay
+
+from ..utils import simplify
 from .dilate import dilate
 from .pad import pad
 from .utils import get_pad_tuple3d
-from ..utils import simplify
 
 
 def conv3d_transpose_ncdhw(Input, Filter, strides, padding, out_dtype, output_padding):
@@ -143,6 +143,8 @@ def conv3d_transpose_legalize(attrs, inputs, types):
     result : tvm.relay.Expr
         The legalized expr
     """
+    from tvm import relay  # pylint: disable=import-outside-toplevel
+
     if attrs["data_layout"] == "NDHWC":
         data, kernel = inputs
         kernel_layout = attrs["kernel_layout"]
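
The common thread in the scipy-related hunks above is moving the import from module
scope into the one function that needs it, so `import tvm` succeeds on installations
without scipy; the `Op.get(op_name)` change likewise swaps the relay-level `get`
helper for the tvm.ir-level operator lookup, dropping a relax-to-relay import edge.
The snippet below is a minimal sketch of the lazy-import pattern only; the function
name, shapes, and module are hypothetical and not part of this diff.

import numpy as np


def to_block_sparse(weights, blocksize):
    """Convert a dense ndarray to scipy BSR form (hypothetical helper).

    scipy is resolved at call time, so importing the enclosing module
    succeeds even when scipy is not installed.
    """
    import scipy.sparse as sp  # pylint: disable=import-outside-toplevel

    return sp.bsr_matrix(weights, blocksize=blocksize)


# Trade-off: a missing dependency now surfaces as an ImportError at the first
# call site rather than at module import time.
print(to_block_sparse(np.zeros((4, 6), dtype="float32"), blocksize=(2, 2)).blocksize)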