Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
91 changes: 88 additions & 3 deletions python/tvm/relay/op/strategy/hexagon.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,6 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

"""Definition of Hexagon operator strategy."""

# pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import
Expand All @@ -24,6 +23,21 @@
from .. import op as _op


# --- Op strategy registration


@batch_matmul_strategy.register("hexagon")
def batch_matmul_strategy_hexagon(attrs, inputs, out_type, target):
    """batch_matmul strategy for Hexagon.

    Registers the generic topi batch_matmul compute paired with the
    Hexagon auto-inline schedule. Renamed from ``*_cpu`` to ``*_hexagon``
    for consistency with the sibling strategies in this file
    (``dense_strategy_hexagon``, ``softmax_strategy_hexagon``); callers
    reach this function through the strategy registry, not by name.
    """
    strategy = _op.OpStrategy()
    strategy.add_implementation(
        wrap_compute_batch_matmul(topi.nn.batch_matmul),
        wrap_topi_schedule(topi.hexagon.schedule_batch_matmul),
        name="batch_matmul.hexagon",
    )
    return strategy


@conv2d_strategy.register("hexagon")
def conv2d_strategy_hexagon(attrs, inputs, out_type, target):
"""Conv2d strategy for Hexagon"""
Expand All @@ -35,10 +49,81 @@ def conv2d_strategy_hexagon(attrs, inputs, out_type, target):
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_nhwc),
wrap_topi_schedule(topi.hexagon.schedule_conv2d_nhwc),
name="conv2d.hexagon",
name="conv2d_nhwc.hexagon",
)
return strategy

if data_layout == "NCHW" and kernel_layout == "OIHW":
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_nchw),
wrap_topi_schedule(topi.hexagon.schedule_conv2d_nchw),
name="conv2d_nchw.hexagon",
)
return strategy

raise RuntimeError(
"Unsupported layouts: data_layout:{}, kernel_layout:{}".format(data_layout, kernel_layout)
f"Unsupported layouts: data_layout:{data_layout}, kernel_layout:{kernel_layout}, "
f"groups:{attrs.groups}"
)


@dense_strategy.register("hexagon")
def dense_strategy_hexagon(attrs, inputs, out_type, target):
    """Dense strategy for Hexagon: generic topi compute + Hexagon schedule."""
    out_strategy = _op.OpStrategy()
    out_strategy.add_implementation(
        compute=wrap_compute_dense(topi.nn.dense),
        schedule=wrap_topi_schedule(topi.hexagon.schedule_dense),
        name="dense.hexagon",
    )
    return out_strategy


@softmax_strategy.register("hexagon")
def softmax_strategy_hexagon(attrs, inputs, out_type, target):
    """Softmax strategy for Hexagon: generic topi compute + Hexagon schedule."""
    out_strategy = _op.OpStrategy()
    out_strategy.add_implementation(
        compute=wrap_compute_softmax(topi.nn.softmax),
        schedule=wrap_topi_schedule(topi.hexagon.schedule_softmax),
        name="softmax.hexagon",
    )
    return out_strategy


# --- Op schedule registration


@schedule_adaptive_pool.register("hexagon")
def schedule_adaptive_pool_hexagon(attrs, outs, target):
    """Build the Hexagon schedule for adaptive pool ops under the target context."""
    with target:
        sch = topi.hexagon.schedule_adaptive_pool(outs)
    return sch


@schedule_concatenate.register("hexagon")
def schedule_concatenate_hexagon(attrs, outs, target):
    """Build the Hexagon schedule for concatenate ops under the target context.

    Concatenate is injective, so it reuses the injective schedule.
    """
    with target:
        sch = topi.hexagon.schedule_injective(outs)
    return sch


@schedule_injective.register("hexagon")
def schedule_injective_hexagon(attrs, outs, target):
    """Build the Hexagon schedule for injective ops under the target context."""
    with target:
        sch = topi.hexagon.schedule_injective(outs)
    return sch


@schedule_pool.register("hexagon")
def schedule_pool_hexagon(attrs, outs, target):
    """Build the Hexagon schedule for pool ops under the target context."""
    with target:
        sch = topi.hexagon.schedule_pool(outs)
    return sch


@schedule_reduce.register("hexagon")
def schedule_reduce_hexagon(attrs, outs, target):
    """Build the Hexagon schedule for reduction ops under the target context."""
    with target:
        sch = topi.hexagon.schedule_reduce(outs)
    return sch
5 changes: 5 additions & 0 deletions python/tvm/topi/hexagon/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,4 +19,9 @@

# pylint: disable=wildcard-import

from .batch_matmul import *
from .conv2d import *
from .dense import *
from .injective import *
from .pooling import *
from .reduce import *
40 changes: 40 additions & 0 deletions python/tvm/topi/hexagon/batch_matmul.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

"""Schedule for composition of batch_matmul operator"""

import tvm


def schedule_batch_matmul(outs):
    """Schedule for batch_matmul op.

    Creates a default TE schedule over the outputs and auto-inlines
    every injective stage.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of batch_matmul in the format
        of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    if isinstance(outs, tvm.te.tensor.Tensor):
        outs = [outs]
    sch = tvm.te.create_schedule([tensor.op for tensor in outs])
    tvm.te.schedule.AutoInlineInjective(sch)
    return sch
32 changes: 30 additions & 2 deletions python/tvm/topi/hexagon/conv2d.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,12 +15,40 @@
# specific language governing permissions and limitations
# under the License.

""" Schedules for conv2d. """
"""Schedule for conv2d"""

import tvm


def schedule_conv2d_nhwc(outs):
    """Schedule for conv2d NHWC operator.

    Creates a default TE schedule over the outputs and auto-inlines
    every injective stage.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of conv2d in the format
        of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    if isinstance(outs, tvm.te.tensor.Tensor):
        outs = [outs]
    sch = tvm.te.create_schedule([tensor.op for tensor in outs])
    tvm.te.schedule.AutoInlineInjective(sch)
    return sch


def schedule_conv2d_nchw(outs):
    """Schedule for conv2d NCHW operator.

    Currently identical to the NHWC case — both only auto-inline
    injective stages — so this delegates to ``schedule_conv2d_nhwc``.
    """
    return schedule_conv2d_nhwc(outs)


def schedule_conv2d(outs, layout="NHWC"):
    """Dispatch a conv2d schedule by data layout.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of conv2d.

    layout: str
        Data layout, matched case-insensitively; only "NHWC" and
        "NCHW" are supported.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.

    Raises
    ------
    ValueError
        If *layout* is neither "NHWC" nor "NCHW".
    """
    normalized = layout.casefold()
    if normalized == "nhwc":
        return schedule_conv2d_nhwc(outs)
    if normalized == "nchw":
        return schedule_conv2d_nchw(outs)
    raise ValueError(f"Unexpected layout={layout}")
40 changes: 40 additions & 0 deletions python/tvm/topi/hexagon/dense.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

"""Schedule for dense operator"""

import tvm


def schedule_dense(outs):
    """Schedule for dense op.

    Creates a default TE schedule over the outputs and auto-inlines
    every injective stage.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of dense in the format
        of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    if isinstance(outs, tvm.te.tensor.Tensor):
        outs = [outs]
    sch = tvm.te.create_schedule([tensor.op for tensor in outs])
    tvm.te.schedule.AutoInlineInjective(sch)
    return sch
44 changes: 44 additions & 0 deletions python/tvm/topi/hexagon/injective.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

"""Schedule for injective operators"""

import tvm


def schedule_injective(outs):
    """Schedule for injective op.

    Creates a default TE schedule over the outputs and auto-inlines
    every injective stage.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of the injective op in the
        format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    if isinstance(outs, tvm.te.tensor.Tensor):
        outs = [outs]
    sch = tvm.te.create_schedule([tensor.op for tensor in outs])
    tvm.te.schedule.AutoInlineInjective(sch)
    return sch


def schedule_softmax(outs):
    """Schedule for softmax op; reuses the generic injective schedule."""
    return schedule_injective(outs)
47 changes: 47 additions & 0 deletions python/tvm/topi/hexagon/pooling.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

"""Schedule for pooling operators"""

import tvm


def schedule_pool(outs, layout="NHWC"):  # pylint: disable=unused-argument
    """Schedule for pooling op.

    Creates a default TE schedule over the outputs and auto-inlines
    every injective stage.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of the pooling op in the
        format of an array of tensors.

    layout: str
        The tensor layout (currently unused by this generic schedule).

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    if isinstance(outs, tvm.te.tensor.Tensor):
        outs = [outs]
    sch = tvm.te.create_schedule([tensor.op for tensor in outs])
    tvm.te.schedule.AutoInlineInjective(sch)
    return sch


def schedule_adaptive_pool(outs):
    """Schedule for adaptive pooling ops; reuses the generic pool schedule."""
    return schedule_pool(outs)
40 changes: 40 additions & 0 deletions python/tvm/topi/hexagon/reduce.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

"""Schedule for composition of reduction operator"""

import tvm


def schedule_reduce(outs):
    """Schedule for reduction op.

    Creates a default TE schedule over the outputs and auto-inlines
    every injective stage.

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of the reduction op in the
        format of an array of tensors.

    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    if isinstance(outs, tvm.te.tensor.Tensor):
        outs = [outs]
    sch = tvm.te.create_schedule([tensor.op for tensor in outs])
    tvm.te.schedule.AutoInlineInjective(sch)
    return sch