This repository has been archived by the owner on Nov 17, 2023. It is now read-only.
Commit
Add np.equal implemented using tvmop
Fix setting DLDataType conversion for boolean ndarray
Add equal_gpu
Fix inputs with different ndims
Fix copying boolean ndarrays across devices
Refactor binary logic op impl by tvm
Add more logic ops
Refactor TVMOpModule::Call to CallEx
Add binary scalar logic op expr and schedule
Add binary scalar logic ops
Add free functions for logic ops
Rebase with master to fix SetDLTensor bug
Fix pylint
Add sum op for boolean ndarrays using tvm op module
Add sum boolean gpu compute
Add bool type support to boolean_mask
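As a rough sketch of what these changes enable (an editorial illustration, not part of the commit; it assumes an MXNet build with the TVM op module enabled, e.g. USE_TVM_OP=1, and only uses ops named in the message above):

from mxnet import np, npx
npx.set_np()

a = np.array([1, 2, 3], dtype='float32')
b = np.array([1, 0, 3], dtype='float32')

mask = np.equal(a, b)   # boolean ndarray produced by the TVM-backed equal kernel
total = np.sum(mask)    # reduce a boolean ndarray to a numeric dtype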
Showing 26 changed files with 1,416 additions and 228 deletions.
@@ -21,3 +21,4 @@
from .utils import assign_by_req, reduce_axes

from . import basic
from . import core
@@ -0,0 +1,18 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

from . import umath, fromnumeric
@@ -0,0 +1,63 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import tvm
from .. import defop
from ..utils import reduce_axes, assign_by_req


def _compute_sum(itype, otype, ndim, reduce1st_dim, req):
    axes = ([reduce1st_dim, 1 - reduce1st_dim] * ndim)[:ndim]
    a = tvm.placeholder([tvm.var() for _ in range(ndim)], name='a', dtype=itype)
    reduce_output = reduce_axes(a, axes, tvm.sum, otype)
    output_placeholder, final_output = assign_by_req(reduce_output, req)
    s = tvm.create_schedule(final_output.op)
    return s, a, output_placeholder, final_output, [reduce_output, final_output]
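To make the alternating axes pattern above concrete (an editorial note, not part of the diff; it assumes reduce_axes treats a 1 in the list as "reduce this axis"):

# For ndim=5 the pattern starts with reduce1st_dim and alternates:
#   reduce1st_dim=1 -> axes == [1, 0, 1, 0, 1]  (dims 0, 2, 4 are reduced)
#   reduce1st_dim=0 -> axes == [0, 1, 0, 1, 0]  (dims 1, 3 are reduced)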

@defop(name='sum_cpu', target='cpu', itype=['bool'],
       otype=['float32', 'float64', 'int32', 'int64'],
       ndim=[5], req=['kWriteTo', 'kAddTo'], reduce1st_dim=[0, 1],
       attrs=["reduce1st_dim", "req"])
def _sum_cpu(itype, otype, ndim, reduce1st_dim, req):
    s, a, output_placeholder, final_output, tensor_list = _compute_sum(
        itype, otype, ndim, reduce1st_dim, req)
    for t in tensor_list:
        axes = [axis for axis in t.op.axis]
        fused = s[t].fuse(*axes)
        s[t].parallel(fused)
    return s, [a, output_placeholder, final_output]


@defop(name='sum_gpu', target='gpu', itype=['bool'],
       otype=['float32', 'float64', 'int32', 'int64'],
       ndim=[5], req=['kWriteTo', 'kAddTo'], reduce1st_dim=[0, 1],
       attrs=["reduce1st_dim", "req"])
def _sum_gpu(itype, otype, ndim, reduce1st_dim, req):
    s, a, output_placeholder, final_output, tensor_list = _compute_sum(
        itype, otype, ndim, reduce1st_dim, req)
    num_threads = 64
    for t in tensor_list:
        block_x = tvm.thread_axis("blockIdx.x")
        thread_x = tvm.thread_axis("threadIdx.x")
        axes = [axis for axis in t.op.axis]
        fused = s[t].fuse(*axes)
        bx, tx = s[t].split(fused, factor=num_threads)
        s[t].bind(bx, block_x)
        s[t].bind(tx, thread_x)
    return s, [a, output_placeholder, final_output]
@@ -0,0 +1,122 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import tvm
from .. import defop, AllTypes

_bin_logic_op_map = {
    'equal': lambda a, b, *idx: a[idx] == b[idx],
    'not_equal': lambda a, b, *idx: a[idx] != b[idx],
    'greater': lambda a, b, *idx: a[idx] > b[idx],
    'less': lambda a, b, *idx: a[idx] < b[idx],
    'greater_equal': lambda a, b, *idx: a[idx] >= b[idx],
    'less_equal': lambda a, b, *idx: a[idx] <= b[idx],
}


def _compute_binary_logic(op, dtype, ndim):
    a = tvm.placeholder([tvm.var() for _ in range(ndim)], dtype=dtype, name='a')
    b = tvm.placeholder([tvm.var() for _ in range(ndim)], dtype=dtype, name='b')
    c = tvm.compute([tvm.var() for _ in range(ndim)],
                    lambda *idx: _bin_logic_op_map[op](a, b, *idx), name='c')
    s = tvm.create_schedule(c.op)
    return s, a, b, c


_bin_logic_cpu_attrs = {
    'compute_func': _compute_binary_logic,
    'target': 'cpu',
    'auto_broadcast': True,
    'itype': AllTypes + ['bool'],
    'ndim': list(range(6))
}

_bin_logic_gpu_attrs = {
    'compute_func': _compute_binary_logic,
    'target': 'gpu',
    'auto_broadcast': True,
    'itype': AllTypes + ['bool'],
    'ndim': list(range(6))
}


def _binary_logic_cpu(compute_func, op, itype, ndim):
    s, a, b, c = compute_func(op, itype, ndim)
    axes = [axis for axis in c.op.axis]
    fused = s[c].fuse(*axes)
    s[c].parallel(fused)
    return s, [a, b, c]


def _binary_logic_gpu(compute_func, op, itype, ndim):
    s, a, b, c = compute_func(op, itype, ndim)
    axes = [axis for axis in c.op.axis]
    fused = s[c].fuse(*axes)
    bx, tx = s[c].split(fused, factor=64)
    s[c].bind(bx, tvm.thread_axis('blockIdx.x'))
    s[c].bind(tx, tvm.thread_axis('threadIdx.x'))
    return s, [a, b, c]


# register binary element-wise logic ops with broadcasting supported
for op_name in _bin_logic_op_map.keys():
    defop(name='{}_cpu'.format(op_name), op=op_name, **_bin_logic_cpu_attrs)(_binary_logic_cpu)
    defop(name='{}_gpu'.format(op_name), op=op_name, **_bin_logic_gpu_attrs)(_binary_logic_gpu)
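One way to inspect a kernel defined above outside of MXNet (an editorial sketch, not part of the commit; it relies only on the TVM 0.x-era API already used in this file):

import tvm

# Build the CPU schedule for the element-wise 'equal' op on 1-D float32
# inputs and print the lowered IR for a quick sanity check.
s, args = _binary_logic_cpu(_compute_binary_logic, 'equal', 'float32', 1)
print(tvm.lower(s, args, simple_mode=True))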

# Note that `b.dtype` is hard-coded as 'float64'.
# We should always promote `a`'s elements to `b.dtype`.
_bin_scalar_logic_op_map = {
    'equal_scalar': lambda a, b, *idx: a[idx].astype(b.dtype) == b,
    'not_equal_scalar': lambda a, b, *idx: a[idx].astype(b.dtype) != b,
    'greater_scalar': lambda a, b, *idx: a[idx].astype(b.dtype) > b,
    'less_scalar': lambda a, b, *idx: a[idx].astype(b.dtype) < b,
    'greater_equal_scalar': lambda a, b, *idx: a[idx].astype(b.dtype) >= b,
    'less_equal_scalar': lambda a, b, *idx: a[idx].astype(b.dtype) <= b,
}


def _compute_binary_scalar_logic(op, dtype, ndim):
    a = tvm.placeholder([tvm.var() for _ in range(ndim)], name='a', dtype=dtype)
    b = tvm.var('b', dtype='float64')
    c = tvm.compute([tvm.var() for _ in range(ndim)],
                    lambda *idx: _bin_scalar_logic_op_map[op](a, b, *idx), name='c')
    s = tvm.create_schedule(c.op)
    return s, a, b, c
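A small worked example of why the astype(b.dtype) promotion above matters (an editorial note, not part of the diff):

# With promotion to float64, less_scalar on int32 input [1, 2, 3] and scalar 2.5
# compares 1.0 < 2.5, 2.0 < 2.5, 3.0 < 2.5 and yields [True, True, False],
# matching NumPy. Comparing in the integer input dtype instead would lose the
# fractional part of the scalar and change the result for the middle element.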

_bin_scalar_logic_cpu_attrs = {
    'compute_func': _compute_binary_scalar_logic,
    'target': 'cpu',
    'itype': AllTypes + ['bool'],
    'ndim': list(range(6))
}

_bin_scalar_logic_gpu_attrs = {
    'compute_func': _compute_binary_scalar_logic,
    'target': 'gpu',
    'itype': AllTypes + ['bool'],
    'ndim': list(range(6))
}


# register binary element-wise scalar logic ops
for op_name in _bin_scalar_logic_op_map.keys():
    defop(name='{}_cpu'.format(op_name), op=op_name,
          **_bin_scalar_logic_cpu_attrs)(_binary_logic_cpu)
    defop(name='{}_gpu'.format(op_name), op=op_name,
          **_bin_scalar_logic_gpu_attrs)(_binary_logic_gpu)