bump version
makslevental committed Jul 19, 2023
1 parent 175450f commit 3dee9c5
Showing 10 changed files with 78 additions and 44 deletions.
8 changes: 7 additions & 1 deletion .github/workflows/test_pypi.yml
@@ -12,20 +12,26 @@ jobs:
     runs-on: ubuntu-20.04
     steps:
       - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
+        with:
+          python-version: '3.11'

       - name: Build wheels
         run: |
           pip wheel -w wheelhouse .
       - uses: actions/upload-artifact@v3
         with:
-          path: ./wheelhouse/*.whl
+          path: ./wheelhouse/mlir_python_utils*.whl

   build_sdist:
     name: Build source distribution
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
+        with:
+          python-version: '3.11'

       - name: Build sdist
         run: pipx run build --sdist
2 changes: 1 addition & 1 deletion .github/workflows/wheels.yml
@@ -29,7 +29,7 @@ jobs:
       - name: Upload wheels
         uses: actions/upload-artifact@v3
         with:
-          path: wheelhouse/*.whl
+          path: wheelhouse/mlir_python_utils*.whl
           name: build_artifact

   upload_wheels:
2 changes: 1 addition & 1 deletion README.md
@@ -15,8 +15,8 @@ or for maximum convenience

 ```shell
 $ pip install mlir-python-utils[mlir] \
   -i https://test.pypi.org/simple \
-  -f https://github.com/makslevental/mlir-wheels/releases/expanded_assets/latest
+  -f https://github.com/makslevental/mlir-python-utils/releases/expanded_assets/latest
 $ configure-mlir-python-utils mlir
 ```

8 changes: 4 additions & 4 deletions examples/throwaway.py
@@ -18,7 +18,7 @@
 from mlir_utils.dialects import gpu
 from mlir_utils.dialects.ext import func
 from mlir_utils.dialects.ext.arith import constant
-from mlir_utils.types import f64, index
+from mlir_utils.types import f64_t, index_t

 generate_all_upstream_trampolines()
 # from mlir.dialects.scf import WhileOp
@@ -51,11 +51,11 @@
 #
 with mlir_mod_ctx() as ctx:

-    one = constant(1, index)
-    two = constant(2, index)
+    one = constant(1, index_t)
+    two = constant(2, index_t)

     @generate(
-        Tensor[(S, 3, S), f64], dynamic_extents=[one, two], block_args=[index] * 3
+        Tensor[(S, 3, S), f64_t], dynamic_extents=[one, two], block_args=[index_t] * 3
     )
     def demo_fun1(i, j, k):
         one = constant(1.0)
3 changes: 3 additions & 0 deletions mlir_utils/_configuration/module_alias_map.py
@@ -56,6 +56,9 @@ def module_repr(self, module: ModuleType) -> str:

 class AliasedModuleFinder(MetaPathFinder):
     def __init__(self, alias_map: Mapping[str, str]):
+        for k, v in dict(alias_map).items():
+            if k == v:
+                alias_map.pop(k)
         self.alias_map = alias_map

     def find_spec(
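On its own, the new loop in `__init__` just discards identity aliases before the finder is installed, so a `foo -> foo` entry never reaches `find_spec`. A minimal sketch of that effect (hypothetical alias targets, and assuming the module imports cleanly outside the normal configuration flow):

```python
from mlir_utils._configuration.module_alias_map import AliasedModuleFinder

# The identity entry is a no-op alias and is dropped by the new guard in
# __init__; the genuine alias (hypothetical module names) survives.
finder = AliasedModuleFinder({"mlir_utils": "mlir_utils", "mlir.ir": "my_mlir.ir"})
assert finder.alias_map == {"mlir.ir": "my_mlir.ir"}
```

Presumably this avoids registering a meta-path finder whose only job would be to re-resolve modules to themselves.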
16 changes: 13 additions & 3 deletions mlir_utils/dialects/util.py
@@ -1,8 +1,9 @@
 import ctypes
 from functools import wraps
+import inspect

 from mlir.dialects._ods_common import get_op_result_or_value, get_op_results_or_values
-from mlir.ir import InsertionPoint, Value
+from mlir.ir import InsertionPoint, Value, Type


 def get_result_or_results(op):
@@ -53,12 +54,21 @@ def maybe_cast(val: Value):
 def region_op(op_constructor):
     # the decorator itself
     def op_decorator(*args, **kwargs):
-        block_arg_types = kwargs.pop("block_args", [])
         op = op_constructor(*args, **kwargs)

         def builder_wrapper(body_builder):
             # add a block with block args having types ...
-            op.regions[0].blocks.append(*[t for t in block_arg_types])
+            sig = inspect.signature(body_builder)
+            types = [p.annotation for p in sig.parameters.values()]
+            if not (
+                len(types) == len(sig.parameters)
+                and all(isinstance(t, Type) for t in types)
+            ):
+                raise ValueError(
+                    f"for {body_builder=} either missing a type annotation or type annotation isn't a mlir type: {sig}"
+                )
+
+            op.regions[0].blocks.append(*types)
             with InsertionPoint(op.regions[0].blocks[0]):
                 body_builder(
                     *[maybe_cast(a) for a in op.regions[0].blocks[0].arguments]
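The net effect of this hunk: `region_op` no longer takes a `block_args` keyword; the block-argument types are read from the body builder's parameter annotations, and each annotation must be an `mlir.ir.Type`. A standalone sketch of just the annotation-harvesting step (plain Python types stand in for MLIR types so it runs without the bindings):

```python
import inspect


def block_arg_types(body_builder):
    # Mirrors the new builder_wrapper logic: collect one type per parameter
    # from the annotations and reject unannotated parameters up front.
    sig = inspect.signature(body_builder)
    types = [p.annotation for p in sig.parameters.values()]
    if any(t is inspect.Parameter.empty for t in types):
        raise ValueError(f"for {body_builder=} missing a type annotation: {sig}")
    return types


def demo_builder(i: int, j: int, k: float):
    pass


print(block_arg_types(demo_builder))  # [<class 'int'>, <class 'int'>, <class 'float'>]
```

In the real decorator the resulting list is splatted into `op.regions[0].blocks.append(*types)`, which is why the updated `tests/test_regions.py` below can write `def demo_fun1(i: index_t, j: index_t, k: index_t)` instead of passing `block_args=[index_t] * 3`.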
57 changes: 36 additions & 21 deletions mlir_utils/types.py
@@ -8,30 +8,31 @@
     IndexType,
     F16Type,
     F32Type,
+    Type,
 )

-index = IndexType.get()
-bool_ = IntegerType.get_signless(1)
-i8 = IntegerType.get_signless(8)
-i16 = IntegerType.get_signless(16)
-i32 = IntegerType.get_signless(32)
-i64 = IntegerType.get_signless(64)
-f16 = F16Type.get()
-f32 = F32Type.get()
-f64 = F64Type.get()
+index_t = IndexType.get()
+bool_t = IntegerType.get_signless(1)
+i8_t = IntegerType.get_signless(8)
+i16_t = IntegerType.get_signless(16)
+i32_t = IntegerType.get_signless(32)
+i64_t = IntegerType.get_signless(64)
+f16_t = F16Type.get()
+f32_t = F32Type.get()
+f64_t = F64Type.get()

 NP_DTYPE_TO_MLIR_TYPE = lambda: {
-    np.int8: i8,
-    np.int16: i16,
-    np.int32: i32,
-    np.int64: i64,
+    np.int8: i8_t,
+    np.int16: i16_t,
+    np.int32: i32_t,
+    np.int64: i64_t,
     # this is techincally wrong i guess but numpy by default casts python scalars to this
     # so to support passing lists of ints we map this to index type
-    np.longlong: index,
-    np.uintp: index,
-    np.float16: f16,
-    np.float32: f32,
-    np.float64: f64,
+    np.longlong: index_t,
+    np.uintp: index_t,
+    np.float16: f16_t,
+    np.float32: f32_t,
+    np.float64: f64_t,
 }

 MLIR_TYPE_TO_NP_DTYPE = lambda: {v: k for k, v in NP_DTYPE_TO_MLIR_TYPE().items()}
@@ -51,15 +52,29 @@ def infer_mlir_type(
     MLIR type corresponding to py_val.
     """
     if isinstance(py_val, bool):
-        return bool_
+        return bool_t
     elif isinstance(py_val, int):
-        return i64
+        return i64_t
     elif isinstance(py_val, float):
-        return f64
+        return f64_t
     elif isinstance(py_val, np.ndarray):
         dtype = NP_DTYPE_TO_MLIR_TYPE()[py_val.dtype.type]
         return RankedTensorType.get(py_val.shape, dtype)
     else:
         raise NotImplementedError(
             f"Unsupported Python value {py_val=} with type {type(py_val)}"
         )
+
+
+def tensor_t(*args, element_type: Type = None):
+    if (element_type is None and not isinstance(args[-1], Type)) or (
+        isinstance(args[-1], Type) and element_type is not None
+    ):
+        raise ValueError(
+            f"either element_type must be provided explicitly XOR last arg to tensor type constructor must be the element type"
+        )
+    if element_type is not None:
+        type = element_type
+    else:
+        type = args[-1]
+    return RankedTensorType.get(args[:-1], type)
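The new `tensor_t` helper builds a `RankedTensorType` from leading shape arguments plus an element type, given either as the last positional argument or via the `element_type` keyword (but not both). A short usage sketch of the positional form (the one the updated `tests/test_regions.py` uses as `tensor_t(S, 3, S, f64_t)`), assuming the package has been configured against an MLIR distribution and that importing `mlir_utils.types` under an active `Context` is enough for its module-level type constants to be created:

```python
from mlir.ir import Context

with Context():
    # The module-level constants (f64_t etc.) call *.get() at import time,
    # so the import is deferred until a context is active (an assumption
    # about how the package is meant to be used, not shown in this diff).
    from mlir_utils.types import f64_t, tensor_t

    t = tensor_t(10, 10, f64_t)  # trailing positional argument is the element type
    print(t)                     # tensor<10x10xf64>
```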
4 changes: 3 additions & 1 deletion pyproject.toml
@@ -1,8 +1,10 @@
 [project]
 name = "mlir-python-utils"
-version = "0.0.1"
+version = "0.0.2"
 description = "The missing pieces (as far as boilerplate reduction goes) of the upstream MLIR python bindings."
+requires-python = ">=3.11"
+license = { file = "LICENSE" }
 readme = "README.md"
 dependencies = [
     "numpy",
     "black",
10 changes: 5 additions & 5 deletions tests/test_operator_overloading.py
@@ -7,24 +7,24 @@

 # noinspection PyUnresolvedReferences
 from mlir_utils.testing import mlir_ctx as ctx, filecheck, MLIRContext
-from mlir_utils.types import f64, index
+from mlir_utils.types import f64_t, index_t

 # needed since the fix isn't defined here nor conftest.py
 pytest.mark.usefixtures("ctx")


 def test_tensor_arithmetic(ctx: MLIRContext):
     print()
-    one = constant(1, index)
+    one = constant(1, index_t)
     assert isinstance(one, Scalar)
-    two = constant(2, index)
+    two = constant(2, index_t)
     assert isinstance(two, Scalar)
     three = one + two
     assert isinstance(three, Scalar)

-    ten1 = empty((10, 10, 10), f64)
+    ten1 = empty((10, 10, 10), f64_t)
     assert isinstance(ten1, Tensor)
-    ten2 = empty((10, 10, 10), f64)
+    ten2 = empty((10, 10, 10), f64_t)
     assert isinstance(ten2, Tensor)
     ten3 = ten1 + ten2
     assert isinstance(ten3, Tensor)
12 changes: 5 additions & 7 deletions tests/test_regions.py
@@ -11,7 +11,7 @@

 # noinspection PyUnresolvedReferences
 from mlir_utils.testing import mlir_ctx as ctx, filecheck, MLIRContext
-from mlir_utils.types import f64, index
+from mlir_utils.types import f64_t, index_t, tensor_t

 # needed since the fix isn't defined here nor conftest.py
 pytest.mark.usefixtures("ctx")
@@ -93,13 +93,11 @@ def demo_fun1():


 def test_block_args(ctx: MLIRContext):
-    one = constant(1, index)
-    two = constant(2, index)
+    one = constant(1, index_t)
+    two = constant(2, index_t)

-    @generate(
-        Tensor[(S, 3, S), f64], dynamic_extents=[one, two], block_args=[index] * 3
-    )
-    def demo_fun1(i, j, k):
+    @generate(tensor_t(S, 3, S, f64_t), dynamic_extents=[one, two])
+    def demo_fun1(i: index_t, j: index_t, k: index_t):
         one = constant(1.0)
         tensor_yield(one)

