diff --git a/.github/workflows/test_pypi.yml b/.github/workflows/test_pypi.yml
index 9271738..d8ffc8c 100644
--- a/.github/workflows/test_pypi.yml
+++ b/.github/workflows/test_pypi.yml
@@ -12,6 +12,9 @@ jobs:
     runs-on: ubuntu-20.04
     steps:
       - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
+        with:
+          python-version: '3.11'

       - name: Build wheels
         run: |
@@ -19,13 +22,16 @@ jobs:

       - uses: actions/upload-artifact@v3
         with:
-          path: ./wheelhouse/*.whl
+          path: ./wheelhouse/mlir_python_utils*.whl

   build_sdist:
     name: Build source distribution
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
+      - uses: actions/setup-python@v4
+        with:
+          python-version: '3.11'

       - name: Build sdist
         run: pipx run build --sdist
diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml
index edc2146..dcb26d0 100644
--- a/.github/workflows/wheels.yml
+++ b/.github/workflows/wheels.yml
@@ -29,7 +29,7 @@ jobs:
       - name: Upload wheels
         uses: actions/upload-artifact@v3
         with:
-          path: wheelhouse/*.whl
+          path: wheelhouse/mlir_python_utils*.whl
           name: build_artifact

   upload_wheels:
diff --git a/README.md b/README.md
index b279c07..ac086e6 100644
--- a/README.md
+++ b/README.md
@@ -15,8 +15,8 @@ or for maximum convenience

 ```shell
 $ pip install mlir-python-utils[mlir] \
+  -i https://test.pypi.org/simple \
   -f https://github.com/makslevental/mlir-wheels/releases/expanded_assets/latest
-  -f https://github.com/makslevental/mlir-python-utils/releases/expanded_assets/latest
 $ configure-mlir-python-utils mlir
 ```

diff --git a/examples/throwaway.py b/examples/throwaway.py
index 4057cda..bff997e 100644
--- a/examples/throwaway.py
+++ b/examples/throwaway.py
@@ -18,7 +18,7 @@
 from mlir_utils.dialects import gpu
 from mlir_utils.dialects.ext import func
 from mlir_utils.dialects.ext.arith import constant
-from mlir_utils.types import f64, index
+from mlir_utils.types import f64_t, index_t

 generate_all_upstream_trampolines()
 # from mlir.dialects.scf import WhileOp
@@ -51,11 +51,11 @@


 # with mlir_mod_ctx() as ctx:
-one = constant(1, index)
-two = constant(2, index)
+one = constant(1, index_t)
+two = constant(2, index_t)

 @generate(
-    Tensor[(S, 3, S), f64], dynamic_extents=[one, two], block_args=[index] * 3
+    Tensor[(S, 3, S), f64_t], dynamic_extents=[one, two], block_args=[index_t] * 3
 )
 def demo_fun1(i, j, k):
     one = constant(1.0)
diff --git a/mlir_utils/_configuration/module_alias_map.py b/mlir_utils/_configuration/module_alias_map.py
index 4afc143..a6260b6 100644
--- a/mlir_utils/_configuration/module_alias_map.py
+++ b/mlir_utils/_configuration/module_alias_map.py
@@ -56,6 +56,9 @@ def module_repr(self, module: ModuleType) -> str:

 class AliasedModuleFinder(MetaPathFinder):
     def __init__(self, alias_map: Mapping[str, str]):
+        for k, v in dict(alias_map).items():
+            if k == v:
+                alias_map.pop(k)
         self.alias_map = alias_map

     def find_spec(
diff --git a/mlir_utils/dialects/util.py b/mlir_utils/dialects/util.py
index fddae6f..13e8ec2 100644
--- a/mlir_utils/dialects/util.py
+++ b/mlir_utils/dialects/util.py
@@ -1,8 +1,9 @@
 import ctypes
 from functools import wraps
+import inspect

 from mlir.dialects._ods_common import get_op_result_or_value, get_op_results_or_values
-from mlir.ir import InsertionPoint, Value
+from mlir.ir import InsertionPoint, Value, Type


 def get_result_or_results(op):
@@ -53,12 +54,21 @@ def maybe_cast(val: Value):
 def region_op(op_constructor):
     # the decorator itself
     def op_decorator(*args, **kwargs):
-        block_arg_types = kwargs.pop("block_args", [])
         op = op_constructor(*args, **kwargs)

         def builder_wrapper(body_builder):
             # add a block with block args having types ...
-            op.regions[0].blocks.append(*[t for t in block_arg_types])
+            sig = inspect.signature(body_builder)
+            types = [p.annotation for p in sig.parameters.values()]
+            if not (
+                len(types) == len(sig.parameters)
+                and all(isinstance(t, Type) for t in types)
+            ):
+                raise ValueError(
+                    f"for {body_builder=} either missing a type annotation or type annotation isn't a mlir type: {sig}"
+                )
+
+            op.regions[0].blocks.append(*types)
             with InsertionPoint(op.regions[0].blocks[0]):
                 body_builder(
                     *[maybe_cast(a) for a in op.regions[0].blocks[0].arguments]
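The `util.py` hunk above drops the `block_args` keyword in favor of introspection: block argument types are now read from the body builder's parameter annotations. Below is a minimal, self-contained sketch of just that extraction step, with the MLIR `Type` check left out so it runs without an MLIR context; `annotated_block_arg_types` and `demo` are hypothetical names for illustration, not part of the package.

```python
import inspect


def annotated_block_arg_types(body_builder):
    # Pull the annotation off every parameter, as region_op's builder_wrapper now does.
    sig = inspect.signature(body_builder)
    types = [p.annotation for p in sig.parameters.values()]
    # An un-annotated parameter shows up as Parameter.empty and is rejected.
    if any(t is inspect.Parameter.empty for t in types):
        raise ValueError(f"missing a type annotation in {sig}")
    return types


def demo(i: int, j: int, k: float):
    ...


print(annotated_block_arg_types(demo))  # [<class 'int'>, <class 'int'>, <class 'float'>]
```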
diff --git a/mlir_utils/types.py b/mlir_utils/types.py
index 5b67c90..103b575 100644
--- a/mlir_utils/types.py
+++ b/mlir_utils/types.py
@@ -8,30 +8,31 @@
     IndexType,
     F16Type,
     F32Type,
+    Type,
 )

-index = IndexType.get()
-bool_ = IntegerType.get_signless(1)
-i8 = IntegerType.get_signless(8)
-i16 = IntegerType.get_signless(16)
-i32 = IntegerType.get_signless(32)
-i64 = IntegerType.get_signless(64)
-f16 = F16Type.get()
-f32 = F32Type.get()
-f64 = F64Type.get()
+index_t = IndexType.get()
+bool_t = IntegerType.get_signless(1)
+i8_t = IntegerType.get_signless(8)
+i16_t = IntegerType.get_signless(16)
+i32_t = IntegerType.get_signless(32)
+i64_t = IntegerType.get_signless(64)
+f16_t = F16Type.get()
+f32_t = F32Type.get()
+f64_t = F64Type.get()

 NP_DTYPE_TO_MLIR_TYPE = lambda: {
-    np.int8: i8,
-    np.int16: i16,
-    np.int32: i32,
-    np.int64: i64,
+    np.int8: i8_t,
+    np.int16: i16_t,
+    np.int32: i32_t,
+    np.int64: i64_t,
     # this is techincally wrong i guess but numpy by default casts python scalars to this
     # so to support passing lists of ints we map this to index type
-    np.longlong: index,
-    np.uintp: index,
-    np.float16: f16,
-    np.float32: f32,
-    np.float64: f64,
+    np.longlong: index_t,
+    np.uintp: index_t,
+    np.float16: f16_t,
+    np.float32: f32_t,
+    np.float64: f64_t,
 }

 MLIR_TYPE_TO_NP_DTYPE = lambda: {v: k for k, v in NP_DTYPE_TO_MLIR_TYPE().items()}
@@ -51,11 +52,11 @@ def infer_mlir_type(
         MLIR type corresponding to py_val.
     """
     if isinstance(py_val, bool):
-        return bool_
+        return bool_t
     elif isinstance(py_val, int):
-        return i64
+        return i64_t
     elif isinstance(py_val, float):
-        return f64
+        return f64_t
     elif isinstance(py_val, np.ndarray):
         dtype = NP_DTYPE_TO_MLIR_TYPE()[py_val.dtype.type]
         return RankedTensorType.get(py_val.shape, dtype)
@@ -63,3 +64,17 @@
         raise NotImplementedError(
             f"Unsupported Python value {py_val=} with type {type(py_val)}"
         )
+
+
+def tensor_t(*args, element_type: Type = None):
+    if (element_type is None and not isinstance(args[-1], Type)) or (
+        isinstance(args[-1], Type) and element_type is not None
+    ):
+        raise ValueError(
+            f"either element_type must be provided explicitly XOR last arg to tensor type constructor must be the element type"
+        )
+    if element_type is not None:
+        type = element_type
+    else:
+        type = args[-1]
+    return RankedTensorType.get(args[:-1], type)
diff --git a/pyproject.toml b/pyproject.toml
index f75868a..752abdf 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,8 +1,10 @@
 [project]
 name = "mlir-python-utils"
-version = "0.0.1"
+version = "0.0.2"
+description = "The missing pieces (as far as boilerplate reduction goes) of the upstream MLIR python bindings."
 requires-python = ">=3.11"
 license = { file = "LICENSE" }
+readme = "README.md"
 dependencies = [
     "numpy",
     "black",
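The new `tensor_t` helper in `types.py` above builds a `RankedTensorType` from leading shape dimensions plus a trailing element type, which is how the updated `tests/test_regions.py` below calls it (`tensor_t(S, 3, S, f64_t)`). A rough usage sketch, assuming an active MLIR context such as the `mlir_ctx` test fixture; the expected strings are MLIR's usual textual forms:

```python
from mlir_utils.types import f64_t, i32_t, tensor_t

# Shape dims as leading positional args, element type as the last one.
ten_f64 = tensor_t(10, 10, f64_t)
assert str(ten_f64) == "tensor<10x10xf64>"

ten_i32 = tensor_t(4, 2, i32_t)
assert str(ten_i32) == "tensor<4x2xi32>"

# Passing the element type both positionally and via element_type= raises ValueError.
```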
diff --git a/tests/test_operator_overloading.py b/tests/test_operator_overloading.py
index f496da1..eba7945 100644
--- a/tests/test_operator_overloading.py
+++ b/tests/test_operator_overloading.py
@@ -7,7 +7,7 @@

 # noinspection PyUnresolvedReferences
 from mlir_utils.testing import mlir_ctx as ctx, filecheck, MLIRContext
-from mlir_utils.types import f64, index
+from mlir_utils.types import f64_t, index_t

 # needed since the fix isn't defined here nor conftest.py
 pytest.mark.usefixtures("ctx")
@@ -15,16 +15,16 @@
 def test_tensor_arithmetic(ctx: MLIRContext):
     print()

-    one = constant(1, index)
+    one = constant(1, index_t)
     assert isinstance(one, Scalar)
-    two = constant(2, index)
+    two = constant(2, index_t)
     assert isinstance(two, Scalar)
     three = one + two
     assert isinstance(three, Scalar)

-    ten1 = empty((10, 10, 10), f64)
+    ten1 = empty((10, 10, 10), f64_t)
     assert isinstance(ten1, Tensor)
-    ten2 = empty((10, 10, 10), f64)
+    ten2 = empty((10, 10, 10), f64_t)
     assert isinstance(ten2, Tensor)
     ten3 = ten1 + ten2
     assert isinstance(ten3, Tensor)
diff --git a/tests/test_regions.py b/tests/test_regions.py
index 24ec409..2f76376 100644
--- a/tests/test_regions.py
+++ b/tests/test_regions.py
@@ -11,7 +11,7 @@

 # noinspection PyUnresolvedReferences
 from mlir_utils.testing import mlir_ctx as ctx, filecheck, MLIRContext
-from mlir_utils.types import f64, index
+from mlir_utils.types import f64_t, index_t, tensor_t

 # needed since the fix isn't defined here nor conftest.py
 pytest.mark.usefixtures("ctx")
@@ -93,13 +93,11 @@ def demo_fun1():


 def test_block_args(ctx: MLIRContext):
-    one = constant(1, index)
-    two = constant(2, index)
+    one = constant(1, index_t)
+    two = constant(2, index_t)

-    @generate(
-        Tensor[(S, 3, S), f64], dynamic_extents=[one, two], block_args=[index] * 3
-    )
-    def demo_fun1(i, j, k):
+    @generate(tensor_t(S, 3, S, f64_t), dynamic_extents=[one, two])
+    def demo_fun1(i: index_t, j: index_t, k: index_t):
         one = constant(1.0)
         tensor_yield(one)
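Finally, the `_t` suffix renames thread through `infer_mlir_type` unchanged. A short sketch of that mapping, again assuming an active MLIR context; the assertions follow the branches of `infer_mlir_type` in `mlir_utils/types.py` above:

```python
import numpy as np

from mlir_utils.types import bool_t, f64_t, i64_t, infer_mlir_type

assert infer_mlir_type(True) == bool_t   # Python bool -> i1
assert infer_mlir_type(17) == i64_t      # Python int -> i64
assert infer_mlir_type(1.0) == f64_t     # Python float -> f64
# numpy arrays map through NP_DTYPE_TO_MLIR_TYPE to a RankedTensorType
assert str(infer_mlir_type(np.ones((3, 3)))) == "tensor<3x3xf64>"
```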