Integrate llvm-project @f1595ecfdce5387e41826fd72ff930a1a39ae398 #18897

Merged: 8 commits, Oct 31, 2024
Changes from all commits
@@ -424,11 +424,11 @@ struct GenericTypeConvert final : ConversionPattern {
   }
 };
 
-std::optional<Value> scalarToTensor(OpBuilder &builder, Type /*type*/,
-                                    ValueRange inputs, Location loc) {
+Value scalarToTensor(OpBuilder &builder, Type /*type*/, ValueRange inputs,
+                     Location loc) {
   assert(inputs.size() == 1);
   if (isa<ShapedType>(inputs.front().getType())) {
-    return std::nullopt;
+    return Value();
   }
   return builder
       .create<tensor::FromElementsOp>(
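The common thread in the C++ hunks of this integrate is an upstream MLIR API change: type-converter materialization callbacks now return a plain Value, where a null Value means the hook declined, instead of std::optional<Value>/std::nullopt. As a minimal sketch of the new convention, assuming a hook that wraps a lone scalar into a 0-d tensor (the helper name and the use of addSourceMaterialization are illustrative, not code from this PR):

#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/Transforms/DialectConversion.h"

using namespace mlir;

// Registers a source materialization that turns a single scalar value into a
// 0-d tensor. Returning a null Value tells the conversion framework that this
// hook does not apply (the pre-change API returned std::nullopt instead).
static void addScalarToTensorMaterialization(TypeConverter &typeConverter) {
  typeConverter.addSourceMaterialization(
      [](OpBuilder &builder, Type /*type*/, ValueRange inputs,
         Location loc) -> Value {
        if (inputs.size() != 1 || isa<ShapedType>(inputs.front().getType()))
          return Value(); // decline; another materialization may handle it
        return builder.create<tensor::FromElementsOp>(
            loc, RankedTensorType::get({}, inputs.front().getType()),
            inputs.front());
      });
}

Every signature change in this PR follows that pattern: only the return type and the failure sentinel change, while the materialization bodies stay the same.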
@@ -65,13 +65,13 @@ Type convertShapedToSignless(ShapedType shapedType) {
   return shapedType;
 }
 
-std::optional<Value> materializeCast(OpBuilder &builder, Type toType,
-                                     ValueRange inputs, Location loc) {
+Value materializeCast(OpBuilder &builder, Type toType, ValueRange inputs,
+                      Location loc) {
   assert(inputs.size() == 1 && "too many inputs to type conversion");
   Value fromValue = inputs[0];
   auto fromType = dyn_cast<RankedTensorType>(fromValue.getType());
   if (!fromType)
-    return std::nullopt;
+    return Value();
 
   if (auto intFromType = dyn_cast<IntegerType>(fromType.getElementType())) {
     Type castType = getElementTypeOrSelf(toType);
19 changes: 9 additions & 10 deletions compiler/plugins/input/StableHLO/Conversion/TypeConversion.cpp
@@ -31,36 +31,35 @@ Type convertShapedType(ShapedType shapedType) {
   return shapedType;
 }
 
-std::optional<Value> materializeCastFromIllegal(OpBuilder &builder, Type type,
-                                                ValueRange inputs,
-                                                Location loc) {
+Value materializeCastFromIllegal(OpBuilder &builder, Type type,
+                                 ValueRange inputs, Location loc) {
   Type fromType = getElementTypeOrSelf(inputs[0].getType());
   Type toType = getElementTypeOrSelf(type);
   if ((!fromType.isSignedInteger() && !fromType.isUnsignedInteger()) ||
       !toType.isSignlessInteger())
-    return std::nullopt;
+    return Value();
   // Use unrealized conversion casts to do signful->signless conversions.
   return builder.create<UnrealizedConversionCastOp>(loc, type, inputs[0])
       ->getResult(0);
 }
 
-std::optional<Value> materializeCastToIllegal(OpBuilder &builder, Type type,
-                                              ValueRange inputs, Location loc) {
+Value materializeCastToIllegal(OpBuilder &builder, Type type, ValueRange inputs,
+                               Location loc) {
   Type fromType = getElementTypeOrSelf(inputs[0].getType());
   Type toType = getElementTypeOrSelf(type);
   if (!fromType.isSignlessInteger() ||
       (!toType.isSignedInteger() && !toType.isUnsignedInteger()))
-    return std::nullopt;
+    return Value();
   // Use unrealized conversion casts to do signless->signful conversions.
   return builder.create<UnrealizedConversionCastOp>(loc, type, inputs[0])
       ->getResult(0);
 }
 
-std::optional<Value> scalarToTensor(OpBuilder &builder, Type type,
-                                    ValueRange inputs, Location loc) {
+Value scalarToTensor(OpBuilder &builder, Type type, ValueRange inputs,
+                     Location loc) {
   assert(inputs.size() == 1);
   if (llvm::isa<ShapedType>(inputs.front().getType())) {
-    return std::nullopt;
+    return Value();
   }
   auto tensor =
       builder
@@ -229,9 +229,8 @@ struct ConvertMemRefStore final : OpConversionPattern<memref::StoreOp> {
 // Helper functions
 //===----------------------------------------------------------------------===//
 
-std::optional<Value> materializeArithBitcast(OpBuilder &builder, Type resultTy,
-                                             mlir::ValueRange inputs,
-                                             mlir::Location loc) {
+Value materializeArithBitcast(OpBuilder &builder, Type resultTy,
+                              mlir::ValueRange inputs, mlir::Location loc) {
   return builder.create<arith::BitcastOp>(loc, resultTy, inputs);
 }
 
@@ -2088,9 +2088,7 @@ hal.executable private @dynamic_unpack {
 
 // -----
 
-#pipeline_layout = #hal.pipeline.layout<constants = 4, bindings = [
-  #hal.pipeline.binding<storage_buffer>,
-  #hal.pipeline.binding<storage_buffer>,
+#pipeline_layout = #hal.pipeline.layout<constants = 6, bindings = [
   #hal.pipeline.binding<storage_buffer>,
   #hal.pipeline.binding<storage_buffer>
 ]>
@@ -2111,10 +2109,14 @@ hal.executable private @dynamic_unpack_dynamic_tile {
   %cl_1 = hal.interface.constant.load layout(#pipeline_layout) ordinal(1) : i32
   %cl_2 = hal.interface.constant.load layout(#pipeline_layout) ordinal(2) : i32
   %cl_3 = hal.interface.constant.load layout(#pipeline_layout) ordinal(3) : i32
+  %cl_4 = hal.interface.constant.load layout(#pipeline_layout) ordinal(4) : i32
+  %cl_5 = hal.interface.constant.load layout(#pipeline_layout) ordinal(5) : i32
   %0 = arith.index_castui %cl_0 : i32 to index
   %1 = arith.index_castui %cl_1 : i32 to index
   %2 = arith.index_castui %cl_2 : i32 to index
   %3 = arith.index_castui %cl_3 : i32 to index
+  %tile0 = arith.index_castui %cl_4 : i32 to index
+  %tile1 = arith.index_castui %cl_5 : i32 to index
   %4 = flow.dispatch.workload.ordinal %0, 0 : index
   %5 = flow.dispatch.workload.ordinal %1, 1 : index
   %6 = flow.dispatch.workload.ordinal %2, 2 : index
@@ -2123,7 +2125,7 @@ hal.executable private @dynamic_unpack_dynamic_tile {
   %9 = hal.interface.binding.subspan layout(#pipeline_layout) binding(1) alignment(64) offset(%c131072) : !flow.dispatch.tensor<writeonly:tensor<?x?xi32>>{%6, %7}
   %10 = flow.dispatch.tensor.load %8, offsets = [0, 0, 0, 0], sizes = [%4, %5, %c32, %c16], strides = [1, 1, 1, 1] : !flow.dispatch.tensor<readonly:tensor<?x?x?x?xi32>>{%4, %5, %c32, %c16} -> tensor<?x?x?x?xi32>
   %11 = tensor.empty(%6, %7) : tensor<?x?xi32>
-  %12 = tensor.unpack %10 inner_dims_pos = [0, 1] inner_tiles = [%c32, %c16] into %11
+  %12 = tensor.unpack %10 inner_dims_pos = [0, 1] inner_tiles = [%tile0, %tile1] into %11
       {lowering_config = #iree_codegen.lowering_config<tile_sizes = [[64, 64]]>}
       : tensor<?x?x?x?xi32> -> tensor<?x?xi32>
   flow.dispatch.tensor.store %12, %9, offsets = [0, 0], sizes = [%6, %7], strides = [1, 1] : tensor<?x?xi32> -> !flow.dispatch.tensor<writeonly:tensor<?x?xi32>>{%6, %7}
@@ -261,25 +261,24 @@ struct ConvertToStreamPass final
         resultTypes.push_back(indexType);
         return success();
       });
-  typeConverter.addArgumentMaterialization(
-      [](OpBuilder &builder, TensorType resultType, ValueRange inputs,
-         Location loc) -> std::optional<Value> {
-        assert(inputs.size() >= 2);
-        auto resourceValue = inputs[0];
-        auto resourceSize = inputs[1];
-        assert(inputs.size() == 2 &&
-               "expecting 2 operands (resource + size)");
-        Value cast = builder
-                         .create<IREE::Stream::AsyncTransferOp>(
-                             loc, resourceValue.getType(), resourceValue,
-                             resourceSize, resourceSize,
-                             /*source_affinity=*/nullptr,
-                             /*result_affinity=*/nullptr)
-                         .getResult();
-        return builder
-            .create<UnrealizedConversionCastOp>(loc, resultType, cast)
-            .getResult(0);
-      });
+  typeConverter.addArgumentMaterialization([](OpBuilder &builder,
+                                              TensorType resultType,
+                                              ValueRange inputs,
+                                              Location loc) -> Value {
+    assert(inputs.size() >= 2);
+    auto resourceValue = inputs[0];
+    auto resourceSize = inputs[1];
+    assert(inputs.size() == 2 && "expecting 2 operands (resource + size)");
+    Value cast = builder
+                     .create<IREE::Stream::AsyncTransferOp>(
+                         loc, resourceValue.getType(), resourceValue,
+                         resourceSize, resourceSize,
+                         /*source_affinity=*/nullptr,
+                         /*result_affinity=*/nullptr)
+                     .getResult();
+    return builder.create<UnrealizedConversionCastOp>(loc, resultType, cast)
+        .getResult(0);
+  });
 
   populateUtilConversionPatterns(context, conversionTarget, typeConverter,
                                  patterns);
@@ -20,10 +20,8 @@ namespace mlir::iree_compiler::IREE::Util {
 
 namespace {
 
-static std::optional<Value> buildUnrealizedConversionCastOp(OpBuilder &builder,
-                                                            Type toType,
-                                                            ValueRange inputs,
-                                                            Location loc) {
+static Value buildUnrealizedConversionCastOp(OpBuilder &builder, Type toType,
+                                             ValueRange inputs, Location loc) {
   return builder.create<UnrealizedConversionCastOp>(loc, toType, inputs)
       .getResult(0);
 }
@@ -149,7 +149,6 @@
 "onnx/node/generated/test_lppool_2d_same_lower",
 "onnx/node/generated/test_lppool_2d_same_upper",
 "onnx/node/generated/test_maxpool_2d_ceil_output_size_reduce_by_one",
-"onnx/node/generated/test_maxpool_2d_precomputed_same_upper",
 "onnx/node/generated/test_maxpool_2d_same_lower",
 "onnx/node/generated/test_maxpool_2d_same_upper",
 "onnx/node/generated/test_maxpool_with_argmax_2d_precomputed_strides",
@@ -380,6 +379,7 @@
 "onnx/node/generated/test_sce_none_weights_expanded",
 "onnx/node/generated/test_sce_none_weights_log_prob",
 "onnx/node/generated/test_sce_none_weights_log_prob_expanded",
+"onnx/node/generated/test_shape_clip_start",
Comment from the PR author on the test_shape_clip_start entry:
This shape test fails on top of main too, so I don't think the failure is related to the integrate. There is an issue filed in torch-mlir for this test here: llvm/torch-mlir#3841.

"onnx/node/generated/test_shape_end_negative_1",
"onnx/node/generated/test_slice",
"onnx/node/generated/test_slice_default_steps",
@@ -438,9 +438,8 @@
 "onnx/node/generated/test_reduce_min_empty_set",
 "onnx/node/generated/test_reduce_sum_empty_set_non_reduced_axis_zero",
 "onnx/node/generated/test_resize_downsample_scales_linear_align_corners",
-"onnx/node/generated/test_scan_sum",
 "onnx/node/generated/test_scan9_sum",
-"onnx/node/generated/test_shape_clip_start",
+"onnx/node/generated/test_scan_sum",
 "onnx/node/generated/test_shape_end_1",
 "onnx/node/generated/test_shape_start_1",
 "onnx/node/generated/test_shape_start_1_end_2",
@@ -153,7 +153,6 @@
 "onnx/node/generated/test_lppool_2d_same_lower",
 "onnx/node/generated/test_lppool_2d_same_upper",
 "onnx/node/generated/test_maxpool_2d_ceil_output_size_reduce_by_one",
-"onnx/node/generated/test_maxpool_2d_precomputed_same_upper",
 "onnx/node/generated/test_maxpool_2d_same_lower",
 "onnx/node/generated/test_maxpool_2d_same_upper",
 "onnx/node/generated/test_maxpool_with_argmax_2d_precomputed_strides",
@@ -381,6 +380,7 @@
 "onnx/node/generated/test_sce_none_weights_expanded",
 "onnx/node/generated/test_sce_none_weights_log_prob",
 "onnx/node/generated/test_sce_none_weights_log_prob_expanded",
+"onnx/node/generated/test_shape_clip_start",
 "onnx/node/generated/test_shape_end_negative_1",
 "onnx/node/generated/test_slice",
 "onnx/node/generated/test_slice_default_steps",
@@ -488,11 +488,10 @@
 "onnx/node/generated/test_reduce_sum_square_default_axes_keepdims_random",
 "onnx/node/generated/test_reduce_sum_square_default_axes_keepdims_random_expanded",
 "onnx/node/generated/test_resize_downsample_scales_linear_align_corners",
-"onnx/node/generated/test_scan_sum",
 "onnx/node/generated/test_scan9_sum",
+"onnx/node/generated/test_scan_sum",
 "onnx/node/generated/test_shape",
 "onnx/node/generated/test_shape_clip_end",
-"onnx/node/generated/test_shape_clip_start",
 "onnx/node/generated/test_shape_end_1",
 "onnx/node/generated/test_shape_example",
 "onnx/node/generated/test_shape_start_1",
@@ -195,9 +195,6 @@
 "onnx/node/generated/test_max_uint16",
 "onnx/node/generated/test_max_uint8",
 "onnx/node/generated/test_maxpool_2d_ceil_output_size_reduce_by_one",
-"onnx/node/generated/test_maxpool_2d_precomputed_same_upper",
-"onnx/node/generated/test_maxpool_2d_same_lower",
-"onnx/node/generated/test_maxpool_2d_same_upper",
 "onnx/node/generated/test_maxpool_with_argmax_2d_precomputed_strides",
 "onnx/node/generated/test_maxunpool_export_with_output_shape",
 "onnx/node/generated/test_maxunpool_export_without_output_shape",
@@ -447,6 +444,7 @@
 "onnx/node/generated/test_sce_none_weights_expanded",
 "onnx/node/generated/test_sce_none_weights_log_prob",
 "onnx/node/generated/test_sce_none_weights_log_prob_expanded",
+"onnx/node/generated/test_shape_clip_start",
 "onnx/node/generated/test_shape_end_negative_1",
 "onnx/node/generated/test_slice",
 "onnx/node/generated/test_slice_default_steps",
@@ -533,8 +531,8 @@
 "onnx/node/generated/test_dynamicquantizelinear_expanded",
 "onnx/node/generated/test_einsum_batch_diagonal",
 "onnx/node/generated/test_einsum_batch_matmul",
-"onnx/node/generated/test_einsum_transpose",
 "onnx/node/generated/test_einsum_sum",
+"onnx/node/generated/test_einsum_transpose",
 "onnx/node/generated/test_eyelike_with_dtype",
 "onnx/node/generated/test_isinf_float16",
 "onnx/node/generated/test_isnan_float16",
@@ -595,9 +593,8 @@
 "onnx/node/generated/test_reduce_sum_square_default_axes_keepdims_example_expanded",
 "onnx/node/generated/test_reduce_sum_square_default_axes_keepdims_random",
 "onnx/node/generated/test_reduce_sum_square_default_axes_keepdims_random_expanded",
-"onnx/node/generated/test_scan_sum",
 "onnx/node/generated/test_scan9_sum",
-"onnx/node/generated/test_shape_clip_start",
+"onnx/node/generated/test_scan_sum",
 "onnx/node/generated/test_shape_end_1",
 "onnx/node/generated/test_shape_start_1",
 "onnx/node/generated/test_shape_start_1_end_2",
2 changes: 1 addition & 1 deletion third_party/llvm-project
Submodule llvm-project updated 3007 files
2 changes: 1 addition & 1 deletion third_party/stablehlo
Submodule stablehlo updated 44 files
+17 −0 BUILD.bazel
+2 −2 WORKSPACE.bazel
+1 −1 build_tools/llvm_version.txt
+1 −0 docs/generated/stablehlo_linalg_passes.md
+7 −0 docs/generated/stablehlo_passes.md
+1 −0 docs/generated/stablehlo_tosa_passes.md
+6 −2 docs/spec.md
+199 −0 rfcs/20241001-microscaling-formats.md
+48 −5 stablehlo/conversions/linalg/tests/pointwise.mlir
+47 −45 stablehlo/conversions/linalg/transforms/StablehloToLinalgPointwise.cpp
+9 −10 stablehlo/conversions/linalg/transforms/TypeConversion.cpp
+3 −2 stablehlo/dialect/Base.td
+2 −2 stablehlo/dialect/Version.cpp
+1 −1 stablehlo/dialect/Version.h
+49 −1 stablehlo/dialect/VhloBytecode.cpp
+1 −0 stablehlo/dialect/VhloDialect.td
+24 −0 stablehlo/dialect/VhloTypes.cpp
+12 −0 stablehlo/dialect/VhloTypes.td
+41 −45 stablehlo/reference/Tensor.cpp
+6 −4 stablehlo/reference/Types.cpp
+1 −1 stablehlo/testdata/igamma_float64_20_20_float64_20_20_chlo.mlir
+1 −1 stablehlo/testdata/igammac_float64_20_20_float64_20_20_chlo.mlir
+16 −0 stablehlo/tests/interpret/api_input_arguments.mlir
+32 −0 stablehlo/tests/interpret/constant.mlir
+40 −8 stablehlo/tests/ops_stablehlo.mlir
+53 −53 stablehlo/tests/ops_stablehlo_quantized.mlir
+4 −0 stablehlo/tests/ops_stablehlo_roundtrip.mlir
+156 −0 stablehlo/tests/transforms/stablehlo_aggressive_folder.mlir
+550 −526 stablehlo/tests/transforms/stablehlo_aggressive_simplification.mlir
+2,936 −0 stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.1_8_0.mlir
+ stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.1_8_0.mlir.bc
+32 −0 stablehlo/tests/vhlo/stablehlo_legalize_to_vhlo.mlir
+35 −0 stablehlo/tests/vhlo/vhlo_to_version_downgrade_invalid.1_7_0.mlir
+15 −0 stablehlo/tests/vhlo/vhlo_to_version_downgrade_patch.mlir
+41 −2 stablehlo/tools/StablehloTranslateMain.cpp
+7 −2 stablehlo/transforms/CMakeLists.txt
+31 −2 stablehlo/transforms/PassUtils.cpp
+27 −12 stablehlo/transforms/PassUtils.h
+5 −0 stablehlo/transforms/Passes.h
+2 −0 stablehlo/transforms/Passes.td
+190 −7 stablehlo/transforms/StablehloAggressiveFolder.cpp
+98 −492 stablehlo/transforms/StablehloAggressiveSimplification.cpp
+281 −0 stablehlo/transforms/StablehloAggressiveSimplificationPatterns.td
+7 −0 stablehlo/transforms/VhloToVersion.cpp
2 changes: 1 addition & 1 deletion third_party/torch-mlir
Submodule torch-mlir updated 27 files
+1 −1 externals/llvm-project
+1 −1 externals/stablehlo
+141 −0 include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td
+29 −3 lib/Conversion/TorchOnnxToTorch/DefaultDomainGtoP.cpp
+31 −28 lib/Conversion/TorchToLinalg/Linear.cpp
+48 −35 lib/Conversion/TorchToTosa/TorchToTosa.cpp
+152 −14 lib/Dialect/Torch/IR/TorchOps.cpp
+79 −0 lib/Dialect/Torch/Transforms/AbstractInterpLibrary.cpp
+132 −0 lib/Dialect/Torch/Transforms/DecomposeComplexOps.cpp
+2 −0 lib/Dialect/Torch/Transforms/LowerToBackendContract.cpp
+43 −43 lib/Dialect/TorchConversion/Transforms/BackendTypeConversion.cpp
+39 −42 projects/pt1/e2e_testing/xfail_sets.py
+49 −1 projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/abstract_interp_lib_gen.py
+15 −4 projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/torch_ods_gen.py
+161 −0 projects/pt1/python/torch_mlir_e2e_test/test_suite/backprop.py
+55 −39 projects/pt1/python/torch_mlir_e2e_test/test_suite/conv.py
+82 −0 projects/pt1/python/torch_mlir_e2e_test/test_suite/elementwise.py
+75 −55 projects/pt1/python/torch_mlir_e2e_test/test_suite/matmul.py
+1 −1 python/torch_mlir/fx.py
+1 −1 pytorch-hash.txt
+1 −1 pytorch-requirements.txt
+2 −2 test/CAPI/torch.c
+80 −0 test/Conversion/TorchOnnxToTorch/simple_ops_g_to_p.mlir
+2 −6 test/Conversion/TorchToLinalg/convolution.mlir
+32 −0 test/Conversion/TorchToTosa/basic.mlir
+94 −0 test/Dialect/Torch/canonicalize.mlir
+1 −1 torchvision-requirements.txt