[Encoding] Retire original_type field. (iree-org#18586)
The field is no longer needed: the encoding op now carries the padding semantics itself, so the attribute recording the original (pre-padding) tensor type is redundant.
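
For downstream code, the visible change is the narrower EncodingAttr::get signature: the Type origType parameter between the element types and the narrow-M size is gone, and the trailing parameters keep their defaults. A minimal call-site sketch follows (hedged: the header path and the helper name are illustrative, not taken from this diff):

#include <optional>
#include "iree/compiler/Dialect/Encoding/IR/EncodingOps.h"  // assumed header path

// Builds an LHS matmul encoding after this commit. Before it, the call
// carried an extra /*origType=*/ argument right after elemTypes; now the
// pre-padding shape is recovered from the encoded tensor type itself.
static mlir::iree_compiler::IREE::Encoding::EncodingAttr
buildLhsEncoding(mlir::MLIRContext *ctx,
                 llvm::ArrayRef<mlir::Type> elemTypes,
                 llvm::ArrayRef<mlir::AffineMap> userIndexingMaps) {
  using namespace mlir::iree_compiler::IREE::Encoding;
  return EncodingAttr::get(ctx, /*operandIndex=*/0, EncodingOpType::matmul,
                           elemTypes,
                           /*matmulNarrowM=*/std::nullopt,
                           /*matmulNarrowN=*/std::nullopt, userIndexingMaps);
}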

Signed-off-by: hanhanW <[email protected]>
hanhanW authored Sep 24, 2024
1 parent 863ca01 commit b2dd6db
Showing 14 changed files with 183 additions and 196 deletions.


@@ -89,9 +89,9 @@ func.func @matmul_lowering_i8i8i32_vmvx_ukernel() attributes {
#map2 = affine_map<(d0, d1, d2) -> (d0, d2)>
#map3 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map4 = affine_map<(d0, d1, d2) -> (d0, d1)>
-#encoding_lhs = #iree_encoding.encoding<operand_index = 0, op_type = matmul, element_types = [f32, f32, f32], original_type = tensor<1x2xf32>, user_indexing_maps = [#map2, #map3, #map4], round_dims_to = array<i64: 16, 16, 16>>
-#encoding_rhs = #iree_encoding.encoding<operand_index = 1, op_type = matmul, element_types = [f32, f32, f32], original_type = tensor<2x3xf32>, user_indexing_maps = [#map2, #map3, #map4], round_dims_to = array<i64: 16, 16, 16>>
-#encoding_result = #iree_encoding.encoding<operand_index = 2, op_type = matmul, element_types = [f32, f32, f32], original_type = tensor<1x3xf32>, user_indexing_maps = [#map2, #map3, #map4], round_dims_to = array<i64: 16, 16, 16>>
+#encoding_lhs = #iree_encoding.encoding<operand_index = 0, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map2, #map3, #map4], round_dims_to = array<i64: 16, 16, 16>>
+#encoding_rhs = #iree_encoding.encoding<operand_index = 1, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map2, #map3, #map4], round_dims_to = array<i64: 16, 16, 16>>
+#encoding_result = #iree_encoding.encoding<operand_index = 2, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map2, #map3, #map4], round_dims_to = array<i64: 16, 16, 16>>
func.func @fill_matmul(%arg0: index, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: index, %arg6: index, %arg7: index) attributes {
hal.executable.target = #hal.executable.target<"vmvx", "vmvx-bytecode-fb">
} {
compiler/src/iree/compiler/Codegen/Common/EncodingUtils.cpp (1 addition & 8 deletions)
@@ -30,13 +30,7 @@ static RankedTensorType transposeIfNarrowNResult(RankedTensorType tensorType) {
return tensorType;
}
auto newIndex = encoding.getOperandIndex();
-  TypeAttr originalTypeAttr = encoding.getOriginalType();
-  RankedTensorType originalType = tensorType;
-  if (originalTypeAttr) {
-    originalType =
-        llvm::dyn_cast<RankedTensorType>(originalTypeAttr.getValue());
-  }
-  SmallVector<int64_t> newOriginalShape(originalType.getShape());
+  SmallVector<int64_t> newOriginalShape(tensorType.getShape());
auto userIndexingMaps = encoding.getUserIndexingMaps();
SmallVector<AffineMap> maps;
for (auto a : userIndexingMaps) {
@@ -92,7 +86,6 @@ static RankedTensorType transposeIfNarrowNResult(RankedTensorType tensorType) {
// just use the original map for the new encoding.
auto newEncoding = IREE::Encoding::EncodingAttr::get(
context, newIndex, opTypeAttr, encoding.getElementTypes(),
-      TypeAttr::get(RankedTensorType::get(newOriginalShape, elemType)),
encoding.getMatmulNarrow_N(), encoding.getMatmulNarrow_M(),
newIndexingMaps, encoding.getBcastMap(),
DenseI64ArrayAttr::get(context, newRoundDimsTo));
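
What transposeIfNarrowNResult does around this hunk is unchanged: a narrow-N result is rewritten as a narrow-M problem, which is why the constructor call above passes getMatmulNarrow_N() where the M value used to go. A toy, self-contained illustration of the 2-D shape handling (plain C++ with made-up dimensions; not IREE code):

#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

int main() {
  // Shape of a narrow-N matmul result, e.g. tensor<123x1xf32>; after this
  // commit it is read straight off the encoded tensor type instead of the
  // retired original_type attribute.
  std::vector<int64_t> newOriginalShape = {123, 1};
  // Transposing the result swaps M and N, turning narrow-N into narrow-M.
  std::swap(newOriginalShape[0], newOriginalShape[1]);
  std::cout << newOriginalShape[0] << "x" << newOriginalShape[1] << "\n";  // 1x123
  return 0;
}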
@@ -6,7 +6,7 @@
// 1. MFMA_F32_16x16x4_F32
//-----------------------------------------------------------------------------

-#encoding = #iree_encoding.encoding<operand_index = 0, op_type = matmul, element_types = [f32, f32, f32], original_type = tensor<255x513xf32>,
+#encoding = #iree_encoding.encoding<operand_index = 0, op_type = matmul, element_types = [f32, f32, f32],
user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>],
round_dims_to = array<i64: 16, 16, 16>>
#pipeline_layout = #hal.pipeline.layout<bindings = [
@@ -28,7 +28,7 @@ func.func @empty_fill_encoding_unroll8x8x4_MFMA_F32_16x16x4_F32() {

// -----

-#encoding = #iree_encoding.encoding<operand_index = 0, op_type = matmul, element_types = [f32, f32, f32], original_type = tensor<255x513xf32>,
+#encoding = #iree_encoding.encoding<operand_index = 0, op_type = matmul, element_types = [f32, f32, f32],
user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>],
round_dims_to = array<i64: 16, 16, 16>>
#pipeline_layout = #hal.pipeline.layout<bindings = [
@@ -61,7 +61,7 @@ func.func @set_encoding_LHS_unroll8x8x4_MFMA_F32_16x16x4_F32() {

// -----

-#encoding = #iree_encoding.encoding<operand_index = 0, op_type = matmul, element_types = [f32, f32, f32], original_type = tensor<?x?xf32>,
+#encoding = #iree_encoding.encoding<operand_index = 0, op_type = matmul, element_types = [f32, f32, f32],
user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>],
round_dims_to = array<i64: 16, 16, 16>>
#pipeline_layout = #hal.pipeline.layout<constants = 2, bindings = [
@@ -104,7 +104,7 @@ func.func @set_encoding_LHS_dynamic_unroll8x8x4_MFMA_F32_16x16x4_F32() {

// -----

-#encoding = #iree_encoding.encoding<operand_index = 1, op_type = matmul, element_types = [f32, f32, f32], original_type = tensor<255x513xf32>,
+#encoding = #iree_encoding.encoding<operand_index = 1, op_type = matmul, element_types = [f32, f32, f32],
user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>],
round_dims_to = array<i64: 16, 16, 16>>
#pipeline_layout = #hal.pipeline.layout<bindings = [
@@ -137,7 +137,7 @@ func.func @set_encoding_RHS_unroll8x8x4_MFMA_F32_16x16x4_F32() {

// -----

-#encoding = #iree_encoding.encoding<operand_index = 2, op_type = matmul, element_types = [f32, f32, f32], original_type = tensor<255x513xf32>,
+#encoding = #iree_encoding.encoding<operand_index = 2, op_type = matmul, element_types = [f32, f32, f32],
user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>],
round_dims_to = array<i64: 16, 16, 16>>
#pipeline_layout = #hal.pipeline.layout<bindings = [
@@ -170,7 +170,7 @@ func.func @set_encoding_ACC_unroll8x8x4_MFMA_F32_16x16x4_F32() {

// -----

-#encoding = #iree_encoding.encoding<operand_index = 2, op_type = matmul, element_types = [f32, f32, f32], original_type = tensor<255x513xf32>,
+#encoding = #iree_encoding.encoding<operand_index = 2, op_type = matmul, element_types = [f32, f32, f32],
user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>],
round_dims_to = array<i64: 16, 16, 16>>
#pipeline_layout = #hal.pipeline.layout<bindings = [
@@ -203,7 +203,7 @@ func.func @unset_encoding_ACC_unroll8x8x4_MFMA_F32_16x16x4_F32() {

// -----

-#encoding = #iree_encoding.encoding<operand_index = 2, op_type = matmul, element_types = [f32, f32, f32], original_type = tensor<?x?xf32>,
+#encoding = #iree_encoding.encoding<operand_index = 2, op_type = matmul, element_types = [f32, f32, f32],
user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>],
round_dims_to = array<i64: 16, 16, 16>>
#pipeline_layout = #hal.pipeline.layout<constants = 2, bindings = [
@@ -308,7 +308,7 @@ func.func @matmul_lowering_unroll8x8x4_MFMA_F32_16x16x4_F32() {
// 2. MFMA_I32_16x16x32_I8
//-----------------------------------------------------------------------------

-#encoding = #iree_encoding.encoding<operand_index = 0, op_type = matmul, element_types = [i8, i8, i32], original_type = tensor<255x513xi8>,
+#encoding = #iree_encoding.encoding<operand_index = 0, op_type = matmul, element_types = [i8, i8, i32],
user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>],
round_dims_to = array<i64: 16, 16, 32>>
#pipeline_layout = #hal.pipeline.layout<bindings = [
@@ -341,7 +341,7 @@ func.func @set_encoding_LHS_unroll8x8x2_MFMA_I32_16x16x32_I8() {

// -----

-#encoding = #iree_encoding.encoding<operand_index = 1, op_type = matmul, element_types = [i8, i8, i32], original_type = tensor<255x513xi8>,
+#encoding = #iree_encoding.encoding<operand_index = 1, op_type = matmul, element_types = [i8, i8, i32],
user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>],
round_dims_to = array<i64: 16, 16, 32>>
#pipeline_layout = #hal.pipeline.layout<bindings = [
@@ -374,7 +374,7 @@ func.func @set_encoding_RHS_unroll8x8x2_MFMA_I32_16x16x32_I8() {

// -----

-#encoding = #iree_encoding.encoding<operand_index = 2, op_type = matmul, element_types = [i8, i8, i32], original_type = tensor<255x513xi32>,
+#encoding = #iree_encoding.encoding<operand_index = 2, op_type = matmul, element_types = [i8, i8, i32],
user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>],
round_dims_to = array<i64: 16, 16, 32>>
#pipeline_layout = #hal.pipeline.layout<bindings = [
@@ -407,7 +407,7 @@ func.func @set_encoding_ACC_unroll8x8x2_MFMA_I32_16x16x32_I8() {

// -----

-#encoding = #iree_encoding.encoding<operand_index = 2, op_type = matmul, element_types = [i8, i8, i32], original_type = tensor<255x513xi32>,
+#encoding = #iree_encoding.encoding<operand_index = 2, op_type = matmul, element_types = [i8, i8, i32],
user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>],
round_dims_to = array<i64: 16, 16, 32>>
#pipeline_layout = #hal.pipeline.layout<bindings = [
@@ -179,7 +179,7 @@ func.func @batch_matmul_fill_dynamic(%arg0 : tensor<?x?x?xf32>, %arg1 : tensor<?
#hal.pipeline.binding<storage_buffer>,
#hal.pipeline.binding<storage_buffer>
]>
-#encoding_lhs = #iree_encoding.encoding<operand_index = 0, op_type = matmul, element_types = [f32, f32, f32], original_type = tensor<1x1xf32>, matmul_narrow_M = 1 : index, matmul_narrow_N = 1 : index, user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>]>
+#encoding_lhs = #iree_encoding.encoding<operand_index = 0, op_type = matmul, element_types = [f32, f32, f32], matmul_narrow_M = 1 : index, matmul_narrow_N = 1 : index, user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>]>
func.func @drop_encoding_for_hal_flow_ops_static() {
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(#pipeline_layout) binding(0) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<1x1xf32>>
@@ -201,7 +201,7 @@ func.func @drop_encoding_for_hal_flow_ops_static() {
#hal.pipeline.binding<storage_buffer>,
#hal.pipeline.binding<storage_buffer>
]>
-#encoding_lhs = #iree_encoding.encoding<operand_index = 0, op_type = matmul, element_types = [bf16, bf16, bf16], original_type = tensor<?x?xbf16>, user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>]>
+#encoding_lhs = #iree_encoding.encoding<operand_index = 0, op_type = matmul, element_types = [bf16, bf16, bf16], user_indexing_maps = [affine_map<(d0, d1, d2) -> (d0, d2)>, affine_map<(d0, d1, d2) -> (d2, d1)>, affine_map<(d0, d1, d2) -> (d0, d1)>]>
func.func @drop_encoding_for_hal_flow_ops_dynamic() {
%c0 = arith.constant 0 : index
%c32_i64 = arith.constant 32 : i64
@@ -195,7 +195,7 @@ func.func @elem_pack_ukernels() attributes {hal.executable.target = #executable_
%cst = arith.constant 0.000000e+00 : f32
%c0 = arith.constant 0 : index
%0 = hal.interface.binding.subspan layout(#pipeline_layout) binding(0) alignment(64) offset(%c0) flags(ReadOnly) : !flow.dispatch.tensor<readonly:tensor<1024x2048xf32>>
-%1:2 = iree_codegen.query_tile_sizes tensor<1024x2048xf32, #iree_encoding.encoding<operand_index = 0, op_type = matmul, element_types = [f32, f32, f32], original_type = tensor<1024x2048xf32>>> -> index, index
+%1:2 = iree_codegen.query_tile_sizes tensor<1024x2048xf32, #iree_encoding.encoding<operand_index = 0, op_type = matmul, element_types = [f32, f32, f32]>> -> index, index
%2 = affine.apply #map()[%1#0]
%3 = affine.apply #map1()[%1#1]
%4 = hal.interface.binding.subspan layout(#pipeline_layout) binding(1) alignment(64) offset(%c0) : !flow.dispatch.tensor<writeonly:tensor<?x?x?x?xf32>>{%2, %3, %1#0, %1#1}
@@ -206,12 +206,12 @@ func.func @elem_pack_ukernels() attributes {hal.executable.target = #executable_
%15 = arith.addf %in, %in : f32
linalg.yield %15 : f32
} -> tensor<1024x2048xf32>
-%8:2 = iree_codegen.query_tile_sizes tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0, op_type = matmul, element_types = [f32, f32, f32], original_type = tensor<1024x2048xf32>>> -> index, index
+%8:2 = iree_codegen.query_tile_sizes tensor<?x?xf32, #iree_encoding.encoding<operand_index = 0, op_type = matmul, element_types = [f32, f32, f32]>> -> index, index
%9 = affine.apply #map()[%8#0]
%10 = affine.apply #map1()[%8#1]
%11 = tensor.empty(%9, %10, %8#0, %8#1) : tensor<?x?x?x?xf32>
%pack = tensor.pack %7 padding_value(%cst : f32) inner_dims_pos = [0, 1] inner_tiles = [%8#0, %8#1] into %11 : tensor<1024x2048xf32> -> tensor<?x?x?x?xf32>
-%12:2 = iree_codegen.query_tile_sizes tensor<1024x2048xf32, #iree_encoding.encoding<operand_index = 0, op_type = matmul, element_types = [f32, f32, f32], original_type = tensor<1024x2048xf32>>> -> index, index
+%12:2 = iree_codegen.query_tile_sizes tensor<1024x2048xf32, #iree_encoding.encoding<operand_index = 0, op_type = matmul, element_types = [f32, f32, f32]>> -> index, index
%13 = affine.apply #map()[%12#0]
%14 = affine.apply #map1()[%12#1]
flow.dispatch.tensor.store %pack, %4, offsets = [0, 0, 0, 0], sizes = [%13, %14, %12#0, %12#1], strides = [1, 1, 1, 1] : tensor<?x?x?x?xf32> -> !flow.dispatch.tensor<writeonly:tensor<?x?x?x?xf32>>{%13, %14, %12#0, %12#1}
@@ -78,7 +78,6 @@ def EncodingAttr :
AttrParameter<"IntegerAttr", "this tensor operand's index in the parameter list">:$operand_index,
AttrParameter<"EncodingOpTypeAttr", "operand type">:$op_type,
AttrParameter<"ArrayAttr", "element types of the user's operands">:$element_types,
-    OptionalParameter<"TypeAttr", "type of the original tensor type before padding">:$original_type,
// TODO(#15466): generalize matmul_narrow_{M,N} into a list?
OptionalParameter<"IntegerAttr", "optional M narrow dimension size (only for contraction op user_indexing_maps)">:$matmul_narrow_M,
OptionalParameter<"IntegerAttr", "optional N narrow dimension size (only for contraction op user_indexing_maps)">:$matmul_narrow_N,
@@ -91,7 +90,7 @@ def EncodingAttr :
let builders = [
AttrBuilder<(ins "int64_t":$operandIndex,
"EncodingOpType":$opType,
"ArrayRef<Type>":$elemTypes, "Type":$origType,
"ArrayRef<Type>":$elemTypes,
CArg<"std::optional<int64_t>", "{}">:$matmulNarrowM,
CArg<"std::optional<int64_t>", "{}">:$matmulNarrowN,
CArg<"ArrayRef<AffineMap>", "{}">:$maps,
compiler/src/iree/compiler/Dialect/Encoding/IR/EncodingOps.cpp (6 additions & 8 deletions)
@@ -98,7 +98,6 @@ LogicalResult UnsetEncodingOp::reifyResultShapes(

EncodingAttr EncodingAttr::get(MLIRContext *ctx, int64_t operandIndex,
EncodingOpType opType, ArrayRef<Type> elemTypes,
-                               Type origType,
std::optional<int64_t> matmulNarrowM,
std::optional<int64_t> matmulNarrowN,
ArrayRef<AffineMap> maps,
@@ -109,17 +108,16 @@ EncodingAttr EncodingAttr::get(MLIRContext *ctx, int64_t operandIndex,
return x ? b.getIndexAttr(*x) : IntegerAttr();
};
auto opTypeAttr = EncodingOpTypeAttr::get(ctx, opType);
-  auto origTypeAttr = origType ? TypeAttr::get(origType) : TypeAttr();
auto roundDimsToAttr = roundDimsTo.empty()
? DenseI64ArrayAttr()
: b.getDenseI64ArrayAttr(roundDimsTo);
auto bcastMapAttr = bcastMap.has_value()
? AffineMapAttr::get(bcastMap.value())
: AffineMapAttr();
return get(ctx, b.getIndexAttr(operandIndex), opTypeAttr,
-             b.getTypeArrayAttr(elemTypes), origTypeAttr,
-             optionalToAttr(matmulNarrowM), optionalToAttr(matmulNarrowN),
-             b.getAffineMapArrayAttr(maps), bcastMapAttr, roundDimsToAttr);
+             b.getTypeArrayAttr(elemTypes), optionalToAttr(matmulNarrowM),
+             optionalToAttr(matmulNarrowN), b.getAffineMapArrayAttr(maps),
+             bcastMapAttr, roundDimsToAttr);
}

AffineMap EncodingAttr::getMapForOperandIndex() {
@@ -155,9 +153,9 @@ ArrayRef<int64_t> EncodingAttr::getRoundDimsToArray() {

EncodingAttr EncodingAttr::clone(AffineMap bcastMap) {
return get(bcastMap.getContext(), getOperandIndex(), getOpType(),
-             getElementTypes(), getOriginalType(), getMatmulNarrow_M(),
-             getMatmulNarrow_N(), getUserIndexingMaps(),
-             AffineMapAttr::get(bcastMap), getRoundDimsTo());
+             getElementTypes(), getMatmulNarrow_M(), getMatmulNarrow_N(),
+             getUserIndexingMaps(), AffineMapAttr::get(bcastMap),
+             getRoundDimsTo());
}

//===---------------------------------------------------------------------===//
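
clone() keeps its contract (same encoding, new bcast map) and now simply has one fewer field to forward. A hedged usage fragment (the function and variable names are illustrative, not from this diff):

#include "mlir/IR/AffineMap.h"

// Rebind the bcast map on an existing encoding `enc`; element types,
// narrow-M/N, user_indexing_maps, and round_dims_to are all preserved,
// and there is no longer an original_type to copy over.
static mlir::iree_compiler::IREE::Encoding::EncodingAttr
withIdentityBcastMap(mlir::iree_compiler::IREE::Encoding::EncodingAttr enc,
                     mlir::MLIRContext *ctx) {
  return enc.clone(mlir::AffineMap::getMultiDimIdentityMap(/*numDims=*/2, ctx));
}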