Skip to content

Commit

Permalink
Merge branch 'main' into hamptonm/feature/string
Browse files — browse the repository at this point in the history
  • Loading branch information
hamptonm1 authored Sep 7, 2023
2 parents e05641e + 9876985 commit d1979a6
Show file tree
Hide file tree
Showing 28 changed files with 75 additions and 3,496 deletions.
2 changes: 1 addition & 1 deletion src/Builder/OpBuildTable.inc
Original file line number Diff line number Diff line change
Expand Up @@ -133,7 +133,7 @@ op_dialect_version_map_["OptionalGetElement"] = {18};
op_dialect_version_map_["OptionalHasElement"] = {18};
op_dialect_version_map_["Or"] = {7};
op_dialect_version_map_["PRelu"] = {16};
op_dialect_version_map_["Pad"] = {18, 13, 11, 2};
op_dialect_version_map_["Pad"] = {19, 13, 11, 2};
op_dialect_version_map_["Pow"] = {15};
op_dialect_version_map_["QLinearConv"] = {10};
op_dialect_version_map_["QLinearMatMul"] = {10};
Expand Down
9 changes: 0 additions & 9 deletions src/Compiler/CompilerOptions.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,6 @@ std::string instrumentOps; // onnx-mlir only
unsigned instrumentControlBits; // onnx-mlir only
bool instrumentONNXSignature; // onnx-mlir only
std::string ONNXOpStats; // onnx-mlir only
bool enableMemoryBundling; // onnx-mlir only
int onnxOpTransformThreshold; // onnx-mlir only
bool onnxOpTransformReport; // onnx-mlir only
bool enableParallel; // onnx-mlir only
Expand Down Expand Up @@ -376,14 +375,6 @@ static llvm::cl::opt<std::string, true> ONNXOpStatsOpt("onnx-op-stats",
llvm::cl::location(ONNXOpStats), llvm::cl::init(""),
llvm::cl::cat(OnnxMlirOptions));

static llvm::cl::opt<bool, true> enableMemoryBundlingOpt(
"enable-memory-bundling",
llvm::cl::desc(
"Enable memory bundling related optimizations (default=false)\n"
"Set to 'false' if you experience significant compile time."),
llvm::cl::location(enableMemoryBundling), llvm::cl::init(false),
llvm::cl::cat(OnnxMlirOptions));

static llvm::cl::opt<int, true> onnxOpTransformThresholdOpt(
"onnx-op-transform-threshold",
llvm::cl::desc(
Expand Down
1 change: 0 additions & 1 deletion src/Compiler/CompilerOptions.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -100,7 +100,6 @@ extern std::string instrumentOps; // onnx-mlir only
extern unsigned instrumentControlBits; // onnx-mlir only
extern bool instrumentONNXSignature; // onnx-mlir only
extern std::string ONNXOpStats; // onnx-mlir only
extern bool enableMemoryBundling; // onnx-mlir only
extern int onnxOpTransformThreshold; // onnx-mlir only
extern bool onnxOpTransformReport; // onnx-mlir only
extern bool enableParallel; // onnx-mlir only
Expand Down
6 changes: 0 additions & 6 deletions src/Compiler/CompilerPasses.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -215,12 +215,6 @@ void addKrnlToLLVMPasses(
// https://mlir.llvm.org/docs/BufferDeallocationInternals.
pm.addNestedPass<func::FuncOp>(
mlir::bufferization::createBufferDeallocationPass());
if (enableMemoryBundling) {
pm.addNestedPass<func::FuncOp>(krnl::createKrnlEnableMemoryPoolPass());
pm.addNestedPass<func::FuncOp>(krnl::createKrnlBundleMemoryPoolsPass());
pm.addPass(mlir::createCanonicalizerPass());
pm.addNestedPass<func::FuncOp>(krnl::createKrnlOptimizeMemoryPoolsPass());
}

// The pass below is needed for subview and collapseShape.. Unfortunately,
// MLIR supports only collapse for scalar loaded by scalar memory at this
Expand Down
4 changes: 2 additions & 2 deletions src/Conversion/KrnlToLLVM/KrnlVectorTypeCast.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2,13 +2,13 @@
* SPDX-License-Identifier: Apache-2.0
*/

//===------ KrnlGetRefOp.cpp - Lower KrnlGetRefOp -------------------------===//
//===------ KrnlVectorTypeCastOp.cpp - Lower KrnlVectorTypeCastOp ---------===//
//
// Copyright 2019-2023 The IBM Research Authors.
//
// =============================================================================
//
// This file lowers the KrnlGetRefOp operator.
// This file lowers the KrnlVectorTypeCastOp operator.
//
//===----------------------------------------------------------------------===//

Expand Down
1 change: 1 addition & 0 deletions src/Conversion/ONNXToKrnl/ML/CategoryMapper.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -268,6 +268,7 @@ struct ONNXCategoryMapperOpLowering
(shape[i] == ShapedType::kDynamic) ? 1 : shape[i]);
auto memRefType = MemRefType::get(
newShape, krnl::StringType::get(elementType.getContext()));
// Sole use of krnl.getRef.
Value stringMemRef = createKrnl.getRef(memRefType, memref, zero);
inputElem = createKrnl.load(stringMemRef, loopInd);
})
Expand Down
2 changes: 0 additions & 2 deletions src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -458,8 +458,6 @@ void populateLoweringONNXShapeTransformOpPattern(
void populateLoweringONNXCustomOpPattern(
mlir::RewritePatternSet &, mlir::TypeConverter &, mlir::MLIRContext *);

bool checkOpResultIsUsedByGetRef(mlir::memref::AllocOp *allocOp);

/// This function returns the index in the list of alloc arguments of the
/// dynamic dimension corresponding to `index` in the MemRef shape.
/// As an example:
Expand Down
25 changes: 25 additions & 0 deletions src/Dialect/ONNX/ONNXOps.td.inc
Original file line number Diff line number Diff line change
Expand Up @@ -5168,6 +5168,8 @@ def ONNXPadOp:ONNX_Op<"Pad",

3) `edge` - pads with the edge values of array

4) `wrap` - wrap-around padding as if the data tensor forms a torus


Example 1 (`constant` mode):

Expand Down Expand Up @@ -5232,6 +5234,29 @@ def ONNXPadOp:ONNX_Op<"Pad",
[4.5, 4.5, 4.5, 5.7],
]
```

Example 4 (`wrap` mode):

```
data = [
[1.0, 1.2],
[2.3, 3.4],
[4.5, 5.7],
]

pads = [2, 1, 1, 1]

mode = 'wrap'

output = [
[3.4, 2.3, 3.4, 2.3],
[5.7, 4.5, 5.7, 4.5],
[1.2, 1.0, 1.2, 1.0],
[3.4, 2.3, 3.4, 2.3],
[5.7, 4.5, 5.7, 4.5],
[1.2, 1.0, 1.2, 1.0],
]
```
}];
let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex<F32>]>, TensorOf<[Complex<F64>]>]>:$data,
TensorOf<[I64]>:$pads,
Expand Down
12 changes: 11 additions & 1 deletion src/Dialect/ONNX/Rewrite.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -196,6 +196,15 @@ bool AreTheSameAxesConstant(int64_t rank, Value lhs, Value rhs) {
createArrayAttrFromConstantOp(rhsConstOp));
}

/// Return true when both values are ranked, `lhs` has a fully static shape,
/// and the shapes of `lhs` and `rhs` are element-wise identical.
bool haveSameStaticShape(Value lhs, Value rhs) {
  if (!hasShapeAndRank(lhs) || !hasShapeAndRank(rhs))
    return false;
  Type lhsType = lhs.getType();
  if (!hasStaticShape(lhsType))
    return false;
  return getShape(lhsType) == getShape(rhs.getType());
}

} // namespace onnx_mlir

// =============================================================================
Expand Down Expand Up @@ -1020,7 +1029,8 @@ void ONNXOrOp::getCanonicalizationPatterns(
// Register all canonicalization patterns for ONNXReshapeOp: fusing of
// back-to-back reshapes, removal of identity reshapes (constant-shape and
// static-shape variants), and reshape/matmul swapping.
void ONNXReshapeOp::getCanonicalizationPatterns(
    RewritePatternSet &result, MLIRContext *context) {
  // A single variadic insert registers the patterns in the same order as
  // four individual insert calls would.
  result.insert<FuseReshapePattern, RemoveIdentityReshapePattern1,
      RemoveIdentityReshapePattern2, SwapReshapeMatMulPattern>(context);
}

Expand Down
16 changes: 14 additions & 2 deletions src/Dialect/ONNX/Rewrite.td
Original file line number Diff line number Diff line change
Expand Up @@ -137,6 +137,10 @@ class HaveSameDim<int dim>: Constraint<
"$1.getType().cast<RankedTensorType>().getShape()[" # dim # "])">,
"Two tensors have the same specified dimension">;

// DRR constraint wrapping onnx_mlir::haveSameStaticShape($0, $1): holds when
// both operands are ranked and their shapes are identical and fully static.
def HaveSameStaticShape: Constraint<
CPred<"onnx_mlir::haveSameStaticShape($0, $1)">,
"Two tensors have the same static shape">;

// Create a unit constant that will be used as none input.
def CreateNoneValue : NativeCodeCall<"$_builder.create<ONNXNoneOp>($_loc).getResult()">;

Expand Down Expand Up @@ -575,14 +579,22 @@ def FuseReshapePattern: Pat<
// Remove the first reshape op.
(ONNXReshapeOp $v, $s2, $az2)>;

// Remove an identity reshape: the input tensor already has exactly the shape
// given by the constant `shape` operand, so the op is a no-op.
// (The scraped diff left both the pre-rename header `RemoveIdentityReshapePattern`
// and a stale "transpose" comment in place; this is the clean post-merge record.)
def RemoveIdentityReshapePattern1: Pat<
  // Match any reshape of $val with shape operand $shape.
  (ONNXReshapeOp $val, $shape, $az),
  // Replace all uses of the reshape result with the original value.
  (replaceWithValue $val),
  // Guard: $val's type matches the constant shape exactly.
  [(HasSpecifiedConstantShape $val, $shape)]>;

// Identity-reshape elimination for the case where the shape operand is not a
// constant, but the input and output types both carry the same static shape.
def RemoveIdentityReshapePattern2: Pat<
// Bind the reshape result as $out so the constraint below can inspect it.
(ONNXReshapeOp:$out $val, $_, $_),
// Replace all uses of the reshape result with its input value.
(replaceWithValue $val),
// Guard: $out and $val have identical, fully static shapes.
[(HaveSameStaticShape $out, $val)]>;

def GetReturnTypeForMatMulOpND2D: NativeCodeCall<
"onnx_mlir::getReturnTypeForMatMulOpND2D($0, $1)"
>;
Expand Down
9 changes: 0 additions & 9 deletions src/Pass/Passes.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -99,15 +99,6 @@ namespace krnl {
/// Pass for lowering frontend dialects to Krnl IR dialect.
std::unique_ptr<mlir::Pass> createConvertKrnlToAffinePass();

/// Pass for enabling a memory pool for MemRefs.
std::unique_ptr<mlir::Pass> createKrnlEnableMemoryPoolPass();

/// Pass for enabling a memory pool for MemRefs.
std::unique_ptr<mlir::Pass> createKrnlBundleMemoryPoolsPass();

/// Pass for optimizing memory pools.
std::unique_ptr<mlir::Pass> createKrnlOptimizeMemoryPoolsPass();

/// Pass for lowering Seq in Krnl dialect.
std::unique_ptr<mlir::Pass> createConvertSeqToMemrefPass();

Expand Down
Loading

0 comments on commit d1979a6

Please sign in to comment.