[pull] main from llvm:main #18

Closed · wants to merge 23 commits

Commits (23)
9f64748
[FxImporter] Synchronize the collection of symbolic torch ops (#3236)
penguin-wwy Apr 29, 2024
aed2cf3
[Torch] emit aten.__contains__.str_list and add folder (#3249)
qingyunqu Apr 29, 2024
b218519
[NFC] Update black version (#3256)
penguin-wwy Apr 29, 2024
b1e2241
[ONNX] Fix Onnx.Selu lowering and canonicalizer for IntImplicit op (#…
vivekkhandelwal1 Apr 29, 2024
0a5ff68
[stablehlo] Support PrimsCollapseOp and PrimsSplitDimOp in stablehlo …
Apr 29, 2024
2176176
[FX] Add broadcast test with dynamic dim (#3123)
sjain-stanford Apr 29, 2024
087fea0
build: manually update PyTorch version (#3257)
vivekkhandelwal1 Apr 29, 2024
db67210
Integrate LLVM at llvm/llvm-project@593f6fdcb4bb3ff81ba4e6f89d7b16540…
rsuderman Apr 29, 2024
122cf22
Re-enable LTC Build (#3261)
antoniojkim Apr 29, 2024
b64c22c
Fix onnx sinh lowering (#3253)
jinchen62 Apr 30, 2024
aa471f1
Fix onnx cosh lowering (#3254)
jinchen62 Apr 30, 2024
fb49919
Fix onnx acosh lowering (#3262)
jinchen62 Apr 30, 2024
bf04b53
Fix onnx asinh lowering (#3263)
jinchen62 Apr 30, 2024
fbbad2d
Fix onnx atanh lowering (#3264)
jinchen62 Apr 30, 2024
fb8aed0
[Release Builds] Use `-no-build-isolation` to decouple from `pyprojec…
sjain-stanford Apr 30, 2024
f32ada9
[Stablehlo] Improve the lowering of pool op in stablehlo (#3259)
Apr 30, 2024
05f8b69
[MLIR][TORCH] Add OnnxToTorch support for BlackmanWindow function (#3…
vinayakdsci Apr 30, 2024
9442c66
[torch-mlir][sparse] add a few missing passes to the ref pipeline (#3…
aartbik Apr 30, 2024
72349f7
[TorchToLinalg] Adds Quantization Support for ConvTranspose (#3240)
zjgarvey Apr 30, 2024
315dc6c
[torch] `aten.eye` should use dynamic dims when no static dims are av…
renxida Apr 30, 2024
33eef15
Support onnx.If (#2825)
renxida Apr 30, 2024
0a2d21b
Add `.yamllint` and disable some annoying recurring warnings on every…
renxida Apr 30, 2024
8c48135
[linalg] Fix bug for conversion of complex dtype (#3269)
pashu123 May 1, 2024
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -11,7 +11,7 @@ repos:
- id: check-yaml
- id: check-added-large-files
- repo: https://github.com/psf/black
-  rev: 22.10.0
+  rev: 24.4.2
hooks:
- id: black

22 changes: 22 additions & 0 deletions .yamllint.yml
@@ -0,0 +1,22 @@
---

extends: default

rules:
# These do not appear to be conventional in GitHub actions.
document-end:
present: false
document-start:
present: false
# GitHub actions use "on" for triggers.
truthy: disable
# We have lots of long strings and command lines.
line-length: disable
comments:
# Formatters may do this (e.g. Prettier does) and it seems like the most
# trivial thing to get a failing check for.
min-spaces-from-content: 1
# This is not a useful check, especially when disabling entire blocks.
comments-indentation: disable

ignore: /third_party/*
1 change: 1 addition & 0 deletions build_tools/ci/build_posix.sh
@@ -50,6 +50,7 @@ cmake -S "$repo_root/externals/llvm-project/llvm" -B "$build_dir" \
-DLLVM_EXTERNAL_TORCH_MLIR_SOURCE_DIR="$repo_root" \
-DLLVM_TARGETS_TO_BUILD=host \
-DMLIR_ENABLE_BINDINGS_PYTHON=ON \
+  -DTORCH_MLIR_ENABLE_LTC=ON
echo "::endgroup::"

echo "::group::Build"
8 changes: 5 additions & 3 deletions build_tools/python_deploy/build_linux_packages.sh
@@ -432,6 +432,8 @@ function clean_build() {
}

function build_torch_mlir() {
+  # Disable LTC build for releases
+  export TORCH_MLIR_ENABLE_LTC=0
local torch_version="$1"
case $torch_version in
nightly)
@@ -440,7 +442,7 @@ function build_torch_mlir() {
--extra-index-url https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html
CMAKE_GENERATOR=Ninja \
TORCH_MLIR_PYTHON_PACKAGE_VERSION=${TORCH_MLIR_PYTHON_PACKAGE_VERSION} \
-      python -m pip wheel -v -w /wheelhouse /main_checkout/torch-mlir \
+      python -m pip wheel -v --no-build-isolation -w /wheelhouse /main_checkout/torch-mlir \
-f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html \
-r /main_checkout/torch-mlir/whl-requirements.txt
;;
@@ -450,7 +452,7 @@ function build_torch_mlir() {
python3 -m pip install --no-cache-dir -r /main_checkout/torch-mlir/build-requirements.txt
CMAKE_GENERATOR=Ninja \
TORCH_MLIR_PYTHON_PACKAGE_VERSION=${TORCH_MLIR_PYTHON_PACKAGE_VERSION} \
-      python -m pip wheel -v -w /wheelhouse /main_checkout/torch-mlir
+      python -m pip wheel -v --no-build-isolation -w /wheelhouse /main_checkout/torch-mlir
;;
*)
echo "Unrecognized torch version '$torch_version'"
@@ -474,7 +476,7 @@ function build_torch_mlir_core() {
TORCH_MLIR_PYTHON_PACKAGE_VERSION=${TORCH_MLIR_PYTHON_PACKAGE_VERSION} \
TORCH_MLIR_ENABLE_JIT_IR_IMPORTER=0 \
TORCH_MLIR_ENABLE_ONLY_MLIR_PYTHON_BINDINGS=1 \
-    python -m pip wheel -v -w /wheelhouse /main_checkout/torch-mlir
+    python -m pip wheel -v --no-build-isolation -w /wheelhouse /main_checkout/torch-mlir
}

function clean_wheels() {
1 change: 1 addition & 0 deletions build_tools/scrape_releases.py
@@ -2,6 +2,7 @@

See https://github.com/llvm/torch-mlir/issues/1374
"""

import argparse
import json

2 changes: 1 addition & 1 deletion externals/llvm-project
Submodule llvm-project updated 5104 files
@@ -30,13 +30,13 @@ namespace detail {
LogicalResult verifyTMTensorOpInterface(Operation *op);
}

#include "torch-mlir-dialects/Dialect/TMTensor/IR/TMTensorOps.h.inc" // IWYU pragma: export

/// Include the generated interface declarations.
#include "torch-mlir-dialects/Dialect/TMTensor/IR/TMTensorOpInterfaces.h.inc" // IWYU pragma: export

} // namespace TMTensor
} // namespace torch
} // namespace mlir

#include "torch-mlir-dialects/Dialect/TMTensor/IR/TMTensorOps.h.inc" // IWYU pragma: export

#endif // TORCH_MLIR_DIALECTS_DIALECT_TMTENSOR_IR_TMTENSORINTERFACES_H_
25 changes: 25 additions & 0 deletions include/torch-mlir/Conversion/TorchOnnxToTorch/Patterns.h
@@ -97,6 +97,31 @@ struct OpBinder {
return success();
}

ParseResult tensorResultTypes(llvm::SmallVector<mlir::Type> &typeList) {
for (auto result : op->getResults()) {
auto t = toValidTensorType(result.getType());
if (!t)
return failure();
typeList.push_back(t);
}
return success();
}

// The importer represents Onnx.GraphProto attributes as regions attached to
// the op.
ParseResult getRegionAtIndex(mlir::Region *&region, int64_t idx) {
if (idx >= op->getNumRegions())
return failure();

region = &op->getRegion(idx);

if (region == nullptr) {
return failure();
}

return success();
}

ParseResult tensorResultTypeAtIndex(Torch::ValueTensorType &typeIdx,
int64_t idx) {
if (idx >= op->getNumResults())
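As context, a hedged sketch of how a TorchOnnxToTorch handler might exercise the two new OpBinder helpers, in the registration style of the existing default-domain lowerings; the op name, handler body, and registration site are illustrative assumptions, not code from this PR:

// Sketch only: a hypothetical handler using the new OpBinder helpers.
patterns.onOp(
    "SomeOp", /*sinceVersion=*/1,
    [](OpBinder binder, ConversionPatternRewriter &rewriter) -> LogicalResult {
      // Collect every result type, failing if any result is not a tensor.
      llvm::SmallVector<mlir::Type> resultTypes;
      if (binder.tensorResultTypes(resultTypes))
        return failure();
      // Fetch the first attached region, e.g. an imported Onnx.GraphProto
      // subgraph such as an If branch.
      mlir::Region *region = nullptr;
      if (binder.getRegionAtIndex(region, /*idx=*/0))
        return failure();
      // ... build the replacement torch ops from resultTypes and region ...
      return success();
    });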
7 changes: 7 additions & 0 deletions include/torch-mlir/Conversion/TorchOnnxToTorch/Utils.h
@@ -38,6 +38,13 @@ Value createConstantIntList(OpBinder binder,

Type getQTorchTypeFromTorchIntType(Type ty);

template <typename T>
Value getItemOp(OpBinder binder, ConversionPatternRewriter &rewriter,
Value &ofItem) {
return rewriter.create<Torch::AtenItemOp>(binder.getLoc(),
rewriter.getType<T>(), ofItem);
}

LogicalResult OnnxLstmExpander(OpBinder binder,
ConversionPatternRewriter &rewriter);

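A minimal usage sketch for the new helper, assuming it is called from inside an existing conversion pattern; scalarFromTensor is a hypothetical wrapper, not part of the PR:

// Hypothetical wrapper: recover a !torch.float scalar from a rank-0 tensor
// by emitting torch.aten.item through the new getItemOp helper.
static Value scalarFromTensor(OpBinder binder,
                              ConversionPatternRewriter &rewriter,
                              Value zeroRankTensor) {
  return getItemOp<Torch::FloatType>(binder, rewriter, zeroRankTensor);
}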
@@ -69,6 +69,17 @@ FailureOr<Value> unsqueezeTensor(PatternRewriter &rewriter, Operation *op,
Value tensor, ArrayRef<int64_t> inputUnsqzDims,
size_t dimSizeIndexBits);

// Get a tensor that collapses the specified dimensions of the input tensor
FailureOr<Value> collapseTensor(PatternRewriter &rewriter, Operation *op,
Value tensor, int64_t collapseStartDim,
int64_t collapseEndDim,
size_t dimSizeIndexBits);

// Get a tensor that splits the specified dimension of the input tensor
FailureOr<Value> splitTensor(PatternRewriter &rewriter, Operation *op,
Value tensor, int64_t splitDim,
int64_t outerLength, size_t dimSizeIndexBits);

Value getConstantOfShape(PatternRewriter &rewriter, Location loc,
const APFloat &constant, Value shape,
TensorType outType);
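A rough usage sketch of the two new declarations, assuming they live in the same hlo namespace as the neighboring unsqueezeTensor and that a surrounding pattern supplies rewriter, op, and input; the dimension values are illustrative:

// Sketch: collapse dims [1, 2] of `input` into one dimension, then split
// dim 1 back out with an outer length of 4.
static LogicalResult collapseThenSplit(PatternRewriter &rewriter,
                                       Operation *op, Value input) {
  FailureOr<Value> collapsed = hlo::collapseTensor(
      rewriter, op, input, /*collapseStartDim=*/1, /*collapseEndDim=*/2,
      /*dimSizeIndexBits=*/64);
  if (failed(collapsed))
    return failure();
  FailureOr<Value> split =
      hlo::splitTensor(rewriter, op, *collapsed, /*splitDim=*/1,
                       /*outerLength=*/4, /*dimSizeIndexBits=*/64);
  if (failed(split))
    return failure();
  rewriter.replaceOp(op, *split);
  return success();
}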
53 changes: 53 additions & 0 deletions include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td
@@ -6637,6 +6637,34 @@ def Torch_AtenNativeLayerNormOp : Torch_Op<"aten.native_layer_norm", [
}];
}

def Torch_AtenMaxPool1dOp : Torch_Op<"aten.max_pool1d", [
AllowsTypeRefinement,
HasValueSemantics,
ReadOnly
]> {
let summary = "Generated op for `aten::max_pool1d : (Tensor, int[], int[], int[], int[], bool) -> (Tensor)`";
let arguments = (ins
AnyTorchTensorType:$self,
AnyTorchListOfTorchIntType:$kernel_size,
AnyTorchListOfTorchIntType:$stride,
AnyTorchListOfTorchIntType:$padding,
AnyTorchListOfTorchIntType:$dilation,
Torch_BoolType:$ceil_mode
);
let results = (outs
AnyTorchOptionalTensorType:$result
);
let hasCustomAssemblyFormat = 1;
let extraClassDefinition = [{
ParseResult AtenMaxPool1dOp::parse(OpAsmParser &parser, OperationState &result) {
return parseDefaultTorchOp(parser, result, 6, 1);
}
void AtenMaxPool1dOp::print(OpAsmPrinter &printer) {
printDefaultTorchOp(printer, *this, 6, 1);
}
}];
}

def Torch_AtenMaxPool2dOp : Torch_Op<"aten.max_pool2d", [
AllowsTypeRefinement,
HasValueSemantics,
@@ -13626,6 +13654,31 @@ def Torch_AtenWarnOp : Torch_Op<"aten.warn", [
}];
}

def Torch_Aten__Contains__StrListOp : Torch_Op<"aten.__contains__.str_list", [
AllowsTypeRefinement,
HasValueSemantics,
ReadOnly
]> {
let summary = "Generated op for `aten::__contains__.str_list : (str[], str) -> (bool)`";
let arguments = (ins
AnyTorchListOfTorchStringType:$l,
Torch_StringType:$item
);
let results = (outs
Torch_BoolType:$result
);
let hasCustomAssemblyFormat = 1;
let extraClassDefinition = [{
ParseResult Aten__Contains__StrListOp::parse(OpAsmParser &parser, OperationState &result) {
return parseDefaultTorchOp(parser, result, 2, 1);
}
void Aten__Contains__StrListOp::print(OpAsmPrinter &printer) {
printDefaultTorchOp(printer, *this, 2, 1);
}
}];
let hasFolder = 1;
}

def Torch_AtenFloatScalarOp : Torch_Op<"aten.Float.Scalar", [
AllowsTypeRefinement,
HasValueSemantics,
31 changes: 31 additions & 0 deletions include/torch-mlir/Dialect/Torch/IR/TorchOps.h
@@ -239,6 +239,37 @@ m_TorchListOfConstantBools(SmallVectorImpl<bool> &bind_values) {
return detail::torch_list_of_constant_bools_op_binder(bind_values);
}

namespace detail {
/// Matches the constant strs stored in a `torch.ListConstruct`.
struct torch_list_of_constant_strs_op_binder {
SmallVectorImpl<std::string> &bind_values;

/// Creates a matcher instance that binds the value to bvs if match succeeds.
torch_list_of_constant_strs_op_binder(SmallVectorImpl<std::string> &bvs)
: bind_values(bvs) {}

bool match(Operation *op) {
auto listConstruct = dyn_cast<Torch::PrimListConstructOp>(op);
if (!listConstruct)
return false;
for (Value value : listConstruct.getElements()) {
std::string str;
if (matchPattern(value, m_TorchConstantStr(str)))
bind_values.push_back(str);
else
return false;
}
return true;
}
};
} // namespace detail

/// Matches the constant strs stored in a `torch.prim.ListConstruct`.
inline detail::torch_list_of_constant_strs_op_binder
m_TorchListOfConstantStrs(SmallVectorImpl<std::string> &bind_values) {
return detail::torch_list_of_constant_strs_op_binder(bind_values);
}

namespace detail {
/// Matches the expected tensor and dim from `torch.aten.size.int`.
struct torch_tensor_size_int_op_binder {
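To show the intended use of the new matcher, a hedged sketch of a folder built on top of it; the actual folder for aten.__contains__.str_list lives in this PR's TorchOps.cpp changes (not shown), and the BoolAttr materialization below is an assumption:

// Sketch: fold aten.__contains__.str_list when both the list and the item
// are compile-time constants.
OpFoldResult Aten__Contains__StrListOp::fold(FoldAdaptor adaptor) {
  auto item = dyn_cast_or_null<StringAttr>(adaptor.getItem());
  if (!item)
    return nullptr;
  SmallVector<std::string> strs;
  if (!matchPattern(getL(), m_TorchListOfConstantStrs(strs)))
    return nullptr;
  // A production folder would also need to verify that the list value is
  // not mutated elsewhere before folding.
  bool contained = llvm::is_contained(strs, item.getValue().str());
  return BoolAttr::get(getContext(), contained);
}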