diff --git a/mlir/lib/Target/LLVMIR/ModuleImport.cpp b/mlir/lib/Target/LLVMIR/ModuleImport.cpp
index e905408a1e08..928d8077175c 100644
--- a/mlir/lib/Target/LLVMIR/ModuleImport.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleImport.cpp
@@ -26,6 +26,7 @@
 #include "mlir/Interfaces/DataLayoutInterfaces.h"
 #include "mlir/Tools/mlir-translate/Translation.h"
 
+#include "llvm/ADT/DepthFirstIterator.h"
 #include "llvm/ADT/PostOrderIterator.h"
 #include "llvm/ADT/ScopeExit.h"
 #include "llvm/ADT/StringSet.h"
@@ -132,18 +133,17 @@ static LogicalResult convertInstructionImpl(OpBuilder &odsBuilder,
   return failure();
 }
 
-/// Get a topologically sorted list of blocks for the given function.
+/// Get a topologically sorted list of blocks for the given basic blocks.
 static SetVector<llvm::BasicBlock *>
-getTopologicallySortedBlocks(llvm::Function *func) {
+getTopologicallySortedBlocks(ArrayRef<llvm::BasicBlock *> basicBlocks) {
   SetVector<llvm::BasicBlock *> blocks;
-  for (llvm::BasicBlock &bb : *func) {
-    if (!blocks.contains(&bb)) {
-      llvm::ReversePostOrderTraversal<llvm::BasicBlock *> traversal(&bb);
+  for (llvm::BasicBlock *basicBlock : basicBlocks) {
+    if (!blocks.contains(basicBlock)) {
+      llvm::ReversePostOrderTraversal<llvm::BasicBlock *> traversal(basicBlock);
       blocks.insert(traversal.begin(), traversal.end());
     }
   }
-  assert(blocks.size() == func->size() && "some blocks are not sorted");
-
+  assert(blocks.size() == basicBlocks.size() && "some blocks are not sorted");
   return blocks;
 }
 
@@ -1859,11 +1859,26 @@ LogicalResult ModuleImport::processFunction(llvm::Function *func) {
   if (func->isDeclaration())
     return success();
 
-  // Eagerly create all blocks.
-  for (llvm::BasicBlock &bb : *func) {
-    Block *block =
-        builder.createBlock(&funcOp.getBody(), funcOp.getBody().end());
-    mapBlock(&bb, block);
+  // Collect the set of basic blocks reachable from the function's entry
+  // block. This step is crucial because LLVM IR may contain unreachable
+  // blocks that self-dominate; in such blocks an instruction could use a
+  // value it defines itself, which the import does not support. Since MLIR
+  // does not support block labels, unreachable blocks can safely be removed:
+  // no indirect branch instruction could possibly target them.
+  llvm::df_iterator_default_set<llvm::BasicBlock *> reachable;
+  for (llvm::BasicBlock *basicBlock : llvm::depth_first_ext(func, reachable))
+    (void)basicBlock;
+
+  // Eagerly create all reachable blocks.
+  SmallVector<llvm::BasicBlock *> reachableBasicBlocks;
+  for (llvm::BasicBlock &basicBlock : *func) {
+    // Skip unreachable blocks.
+    if (!reachable.contains(&basicBlock))
+      continue;
+    Region &body = funcOp.getBody();
+    Block *block = builder.createBlock(&body, body.end());
+    mapBlock(&basicBlock, block);
+    reachableBasicBlocks.push_back(&basicBlock);
   }
 
   // Add function arguments to the entry block.
@@ -1876,10 +1891,11 @@ LogicalResult ModuleImport::processFunction(llvm::Function *func) {
   // Process the blocks in topological order. The ordered traversal ensures
   // operands defined in a dominating block have a valid mapping to an MLIR
   // value once a block is translated.
-  SetVector<llvm::BasicBlock *> blocks = getTopologicallySortedBlocks(func);
+  SetVector<llvm::BasicBlock *> blocks =
+      getTopologicallySortedBlocks(reachableBasicBlocks);
   setConstantInsertionPointToStart(lookupBlock(blocks.front()));
-  for (llvm::BasicBlock *bb : blocks)
-    if (failed(processBasicBlock(bb, lookupBlock(bb))))
+  for (llvm::BasicBlock *basicBlock : blocks)
+    if (failed(processBasicBlock(basicBlock, lookupBlock(basicBlock))))
       return failure();
 
   // Process the debug intrinsics that require a delayed conversion after
diff --git a/mlir/test/Target/LLVMIR/Import/constant.ll b/mlir/test/Target/LLVMIR/Import/constant.ll
index cd2d00ec0aa7..3c46f5b20c31 100644
--- a/mlir/test/Target/LLVMIR/Import/constant.ll
+++ b/mlir/test/Target/LLVMIR/Import/constant.ll
@@ -47,6 +47,16 @@ define void @undef_constant(i32 %arg0) {
 
 ; // -----
 
+; CHECK-LABEL: @poison_constant
+define void @poison_constant(double %arg0) {
+  ; CHECK: %[[POISON:.+]] = llvm.mlir.poison : f64
+  ; CHECK: llvm.fadd %[[POISON]], %{{.*}} : f64
+  %1 = fadd double poison, %arg0
+  ret void
+}
+
+; // -----
+
 ; CHECK-LABEL: @null_constant
 define ptr @null_constant() {
 ; CHECK: %[[NULL:[0-9]+]] = llvm.mlir.zero : !llvm.ptr
diff --git a/mlir/test/Target/LLVMIR/Import/exception.ll b/mlir/test/Target/LLVMIR/Import/exception.ll
index de227645cc15..440d89ec147f 100644
--- a/mlir/test/Target/LLVMIR/Import/exception.ll
+++ b/mlir/test/Target/LLVMIR/Import/exception.ll
@@ -12,34 +12,35 @@ define i32 @invokeLandingpad() personality ptr @__gxx_personality_v0 {
 ; CHECK: %[[a1:[0-9]+]] = llvm.mlir.addressof @_ZTIii : !llvm.ptr
 ; CHECK: %[[a3:[0-9]+]] = llvm.alloca %{{[0-9]+}} x i8 {alignment = 1 : i64} : (i32) -> !llvm.ptr
   %1 = alloca i8
-  ; CHECK: llvm.invoke @foo(%[[a3]]) to ^bb2 unwind ^bb1 : (!llvm.ptr) -> ()
-  invoke void @foo(ptr %1) to label %4 unwind label %2
+  ; CHECK: llvm.invoke @foo(%[[a3]]) to ^[[bb1:.*]] unwind ^[[bb4:.*]] : (!llvm.ptr) -> ()
+  invoke void @foo(ptr %1) to label %bb1 unwind label %bb4
 
-; CHECK: ^bb1:
+; CHECK: ^[[bb1]]:
+bb1:
+  ; CHECK: %{{[0-9]+}} = llvm.invoke @bar(%[[a3]]) to ^[[bb2:.*]] unwind ^[[bb4]] : (!llvm.ptr) -> !llvm.ptr
+  %2 = invoke ptr @bar(ptr %1) to label %bb2 unwind label %bb4
+
+; CHECK: ^[[bb2]]:
+bb2:
+  ; CHECK: llvm.invoke @vararg_foo(%[[a3]], %{{.*}}) to ^[[bb3:.*]] unwind ^[[bb4]] vararg(!llvm.func<void (ptr, ...)>) : (!llvm.ptr, i32) -> ()
+  invoke void (ptr, ...) @vararg_foo(ptr %1, i32 0) to label %bb3 unwind label %bb4
+
+; CHECK: ^[[bb3]]:
+bb3:
+  ; CHECK: llvm.invoke %{{.*}}(%[[a3]], %{{.*}}) to ^[[bb5:.*]] unwind ^[[bb4]] vararg(!llvm.func<void (ptr, ...)>) : !llvm.ptr, (!llvm.ptr, i32) -> ()
+  invoke void (ptr, ...) undef(ptr %1, i32 0) to label %bb5 unwind label %bb4
+
+; CHECK: ^[[bb4]]:
+bb4:
   ; CHECK: %{{[0-9]+}} = llvm.landingpad (catch %{{[0-9]+}} : !llvm.ptr) (catch %[[a1]] : !llvm.ptr) (filter %{{[0-9]+}} : !llvm.array<1 x i1>) : !llvm.struct<(ptr, i32)>
   %3 = landingpad { ptr, i32 }
           catch ptr @_ZTIi
           catch ptr @_ZTIii
           filter [1 x i1] [i1 1]
   resume { ptr, i32 } %3
 
-; CHECK: ^bb2:
+; CHECK: ^[[bb5]]:
+bb5:
   ; CHECK: llvm.return %{{[0-9]+}} : i32
   ret i32 1
-
-; CHECK: ^bb3:
-  ; CHECK: %{{[0-9]+}} = llvm.invoke @bar(%[[a3]]) to ^bb2 unwind ^bb1 : (!llvm.ptr) -> !llvm.ptr
-  %6 = invoke ptr @bar(ptr %1) to label %4 unwind label %2
-
-; CHECK: ^bb4:
-  ; CHECK: llvm.invoke @vararg_foo(%[[a3]], %{{.*}}) to ^bb2 unwind ^bb1 vararg(!llvm.func<void (ptr, ...)>) : (!llvm.ptr, i32) -> ()
-  invoke void (ptr, ...) @vararg_foo(ptr %1, i32 0) to label %4 unwind label %2
-
-; CHECK: ^bb5:
-  ; CHECK: llvm.invoke %{{.*}}(%[[a3]], %{{.*}}) to ^bb2 unwind ^bb1 vararg(!llvm.func<void (ptr, ...)>) : !llvm.ptr, (!llvm.ptr, i32) -> ()
-  invoke void (ptr, ...) undef(ptr %1, i32 0) to label %4 unwind label %2
-
-; CHECK: ^bb6:
-  ; CHECK: llvm.return %{{[0-9]+}} : i32
-  ret i32 0
 }
 
 declare i32 @foo2()
diff --git a/mlir/test/Target/LLVMIR/Import/unreachable-blocks.ll b/mlir/test/Target/LLVMIR/Import/unreachable-blocks.ll
new file mode 100644
index 000000000000..8a84f4b5c19b
--- /dev/null
+++ b/mlir/test/Target/LLVMIR/Import/unreachable-blocks.ll
@@ -0,0 +1,35 @@
+; RUN: mlir-translate -import-llvm %s | FileCheck %s
+
+; Test that unreachable blocks are dropped.
+
+; CHECK-LABEL: llvm.func @unreachable_block
+define void @unreachable_block(float %0) {
+.entry:
+  ; CHECK: llvm.return
+  ret void
+
+unreachable:
+  ; CHECK-NOT: llvm.fadd
+  %1 = fadd float %0, %1
+  br label %unreachable
+}
+
+; Test that unreachable blocks with back edges are supported.
+
+; CHECK-LABEL: llvm.func @back_edge
+define i32 @back_edge(i32 %0) {
+.entry:
+  ; CHECK: llvm.br ^[[RET:.*]](%{{.*}})
+  br label %ret
+ret:
+  ; CHECK: ^[[RET]](%{{.*}}: i32)
+  %1 = phi i32 [ %0, %.entry ], [ %2, %unreachable ]
+  ; CHECK: llvm.return %{{.*}} : i32
+  ret i32 %1
+
+unreachable:
+  ; CHECK-NOT: add
+  %2 = add i32 %0, %2
+  %3 = icmp eq i32 %2, 42
+  br i1 %3, label %ret, label %unreachable
+}
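The reachability walk added to ModuleImport::processFunction above follows a
standard LLVM ADT pattern: llvm::depth_first_ext traverses the CFG from the
entry block and records every visited block in an external set, so any block
absent from the set afterwards is unreachable. The following is a minimal,
self-contained sketch of that pattern for reference; the helper name
collectReachableBlocks is illustrative and not part of the patch.

#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"

// Return the blocks reachable from the entry block, preserving the
// function's original block order.
static llvm::SmallVector<llvm::BasicBlock *>
collectReachableBlocks(llvm::Function *func) {
  // depth_first_ext fills `reachable` as a side effect of the traversal;
  // the loop body is intentionally empty.
  llvm::df_iterator_default_set<llvm::BasicBlock *> reachable;
  for (llvm::BasicBlock *basicBlock : llvm::depth_first_ext(func, reachable))
    (void)basicBlock;

  // Keep only the visited blocks, in the order they appear in the function.
  llvm::SmallVector<llvm::BasicBlock *> reachableBlocks;
  for (llvm::BasicBlock &basicBlock : *func)
    if (reachable.contains(&basicBlock))
      reachableBlocks.push_back(&basicBlock);
  return reachableBlocks;
}

Dropping the unvisited blocks before block creation is what lets the import
assume that every block it processes is dominated by the entry block, which
the reverse post-order traversal in getTopologicallySortedBlocks relies on.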