diff --git a/src/Compiler/CompilerUtils.cpp b/src/Compiler/CompilerUtils.cpp
index 546ad998a0..105763107e 100644
--- a/src/Compiler/CompilerUtils.cpp
+++ b/src/Compiler/CompilerUtils.cpp
@@ -60,9 +60,8 @@ mlir::TimingScope rootTimingScope;
 namespace onnx_mlir {
 
 // Values to report the current phase of compilation.
-// Increase TOTAL_COMPILE_PHASE when having more phases.
 uint64_t CURRENT_COMPILE_PHASE = 1;
-uint64_t TOTAL_COMPILE_PHASE = 6;
+uint64_t TOTAL_COMPILE_PHASE = 0;
 
 // Make a function that forces preserving all files using the runtime arguments
 // and/or the overridePreserveFiles enum.
@@ -977,11 +976,15 @@ static int emitOutput(mlir::OwningOpRef<ModuleOp> &module,
 int compileModule(mlir::OwningOpRef<ModuleOp> &module,
     mlir::MLIRContext &context, std::string outputNameNoExt,
     EmissionTargetType emissionTarget) {
+  // When a C++ program calls this function directly without using onnx-mlir
+  // driver, there is no importing phase (e.g. the model is .mlir, not .onnx).
+  // Thus, decrease the total number of phases.
+  if (CURRENT_COMPILE_PHASE == 1) {
+    SET_TOTAL_COMPILE_PHASE(emissionTarget);
+    TOTAL_COMPILE_PHASE--;
+  }
+
   std::string msg = "Compiling and Optimizing MLIR Module";
-  // There is no importing phase (e.g. the model is .mlir, not .onnx), adjust to
-  // correctly reflect the current phase.
-  if (CURRENT_COMPILE_PHASE == 1)
-    CURRENT_COMPILE_PHASE++;
   showCompilePhase(msg);
   auto compileModuleTiming = rootTimingScope.nest("[onnx-mlir] " + msg);
 
diff --git a/src/Compiler/CompilerUtils.hpp b/src/Compiler/CompilerUtils.hpp
index e3ecc1bd72..713e2fb8e3 100644
--- a/src/Compiler/CompilerUtils.hpp
+++ b/src/Compiler/CompilerUtils.hpp
@@ -33,10 +33,22 @@ extern mlir::TimingScope rootTimingScope;
 
 namespace onnx_mlir {
 
 // Values to report the current phase of compilation.
-// Increase TOTAL_COMPILE_PHASE when having more phases.
 extern uint64_t CURRENT_COMPILE_PHASE;
 extern uint64_t TOTAL_COMPILE_PHASE;
 
+// When having more phases, let increase TOTAL_COMPILE_PHASE.
+#define SET_TOTAL_COMPILE_PHASE(emissionTarget)                                \
+  {                                                                            \
+    if (emissionTarget == EmitObj)                                             \
+      TOTAL_COMPILE_PHASE = 5;                                                 \
+    else if (emissionTarget == EmitLib)                                        \
+      TOTAL_COMPILE_PHASE = 6;                                                 \
+    else if (emissionTarget == EmitJNI)                                        \
+      TOTAL_COMPILE_PHASE = 8;                                                 \
+    else                                                                       \
+      TOTAL_COMPILE_PHASE = 3;                                                 \
+  }
+
 struct Command {
   std::string _path;
diff --git a/src/onnx-mlir.cpp b/src/onnx-mlir.cpp
index be1d40554e..b29c66344f 100644
--- a/src/onnx-mlir.cpp
+++ b/src/onnx-mlir.cpp
@@ -79,6 +79,8 @@ int main(int argc, char *argv[]) {
   // may better determine which compilation we are dealing with.
   std::filesystem::path p(inputFilename);
   std::string modelShortName = p.filename();
+  // Configure compile phase information.
+  SET_TOTAL_COMPILE_PHASE(emissionTarget);
   std::string msg =
       "Importing ONNX Model to MLIR Module from \"" + modelShortName + "\"";
   showCompilePhase(msg);
diff --git a/test/mlir/driver/compile_phases.mlir b/test/mlir/driver/compile_phases.mlir
index 3e94ccbfb0..3393aa28b6 100644
--- a/test/mlir/driver/compile_phases.mlir
+++ b/test/mlir/driver/compile_phases.mlir
@@ -1,14 +1,38 @@
-// RUN: onnx-mlir %s -o %t| FileCheck %s && rm %t.so
-
-// CHECK: [1/6] {{.*}} Importing ONNX Model to MLIR Module from
-// CHECK: [2/6] {{.*}} Compiling and Optimizing MLIR Module
-// CHECK: [3/6] {{.*}} Translating MLIR Module to LLVM and Generating LLVM Optimized Bitcode
-// CHECK: [4/6] {{.*}} Generating Object from LLVM Bitcode
-// CHECK: [5/6] {{.*}} Linking and Generating the Output Shared Library
-// CHECK: [6/6] {{.*}} Compilation completed
+// RUN: onnx-mlir %s -o %t | FileCheck --check-prefix=EMIT-LIB %s && rm %t.so
+// RUN: onnx-mlir %s --EmitObj -o %t | FileCheck --check-prefix=EMIT-OBJ %s && rm %t.o
+// RUN: onnx-mlir %s --EmitJNI -o %t | FileCheck --check-prefix=EMIT-JNI %s && rm %t.jar
+// RUN: onnx-mlir %s --EmitLLVMIR -o %t | FileCheck --check-prefix=EMIT-LLVMIR %s && rm %t.onnx.mlir
+
+// EMIT-LIB: [1/6] {{.*}} Importing ONNX Model to MLIR Module from
+// EMIT-LIB: [2/6] {{.*}} Compiling and Optimizing MLIR Module
+// EMIT-LIB: [3/6] {{.*}} Translating MLIR Module to LLVM and Generating LLVM Optimized Bitcode
+// EMIT-LIB: [4/6] {{.*}} Generating Object from LLVM Bitcode
+// EMIT-LIB: [5/6] {{.*}} Linking and Generating the Output Shared Library
+// EMIT-LIB: [6/6] {{.*}} Compilation completed
+
+// EMIT-OBJ: [1/5] {{.*}} Importing ONNX Model to MLIR Module from
+// EMIT-OBJ: [2/5] {{.*}} Compiling and Optimizing MLIR Module
+// EMIT-OBJ: [3/5] {{.*}} Translating MLIR Module to LLVM and Generating LLVM Optimized Bitcode
+// EMIT-OBJ: [4/5] {{.*}} Generating Object from LLVM Bitcode
+// EMIT-OBJ: [5/5] {{.*}} Compilation completed
+
+// EMIT-JNI: [1/8] {{.*}} Importing ONNX Model to MLIR Module from
+// EMIT-JNI: [2/8] {{.*}} Compiling and Optimizing MLIR Module
+// EMIT-JNI: [3/8] {{.*}} Translating MLIR Module to LLVM and Generating LLVM Optimized Bitcode
+// EMIT-JNI: [4/8] {{.*}} Generating Object from LLVM Bitcode
+// EMIT-JNI: [5/8] {{.*}} Generating JNI Object
+// EMIT-JNI: [6/8] {{.*}} Linking and Generating the Output Shared Library
+// EMIT-JNI: [7/8] {{.*}} Creating JNI Jar
+// EMIT-JNI: [8/8] {{.*}} Compilation completed
+
+// EMIT-LLVMIR: [1/3] {{.*}} Importing ONNX Model to MLIR Module from
+// EMIT-LLVMIR: [2/3] {{.*}} Compiling and Optimizing MLIR Module
+// EMIT-LLVMIR: [3/3] {{.*}} Compilation completed
 module {
   func.func @main_graph(%arg0: tensor<?xf32>) -> tensor<?xf32> {
     onnx.Return %arg0 : tensor<?xf32>
   }
   "onnx.EntryPoint"() {func = @main_graph} : () -> ()
 }
+
+