Skip to content

Commit

Permalink
Merge branch 'main' into run_gpt2_fix_data_file
Browse files Browse the repository at this point in the history
  • Loading branch information
AlexandreEichenberger authored Nov 1, 2024
2 parents 1d8dd01 + b39ed16 commit b145ecd
Show file tree
Hide file tree
Showing 4 changed files with 56 additions and 15 deletions.
15 changes: 9 additions & 6 deletions src/Compiler/CompilerUtils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -60,9 +60,8 @@ mlir::TimingScope rootTimingScope;
namespace onnx_mlir {

// Values to report the current phase of compilation.
// Increase TOTAL_COMPILE_PHASE when having more phases.
uint64_t CURRENT_COMPILE_PHASE = 1;
uint64_t TOTAL_COMPILE_PHASE = 6;
uint64_t TOTAL_COMPILE_PHASE = 0;

// Make a function that forces preserving all files using the runtime arguments
// and/or the overridePreserveFiles enum.
Expand Down Expand Up @@ -977,11 +976,15 @@ static int emitOutput(mlir::OwningOpRef<ModuleOp> &module,
int compileModule(mlir::OwningOpRef<ModuleOp> &module,
mlir::MLIRContext &context, std::string outputNameNoExt,
EmissionTargetType emissionTarget) {
// When a C++ program calls this function directly without using onnx-mlir
// driver, there is no importing phase (e.g. the model is .mlir, not .onnx).
// Thus, decrease the total number of phases.
if (CURRENT_COMPILE_PHASE == 1) {
SET_TOTAL_COMPILE_PHASE(emissionTarget);
TOTAL_COMPILE_PHASE--;
}

std::string msg = "Compiling and Optimizing MLIR Module";
// There is no importing phase here (e.g., the model is .mlir, not .onnx), so
// advance the counter to correctly reflect the current phase.
if (CURRENT_COMPILE_PHASE == 1)
CURRENT_COMPILE_PHASE++;
showCompilePhase(msg);
auto compileModuleTiming = rootTimingScope.nest("[onnx-mlir] " + msg);

Expand Down
14 changes: 13 additions & 1 deletion src/Compiler/CompilerUtils.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -33,10 +33,22 @@ extern mlir::TimingScope rootTimingScope;
namespace onnx_mlir {

// Values to report the current phase of compilation.
// Increase TOTAL_COMPILE_PHASE when having more phases.
extern uint64_t CURRENT_COMPILE_PHASE;
extern uint64_t TOTAL_COMPILE_PHASE;

// Set TOTAL_COMPILE_PHASE according to the requested emission target: each
// target stops the compilation pipeline at a different point, so the number
// of reported phases differs. When adding new phases, increase the counts
// below accordingly.
// The body is wrapped in do { ... } while (0) so the macro expands to a
// single statement and stays safe inside un-braced if/else constructs;
// emissionTarget is parenthesized to keep arbitrary argument expressions
// grouping correctly.
#define SET_TOTAL_COMPILE_PHASE(emissionTarget)                                \
  do {                                                                         \
    if ((emissionTarget) == EmitObj)                                           \
      TOTAL_COMPILE_PHASE = 5;                                                 \
    else if ((emissionTarget) == EmitLib)                                      \
      TOTAL_COMPILE_PHASE = 6;                                                 \
    else if ((emissionTarget) == EmitJNI)                                      \
      TOTAL_COMPILE_PHASE = 8;                                                 \
    else                                                                       \
      TOTAL_COMPILE_PHASE = 3;                                                 \
  } while (0)

struct Command {

std::string _path;
Expand Down
2 changes: 2 additions & 0 deletions src/onnx-mlir.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -79,6 +79,8 @@ int main(int argc, char *argv[]) {
// may better determine which compilation we are dealing with.
std::filesystem::path p(inputFilename);
std::string modelShortName = p.filename();
// Configure compile phase information.
SET_TOTAL_COMPILE_PHASE(emissionTarget);
std::string msg =
"Importing ONNX Model to MLIR Module from \"" + modelShortName + "\"";
showCompilePhase(msg);
Expand Down
40 changes: 32 additions & 8 deletions test/mlir/driver/compile_phases.mlir
Original file line number Diff line number Diff line change
@@ -1,14 +1,38 @@
// RUN: onnx-mlir %s -o %t| FileCheck %s && rm %t.so

// CHECK: [1/6] {{.*}} Importing ONNX Model to MLIR Module from
// CHECK: [2/6] {{.*}} Compiling and Optimizing MLIR Module
// CHECK: [3/6] {{.*}} Translating MLIR Module to LLVM and Generating LLVM Optimized Bitcode
// CHECK: [4/6] {{.*}} Generating Object from LLVM Bitcode
// CHECK: [5/6] {{.*}} Linking and Generating the Output Shared Library
// CHECK: [6/6] {{.*}} Compilation completed
// RUN: onnx-mlir %s -o %t | FileCheck --check-prefix=EMIT-LIB %s && rm %t.so
// RUN: onnx-mlir %s --EmitObj -o %t | FileCheck --check-prefix=EMIT-OBJ %s && rm %t.o
// RUN: onnx-mlir %s --EmitJNI -o %t | FileCheck --check-prefix=EMIT-JNI %s && rm %t.jar
// RUN: onnx-mlir %s --EmitLLVMIR -o %t | FileCheck --check-prefix=EMIT-LLVMIR %s && rm %t.onnx.mlir

// EMIT-LIB: [1/6] {{.*}} Importing ONNX Model to MLIR Module from
// EMIT-LIB: [2/6] {{.*}} Compiling and Optimizing MLIR Module
// EMIT-LIB: [3/6] {{.*}} Translating MLIR Module to LLVM and Generating LLVM Optimized Bitcode
// EMIT-LIB: [4/6] {{.*}} Generating Object from LLVM Bitcode
// EMIT-LIB: [5/6] {{.*}} Linking and Generating the Output Shared Library
// EMIT-LIB: [6/6] {{.*}} Compilation completed

// EMIT-OBJ: [1/5] {{.*}} Importing ONNX Model to MLIR Module from
// EMIT-OBJ: [2/5] {{.*}} Compiling and Optimizing MLIR Module
// EMIT-OBJ: [3/5] {{.*}} Translating MLIR Module to LLVM and Generating LLVM Optimized Bitcode
// EMIT-OBJ: [4/5] {{.*}} Generating Object from LLVM Bitcode
// EMIT-OBJ: [5/5] {{.*}} Compilation completed

// EMIT-JNI: [1/8] {{.*}} Importing ONNX Model to MLIR Module from
// EMIT-JNI: [2/8] {{.*}} Compiling and Optimizing MLIR Module
// EMIT-JNI: [3/8] {{.*}} Translating MLIR Module to LLVM and Generating LLVM Optimized Bitcode
// EMIT-JNI: [4/8] {{.*}} Generating Object from LLVM Bitcode
// EMIT-JNI: [5/8] {{.*}} Generating JNI Object
// EMIT-JNI: [6/8] {{.*}} Linking and Generating the Output Shared Library
// EMIT-JNI: [7/8] {{.*}} Creating JNI Jar
// EMIT-JNI: [8/8] {{.*}} Compilation completed

// EMIT-LLVMIR: [1/3] {{.*}} Importing ONNX Model to MLIR Module from
// EMIT-LLVMIR: [2/3] {{.*}} Compiling and Optimizing MLIR Module
// EMIT-LLVMIR: [3/3] {{.*}} Compilation completed
// Minimal identity model: returns its input tensor unchanged. It exists only
// to drive the full onnx-mlir compile pipeline so the RUN lines above can
// check the phase-progress messages; the graph's computation is irrelevant.
module {
func.func @main_graph(%arg0: tensor<?xf32>) -> tensor<?xf32> {
onnx.Return %arg0 : tensor<?xf32>
}
// Mark @main_graph as the model entry point expected by the driver.
"onnx.EntryPoint"() {func = @main_graph} : () -> ()
}


0 comments on commit b145ecd

Please sign in to comment.