Showing 9 changed files with 293 additions and 67 deletions.
@@ -0,0 +1,123 @@
#include "mlir/IR/Builders.h" | ||
#include "mlir/IR/BuiltinOps.h" | ||
#include "mlir/IR/BuiltinTypes.h" | ||
#include "mlir/IR/MLIRContext.h" | ||
#include "mlir/IR/Operation.h" | ||
#include "mlir/IR/OperationSupport.h" | ||
#include "mlir/IR/Types.h" | ||
#include "mlir/IR/Value.h" | ||
#include "mlir/IR/Verifier.h" | ||
#include "mlir/Pass/Pass.h" | ||
#include "mlir/Pass/PassManager.h" | ||
|
||
#include "xla/mlir_hlo/mhlo/transforms/passes.h" | ||
#include "xla/service/backend.h" | ||
#include "xla/service/gpu/gpu_latency_hiding_scheduler.h" | ||
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h" | ||
#include "xla/service/gpu/model/gpu_performance_model.h" | ||
#include "xla/service/platform_util.h" | ||
#include "xla/stream_executor/cuda/cuda_platform_id.h" | ||
#include "xla/stream_executor/device_description.h" | ||
#include "xla/stream_executor/platform.h" | ||
#include "xla/stream_executor/platform_manager.h" | ||
#include "xla/translate/mhlo_to_hlo/mlir_hlo_to_hlo.h" | ||
|
||
#include <iostream> | ||
#include <memory> | ||
#include <stdexcept> | ||
#include <string> | ||
|
||
#include "AnalyticalCostModel.h" | ||
#include "RunXlaGpuPasses.h" | ||
|
||
using namespace mlir; | ||
|
||
uint64_t AnalyticalCostModel::getAnalyticalCost(ModuleOp &wrapperModule) {
  std::unique_ptr<xla::HloModule> preOpt =
      wrapperModuleToHloModule(wrapperModule);

  // Run the XLA GPU passes (layout assignment, fusion, simplification) so
  // that what is measured matches what would actually be run.
  auto hloModule = runXlaGpuPasses(std::move(preOpt));

  auto deviceDescription = getDeviceDescription();

  // Shape size in bytes, assuming 4-byte pointers on device.
  xla::HloCostAnalysis::ShapeSizeFunction shapeSizeFunction =
      [](const xla::Shape &shape) {
        return xla::gpu::GetSizeOfShape(shape, 4);
      };
  xla::gpu::GpuHloCostAnalysis costAnalysis(
      xla::gpu::GpuHloCostAnalysis::Options{shapeSizeFunction, {}, {}, true},
      *deviceDescription);

  assert(hloModule->computation_count() == 1);

  // Sentinel: -1 (i.e. UINT64_MAX) means no computation has been costed yet.
  uint64_t cost = -1;

  for (auto c : hloModule->computations()) {
    if (cost != (uint64_t)-1) {
      throw std::invalid_argument("found two computations");
    }
    if (!c->Accept(&costAnalysis).ok()) {
      throw std::runtime_error("cost analysis failed");
    }

    // The op being measured should always be the return value, which sits at
    // the root of the computation.
    auto op = c->root_instruction();

    auto runtime = xla::gpu::GpuPerformanceModel::EstimateRunTimeForInstruction(
        op, *deviceDescription, &costAnalysis,
        xla::gpu::GpuPerformanceModelOptions::ForModule(op->GetModule()));
    cost = absl::ToInt64Nanoseconds(runtime.exec_time);
  }

  return cost;
}

/**
 * Create the XLA-internal HloModule that the analytical cost model consumes.
 */
std::unique_ptr<xla::HloModule>
AnalyticalCostModel::wrapperModuleToHloModule(ModuleOp &wrapperModule) {
  auto context = wrapperModule.getContext();
  PassManager pm(context);
  // Lower StableHLO to MHLO so the module can be exported to HLO.
  pm.addPass(mlir::mhlo::createStablehloLegalizeToHloPass());
  if (failed(pm.run(wrapperModule))) {
    llvm::errs() << "Couldn't legalize StableHLO to MHLO\n";
    return nullptr;
  }

  MlirToHloConversionOptions options;
  options.propagate_layouts = true;
  options.return_tuple = false;

  auto hloModule = ConvertMlirHloToHloModule(wrapperModule, options);
  if (!hloModule.ok()) {
    llvm::errs() << "Couldn't create hloModule: "
                 << hloModule.status().message();
    return nullptr;
  }
  return std::move(hloModule.value());
}

stream_executor::Platform *AnalyticalCostModel::getXlaPlatform() {
  return xla::PlatformUtil::GetPlatform("cuda").value();
}

/**
 * Get the DeviceDescription for the current device.
 */
std::unique_ptr<stream_executor::DeviceDescription>
AnalyticalCostModel::getDeviceDescription() {
  // Assume device ordinal 0.
  return std::move(getXlaPlatform()->DescriptionForDevice(0).value());
}

/**
 * Get the StreamExecutor for the current device.
 */
stream_executor::StreamExecutor *AnalyticalCostModel::getStreamExecutor() {
  // Assume device ordinal 0.
  auto executor = getXlaPlatform()->ExecutorForDevice(0).value();
  if (executor == nullptr) {
    throw std::runtime_error("Couldn't get executor");
  }

  return executor;
}
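
For context, here is a minimal sketch of how this entry point might be driven, assuming a CUDA-capable device is visible to XLA. The main() harness, the 128x128 stablehlo.add, and the parser/dialect boilerplate are illustrative assumptions and not part of this commit; only AnalyticalCostModel::getAnalyticalCost comes from the file above.

// Hypothetical driver for AnalyticalCostModel::getAnalyticalCost (not in this commit).
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Parser/Parser.h"
#include "stablehlo/dialect/StablehloOps.h"

#include <cstdint>
#include <iostream>

#include "AnalyticalCostModel.h"

int main() {
  mlir::MLIRContext context;
  // Load the dialects used by the wrapper module below.
  context.loadDialect<mlir::func::FuncDialect,
                      mlir::stablehlo::StablehloDialect>();

  // A wrapper module whose single function returns the op being measured.
  constexpr const char *kModule = R"mlir(
    module {
      func.func @main(%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>)
          -> tensor<128x128xf32> {
        %0 = stablehlo.add %arg0, %arg1 : tensor<128x128xf32>
        return %0 : tensor<128x128xf32>
      }
    }
  )mlir";

  mlir::ParserConfig config(&context);
  mlir::OwningOpRef<mlir::ModuleOp> moduleRef =
      mlir::parseSourceString<mlir::ModuleOp>(kModule, config);
  if (!moduleRef) {
    std::cerr << "failed to parse wrapper module\n";
    return 1;
  }

  mlir::ModuleOp wrapperModule = *moduleRef;
  uint64_t ns = AnalyticalCostModel::getAnalyticalCost(wrapperModule);
  std::cout << "estimated runtime: " << ns << " ns\n";
  return 0;
}

Because runXlaGpuPasses instantiates xla::gpu::NVPTXCompiler and getXlaPlatform asks for the "cuda" platform at ordinal 0, a sketch like this only makes sense in a build linked against the CUDA-enabled XLA GPU backend.
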
@@ -0,0 +1,51 @@
#include "mlir/IR/Builders.h" | ||
#include "mlir/IR/BuiltinOps.h" | ||
#include "mlir/IR/BuiltinTypes.h" | ||
#include "mlir/IR/MLIRContext.h" | ||
#include "mlir/IR/Operation.h" | ||
#include "mlir/IR/OperationSupport.h" | ||
#include "mlir/IR/Types.h" | ||
#include "mlir/IR/Value.h" | ||
#include "mlir/IR/Verifier.h" | ||
|
||
#include "xla/mlir_hlo/mhlo/transforms/passes.h" | ||
#include "xla/service/backend.h" | ||
#include "xla/service/gpu/gpu_latency_hiding_scheduler.h" | ||
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h" | ||
#include "xla/service/gpu/model/gpu_performance_model.h" | ||
#include "xla/service/platform_util.h" | ||
#include "xla/stream_executor/cuda/cuda_platform_id.h" | ||
#include "xla/stream_executor/device_description.h" | ||
#include "xla/stream_executor/platform.h" | ||
#include "xla/stream_executor/platform_manager.h" | ||
#include "xla/translate/mhlo_to_hlo/mlir_hlo_to_hlo.h" | ||
|
||
#include <iostream> | ||
#include <memory> | ||
#include <stdexcept> | ||
#include <string> | ||
|
||
class AnalyticalCostModel { | ||
public: | ||
static uint64_t getAnalyticalCost(mlir::ModuleOp &wrapperModule); | ||
|
||
private: | ||
/** | ||
* Create XLA internal HloModule for the analytical cost model | ||
*/ | ||
static std::unique_ptr<xla::HloModule> | ||
wrapperModuleToHloModule(mlir::ModuleOp &wrapperModule); | ||
|
||
static stream_executor::Platform *getXlaPlatform(); | ||
|
||
/** | ||
* Get DeviceDescription for current device. | ||
*/ | ||
static std::unique_ptr<stream_executor::DeviceDescription> | ||
getDeviceDescription(); | ||
|
||
/** | ||
* Get StreamExecutor for current device. | ||
*/ | ||
static stream_executor::StreamExecutor *getStreamExecutor(); | ||
}; |
@@ -0,0 +1,24 @@
#include "xla/mlir_hlo/mhlo/transforms/passes.h" | ||
#include "xla/service/backend.h" | ||
#include "xla/service/compiler.h" | ||
#include "xla/service/gpu/gpu_latency_hiding_scheduler.h" | ||
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h" | ||
#include "xla/service/gpu/model/gpu_performance_model.h" | ||
#include "xla/service/gpu/nvptx_compiler.h" | ||
#include "xla/service/platform_util.h" | ||
#include "xla/stream_executor/cuda/cuda_platform_id.h" | ||
#include "xla/stream_executor/device_description.h" | ||
#include "xla/stream_executor/platform.h" | ||
#include "xla/stream_executor/platform_manager.h" | ||
#include "xla/translate/mhlo_to_hlo/mlir_hlo_to_hlo.h" | ||
|
||
#include "RunXlaGpuPasses.h" | ||
|
||
std::unique_ptr<xla::HloModule> | ||
runXlaGpuPasses(std::unique_ptr<xla::HloModule> hloModule) { | ||
xla::gpu::NVPTXCompiler compiler; | ||
auto executor = getStreamExecutor(); | ||
xla::gpu::NVPTXCompiler::CompileOptions options; | ||
auto res = compiler.RunHloPasses(hloModule, executor, options); | ||
return res; | ||
} |
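
runXlaGpuPasses calls .value() on the absl::StatusOr returned by RunHloPasses, which aborts the process if the pipeline fails. If callers should be able to handle that failure instead, a status-propagating variant might look like the sketch below; the name runXlaGpuPassesOrStatus is hypothetical and not part of this commit, and the same includes as above are assumed (absl::StatusOr comes in through the XLA headers).

// Hypothetical status-propagating variant of runXlaGpuPasses (not in this
// commit): the caller decides how to handle a failed pass pipeline.
absl::StatusOr<std::unique_ptr<xla::HloModule>>
runXlaGpuPassesOrStatus(std::unique_ptr<xla::HloModule> hloModule) {
  xla::gpu::NVPTXCompiler compiler;
  auto *executor = getStreamExecutor();
  xla::gpu::NVPTXCompiler::CompileOptions options;
  return compiler.RunHloPasses(std::move(hloModule), executor, options);
}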