Skip to content

Commit

Permalink
address review comments
Browse files Browse the repository at this point in the history
Signed-off-by: dchigarev <[email protected]>
  • Loading branch information
dchigarev committed Oct 8, 2024
1 parent 42cbdc0 commit 717f7ca
Show file tree
Hide file tree
Showing 4 changed files with 39 additions and 41 deletions.
16 changes: 7 additions & 9 deletions src/common/transformations/src/transformations/mlir/mlir_op.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,6 @@
#include "gc/Utils/Error.h"
#include "gc/ExecutionEngine/GPURuntime/GpuOclRuntime.h"
#include "openvino/runtime/intel_gpu/remote_properties.hpp"
#include "openvino/runtime/intel_gpu/properties.hpp"
#endif
#endif

Expand Down Expand Up @@ -367,18 +366,17 @@ bool MLIREvaluateGcGPU::invoke(const ov::TensorVector& inputs, ov::TensorVector&
gc::gpu::OclContext ctx = build_ocl_context(evaluationContext);
gc::gpu::StaticExecutor exec(module);

auto it = evaluationContext.find(ov::intel_gpu::memory_type::is_kernel_arg_usm.name());
auto it = evaluationContext.find(ov::intel_gpu::mlir_meta::is_kernel_arg_usm.name());
if (it == evaluationContext.end()) {
OPENVINO_THROW("No is_kernel_arg_usm provided for OpenCL execution");
}
std::vector<bool> arg_types = it->second.as<std::vector<bool>>();
size_t module_arg_i = 0;

for (size_t i = 0; i < inputs.size(); ++i, ++module_arg_i) {
for (size_t i = 0; i < inputs.size(); ++i) {
exec.arg(inputs[i].data(), arg_types[i]);
}
for (size_t i = 0; i < outputs.size(); ++i, ++module_arg_i) {
exec.arg(outputs[i].data(), arg_types[module_arg_i]);
for (size_t i = 0, j = inputs.size(); i < outputs.size(); ++i, ++j) {
exec.arg(outputs[i].data(), arg_types[j]);
}
exec(ctx);
maybe_set_result_event(evaluationContext, ctx);
Expand All @@ -389,7 +387,7 @@ bool MLIREvaluateGcGPU::invoke_packed(std::vector<void*>& args, const ov::Evalua
gc::gpu::OclContext ctx = build_ocl_context(evaluationContext);
gc::gpu::DynamicExecutor exec(module);

auto it = evaluationContext.find(ov::intel_gpu::memory_type::is_kernel_arg_usm.name());
auto it = evaluationContext.find(ov::intel_gpu::mlir_meta::is_kernel_arg_usm.name());
if (it == evaluationContext.end()) {
OPENVINO_THROW("No is_kernel_arg_usm provided for OpenCL execution");
}
Expand All @@ -412,7 +410,7 @@ void MLIREvaluateGcGPU::maybe_set_result_event(const ov::EvaluationContext& eval
// case with in-order queue where we don't need to return an event
if (ctx.lastEvent == nullptr)
return;
auto it = evaluationContext.find(ov::intel_gpu::result_event.name());
auto it = evaluationContext.find(ov::intel_gpu::mlir_meta::result_event.name());
if (it == evaluationContext.end()) {
OPENVINO_THROW("No result_event provided for OpenCL execution");
}
Expand All @@ -430,7 +428,7 @@ gc::gpu::OclContext MLIREvaluateGcGPU::build_ocl_context(const ov::EvaluationCon
uint32_t waitListLen = 0;
std::vector<ov::intel_gpu::gpu_handle_param> waitList;

it = evaluationContext.find(ov::intel_gpu::wait_list.name());
it = evaluationContext.find(ov::intel_gpu::mlir_meta::wait_list.name());
if (it != evaluationContext.end()) {
waitList = it->second.as<std::vector<ov::intel_gpu::gpu_handle_param>>();
waitListLen = waitList.size();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -142,15 +142,6 @@ static constexpr auto surface = "GPU_SURFACE";
*/
static constexpr auto buffer = "GPU_BUFFER";

// TODO: maybe find more suitable place for this property (should be accessible from both
// src/plugins/intel_gpu/.../mlir_op.cpp and src/common/transformations/.../mlir_op.cpp)
/**
* @brief This key identifies whether the kernel argument at [i] position is USM pointer
* (this one is passed as evaluation context to a mlir_op)
* @ingroup ov_runtime_ocl_gpu_prop_cpp_api
*/
static constexpr Property<std::vector<bool>> is_kernel_arg_usm{"IS_KERNEL_ARG_USM"};

} // namespace memory_type

/**
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -93,25 +93,6 @@ static constexpr Property<gpu_handle_param> ocl_queue{"OCL_QUEUE"};
*/
static constexpr Property<gpu_handle_param> va_device{"VA_DEVICE"};

// TODO: maybe find more suitable place for this property (should be accessible from both
// src/plugins/intel_gpu/.../mlir_op.cpp and src/common/transformations/.../mlir_op.cpp)
/**
* @brief This key identifies an event list to wait for a kernel execution.
* (this one is passed as evaluation context to a mlir_op)
* @ingroup ov_runtime_ocl_gpu_cpp_api
*/
static constexpr Property<std::vector<gpu_handle_param>> wait_list{"EVENTS_WAIT_LIST"};

// TODO: maybe find more suitable place for this property (should be accessible from both
// src/plugins/intel_gpu/.../mlir_op.cpp and src/common/transformations/.../mlir_op.cpp)
/**
* @brief This key identifies a pointer to a cl::Event that should be set with
* the result cl_event of a kernel execution.
* (this one is passed as evaluation context to a mlir_op)
* @ingroup ov_runtime_ocl_gpu_cpp_api
*/
static constexpr Property<gpu_handle_param> result_event{"RESULT_EVENT"};

/**
* @brief Enum to define the type of the shared memory buffer
* @ingroup ov_runtime_ocl_gpu_cpp_api
Expand Down Expand Up @@ -204,5 +185,34 @@ static constexpr Property<uint32_t> dev_object_handle{"DEV_OBJECT_HANDLE"};
*/
static constexpr Property<uint32_t> va_plane{"VA_PLANE"};

// TODO: maybe find more suitable place for these properties (should be accessible from both
// src/plugins/intel_gpu/.../mlir_op.cpp and src/common/transformations/.../mlir_op.cpp)
/**
 * @brief Properties passed through the evaluation context to MLIR operations
 * (mlir_op) in the GPU plugin: the event wait list for kernel execution,
 * the slot for reporting the resulting event, and per-argument memory-kind flags.
 */
namespace mlir_meta {

/**
 * @brief This key identifies an event list (opaque GPU event handles) that a
 * kernel execution must wait on before it starts.
 * @ingroup ov_runtime_ocl_gpu_cpp_api
 */
static constexpr Property<std::vector<gpu_handle_param>> wait_list{"EVENTS_WAIT_LIST"};

/**
 * @brief This key identifies a pointer to a cl::Event that should be set with
 * the result cl_event of a kernel execution. Filled only when the execution
 * produces a trailing event (skipped for in-order queues, which yield none).
 * @ingroup ov_runtime_ocl_gpu_cpp_api
 */
static constexpr Property<gpu_handle_param> result_event{"RESULT_EVENT"};

/**
 * @brief This key identifies whether the kernel argument at position [i] is a
 * USM pointer: element [i] of the vector is true for a USM argument.
 * @ingroup ov_runtime_ocl_gpu_cpp_api
 */
static constexpr Property<std::vector<bool>> is_kernel_arg_usm{"IS_KERNEL_ARG_USM"};

} // namespace mlir_meta
} // namespace intel_gpu
} // namespace ov
7 changes: 3 additions & 4 deletions src/plugins/intel_gpu/src/plugin/ops/mlir_op.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,6 @@
#include "runtime/ocl/ocl_base_event.hpp"

#include "openvino/runtime/intel_gpu/remote_properties.hpp"
#include "openvino/runtime/intel_gpu/properties.hpp"

namespace ov {
namespace op {
Expand Down Expand Up @@ -86,7 +85,7 @@ void CreateMLIRSubgraphOp(ProgramBuilder& p, const std::shared_ptr<ov::op::mlir:
cl_command_queue queue = ocl_stream->get_cl_queue().get();
meta.insert(ov::intel_gpu::ocl_queue(queue));
}
meta.insert(ov::intel_gpu::memory_type::is_kernel_arg_usm(is_usm_ptr));
meta.insert(ov::intel_gpu::mlir_meta::is_kernel_arg_usm(is_usm_ptr));

std::vector<ov::intel_gpu::gpu_handle_param> events_list;
if (stream.get_queue_type() == cldnn::QueueTypes::out_of_order) {
Expand All @@ -101,11 +100,11 @@ void CreateMLIRSubgraphOp(ProgramBuilder& p, const std::shared_ptr<ov::op::mlir:
}
}
}
meta.insert(ov::intel_gpu::wait_list(events_list));
meta.insert(ov::intel_gpu::mlir_meta::wait_list(events_list));

if (auto ocl_ev = dynamic_cast<cldnn::ocl::ocl_base_event*>(ev.get())) {
cl::Event* cl_ev = &ocl_ev->get();
meta.insert(ov::intel_gpu::result_event(cl_ev));
meta.insert(ov::intel_gpu::mlir_meta::result_event(cl_ev));
} else {
OPENVINO_THROW("Unsupported result event type");
}
Expand Down

0 comments on commit 717f7ca

Please sign in to comment.