diff --git a/src/common/transformations/src/transformations/mlir/mlir_op.cpp b/src/common/transformations/src/transformations/mlir/mlir_op.cpp
index c0b1322bc2019f..c85759d8e60769 100644
--- a/src/common/transformations/src/transformations/mlir/mlir_op.cpp
+++ b/src/common/transformations/src/transformations/mlir/mlir_op.cpp
@@ -66,7 +66,6 @@
 #include "gc/Utils/Error.h"
 #include "gc/ExecutionEngine/GPURuntime/GpuOclRuntime.h"
 #include "openvino/runtime/intel_gpu/remote_properties.hpp"
-#include "openvino/runtime/intel_gpu/properties.hpp"
 #endif
 #endif
 
@@ -367,18 +366,17 @@ bool MLIREvaluateGcGPU::invoke(const ov::TensorVector& inputs, ov::TensorVector&
     gc::gpu::OclContext ctx = build_ocl_context(evaluationContext);
     gc::gpu::StaticExecutor exec(module);
 
-    auto it = evaluationContext.find(ov::intel_gpu::memory_type::is_kernel_arg_usm.name());
+    auto it = evaluationContext.find(ov::intel_gpu::mlir_meta::is_kernel_arg_usm.name());
     if (it == evaluationContext.end()) {
         OPENVINO_THROW("No is_kernel_arg_usm provided for OpenCL execution");
     }
     std::vector<bool> arg_types = it->second.as<std::vector<bool>>();
 
-    size_t module_arg_i = 0;
-    for (size_t i = 0; i < inputs.size(); ++i, ++module_arg_i) {
+    for (size_t i = 0; i < inputs.size(); ++i) {
         exec.arg(inputs[i].data(), arg_types[i]);
     }
-    for (size_t i = 0; i < outputs.size(); ++i, ++module_arg_i) {
-        exec.arg(outputs[i].data(), arg_types[module_arg_i]);
+    for (size_t i = 0, j = inputs.size(); i < outputs.size(); ++i, ++j) {
+        exec.arg(outputs[i].data(), arg_types[j]);
     }
     exec(ctx);
     maybe_set_result_event(evaluationContext, ctx);
@@ -389,7 +387,7 @@ bool MLIREvaluateGcGPU::invoke_packed(std::vector& args, const ov::Evalua
     gc::gpu::OclContext ctx = build_ocl_context(evaluationContext);
     gc::gpu::DynamicExecutor exec(module);
 
-    auto it = evaluationContext.find(ov::intel_gpu::memory_type::is_kernel_arg_usm.name());
+    auto it = evaluationContext.find(ov::intel_gpu::mlir_meta::is_kernel_arg_usm.name());
     if (it == evaluationContext.end()) {
         OPENVINO_THROW("No is_kernel_arg_usm provided for OpenCL execution");
     }
@@ -412,7 +410,7 @@ void MLIREvaluateGcGPU::maybe_set_result_event(const ov::EvaluationContext& eval
     // case with in-order queue where we don't need to return an event
     if (ctx.lastEvent == nullptr)
         return;
-    auto it = evaluationContext.find(ov::intel_gpu::result_event.name());
+    auto it = evaluationContext.find(ov::intel_gpu::mlir_meta::result_event.name());
     if (it == evaluationContext.end()) {
         OPENVINO_THROW("No result_event provided for OpenCL execution");
     }
@@ -430,7 +428,7 @@ gc::gpu::OclContext MLIREvaluateGcGPU::build_ocl_context(const ov::EvaluationCon
 
     uint32_t waitListLen = 0;
     std::vector<cl_event> waitList;
-    it = evaluationContext.find(ov::intel_gpu::wait_list.name());
+    it = evaluationContext.find(ov::intel_gpu::mlir_meta::wait_list.name());
     if (it != evaluationContext.end()) {
         waitList = it->second.as<std::vector<cl_event>>();
         waitListLen = waitList.size();
diff --git a/src/inference/include/openvino/runtime/intel_gpu/properties.hpp b/src/inference/include/openvino/runtime/intel_gpu/properties.hpp
index dc67adc42d6593..185195e288805c 100644
--- a/src/inference/include/openvino/runtime/intel_gpu/properties.hpp
+++ b/src/inference/include/openvino/runtime/intel_gpu/properties.hpp
@@ -142,15 +142,6 @@ static constexpr auto surface = "GPU_SURFACE";
  */
 static constexpr auto buffer = "GPU_BUFFER";
 
-// TODO: maybe find more suitable place for this property (should be accessible from both
-// src/plugins/intel_gpu/.../mlir_op.cpp and src/common/transformations/.../mlir_op.cpp)
-/**
- * @brief This key identifies whether the kernel argument at [i] position is USM pointer
- * (this one is passed as evaluation context to a mlir_op)
- * @ingroup ov_runtime_ocl_gpu_prop_cpp_api
- */
-static constexpr Property<std::vector<bool>> is_kernel_arg_usm{"IS_KERNEL_ARG_USM"};
-
 } // namespace memory_type
 
 /**
diff --git a/src/inference/include/openvino/runtime/intel_gpu/remote_properties.hpp b/src/inference/include/openvino/runtime/intel_gpu/remote_properties.hpp
index ab2d4b4d3d47fa..da509aa70236cb 100644
--- a/src/inference/include/openvino/runtime/intel_gpu/remote_properties.hpp
+++ b/src/inference/include/openvino/runtime/intel_gpu/remote_properties.hpp
@@ -93,25 +93,6 @@ static constexpr Property<gpu_handle_param> ocl_queue{"OCL_QUEUE"};
  */
 static constexpr Property<gpu_handle_param> va_device{"VA_DEVICE"};
 
-// TODO: maybe find more suitable place for this property (should be accessible from both
-// src/plugins/intel_gpu/.../mlir_op.cpp and src/common/transformations/.../mlir_op.cpp)
-/**
- * @brief This key identifies an event list to wait for a kernel execution.
- * (this one is passed as evaluation context to a mlir_op)
- * @ingroup ov_runtime_ocl_gpu_cpp_api
- */
-static constexpr Property<std::vector<cl_event>> wait_list{"EVENTS_WAIT_LIST"};
-
-// TODO: maybe find more suitable place for this property (should be accessible from both
-// src/plugins/intel_gpu/.../mlir_op.cpp and src/common/transformations/.../mlir_op.cpp)
-/**
- * @brief This key identifies a pointer to a cl::Event that should be set with
- * the result cl_event of a kernel execution.
- * (this one is passed as evaluation context to a mlir_op)
- * @ingroup ov_runtime_ocl_gpu_cpp_api
- */
-static constexpr Property<cl::Event*> result_event{"RESULT_EVENT"};
-
 /**
  * @brief Enum to define the type of the shared memory buffer
  * @ingroup ov_runtime_ocl_gpu_cpp_api
@@ -204,5 +185,34 @@ static constexpr Property dev_object_handle{"DEV_OBJECT_HANDLE"};
  */
 static constexpr Property<uint32_t> va_plane{"VA_PLANE"};
 
+// TODO: maybe find more suitable place for these properties (should be accessible from both
+// src/plugins/intel_gpu/.../mlir_op.cpp and src/common/transformations/.../mlir_op.cpp)
+/**
+ * @brief Namespace for properties related to MLIR operations within the GPU plugin.
+ * These properties are used as evaluation context parameters for MLIR operations,
+ * assisting in managing events, result tracking, and kernel argument types.
+ */
+namespace mlir_meta {
+
+/**
+ * @brief This key identifies an event list to wait for a kernel execution.
+ * @ingroup ov_runtime_ocl_gpu_cpp_api
+ */
+static constexpr Property<std::vector<cl_event>> wait_list{"EVENTS_WAIT_LIST"};
+
+/**
+ * @brief This key identifies a pointer to a cl::Event that should be set with
+ * the result cl_event of a kernel execution.
+ * @ingroup ov_runtime_ocl_gpu_cpp_api
+ */
+static constexpr Property<cl::Event*> result_event{"RESULT_EVENT"};
+
+/**
+ * @brief This key identifies whether the kernel argument at [i] position is USM pointer
+ * @ingroup ov_runtime_ocl_gpu_prop_cpp_api
+ */
+static constexpr Property<std::vector<bool>> is_kernel_arg_usm{"IS_KERNEL_ARG_USM"};
+
+} // namespace mlir_meta
 } // namespace intel_gpu
 } // namespace ov
diff --git a/src/plugins/intel_gpu/src/plugin/ops/mlir_op.cpp b/src/plugins/intel_gpu/src/plugin/ops/mlir_op.cpp
index 06a3705bb450d9..b36d5098237b3d 100644
--- a/src/plugins/intel_gpu/src/plugin/ops/mlir_op.cpp
+++ b/src/plugins/intel_gpu/src/plugin/ops/mlir_op.cpp
@@ -12,7 +12,6 @@
 #include "runtime/ocl/ocl_base_event.hpp"
 
 #include "openvino/runtime/intel_gpu/remote_properties.hpp"
-#include "openvino/runtime/intel_gpu/properties.hpp"
 
 namespace ov {
 namespace op {
@@ -86,7 +85,7 @@ void CreateMLIRSubgraphOp(ProgramBuilder& p, const std::shared_ptrget_cl_queue().get();
         meta.insert(ov::intel_gpu::ocl_queue(queue));
     }
-    meta.insert(ov::intel_gpu::memory_type::is_kernel_arg_usm(is_usm_ptr));
+    meta.insert(ov::intel_gpu::mlir_meta::is_kernel_arg_usm(is_usm_ptr));
 
     std::vector events_list;
     if (stream.get_queue_type() == cldnn::QueueTypes::out_of_order) {
@@ -101,11 +100,11 @@ void CreateMLIRSubgraphOp(ProgramBuilder& p, const std::shared_ptr(ev.get())) {
         cl::Event* cl_ev = &ocl_ev->get();
-        meta.insert(ov::intel_gpu::result_event(cl_ev));
+        meta.insert(ov::intel_gpu::mlir_meta::result_event(cl_ev));
     } else {
         OPENVINO_THROW("Unsupported result event type");
     }
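Note for reviewers: the sketch below is not part of the diff; it only illustrates how the relocated mlir_meta properties are intended to be consumed together on the plugin side. The helper name make_mlir_evaluation_context, the OpenCL C++ bindings header, and the exact value types (std::vector<bool>, std::vector<cl_event>, cl::Event*) are assumptions inferred from this patch, not confirmed API.

// Sketch only: assembles the evaluation context map (ov::AnyMap) that the GPU plugin
// hands to a mlir_op, using the properties moved into ov::intel_gpu::mlir_meta.
#include <vector>

#include <CL/opencl.hpp>  // assumed OpenCL C++ bindings providing cl::Event, cl_event, cl_command_queue

#include "openvino/runtime/intel_gpu/remote_properties.hpp"

// Hypothetical helper; parameter types follow the usage visible in the hunks above.
ov::AnyMap make_mlir_evaluation_context(cl_command_queue queue,
                                        const std::vector<bool>& is_usm,
                                        const std::vector<cl_event>& deps,
                                        cl::Event* out_event) {
    ov::AnyMap meta;
    meta.insert(ov::intel_gpu::ocl_queue(queue));                     // queue the generated kernels run on
    meta.insert(ov::intel_gpu::mlir_meta::is_kernel_arg_usm(is_usm)); // per-argument USM flags
    meta.insert(ov::intel_gpu::mlir_meta::wait_list(deps));           // dependencies (out-of-order queues)
    meta.insert(ov::intel_gpu::mlir_meta::result_event(out_event));   // receives the completion event
    return meta;
}

On the consuming side, mlir_op.cpp in src/common/transformations looks these entries up via evaluationContext.find(...), as shown in the invoke, maybe_set_result_event, and build_ocl_context hunks above.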