diff --git a/include/onnxruntime/core/session/onnxruntime_c_api.h b/include/onnxruntime/core/session/onnxruntime_c_api.h
index e1fa9132c8e50..c883ffa100320 100644
--- a/include/onnxruntime/core/session/onnxruntime_c_api.h
+++ b/include/onnxruntime/core/session/onnxruntime_c_api.h
@@ -1344,6 +1344,8 @@ struct OrtApi {
    * Create a tensor with user's buffer. You can fill the buffer either before calling this function or after.
    * p_data is owned by caller. ReleaseValue won't release p_data.
    *
+   * If you wish to transfer ownership of p_data to ORT, use CreateTensorWithDataAndDeleterAsOrtValue.
+   *
    * \param[in] info Memory description of where the p_data buffer resides (CPU vs GPU etc).
    * \param[in] p_data Pointer to the data buffer.
    * \param[in] p_data_len The number of bytes in the data buffer.
diff --git a/onnxruntime/core/framework/onnxruntime_typeinfo.cc b/onnxruntime/core/framework/onnxruntime_typeinfo.cc
index b8a6e2b44c919..91383425f16d9 100644
--- a/onnxruntime/core/framework/onnxruntime_typeinfo.cc
+++ b/onnxruntime/core/framework/onnxruntime_typeinfo.cc
@@ -86,8 +86,8 @@ ORT_API_STATUS_IMPL(OrtApis::CastTypeInfoToOptionalTypeInfo, _In_ const OrtTypeI
   API_IMPL_END
 }
 
-ORT_API_STATUS_IMPL(OrtApis::GetDenotationFromTypeInfo, _In_ const OrtTypeInfo* type_info, _Out_ const char** const out,
-                    _Out_ size_t* len) {
+ORT_API_STATUS_IMPL(OrtApis::GetDenotationFromTypeInfo, _In_ const OrtTypeInfo* type_info,
+                    _Out_ const char** const out, _Out_ size_t* len) {
   API_IMPL_BEGIN
   *out = type_info->denotation.c_str();
   *len = type_info->denotation.size();
@@ -115,8 +115,8 @@ ORT_API_STATUS_IMPL(OrtApis::CreateSparseTensorTypeInfo, _In_ const OrtTensorTyp
   API_IMPL_END
 }
 
-ORT_API_STATUS_IMPL(OrtApis::CreateMapTypeInfo, ONNXTensorElementDataType map_key_type, _In_ const OrtTypeInfo* map_value_type,
-                    _Out_ OrtTypeInfo** type_info) {
+ORT_API_STATUS_IMPL(OrtApis::CreateMapTypeInfo, ONNXTensorElementDataType map_key_type,
+                    _In_ const OrtTypeInfo* map_value_type, _Out_ OrtTypeInfo** type_info) {
   API_IMPL_BEGIN
   auto ti = std::make_unique<OrtTypeInfo>(ONNXType::ONNX_TYPE_MAP);
   ti->map_type_info = std::make_unique<OrtMapTypeInfo>(map_key_type, map_value_type->Clone());
@@ -126,7 +126,8 @@ ORT_API_STATUS_IMPL(OrtApis::CreateMapTypeInfo, ONNXTensorElementDataType map_ke
   API_IMPL_END
 }
 
-ORT_API_STATUS_IMPL(OrtApis::CreateSequenceTypeInfo, _In_ const OrtTypeInfo* sequence_type, _Out_ OrtTypeInfo** type_info) {
+ORT_API_STATUS_IMPL(OrtApis::CreateSequenceTypeInfo, _In_ const OrtTypeInfo* sequence_type,
+                    _Out_ OrtTypeInfo** type_info) {
   API_IMPL_BEGIN
   auto ti = std::make_unique<OrtTypeInfo>(ONNXType::ONNX_TYPE_SEQUENCE);
   ti->sequence_type_info = std::make_unique<OrtSequenceTypeInfo>(sequence_type->Clone());
@@ -136,7 +137,8 @@ ORT_API_STATUS_IMPL(OrtApis::CreateSequenceTypeInfo, _In_ const OrtTypeInfo* seq
   API_IMPL_END
 }
 
-ORT_API_STATUS_IMPL(OrtApis::CreateOptionalTypeInfo, _In_ const OrtTypeInfo* contained_type, _Out_ OrtTypeInfo** type_info) {
+ORT_API_STATUS_IMPL(OrtApis::CreateOptionalTypeInfo, _In_ const OrtTypeInfo* contained_type,
+                    _Out_ OrtTypeInfo** type_info) {
   API_IMPL_BEGIN
   auto ti = std::make_unique<OrtTypeInfo>(ONNXType::ONNX_TYPE_OPTIONAL);
   ti->optional_type_info = std::make_unique<OrtOptionalTypeInfo>(contained_type->Clone());
diff --git a/onnxruntime/core/session/onnxruntime_c_api.cc b/onnxruntime/core/session/onnxruntime_c_api.cc
index d7b020a43fc84..a05309f9ec207 100644
--- a/onnxruntime/core/session/onnxruntime_c_api.cc
+++ b/onnxruntime/core/session/onnxruntime_c_api.cc
@@ -126,17 +126,6 @@ ORT_STATUS_PTR CreateTensorImpl(MLDataType ml_type, const int64_t* shape, size_t
   return nullptr;
 }
 
-ORT_STATUS_PTR CreateTensorImplForSeq(MLDataType elem_type, const int64_t* shape, size_t shape_len, Tensor& out) {
-  OrtAllocator* allocator;
-  // TODO(pranav): what allocator should be used to create the tensor here?
-  // for the sake of simplicity of the API using the default one here
-  ORT_API_RETURN_IF_ERROR(OrtApis::GetAllocatorWithDefaultOptions(&allocator));
-  AllocatorPtr alloc_ptr = std::make_shared<onnxruntime::IAllocatorImplWrappingOrtAllocator>(allocator);
-  TensorShape tensor_shape(shape, shape_len);
-  out = Tensor(elem_type, tensor_shape, std::move(alloc_ptr));
-  return nullptr;
-}
-
 // Create Tensor with existing data. Tensor does not own memory.
 ORT_STATUS_PTR CreateTensorImpl(MLDataType ml_type, const int64_t* shape, size_t shape_len,
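
Reviewer note, not part of the patch: the new doc comment draws an ownership line between the two tensor-creation entry points. Below is a minimal sketch of the caller-owned variant; it assumes a CPU buffer and abbreviates error handling, and the helper name make_tensor_over_user_buffer is illustrative only.

// Sketch: create a tensor over a caller-owned buffer.
// ReleaseValue frees only the OrtValue wrapper, never data[] itself,
// so the buffer must outlive the OrtValue.
#include "onnxruntime_c_api.h"
#include <stddef.h>

int make_tensor_over_user_buffer(void) {
  const OrtApi* api = OrtGetApiBase()->GetApi(ORT_API_VERSION);

  float data[6] = {1, 2, 3, 4, 5, 6};  /* caller-owned, stays alive */
  int64_t shape[2] = {2, 3};

  OrtMemoryInfo* mem_info = NULL;
  OrtStatus* status = api->CreateCpuMemoryInfo(OrtArenaAllocator, OrtMemTypeDefault, &mem_info);
  if (status != NULL) { api->ReleaseStatus(status); return -1; }

  OrtValue* value = NULL;
  status = api->CreateTensorWithDataAsOrtValue(mem_info, data, sizeof(data), shape, 2,
                                               ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT, &value);
  if (status != NULL) { api->ReleaseStatus(status); api->ReleaseMemoryInfo(mem_info); return -1; }

  /* ... run inference with value ... */

  api->ReleaseValue(value);          /* data[] is untouched */
  api->ReleaseMemoryInfo(mem_info);
  return 0;
}

When the buffer should instead die with the tensor, the doc comment points at CreateTensorWithDataAndDeleterAsOrtValue, which takes an OrtAllocator used to free p_data when the OrtValue is released; see onnxruntime_c_api.h for the exact signature.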