Skip to content

Commit

Permalink
Add GPU support for ONNXRuntime.
Browse files Browse the repository at this point in the history
  • Loading branch information
hqucms committed Feb 14, 2022
1 parent ad04da7 commit c2d3921
Show file tree
Hide file tree
Showing 4 changed files with 63 additions and 9 deletions.
3 changes: 3 additions & 0 deletions PhysicsTools/ONNXRuntime/interface/ONNXRuntime.h
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
namespace cms::Ort {

// A batch of tensors: one flat std::vector<float> per input/output node.
typedef std::vector<std::vector<float>> FloatArrays;
// GPU selection policy consumed by ONNXRuntime::defaultSessionOptions():
//   no_gpu    - CPU-only session options (the default);
//   auto_gpu  - use a CUDA device when one is detected, otherwise fall back to CPU;
//   force_gpu - require a CUDA device; option creation throws when none is found.
enum GPUMode { no_gpu, auto_gpu, force_gpu };

class ONNXRuntime {
public:
Expand All @@ -29,6 +30,8 @@ namespace cms::Ort {
ONNXRuntime& operator=(const ONNXRuntime&) = delete;
~ONNXRuntime();

static ::Ort::SessionOptions defaultSessionOptions(GPUMode gpu_mode = no_gpu);

// Run inference and get outputs
// input_names: list of the names of the input nodes.
// input_values: list of input arrays for each input node. The order of `input_values` must match `input_names`.
Expand Down
40 changes: 36 additions & 4 deletions PhysicsTools/ONNXRuntime/src/ONNXRuntime.cc
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
#include <iostream>
#include <limits>   // std::numeric_limits, used in defaultSessionOptions
#include <memory>
#include <numeric>

#include <cuda_runtime.h>

namespace cms::Ort {

Expand All @@ -25,11 +26,9 @@ namespace cms::Ort {
ONNXRuntime::ONNXRuntime(const std::string& model_path, const SessionOptions* session_options) {
// create session
if (session_options) {
session_ = std::make_unique<Session>(env_, model_path.c_str(), *session_options);
session_.reset(new Session(env_, model_path.c_str(), *session_options));
} else {
SessionOptions sess_opts;
sess_opts.SetIntraOpNumThreads(1);
session_ = std::make_unique<Session>(env_, model_path.c_str(), sess_opts);
session_.reset(new Session(env_, model_path.c_str(), defaultSessionOptions()));
}
AllocatorWithDefaultOptions allocator;

Expand Down Expand Up @@ -78,6 +77,35 @@ namespace cms::Ort {

ONNXRuntime::~ONNXRuntime() {}

// Build the SessionOptions used when the caller does not supply any:
// a single intra-op thread, optionally extended with the CUDA execution
// provider according to `gpu_mode`.
//
// Throws cms::Exception("RuntimeError") when gpu_mode == force_gpu but no
// usable CUDA device is present; with auto_gpu it falls back to CPU.
SessionOptions ONNXRuntime::defaultSessionOptions(GPUMode gpu_mode) {
  SessionOptions sess_opts;
  sess_opts.SetIntraOpNumThreads(1);
  if (gpu_mode != no_gpu) {
    // Probe for a usable CUDA device; cudaGetDeviceCount returns an error
    // (or zero devices) on hosts without a GPU or without the driver.
    int devices = 0;
    const auto status = cudaGetDeviceCount(&devices);
    const bool is_gpu_available = (status == cudaSuccess && devices > 0);
    if (is_gpu_available) {
      // https://www.onnxruntime.ai/docs/reference/execution-providers/CUDA-ExecutionProvider.html
      // Value-initialize so members not set below are zeroed —
      // OrtCUDAProviderOptions comes from the C API header and default-init
      // may leave fields indeterminate (TODO confirm for the ORT version in use).
      OrtCUDAProviderOptions options{};
      options.device_id = 0;
      options.arena_extend_strategy = 0;
      // No hard memory cap: let the arena grow to the device limit.
      options.cuda_mem_limit = std::numeric_limits<std::size_t>::max();
      options.cudnn_conv_algo_search = OrtCudnnConvAlgoSearch::EXHAUSTIVE;
      options.do_copy_in_default_stream = 1;
      sess_opts.AppendExecutionProvider_CUDA(options);
    } else if (gpu_mode == force_gpu) {
      // Caller demanded a GPU but none is usable.
      throw cms::Exception("RuntimeError") << "No GPU detected, cannot run ONNXRuntime on GPU.";
    } else {
      // auto_gpu: informational note only; '\n' avoids std::endl's flush.
      std::cout << "[ONNXRuntime] No GPU detected, will run on CPU.\n";
    }
  }
  return sess_opts;
}

FloatArrays ONNXRuntime::run(const std::vector<std::string>& input_names,
FloatArrays& input_values,
const std::vector<std::vector<int64_t>>& input_shapes,
Expand All @@ -104,6 +132,10 @@ namespace cms::Ort {
} else {
input_dims = input_shapes[input_pos];
// rely on the given input_shapes to set the batch size
if (input_dims[0] != batch_size) {
throw cms::Exception("RuntimeError") << "The first element of `input_shapes` (" << input_dims[0]
<< ") does not match the given `batch_size` (" << batch_size << ")";
}
}
auto expected_len = std::accumulate(input_dims.begin(), input_dims.end(), 1, std::multiplies<int64_t>());
if (expected_len != (int64_t)value->size()) {
Expand Down
1 change: 1 addition & 0 deletions PhysicsTools/ONNXRuntime/test/BuildFile.xml
Original file line number Diff line number Diff line change
Expand Up @@ -4,5 +4,6 @@
<!-- Unit-test binary for the ONNXRuntime wrapper. HeterogeneousCore/CUDAUtilities
     supplies the device-detection helper (requireDevices.h) used by the test to
     skip the GPU case on CPU-only machines. -->
<bin name="testONNXRuntime" file="testRunner.cpp, testONNXRuntime.cc">
  <use name="cppunit"/>
  <use name="PhysicsTools/ONNXRuntime"/>
  <use name="HeterogeneousCore/CUDAUtilities"/>
  <use name="FWCore/ParameterSet"/>
</bin>
28 changes: 23 additions & 5 deletions PhysicsTools/ONNXRuntime/test/testONNXRuntime.cc
Original file line number Diff line number Diff line change
Expand Up @@ -2,26 +2,34 @@

#include "PhysicsTools/ONNXRuntime/interface/ONNXRuntime.h"
#include "FWCore/ParameterSet/interface/FileInPath.h"
#include "HeterogeneousCore/CUDAUtilities/interface/requireDevices.h"

#include <chrono>
#include <iostream>

using namespace cms::Ort;

// CppUnit fixture exercising cms::Ort::ONNXRuntime inference under the
// different GPU modes of defaultSessionOptions().
class testONNXRuntime : public CppUnit::TestFixture {
  CPPUNIT_TEST_SUITE(testONNXRuntime);
  CPPUNIT_TEST(checkAll);
  CPPUNIT_TEST(checkCPU);
  CPPUNIT_TEST(checkGPU);
  CPPUNIT_TEST(checkAuto);
  CPPUNIT_TEST_SUITE_END();

private:
  // Shared driver: builds session options for `gpu_mode` and runs the
  // bundled test model over several batch sizes.
  void test(GPUMode gpu_mode = no_gpu);

public:
  void checkAll();
  void checkCPU();   // default no_gpu options
  void checkGPU();   // force_gpu — executed only when a CUDA device is present
  void checkAuto();  // auto_gpu — GPU when available, otherwise CPU fallback
};

CPPUNIT_TEST_SUITE_REGISTRATION(testONNXRuntime);

void testONNXRuntime::checkAll() {
void testONNXRuntime::test(GPUMode gpu_mode) {
std::string model_path = edm::FileInPath("PhysicsTools/ONNXRuntime/test/data/model.onnx").fullPath();
ONNXRuntime rt(model_path);
auto session_options = ONNXRuntime::defaultSessionOptions(gpu_mode);
ONNXRuntime rt(model_path, &session_options);
for (const unsigned batch_size : {1, 2, 4}) {
FloatArrays input_values{
std::vector<float>(batch_size * 2, 1),
Expand All @@ -35,3 +43,13 @@ void testONNXRuntime::checkAll() {
}
}
}

// CPU case: run the shared driver with its default (no_gpu) mode.
void testONNXRuntime::checkCPU() {
  test();
}

// GPU case: exercised only on hosts where a CUDA device is usable;
// otherwise the test is a silent no-op, matching the original guard.
void testONNXRuntime::checkGPU() {
  if (!cms::cudatest::testDevices())
    return;  // no usable device — skip instead of failing
  test(force_gpu);
}

// Auto case: session options pick the GPU when present, CPU otherwise.
void testONNXRuntime::checkAuto() {
  test(auto_gpu);
}

0 comments on commit c2d3921

Please sign in to comment.