ONNXRuntime.cc
/*
 * ONNXRuntime.cc
 *
 *  Created on: Jun 28, 2019
 *      Author: hqu
 */

#include "PhysicsTools/ONNXRuntime/interface/ONNXRuntime.h"

#include <cassert>
#include <iostream>
#include <algorithm>
#include <numeric>
#include <functional>

#include "FWCore/Utilities/interface/Exception.h"

namespace cms::Ort {

  using namespace ::Ort;

  const Env ONNXRuntime::env_(ORT_LOGGING_LEVEL_ERROR, "");

  ONNXRuntime::ONNXRuntime(const std::string& model_path, const SessionOptions* session_options) {
    // create session
    if (session_options) {
      session_.reset(new Session(env_, model_path.c_str(), *session_options));
    } else {
      SessionOptions sess_opts;
      sess_opts.SetIntraOpNumThreads(1);
      session_.reset(new Session(env_, model_path.c_str(), sess_opts));
    }
    AllocatorWithDefaultOptions allocator;

    // get input names and shapes
    size_t num_input_nodes = session_->GetInputCount();
    input_node_strings_.resize(num_input_nodes);
    input_node_names_.resize(num_input_nodes);
    input_node_dims_.clear();

    for (size_t i = 0; i < num_input_nodes; i++) {
      // get input node names
      std::string input_name(session_->GetInputName(i, allocator));
      input_node_strings_[i] = input_name;
      input_node_names_[i] = input_node_strings_[i].c_str();

      // get input shapes
      auto type_info = session_->GetInputTypeInfo(i);
      auto tensor_info = type_info.GetTensorTypeAndShapeInfo();
      size_t num_dims = tensor_info.GetDimensionsCount();
      input_node_dims_[input_name].resize(num_dims);
      tensor_info.GetDimensions(input_node_dims_[input_name].data(), num_dims);
    }

    size_t num_output_nodes = session_->GetOutputCount();
    output_node_strings_.resize(num_output_nodes);
    output_node_names_.resize(num_output_nodes);
    output_node_dims_.clear();

    for (size_t i = 0; i < num_output_nodes; i++) {
      // get output node names
      std::string output_name(session_->GetOutputName(i, allocator));
      output_node_strings_[i] = output_name;
      output_node_names_[i] = output_node_strings_[i].c_str();

      // get output node types
      auto type_info = session_->GetOutputTypeInfo(i);
      auto tensor_info = type_info.GetTensorTypeAndShapeInfo();
      size_t num_dims = tensor_info.GetDimensionsCount();
      output_node_dims_[output_name].resize(num_dims);
      tensor_info.GetDimensions(output_node_dims_[output_name].data(), num_dims);

      // the 0th dim depends on the batch size
      output_node_dims_[output_name].at(0) = -1;
    }
  }

  ONNXRuntime::~ONNXRuntime() {}

  FloatArrays ONNXRuntime::run(const std::vector<std::string>& input_names,
                               FloatArrays& input_values,
                               const std::vector<std::vector<int64_t>>& input_shapes,
                               const std::vector<std::string>& output_names,
                               int64_t batch_size) const {
    assert(input_names.size() == input_values.size());
    assert(input_shapes.empty() || input_names.size() == input_shapes.size());
    assert(batch_size > 0);

    // create input tensor objects from data values
    std::vector<Value> input_tensors;
    auto memory_info = MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
    for (const auto& name : input_node_strings_) {
      auto iter = std::find(input_names.begin(), input_names.end(), name);
      if (iter == input_names.end()) {
        throw cms::Exception("RuntimeError") << "Input " << name << " is not provided!";
      }
      auto input_pos = iter - input_names.begin();
      auto value = input_values.begin() + input_pos;
      std::vector<int64_t> input_dims;
      if (input_shapes.empty()) {
        input_dims = input_node_dims_.at(name);
        input_dims[0] = batch_size;
      } else {
        input_dims = input_shapes[input_pos];
        // rely on the given input_shapes to set the batch size
      }
      auto expected_len = std::accumulate(input_dims.begin(), input_dims.end(), 1, std::multiplies<int64_t>());
      if (expected_len != (int64_t)value->size()) {
        throw cms::Exception("RuntimeError")
            << "Input array " << name << " has a wrong size of " << value->size() << ", expected " << expected_len;
      }
      auto input_tensor =
          Value::CreateTensor<float>(memory_info, value->data(), value->size(), input_dims.data(), input_dims.size());
      assert(input_tensor.IsTensor());
      input_tensors.emplace_back(std::move(input_tensor));
    }

    // set output node names; will get all outputs if `output_names` is not provided
    std::vector<const char*> run_output_node_names;
    if (output_names.empty()) {
      run_output_node_names = output_node_names_;
    } else {
      for (const auto& name : output_names) {
        run_output_node_names.push_back(name.c_str());
      }
    }

    // run
    auto output_tensors = session_->Run(RunOptions{nullptr},
                                        input_node_names_.data(),
                                        input_tensors.data(),
                                        input_tensors.size(),
                                        run_output_node_names.data(),
                                        run_output_node_names.size());

    // convert output to floats
    FloatArrays outputs;
    for (auto& output_tensor : output_tensors) {
      assert(output_tensor.IsTensor());

      // get output shape
      auto tensor_info = output_tensor.GetTensorTypeAndShapeInfo();
      auto length = tensor_info.GetElementCount();

      auto floatarr = output_tensor.GetTensorMutableData<float>();
      outputs.emplace_back(floatarr, floatarr + length);
    }
    assert(outputs.size() == run_output_node_names.size());

    return outputs;
  }

  const std::vector<std::string>& ONNXRuntime::getOutputNames() const {
    if (session_) {
      return output_node_strings_;
    } else {
      throw cms::Exception("RuntimeError") << "Needs to call createSession() first before getting the output names!";
    }
  }

  const std::vector<int64_t>& ONNXRuntime::getOutputShape(const std::string& output_name) const {
    auto iter = output_node_dims_.find(output_name);
    if (iter == output_node_dims_.end()) {
      throw cms::Exception("RuntimeError") << "Output name " << output_name << " is invalid!";
    } else {
      return iter->second;
    }
  }

} /* namespace cms::Ort */
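
For reference, a minimal usage sketch of the class defined above (not part of ONNXRuntime.cc). The model file name "model.onnx", the input node name "features", and the per-event input length of 10 are hypothetical and must match the actual ONNX model; in CMSSW this class is normally driven from an EDProducer rather than a standalone main().

// Minimal usage sketch; file name, input node name, and input length are hypothetical.
#include "PhysicsTools/ONNXRuntime/interface/ONNXRuntime.h"

#include <iostream>
#include <vector>

int main() {
  using namespace cms::Ort;

  // Load the model; with no SessionOptions the constructor uses one intra-op thread.
  ONNXRuntime rt("model.onnx");

  // One flattened float vector per input node, sized batch_size * per-event length.
  FloatArrays inputs{std::vector<float>(10, 0.f)};

  // Empty input_shapes: use the shapes recorded at load time, with the 0th
  // dimension replaced by batch_size. Empty output_names: return all outputs.
  FloatArrays outputs = rt.run({"features"}, inputs, {}, {}, 1);

  std::cout << rt.getOutputNames().front() << "[0] = " << outputs.front().front() << std::endl;
  return 0;
}

The returned FloatArrays holds one flattened float vector per requested output node, in the order of run_output_node_names as built in run() above.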