#ifndef PhysicsTools_PatAlgos_BaseMVAValueMapProducer
#define PhysicsTools_PatAlgos_BaseMVAValueMapProducer

// ...
#include "TMVA/Factory.h"
#include "TMVA/Reader.h"

// ...
// BaseMVACache constructor, ONNX branch: optionally build the session with graph
// optimizations disabled.
    if (disableONNXGraphOpt) {
      Ort::SessionOptions sess_opts;
      // ...
      sess_opts.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_DISABLE_ALL);
      ort_ = std::make_unique<cms::Ort::ONNXRuntime>(model_path, &sess_opts);
    }

// ...
// Cached model handles, one per supported ML backend.
  std::shared_ptr<tensorflow::GraphDef> graph_;
  // ...
  std::unique_ptr<cms::Ort::ONNXRuntime> ort_;
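// Hedged sketch (not verbatim from this listing): given the members above and the ONNX
// branch shown, the BaseMVACache constructor presumably dispatches on the backend string
// roughly as follows; the TF branch and the else-branch are assumptions.
BaseMVACache(const std::string& model_path, const std::string& backend, const bool disableONNXGraphOpt) {
  if (backend == "TF") {
    graph_.reset(tensorflow::loadGraphDef(model_path));     // load the frozen GraphDef
    tf_session_ = tensorflow::createSession(graph_.get());  // open a session on it
  } else if (backend == "ONNX") {
    if (disableONNXGraphOpt) {
      Ort::SessionOptions sess_opts;
      sess_opts.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_DISABLE_ALL);
      ort_ = std::make_unique<cms::Ort::ONNXRuntime>(model_path, &sess_opts);
    } else {
      ort_ = std::make_unique<cms::Ort::ONNXRuntime>(model_path);
    }
  }
}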
// ...
// BaseMVAValueMapProducer<T> constructor: any other "backend" setting is a configuration error.
    throw cms::Exception("ConfigError") << "Only 'TF', 'ONNX' and 'TMVA' backends are supported\n";

// ...
// Compile the configured post-processing formulas.
  for (const auto& s : iConfig.getParameter<std::vector<std::string>>("outputFormulas")) {
// ...
// Register the products: an unlabelled ValueMap for TMVA, otherwise one labelled
// ValueMap per configured output name n.
    produces<edm::ValueMap<float>>();
// ...
      produces<edm::ValueMap<float>>(n);

// ...
// One string function per input variable, evaluated on each object of type T.
  std::vector<std::pair<std::string, StringObjectFunction<T, true>>> funcs_;
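// Hedged sketch (the body is not shown in this listing): setValue is declared as
// setValue(const std::string var, float val) and, given the positions_ and values_
// members used throughout this file, most plausibly writes the value into the flat
// input vector like this:
void setValue(const std::string var, float val) {
  // Ignore variables that are not part of "variablesOrder" (and hence not in positions_).
  if (positions_.find(var) != positions_.end())
    values_[positions_[var]] = val;
}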
// ...
template <typename T>
void BaseMVAValueMapProducer<T>::produce(edm::Event& iEvent, const edm::EventSetup& iSetup) {
// ... (fetch the input edm::View<T> collection into src via the src_ token)
  readAdditionalCollections(iEvent, iSetup);
  std::vector<std::vector<float>> mvaOut((tmva_) ? 1 : output_names_.size());
  for (auto& v : mvaOut)
    v.reserve(src->size());

// ...
// Batch evaluation: flatten the input variables of all objects into a single buffer.
    std::vector<float> data;
    data.reserve(src->size() * positions_.size());
    for (auto const& o : *src) {
      for (auto const& p : funcs_) {
// ... (evaluate p.second(o) and store it in values_ via setValue(p.first, ...))
      }
      fillAdditionalVariables(o);
      data.insert(data.end(), values_.begin(), values_.end());
    }
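// Illustrative sketch (hypothetical, not part of this file): a producer deriving from
// BaseMVAValueMapProducer<T> overrides the two virtual hooks used above to feed the model
// inputs that are not plain string functions of the object. The class name, the
// "vertices" parameter and the "nPV" variable are made up for the example.
#include "DataFormats/PatCandidates/interface/Jet.h"
#include "DataFormats/VertexReco/interface/Vertex.h"
#include "DataFormats/VertexReco/interface/VertexFwd.h"

class ExampleJetMVAValueMapProducer : public BaseMVAValueMapProducer<pat::Jet> {
public:
  ExampleJetMVAValueMapProducer(const edm::ParameterSet& cfg, const BaseMVACache* cache)
      : BaseMVAValueMapProducer<pat::Jet>(cfg, cache),
        vtxToken_(consumes<reco::VertexCollection>(cfg.getParameter<edm::InputTag>("vertices"))) {}

  void readAdditionalCollections(edm::Event& iEvent, const edm::EventSetup&) override {
    iEvent.getByToken(vtxToken_, vertices_);
  }

  void fillAdditionalVariables(const pat::Jet&) override {
    // "nPV" must also be listed in "variablesOrder" so that setValue finds its slot.
    setValue("nPV", vertices_->size());
  }

private:
  edm::EDGetTokenT<reco::VertexCollection> vtxToken_;
  edm::Handle<reco::VertexCollection> vertices_;
};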
// ...
// TF backend, batch mode: one 2D input tensor of shape (n. objects, n. variables).
      tensorflow::TensorShape input_size{(long long int)src->size(), (long long int)positions_.size()};
// ...
      input_tensors.resize(1);
// ... (attach a DT_FLOAT tensor of shape input_size, named inputTensorName_, as input_tensors[0])
      for (unsigned i = 0; i < data.size(); ++i) {
        input_tensors[0].second.flat<float>()(i) = data[i];
      }
      std::vector<tensorflow::Tensor> output_tensors;
      tensorflow::run(globalCache()->getTFSession(), input_tensors, {outputTensorName_}, &output_tensors);
      for (unsigned i = 0; i < output_tensors.at(0).NumElements(); ++i) {
        outputs.push_back(output_tensors.at(0).flat<float>()(i));
      }
// ...
// ONNX backend, batch mode: a single inference call over the whole flattened input.
      outputs =
          globalCache()->getONNXSession().run({inputTensorName_}, inputs, {}, {outputTensorName_}, src->size())[0];

// ...
// Unpack the flat output, outdim values per object, through the post-processing formulas.
    const unsigned outdim = outputs.size() / src->size();
    for (unsigned i = 0; i < src->size(); ++i) {
      std::vector<float> tmpOut(outputs.begin() + i * outdim, outputs.begin() + (i + 1) * outdim);
      for (size_t k = 0; k < output_names_.size(); k++) {
        mvaOut[k].push_back(output_formulas_[k](tmpOut));
      }
    }
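// Illustrative example (not part of this file): each "outputFormulas" entry is compiled
// into a StringObjectFunction<std::vector<float>> applied to the vector of raw model
// outputs, so e.g. a two-class score can be normalised as below (formula and numbers are
// made up for the example).
#include <vector>
#include "CommonTools/Utils/interface/StringObjectFunction.h"

float exampleNormalisedScore() {
  StringObjectFunction<std::vector<float>> formula("at(0)/(at(0)+at(1))");
  std::vector<float> rawScores{2.0f, 6.0f};
  return formula(rawScores);  // 0.25
}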
// ...
// Per-object evaluation (batch_eval disabled): same input filling, one inference per object.
    for (auto const& o : *src) {
      for (auto const& p : funcs_) {
// ... (evaluate p.second(o) and store it in values_ via setValue(p.first, ...))
      }
      fillAdditionalVariables(o);
// ...
// TMVA backend: one classifier or regression value per object.
        mvaOut[0].push_back(isClassifier_ ? reader_->EvaluateMVA(name_) : reader_->EvaluateRegression(name_)[0]);
// ...
        std::vector<float> tmpOut;
// ...
// TF backend: a (1, n. variables) input tensor per object.
          tensorflow::TensorShape input_size{1, (long long int)positions_.size()};
// ...
          input_tensors.resize(1);
// ...
          for (size_t j = 0; j < values_.size(); j++) {
            input_tensors[0].second.matrix<float>()(0, j) = values_[j];
          }
          std::vector<tensorflow::Tensor> outputs;
// ... (run the session, then copy the raw outputs into tmpOut)
            tmpOut.push_back(outputs.at(0).matrix<float>()(0, k));
// ...
// ONNX backend: per-object inference call.
          tmpOut = globalCache()->getONNXSession().run({inputTensorName_}, inputs, {}, {outputTensorName_})[0];
// ...
// Post-process tmpOut with the output formulas, one entry per configured output name.
        for (size_t k = 0; k < output_names_.size(); k++)
// ...
    }

// ...
// Finally, store one edm::ValueMap<float> per computed output in the event.
  for (auto& m : mvaOut) {
// ... (create the ValueMap, fill it with m, and put it into the event)
  }
}
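// Hedged sketch (the loop body is not shown in this listing): the standard edm::ValueMap
// filling pattern such a loop typically uses; "putValueMap" and its arguments are
// illustrative names, not part of this file.
#include <memory>
#include "DataFormats/Common/interface/ValueMap.h"
#include "DataFormats/Common/interface/View.h"
#include "FWCore/Framework/interface/Event.h"

template <typename T>
void putValueMap(edm::Event& iEvent,
                 const edm::Handle<edm::View<T>>& src,
                 const std::vector<float>& values,
                 const std::string& label) {
  auto out = std::make_unique<edm::ValueMap<float>>();
  edm::ValueMap<float>::Filler filler(*out);
  filler.insert(src, values.begin(), values.end());  // one value per object in src
  filler.fill();
  iEvent.put(std::move(out), label);  // label = output name ("" for the single TMVA output)
}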
template <typename T>
std::unique_ptr<BaseMVACache> BaseMVAValueMapProducer<T>::initializeGlobalCache(const edm::ParameterSet& cfg) {
  // Builds the cache from the weight file path, the backend name and the ONNX
  // graph-optimization switch; only the last argument is preserved in this listing.
  return std::make_unique<BaseMVACache>(/* weight file path */, /* backend */,
                                        cfg.getParameter<bool>("disableONNXGraphOpt"));
}
template <typename T>
void BaseMVAValueMapProducer<T>::globalEndJob(const BaseMVACache* cache) {}

template <typename T>
edm::ParameterSetDescription BaseMVAValueMapProducer<T>::getDescription() {
  edm::ParameterSetDescription desc;
// ... (parameters for the input collection and the weight file)
  desc.add<std::vector<std::string>>("variablesOrder")->setComment("ordered list of MVA input variable names");
  desc.add<std::string>("name")->setComment("output score variable name");
  desc.add<bool>("isClassifier")->setComment("is a classifier discriminator");
// ...
  desc.add<std::string>("backend", "TMVA")->setComment("TMVA, TF or ONNX");
  desc.add<std::string>("inputTensorName", "")->setComment("Name of tensorflow input tensor in the model");
  desc.add<std::string>("outputTensorName", "")->setComment("Name of tensorflow output tensor in the model");
  desc.add<std::vector<std::string>>("outputNames", std::vector<std::string>())
      ->setComment("Names of the output values to be used in the output valuemap");
  desc.add<std::vector<std::string>>("outputFormulas", std::vector<std::string>())
      ->setComment("Formulas to be used to post process the output");
  desc.add<bool>("batch_eval", false)->setComment("Run inference in batch instead of per-object");
  desc.add<bool>("disableONNXGraphOpt", false)->setComment("Disable ONNX runtime graph optimization");
// ...
  return desc;
}
template <typename T>
void BaseMVAValueMapProducer<T>::fillDescriptions(edm::ConfigurationDescriptions& descriptions) {
  edm::ParameterSetDescription desc = getDescription();
  std::string modname;
  // ... (prefix modname with the name of the object type T)
  modname += "BaseMVAValueMapProducer";
  descriptions.add(modname, desc);
}
// Member and interface summary for the two classes declared in this header:

// BaseMVACache
BaseMVACache(const std::string& model_path, const std::string& backend, const bool disableONNXGraphOpt);
tensorflow::Session* getTFSession() const;
const cms::Ort::ONNXRuntime& getONNXSession() const;
std::shared_ptr<tensorflow::GraphDef> graph_;
tensorflow::Session* tf_session_;
std::unique_ptr<cms::Ort::ONNXRuntime> ort_;

// BaseMVAValueMapProducer<T>
BaseMVAValueMapProducer(const edm::ParameterSet& iConfig, const BaseMVACache* cache);
~BaseMVAValueMapProducer() override;
void setValue(const std::string var, float val);
void beginStream(edm::StreamID) override;
void produce(edm::Event&, const edm::EventSetup&) override;
void endStream() override;
// to be implemented in derived classes, filling values for additional variables
virtual void readAdditionalCollections(edm::Event&, const edm::EventSetup&);
virtual void fillAdditionalVariables(const T&);
static std::unique_ptr<BaseMVACache> initializeGlobalCache(const edm::ParameterSet& cfg);
static void globalEndJob(const BaseMVACache* cache);
static edm::ParameterSetDescription getDescription();
static void fillDescriptions(edm::ConfigurationDescriptions& descriptions);
edm::EDGetTokenT<edm::View<T>> src_;
std::map<std::string, size_t> positions_;
std::vector<std::pair<std::string, StringObjectFunction<T, true>>> funcs_;
std::vector<std::string> variablesOrder_;
std::vector<float> values_;
std::string inputTensorName_;
std::string outputTensorName_;
std::string weightfilename_;
std::vector<std::string> output_names_;
std::vector<StringObjectFunction<std::vector<float>>> output_formulas_;