
DeepMETProducer.cc
#include "FWCore/Framework/interface/stream/EDProducer.h"
#include "FWCore/Framework/interface/Event.h"
#include "FWCore/Framework/interface/MakerMacros.h"
#include "FWCore/ParameterSet/interface/ParameterSet.h"
#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h"
#include "FWCore/ParameterSet/interface/ParameterSetDescription.h"
#include "FWCore/ParameterSet/interface/FileInPath.h"

#include "DataFormats/PatCandidates/interface/MET.h"
#include "DataFormats/PatCandidates/interface/PackedCandidate.h"

#include "PhysicsTools/TensorFlow/interface/TensorFlow.h"
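
// DeepMETProducer evaluates a TensorFlow graph on packed PF candidates to estimate the
// missing transverse momentum. The frozen GraphDef is loaded once per job and shared via
// edm::GlobalCache, while each stream instance owns its own tensorflow::Session and
// input tensors.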
struct DeepMETCache {
  // created in initializeGlobalCache() and released in globalEndJob()
  std::atomic<tensorflow::GraphDef*> graph_def{nullptr};
};

class DeepMETProducer : public edm::stream::EDProducer<edm::GlobalCache<DeepMETCache> > {
public:
  explicit DeepMETProducer(const edm::ParameterSet&, const DeepMETCache*);
  void produce(edm::Event& event, const edm::EventSetup& setup) override;
  static void fillDescriptions(edm::ConfigurationDescriptions& descriptions);

  // static methods for handling the global cache
  static std::unique_ptr<DeepMETCache> initializeGlobalCache(const edm::ParameterSet&);
  static void globalEndJob(DeepMETCache*);

private:
  const edm::EDGetTokenT<std::vector<pat::PackedCandidate> > pf_token_;
  const float norm_;
  const bool ignore_leptons_;
  const unsigned int max_n_pf_;

  tensorflow::Session* session_;

  // one tensor of continuous features and three categorical inputs, reused from event to event
  tensorflow::Tensor input_;
  tensorflow::Tensor input_cat0_;
  tensorflow::Tensor input_cat1_;
  tensorflow::Tensor input_cat2_;

  // integer indices fed to the graph's categorical inputs for charge and PDG id
  inline static const std::unordered_map<int, int32_t> charge_embedding_{{-1, 0}, {0, 1}, {1, 2}};
  inline static const std::unordered_map<int, int32_t> pdg_id_embedding_{
      {-211, 0}, {-13, 1}, {-11, 2}, {0, 3}, {1, 4}, {2, 5}, {11, 6}, {13, 7}, {22, 8}, {130, 9}, {211, 10}};
};

namespace {
  float scale_and_rm_outlier(float val, float scale) {
    float ret_val = val * scale;
    if (ret_val > 1e6 || ret_val < -1e6)
      return 0.;
    return ret_val;
  }
}  // namespace

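// Per-candidate inputs: 8 continuous features (dxy, dz, eta, mass, pt, puppiWeight, px, py)
// and three categorical channels (charge, PDG id, fromPV), zero-padded/truncated to max_n_pf_ slots.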
DeepMETProducer::DeepMETProducer(const edm::ParameterSet& cfg, const DeepMETCache* cache)
    : pf_token_(consumes<std::vector<pat::PackedCandidate> >(cfg.getParameter<edm::InputTag>("pf_src"))),
      norm_(cfg.getParameter<double>("norm_factor")),
      ignore_leptons_(cfg.getParameter<bool>("ignore_leptons")),
      max_n_pf_(cfg.getParameter<unsigned int>("max_n_pf")),
      session_(tensorflow::createSession(cache->graph_def)) {
  produces<pat::METCollection>();

  const tensorflow::TensorShape shape({1, max_n_pf_, 8});
  const tensorflow::TensorShape cat_shape({1, max_n_pf_, 1});

  input_ = tensorflow::Tensor(tensorflow::DT_FLOAT, shape);
  input_cat0_ = tensorflow::Tensor(tensorflow::DT_FLOAT, cat_shape);
  input_cat1_ = tensorflow::Tensor(tensorflow::DT_FLOAT, cat_shape);
  input_cat2_ = tensorflow::Tensor(tensorflow::DT_FLOAT, cat_shape);
}

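// produce(): zero the input tensors, fill one slot per PF candidate (up to max_n_pf_),
// run the graph, undo the 1/norm_ scaling on the predicted px and py, and subtract the
// summed px/py of any leptons that were excluded from the inputs before storing the MET.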
void DeepMETProducer::produce(edm::Event& event, const edm::EventSetup& setup) {
  auto const& pfs = event.get(pf_token_);

  // bind this stream's input tensors to the names expected by the graph
  const tensorflow::NamedTensorList input_list = {
      {"input", input_}, {"input_cat0", input_cat0_}, {"input_cat1", input_cat1_}, {"input_cat2", input_cat2_}};

  // Set all inputs to zero
  input_.flat<float>().setZero();
  input_cat0_.flat<float>().setZero();
  input_cat1_.flat<float>().setZero();
  input_cat2_.flat<float>().setZero();

  size_t i_pf = 0;
  float px_leptons = 0.;
  float py_leptons = 0.;
  const float scale = 1. / norm_;
  for (const auto& pf : pfs) {
    if (ignore_leptons_) {
      int pdg_id = std::abs(pf.pdgId());
      if (pdg_id == 11 || pdg_id == 13) {
        px_leptons += pf.px();
        py_leptons += pf.py();
        continue;
      }
    }

    // fill the tensor
    // PF keys [b'PF_dxy', b'PF_dz', b'PF_eta', b'PF_mass', b'PF_pt', b'PF_puppiWeight', b'PF_px', b'PF_py']
    float* ptr = &input_.tensor<float, 3>()(0, i_pf, 0);
    *ptr = pf.dxy();
    *(++ptr) = pf.dz();
    *(++ptr) = pf.eta();
    *(++ptr) = pf.mass();
    *(++ptr) = scale_and_rm_outlier(pf.pt(), scale);
    *(++ptr) = pf.puppiWeight();
    *(++ptr) = scale_and_rm_outlier(pf.px(), scale);
    *(++ptr) = scale_and_rm_outlier(pf.py(), scale);
    input_cat0_.tensor<float, 3>()(0, i_pf, 0) = charge_embedding_.at(pf.charge());
    input_cat1_.tensor<float, 3>()(0, i_pf, 0) = pdg_id_embedding_.at(pf.pdgId());
    input_cat2_.tensor<float, 3>()(0, i_pf, 0) = pf.fromPV();

    ++i_pf;
    if (i_pf == max_n_pf_) {
      break;  // output a warning?
    }
  }

  std::vector<tensorflow::Tensor> outputs;
  const std::vector<std::string> output_names = {"output/BiasAdd"};

  // run the inference and return met
  tensorflow::run(session_, input_list, output_names, &outputs);

  // The DNN directly estimates the missing px and py, not the recoil
  float px = outputs[0].tensor<float, 2>()(0, 0) * norm_;
  float py = outputs[0].tensor<float, 2>()(0, 1) * norm_;

  px -= px_leptons;
  py -= py_leptons;

  auto pf_mets = std::make_unique<pat::METCollection>();
  const reco::Candidate::LorentzVector p4(px, py, 0., std::hypot(px, py));
  pf_mets->emplace_back(reco::MET(p4, {}));
  event.put(std::move(pf_mets));
}
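// initializeGlobalCache() is called once per job: it resolves graph_path through
// edm::FileInPath and loads the frozen GraphDef that all streams share.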
std::unique_ptr<DeepMETCache> DeepMETProducer::initializeGlobalCache(const edm::ParameterSet& params) {
  // this method is supposed to create, initialize and return a DeepMETCache instance
  std::unique_ptr<DeepMETCache> cache = std::make_unique<DeepMETCache>();

  // load the graph def and save it
  std::string graphPath = params.getParameter<std::string>("graph_path");
  if (!graphPath.empty()) {
    graphPath = edm::FileInPath(graphPath).fullPath();
    cache->graph_def = tensorflow::loadGraphDef(graphPath);
  }

  return cache;
}

// release the GraphDef owned by the global cache at the end of the job
void DeepMETProducer::globalEndJob(DeepMETCache* cache) { delete cache->graph_def; }

void DeepMETProducer::fillDescriptions(edm::ConfigurationDescriptions& descriptions) {
  edm::ParameterSetDescription desc;
  desc.add<edm::InputTag>("pf_src", edm::InputTag("packedPFCandidates"));
  desc.add<bool>("ignore_leptons", false);
  desc.add<double>("norm_factor", 50.);
  desc.add<unsigned int>("max_n_pf", 4500);
  desc.add<std::string>("graph_path", "RecoMET/METPUSubtraction/data/deepmet/deepmet_v1_2018.pb");
  descriptions.add("deepMETProducer", desc);
}

DEFINE_FWK_MODULE(DeepMETProducer);
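
// A minimal configuration sketch (not part of this file): given the parameter descriptions
// registered above, the producer could be instantiated in a cmsRun python config roughly as
//
//   process.deepMETProducer = cms.EDProducer("DeepMETProducer",
//       pf_src = cms.InputTag("packedPFCandidates"),
//       norm_factor = cms.double(50.),
//       ignore_leptons = cms.bool(False),
//       max_n_pf = cms.uint32(4500),
//       graph_path = cms.string("RecoMET/METPUSubtraction/data/deepmet/deepmet_v1_2018.pb"))
//
// with the resulting pat::METCollection stored under the module label.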