
GEMSegmentAlgorithm.cc
#include "GEMSegmentAlgorithm.h"
#include "MuonSegFit.h"

// headers for facilities used below (edm::LogVerbatim/LogDebug, cms::Exception, reco::deltaPhi, std::setw)
#include "FWCore/MessageLogger/interface/MessageLogger.h"
#include "FWCore/Utilities/interface/Exception.h"
#include "DataFormats/Math/interface/deltaPhi.h"

#include <algorithm>
#include <cmath>
#include <iomanip>
#include <iostream>
#include <string>
/* Constructor
 *
 */
GEMSegmentAlgorithm::GEMSegmentAlgorithm(const edm::ParameterSet& ps)
    : GEMSegmentAlgorithmBase(ps), myName("GEMSegmentAlgorithm") {
  debug = ps.getUntrackedParameter<bool>("GEMDebug");
  minHitsPerSegment = ps.getParameter<unsigned int>("minHitsPerSegment");
  preClustering = ps.getParameter<bool>("preClustering");
  dXclusBoxMax = ps.getParameter<double>("dXclusBoxMax");
  dYclusBoxMax = ps.getParameter<double>("dYclusBoxMax");
  preClustering_useChaining = ps.getParameter<bool>("preClusteringUseChaining");
  dPhiChainBoxMax = ps.getParameter<double>("dPhiChainBoxMax");
  dEtaChainBoxMax = ps.getParameter<double>("dEtaChainBoxMax");
  maxRecHitsInCluster = ps.getParameter<int>("maxRecHitsInCluster");
  clusterOnlySameBXRecHits = ps.getParameter<bool>("clusterOnlySameBXRecHits");

  // maybe to be used in the future ???
  // Pruning = ps.getParameter<bool>("Pruning");
  // BrutePruning = ps.getParameter<bool>("BrutePruning");
  // maxRecHitsInCluster is the maximal number of hits in a precluster that is being processed.
  // This cut is intended to remove messy events. Currently nothing is returned if there are
  // more than maxRecHitsInCluster hits. It could be useful to return an estimate of the
  // cluster position, which is available.
  // maxRecHitsInCluster = ps.getParameter<int>("maxRecHitsInCluster");
  // onlyBestSegment = ps.getParameter<bool>("onlyBestSegment");

  // CSC uses pruning to remove clearly bad hits, using as much info from the rechits as possible: charge, position, timing, ...
  // In fits with a bad chi^2 they look for the worst hit (the hit with an abnormally large residual);
  // if a worst hit is found, they refit without it and keep the new fit if it is considerably better than the original one.
}
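
// Illustrative sketch (editorial, not part of the original file): the parameters read above are
// normally supplied by the job configuration. A hedged example with made-up values, using the
// edm::ParameterSet setters that mirror the getters in the constructor, would look like:
//
//   edm::ParameterSet ps;
//   ps.addUntrackedParameter<bool>("GEMDebug", false);
//   ps.addParameter<unsigned int>("minHitsPerSegment", 2);
//   ps.addParameter<bool>("preClustering", true);
//   ps.addParameter<double>("dXclusBoxMax", 1.0);
//   ps.addParameter<double>("dYclusBoxMax", 5.0);
//   ps.addParameter<bool>("preClusteringUseChaining", true);
//   ps.addParameter<double>("dPhiChainBoxMax", 0.02);
//   ps.addParameter<double>("dEtaChainBoxMax", 0.05);
//   ps.addParameter<int>("maxRecHitsInCluster", 6);
//   ps.addParameter<bool>("clusterOnlySameBXRecHits", true);
//   GEMSegmentAlgorithm algo(ps);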

/* Destructor
 *
 */
GEMSegmentAlgorithm::~GEMSegmentAlgorithm() {}

std::vector<GEMSegment> GEMSegmentAlgorithm::run(const GEMEnsemble& ensemble, const EnsembleHitContainer& rechits) {
  // pre-cluster rechits and loop over all sub clusters separately
  std::vector<GEMSegment> segments_temp;
  std::vector<GEMSegment> segments;
  ProtoSegments rechits_clusters;  // this is a collection of groups of rechits

  if (preClustering) {
    // run a pre-clusterer on the given rechits to split obviously separated segment seeds:
    if (preClustering_useChaining) {
      // it uses X,Y,Z information; there are no configurable parameters used;
      // the X, Y, Z "cuts" are just (much) wider than reasonable high pt segments
      edm::LogVerbatim("GEMSegmentAlgorithm") << "[GEMSegmentAlgorithm::run] preClustering :: use Chaining";
      rechits_clusters = this->chainHits(ensemble, rechits);
    }
    else {
      // it uses X,Y information + configurable parameters
      edm::LogVerbatim("GEMSegmentAlgorithm") << "[GEMSegmentAlgorithm::run] Clustering";
      rechits_clusters = this->clusterHits(ensemble, rechits);
    }
    // loop over the found clusters:
    edm::LogVerbatim("GEMSegmentAlgorithm") << "[GEMSegmentAlgorithm::run] Loop over clusters and build segments";
    for (auto sub_rechits = rechits_clusters.begin(); sub_rechits != rechits_clusters.end(); ++sub_rechits) {
      // clear the buffer for the subset of segments:
      segments_temp.clear();
      // build the subset of segments:
      this->buildSegments(ensemble, (*sub_rechits), segments_temp);
      // add the found subset of segments to the collection of all segments in this chamber:
      segments.insert(segments.end(), segments_temp.begin(), segments_temp.end());
    }

    return segments;
  }
  else {
    this->buildSegments(ensemble, rechits, segments);
    return segments;
  }
}
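
// Illustrative sketch (editorial, not part of the original file): how a caller might assemble the
// GEMEnsemble for one superchamber and invoke run(). The names superChamber, partitions and
// rechitsInChamber are placeholders assumed to come from the GEM geometry and the rechit
// collection; they are not real variables in this package.
//
//   GEMSegmentAlgorithm::GEMEnsemble ensemble;
//   ensemble.first = superChamber;                          // const GEMSuperChamber*
//   for (const GEMEtaPartition* part : partitions)
//     ensemble.second[part->id().rawId()] = part;           // map keyed by GEMDetId raw id
//   std::vector<GEMSegment> segments = algo.run(ensemble, rechitsInChamber);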


// ********************************************************************;
GEMSegmentAlgorithm::ProtoSegments
GEMSegmentAlgorithm::clusterHits(const GEMEnsemble& ensemble, const EnsembleHitContainer& rechits) {

  // think how to implement BX requirement here

  ProtoSegments rechits_clusters;  // this is a collection of groups of rechits

  float dXclus_box = 0.0;
  float dYclus_box = 0.0;

  ProtoSegments seeds; seeds.reserve(rechits.size());

  std::vector<float> running_meanX; running_meanX.reserve(rechits.size());
  std::vector<float> running_meanY; running_meanY.reserve(rechits.size());

  std::vector<float> seed_minX; seed_minX.reserve(rechits.size());
  std::vector<float> seed_maxX; seed_maxX.reserve(rechits.size());
  std::vector<float> seed_minY; seed_minY.reserve(rechits.size());
  std::vector<float> seed_maxY; seed_maxY.reserve(rechits.size());

  // split rechits into subvectors and return vector of vectors:
  // Loop over rechits
  // Create one seed per hit
  for (unsigned int i = 0; i < rechits.size(); ++i) {
    seeds.push_back(EnsembleHitContainer(1, rechits[i]));

    GEMDetId rhID = rechits[i]->gemId();
    const GEMEtaPartition* rhEP = (ensemble.second.find(rhID.rawId()))->second;
    if (!rhEP)
      throw cms::Exception("GEMEtaPartition not found")
          << "Corresponding GEMEtaPartition to GEMDetId: " << rhID << " not found in the GEMEnsemble";
    const GEMSuperChamber* rhCH = ensemble.first;
    LocalPoint rhLP_inEtaPartFrame = rechits[i]->localPosition();
    GlobalPoint rhGP_inCMSFrame = rhEP->toGlobal(rhLP_inEtaPartFrame);
    LocalPoint rhLP_inChamberFrame = rhCH->toLocal(rhGP_inCMSFrame);

    running_meanX.push_back(rhLP_inChamberFrame.x());
    running_meanY.push_back(rhLP_inChamberFrame.y());

    // set min/max X and Y for box containing the hits in the precluster:
    seed_minX.push_back(rhLP_inChamberFrame.x());
    seed_maxX.push_back(rhLP_inChamberFrame.x());
    seed_minY.push_back(rhLP_inChamberFrame.y());
    seed_maxY.push_back(rhLP_inChamberFrame.y());
  }

  // merge clusters that are too close
  // measure distance between final "running mean"
  for (size_t NNN = 0; NNN < seeds.size(); ++NNN) {
    for (size_t MMM = NNN + 1; MMM < seeds.size(); ++MMM) {
      if (running_meanX[MMM] == running_max || running_meanX[NNN] == running_max) {
        LogDebug("GEMSegmentAlgorithm") << "[GEMSegmentAlgorithm::clusterHits]: ALARM! Skipping used seeds, this should not happen - inform developers!";
        continue;  // skip seeds that have been used
      }

      // calculate cut criteria for simple running mean distance cut:
      // dXclus = fabs(running_meanX[NNN] - running_meanX[MMM]);
      // dYclus = fabs(running_meanY[NNN] - running_meanY[MMM]);
      // calculate minimal distance between precluster boxes containing the hits:
      if (running_meanX[NNN] > running_meanX[MMM]) dXclus_box = seed_minX[NNN] - seed_maxX[MMM];
      else dXclus_box = seed_minX[MMM] - seed_maxX[NNN];
      if (running_meanY[NNN] > running_meanY[MMM]) dYclus_box = seed_minY[NNN] - seed_maxY[MMM];
      else dYclus_box = seed_minY[MMM] - seed_maxY[NNN];

      if (dXclus_box < dXclusBoxMax && dYclus_box < dYclusBoxMax) {
        // merge clusters!
        // merge by adding seed NNN to seed MMM and erasing seed NNN

        // calculate running mean for the merged seed:
        if (seeds[NNN].size() + seeds[MMM].size() != 0) {
          running_meanX[MMM] = (running_meanX[NNN] * seeds[NNN].size() + running_meanX[MMM] * seeds[MMM].size()) / (seeds[NNN].size() + seeds[MMM].size());
          running_meanY[MMM] = (running_meanY[NNN] * seeds[NNN].size() + running_meanY[MMM] * seeds[MMM].size()) / (seeds[NNN].size() + seeds[MMM].size());
        }

        // update min/max X and Y for box containing the hits in the merged cluster:
        if (seed_minX[NNN] < seed_minX[MMM]) seed_minX[MMM] = seed_minX[NNN];
        if (seed_maxX[NNN] > seed_maxX[MMM]) seed_maxX[MMM] = seed_maxX[NNN];
        if (seed_minY[NNN] < seed_minY[MMM]) seed_minY[MMM] = seed_minY[NNN];
        if (seed_maxY[NNN] > seed_maxY[MMM]) seed_maxY[MMM] = seed_maxY[NNN];

        // add seed NNN to MMM (lower to larger number)
        seeds[MMM].insert(seeds[MMM].end(), seeds[NNN].begin(), seeds[NNN].end());

        // mark seed NNN as used (set its running mean to the running_max sentinel)
        running_meanX[NNN] = running_max;
        running_meanY[NNN] = running_max;
        // we have merged seed NNN into the higher seed MMM - continue with the next seed (NNN+1)
        break;
      }
    }
  }

  // hand over the final seeds to the output
  // (it would be more elegant to erase the merged seeds in the step above,
  //  rather than keeping them around and flagging them as used)
  for (size_t NNN = 0; NNN < seeds.size(); ++NNN) {
    if (running_meanX[NNN] == running_max) continue;  // skip seeds that have been marked as used up in merging
    rechits_clusters.push_back(seeds[NNN]);
  }

  return rechits_clusters;
}
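
// Worked example (editorial note, not part of the original file) of the size-weighted running
// mean used when merging two preclusters above: for seeds A and B with nA and nB hits,
//   meanX(merged) = (meanX(A)*nA + meanX(B)*nB) / (nA + nB)
// e.g. meanX(A) = 1.0 with nA = 2 and meanX(B) = 4.0 with nB = 1 gives (1.0*2 + 4.0*1) / 3 = 2.0,
// i.e. exactly the mean of the three underlying hit positions.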


GEMSegmentAlgorithm::ProtoSegments
GEMSegmentAlgorithm::chainHits(const GEMEnsemble& ensemble, const EnsembleHitContainer& rechits) {

  ProtoSegments rechits_chains;
  ProtoSegments seeds;
  seeds.reserve(rechits.size());
  std::vector<bool> usedCluster(rechits.size(), false);

  // split rechits into subvectors and return vector of vectors:
  // Loop over rechits
  // Create one seed per hit
  for (unsigned int i = 0; i < rechits.size(); ++i)
    seeds.push_back(EnsembleHitContainer(1, rechits[i]));

  // merge chains that are too close ("touch" each other)
  for (size_t NNN = 0; NNN < seeds.size(); ++NNN) {
    for (size_t MMM = NNN + 1; MMM < seeds.size(); ++MMM) {
      if (usedCluster[MMM] || usedCluster[NNN]) {
        continue;
      }
      // it is all in how we define "good";
      // try not to "cluster" the hits but to "chain" them;
      // it does the clustering but also does a better job
      // for inclined tracks (not clustering them together;
      // crossed tracks would still be clustered together)
      // 22.12.09: In fact it is not much different
      // from the "clustering"; we just introduce another
      // variable into the game - Z. And it makes sense
      // to re-introduce Y (or actually the wire group number)
      // in a similar way as for the strip number - see
      // the code below.
      bool goodToMerge = isGoodToMerge(ensemble, seeds[NNN], seeds[MMM]);
      if (goodToMerge) {
        // merge chains!
        // merge by adding seed NNN to seed MMM and erasing seed NNN

        // add seed NNN to MMM (lower to larger number)
        seeds[MMM].insert(seeds[MMM].end(), seeds[NNN].begin(), seeds[NNN].end());

        // mark seed NNN as used
        usedCluster[NNN] = true;
        // we have merged seed NNN into the higher seed MMM - continue with the next seed (NNN+1)
        break;
      }
    }
  }

  // hand over the final seeds to the output
  // (it would be more elegant to erase the merged seeds in the step above,
  //  rather than keeping them around and flagging them as used)
  for (size_t NNN = 0; NNN < seeds.size(); ++NNN) {
    if (usedCluster[NNN]) continue;  // skip seeds that have been marked as used up in merging
    rechits_chains.push_back(seeds[NNN]);
  }

  //***************************************************************

  return rechits_chains;
}
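
// Editorial note (not part of the original file): chainHits uses the same seed-and-merge pattern
// as clusterHits above, but the merge decision is delegated to isGoodToMerge below, which requires
// the two seeds to have hits on different layers, to be close in phi and eta (dPhiChainBoxMax,
// dEtaChainBoxMax), and - if clusterOnlySameBXRecHits is set - to share the same bunch crossing.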

bool GEMSegmentAlgorithm::isGoodToMerge(const GEMEnsemble& ensemble, const EnsembleHitContainer& newChain, const EnsembleHitContainer& oldChain) {
  bool phiRequirementOK = false;  // once it is true in the loop, it is ok to merge
  bool etaRequirementOK = false;  // once it is true in the loop, it is ok to merge
  bool bxRequirementOK = false;   // once it is true in the loop, it is ok to merge

  for (size_t iRH_new = 0; iRH_new < newChain.size(); ++iRH_new) {
    int layer_new = (newChain[iRH_new]->gemId().station() - 1) * 2 + newChain[iRH_new]->gemId().layer();

    const GEMEtaPartition* rhEP = (ensemble.second.find(newChain[iRH_new]->gemId().rawId()))->second;
    GlobalPoint pos_new = rhEP->toGlobal(newChain[iRH_new]->localPosition());

    for (size_t iRH_old = 0; iRH_old < oldChain.size(); ++iRH_old) {
      int layer_old = (oldChain[iRH_old]->gemId().station() - 1) * 2 + oldChain[iRH_old]->gemId().layer();
      // Layers - hits on the same layer should not be allowed ==> abs(layer_new - layer_old) > 0 is ok; == 0 is not
      if (layer_new == layer_old) return false;

      const GEMEtaPartition* oldrhEP = (ensemble.second.find(oldChain[iRH_old]->gemId().rawId()))->second;
      GlobalPoint pos_old = oldrhEP->toGlobal(oldChain[iRH_old]->localPosition());

      // Eta & Phi - to be chained, two hits also need to be "close" in phi and eta
      if (phiRequirementOK == false) phiRequirementOK = std::abs(reco::deltaPhi(float(pos_new.phi()), float(pos_old.phi()))) < dPhiChainBoxMax;
      if (etaRequirementOK == false) etaRequirementOK = std::abs(pos_new.eta() - pos_old.eta()) < dEtaChainBoxMax;
      // and they should have a time difference compatible with the hypothesis
      // that the rechits originate from the same particle, but were detected in different layers
      if (bxRequirementOK == false) {
        if (!clusterOnlySameBXRecHits) {
          bxRequirementOK = true;
        }
        else {
          if (newChain[iRH_new]->BunchX() == oldChain[iRH_old]->BunchX()) bxRequirementOK = true;
        }
      }

      if (phiRequirementOK && etaRequirementOK && bxRequirementOK)
        return true;
    }
  }
  return false;
}
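
// Editorial note (not part of the original file): reco::deltaPhi returns the signed phi difference
// wrapped into the (-pi, pi] range, so hits on either side of the phi = +/-pi boundary are still
// treated as close. A minimal standalone equivalent of the phi criterion above, for illustration:
//
//   double dphi = std::remainder(double(pos_new.phi()) - double(pos_old.phi()), 2.0 * M_PI);
//   bool closeInPhi = std::abs(dphi) < dPhiChainBoxMax;   // |dphi| is at most pi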

void GEMSegmentAlgorithm::buildSegments(const GEMEnsemble& ensemble, const EnsembleHitContainer& rechits, std::vector<GEMSegment>& gemsegs) {
  if (rechits.size() < minHitsPerSegment) return;

  MuonSegFit::MuonRecHitContainer muonRecHits;
  proto_segment.clear();

  // select hits from the ensemble and sort it
  const GEMSuperChamber* suCh = ensemble.first;
  for (auto rh = rechits.begin(); rh != rechits.end(); rh++) {
    proto_segment.push_back(*rh);

    // for segFit - using local point in chamber frame
    const GEMEtaPartition* thePartition = (ensemble.second.find((*rh)->gemId()))->second;
    GlobalPoint gp = thePartition->toGlobal((*rh)->localPosition());
    const LocalPoint lp = suCh->toLocal(gp);

    GEMRecHit* newRH = (*rh)->clone();
    newRH->setPosition(lp);
    MuonSegFit::MuonRecHitPtr trkRecHit(newRH);
    muonRecHits.push_back(trkRecHit);
  }

#ifdef EDM_ML_DEBUG  // have lines below only compiled when in debug mode
  edm::LogVerbatim("GEMSegmentAlgorithm") << "[GEMSegmentAlgorithm::buildSegments] will now try to fit a GEMSegment from collection of " << rechits.size() << " GEM RecHits";
  for (auto rh = rechits.begin(); rh != rechits.end(); ++rh) {
    auto gemid = (*rh)->gemId();
    auto rhLP = (*rh)->localPosition();
    edm::LogVerbatim("GEMSegmentAlgorithm") << "[RecHit :: Loc x = " << std::showpos << std::setw(9) << rhLP.x() << " Loc y = " << std::showpos << std::setw(9) << rhLP.y()
                                            << " BX = " << std::showpos << (*rh)->BunchX() << " -- " << gemid.rawId() << " = " << gemid << " ]";
  }
#endif

  // The actual fit on all hits of the vector of the selected Tracking RecHits:
  sfit_ = std::make_unique<MuonSegFit>(muonRecHits);
  bool goodfit = sfit_->fit();
  edm::LogVerbatim("GEMSegmentAlgorithm") << "[GEMSegmentAlgorithm::buildSegments] GEMSegment fit done :: fit is good = " << goodfit;

  // quit function if fit was not OK
  if (!goodfit) {
    for (auto rh : muonRecHits) rh.reset();
    return;
  }
  // obtain all information necessary to make the segment:
  LocalPoint protoIntercept = sfit_->intercept();
  LocalVector protoDirection = sfit_->localdir();
  AlgebraicSymMatrix protoErrors = sfit_->covarianceMatrix();
  double protoChi2 = sfit_->chi2();

  // Calculate the bunch crossing of the GEM Segment
  float bx = 0.0;
  for (auto rh = rechits.begin(); rh != rechits.end(); ++rh) {
    bx += (*rh)->BunchX();
  }
  if (!rechits.empty()) bx = bx * 1.0 / (rechits.size());

  // Calculate the central value and uncertainty of the segment time
  // if we want to study impact of 2-3ns time resolution on GEM Segment
  // (if there will be TDCs in readout and not just BX determination)
  // then implement tof() method for rechits and use this here
  /*
  float averageTime = 0.;
  for (auto rh = rechits.begin(); rh != rechits.end(); ++rh) {
    const GEMEtaPartition* thePartition = (ensemble.second.find((*rh)->gemId().rawId()))->second;
    GlobalPoint pos = thePartition->toGlobal((*rh)->localPosition());
    float tof = pos.mag() * 0.01 / 0.2997925 + 25.0 * (*rh)->BunchX();
    averageTime += tof;
  }
  if (rechits.size() != 0) averageTime = averageTime / (rechits.size());
  float timeUncrt = 0.;
  for (auto rh = rechits.begin(); rh != rechits.end(); ++rh) {
    const GEMEtaPartition* thePartition = (ensemble.second.find((*rh)->gemId().rawId()))->second;
    GlobalPoint pos = thePartition->toGlobal((*rh)->localPosition());
    float tof = pos.mag() * 0.01 / 0.2997925 + 25.0 * (*rh)->BunchX();
    timeUncrt += pow(tof - averageTime, 2);
  }
  if (rechits.size() != 0) timeUncrt = timeUncrt / (rechits.size());
  timeUncrt = sqrt(timeUncrt);
  */
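  // Note on the commented-out sketch above (editorial): pos.mag() is the flight path in cm, so
  // pos.mag()*0.01 converts it to metres; dividing by 0.2997925 m/ns (the speed of light) gives
  // the straight-line time of flight in ns, and each bunch crossing adds 25 ns via BunchX().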

  // save all information inside GEMSegment
  edm::LogVerbatim("GEMSegmentAlgorithm") << "[GEMSegmentAlgorithm::buildSegments] will now wrap fit info in GEMSegment dataformat";
  GEMSegment tmp(proto_segment, protoIntercept, protoDirection, protoErrors, protoChi2, bx);
  // GEMSegment tmp(proto_segment, protoIntercept, protoDirection, protoErrors, protoChi2, averageTime, timeUncrt);

  edm::LogVerbatim("GEMSegmentAlgorithm") << "[GEMSegmentAlgorithm::buildSegments] GEMSegment made in " << tmp.gemDetId();
  edm::LogVerbatim("GEMSegmentAlgorithm") << "[GEMSegmentAlgorithm::buildSegments] " << tmp;

  for (auto rh : muonRecHits) rh.reset();
  gemsegs.push_back(tmp);
}
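
// Editorial note (not part of the original file): muonRecHits owns clones of the input rechits
// through MuonSegFit::MuonRecHitPtr (a std::shared_ptr<TrackingRecHit>); the explicit rh.reset()
// calls above release those clones once the fit result has been wrapped into the GEMSegment that
// is pushed into gemsegs.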