CSCSegAlgoST.cc
12 #include "CSCSegAlgoST.h"
13 #include "CSCCondSegFit.h"
14 #include "CSCSegAlgoShowering.h"
15 
 16 #include "Geometry/CSCGeometry/interface/CSCChamber.h"
 17 
 18 #include "FWCore/ParameterSet/interface/ParameterSet.h"
 19 
 20 #include "FWCore/MessageLogger/interface/MessageLogger.h"
 21 
 22 #include "CommonTools/Statistics/interface/ChiSquaredProbability.h"
 23 
24 #include <algorithm>
25 #include <cmath>
26 #include <iostream>
27 #include <string>
28 
29 
30 /* constructor
31  *
32  */
 33 CSCSegAlgoST::CSCSegAlgoST(const edm::ParameterSet& ps) :
 34  CSCSegmentAlgorithm(ps), myName_("CSCSegAlgoST"), ps_(ps), showering_(nullptr) {
35 
36  debug = ps.getUntrackedParameter<bool>("CSCDebug");
37  // minLayersApart = ps.getParameter<int>("minLayersApart");
38  // nSigmaFromSegment = ps.getParameter<double>("nSigmaFromSegment");
39  minHitsPerSegment = ps.getParameter<int>("minHitsPerSegment");
40  // muonsPerChamberMax = ps.getParameter<int>("CSCSegmentPerChamberMax");
41  // chi2Max = ps.getParameter<double>("chi2Max");
42  dXclusBoxMax = ps.getParameter<double>("dXclusBoxMax");
43  dYclusBoxMax = ps.getParameter<double>("dYclusBoxMax");
44  preClustering = ps.getParameter<bool>("preClustering");
45  preClustering_useChaining = ps.getParameter<bool>("preClusteringUseChaining");
46  Pruning = ps.getParameter<bool>("Pruning");
47  BrutePruning = ps.getParameter<bool>("BrutePruning");
48  BPMinImprovement = ps.getParameter<double>("BPMinImprovement");
49  // maxRecHitsInCluster is the maximal number of hits in a precluster that is being processed
50  // This cut is intended to remove messy events. Currently nothing is returned if there are
 51  // more than maxRecHitsInCluster hits. It could be useful to return an estimate of the
52  // cluster position, which is available.
53  maxRecHitsInCluster = ps.getParameter<int>("maxRecHitsInCluster");
54  onlyBestSegment = ps.getParameter<bool>("onlyBestSegment");
55 
56  hitDropLimit4Hits = ps.getParameter<double>("hitDropLimit4Hits");
57  hitDropLimit5Hits = ps.getParameter<double>("hitDropLimit5Hits");
58  hitDropLimit6Hits = ps.getParameter<double>("hitDropLimit6Hits");
59 
60  yweightPenaltyThreshold = ps.getParameter<double>("yweightPenaltyThreshold");
61  yweightPenalty = ps.getParameter<double>("yweightPenalty");
62 
63  curvePenaltyThreshold = ps.getParameter<double>("curvePenaltyThreshold");
64  curvePenalty = ps.getParameter<double>("curvePenalty");
65 
66  useShowering = ps.getParameter<bool>("useShowering");
67  if (useShowering) showering_ = new CSCSegAlgoShowering( ps );
68 
70  adjustCovariance_ = ps.getParameter<bool>("CorrectTheErrors");
71 
72  chi2Norm_3D_ = ps.getParameter<double>("NormChi2Cut3D");
73  prePrun_ = ps.getParameter<bool>("prePrun");
74  prePrunLimit_ = ps.getParameter<double>("prePrunLimit");
75 
76  if (debug) edm::LogVerbatim("CSCSegment") << "CSCSegAlgoST: with factored conditioned segment fit";
77 }
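
A minimal configuration sketch, not taken from CSCSegAlgoST.cc, showing how the parameters read in the constructor above could be assembled by hand, e.g. for a standalone test. The function name is hypothetical and the numerical values are placeholders rather than the production defaults; every parameter the constructor reads must ultimately be present, or getParameter will throw.

#include "FWCore/ParameterSet/interface/ParameterSet.h"

edm::ParameterSet exampleSTConfigSketch() {
  edm::ParameterSet ps;
  ps.addUntrackedParameter<bool>("CSCDebug", false);
  ps.addParameter<int>("minHitsPerSegment", 3);        // placeholder value
  ps.addParameter<double>("dXclusBoxMax", 8.0);        // placeholder value
  ps.addParameter<double>("dYclusBoxMax", 8.0);        // placeholder value
  ps.addParameter<bool>("preClustering", true);
  ps.addParameter<bool>("preClusteringUseChaining", true);
  ps.addParameter<bool>("Pruning", true);
  ps.addParameter<bool>("BrutePruning", true);
  ps.addParameter<double>("BPMinImprovement", 10000.); // placeholder value
  // ... plus the remaining parameters read above (maxRecHitsInCluster, onlyBestSegment,
  // the hitDropLimit and penalty settings, useShowering, CorrectTheErrors,
  // NormChi2Cut3D, prePrun, prePrunLimit)
  return ps;
}
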
78 
79 /* Destructor
80  *
81  */
 82 CSCSegAlgoST::~CSCSegAlgoST() {
 83  delete showering_;
84 }
85 
86 
87 std::vector<CSCSegment> CSCSegAlgoST::run(const CSCChamber* aChamber, const ChamberHitContainer& rechits) {
88 
89  // Set member variable
90  theChamber = aChamber;
91 
92  LogTrace("CSCSegAlgoST") << "[CSCSegAlgoST::run] Start building segments in chamber " << theChamber->id();
93 
 94  // pre-cluster rechits and loop over all sub clusters separately
95  std::vector<CSCSegment> segments_temp;
96  std::vector<CSCSegment> segments;
97  std::vector<ChamberHitContainer> rechits_clusters; // a collection of clusters of rechits
98 
99  // Define yweight penalty depending on chamber.
100  // We fixed the relative ratios, but they can be scaled by parameters:
101 
102  for(int a = 0; a<5; ++a) {
103  for(int b = 0; b<5; ++b) {
104  a_yweightPenaltyThreshold[a][b] = 0.0;
105  }
106  }
107 
118 
119  if(preClustering) {
 120  // run a pre-clusterer on the given rechits to split clearly-separated segment seeds:
 121  if(preClustering_useChaining) {
 122  // it uses X,Y,Z information; there are no configurable parameters used;
 123  // the X, Y, Z "cuts" are just (much) wider than the LCT readout ones
 124  // (which are actually not step functions); this new code could accommodate
 125  // the clusterHits one below, but we keep both for safety and for backward
 126  // comparison reasons
 127  rechits_clusters = chainHits( theChamber, rechits );
128  }
129  else{
130  // it uses X,Y information + configurable parameters
131  rechits_clusters = clusterHits( theChamber, rechits );
132  }
133  // loop over the found clusters:
134  for(std::vector<ChamberHitContainer>::iterator sub_rechits = rechits_clusters.begin(); sub_rechits != rechits_clusters.end(); ++sub_rechits ) {
135  // clear the buffer for the subset of segments:
136  segments_temp.clear();
137  // build the subset of segments:
138  segments_temp = buildSegments( (*sub_rechits) );
139  // add the found subset of segments to the collection of all segments in this chamber:
140  segments.insert( segments.end(), segments_temp.begin(), segments_temp.end() );
141  }
142  // Any pruning of hits?
143  if( Pruning ) {
144  segments_temp.clear(); // segments_temp needed?!?!
145  segments_temp = prune_bad_hits( theChamber, segments );
146  segments.clear(); // segments_temp needed?!?!
147  segments.swap(segments_temp); // segments_temp needed?!?!
148  }
149 
150  // Ganged strips in ME1/1A?
151  if ( ("ME1/a" == aChamber->specs()->chamberTypeName()) && aChamber->specs()->gangedStrips() ){
152  // if ( aChamber->specs()->gangedStrips() ){
153  findDuplicates(segments);
154  }
155  return segments;
156  }
157  else {
158  segments = buildSegments(rechits);
159  if( Pruning ) {
160  segments_temp.clear(); // segments_temp needed?!?!
161  segments_temp = prune_bad_hits( theChamber, segments );
162  segments.clear(); // segments_temp needed?!?!
163  segments.swap(segments_temp); // segments_temp needed?!?!
164  }
165 
166  // Ganged strips in ME1/1A?
167  if ( ("ME1/a" == aChamber->specs()->chamberTypeName()) && aChamber->specs()->gangedStrips() ){
168  // if ( aChamber->specs()->gangedStrips() ){
169  findDuplicates(segments);
170  }
171  return segments;
172  //return buildSegments(rechits);
173  }
174 }
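
A brief usage sketch, not part of this file: a segment-builder driver would call run() once per chamber, with the rechits already grouped by chamber. The helper name and variable names are hypothetical, and it is assumed that the ChamberHitContainer typedef from the header is publicly visible.

#include <vector>

void buildChamberSegmentsSketch(CSCSegAlgoST& algo,
                                const CSCChamber* chamber,
                                const CSCSegAlgoST::ChamberHitContainer& hitsInChamber,
                                std::vector<CSCSegment>& allSegments) {
  // run() returns every segment found in this chamber, after the optional
  // pruning and the ganged-ME1/1a duplicate handling implemented above
  std::vector<CSCSegment> segs = algo.run(chamber, hitsInChamber);
  allSegments.insert(allSegments.end(), segs.begin(), segs.end());
}
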
175 
176 // ********************************************************************;
177 // *** This method is meant to remove clearly bad hits, using as ***;
178 // *** much information from the chamber as possible (e.g. charge, ***;
179 // *** hit position, timing, etc.) ***;
180 // ********************************************************************;
181 std::vector<CSCSegment> CSCSegAlgoST::prune_bad_hits(const CSCChamber* aChamber, std::vector<CSCSegment> & segments) {
182 
183  // std::cout<<"*************************************************************"<<std::endl;
184  // std::cout<<"Called prune_bad_hits in Chamber "<< theChamber->specs()->chamberTypeName()<<std::endl;
185  // std::cout<<"*************************************************************"<<std::endl;
186 
187  std::vector<CSCSegment> segments_temp;
188  std::vector<ChamberHitContainer> rechits_clusters; // this is a collection of groups of rechits
189 
190  const float chi2ndfProbMin = 1.0e-4;
191  bool use_brute_force = BrutePruning;
192 
193  int hit_nr = 0;
194  int hit_nr_worst = -1;
195  //int hit_nr_2ndworst = -1;
196 
197  for(std::vector<CSCSegment>::iterator it=segments.begin(); it != segments.end(); ++it) {
198 
 199  // do nothing for nhit <= minHitsPerSegment
200  if( (*it).nRecHits() <= minHitsPerSegment ) continue;
201 
202  if( !use_brute_force ) {// find worst hit
203 
204  float chisq = (*it).chi2();
205  int nhits = (*it).nRecHits();
206  LocalPoint localPos = (*it).localPosition();
207  LocalVector segDir = (*it).localDirection();
208  const CSCChamber* cscchamber = theChamber;
209  float globZ ;
210 
211  GlobalPoint globalPosition = cscchamber->toGlobal(localPos);
212  globZ = globalPosition.z();
213 
214 
215  if( ChiSquaredProbability((double)chisq,(double)(2*nhits-4)) < chi2ndfProbMin ) {
216 
217  // find (rough) "residuals" (NOT excluding the hit from the fit - speed!) of hits on segment
218  std::vector<CSCRecHit2D> theseRecHits = (*it).specificRecHits();
219  std::vector<CSCRecHit2D>::const_iterator iRH_worst;
220  //float xdist_local = -99999.;
221 
222  float xdist_local_worst_sig = -99999.;
223  float xdist_local_2ndworst_sig = -99999.;
224  float xdist_local_sig = -99999.;
225 
226  hit_nr = 0;
227  hit_nr_worst = -1;
228  //hit_nr_2ndworst = -1;
229 
230  for ( std::vector<CSCRecHit2D>::const_iterator iRH = theseRecHits.begin(); iRH != theseRecHits.end(); ++iRH) {
231  //mark "worst" hit:
232 
233  //float z_at_target ;
234  //float radius ;
235  float loc_x_at_target;
236  //float loc_y_at_target;
237  //float loc_z_at_target;
238 
239  //z_at_target = 0.;
240  //radius = 0.;
241 
242  // set the z target in CMS global coordinates:
243  const CSCLayer* csclayerRH = theChamber->layer((*iRH).cscDetId().layer());
244  LocalPoint localPositionRH = (*iRH).localPosition();
245  GlobalPoint globalPositionRH = csclayerRH->toGlobal(localPositionRH);
246 
247  LocalError rerrlocal = (*iRH).localPositionError();
248  float xxerr = rerrlocal.xx();
249 
250  float target_z = globalPositionRH.z(); // target z position in cm (z pos of the hit)
251 
252  if(target_z > 0.) {
253  loc_x_at_target = localPos.x() + (segDir.x()/fabs(segDir.z())*( target_z - globZ ));
254  //loc_y_at_target = localPos.y() + (segDir.y()/fabs(segDir.z())*( target_z - globZ ));
255  //loc_z_at_target = target_z;
256  }
257  else {
258  loc_x_at_target = localPos.x() + ((-1)*segDir.x()/fabs(segDir.z())*( target_z - globZ ));
259  //loc_y_at_target = localPos.y() + ((-1)*segDir.y()/fabs(segDir.z())*( target_z - globZ ));
260  //loc_z_at_target = target_z;
261  }
262  // have to transform the segments coordinates back to the local frame... how?!!!!!!!!!!!!
263 
264  //xdist_local = fabs(localPositionRH.x() - loc_x_at_target);
265  xdist_local_sig = fabs((localPositionRH.x() -loc_x_at_target)/(xxerr));
266 
267  if( xdist_local_sig > xdist_local_worst_sig ) {
268  xdist_local_2ndworst_sig = xdist_local_worst_sig;
269  xdist_local_worst_sig = xdist_local_sig;
270  iRH_worst = iRH;
271  //hit_nr_2ndworst = hit_nr_worst;
272  hit_nr_worst = hit_nr;
273  }
274  else if(xdist_local_sig > xdist_local_2ndworst_sig) {
275  xdist_local_2ndworst_sig = xdist_local_sig;
276  //hit_nr_2ndworst = hit_nr;
277  }
278  ++hit_nr;
279  }
280 
281  // reset worst hit number if certain criteria apply.
282  // Criteria: 2nd worst hit must be at least a factor of
283  // 1.5 better than the worst in terms of sigma:
284  if ( xdist_local_worst_sig / xdist_local_2ndworst_sig < 1.5 ) {
285  hit_nr_worst = -1;
286  //hit_nr_2ndworst = -1;
287  }
288  }
289  }
290 
291  // if worst hit was found, refit without worst hit and select if considerably better than original fit.
292  // Can also use brute force: refit all n-1 hit segments and choose one over the n hit if considerably "better"
293 
294  std::vector< CSCRecHit2D > buffer;
295  std::vector< std::vector< CSCRecHit2D > > reduced_segments;
296  std::vector< CSCRecHit2D > theseRecHits = (*it).specificRecHits();
297  float best_red_seg_prob = 0.0;
 298  // use for a chi2-difference criterion: float best_red_seg_prob = 99999.;
299  buffer.clear();
300 
301  if( ChiSquaredProbability((double)(*it).chi2(),(double)((2*(*it).nRecHits())-4)) < chi2ndfProbMin ) {
302 
303  buffer = theseRecHits;
304 
305  // Dirty switch: here one can select to refit all possible subsets or just the one without the
306  // tagged worst hit:
307  if( use_brute_force ) { // Brute force method: loop over all possible segments:
308  for(size_t bi = 0; bi < buffer.size(); ++bi) {
309  reduced_segments.push_back(buffer);
310  reduced_segments[bi].erase(reduced_segments[bi].begin()+(bi),reduced_segments[bi].begin()+(bi+1));
311  }
312  }
313  else { // More elegant but still biased: erase only worst hit
 314  // Note: the hit tagged as 'worst' is not strongly correlated with the hit that actually should be removed...
315  if( hit_nr_worst >= 0 && hit_nr_worst <= int(buffer.size()) ) {
316  // fill segment in buffer, delete worst hit
317  buffer.erase(buffer.begin()+(hit_nr_worst),buffer.begin()+(hit_nr_worst+1));
318  reduced_segments.push_back(buffer);
319  }
320  else {
321  // only fill segment in array, do not delete anything
322  reduced_segments.push_back(buffer);
323  }
324  }
325  }
326 
327  // Loop over the subsegments and fit (only one segment if "use_brute_force" is false):
328  for(size_t iSegment=0; iSegment<reduced_segments.size(); ++iSegment) {
329  // loop over hits on given segment and push pointers to hits into protosegment
330  protoSegment.clear();
331  for(size_t m = 0; m<reduced_segments[iSegment].size(); ++m ) {
332  protoSegment.push_back(&reduced_segments[iSegment][m]);
333  }
334 
335  // Create fitter object
336  CSCCondSegFit* segfit = new CSCCondSegFit( pset(), chamber(), protoSegment );
337  condpass1 = false;
338  condpass2 = false;
339  segfit->setScaleXError( 1.0 );
340  segfit->fit(condpass1, condpass2);
341 
342  // Attempt to handle numerical instability of the fit;
343  // The same as in the build method;
344  // Preprune is not applied;
345  if( adjustCovariance() ){
346  if(segfit->chi2()/segfit->ndof()>chi2Norm_3D_){
347  condpass1 = true;
348  segfit->fit(condpass1, condpass2);
349  }
350  if( (segfit->scaleXError() < 1.00005)&&(segfit->chi2()/segfit->ndof()>chi2Norm_3D_) ){
351  condpass2 = true;
352  segfit->fit(condpass1, condpass2);
353  }
354  }
355 
356  // calculate error matrix
357  // AlgebraicSymMatrix temp2 = segfit->covarianceMatrix();
358 
359  // build an actual segment
360  CSCSegment temp(protoSegment, segfit->intercept(), segfit->localdir(),
361  segfit->covarianceMatrix(), segfit->chi2() );
362 
363  // and finished with this fit
364  delete segfit;
365 
366  // n-hit segment is (*it)
367  // (n-1)-hit segment is temp
368  // replace n-hit segment with (n-1)-hit segment if segment probability is BPMinImprovement better
369  double oldchi2 = (*it).chi2();
370  double olddof = 2 * (*it).nRecHits() - 4;
371  double newchi2 = temp.chi2();
372  double newdof = 2 * temp.nRecHits() - 4;
373  if( ( ChiSquaredProbability(oldchi2,olddof) < (1./BPMinImprovement)*
374  ChiSquaredProbability(newchi2,newdof) )
375  && ( ChiSquaredProbability(newchi2,newdof) > best_red_seg_prob )
376  && ( ChiSquaredProbability(newchi2,newdof) > 1e-10 )
377  ) {
378  best_red_seg_prob = ChiSquaredProbability(newchi2,newdof);
 379  // The (n-1)-hit segment is better than the n-hit segment.
380  // If it has at least minHitsPerSegment hits replace the n-hit segment
381  // with this better (n-1)-hit segment:
382  if( temp.nRecHits() >= minHitsPerSegment ) {
383  (*it) = temp;
384  }
385  }
386  } // end of loop over subsegments (iSegment)
387 
388  } // end loop over segments (it)
389 
390  return segments;
391 
392 }
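
A minimal sketch, not part of this file, of the replacement test applied in the loop above, factored out with plain doubles: probOld and probNew stand for the chi2 probabilities of the original n-hit fit and of a refitted (n-1)-hit candidate, and bpMinImprovement plays the role of the BPMinImprovement parameter. The additional comparison against the best reduced candidate found so far (best_red_seg_prob) and the minHitsPerSegment check are omitted here.

inline bool acceptReducedSegmentSketch(double probOld, double probNew,
                                       double bpMinImprovement) {
  // keep the (n-1)-hit fit only if it is at least a factor bpMinImprovement
  // more probable than the original fit, and not numerically negligible
  return (probOld < probNew / bpMinImprovement) && (probNew > 1e-10);
}
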
393 
394 
395 // ********************************************************************;
396 std::vector< std::vector<const CSCRecHit2D*> > CSCSegAlgoST::clusterHits(const CSCChamber* aChamber, const ChamberHitContainer & rechits) {
397  theChamber = aChamber;
398 
399  std::vector<ChamberHitContainer> rechits_clusters; // this is a collection of groups of rechits
400  // const float dXclus_box_cut = 4.; // seems to work reasonably 070116
401  // const float dYclus_box_cut = 8.; // seems to work reasonably 070116
402 
403  //float dXclus = 0.0;
404  //float dYclus = 0.0;
405  float dXclus_box = 0.0;
406  float dYclus_box = 0.0;
407 
408  std::vector<const CSCRecHit2D*> temp;
409 
410  std::vector< ChamberHitContainer > seeds;
411 
412  std::vector<float> running_meanX;
413  std::vector<float> running_meanY;
414 
415  std::vector<float> seed_minX;
416  std::vector<float> seed_maxX;
417  std::vector<float> seed_minY;
418  std::vector<float> seed_maxY;
419 
420  //std::cout<<"*************************************************************"<<std::endl;
421  //std::cout<<"Called clusterHits in Chamber "<< theChamber->specs()->chamberTypeName()<<std::endl;
422  //std::cout<<"*************************************************************"<<std::endl;
423 
424  // split rechits into subvectors and return vector of vectors:
425  // Loop over rechits
426  // Create one seed per hit
427  for(unsigned int i = 0; i < rechits.size(); ++i) {
428 
429  temp.clear();
430 
431  temp.push_back(rechits[i]);
432 
433  seeds.push_back(temp);
434 
435  // First added hit in seed defines the mean to which the next hit is compared
436  // for this seed.
437 
438  running_meanX.push_back( rechits[i]->localPosition().x() );
439  running_meanY.push_back( rechits[i]->localPosition().y() );
440 
441  // set min/max X and Y for box containing the hits in the precluster:
442  seed_minX.push_back( rechits[i]->localPosition().x() );
443  seed_maxX.push_back( rechits[i]->localPosition().x() );
444  seed_minY.push_back( rechits[i]->localPosition().y() );
445  seed_maxY.push_back( rechits[i]->localPosition().y() );
446  }
447 
448  // merge clusters that are too close
449  // measure distance between final "running mean"
450  for(size_t NNN = 0; NNN < seeds.size(); ++NNN) {
451 
452  for(size_t MMM = NNN+1; MMM < seeds.size(); ++MMM) {
453  if(running_meanX[MMM] == 999999. || running_meanX[NNN] == 999999. ) {
454  LogTrace("CSCSegment|CSC") << "[CSCSegAlgoST::clusterHits] ALARM! Skipping used seeds, this should not happen - inform CSC DPG";
455  // std::cout<<"We should never see this line now!!!"<<std::endl;
456  continue; //skip seeds that have been used
457  }
458 
459  // calculate cut criteria for simple running mean distance cut:
460  //dXclus = fabs(running_meanX[NNN] - running_meanX[MMM]);
461  //dYclus = fabs(running_meanY[NNN] - running_meanY[MMM]);
462 
 463  // calculate minimal distance between precluster boxes containing the hits:
464  if ( running_meanX[NNN] > running_meanX[MMM] ) dXclus_box = seed_minX[NNN] - seed_maxX[MMM];
465  else dXclus_box = seed_minX[MMM] - seed_maxX[NNN];
466  if ( running_meanY[NNN] > running_meanY[MMM] ) dYclus_box = seed_minY[NNN] - seed_maxY[MMM];
467  else dYclus_box = seed_minY[MMM] - seed_maxY[NNN];
468 
469 
470  if( dXclus_box < dXclusBoxMax && dYclus_box < dYclusBoxMax ) {
471  // merge clusters!
472  // merge by adding seed NNN to seed MMM and erasing seed NNN
473 
474  // calculate running mean for the merged seed:
475  running_meanX[MMM] = (running_meanX[NNN]*seeds[NNN].size() + running_meanX[MMM]*seeds[MMM].size()) / (seeds[NNN].size()+seeds[MMM].size());
476  running_meanY[MMM] = (running_meanY[NNN]*seeds[NNN].size() + running_meanY[MMM]*seeds[MMM].size()) / (seeds[NNN].size()+seeds[MMM].size());
477 
478  // update min/max X and Y for box containing the hits in the merged cluster:
479  if ( seed_minX[NNN] <= seed_minX[MMM] ) seed_minX[MMM] = seed_minX[NNN];
480  if ( seed_maxX[NNN] > seed_maxX[MMM] ) seed_maxX[MMM] = seed_maxX[NNN];
481  if ( seed_minY[NNN] <= seed_minY[MMM] ) seed_minY[MMM] = seed_minY[NNN];
482  if ( seed_maxY[NNN] > seed_maxY[MMM] ) seed_maxY[MMM] = seed_maxY[NNN];
483 
484  // add seed NNN to MMM (lower to larger number)
485  seeds[MMM].insert(seeds[MMM].end(),seeds[NNN].begin(),seeds[NNN].end());
486 
487  // mark seed NNN as used (at the moment just set running mean to 999999.)
488  running_meanX[NNN] = 999999.;
489  running_meanY[NNN] = 999999.;
 490  // we have merged seed NNN into the higher-numbered seed MMM - need to continue to
 491  // the next seed (NNN+1)
492  break;
493  }
494 
495  }
496  }
497 
498  // hand over the final seeds to the output
 499  // would be more elegant if we could do the above step by erasing the merged
 500  // seeds directly, rather than just marking them as used
501  for(size_t NNN = 0; NNN < seeds.size(); ++NNN) {
502  if(running_meanX[NNN] == 999999.) continue; //skip seeds that have been marked as used up in merging
503  rechits_clusters.push_back(seeds[NNN]);
504  }
505 
506  //***************************************************************
507 
508  return rechits_clusters;
509 }
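
A self-contained sketch, not part of this file, of the box-merge criterion used in clusterHits() above, reduced to plain structs: two preclusters are merged when the gap between their hit bounding boxes is below dXclusBoxMax and dYclusBoxMax in local x and y. All names below are hypothetical.

struct PreclusterBoxSketch {
  float minX, maxX, minY, maxY;  // bounding box of the hits in the precluster
  float meanX, meanY;            // running means of the hit positions
};

inline bool shouldMergePreclustersSketch(const PreclusterBoxSketch& a,
                                         const PreclusterBoxSketch& b,
                                         float dXclusBoxMax, float dYclusBoxMax) {
  // signed gap between the two boxes along x and y (negative if they overlap),
  // computed as in the seed-merging loop above
  const float dXbox = (a.meanX > b.meanX) ? a.minX - b.maxX : b.minX - a.maxX;
  const float dYbox = (a.meanY > b.meanY) ? a.minY - b.maxY : b.minY - a.maxY;
  return dXbox < dXclusBoxMax && dYbox < dYclusBoxMax;
}
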
510 
511 
512 std::vector< std::vector<const CSCRecHit2D*> > CSCSegAlgoST::chainHits(const CSCChamber* aChamber, const ChamberHitContainer & rechits) {
513 
514  std::vector<ChamberHitContainer> rechits_chains; // this is a collection of groups of rechits
515 
516 
517  std::vector<const CSCRecHit2D*> temp;
518 
519  std::vector< ChamberHitContainer > seeds;
520 
521  std::vector <bool> usedCluster;
522 
523  // split rechits into subvectors and return vector of vectors:
524  // Loop over rechits
525  // Create one seed per hit
526  //std::cout<<" rechits.size() = "<<rechits.size()<<std::endl;
527  for(unsigned int i = 0; i < rechits.size(); ++i) {
528  temp.clear();
529  temp.push_back(rechits[i]);
530  seeds.push_back(temp);
531  usedCluster.push_back(false);
532  }
 533  // Only ME1/1A can have ganged strips, so the chamber-type name test below is redundant but kept for clarity
534  bool gangedME11a = false;
535  if ( ("ME1/a" == aChamber->specs()->chamberTypeName()) && aChamber->specs()->gangedStrips() ){
536  // if ( aChamber->specs()->gangedStrips() ){
537  gangedME11a = true;
538  }
539  // merge chains that are too close ("touch" each other)
540  for(size_t NNN = 0; NNN < seeds.size(); ++NNN) {
541  for(size_t MMM = NNN+1; MMM < seeds.size(); ++MMM) {
542  if(usedCluster[MMM] || usedCluster[NNN]){
543  continue;
544  }
 545  // everything lies in how we define "good";
 546  // try not to "cluster" the hits but to "chain" them;
 547  // this still does the clustering but does a better job
 548  // for inclined tracks (not clustering them together;
 549  // crossed tracks would still be clustered together)
 550  // 22.12.09: In fact it is not much different
 551  // from the "clustering", we just introduce another
 552  // variable in the game - Z. And it makes sense
 553  // to re-introduce Y (or actually the wire group number)
 554  // in a similar way as for the strip number - see
 555  // the code below.
556  bool goodToMerge = isGoodToMerge(gangedME11a, seeds[NNN], seeds[MMM]);
557  if(goodToMerge){
558  // merge chains!
559  // merge by adding seed NNN to seed MMM and erasing seed NNN
560 
561  // add seed NNN to MMM (lower to larger number)
562  seeds[MMM].insert(seeds[MMM].end(),seeds[NNN].begin(),seeds[NNN].end());
563 
564  // mark seed NNN as used
565  usedCluster[NNN] = true;
 566  // we have merged seed NNN into the higher-numbered seed MMM - need to continue to
 567  // the next seed (NNN+1)
568  break;
569  }
570 
571  }
572  }
573 
574  // hand over the final seeds to the output
 575  // would be more elegant if we could do the above step by erasing the merged
 576  // seeds directly, rather than just marking them as used
577 
578  for(size_t NNN = 0; NNN < seeds.size(); ++NNN) {
579  if(usedCluster[NNN]) continue; //skip seeds that have been marked as used up in merging
580  rechits_chains.push_back(seeds[NNN]);
581  }
582 
583  //***************************************************************
584 
585  return rechits_chains;
586 }
587 
588 bool CSCSegAlgoST::isGoodToMerge(bool gangedME11a, ChamberHitContainer & newChain, ChamberHitContainer & oldChain) {
589  for(size_t iRH_new = 0;iRH_new<newChain.size();++iRH_new){
590  int layer_new = newChain[iRH_new]->cscDetId().layer()-1;
591  int middleStrip_new = newChain[iRH_new]->nStrips()/2;
592  int centralStrip_new = newChain[iRH_new]->channels(middleStrip_new);
593  int centralWire_new = newChain[iRH_new]->hitWire();
594  bool layerRequirementOK = false;
595  bool stripRequirementOK = false;
596  bool wireRequirementOK = false;
597  bool goodToMerge = false;
598  for(size_t iRH_old = 0;iRH_old<oldChain.size();++iRH_old){
599  int layer_old = oldChain[iRH_old]->cscDetId().layer()-1;
600  int middleStrip_old = oldChain[iRH_old]->nStrips()/2;
601  int centralStrip_old = oldChain[iRH_old]->channels(middleStrip_old);
602  int centralWire_old = oldChain[iRH_old]->hitWire();
603 
604  // to be chained, two hits need to be in neighbouring layers...
 605  // or better, allow a few missing layers (up to 3, to avoid inefficiencies);
 606  // however we'll not make an angle correction because it
 607  // worsens the situation in some of the "regular" cases
608  // (not making the correction means that the conditions for
609  // forming a cluster are different if we have missing layers -
610  // this could affect events at the boundaries )
611  if(layer_new==layer_old+1 || layer_new==layer_old-1 ||
612  layer_new==layer_old+2 || layer_new==layer_old-2 ||
613  layer_new==layer_old+3 || layer_new==layer_old-3 ||
614  layer_new==layer_old+4 || layer_new==layer_old-4 ){
615  layerRequirementOK = true;
616  }
617  int allStrips = 48;
 618  // to be chained, two hits need to be "close" in strip number (one could do it in phi
 619  // but it doesn't really matter); let "close" mean up to 2 strips (3?) -
 620  // this is more than what the CLCT readout patterns allow
621  if(centralStrip_new==centralStrip_old ||
622  centralStrip_new==centralStrip_old+1 || centralStrip_new==centralStrip_old-1 ||
623  centralStrip_new==centralStrip_old+2 || centralStrip_new==centralStrip_old-2){
624  stripRequirementOK = true;
625  }
626  // same for wires (and ALCT patterns)
627  if(centralWire_new==centralWire_old ||
628  centralWire_new==centralWire_old+1 || centralWire_new==centralWire_old-1 ||
629  centralWire_new==centralWire_old+2 || centralWire_new==centralWire_old-2){
630  wireRequirementOK = true;
631  }
632 
633  if(gangedME11a){
634  if(centralStrip_new==centralStrip_old+1-allStrips || centralStrip_new==centralStrip_old-1-allStrips ||
635  centralStrip_new==centralStrip_old+2-allStrips || centralStrip_new==centralStrip_old-2-allStrips ||
636  centralStrip_new==centralStrip_old+1+allStrips || centralStrip_new==centralStrip_old-1+allStrips ||
637  centralStrip_new==centralStrip_old+2+allStrips || centralStrip_new==centralStrip_old-2+allStrips){
638  stripRequirementOK = true;
639  }
640  }
641  if(layerRequirementOK && stripRequirementOK && wireRequirementOK){
642  goodToMerge = true;
643  return goodToMerge;
644  }
645  }
646  }
647  return false;
648 }
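
A simplified sketch, not part of this file, of the proximity requirements tested in isGoodToMerge(), written for a single pair of hits with plain integers; the method above accumulates the three flags over all hit pairs of the two chains, so different pairs may satisfy different requirements. Layers must differ by 1 to 4, central strips by at most 2 (with the +/-48 aliases accepted for ganged ME1/1a), and central wires by at most 2. All names are hypothetical.

#include <cstdlib>  // std::abs

inline bool hitsChainableSketch(int layerA, int layerB,
                                int stripA, int stripB,
                                int wireA, int wireB,
                                bool gangedME11a) {
  const int dLayer = std::abs(layerA - layerB);
  const int dStrip = std::abs(stripA - stripB);
  const int dWire  = std::abs(wireA - wireB);

  const bool layerOK = (dLayer >= 1 && dLayer <= 4);
  bool stripOK = (dStrip <= 2);
  if (gangedME11a) {
    // ganged ME1/1a strips repeat every 48 channels, so the +/-48 aliases of a
    // 1- or 2-strip difference are accepted as well
    const int alias = std::abs(dStrip - 48);
    stripOK = stripOK || (alias >= 1 && alias <= 2);
  }
  const bool wireOK = (dWire <= 2);

  return layerOK && stripOK && wireOK;
}
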
649 
650 
651 
652 
653 double CSCSegAlgoST::theWeight(double coordinate_1, double coordinate_2, double coordinate_3, float layer_1, float layer_2, float layer_3) {
654  double sub_weight = 0;
655  sub_weight = fabs(
656  ( (coordinate_2 - coordinate_3) / (layer_2 - layer_3) ) -
657  ( (coordinate_1 - coordinate_2) / (layer_1 - layer_2) )
658  );
659  return sub_weight;
660 }
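
// Illustrative aside: theWeight() returns the absolute difference of the two local
// slopes defined by three hits, |(c2-c3)/(l2-l3) - (c1-c2)/(l1-l2)|. For perfectly
// collinear hits it vanishes, e.g.
//   theWeight(3.0, 2.0, 1.0, 3., 2., 1.)  ->  |1.0 - 1.0| = 0.0
//   theWeight(3.5, 2.0, 1.0, 3., 2., 1.)  ->  |1.0 - 1.5| = 0.5
// Summing this second-difference along a proto-segment therefore penalises kinks;
// this is how the weight_A / weight_B sums in buildSegments() below are accumulated.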
661 
662 /*
663  * This algorithm uses a Minimum Spanning Tree (ST) approach to build
664  * endcap muon track segments from the rechits in the 6 layers of a CSC.
665  */
666 std::vector<CSCSegment> CSCSegAlgoST::buildSegments(const ChamberHitContainer& rechits) {
667 
668  // Clear buffer for segment vector
669  std::vector<CSCSegment> segmentInChamber;
670  segmentInChamber.clear(); // list of final segments
671 
672  // CSC Ring;
673  unsigned int thering = 999;
674  unsigned int thestation = 999;
675  //unsigned int thecham = 999;
676 
677  std::vector<int> hits_onLayerNumber(6);
678 
679  unsigned int UpperLimit = maxRecHitsInCluster;
680  if (int(rechits.size()) < minHitsPerSegment) return segmentInChamber;
681 
682  for(int iarray = 0; iarray <6; ++iarray) { // magic number 6: number of layers in CSC chamber - not gonna change :)
683  PAhits_onLayer[iarray].clear();
684  hits_onLayerNumber[iarray] = 0;
685  }
686 
687  chosen_Psegments.clear();
688  chosen_weight_A.clear();
689 
690  Psegments.clear();
691  Psegments_noLx.clear();
692  Psegments_noL1.clear();
693  Psegments_noL2.clear();
694  Psegments_noL3.clear();
695  Psegments_noL4.clear();
696  Psegments_noL5.clear();
697  Psegments_noL6.clear();
698 
699  Psegments_hits.clear();
700 
701  weight_A.clear();
702  weight_noLx_A.clear();
703  weight_noL1_A.clear();
704  weight_noL2_A.clear();
705  weight_noL3_A.clear();
706  weight_noL4_A.clear();
707  weight_noL5_A.clear();
708  weight_noL6_A.clear();
709 
710  weight_B.clear();
711  weight_noL1_B.clear();
712  weight_noL2_B.clear();
713  weight_noL3_B.clear();
714  weight_noL4_B.clear();
715  weight_noL5_B.clear();
716  weight_noL6_B.clear();
717 
718  curv_A.clear();
719  curv_noL1_A.clear();
720  curv_noL2_A.clear();
721  curv_noL3_A.clear();
722  curv_noL4_A.clear();
723  curv_noL5_A.clear();
724  curv_noL6_A.clear();
725 
726  // definition of middle layer for n-hit segment
727  int midlayer_pointer[6] = {0,0,2,3,3,4};
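// Note: for an n-hit proto-segment (n = n_layers_occupied_tot), end()-midlayer_pointer[n-1]
// points at a hit near the middle of the candidate; together with the first and the last
// hit it is used further below to form the curvature estimate curv_A.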
728 
729  // int n_layers_missed_tot = 0;
730  int n_layers_occupied_tot = 0;
731  int n_layers_processed = 0;
732 
733  float min_weight_A = 99999.9;
734  float min_weight_noLx_A = 99999.9;
735 
736  //float best_weight_B = -1.;
737  //float best_weight_noLx_B = -1.;
738 
739  //float best_curv_A = -1.;
740  //float best_curv_noLx_A = -1.;
741 
742  int best_pseg = -1;
743  int best_noLx_pseg = -1;
744  int best_Layer_noLx = -1;
745 
746  //************************************************************************;
747  //*** Start segment building *****************************************;
748  //************************************************************************;
749 
750  // Determine how many layers with hits we have
751  // Fill all hits into the layer hit container:
752 
753  // Have 2 standard arrays: one giving the number of hits per layer.
754  // The other the corresponding hits.
755 
756  // Loop all available hits, count hits per layer and fill the hits into array by layer
757  for(size_t M = 0; M < rechits.size(); ++M) {
758  // add hits to array per layer and count hits per layer:
759  hits_onLayerNumber[ rechits[M]->cscDetId().layer()-1 ] += 1;
760  if(hits_onLayerNumber[ rechits[M]->cscDetId().layer()-1 ] == 1 ) n_layers_occupied_tot += 1;
761  // add hits to vector in array
762  PAhits_onLayer[rechits[M]->cscDetId().layer()-1] .push_back(rechits[M]);
763  }
764 
765  // We have now counted the hits per layer and filled pointers to the hits into an array
766 
767  int tothits = 0;
768  int maxhits = 0;
769  int nexthits = 0;
770  int maxlayer = -1;
771  int nextlayer = -1;
772 
773  for(size_t i = 0; i< hits_onLayerNumber.size(); ++i){
774  //std::cout<<"We have "<<hits_onLayerNumber[i]<<" hits on layer "<<i+1<<std::endl;
775  tothits += hits_onLayerNumber[i];
776  if (hits_onLayerNumber[i] > maxhits) {
777  nextlayer = maxlayer;
778  nexthits = maxhits;
779  maxlayer = i;
780  maxhits = hits_onLayerNumber[i];
781  }
782  else if (hits_onLayerNumber[i] > nexthits) {
783  nextlayer = i;
784  nexthits = hits_onLayerNumber[i];
785  }
786  }
787 
788 
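// If this precluster holds more hits than maxRecHitsInCluster, try to recover by
// dropping the busiest layer (and, if still too many, the next-busiest one), but only
// while more than four layers are occupied; if that is still not enough, either hand
// the hits to the showering reconstruction or give up (see below).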
789  if (tothits > (int)UpperLimit) {
790  if (n_layers_occupied_tot > 4) {
791  tothits = tothits - hits_onLayerNumber[maxlayer];
792  n_layers_occupied_tot = n_layers_occupied_tot - 1;
793  PAhits_onLayer[maxlayer].clear();
794  hits_onLayerNumber[maxlayer] = 0;
795  }
796  }
797 
798  if (tothits > (int)UpperLimit) {
799  if (n_layers_occupied_tot > 4) {
800  tothits = tothits - hits_onLayerNumber[nextlayer];
801  n_layers_occupied_tot = n_layers_occupied_tot - 1;
802  PAhits_onLayer[nextlayer].clear();
803  hits_onLayerNumber[nextlayer] = 0;
804  }
805  }
806 
807  if (tothits > (int)UpperLimit){
808 
809  //++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
810  // Showering muon - returns nothing if chi2 == -1 (see comment in SegAlgoShowering)
811  //++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
812  if (useShowering) {
813  CSCSegment segShower = showering_->showerSeg(theChamber, rechits);
814  if( debug ) dumpSegment( segShower );
815  // Make sure have at least 3 hits...
816  if ( segShower.nRecHits() < 3 ) return segmentInChamber;
817  if ( segShower.chi2() == -1 ) return segmentInChamber;
818  segmentInChamber.push_back(segShower);
819  return segmentInChamber;
820  }
821  else {
822  // LogTrace("CSCSegment|CSC") << "[CSCSegAlgoST::buildSegments] No. of rechits in the cluster/chamber > "
823  // << UpperLimit << " ... Segment finding in the cluster/chamber canceled!";
824  CSCDetId id = rechits[0]->cscDetId();
825  edm::LogVerbatim("CSCSegment|CSC") << "[CSCSegAlgoST::buildSegments] No. of rechits in the cluster/chamber > "
826  << UpperLimit << " ... Segment finding canceled in CSC" << id;
827  return segmentInChamber;
828  }
829  }
830 
831  // Find out which station, ring and chamber we are in
 832  // Used to choose station/ring dependent y-weight cuts
833 
834  if( !rechits.empty() ) {
835  thering = rechits[0]->cscDetId().ring();
836  thestation = rechits[0]->cscDetId().station();
837  //thecham = rechits[0]->cscDetId().chamber();
838  }
839 
840  // std::cout<<"We are in Station/ring/chamber: "<<thestation <<" "<< thering<<" "<< thecham<<std::endl;
841 
842  // Cut-off parameter - don't reconstruct segments with less than X hits
843  if( n_layers_occupied_tot < minHitsPerSegment ) {
844  return segmentInChamber;
845  }
846 
847  // Start building all possible hit combinations:
848 
849  // loop over the six chamber layers and form segment candidates from the available hits:
850 
851  for(int layer = 0; layer < 6; ++layer) {
852 
853  // *****************************************************************
854  // *** Set missed layer counter here (not currently implemented) ***
855  // *****************************************************************
856  // if( PAhits_onLayer[layer].size() == 0 ) {
857  // n_layers_missed_tot += 1;
858  // }
859 
860  if( !PAhits_onLayer[layer].empty() ) {
861  n_layers_processed += 1;
862  }
863 
 864  // Save the number of protosegments present before the hits of the current layer are added
865  int orig_number_of_psegs = Psegments.size();
866  int orig_number_of_noL1_psegs = Psegments_noL1.size();
867  int orig_number_of_noL2_psegs = Psegments_noL2.size();
868  int orig_number_of_noL3_psegs = Psegments_noL3.size();
869  int orig_number_of_noL4_psegs = Psegments_noL4.size();
870  int orig_number_of_noL5_psegs = Psegments_noL5.size();
871  int orig_number_of_noL6_psegs = Psegments_noL6.size();
872 
873  // loop over the hits on the layer and initiate protosegments or add hits to protosegments
874  for(int hit = 0; hit < int(PAhits_onLayer[layer].size()); ++hit) { // loop all hits on the Layer number "layer"
875 
876  // create protosegments from all hits on the first layer with hits
877  if( orig_number_of_psegs == 0 ) { // would be faster to turn this around - ask for "orig_number_of_psegs != 0"
878 
879  Psegments_hits.push_back(PAhits_onLayer[layer][hit]);
880 
881  Psegments.push_back(Psegments_hits);
882  Psegments_noL6.push_back(Psegments_hits);
883  Psegments_noL5.push_back(Psegments_hits);
884  Psegments_noL4.push_back(Psegments_hits);
885  Psegments_noL3.push_back(Psegments_hits);
886  Psegments_noL2.push_back(Psegments_hits);
887 
888  // Initialize weights corresponding to this segment for first hit (with 0)
889 
890  curv_A.push_back(0.0);
891  curv_noL6_A.push_back(0.0);
892  curv_noL5_A.push_back(0.0);
893  curv_noL4_A.push_back(0.0);
894  curv_noL3_A.push_back(0.0);
895  curv_noL2_A.push_back(0.0);
896 
897  weight_A.push_back(0.0);
898  weight_noL6_A.push_back(0.0);
899  weight_noL5_A.push_back(0.0);
900  weight_noL4_A.push_back(0.0);
901  weight_noL3_A.push_back(0.0);
902  weight_noL2_A.push_back(0.0);
903 
904  weight_B.push_back(0.0);
905  weight_noL6_B.push_back(0.0);
906  weight_noL5_B.push_back(0.0);
907  weight_noL4_B.push_back(0.0);
908  weight_noL3_B.push_back(0.0);
909  weight_noL2_B.push_back(0.0);
910 
911  // reset array for next hit on next layer
912  Psegments_hits .clear();
913  }
914  else {
915  if( orig_number_of_noL1_psegs == 0 ) {
916 
917  Psegments_hits.push_back(PAhits_onLayer[layer][hit]);
918 
919  Psegments_noL1.push_back(Psegments_hits);
920 
921  // Initialize weight corresponding to this segment for first hit (with 0)
922 
923  curv_noL1_A.push_back(0.0);
924 
925  weight_noL1_A.push_back(0.0);
926 
927  weight_noL1_B.push_back(0.0);
928 
929  // reset array for next hit on next layer
930  Psegments_hits .clear();
931 
932  }
933 
 934  // loop over the protosegments and create a new protosegment for each additional hit (i.e. hits-1 clones) on this layer
935 
936  for( int pseg = 0; pseg < orig_number_of_psegs; ++pseg ) {
937 
938  int pseg_pos = (pseg)+((hit)*orig_number_of_psegs);
939  int pseg_noL1_pos = (pseg)+((hit)*orig_number_of_noL1_psegs);
940  int pseg_noL2_pos = (pseg)+((hit)*orig_number_of_noL2_psegs);
941  int pseg_noL3_pos = (pseg)+((hit)*orig_number_of_noL3_psegs);
942  int pseg_noL4_pos = (pseg)+((hit)*orig_number_of_noL4_psegs);
943  int pseg_noL5_pos = (pseg)+((hit)*orig_number_of_noL5_psegs);
944  int pseg_noL6_pos = (pseg)+((hit)*orig_number_of_noL6_psegs);
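// Index bookkeeping: clones of the proto-segments are appended in blocks, one block per
// additional hit on this layer, so the copy of proto-segment 'pseg' that belongs to hit
// 'hit' sits at position pseg + hit*orig_number_of_psegs (and analogously for each of
// the noLx collections, each with its own original count).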
945 
946  // - Loop all psegs.
947  // - If not last hit, clone existing protosegments (PAhits_onLayer[layer].size()-1) times
948  // - then add the new hits
949 
950  if( ! (hit == int(PAhits_onLayer[layer].size()-1)) ) { // not the last hit - prepare (copy) new protosegments for the following hits
951  // clone psegs (to add next hits or last hit on layer):
952 
953  Psegments.push_back(Psegments[ pseg_pos ]);
954  if (n_layers_processed != 2 && pseg < orig_number_of_noL1_psegs) Psegments_noL1.push_back(Psegments_noL1[ pseg_noL1_pos ]);
955  if (n_layers_processed != 2 && pseg < orig_number_of_noL2_psegs) Psegments_noL2.push_back(Psegments_noL2[ pseg_noL2_pos ]);
956  if (n_layers_processed != 3 && pseg < orig_number_of_noL3_psegs) Psegments_noL3.push_back(Psegments_noL3[ pseg_noL3_pos ]);
957  if (n_layers_processed != 4 && pseg < orig_number_of_noL4_psegs) Psegments_noL4.push_back(Psegments_noL4[ pseg_noL4_pos ]);
958  if (n_layers_processed != 5 && pseg < orig_number_of_noL5_psegs) Psegments_noL5.push_back(Psegments_noL5[ pseg_noL5_pos ]);
959  if (n_layers_processed != 6 && pseg < orig_number_of_noL6_psegs) Psegments_noL6.push_back(Psegments_noL6[ pseg_noL6_pos ]);
960  // clone weight corresponding to this segment too
961  weight_A.push_back(weight_A[ pseg_pos ]);
962  if (n_layers_processed != 2 && pseg < orig_number_of_noL1_psegs) weight_noL1_A.push_back(weight_noL1_A[ pseg_noL1_pos ]);
963  if (n_layers_processed != 2 && pseg < orig_number_of_noL2_psegs) weight_noL2_A.push_back(weight_noL2_A[ pseg_noL2_pos ]);
964  if (n_layers_processed != 3 && pseg < orig_number_of_noL3_psegs) weight_noL3_A.push_back(weight_noL3_A[ pseg_noL3_pos ]);
965  if (n_layers_processed != 4 && pseg < orig_number_of_noL4_psegs) weight_noL4_A.push_back(weight_noL4_A[ pseg_noL4_pos ]);
966  if (n_layers_processed != 5 && pseg < orig_number_of_noL5_psegs) weight_noL5_A.push_back(weight_noL5_A[ pseg_noL5_pos ]);
967  if (n_layers_processed != 6 && pseg < orig_number_of_noL6_psegs) weight_noL6_A.push_back(weight_noL6_A[ pseg_noL6_pos ]);
968  // clone curvature variable corresponding to this segment too
969  curv_A.push_back(curv_A[ pseg_pos ]);
970  if (n_layers_processed != 2 && pseg < orig_number_of_noL1_psegs) curv_noL1_A.push_back(curv_noL1_A[ pseg_noL1_pos ]);
971  if (n_layers_processed != 2 && pseg < orig_number_of_noL2_psegs) curv_noL2_A.push_back(curv_noL2_A[ pseg_noL2_pos ]);
972  if (n_layers_processed != 3 && pseg < orig_number_of_noL3_psegs) curv_noL3_A.push_back(curv_noL3_A[ pseg_noL3_pos ]);
973  if (n_layers_processed != 4 && pseg < orig_number_of_noL4_psegs) curv_noL4_A.push_back(curv_noL4_A[ pseg_noL4_pos ]);
974  if (n_layers_processed != 5 && pseg < orig_number_of_noL5_psegs) curv_noL5_A.push_back(curv_noL5_A[ pseg_noL5_pos ]);
975  if (n_layers_processed != 6 && pseg < orig_number_of_noL6_psegs) curv_noL6_A.push_back(curv_noL6_A[ pseg_noL6_pos ]);
976  // clone "y"-weight corresponding to this segment too
977  weight_B.push_back(weight_B[ pseg_pos ]);
978  if (n_layers_processed != 2 && pseg < orig_number_of_noL1_psegs) weight_noL1_B.push_back(weight_noL1_B[ pseg_noL1_pos ]);
979  if (n_layers_processed != 2 && pseg < orig_number_of_noL2_psegs) weight_noL2_B.push_back(weight_noL2_B[ pseg_noL2_pos ]);
980  if (n_layers_processed != 3 && pseg < orig_number_of_noL3_psegs) weight_noL3_B.push_back(weight_noL3_B[ pseg_noL3_pos ]);
981  if (n_layers_processed != 4 && pseg < orig_number_of_noL4_psegs) weight_noL4_B.push_back(weight_noL4_B[ pseg_noL4_pos ]);
982  if (n_layers_processed != 5 && pseg < orig_number_of_noL5_psegs) weight_noL5_B.push_back(weight_noL5_B[ pseg_noL5_pos ]);
983  if (n_layers_processed != 6 && pseg < orig_number_of_noL6_psegs) weight_noL6_B.push_back(weight_noL6_B[ pseg_noL6_pos ]);
984  }
985  // add hits to original pseg:
986  Psegments[ pseg_pos ].push_back(PAhits_onLayer[ layer ][ hit ]);
987  if (n_layers_processed != 2 && pseg < orig_number_of_noL1_psegs) Psegments_noL1[ pseg_noL1_pos ].push_back(PAhits_onLayer[ layer ][ hit ]);
988  if (n_layers_processed != 2 && pseg < orig_number_of_noL2_psegs) Psegments_noL2[ pseg_noL2_pos ].push_back(PAhits_onLayer[ layer ][ hit ]);
989  if (n_layers_processed != 3 && pseg < orig_number_of_noL3_psegs) Psegments_noL3[ pseg_noL3_pos ].push_back(PAhits_onLayer[ layer ][ hit ]);
990  if (n_layers_processed != 4 && pseg < orig_number_of_noL4_psegs) Psegments_noL4[ pseg_noL4_pos ].push_back(PAhits_onLayer[ layer ][ hit ]);
991  if (n_layers_processed != 5 && pseg < orig_number_of_noL5_psegs) Psegments_noL5[ pseg_noL5_pos ].push_back(PAhits_onLayer[ layer ][ hit ]);
992  if (n_layers_processed != 6 && pseg < orig_number_of_noL6_psegs) Psegments_noL6[ pseg_noL6_pos ].push_back(PAhits_onLayer[ layer ][ hit ]);
993 
994  // calculate/update the weight (only for >2 hits on psegment):
995 
996  if( Psegments[ pseg_pos ].size() > 2 ) {
997 
 998  // looks more exciting than it is: here the weight is updated. It is the difference between the slope (dx per layer)
 999  // of the last two hits and that of the previous hit pair. Please refer to twiki page XXXX or CMS Note YYY (and use layer_distance)
1000 
1001  weight_A[ pseg_pos ] += theWeight(
1002  (*(Psegments[ pseg_pos ].end()-1 ))->localPosition().x(),
1003  (*(Psegments[ pseg_pos ].end()-2 ))->localPosition().x(),
1004  (*(Psegments[ pseg_pos ].end()-3 ))->localPosition().x(),
1005  float((*(Psegments[ pseg_pos ].end()-1))->cscDetId().layer()),
1006  float((*(Psegments[ pseg_pos ].end()-2))->cscDetId().layer()),
1007  float((*(Psegments[ pseg_pos ].end()-3))->cscDetId().layer())
1008  );
1009 
1010  weight_B[ pseg_pos ] += theWeight(
1011  (*(Psegments[ pseg_pos ].end()-1 ))->localPosition().y(),
1012  (*(Psegments[ pseg_pos ].end()-2 ))->localPosition().y(),
1013  (*(Psegments[ pseg_pos ].end()-3 ))->localPosition().y(),
1014  float((*(Psegments[ pseg_pos ].end()-1))->cscDetId().layer()),
1015  float((*(Psegments[ pseg_pos ].end()-2))->cscDetId().layer()),
1016  float((*(Psegments[ pseg_pos ].end()-3))->cscDetId().layer())
1017  );
1018 
1019  // if we have picked up the last hit go looking for pseg with the lowest (and second lowest?) weight
1020 
1021  if(int(Psegments[ pseg_pos ].size()) == n_layers_occupied_tot) {
1022 
1023  curv_A[ pseg_pos ] += theWeight(
1024  (*(Psegments[ pseg_pos ].end()-1 ))->localPosition().x(),
1025  (*(Psegments[ pseg_pos ].end()-midlayer_pointer[n_layers_occupied_tot-1] ))->localPosition().x(),
1026  (*(Psegments[ pseg_pos ].end()-n_layers_occupied_tot ))->localPosition().x(),
1027  float((*(Psegments[ pseg_pos ].end()-1))->cscDetId().layer()),
1028  float((*(Psegments[ pseg_pos ].end()-midlayer_pointer[n_layers_occupied_tot-1] ))->cscDetId().layer()),
1029  float((*(Psegments[ pseg_pos ].end()-n_layers_occupied_tot ))->cscDetId().layer())
1030  );
1031 
1032  if (curv_A[ pseg_pos ] > curvePenaltyThreshold) weight_A[ pseg_pos ] = weight_A[ pseg_pos ] * curvePenalty;
1033 
1034  if (weight_B[ pseg_pos ] > a_yweightPenaltyThreshold[thestation][thering]) weight_A[ pseg_pos ] = weight_A[ pseg_pos ] * yweightPenalty;
1035 
1036  if (weight_A[ pseg_pos ] < min_weight_A ) {
1037  min_weight_A = weight_A[ pseg_pos ];
1038  //best_weight_B = weight_B[ pseg_pos ];
1039  //best_curv_A = curv_A[ pseg_pos ];
1040  best_pseg = pseg_pos ;
1041  }
1042 
1043  }
1044 
1045  // alternative: fill map with weight and pseg (which is already ordered)? Seems a very good tool to go looking for segments from.
1046  // As I understand, the segments would be inserted according to their weight, so the list would "automatically" be sorted.
1047 
1048  }
1049 
1050  if ( n_layers_occupied_tot > 3 ) {
1051  if (pseg < orig_number_of_noL1_psegs && (n_layers_processed != 2)) {
1052  if(( Psegments_noL1[ pseg_noL1_pos ].size() > 2 ) ) {
1053 
 1054  // looks more exciting than it is: here the weight is updated. It is the difference between the slope (dx per layer)
 1055  // of the last two hits and that of the previous hit pair. Please refer to twiki page XXXX or CMS Note YYY (and use layer_distance)
1056 
1057  weight_noL1_A[ pseg_noL1_pos ] += theWeight(
1058  (*(Psegments_noL1[ pseg_noL1_pos ].end()-1 ))->localPosition().x(),
1059  (*(Psegments_noL1[ pseg_noL1_pos ].end()-2 ))->localPosition().x(),
1060  (*(Psegments_noL1[ pseg_noL1_pos ].end()-3 ))->localPosition().x(),
1061  float((*(Psegments_noL1[ pseg_noL1_pos ].end()-1))->cscDetId().layer()),
1062  float((*(Psegments_noL1[ pseg_noL1_pos ].end()-2))->cscDetId().layer()),
1063  float((*(Psegments_noL1[ pseg_noL1_pos ].end()-3))->cscDetId().layer())
1064  );
1065 
1066  weight_noL1_B[ pseg_noL1_pos ] += theWeight(
1067  (*(Psegments_noL1[ pseg_noL1_pos ].end()-1 ))->localPosition().y(),
1068  (*(Psegments_noL1[ pseg_noL1_pos ].end()-2 ))->localPosition().y(),
1069  (*(Psegments_noL1[ pseg_noL1_pos ].end()-3 ))->localPosition().y(),
1070  float((*(Psegments_noL1[ pseg_noL1_pos ].end()-1))->cscDetId().layer()),
1071  float((*(Psegments_noL1[ pseg_noL1_pos ].end()-2))->cscDetId().layer()),
1072  float((*(Psegments_noL1[ pseg_noL1_pos ].end()-3))->cscDetId().layer())
1073  );
1074 
1075  //if we have picked up the last hit go looking for pseg with the lowest (and second lowest?) weight
1076 
1077  if(int(Psegments_noL1[ pseg_noL1_pos ].size()) == n_layers_occupied_tot -1 ) {
1078 
1079  curv_noL1_A[ pseg_noL1_pos ] += theWeight(
1080  (*(Psegments_noL1[ pseg_noL1_pos ].end()-1 ))->localPosition().x(),
1081  (*(Psegments_noL1[ pseg_noL1_pos ].end()-midlayer_pointer[n_layers_occupied_tot-2] ))->localPosition().x(),
1082  (*(Psegments_noL1[ pseg_noL1_pos ].end()-(n_layers_occupied_tot-1) ))->localPosition().x(),
1083  float((*(Psegments_noL1[ pseg_noL1_pos ].end()-1 ))->cscDetId().layer()),
1084  float((*(Psegments_noL1[ pseg_noL1_pos ].end()-midlayer_pointer[n_layers_occupied_tot-2] ))->cscDetId().layer()),
1085  float((*(Psegments_noL1[ pseg_noL1_pos ].end()-(n_layers_occupied_tot-1) ))->cscDetId().layer())
1086  );
1087 
1088  if (curv_noL1_A[ pseg_noL1_pos ] > curvePenaltyThreshold) weight_noL1_A[ pseg_noL1_pos ] = weight_noL1_A[ pseg_noL1_pos ] * curvePenalty;
1089 
1090  if (weight_noL1_B[ pseg_noL1_pos ] > a_yweightPenaltyThreshold[thestation][thering])
1091  weight_noL1_A[ pseg_noL1_pos ] = weight_noL1_A[ pseg_noL1_pos ] * yweightPenalty;
1092 
1093  if (weight_noL1_A[ pseg_noL1_pos ] < min_weight_noLx_A ) {
1094  min_weight_noLx_A = weight_noL1_A[ pseg_noL1_pos ];
1095  //best_weight_noLx_B = weight_noL1_B[ pseg_noL1_pos ];
1096  //best_curv_noLx_A = curv_noL1_A[ pseg_noL1_pos ];
1097  best_noLx_pseg = pseg_noL1_pos;
1098  best_Layer_noLx = 1;
1099  }
1100 
1101  }
1102 
1103  // alternative: fill map with weight and pseg (which is already ordered)? Seems a very good tool to go looking for segments from.
1104  // As I understand, the segments would be inserted according to their weight, so the list would "automatically" be sorted.
1105 
1106  }
1107  }
1108  }
1109 
1110  if ( n_layers_occupied_tot > 3 ) {
1111  if (pseg < orig_number_of_noL2_psegs && ( n_layers_processed != 2 )) {
1112  if(( Psegments_noL2[ pseg_noL2_pos ].size() > 2 )) {
1113 
 1114  // looks more exciting than it is: here the weight is updated. It is the difference between the slope (dx per layer)
 1115  // of the last two hits and that of the previous hit pair. Please refer to twiki page XXXX or CMS Note YYY (and use layer_distance)
1116 
1117  weight_noL2_A[ pseg_noL2_pos ] += theWeight(
1118  (*(Psegments_noL2[ pseg_noL2_pos ].end()-1 ))->localPosition().x(),
1119  (*(Psegments_noL2[ pseg_noL2_pos ].end()-2 ))->localPosition().x(),
1120  (*(Psegments_noL2[ pseg_noL2_pos ].end()-3 ))->localPosition().x(),
1121  float((*(Psegments_noL2[ pseg_noL2_pos ].end()-1))->cscDetId().layer()),
1122  float((*(Psegments_noL2[ pseg_noL2_pos ].end()-2))->cscDetId().layer()),
1123  float((*(Psegments_noL2[ pseg_noL2_pos ].end()-3))->cscDetId().layer())
1124  );
1125 
1126  weight_noL2_B[ pseg_noL2_pos ] += theWeight(
1127  (*(Psegments_noL2[ pseg_noL2_pos ].end()-1 ))->localPosition().y(),
1128  (*(Psegments_noL2[ pseg_noL2_pos ].end()-2 ))->localPosition().y(),
1129  (*(Psegments_noL2[ pseg_noL2_pos ].end()-3 ))->localPosition().y(),
1130  float((*(Psegments_noL2[ pseg_noL2_pos ].end()-1))->cscDetId().layer()),
1131  float((*(Psegments_noL2[ pseg_noL2_pos ].end()-2))->cscDetId().layer()),
1132  float((*(Psegments_noL2[ pseg_noL2_pos ].end()-3))->cscDetId().layer())
1133  );
1134 
1135  //if we have picked up the last hit go looking for pseg with the lowest (and second lowest?) weight
1136 
1137  if(int(Psegments_noL2[ pseg_noL2_pos ].size()) == n_layers_occupied_tot -1 ) {
1138 
1139  curv_noL2_A[ pseg_noL2_pos ] += theWeight(
1140  (*(Psegments_noL2[ pseg_noL2_pos ].end()-1 ))->localPosition().x(),
1141  (*(Psegments_noL2[ pseg_noL2_pos ].end()-midlayer_pointer[n_layers_occupied_tot-2] ))->localPosition().x(),
1142  (*(Psegments_noL2[ pseg_noL2_pos ].end()-(n_layers_occupied_tot-1) ))->localPosition().x(),
1143  float((*(Psegments_noL2[ pseg_noL2_pos ].end()-1 ))->cscDetId().layer()),
1144  float((*(Psegments_noL2[ pseg_noL2_pos ].end()-midlayer_pointer[n_layers_occupied_tot-2] ))->cscDetId().layer()),
1145  float((*(Psegments_noL2[ pseg_noL2_pos ].end()-(n_layers_occupied_tot-1) ))->cscDetId().layer())
1146  );
1147 
1148  if (curv_noL2_A[ pseg_noL2_pos ] > curvePenaltyThreshold) weight_noL2_A[ pseg_noL2_pos ] = weight_noL2_A[ pseg_noL2_pos ] * curvePenalty;
1149 
1150  if (weight_noL2_B[ pseg_noL2_pos ] > a_yweightPenaltyThreshold[thestation][thering])
1151  weight_noL2_A[ pseg_noL2_pos ] = weight_noL2_A[ pseg_noL2_pos ] * yweightPenalty;
1152 
1153  if (weight_noL2_A[ pseg_noL2_pos ] < min_weight_noLx_A ) {
1154  min_weight_noLx_A = weight_noL2_A[ pseg_noL2_pos ];
1155  //best_weight_noLx_B = weight_noL2_B[ pseg_noL2_pos ];
1156  //best_curv_noLx_A = curv_noL2_A[ pseg_noL2_pos ];
1157  best_noLx_pseg = pseg_noL2_pos;
1158  best_Layer_noLx = 2;
1159  }
1160 
1161  }
1162 
1163  // alternative: fill map with weight and pseg (which is already ordered)? Seems a very good tool to go looking for segments from.
1164  // As I understand, the segments would be inserted according to their weight, so the list would "automatically" be sorted.
1165 
1166  }
1167  }
1168  }
1169 
1170  if ( n_layers_occupied_tot > 3 ) {
1171  if (pseg < orig_number_of_noL3_psegs && ( n_layers_processed != 3 )) {
1172  if(( Psegments_noL3[ pseg_noL3_pos ].size() > 2 )) {
1173 
 1174  // looks more exciting than it is: here the weight is updated. It is the difference between the slope (dx per layer)
 1175  // of the last two hits and that of the previous hit pair. Please refer to twiki page XXXX or CMS Note YYY (and use layer_distance)
1176 
1177  weight_noL3_A[ pseg_noL3_pos ] += theWeight(
1178  (*(Psegments_noL3[ pseg_noL3_pos ].end()-1 ))->localPosition().x(),
1179  (*(Psegments_noL3[ pseg_noL3_pos ].end()-2 ))->localPosition().x(),
1180  (*(Psegments_noL3[ pseg_noL3_pos ].end()-3 ))->localPosition().x(),
1181  float((*(Psegments_noL3[ pseg_noL3_pos ].end()-1))->cscDetId().layer()),
1182  float((*(Psegments_noL3[ pseg_noL3_pos ].end()-2))->cscDetId().layer()),
1183  float((*(Psegments_noL3[ pseg_noL3_pos ].end()-3))->cscDetId().layer())
1184  );
1185 
1186  weight_noL3_B[ pseg_noL3_pos ] += theWeight(
1187  (*(Psegments_noL3[ pseg_noL3_pos ].end()-1 ))->localPosition().y(),
1188  (*(Psegments_noL3[ pseg_noL3_pos ].end()-2 ))->localPosition().y(),
1189  (*(Psegments_noL3[ pseg_noL3_pos ].end()-3 ))->localPosition().y(),
1190  float((*(Psegments_noL3[ pseg_noL3_pos ].end()-1))->cscDetId().layer()),
1191  float((*(Psegments_noL3[ pseg_noL3_pos ].end()-2))->cscDetId().layer()),
1192  float((*(Psegments_noL3[ pseg_noL3_pos ].end()-3))->cscDetId().layer())
1193  );
1194 
1195  //if we have picked up the last hit go looking for pseg with the lowest (and second lowest?) weight
1196 
1197  if(int(Psegments_noL3[ pseg_noL3_pos ].size()) == n_layers_occupied_tot -1 ) {
1198 
1199  curv_noL3_A[ pseg_noL3_pos ] += theWeight(
1200  (*(Psegments_noL3[ pseg_noL3_pos ].end()-1 ))->localPosition().x(),
1201  (*(Psegments_noL3[ pseg_noL3_pos ].end()-midlayer_pointer[n_layers_occupied_tot-2] ))->localPosition().x(),
1202  (*(Psegments_noL3[ pseg_noL3_pos ].end()-(n_layers_occupied_tot-1) ))->localPosition().x(),
1203  float((*(Psegments_noL3[ pseg_noL3_pos ].end()-1 ))->cscDetId().layer()),
1204  float((*(Psegments_noL3[ pseg_noL3_pos ].end()-midlayer_pointer[n_layers_occupied_tot-2] ))->cscDetId().layer()),
1205  float((*(Psegments_noL3[ pseg_noL3_pos ].end()-(n_layers_occupied_tot-1) ))->cscDetId().layer())
1206  );
1207 
1208  if (curv_noL3_A[ pseg_noL3_pos ] > curvePenaltyThreshold) weight_noL3_A[ pseg_noL3_pos ] = weight_noL3_A[ pseg_noL3_pos ] * curvePenalty;
1209 
1210  if (weight_noL3_B[ pseg_noL3_pos ] > a_yweightPenaltyThreshold[thestation][thering])
1211  weight_noL3_A[ pseg_noL3_pos ] = weight_noL3_A[ pseg_noL3_pos ] * yweightPenalty;
1212 
1213  if (weight_noL3_A[ pseg_noL3_pos ] < min_weight_noLx_A ) {
1214  min_weight_noLx_A = weight_noL3_A[ pseg_noL3_pos ];
1215  //best_weight_noLx_B = weight_noL3_B[ pseg_noL3_pos ];
1216  //best_curv_noLx_A = curv_noL3_A[ pseg_noL3_pos ];
1217  best_noLx_pseg = pseg_noL3_pos;
1218  best_Layer_noLx = 3;
1219  }
1220 
1221  }
1222 
1223  // alternative: fill map with weight and pseg (which is already ordered)? Seems a very good tool to go looking for segments from.
1224  // As I understand, the segments would be inserted according to their weight, so the list would "automatically" be sorted.
1225 
1226  }
1227  }
1228  }
1229 
1230  if ( n_layers_occupied_tot > 3 ) {
1231  if (pseg < orig_number_of_noL4_psegs && ( n_layers_processed != 4 )) {
1232  if(( Psegments_noL4[ pseg_noL4_pos ].size() > 2 )) {
1233 
 1234  // looks more exciting than it is: here the weight is updated. It is the difference between the slope (dx per layer)
 1235  // of the last two hits and that of the previous hit pair. Please refer to twiki page XXXX or CMS Note YYY (and use layer_distance)
1236 
1237  weight_noL4_A[ pseg_noL4_pos ] += theWeight(
1238  (*(Psegments_noL4[ pseg_noL4_pos ].end()-1 ))->localPosition().x(),
1239  (*(Psegments_noL4[ pseg_noL4_pos ].end()-2 ))->localPosition().x(),
1240  (*(Psegments_noL4[ pseg_noL4_pos ].end()-3 ))->localPosition().x(),
1241  float((*(Psegments_noL4[ pseg_noL4_pos ].end()-1))->cscDetId().layer()),
1242  float((*(Psegments_noL4[ pseg_noL4_pos ].end()-2))->cscDetId().layer()),
1243  float((*(Psegments_noL4[ pseg_noL4_pos ].end()-3))->cscDetId().layer())
1244  );
1245 
1246  weight_noL4_B[ pseg_noL4_pos ] += theWeight(
1247  (*(Psegments_noL4[ pseg_noL4_pos ].end()-1 ))->localPosition().y(),
1248  (*(Psegments_noL4[ pseg_noL4_pos ].end()-2 ))->localPosition().y(),
1249  (*(Psegments_noL4[ pseg_noL4_pos ].end()-3 ))->localPosition().y(),
1250  float((*(Psegments_noL4[ pseg_noL4_pos ].end()-1))->cscDetId().layer()),
1251  float((*(Psegments_noL4[ pseg_noL4_pos ].end()-2))->cscDetId().layer()),
1252  float((*(Psegments_noL4[ pseg_noL4_pos ].end()-3))->cscDetId().layer())
1253  );
1254 
1255  //if we have picked up the last hit go looking for pseg with the lowest (and second lowest?) weight
1256 
1257  if(int(Psegments_noL4[ pseg_noL4_pos ].size()) == n_layers_occupied_tot -1 ) {
1258 
1259  curv_noL4_A[ pseg_noL4_pos ] += theWeight(
1260  (*(Psegments_noL4[ pseg_noL4_pos ].end()-1 ))->localPosition().x(),
1261  (*(Psegments_noL4[ pseg_noL4_pos ].end()-midlayer_pointer[n_layers_occupied_tot-2] ))->localPosition().x(),
1262  (*(Psegments_noL4[ pseg_noL4_pos ].end()-(n_layers_occupied_tot-1) ))->localPosition().x(),
1263  float((*(Psegments_noL4[ pseg_noL4_pos ].end()-1 ))->cscDetId().layer()),
1264  float((*(Psegments_noL4[ pseg_noL4_pos ].end()-midlayer_pointer[n_layers_occupied_tot-2] ))->cscDetId().layer()),
1265  float((*(Psegments_noL4[ pseg_noL4_pos ].end()-(n_layers_occupied_tot-1) ))->cscDetId().layer())
1266  );
1267 
1268  if (curv_noL4_A[ pseg_noL4_pos ] > curvePenaltyThreshold) weight_noL4_A[ pseg_noL4_pos ] = weight_noL4_A[ pseg_noL4_pos ] * curvePenalty;
1269 
1270  if (weight_noL4_B[ pseg_noL4_pos ] > a_yweightPenaltyThreshold[thestation][thering])
1271  weight_noL4_A[ pseg_noL4_pos ] = weight_noL4_A[ pseg_noL4_pos ] * yweightPenalty;
1272 
1273  if (weight_noL4_A[ pseg_noL4_pos ] < min_weight_noLx_A ) {
1274  min_weight_noLx_A = weight_noL4_A[ pseg_noL4_pos ];
1275  //best_weight_noLx_B = weight_noL4_B[ pseg_noL4_pos ];
1276  //best_curv_noLx_A = curv_noL4_A[ pseg_noL4_pos ];
1277  best_noLx_pseg = pseg_noL4_pos;
1278  best_Layer_noLx = 4;
1279  }
1280 
1281  }
1282 
1283  // Alternative: fill a map with weight and pseg (a map is already ordered by key)? That would be a convenient structure to search for segments.
1284  // The protosegments would be inserted according to their weight, so the collection would "automatically" be sorted.
1285 
1286  }
1287  }
1288  }
1289 
1290  if ( n_layers_occupied_tot > 4 ) {
1291  if (pseg < orig_number_of_noL5_psegs && ( n_layers_processed != 5 )) {
1292  if(( Psegments_noL5[ pseg_noL5_pos ].size() > 2 )){
1293 
1294  // Looks more exciting than it is: here the weight is accumulated. It is the change in x-slope between the last two hits and the
1295  // preceding pair, i.e. the x differences divided by the corresponding layer separations (layer_distance). Please refer to twiki page XXXX or CMS Note YYY.
1296 
1297  weight_noL5_A[ pseg_noL5_pos ] += theWeight(
1298  (*(Psegments_noL5[ pseg_noL5_pos ].end()-1 ))->localPosition().x(),
1299  (*(Psegments_noL5[ pseg_noL5_pos ].end()-2 ))->localPosition().x(),
1300  (*(Psegments_noL5[ pseg_noL5_pos ].end()-3 ))->localPosition().x(),
1301  float((*(Psegments_noL5[ pseg_noL5_pos ].end()-1))->cscDetId().layer()),
1302  float((*(Psegments_noL5[ pseg_noL5_pos ].end()-2))->cscDetId().layer()),
1303  float((*(Psegments_noL5[ pseg_noL5_pos ].end()-3))->cscDetId().layer())
1304  );
1305 
1306  weight_noL5_B[ pseg_noL5_pos ] += theWeight(
1307  (*(Psegments_noL5[ pseg_noL5_pos ].end()-1 ))->localPosition().y(),
1308  (*(Psegments_noL5[ pseg_noL5_pos ].end()-2 ))->localPosition().y(),
1309  (*(Psegments_noL5[ pseg_noL5_pos ].end()-3 ))->localPosition().y(),
1310  float((*(Psegments_noL5[ pseg_noL5_pos ].end()-1))->cscDetId().layer()),
1311  float((*(Psegments_noL5[ pseg_noL5_pos ].end()-2))->cscDetId().layer()),
1312  float((*(Psegments_noL5[ pseg_noL5_pos ].end()-3))->cscDetId().layer())
1313  );
1314 
1315  //if we have picked up the last hit go looking for pseg with the lowest (and second lowest?) weight
1316 
1317  if(int(Psegments_noL5[ pseg_noL5_pos ].size()) == n_layers_occupied_tot -1 ) {
1318 
1319  curv_noL5_A[ pseg_noL5_pos ] += theWeight(
1320  (*(Psegments_noL5[ pseg_noL5_pos ].end()-1 ))->localPosition().x(),
1321  (*(Psegments_noL5[ pseg_noL5_pos ].end()-midlayer_pointer[n_layers_occupied_tot-2] ))->localPosition().x(),
1322  (*(Psegments_noL5[ pseg_noL5_pos ].end()-(n_layers_occupied_tot-1) ))->localPosition().x(),
1323  float((*(Psegments_noL5[ pseg_noL5_pos ].end()-1 ))->cscDetId().layer()),
1324  float((*(Psegments_noL5[ pseg_noL5_pos ].end()-midlayer_pointer[n_layers_occupied_tot-2] ))->cscDetId().layer()),
1325  float((*(Psegments_noL5[ pseg_noL5_pos ].end()-(n_layers_occupied_tot-1) ))->cscDetId().layer())
1326  );
1327 
1328  if (curv_noL5_A[ pseg_noL5_pos ] > curvePenaltyThreshold) weight_noL5_A[ pseg_noL5_pos ] = weight_noL5_A[ pseg_noL5_pos ] * curvePenalty;
1329 
1330  if (weight_noL5_B[ pseg_noL5_pos ] > a_yweightPenaltyThreshold[thestation][thering])
1331  weight_noL5_A[ pseg_noL5_pos ] = weight_noL5_A[ pseg_noL5_pos ] * yweightPenalty;
1332 
1333  if (weight_noL5_A[ pseg_noL5_pos ] < min_weight_noLx_A ) {
1334  min_weight_noLx_A = weight_noL5_A[ pseg_noL5_pos ];
1335  //best_weight_noLx_B = weight_noL5_B[ pseg_noL5_pos ];
1336  //best_curv_noLx_A = curv_noL5_A[ pseg_noL5_pos ];
1337  best_noLx_pseg = pseg_noL5_pos;
1338  best_Layer_noLx = 5;
1339  }
1340 
1341  }
1342 
1343  // Alternative: fill a map with weight and pseg (a map is already ordered by key)? That would be a convenient structure to search for segments.
1344  // The protosegments would be inserted according to their weight, so the collection would "automatically" be sorted.
1345 
1346  }
1347  }
1348  }
1349 
1350  if ( n_layers_occupied_tot > 5 ) {
1351  if (pseg < orig_number_of_noL6_psegs && ( n_layers_processed != 6 )) {
1352  if(( Psegments_noL6[ pseg_noL6_pos ].size() > 2 )){
1353 
1354  // Looks more exciting than it is: here the weight is accumulated. It is the change in x-slope between the last two hits and the
1355  // preceding pair, i.e. the x differences divided by the corresponding layer separations (layer_distance). Please refer to twiki page XXXX or CMS Note YYY.
1356 
1357  weight_noL6_A[ pseg_noL6_pos ] += theWeight(
1358  (*(Psegments_noL6[ pseg_noL6_pos ].end()-1 ))->localPosition().x(),
1359  (*(Psegments_noL6[ pseg_noL6_pos ].end()-2 ))->localPosition().x(),
1360  (*(Psegments_noL6[ pseg_noL6_pos ].end()-3 ))->localPosition().x(),
1361  float((*(Psegments_noL6[ pseg_noL6_pos ].end()-1))->cscDetId().layer()),
1362  float((*(Psegments_noL6[ pseg_noL6_pos ].end()-2))->cscDetId().layer()),
1363  float((*(Psegments_noL6[ pseg_noL6_pos ].end()-3))->cscDetId().layer())
1364  );
1365 
1366  weight_noL6_B[ pseg_noL6_pos ] += theWeight(
1367  (*(Psegments_noL6[ pseg_noL6_pos ].end()-1 ))->localPosition().y(),
1368  (*(Psegments_noL6[ pseg_noL6_pos ].end()-2 ))->localPosition().y(),
1369  (*(Psegments_noL6[ pseg_noL6_pos ].end()-3 ))->localPosition().y(),
1370  float((*(Psegments_noL6[ pseg_noL6_pos ].end()-1))->cscDetId().layer()),
1371  float((*(Psegments_noL6[ pseg_noL6_pos ].end()-2))->cscDetId().layer()),
1372  float((*(Psegments_noL6[ pseg_noL6_pos ].end()-3))->cscDetId().layer())
1373  );
1374 
1375  //if we have picked up the last hit go looking for pseg with the lowest (and second lowest?) weight
1376 
1377  if(int(Psegments_noL6[ pseg_noL6_pos ].size()) == n_layers_occupied_tot -1 ) {
1378 
1379  curv_noL6_A[ pseg_noL6_pos ] += theWeight(
1380  (*(Psegments_noL6[ pseg_noL6_pos ].end()-1 ))->localPosition().x(),
1381  (*(Psegments_noL6[ pseg_noL6_pos ].end()-midlayer_pointer[n_layers_occupied_tot-2] ))->localPosition().x(),
1382  (*(Psegments_noL6[ pseg_noL6_pos ].end()-(n_layers_occupied_tot-1) ))->localPosition().x(),
1383  float((*(Psegments_noL6[ pseg_noL6_pos ].end()-1 ))->cscDetId().layer()),
1384  float((*(Psegments_noL6[ pseg_noL6_pos ].end()-midlayer_pointer[n_layers_occupied_tot-2] ))->cscDetId().layer()),
1385  float((*(Psegments_noL6[ pseg_noL6_pos ].end()-(n_layers_occupied_tot-1) ))->cscDetId().layer())
1386  );
1387 
1388  if (curv_noL6_A[ pseg_noL6_pos ] > curvePenaltyThreshold) weight_noL6_A[ pseg_noL6_pos ] = weight_noL6_A[ pseg_noL6_pos ] * curvePenalty;
1389 
1390  if (weight_noL6_B[ pseg_noL6_pos ] > a_yweightPenaltyThreshold[thestation][thering])
1391  weight_noL6_A[ pseg_noL6_pos ] = weight_noL6_A[ pseg_noL6_pos ] * yweightPenalty;
1392 
1393  if (weight_noL6_A[ pseg_noL6_pos ] < min_weight_noLx_A ) {
1394  min_weight_noLx_A = weight_noL6_A[ pseg_noL6_pos ];
1395  //best_weight_noLx_B = weight_noL6_B[ pseg_noL6_pos ];
1396  //best_curv_noLx_A = curv_noL6_A[ pseg_noL6_pos ];
1397  best_noLx_pseg = pseg_noL6_pos;
1398  best_Layer_noLx = 6;
1399  }
1400 
1401  }
1402 
1403  // Alternative: fill a map with weight and pseg (a map is already ordered by key)? That would be a convenient structure to search for segments.
1404  // The protosegments would be inserted according to their weight, so the collection would "automatically" be sorted (a sketch follows the end-of-segment-building banner below).
1405 
1406  }
1407  }
1408  }
1409 
1410  }
1411  }
1412  }
1413  }
1414 
1415  //************************************************************************;
1416  //*** End segment building *******************************************;
1417  //************************************************************************;
1418 
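  // A minimal sketch of the "fill a map with weight and pseg" alternative mentioned in the
  // comments above; the name orderedPsegs is illustrative and nothing below is used by the algorithm:
  //
  //   std::multimap<float, int> orderedPsegs;                    // weight -> protosegment index
  //   orderedPsegs.insert(std::make_pair(weight_A[p], p));       // done once per protosegment p
  //
  // Iterating over orderedPsegs would then visit the protosegments in order of increasing weight,
  // so the explicit minimum searches above would not be needed.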
1419  // Important part! Here the segment(s) are actually chosen: either all the good segments,
1420  // or only the best one(s) in order to save time.
1421 
1422  // Check if there is a segment with n-1 hits that has a significantly better
1423  // weight than the best n hit segment
1424 
1425  // IBL 070828: implicit assumption here is that there will always only be one segment per
1426  // cluster - if there are >1 we will need to find out which segment the alternative n-1 hit
1427  // protosegment belongs to!
1428 
1429 
1430  //float chosen_weight = min_weight_A;
1431  //float chosen_ywgt = best_weight_B;
1432  //float chosen_curv = best_curv_A;
1433  //int chosen_nlayers = n_layers_occupied_tot;
1434  int chosen_pseg = best_pseg;
1435  if (best_pseg<0) {
1436  return segmentInChamber;
1437  }
1438  chosen_Psegments = (Psegments);
1439  chosen_weight_A = (weight_A);
1440 
1441  float hit_drop_limit = -999999.999;
1442 
1443  // define different weight improvement requirements depending on how many layers are in the segment candidate
1444  switch ( n_layers_processed ) {
1445  case 1 :
1446  // do nothing;
1447  break;
1448  case 2 :
1449  // do nothing;
1450  break;
1451  case 3 :
1452  // do nothing;
1453  break;
1454  case 4 :
1455  hit_drop_limit = hitDropLimit6Hits * (1./2.) * hitDropLimit4Hits;
1456  if ((best_Layer_noLx < 1) || (best_Layer_noLx > 4)) {
1457  // std::cout<<"CSCSegAlgoST: For four layers, best_Layer_noLx = "<< best_Layer_noLx << std::endl;
1458  }
1459  if ((best_Layer_noLx == 2) || (best_Layer_noLx == 3)) hit_drop_limit = hit_drop_limit * (1./2.);
1460  break;
1461  case 5 :
1462  hit_drop_limit = hitDropLimit6Hits * (2./3.) * hitDropLimit5Hits;
1463  if ((best_Layer_noLx < 1) || (best_Layer_noLx > 5)) {
1464  // std::cout<<"CSCSegAlgoST: For five layers, best_Layer_noLx = "<< best_Layer_noLx << std::endl;
1465  }
1466  if ((best_Layer_noLx == 2) || (best_Layer_noLx == 4)) hit_drop_limit = hit_drop_limit * (1./2.);
1467  if (best_Layer_noLx == 3) hit_drop_limit = hit_drop_limit * (1./3.);
1468  break;
1469  case 6 :
1470  hit_drop_limit = hitDropLimit6Hits * (3./4.);
1471  if ((best_Layer_noLx < 1) || (best_Layer_noLx > 6)) {
1472  // std::cout<<"CSCSegAlgoST: For six layers, best_Layer_noLx = "<< best_Layer_noLx << std::endl;
1473  }
1474  if ((best_Layer_noLx == 2) || (best_Layer_noLx == 5)) hit_drop_limit = hit_drop_limit * (1./2.);
1475  if ((best_Layer_noLx == 3) || (best_Layer_noLx == 4)) hit_drop_limit = hit_drop_limit * (1./3.);
1476  break;
1477 
1478  default :
1479  // Fallback - should never occur.
1480  LogTrace("CSCSegment|CSC") << "[CSCSegAlgoST::buildSegments] Unexpected number of layers with hits - please inform CSC DPG.";
1481  hit_drop_limit = 0.1;
1482  }
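  // Worked example (with a purely hypothetical hitDropLimit6Hits = 0.8): for a candidate built from
  // six occupied layers whose best (n-1)-hit alternative drops layer 3 or layer 4,
  //   hit_drop_limit = 0.8 * (3/4) * (1/3) = 0.2,
  // so the ratio test below keeps the (n-1)-hit protosegment only if its weight is less than
  // 20% of the weight of the best n-hit protosegment.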
1483 
1484  // choose the NoLx collection (the one that contains the best N-1 candidate)
1485  switch ( best_Layer_noLx ) {
1486  case 1 :
1487  Psegments_noLx.clear();
1488  Psegments_noLx = Psegments_noL1;
1489  weight_noLx_A.clear();
1490  weight_noLx_A = weight_noL1_A;
1491  break;
1492  case 2 :
1493  Psegments_noLx.clear();
1494  Psegments_noLx = Psegments_noL2;
1495  weight_noLx_A.clear();
1496  weight_noLx_A = weight_noL2_A;
1497  break;
1498  case 3 :
1499  Psegments_noLx.clear();
1500  Psegments_noLx = Psegments_noL3;
1501  weight_noLx_A.clear();
1502  weight_noLx_A = weight_noL3_A;
1503  break;
1504  case 4 :
1505  Psegments_noLx.clear();
1506  Psegments_noLx = Psegments_noL4;
1507  weight_noLx_A.clear();
1508  weight_noLx_A = weight_noL4_A;
1509  break;
1510  case 5 :
1511  Psegments_noLx.clear();
1512  Psegments_noLx = Psegments_noL5;
1513  weight_noLx_A.clear();
1514  weight_noLx_A = weight_noL5_A;
1515  break;
1516  case 6 :
1517  Psegments_noLx.clear();
1518  Psegments_noLx = Psegments_noL6;
1519  weight_noLx_A.clear();
1520  weight_noLx_A = weight_noL6_A;
1521  break;
1522 
1523  default :
1524  // Fallback - should occur only for preclusters with only 3 layers with hits.
1525  Psegments_noLx.clear();
1526  weight_noLx_A.clear();
1527  }
1528 
1529  if( min_weight_A > 0. ) {
1530  if ( min_weight_noLx_A/min_weight_A < hit_drop_limit ) {
1531  //chosen_weight = min_weight_noLx_A;
1532  //chosen_ywgt = best_weight_noLx_B;
1533  //chosen_curv = best_curv_noLx_A;
1534  //chosen_nlayers = n_layers_occupied_tot-1;
1535  chosen_pseg = best_noLx_pseg;
1536  chosen_Psegments.clear();
1537  chosen_weight_A.clear();
1538  chosen_Psegments = (Psegments_noLx);
1539  chosen_weight_A = (weight_noLx_A);
1540  }
1541  }
1542 
1543  if(onlyBestSegment) {
1544  ChooseSegments2a( chosen_Psegments, chosen_pseg );
1545  }
1546  else {
1547  ChooseSegments3( chosen_Psegments, chosen_weight_A, chosen_pseg );
1548  }
1549 
1550  for(unsigned int iSegment=0; iSegment<GoodSegments.size();++iSegment){
1551  protoSegment = GoodSegments[iSegment];
1552 
1553  // Create new fitter object
1554  CSCCondSegFit* segfit = new CSCCondSegFit( pset(), chamber(), protoSegment );
1555  condpass1 = false;
1556  condpass2 = false;
1557  segfit->setScaleXError( 1.0 );
1558  segfit->fit(condpass1, condpass2);
1559 
1560  // Attempt to handle numerical instability of the fit.
1561  // (Any segment with chi2/dof > chi2Norm_3D_ is considered
1562  // as potentially suffering from numerical instability in the fit.)
1563  if( adjustCovariance() ){
1564  // Call the fit with prefitting option:
1565  // First fit a straight line to X-Z coordinates and calculate chi2
1566  // This is done in CSCCondSegFit::correctTheCovX()
1567  // Scale up errors in X if this chi2 is too big (default 'too big' is >20);
1568  // Then refit XY-Z with the scaled-up X errors
1569  if(segfit->chi2()/segfit->ndof()>chi2Norm_3D_){
1570  condpass1 = true;
1571  segfit->fit(condpass1, condpass2);
1572  }
1573  if(segfit->scaleXError() < 1.00005){
1574  LogTrace("CSCWeirdSegment") << "[CSCSegAlgoST::buildSegments] Segment ErrXX scaled and refit " << std::endl;
1575  if(segfit->chi2()/segfit->ndof()>chi2Norm_3D_){
1576  // Call the fit with direct adjustment of condition number;
1577  // If the attempt to improve fit by scaling up X error fails
1578  // call the procedure to make the condition number of M compatible with
1579  // the precision of X and Y measurements;
1580  // Achieved by decreasing abs value of the Covariance
1581  LogTrace("CSCWeirdSegment") << "[CSCSegAlgoST::buildSegments] Segment ErrXY changed to match cond. number and refit " << std::endl;
1582  condpass2 = true;
1583  segfit->fit(condpass1, condpass2);
1584  }
1585  }
1586  // Call the pre-pruning procedure;
1587  // If the attempt to improve the fit by scaling up the X error is successful
1588  // but the scale factor for the X errors is too big,
1589  // prune the recHit contributing most to the X-Z chi^2
1590  // and refit;
1591  if(prePrun_ && (sqrt(segfit->scaleXError())>prePrunLimit_) &&
1592  (segfit->nhits()>3)){
1593  LogTrace("CSCWeirdSegment") << "[CSCSegAlgoST::buildSegments] Scale factor chi2uCorrection too big, pre-Prune and refit " << std::endl;
1594  protoSegment.erase(protoSegment.begin() + segfit->worstHit(),
1595  protoSegment.begin() + segfit->worstHit()+1 );
1596 
1597  // Need to create new fitter object to repeat fit with fewer hits
1598  // Original code maintained current values of condpass1, condpass2, scaleXError - calc in CorrectTheCovX()
1599  //@@ DO THE SAME THING HERE, BUT IS THAT CORRECT?! It does make a difference.
1600  double tempcorr = segfit->scaleXError(); // save current value
1601  delete segfit;
1602  segfit = new CSCCondSegFit( pset(), chamber(), protoSegment );
1603  segfit->setScaleXError( tempcorr ); // reset to previous value (rather than init to 1)
1604  segfit->fit(condpass1, condpass2);
1605  }
1606  }
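      // In short, the block above escalates in three steps: (1) if chi2/ndof exceeds chi2Norm_3D_,
      // refit with condpass1 (scaled-up X errors); (2) if the scale factor stayed essentially at 1
      // (scaleXError() < 1.00005) and chi2/ndof is still above the cut, refit with condpass2
      // (covariance adjusted to a workable condition number); (3) if prePrun_ is set and the X-error
      // scale factor exceeds prePrunLimit_ with more than 3 hits left, drop the worst hit and refit.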
1607 
1608  // calculate covariance matrix
1609  // AlgebraicSymMatrix temp2 = segfit->covarianceMatrix();
1610 
1611  // build an actual CSC segment
1612  CSCSegment temp(protoSegment, segfit->intercept(), segfit->localdir(),
1613  segfit->covarianceMatrix(), segfit->chi2() );
1614  delete segfit;
1615 
1616  if( debug ) dumpSegment( temp );
1617 
1618  segmentInChamber.push_back(temp);
1619  }
1620  return segmentInChamber;
1621 }
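// A minimal standalone sketch of the slope-change weight accumulated in buildSegments() above,
// assuming theWeight has the usual |delta-slope| form; the helper name slopeChangeWeight and its
// placement here are illustrative only - it is not used by the algorithm.
namespace {
  inline double slopeChangeWeight(double x1, double x2, double x3,
                                  float l1, float l2, float l3) {
    // |slope(hit1,hit2) - slope(hit2,hit3)|, with the layer numbers acting as the lever arm:
    // close to zero for hits on a straight line, large for kinked protosegments.
    return std::abs((x1 - x2) / (l1 - l2) - (x2 - x3) / (l2 - l3));
  }
}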
1622 
1623 void CSCSegAlgoST::ChooseSegments2a(std::vector< ChamberHitContainer > & chosen_segments, int chosen_seg) {
1624  // just return best segment
1625  GoodSegments.clear();
1626  GoodSegments.push_back( chosen_segments[chosen_seg] );
1627 }
1628 
1629 void CSCSegAlgoST::ChooseSegments3(std::vector< ChamberHitContainer > & chosen_segments, std::vector< float > & chosen_weight, int chosen_seg) {
1630 
1631  int SumCommonHits = 0;
1632  GoodSegments.clear();
1633  int nr_remaining_candidates;
1634  unsigned int nr_of_segment_candidates;
1635 
1636  nr_remaining_candidates = nr_of_segment_candidates = chosen_segments.size();
1637 
1638  // always select and return best protosegment:
1639  GoodSegments.push_back( chosen_segments[ chosen_seg ] );
1640 
1641  float chosen_weight_temp = 999999.;
1642  int chosen_seg_temp = -1;
1643 
1644  // try to find further segment candidates:
1645  while( nr_remaining_candidates > 0 ) {
1646 
1647  for(unsigned int iCand=0; iCand < nr_of_segment_candidates; ++iCand) {
1648  //only compare current best to psegs that have not been marked bad:
1649  if( chosen_weight[iCand] < 0. ) continue;
1650  SumCommonHits = 0;
1651 
1652  for( int ihits = 0; ihits < int(chosen_segments[iCand].size()); ++ihits ) { // iCand and chosen_seg NEED to have the same number of hits! (always true by construction)
1653  if( chosen_segments[iCand][ihits] == chosen_segments[chosen_seg][ihits]) {
1654  ++SumCommonHits;
1655  }
1656  }
1657 
1658  //mark a pseg bad:
1659  if(SumCommonHits>1) { // this threshold should probably become a configurable parameter (card); to be investigated first
1660  chosen_weight[iCand] = -1.;
1661  nr_remaining_candidates -= 1;
1662  }
1663  else {
1664  // save the protosegment with the smallest weight
1665  if( chosen_weight[ iCand ] < chosen_weight_temp ) {
1666  chosen_weight_temp = chosen_weight[ iCand ];
1667  chosen_seg_temp = iCand ;
1668  }
1669  }
1670  }
1671 
1672  if( chosen_seg_temp > -1 ) GoodSegments.push_back( chosen_segments[ chosen_seg_temp ] );
1673 
1674  chosen_seg = chosen_seg_temp;
1675  // re-initialize temporary best parameters
1676  chosen_weight_temp = 999999;
1677  chosen_seg_temp = -1;
1678  }
1679 }
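// A minimal sketch of the hit-overlap criterion used by ChooseSegments3 and ChooseSegments2 above,
// assuming protosegments of equal length are compared slot by slot; the helper name countCommonHits
// is illustrative only and is not used by the algorithm. Candidates sharing more than one hit with
// an already accepted protosegment are the ones marked bad.
namespace {
  inline int countCommonHits(const CSCSegAlgoST::ChamberHitContainer& a,
                             const CSCSegAlgoST::ChamberHitContainer& b) {
    int nCommon = 0;
    for (unsigned int i = 0; i < a.size() && i < b.size(); ++i) {
      if (a[i] == b[i]) ++nCommon;   // same rechit pointer in the same position
    }
    return nCommon;
  }
}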
1680 
1681 void CSCSegAlgoST::ChooseSegments2(int best_seg) {
1682  // std::vector <int> CommonHits(6); // nice concept :)
1683  std::vector <unsigned int> BadCandidate;
1684  int SumCommonHits =0;
1685  GoodSegments.clear();
1686  BadCandidate.clear();
1687  for(unsigned int iCand=0;iCand<Psegments.size();++iCand) {
1688  // skip here if segment was marked bad
1689  for(unsigned int iiCand=iCand+1;iiCand<Psegments.size();++iiCand){
1690  // skip here too if segment was marked bad
1691  SumCommonHits =0;
1692  if( Psegments[iCand].size() != Psegments[iiCand].size() ) {
1693  LogTrace("CSCSegment|CSC") << "[CSCSegAlgoST::ChooseSegments2] ALARM!! Should not happen - please inform CSC DPG";
1694  }
1695  else {
1696  for( int ihits = 0; ihits < int(Psegments[iCand].size()); ++ihits ) { // iCand and iiCand NEED to have the same number of hits! (always true by construction)
1697  if( Psegments[iCand][ihits] == Psegments[iiCand][ihits]) {
1698  ++SumCommonHits;
1699  }
1700  }
1701  }
1702  if(SumCommonHits>1) {
1703  if( weight_A[iCand]>weight_A[iiCand] ) { // use weight_A here
1704  BadCandidate.push_back(iCand);
1705  // rather mark the segment bad via an array kept in sync with the protosegments!! e.g. set weight = weight*1000, keep an additional array, or set weight *= -1
1706  }
1707  else{
1708  BadCandidate.push_back(iiCand);
1709  // rather mark the segment bad via an array kept in sync with the protosegments!! e.g. set weight = weight*1000, keep an additional array, or set weight *= -1
1710  }
1711  }
1712  }
1713  }
1714  bool discard;
1715  for(unsigned int isegm=0;isegm<Psegments.size();++isegm) {
1716  // For best results another iteration/comparison over Psegments
1717  //should be applied here... It would make the program much slower.
1718  discard = false;
1719  for(unsigned int ibad=0;ibad<BadCandidate.size();++ibad) {
1720  // can save this loop if we used an array in sync with Psegments!!!!
1721  if(isegm == BadCandidate[ibad]) {
1722  discard = true;
1723  }
1724  }
1725  if(!discard) {
1726  GoodSegments.push_back( Psegments[isegm] );
1727  }
1728  }
1729 }
1730 
1731 
1732 void CSCSegAlgoST::findDuplicates(std::vector<CSCSegment> & segments ){
1733  // this is intended for ME1/1a only - we get ghost segments because of the strip ganging
1734  // this function finds them (the rechits are compared via sharesInput())
1735  // if a segment shares all of its rechits with another segment it is a duplicate (even if
1736  // it has fewer rechits)
1737 
1738  for(std::vector<CSCSegment>::iterator it=segments.begin(); it != segments.end(); ++it) {
1739  std::vector<CSCSegment*> duplicateSegments;
1740  for(std::vector<CSCSegment>::iterator it2=segments.begin(); it2 != segments.end(); ++it2) {
1741  //
1742  bool allShared = true;
1743  if(it!=it2){
1744  allShared = it->sharesRecHits(*it2);
1745  }
1746  else{
1747  allShared = false;
1748  }
1749  //
1750  if(allShared){
1751  duplicateSegments.push_back(&(*it2));
1752  }
1753  }
1754  it->setDuplicateSegments(duplicateSegments);
1755  }
1756 
1757 }
1758 
1759 void CSCSegAlgoST::dumpSegment( const CSCSegment& seg ) const {
1760 
1761  // Only called if pset value 'CSCDebug' is set in config
1762 
1763  edm::LogVerbatim("CSCSegment") << "CSCSegment in " << chamber()->id()
1764  << "\nlocal position = " << seg.localPosition()
1765  << "\nerror = " << seg.localPositionError()
1766  << "\nlocal direction = " << seg.localDirection()
1767  << "\nerror =" << seg.localDirectionError()
1768  << "\ncovariance matrix"
1769  << seg.parametersError()
1770  << "chi2/ndf = " << seg.chi2() << "/" << seg.degreesOfFreedom()
1771  << "\n#rechits = " << seg.specificRecHits().size()
1772  << "\ntime = " << seg.time();
1773 }
1774 