CSCSegAlgoST.cc
1 
10 #include "CSCSegAlgoST.h"
11 #include "CSCSegAlgoShowering.h"
12 
13 #include "DataFormats/CSCRecHit/interface/CSCSegment.h"
14 
15 // // For clhep Matrix::solve
16 // #include "DataFormats/CLHEP/interface/AlgebraicObjects.h"
17 #include "Geometry/CSCGeometry/interface/CSCLayer.h"
18 
19 #include "FWCore/ParameterSet/interface/ParameterSet.h"
20 #include "FWCore/MessageLogger/interface/MessageLogger.h"
21 
22 #include "CommonTools/Statistics/interface/ChiSquaredProbability.h"
23 
24 #include <algorithm>
25 #include <cmath>
26 #include <iostream>
27 #include <string>
28 
29 
30 /* Constructor
31  *
32  */
33 CSCSegAlgoST::CSCSegAlgoST(const edm::ParameterSet& ps) : CSCSegmentAlgorithm(ps), myName("CSCSegAlgoST") {
34 
35  debug = ps.getUntrackedParameter<bool>("CSCDebug");
36  // minLayersApart = ps.getParameter<int>("minLayersApart");
37  // nSigmaFromSegment = ps.getParameter<double>("nSigmaFromSegment");
38  minHitsPerSegment = ps.getParameter<int>("minHitsPerSegment");
39  // muonsPerChamberMax = ps.getParameter<int>("CSCSegmentPerChamberMax");
40  // chi2Max = ps.getParameter<double>("chi2Max");
41  dXclusBoxMax = ps.getParameter<double>("dXclusBoxMax");
42  dYclusBoxMax = ps.getParameter<double>("dYclusBoxMax");
43  preClustering = ps.getParameter<bool>("preClustering");
44  preClustering_useChaining = ps.getParameter<bool>("preClusteringUseChaining");
45  Pruning = ps.getParameter<bool>("Pruning");
46  BrutePruning = ps.getParameter<bool>("BrutePruning");
47  BPMinImprovement = ps.getParameter<double>("BPMinImprovement");
48  // maxRecHitsInCluster is the maximal number of hits in a precluster that is being processed
49  // This cut is intended to remove messy events. Currently nothing is returned if there are
50  // more than maxRecHitsInCluster hits. It could be useful to return an estimate of the
51  // cluster position, which is available.
52  maxRecHitsInCluster = ps.getParameter<int>("maxRecHitsInCluster");
53  onlyBestSegment = ps.getParameter<bool>("onlyBestSegment");
54 
55  hitDropLimit4Hits = ps.getParameter<double>("hitDropLimit4Hits");
56  hitDropLimit5Hits = ps.getParameter<double>("hitDropLimit5Hits");
57  hitDropLimit6Hits = ps.getParameter<double>("hitDropLimit6Hits");
58 
59  yweightPenaltyThreshold = ps.getParameter<double>("yweightPenaltyThreshold");
60  yweightPenalty = ps.getParameter<double>("yweightPenalty");
61 
62  curvePenaltyThreshold = ps.getParameter<double>("curvePenaltyThreshold");
63  curvePenalty = ps.getParameter<double>("curvePenalty");
64 
65  useShowering = ps.getParameter<bool>("useShowering");
66  showering_ = new CSCSegAlgoShowering( ps );
67  // std::cout<<"Constructor called..."<<std::endl;
69  correctCov_ = ps.getParameter<bool>("CorrectTheErrors");
70  chi2Norm_2D_ = ps.getParameter<double>("NormChi2Cut2D");
71  chi2Norm_3D_ = ps.getParameter<double>("NormChi2Cut3D");
72  prePrun_ = ps.getParameter<bool>("prePrun");
73  prePrunLimit_ = ps.getParameter<double>("prePrunLimit");
74  //
75  condSeed1_ = ps.getParameter<double>("SeedSmall");
76  condSeed2_ = ps.getParameter<double>("SeedBig");
77  covToAnyNumber_ = ps.getParameter<bool>("ForceCovariance");
78  covToAnyNumberAll_ = ps.getParameter<bool>("ForceCovarianceAll");
79  covAnyNumber_ = ps.getParameter<double>("Covariance");
80  passCondNumber=false;
81  passCondNumber_2=false;
82  protoChiUCorrection=1.0;
83  maxContrIndex=0;
84  protoNDF = 1.;
85 
86 }
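
The block above just copies configurables out of the ParameterSet. As a rough sketch of what a hand-built configuration could look like (assuming only the standard edm::ParameterSet::addParameter / addUntrackedParameter interface; every key read above, plus those read by CSCSegAlgoShowering, would have to be present, and the numeric values here are placeholders rather than the official defaults):

#include "FWCore/ParameterSet/interface/ParameterSet.h"

edm::ParameterSet makeSketchConfig() {
  edm::ParameterSet ps;
  ps.addUntrackedParameter<bool>("CSCDebug", false);
  ps.addParameter<int>("minHitsPerSegment", 3);           // placeholder value
  ps.addParameter<double>("dXclusBoxMax", 8.0);           // placeholder value
  ps.addParameter<double>("dYclusBoxMax", 8.0);           // placeholder value
  ps.addParameter<bool>("preClustering", true);
  ps.addParameter<bool>("preClusteringUseChaining", true);
  ps.addParameter<bool>("Pruning", true);
  ps.addParameter<bool>("BrutePruning", true);
  ps.addParameter<double>("BPMinImprovement", 10000.);    // placeholder value
  ps.addParameter<int>("maxRecHitsInCluster", 20);        // placeholder value
  // ... all remaining keys read above (onlyBestSegment, hitDropLimit*, yweightPenalty*,
  // curvePenalty*, useShowering, CorrectTheErrors, NormChi2Cut2D/3D, prePrun, prePrunLimit,
  // SeedSmall, SeedBig, ForceCovariance, ForceCovarianceAll, Covariance) and the
  // parameters of CSCSegAlgoShowering would have to be added in the same way.
  return ps;
}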
87 
88 /* Destructor
89  *
90  */
91 CSCSegAlgoST::~CSCSegAlgoST() {
92  delete showering_;
93 }
94 
95 
96 std::vector<CSCSegment> CSCSegAlgoST::run(const CSCChamber* aChamber, ChamberHitContainer rechits) {
97 
98  // Store chamber in temp memory
99  theChamber = aChamber;
100  // pre-cluster rechits and loop over all sub-clusters separately
101  std::vector<CSCSegment> segments_temp;
102  std::vector<CSCSegment> segments;
103  std::vector<ChamberHitContainer> rechits_clusters; // this is a collection of groups of rechits
104 
105  // Define yweight penalty depending on chamber. We fixed the relative ratios, but
106  // they can be scaled by parameters:
107 
108  for(int a = 0; a<5; ++a) {
109  for(int b = 0; b<5; ++b) {
110  a_yweightPenaltyThreshold[a][b] = 0.0;
111  }
112  }
113 
114  a_yweightPenaltyThreshold[1][1] = 10.20 * yweightPenaltyThreshold;
115  a_yweightPenaltyThreshold[1][2] = 14.00 * yweightPenaltyThreshold;
116  a_yweightPenaltyThreshold[1][3] = 20.40 * yweightPenaltyThreshold;
117  a_yweightPenaltyThreshold[2][1] = 12.10 * yweightPenaltyThreshold;
118  a_yweightPenaltyThreshold[2][2] = 20.00 * yweightPenaltyThreshold;
119  a_yweightPenaltyThreshold[3][1] = 12.10 * yweightPenaltyThreshold;
120  a_yweightPenaltyThreshold[3][2] = 20.00 * yweightPenaltyThreshold;
121  a_yweightPenaltyThreshold[4][1] = 12.10 * yweightPenaltyThreshold;
122  a_yweightPenaltyThreshold[4][2] = 20.00 * yweightPenaltyThreshold;
123 
124  if(preClustering) {
125  // run a pre-clusterer on the given rechits to split obviously separated segment seeds:
126  if(preClustering_useChaining){
127  // it uses X,Y,Z information; there are no configurable parameters used;
128  // the X, Y, Z "cuts" are just (much) wider than the LCT readout ones
129  // (which are actually not step functions); this new code could accommodate
130  // the clusterHits method below, but we keep it for safety and backward
131  // comparison reasons
132  rechits_clusters = chainHits( theChamber, rechits );
133  }
134  else{
135  // it uses X,Y information + configurable parameters
136  rechits_clusters = clusterHits( theChamber, rechits );
137  }
138  // loop over the found clusters:
139  for(std::vector<ChamberHitContainer>::iterator sub_rechits = rechits_clusters.begin(); sub_rechits != rechits_clusters.end(); ++sub_rechits ) {
140  // clear the buffer for the subset of segments:
141  segments_temp.clear();
142  // build the subset of segments:
143  segments_temp = buildSegments( (*sub_rechits) );
144  // add the found subset of segments to the collection of all segments in this chamber:
145  segments.insert( segments.end(), segments_temp.begin(), segments_temp.end() );
146  }
147  // this is the place to prune:
148  if( Pruning ) {
149  segments_temp.clear(); // segments_temp needed?!?!
150  segments_temp = prune_bad_hits( theChamber, segments );
151  segments.clear(); // segments_temp needed?!?!
152  segments.swap(segments_temp); // segments_temp needed?!?!
153  }
154  if ("ME1/a" == aChamber->specs()->chamberTypeName()){
155  findDuplicates(segments);
156  }
157  return segments;
158  }
159  else {
160  segments = buildSegments(rechits);
161  if( Pruning ) {
162  segments_temp.clear(); // segments_temp needed?!?!
163  segments_temp = prune_bad_hits( theChamber, segments );
164  segments.clear(); // segments_temp needed?!?!
165  segments.swap(segments_temp); // segments_temp needed?!?!
166  }
167  if ("ME1/a" == aChamber->specs()->chamberTypeName()){
168  findDuplicates(segments);
169  }
170  return segments;
171  //return buildSegments(rechits);
172  }
173 }
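
A usage sketch for the method above, assuming the public ChamberHitContainer typedef from CSCSegAlgoST.h and a configuration like the one sketched after the constructor; in CMSSW the real caller is the segment builder, not hand-written code like this.

#include <vector>

std::vector<CSCSegment> runSketch(const CSCChamber* chamber,
                                  const CSCSegAlgoST::ChamberHitContainer& hitsInChamber,
                                  const edm::ParameterSet& ps) {
  // 'chamber' and 'hitsInChamber' would come from the CSCGeometry and from the
  // rechit collection of one chamber; 'ps' is a configuration as sketched above.
  CSCSegAlgoST algo(ps);
  return algo.run(chamber, hitsInChamber);
}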
174 
175 // ********************************************************************;
176 // *** This method is meant to remove clear bad hits, using as ***;
177 // *** much information from the chamber as possible (e.g. charge, ***;
178 // *** hit position, timing, etc.) ***;
179 // ********************************************************************;
180 std::vector<CSCSegment> CSCSegAlgoST::prune_bad_hits(const CSCChamber* aChamber, std::vector<CSCSegment> & segments) {
181 
182  // std::cout<<"*************************************************************"<<std::endl;
183  // std::cout<<"Called prune_bad_hits in Chamber "<< theChamber->specs()->chamberTypeName()<<std::endl;
184  // std::cout<<"*************************************************************"<<std::endl;
185 
186  std::vector<CSCSegment> segments_temp;
187  std::vector<ChamberHitContainer> rechits_clusters; // this is a collection of groups of rechits
188 
189  const float chi2ndfProbMin = 1.0e-4;
190  bool use_brute_force = BrutePruning;
191 
192  int hit_nr = 0;
193  int hit_nr_worst = -1;
194  int hit_nr_2ndworst = -1;
195 
196  for(std::vector<CSCSegment>::iterator it=segments.begin(); it != segments.end(); ++it) {
197 
198  // do nothing for nhit <= minHitPerSegment
199  if( (*it).nRecHits() <= minHitsPerSegment ) continue;
200 
201  if( !use_brute_force ) {// find worst hit
202 
203  float chisq = (*it).chi2();
204  int nhits = (*it).nRecHits();
205  LocalPoint localPos = (*it).localPosition();
206  LocalVector segDir = (*it).localDirection();
207  const CSCChamber* cscchamber = theChamber;
208  float globZ ;
209 
210  GlobalPoint globalPosition = cscchamber->toGlobal(localPos);
211  globZ = globalPosition.z();
212 
213 
214  if( ChiSquaredProbability((double)chisq,(double)(2*nhits-4)) < chi2ndfProbMin ) {
215 
216  // find (rough) "residuals" (NOT excluding the hit from the fit - speed!) of hits on segment
217  std::vector<CSCRecHit2D> theseRecHits = (*it).specificRecHits();
218  std::vector<CSCRecHit2D>::const_iterator iRH_worst;
219  float xdist_local = -99999.;
220 
221  float xdist_local_worst_sig = -99999.;
222  float xdist_local_2ndworst_sig = -99999.;
223  float xdist_local_sig = -99999.;
224 
225  hit_nr = 0;
226  hit_nr_worst = -1;
227  hit_nr_2ndworst = -1;
228 
229  for ( std::vector<CSCRecHit2D>::const_iterator iRH = theseRecHits.begin(); iRH != theseRecHits.end(); ++iRH) {
230  //mark "worst" hit:
231 
232  float z_at_target ;
233  float radius ;
234  float loc_x_at_target ;
235  float loc_y_at_target ;
236  float loc_z_at_target ;
237 
238  z_at_target = 0.;
239  loc_x_at_target = 0.;
240  loc_y_at_target = 0.;
241  loc_z_at_target = 0.;
242  radius = 0.;
243 
244  // set the z target in CMS global coordinates:
245  const CSCLayer* csclayerRH = theChamber->layer((*iRH).cscDetId().layer());
246  LocalPoint localPositionRH = (*iRH).localPosition();
247  GlobalPoint globalPositionRH = csclayerRH->toGlobal(localPositionRH);
248 
249  LocalError rerrlocal = (*iRH).localPositionError();
250  float xxerr = rerrlocal.xx();
251 
252  float target_z = globalPositionRH.z(); // target z position in cm (z pos of the hit)
253 
254  if(target_z > 0.) {
255  loc_x_at_target = localPos.x() + (segDir.x()/fabs(segDir.z())*( target_z - globZ ));
256  loc_y_at_target = localPos.y() + (segDir.y()/fabs(segDir.z())*( target_z - globZ ));
257  loc_z_at_target = target_z;
258  }
259  else {
260  loc_x_at_target = localPos.x() + ((-1)*segDir.x()/fabs(segDir.z())*( target_z - globZ ));
261  loc_y_at_target = localPos.y() + ((-1)*segDir.y()/fabs(segDir.z())*( target_z - globZ ));
262  loc_z_at_target = target_z;
263  }
264  // have to transform the segment's coordinates back to the local frame... how?
265 
266  xdist_local = fabs(localPositionRH.x() - loc_x_at_target);
267  xdist_local_sig = fabs((localPositionRH.x() -loc_x_at_target)/(xxerr));
268 
269  if( xdist_local_sig > xdist_local_worst_sig ) {
270  xdist_local_2ndworst_sig = xdist_local_worst_sig;
271  xdist_local_worst_sig = xdist_local_sig;
272  iRH_worst = iRH;
273  hit_nr_2ndworst = hit_nr_worst;
274  hit_nr_worst = hit_nr;
275  }
276  else if(xdist_local_sig > xdist_local_2ndworst_sig) {
277  xdist_local_2ndworst_sig = xdist_local_sig;
278  hit_nr_2ndworst = hit_nr;
279  }
280  ++hit_nr;
281  }
282 
283  // reset worst hit number if certain criteria apply.
284  // Criteria: 2nd worst hit must be at least a factor of
285  // 1.5 better than the worst in terms of sigma:
286  if ( xdist_local_worst_sig / xdist_local_2ndworst_sig < 1.5 ) {
287  hit_nr_worst = -1;
288  hit_nr_2ndworst = -1;
289  }
290  }
291  }
292 
293  // if worst hit was found, refit without worst hit and select if considerably better than original fit.
294  // Can also use brute force: refit all n-1 hit segments and choose one over the n hit if considerably "better"
295 
296  std::vector< CSCRecHit2D > buffer;
297  std::vector< std::vector< CSCRecHit2D > > reduced_segments;
298  std::vector< CSCRecHit2D > theseRecHits = (*it).specificRecHits();
299  float best_red_seg_prob = 0.0;
300  // use for a chi2-difference-of-1 criterion instead: float best_red_seg_prob = 99999.;
301  buffer.clear();
302 
303  if( ChiSquaredProbability((double)(*it).chi2(),(double)((2*(*it).nRecHits())-4)) < chi2ndfProbMin ) {
304 
305  buffer = theseRecHits;
306 
307  // Dirty switch: here one can select to refit all possible subsets or just the one without the
308  // tagged worst hit:
309  if( use_brute_force ) { // Brute force method: loop over all possible segments:
310  for(size_t bi = 0; bi < buffer.size(); ++bi) {
311  reduced_segments.push_back(buffer);
312  reduced_segments[bi].erase(reduced_segments[bi].begin()+(bi),reduced_segments[bi].begin()+(bi+1));
313  }
314  }
315  else { // More elegant but still biased: erase only worst hit
316  // Comment: There is not a very strong correlation of the worst hit with the one that one should remove...
317  if( hit_nr_worst >= 0 && hit_nr_worst <= int(buffer.size()) ) {
318  // fill segment in buffer, delete worst hit
319  buffer.erase(buffer.begin()+(hit_nr_worst),buffer.begin()+(hit_nr_worst+1));
320  reduced_segments.push_back(buffer);
321  }
322  else {
323  // only fill segment in array, do not delete anything
324  reduced_segments.push_back(buffer);
325  }
326  }
327  }
328 
329  // Loop over the subsegments and fit (only one segment if "use_brute_force" is false):
330  for(size_t iSegment=0; iSegment<reduced_segments.size(); ++iSegment) {
331  // loop over hits on given segment and push pointers to hits into protosegment
332  protoSegment.clear();
333  for(size_t m = 0; m<reduced_segments[iSegment].size(); ++m ) {
334  protoSegment.push_back(&reduced_segments[iSegment][m]);
335  }
336  passCondNumber=false;
337  passCondNumber_2 = false;
338  protoChiUCorrection=1.0;
339  doSlopesAndChi2();
340  // Attempt to handle numerical instability of the fit;
341  // The same as in the build method;
342  // Preprune is not applied;
343  if(correctCov_){
344  if(protoChi2/protoNDF>chi2Norm_3D_){
345  passCondNumber = true;
346  doSlopesAndChi2();
347  }
348  if((protoChiUCorrection<1.00005)&&(protoChi2/protoNDF>chi2Norm_3D_)){
349  passCondNumber_2=true;
350  doSlopesAndChi2();
351  }
352  }
353  fillLocalDirection();
354  // calculate error matrix
355  AlgebraicSymMatrix protoErrors = calculateError();
356  // but reorder components to match what's required by TrackingRecHit interface
357  // i.e. slopes first, then positions
358  flipErrors( protoErrors );
359  //
360  CSCSegment temp(protoSegment, protoIntercept, protoDirection, protoErrors, protoChi2);
361 
362  // replace the n-hit segment with the n-1 hit segment, if the segment probability is a factor of BPMinImprovement better:
363  if( ( ChiSquaredProbability((double)(*it).chi2(),(double)((2*(*it).nRecHits())-4))
364  <
365  (1./BPMinImprovement)*(ChiSquaredProbability((double)temp.chi2(),(double)(2*temp.nRecHits()-4))) ) // was (1.e-3) 081202
366 
367  &&
368  ( (ChiSquaredProbability((double)temp.chi2(),(double)(2*temp.nRecHits()-4)))
369  > best_red_seg_prob
370  )
371  &&
372  ( (ChiSquaredProbability((double)temp.chi2(),(double)(2*temp.nRecHits()-4))) > 1e-10 )
373  ) {
374  best_red_seg_prob = ChiSquaredProbability((double)temp.chi2(),(double)(2*temp.nRecHits()-4));
375  // The alternative n-1 segment is much cleaner. If this segment
376  // has >= minHitsPerSegment hits exchange current n hit segment (*it)
377  // with better n-1 hit segment:
378  if( temp.nRecHits() >= minHitsPerSegment ) {
379  (*it) = temp;
380  }
381  }
382  }
383  }
384 
385  return segments;
386 
387 }
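
The replacement criterion coded above compares chi2 probabilities of the n-hit fit and the (n-1)-hit refit. A tiny numeric illustration, with made-up probabilities and a made-up BPMinImprovement value:

#include <iostream>

int main() {
  // hypothetical chi2 probabilities of the original n-hit fit and of the (n-1)-hit refit
  double probOriginal = 1.0e-8;
  double probReduced  = 1.0e-3;
  double BPMinImprovement = 10000.;   // placeholder cut value

  bool replace = (probOriginal < probReduced / BPMinImprovement)  // refit is much better
                 && (probReduced > 1.0e-10);                      // and not hopeless itself
  std::cout << (replace ? "replace with (n-1)-hit segment" : "keep n-hit segment") << '\n';
  return 0;
}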
388 
389 
390 // ********************************************************************;
391 std::vector< std::vector<const CSCRecHit2D*> > CSCSegAlgoST::clusterHits(const CSCChamber* aChamber, ChamberHitContainer & rechits) {
392  theChamber = aChamber;
393 
394  std::vector<ChamberHitContainer> rechits_clusters; // this is a collection of groups of rechits
395  // const float dXclus_box_cut = 4.; // seems to work reasonably 070116
396  // const float dYclus_box_cut = 8.; // seems to work reasonably 070116
397 
398  float dXclus = 0.0;
399  float dYclus = 0.0;
400  float dXclus_box = 0.0;
401  float dYclus_box = 0.0;
402 
403  std::vector<const CSCRecHit2D*> temp;
404 
405  std::vector< ChamberHitContainer > seeds;
406 
407  std::vector<float> running_meanX;
408  std::vector<float> running_meanY;
409 
410  std::vector<float> seed_minX;
411  std::vector<float> seed_maxX;
412  std::vector<float> seed_minY;
413  std::vector<float> seed_maxY;
414 
415  //std::cout<<"*************************************************************"<<std::endl;
416  //std::cout<<"Called clusterHits in Chamber "<< theChamber->specs()->chamberTypeName()<<std::endl;
417  //std::cout<<"*************************************************************"<<std::endl;
418 
419  // split rechits into subvectors and return vector of vectors:
420  // Loop over rechits
421  // Create one seed per hit
422  for(unsigned int i = 0; i < rechits.size(); ++i) {
423 
424  temp.clear();
425 
426  temp.push_back(rechits[i]);
427 
428  seeds.push_back(temp);
429 
430  // First added hit in seed defines the mean to which the next hit is compared
431  // for this seed.
432 
433  running_meanX.push_back( rechits[i]->localPosition().x() );
434  running_meanY.push_back( rechits[i]->localPosition().y() );
435 
436  // set min/max X and Y for box containing the hits in the precluster:
437  seed_minX.push_back( rechits[i]->localPosition().x() );
438  seed_maxX.push_back( rechits[i]->localPosition().x() );
439  seed_minY.push_back( rechits[i]->localPosition().y() );
440  seed_maxY.push_back( rechits[i]->localPosition().y() );
441  }
442 
443  // merge clusters that are too close
444  // measure distance between final "running mean"
445  for(size_t NNN = 0; NNN < seeds.size(); ++NNN) {
446 
447  for(size_t MMM = NNN+1; MMM < seeds.size(); ++MMM) {
448  if(running_meanX[MMM] == 999999. || running_meanX[NNN] == 999999. ) {
449  LogDebug("CSCSegment|CSC") << "CSCSegmentST::clusterHits: Warning: Skipping used seeds, this should not happen - inform developers!";
450  // std::cout<<"We should never see this line now!!!"<<std::endl;
451  continue; //skip seeds that have been used
452  }
453 
454  // calculate cut criteria for simple running mean distance cut:
455  dXclus = fabs(running_meanX[NNN] - running_meanX[MMM]);
456  dYclus = fabs(running_meanY[NNN] - running_meanY[MMM]);
457 
458  // calculate minimal distance between precluster boxes containing the hits:
459  if ( running_meanX[NNN] > running_meanX[MMM] ) dXclus_box = seed_minX[NNN] - seed_maxX[MMM];
460  else dXclus_box = seed_minX[MMM] - seed_maxX[NNN];
461  if ( running_meanY[NNN] > running_meanY[MMM] ) dYclus_box = seed_minY[NNN] - seed_maxY[MMM];
462  else dYclus_box = seed_minY[MMM] - seed_maxY[NNN];
463 
464 
465  if( dXclus_box < dXclusBoxMax && dYclus_box < dYclusBoxMax ) {
466  // merge clusters!
467  // merge by adding seed NNN to seed MMM and erasing seed NNN
468 
469  // calculate running mean for the merged seed:
470  running_meanX[MMM] = (running_meanX[NNN]*seeds[NNN].size() + running_meanX[MMM]*seeds[MMM].size()) / (seeds[NNN].size()+seeds[MMM].size());
471  running_meanY[MMM] = (running_meanY[NNN]*seeds[NNN].size() + running_meanY[MMM]*seeds[MMM].size()) / (seeds[NNN].size()+seeds[MMM].size());
472 
473  // update min/max X and Y for box containing the hits in the merged cluster:
474  if ( seed_minX[NNN] <= seed_minX[MMM] ) seed_minX[MMM] = seed_minX[NNN];
475  if ( seed_maxX[NNN] > seed_maxX[MMM] ) seed_maxX[MMM] = seed_maxX[NNN];
476  if ( seed_minY[NNN] <= seed_minY[MMM] ) seed_minY[MMM] = seed_minY[NNN];
477  if ( seed_maxY[NNN] > seed_maxY[MMM] ) seed_maxY[MMM] = seed_maxY[NNN];
478 
479  // add seed NNN to MMM (lower to larger number)
480  seeds[MMM].insert(seeds[MMM].end(),seeds[NNN].begin(),seeds[NNN].end());
481 
482  // mark seed NNN as used (at the moment just set running mean to 999999.)
483  running_meanX[NNN] = 999999.;
484  running_meanY[NNN] = 999999.;
485  // we have merged a seed (NNN) to the higher seed (MMM) - need to continue to
486  // next seed (NNN+1)
487  break;
488  }
489 
490  }
491  }
492 
493  // hand over the final seeds to the output
494  // would be more elegant if we could do the above step by
495  // erasing the merged seeds, rather than just marking them as used
496  for(size_t NNN = 0; NNN < seeds.size(); ++NNN) {
497  if(running_meanX[NNN] == 999999.) continue; //skip seeds that have been marked as used up in merging
498  rechits_clusters.push_back(seeds[NNN]);
499  }
500 
501  //***************************************************************
502 
503  return rechits_clusters;
504 }
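
The merge decision in this method boils down to a gap test between the bounding boxes of two preclusters. A simplified standalone sketch of that test (the helper struct and function names are mine, not from this file):

#include <algorithm>

struct SeedBox { float minX, maxX, minY, maxY; };   // bounding box of one precluster

// Two preclusters are merged when the gaps between their boxes in x and in y are both
// below the configured limits; a negative gap means the boxes overlap in that coordinate.
bool shouldMerge(const SeedBox& a, const SeedBox& b, float dXclusBoxMax, float dYclusBoxMax) {
  float dXclus_box = std::max(a.minX - b.maxX, b.minX - a.maxX);
  float dYclus_box = std::max(a.minY - b.maxY, b.minY - a.maxY);
  return dXclus_box < dXclusBoxMax && dYclus_box < dYclusBoxMax;
}

With box cuts of a few centimetres, as hinted by the commented-out dXclus_box_cut / dYclus_box_cut values above (4 and 8), preclusters whose boxes come close in both x and y end up merged into one group of rechits.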
505 
506 
507 std::vector< std::vector<const CSCRecHit2D*> > CSCSegAlgoST::chainHits(const CSCChamber* aChamber, ChamberHitContainer & rechits) {
508 
509  std::vector<ChamberHitContainer> rechits_chains; // this is a collection of groups of rechits
510 
511 
512  std::vector<const CSCRecHit2D*> temp;
513 
514  std::vector< ChamberHitContainer > seeds;
515 
516  std::vector <bool> usedCluster;
517 
518  // split rechits into subvectors and return vector of vectors:
519  // Loop over rechits
520  // Create one seed per hit
521  //std::cout<<" rechits.size() = "<<rechits.size()<<std::endl;
522  for(unsigned int i = 0; i < rechits.size(); ++i) {
523  temp.clear();
524  temp.push_back(rechits[i]);
525  seeds.push_back(temp);
526  usedCluster.push_back(false);
527  }
528  bool isME11a = false;
529  if ("ME1/a" == aChamber->specs()->chamberTypeName()){
530  isME11a = true;
531  }
532  // merge chains that are too close ("touch" each other)
533  for(size_t NNN = 0; NNN < seeds.size(); ++NNN) {
534  for(size_t MMM = NNN+1; MMM < seeds.size(); ++MMM) {
535  if(usedCluster[MMM] || usedCluster[NNN]){
536  continue;
537  }
538  // it all depends on how we define "good";
539  // try not to "cluster" the hits but to "chain" them;
540  // this does the clustering but also does a better job
541  // for inclined tracks (not clustering them together;
542  // crossed tracks would still be clustered together)
543  // 22.12.09: In fact it is not much different
544  // from the "clustering", we just introduce another
545  // variable in the game - Z. And it makes sense
546  // to re-introduce Y (or actually the wire group number)
547  // in a similar way as for the strip number - see
548  // the code below.
549  bool goodToMerge = isGoodToMerge(isME11a, seeds[NNN], seeds[MMM]);
550  if(goodToMerge){
551  // merge chains!
552  // merge by adding seed NNN to seed MMM and erasing seed NNN
553 
554  // add seed NNN to MMM (lower to larger number)
555  seeds[MMM].insert(seeds[MMM].end(),seeds[NNN].begin(),seeds[NNN].end());
556 
557  // mark seed NNN as used
558  usedCluster[NNN] = true;
559  // we have merged a seed (NNN) to the higher seed (MMM) - need to continue to
560  // next seed (NNN+1)
561  break;
562  }
563 
564  }
565  }
566 
567  // hand over the final seeds to the output
568  // would be more elegant if we could do the above step by
569  // erasing the merged seeds, rather than just marking them as used
570 
571  for(size_t NNN = 0; NNN < seeds.size(); ++NNN) {
572  if(usedCluster[NNN]) continue; //skip seeds that have been marked as used up in merging
573  rechits_chains.push_back(seeds[NNN]);
574  }
575 
576  //***************************************************************
577 
578  return rechits_chains;
579 }
580 
581 bool CSCSegAlgoST::isGoodToMerge(bool isME11a, ChamberHitContainer & newChain, ChamberHitContainer & oldChain) {
582  for(size_t iRH_new = 0;iRH_new<newChain.size();++iRH_new){
583  int layer_new = newChain[iRH_new]->cscDetId().layer()-1;
584  int middleStrip_new = newChain[iRH_new]->channels().size()/2;
585  int centralStrip_new = newChain[iRH_new]->channels()[middleStrip_new];
586  int middleWire_new = newChain[iRH_new]->wgroups().size()/2;
587  int centralWire_new = newChain[iRH_new]->wgroups()[middleWire_new];
588  bool layerRequirementOK = false;
589  bool stripRequirementOK = false;
590  bool wireRequirementOK = false;
591  bool goodToMerge = false;
592  for(size_t iRH_old = 0;iRH_old<oldChain.size();++iRH_old){
593  int layer_old = oldChain[iRH_old]->cscDetId().layer()-1;
594  int middleStrip_old = oldChain[iRH_old]->channels().size()/2;
595  int centralStrip_old = oldChain[iRH_old]->channels()[middleStrip_old];
596  int middleWire_old = oldChain[iRH_old]->wgroups().size()/2;
597  int centralWire_old = oldChain[iRH_old]->wgroups()[middleWire_old];
598 
599  // to be chained, two hits need to be in neighbouring layers...
600  // or better, allow a few missing layers (up to 3 to avoid inefficiencies);
601  // however we'll not make an angle correction because it
602  // worsens the situation in some of the "regular" cases
603  // (not making the correction means that the conditions for
604  // forming a cluster are different if we have missing layers -
605  // this could affect events at the boundaries )
606  if(layer_new==layer_old+1 || layer_new==layer_old-1 ||
607  layer_new==layer_old+2 || layer_new==layer_old-2 ||
608  layer_new==layer_old+3 || layer_new==layer_old-3 ||
609  layer_new==layer_old+4 || layer_new==layer_old-4 ){
610  layerRequirementOK = true;
611  }
612  int allStrips = 48;
613  //to be chained, two hits need to be "close" in strip number (can do it in phi
614  // but it doesn't really matter); let "close" mean up to 2 strips (3?) -
615  // this is more than what CLCT readout patterns allow
616  if(centralStrip_new==centralStrip_old ||
617  centralStrip_new==centralStrip_old+1 || centralStrip_new==centralStrip_old-1 ||
618  centralStrip_new==centralStrip_old+2 || centralStrip_new==centralStrip_old-2){
619  stripRequirementOK = true;
620  }
621  // same for wires (and ALCT patterns)
622  if(centralWire_new==centralWire_old ||
623  centralWire_new==centralWire_old+1 || centralWire_new==centralWire_old-1 ||
624  centralWire_new==centralWire_old+2 || centralWire_new==centralWire_old-2){
625  wireRequirementOK = true;
626  }
627 
628  if(isME11a){
629  if(centralStrip_new==centralStrip_old+1-allStrips || centralStrip_new==centralStrip_old-1-allStrips ||
630  centralStrip_new==centralStrip_old+2-allStrips || centralStrip_new==centralStrip_old-2-allStrips ||
631  centralStrip_new==centralStrip_old+1+allStrips || centralStrip_new==centralStrip_old-1+allStrips ||
632  centralStrip_new==centralStrip_old+2+allStrips || centralStrip_new==centralStrip_old-2+allStrips){
633  stripRequirementOK = true;
634  }
635  }
636  if(layerRequirementOK && stripRequirementOK && wireRequirementOK){
637  goodToMerge = true;
638  return goodToMerge;
639  }
640  }
641  }
642  return false;
643 }
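
Stripped of the loop bookkeeping, the pair test above reduces to three proximity requirements. A standalone sketch (my own helper; the ME1/a 48-strip wrap-around handled above is omitted for brevity):

#include <cstdlib>

// Two rechits can be chained when they sit on different but nearby layers (1 to 4 apart)
// and their central strip and wire-group numbers each differ by at most 2.
bool hitsChain(int layerA, int layerB, int stripA, int stripB, int wireA, int wireB) {
  int dLayer = std::abs(layerA - layerB);
  return dLayer >= 1 && dLayer <= 4 &&
         std::abs(stripA - stripB) <= 2 &&
         std::abs(wireA - wireB) <= 2;
}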
644 
645 
646 
647 
648 double CSCSegAlgoST::theWeight(double coordinate_1, double coordinate_2, double coordinate_3, float layer_1, float layer_2, float layer_3) {
649  double sub_weight = 0;
650  sub_weight = fabs(
651  ( (coordinate_2 - coordinate_3) / (layer_2 - layer_3) ) -
652  ( (coordinate_1 - coordinate_2) / (layer_1 - layer_2) )
653  );
654  return sub_weight;
655 }
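
A worked example of what this weight measures, with made-up hit positions: it is the absolute change of slope between consecutive hit pairs in the x-versus-layer plane, so collinear hits contribute zero and a kink contributes its slope difference.

#include <cassert>
#include <cmath>

// Same formula as theWeight(), reproduced standalone for the example.
double weightSketch(double c1, double c2, double c3, double l1, double l2, double l3) {
  return std::fabs((c2 - c3) / (l2 - l3) - (c1 - c2) / (l1 - l2));
}

int main() {
  // collinear hits: x = 1.0, 2.0, 3.0 on layers 1, 2, 3 -> both slopes are 1.0, weight 0
  assert(weightSketch(3.0, 2.0, 1.0, 3., 2., 1.) == 0.0);
  // last hit kinked to x = 3.5 -> slopes 1.0 and 1.5, weight 0.5
  assert(std::fabs(weightSketch(3.5, 2.0, 1.0, 3., 2., 1.) - 0.5) < 1e-12);
  return 0;
}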
656 
657 /*
658  * This algorithm is based on the Minimum Spanning Tree (ST) approach
659  * for building endcap muon track segments out of the rechits in a CSCChamber.
660  */
661 std::vector<CSCSegment> CSCSegAlgoST::buildSegments(ChamberHitContainer rechits) {
662 
663  // Clear buffer for segment vector
664  std::vector<CSCSegment> segmentInChamber;
665  segmentInChamber.clear(); // list of final segments
666 
667  // CSC Ring;
668  unsigned int thering = 999;
669  unsigned int thestation = 999;
670  unsigned int thecham = 999;
671 
672  std::vector<int> hits_onLayerNumber(6);
673 
674  unsigned int UpperLimit = maxRecHitsInCluster;
675  if (int(rechits.size()) < minHitsPerSegment) return segmentInChamber;
676 
677  for(int iarray = 0; iarray <6; ++iarray) { // magic number 6: number of layers in CSC chamber - not gonna change :)
678  PAhits_onLayer[iarray].clear();
679  hits_onLayerNumber[iarray] = 0;
680  }
681 
682  chosen_Psegments.clear();
683  chosen_weight_A.clear();
684 
685  Psegments.clear();
686  Psegments_noLx.clear();
687  Psegments_noL1.clear();
688  Psegments_noL2.clear();
689  Psegments_noL3.clear();
690  Psegments_noL4.clear();
691  Psegments_noL5.clear();
692  Psegments_noL6.clear();
693 
694  Psegments_hits.clear();
695 
696  weight_A.clear();
697  weight_noLx_A.clear();
698  weight_noL1_A.clear();
699  weight_noL2_A.clear();
700  weight_noL3_A.clear();
701  weight_noL4_A.clear();
702  weight_noL5_A.clear();
703  weight_noL6_A.clear();
704 
705  weight_B.clear();
706  weight_noL1_B.clear();
707  weight_noL2_B.clear();
708  weight_noL3_B.clear();
709  weight_noL4_B.clear();
710  weight_noL5_B.clear();
711  weight_noL6_B.clear();
712 
713  curv_A.clear();
714  curv_noL1_A.clear();
715  curv_noL2_A.clear();
716  curv_noL3_A.clear();
717  curv_noL4_A.clear();
718  curv_noL5_A.clear();
719  curv_noL6_A.clear();
720 
721  // definition of middle layer for n-hit segment
722  int midlayer_pointer[6] = {0,0,2,3,3,4};
723 
724  // int n_layers_missed_tot = 0;
725  int n_layers_occupied_tot = 0;
726  int n_layers_processed = 0;
727 
728  float min_weight_A = 99999.9;
729  float min_weight_noLx_A = 99999.9;
730 
731  float best_weight_B = -1.;
732  float best_weight_noLx_B = -1.;
733 
734  float best_curv_A = -1.;
735  float best_curv_noLx_A = -1.;
736 
737  int best_pseg = -1;
738  int best_noLx_pseg = -1;
739  int best_Layer_noLx = -1;
740 
741  //************************************************************************;
742  //*** Start segment building *****************************************;
743  //************************************************************************;
744 
745  // Determine how many layers with hits we have
746  // Fill all hits into the layer hit container:
747 
748  // Have 2 standard arrays: one giving the number of hits per layer.
749  // The other the corresponding hits.
750 
751  // Loop all available hits, count hits per layer and fill the hits into array by layer
752  for(size_t M = 0; M < rechits.size(); ++M) {
753  // add hits to array per layer and count hits per layer:
754  hits_onLayerNumber[ rechits[M]->cscDetId().layer()-1 ] += 1;
755  if(hits_onLayerNumber[ rechits[M]->cscDetId().layer()-1 ] == 1 ) n_layers_occupied_tot += 1;
756  // add hits to vector in array
757  PAhits_onLayer[rechits[M]->cscDetId().layer()-1] .push_back(rechits[M]);
758  }
759 
760  // We have now counted the hits per layer and filled pointers to the hits into an array
761 
762  int tothits = 0;
763  int maxhits = 0;
764  int nexthits = 0;
765  int maxlayer = -1;
766  int nextlayer = -1;
767 
768  for(size_t i = 0; i< hits_onLayerNumber.size(); ++i){
769  //std::cout<<"We have "<<hits_onLayerNumber[i]<<" hits on layer "<<i+1<<std::endl;
770  tothits += hits_onLayerNumber[i];
771  if (hits_onLayerNumber[i] > maxhits) {
772  nextlayer = maxlayer;
773  nexthits = maxhits;
774  maxlayer = i;
775  maxhits = hits_onLayerNumber[i];
776  }
777  else if (hits_onLayerNumber[i] > nexthits) {
778  nextlayer = i;
779  nexthits = hits_onLayerNumber[i];
780  }
781  }
782 
783 
784  if (tothits > (int)UpperLimit) {
785  if (n_layers_occupied_tot > 4) {
786  tothits = tothits - hits_onLayerNumber[maxlayer];
787  n_layers_occupied_tot = n_layers_occupied_tot - 1;
788  PAhits_onLayer[maxlayer].clear();
789  hits_onLayerNumber[maxlayer] = 0;
790  }
791  }
792 
793  if (tothits > (int)UpperLimit) {
794  if (n_layers_occupied_tot > 4) {
795  tothits = tothits - hits_onLayerNumber[nextlayer];
796  n_layers_occupied_tot = n_layers_occupied_tot - 1;
797  PAhits_onLayer[nextlayer].clear();
798  hits_onLayerNumber[nextlayer] = 0;
799  }
800  }
801 
802  if (tothits > (int)UpperLimit){
803 
804  //++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
805  // Showering muon - returns nothing if chi2 == -1 (see comment in SegAlgoShowering)
806  //++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
807  if (useShowering) {
808  CSCSegment segShower = showering_->showerSeg(theChamber, rechits);
809 
810  // Make sure have at least 3 hits...
811  if ( segShower.nRecHits() < 3 ) return segmentInChamber;
812  if ( segShower.chi2() == -1 ) return segmentInChamber;
813 
814  segmentInChamber.push_back(segShower);
815  return segmentInChamber;
816 
817  } else{
818  LogDebug("CSCSegment|CSC") <<"Number of rechits in the cluster/chamber > "<< UpperLimit<<
819  " ... Segment finding in the cluster/chamber canceled!";
820  // std::cout<<"Number of rechits in the cluster/chamber > "<< UpperLimit<<
821  // " ... Segment finding in the cluster/chamber canceled! "<<std::endl;
822  return segmentInChamber;
823  }
824  }
825 
826  // Find out which station, ring and chamber we are in
827  // Used to choose station/ring dependent y-weight cuts
828 
829  if( rechits.size() > 0 ) {
830  thering = rechits[0]->cscDetId().ring();
831  thestation = rechits[0]->cscDetId().station();
832  thecham = rechits[0]->cscDetId().chamber();
833  }
834 
835  // std::cout<<"We are in Station/ring/chamber: "<<thestation <<" "<< thering<<" "<< thecham<<std::endl;
836 
837  // Cut-off parameter - don't reconstruct segments with less than X hits
838  if( n_layers_occupied_tot < minHitsPerSegment ) {
839  return segmentInChamber;
840  }
841 
842  // Start building all possible hit combinations:
843 
844  // loop over the six chamber layers and form segment candidates from the available hits:
845 
846  for(int layer = 0; layer < 6; ++layer) {
847 
848  // *****************************************************************
849  // *** Set missed layer counter here (not currently implemented) ***
850  // *****************************************************************
851  // if( PAhits_onLayer[layer].size() == 0 ) {
852  // n_layers_missed_tot += 1;
853  // }
854 
855  if( PAhits_onLayer[layer].size() > 0 ) {
856  n_layers_processed += 1;
857  }
858 
859  // Save the size of the protosegment before hits were added on the current layer
860  int orig_number_of_psegs = Psegments.size();
861  int orig_number_of_noL1_psegs = Psegments_noL1.size();
862  int orig_number_of_noL2_psegs = Psegments_noL2.size();
863  int orig_number_of_noL3_psegs = Psegments_noL3.size();
864  int orig_number_of_noL4_psegs = Psegments_noL4.size();
865  int orig_number_of_noL5_psegs = Psegments_noL5.size();
866  int orig_number_of_noL6_psegs = Psegments_noL6.size();
867 
868  // loop over the hits on the layer and initiate protosegments or add hits to protosegments
869  for(int hit = 0; hit < int(PAhits_onLayer[layer].size()); ++hit) { // loop all hits on the Layer number "layer"
870 
871  // create protosegments from all hits on the first layer with hits
872  if( orig_number_of_psegs == 0 ) { // would be faster to turn this around - ask for "orig_number_of_psegs != 0"
873 
874  Psegments_hits.push_back(PAhits_onLayer[layer][hit]);
875 
876  Psegments.push_back(Psegments_hits);
877  Psegments_noL6.push_back(Psegments_hits);
878  Psegments_noL5.push_back(Psegments_hits);
879  Psegments_noL4.push_back(Psegments_hits);
880  Psegments_noL3.push_back(Psegments_hits);
881  Psegments_noL2.push_back(Psegments_hits);
882 
883  // Initialize weights corresponding to this segment for first hit (with 0)
884 
885  curv_A.push_back(0.0);
886  curv_noL6_A.push_back(0.0);
887  curv_noL5_A.push_back(0.0);
888  curv_noL4_A.push_back(0.0);
889  curv_noL3_A.push_back(0.0);
890  curv_noL2_A.push_back(0.0);
891 
892  weight_A.push_back(0.0);
893  weight_noL6_A.push_back(0.0);
894  weight_noL5_A.push_back(0.0);
895  weight_noL4_A.push_back(0.0);
896  weight_noL3_A.push_back(0.0);
897  weight_noL2_A.push_back(0.0);
898 
899  weight_B.push_back(0.0);
900  weight_noL6_B.push_back(0.0);
901  weight_noL5_B.push_back(0.0);
902  weight_noL4_B.push_back(0.0);
903  weight_noL3_B.push_back(0.0);
904  weight_noL2_B.push_back(0.0);
905 
906  // reset array for next hit on next layer
907  Psegments_hits .clear();
908  }
909  else {
910  if( orig_number_of_noL1_psegs == 0 ) {
911 
912  Psegments_hits.push_back(PAhits_onLayer[layer][hit]);
913 
914  Psegments_noL1.push_back(Psegments_hits);
915 
916  // Initialize weight corresponding to this segment for first hit (with 0)
917 
918  curv_noL1_A.push_back(0.0);
919 
920  weight_noL1_A.push_back(0.0);
921 
922  weight_noL1_B.push_back(0.0);
923 
924  // reset array for next hit on next layer
925  Psegments_hits .clear();
926 
927  }
928 
929  // loop over the protosegments and create a new protosegments for each hit-1 on this layer
930 
931  for( int pseg = 0; pseg < orig_number_of_psegs; ++pseg ) {
932 
933  int pseg_pos = (pseg)+((hit)*orig_number_of_psegs);
934  int pseg_noL1_pos = (pseg)+((hit)*orig_number_of_noL1_psegs);
935  int pseg_noL2_pos = (pseg)+((hit)*orig_number_of_noL2_psegs);
936  int pseg_noL3_pos = (pseg)+((hit)*orig_number_of_noL3_psegs);
937  int pseg_noL4_pos = (pseg)+((hit)*orig_number_of_noL4_psegs);
938  int pseg_noL5_pos = (pseg)+((hit)*orig_number_of_noL5_psegs);
939  int pseg_noL6_pos = (pseg)+((hit)*orig_number_of_noL6_psegs);
940 
941  // - Loop all psegs.
942  // - If not last hit, clone existing protosegments (PAhits_onLayer[layer].size()-1) times
943  // - then add the new hits
944 
945  if( ! (hit == int(PAhits_onLayer[layer].size()-1)) ) { // not the last hit - prepare (copy) new protosegments for the following hits
946  // clone psegs (to add next hits or last hit on layer):
947 
948  Psegments.push_back(Psegments[ pseg_pos ]);
949  if (n_layers_processed != 2 && pseg < orig_number_of_noL1_psegs) Psegments_noL1.push_back(Psegments_noL1[ pseg_noL1_pos ]);
950  if (n_layers_processed != 2 && pseg < orig_number_of_noL2_psegs) Psegments_noL2.push_back(Psegments_noL2[ pseg_noL2_pos ]);
951  if (n_layers_processed != 3 && pseg < orig_number_of_noL3_psegs) Psegments_noL3.push_back(Psegments_noL3[ pseg_noL3_pos ]);
952  if (n_layers_processed != 4 && pseg < orig_number_of_noL4_psegs) Psegments_noL4.push_back(Psegments_noL4[ pseg_noL4_pos ]);
953  if (n_layers_processed != 5 && pseg < orig_number_of_noL5_psegs) Psegments_noL5.push_back(Psegments_noL5[ pseg_noL5_pos ]);
954  if (n_layers_processed != 6 && pseg < orig_number_of_noL6_psegs) Psegments_noL6.push_back(Psegments_noL6[ pseg_noL6_pos ]);
955  // clone weight corresponding to this segment too
956  weight_A.push_back(weight_A[ pseg_pos ]);
957  if (n_layers_processed != 2 && pseg < orig_number_of_noL1_psegs) weight_noL1_A.push_back(weight_noL1_A[ pseg_noL1_pos ]);
958  if (n_layers_processed != 2 && pseg < orig_number_of_noL2_psegs) weight_noL2_A.push_back(weight_noL2_A[ pseg_noL2_pos ]);
959  if (n_layers_processed != 3 && pseg < orig_number_of_noL3_psegs) weight_noL3_A.push_back(weight_noL3_A[ pseg_noL3_pos ]);
960  if (n_layers_processed != 4 && pseg < orig_number_of_noL4_psegs) weight_noL4_A.push_back(weight_noL4_A[ pseg_noL4_pos ]);
961  if (n_layers_processed != 5 && pseg < orig_number_of_noL5_psegs) weight_noL5_A.push_back(weight_noL5_A[ pseg_noL5_pos ]);
962  if (n_layers_processed != 6 && pseg < orig_number_of_noL6_psegs) weight_noL6_A.push_back(weight_noL6_A[ pseg_noL6_pos ]);
963  // clone curvature variable corresponding to this segment too
964  curv_A.push_back(curv_A[ pseg_pos ]);
965  if (n_layers_processed != 2 && pseg < orig_number_of_noL1_psegs) curv_noL1_A.push_back(curv_noL1_A[ pseg_noL1_pos ]);
966  if (n_layers_processed != 2 && pseg < orig_number_of_noL2_psegs) curv_noL2_A.push_back(curv_noL2_A[ pseg_noL2_pos ]);
967  if (n_layers_processed != 3 && pseg < orig_number_of_noL3_psegs) curv_noL3_A.push_back(curv_noL3_A[ pseg_noL3_pos ]);
968  if (n_layers_processed != 4 && pseg < orig_number_of_noL4_psegs) curv_noL4_A.push_back(curv_noL4_A[ pseg_noL4_pos ]);
969  if (n_layers_processed != 5 && pseg < orig_number_of_noL5_psegs) curv_noL5_A.push_back(curv_noL5_A[ pseg_noL5_pos ]);
970  if (n_layers_processed != 6 && pseg < orig_number_of_noL6_psegs) curv_noL6_A.push_back(curv_noL6_A[ pseg_noL6_pos ]);
971  // clone "y"-weight corresponding to this segment too
972  weight_B.push_back(weight_B[ pseg_pos ]);
973  if (n_layers_processed != 2 && pseg < orig_number_of_noL1_psegs) weight_noL1_B.push_back(weight_noL1_B[ pseg_noL1_pos ]);
974  if (n_layers_processed != 2 && pseg < orig_number_of_noL2_psegs) weight_noL2_B.push_back(weight_noL2_B[ pseg_noL2_pos ]);
975  if (n_layers_processed != 3 && pseg < orig_number_of_noL3_psegs) weight_noL3_B.push_back(weight_noL3_B[ pseg_noL3_pos ]);
976  if (n_layers_processed != 4 && pseg < orig_number_of_noL4_psegs) weight_noL4_B.push_back(weight_noL4_B[ pseg_noL4_pos ]);
977  if (n_layers_processed != 5 && pseg < orig_number_of_noL5_psegs) weight_noL5_B.push_back(weight_noL5_B[ pseg_noL5_pos ]);
978  if (n_layers_processed != 6 && pseg < orig_number_of_noL6_psegs) weight_noL6_B.push_back(weight_noL6_B[ pseg_noL6_pos ]);
979  }
980  // add hits to original pseg:
981  Psegments[ pseg_pos ].push_back(PAhits_onLayer[ layer ][ hit ]);
982  if (n_layers_processed != 2 && pseg < orig_number_of_noL1_psegs) Psegments_noL1[ pseg_noL1_pos ].push_back(PAhits_onLayer[ layer ][ hit ]);
983  if (n_layers_processed != 2 && pseg < orig_number_of_noL2_psegs) Psegments_noL2[ pseg_noL2_pos ].push_back(PAhits_onLayer[ layer ][ hit ]);
984  if (n_layers_processed != 3 && pseg < orig_number_of_noL3_psegs) Psegments_noL3[ pseg_noL3_pos ].push_back(PAhits_onLayer[ layer ][ hit ]);
985  if (n_layers_processed != 4 && pseg < orig_number_of_noL4_psegs) Psegments_noL4[ pseg_noL4_pos ].push_back(PAhits_onLayer[ layer ][ hit ]);
986  if (n_layers_processed != 5 && pseg < orig_number_of_noL5_psegs) Psegments_noL5[ pseg_noL5_pos ].push_back(PAhits_onLayer[ layer ][ hit ]);
987  if (n_layers_processed != 6 && pseg < orig_number_of_noL6_psegs) Psegments_noL6[ pseg_noL6_pos ].push_back(PAhits_onLayer[ layer ][ hit ]);
988 
989  // calculate/update the weight (only for >2 hits on psegment):
990 
991  if( Psegments[ pseg_pos ].size() > 2 ) {
992 
993  // looks more exciting than it is. Here the weight is calculated: it is the difference between the slope formed by the last two hits
994  // and the slope formed by the preceding pair of hits (x differences divided by the layer separation of the corresponding hits). Please refer to twiki page XXXX or CMS Note YYY (and use layer_distance)
995 
996  weight_A[ pseg_pos ] += theWeight(
997  (*(Psegments[ pseg_pos ].end()-1 ))->localPosition().x(),
998  (*(Psegments[ pseg_pos ].end()-2 ))->localPosition().x(),
999  (*(Psegments[ pseg_pos ].end()-3 ))->localPosition().x(),
1000  float((*(Psegments[ pseg_pos ].end()-1))->cscDetId().layer()),
1001  float((*(Psegments[ pseg_pos ].end()-2))->cscDetId().layer()),
1002  float((*(Psegments[ pseg_pos ].end()-3))->cscDetId().layer())
1003  );
1004 
1005  weight_B[ pseg_pos ] += theWeight(
1006  (*(Psegments[ pseg_pos ].end()-1 ))->localPosition().y(),
1007  (*(Psegments[ pseg_pos ].end()-2 ))->localPosition().y(),
1008  (*(Psegments[ pseg_pos ].end()-3 ))->localPosition().y(),
1009  float((*(Psegments[ pseg_pos ].end()-1))->cscDetId().layer()),
1010  float((*(Psegments[ pseg_pos ].end()-2))->cscDetId().layer()),
1011  float((*(Psegments[ pseg_pos ].end()-3))->cscDetId().layer())
1012  );
1013 
1014  // if we have picked up the last hit go looking for pseg with the lowest (and second lowest?) weight
1015 
1016  if(int(Psegments[ pseg_pos ].size()) == n_layers_occupied_tot) {
1017 
1018  curv_A[ pseg_pos ] += theWeight(
1019  (*(Psegments[ pseg_pos ].end()-1 ))->localPosition().x(),
1020  (*(Psegments[ pseg_pos ].end()-midlayer_pointer[n_layers_occupied_tot-1] ))->localPosition().x(),
1021  (*(Psegments[ pseg_pos ].end()-n_layers_occupied_tot ))->localPosition().x(),
1022  float((*(Psegments[ pseg_pos ].end()-1))->cscDetId().layer()),
1023  float((*(Psegments[ pseg_pos ].end()-midlayer_pointer[n_layers_occupied_tot-1] ))->cscDetId().layer()),
1024  float((*(Psegments[ pseg_pos ].end()-n_layers_occupied_tot ))->cscDetId().layer())
1025  );
1026 
1027  if (curv_A[ pseg_pos ] > curvePenaltyThreshold) weight_A[ pseg_pos ] = weight_A[ pseg_pos ] * curvePenalty;
1028 
1029  if (weight_B[ pseg_pos ] > a_yweightPenaltyThreshold[thestation][thering]) weight_A[ pseg_pos ] = weight_A[ pseg_pos ] * yweightPenalty;
1030 
1031  if (weight_A[ pseg_pos ] < min_weight_A ) {
1032  min_weight_A = weight_A[ pseg_pos ];
1033  best_weight_B = weight_B[ pseg_pos ];
1034  best_curv_A = curv_A[ pseg_pos ];
1035  best_pseg = pseg_pos ;
1036  }
1037 
1038  }
1039 
1040  // alternative: fill map with weight and pseg (which is already ordered)? Seems a very good tool to go looking for segments from.
1041  // As I understand, the segments would be inserted according to their weight, so the list would "automatically" be sorted.
1042 
1043  }
1044 
1045  if ( n_layers_occupied_tot > 3 ) {
1046  if (pseg < orig_number_of_noL1_psegs && (n_layers_processed != 2)) {
1047  if(( Psegments_noL1[ pseg_noL1_pos ].size() > 2 ) ) {
1048 
1049  // looks more exciting than it is. Here the weight is calculated: it is the difference between the slope formed by the last two hits
1050  // and the slope formed by the preceding pair of hits (x differences divided by the layer separation of the corresponding hits). Please refer to twiki page XXXX or CMS Note YYY (and use layer_distance)
1051 
1052  weight_noL1_A[ pseg_noL1_pos ] += theWeight(
1053  (*(Psegments_noL1[ pseg_noL1_pos ].end()-1 ))->localPosition().x(),
1054  (*(Psegments_noL1[ pseg_noL1_pos ].end()-2 ))->localPosition().x(),
1055  (*(Psegments_noL1[ pseg_noL1_pos ].end()-3 ))->localPosition().x(),
1056  float((*(Psegments_noL1[ pseg_noL1_pos ].end()-1))->cscDetId().layer()),
1057  float((*(Psegments_noL1[ pseg_noL1_pos ].end()-2))->cscDetId().layer()),
1058  float((*(Psegments_noL1[ pseg_noL1_pos ].end()-3))->cscDetId().layer())
1059  );
1060 
1061  weight_noL1_B[ pseg_noL1_pos ] += theWeight(
1062  (*(Psegments_noL1[ pseg_noL1_pos ].end()-1 ))->localPosition().y(),
1063  (*(Psegments_noL1[ pseg_noL1_pos ].end()-2 ))->localPosition().y(),
1064  (*(Psegments_noL1[ pseg_noL1_pos ].end()-3 ))->localPosition().y(),
1065  float((*(Psegments_noL1[ pseg_noL1_pos ].end()-1))->cscDetId().layer()),
1066  float((*(Psegments_noL1[ pseg_noL1_pos ].end()-2))->cscDetId().layer()),
1067  float((*(Psegments_noL1[ pseg_noL1_pos ].end()-3))->cscDetId().layer())
1068  );
1069 
1070  //if we have picked up the last hit go looking for pseg with the lowest (and second lowest?) weight
1071 
1072  if(int(Psegments_noL1[ pseg_noL1_pos ].size()) == n_layers_occupied_tot -1 ) {
1073 
1074  curv_noL1_A[ pseg_noL1_pos ] += theWeight(
1075  (*(Psegments_noL1[ pseg_noL1_pos ].end()-1 ))->localPosition().x(),
1076  (*(Psegments_noL1[ pseg_noL1_pos ].end()-midlayer_pointer[n_layers_occupied_tot-2] ))->localPosition().x(),
1077  (*(Psegments_noL1[ pseg_noL1_pos ].end()-(n_layers_occupied_tot-1) ))->localPosition().x(),
1078  float((*(Psegments_noL1[ pseg_noL1_pos ].end()-1 ))->cscDetId().layer()),
1079  float((*(Psegments_noL1[ pseg_noL1_pos ].end()-midlayer_pointer[n_layers_occupied_tot-2] ))->cscDetId().layer()),
1080  float((*(Psegments_noL1[ pseg_noL1_pos ].end()-(n_layers_occupied_tot-1) ))->cscDetId().layer())
1081  );
1082 
1083  if (curv_noL1_A[ pseg_noL1_pos ] > curvePenaltyThreshold) weight_noL1_A[ pseg_noL1_pos ] = weight_noL1_A[ pseg_noL1_pos ] * curvePenalty;
1084 
1085  if (weight_noL1_B[ pseg_noL1_pos ] > a_yweightPenaltyThreshold[thestation][thering])
1086  weight_noL1_A[ pseg_noL1_pos ] = weight_noL1_A[ pseg_noL1_pos ] * yweightPenalty;
1087 
1088  if (weight_noL1_A[ pseg_noL1_pos ] < min_weight_noLx_A ) {
1089  min_weight_noLx_A = weight_noL1_A[ pseg_noL1_pos ];
1090  best_weight_noLx_B = weight_noL1_B[ pseg_noL1_pos ];
1091  best_curv_noLx_A = curv_noL1_A[ pseg_noL1_pos ];
1092  best_noLx_pseg = pseg_noL1_pos;
1093  best_Layer_noLx = 1;
1094  }
1095 
1096  }
1097 
1098  // alternative: fill map with weight and pseg (which is already ordered)? Seems a very good tool to go looking for segments from.
1099  // As I understand, the segments would be inserted according to their weight, so the list would "automatically" be sorted.
1100 
1101  }
1102  }
1103  }
1104 
1105  if ( n_layers_occupied_tot > 3 ) {
1106  if (pseg < orig_number_of_noL2_psegs && ( n_layers_processed != 2 )) {
1107  if(( Psegments_noL2[ pseg_noL2_pos ].size() > 2 )) {
1108 
1109  // looks more exciting than it is. Here the weight is calculated: it is the difference between the slope formed by the last two hits
1110  // and the slope formed by the preceding pair of hits (x differences divided by the layer separation of the corresponding hits). Please refer to twiki page XXXX or CMS Note YYY (and use layer_distance)
1111 
1112  weight_noL2_A[ pseg_noL2_pos ] += theWeight(
1113  (*(Psegments_noL2[ pseg_noL2_pos ].end()-1 ))->localPosition().x(),
1114  (*(Psegments_noL2[ pseg_noL2_pos ].end()-2 ))->localPosition().x(),
1115  (*(Psegments_noL2[ pseg_noL2_pos ].end()-3 ))->localPosition().x(),
1116  float((*(Psegments_noL2[ pseg_noL2_pos ].end()-1))->cscDetId().layer()),
1117  float((*(Psegments_noL2[ pseg_noL2_pos ].end()-2))->cscDetId().layer()),
1118  float((*(Psegments_noL2[ pseg_noL2_pos ].end()-3))->cscDetId().layer())
1119  );
1120 
1121  weight_noL2_B[ pseg_noL2_pos ] += theWeight(
1122  (*(Psegments_noL2[ pseg_noL2_pos ].end()-1 ))->localPosition().y(),
1123  (*(Psegments_noL2[ pseg_noL2_pos ].end()-2 ))->localPosition().y(),
1124  (*(Psegments_noL2[ pseg_noL2_pos ].end()-3 ))->localPosition().y(),
1125  float((*(Psegments_noL2[ pseg_noL2_pos ].end()-1))->cscDetId().layer()),
1126  float((*(Psegments_noL2[ pseg_noL2_pos ].end()-2))->cscDetId().layer()),
1127  float((*(Psegments_noL2[ pseg_noL2_pos ].end()-3))->cscDetId().layer())
1128  );
1129 
1130  //if we have picked up the last hit go looking for pseg with the lowest (and second lowest?) weight
1131 
1132  if(int(Psegments_noL2[ pseg_noL2_pos ].size()) == n_layers_occupied_tot -1 ) {
1133 
1134  curv_noL2_A[ pseg_noL2_pos ] += theWeight(
1135  (*(Psegments_noL2[ pseg_noL2_pos ].end()-1 ))->localPosition().x(),
1136  (*(Psegments_noL2[ pseg_noL2_pos ].end()-midlayer_pointer[n_layers_occupied_tot-2] ))->localPosition().x(),
1137  (*(Psegments_noL2[ pseg_noL2_pos ].end()-(n_layers_occupied_tot-1) ))->localPosition().x(),
1138  float((*(Psegments_noL2[ pseg_noL2_pos ].end()-1 ))->cscDetId().layer()),
1139  float((*(Psegments_noL2[ pseg_noL2_pos ].end()-midlayer_pointer[n_layers_occupied_tot-2] ))->cscDetId().layer()),
1140  float((*(Psegments_noL2[ pseg_noL2_pos ].end()-(n_layers_occupied_tot-1) ))->cscDetId().layer())
1141  );
1142 
1143  if (curv_noL2_A[ pseg_noL2_pos ] > curvePenaltyThreshold) weight_noL2_A[ pseg_noL2_pos ] = weight_noL2_A[ pseg_noL2_pos ] * curvePenalty;
1144 
1145  if (weight_noL2_B[ pseg_noL2_pos ] > a_yweightPenaltyThreshold[thestation][thering])
1146  weight_noL2_A[ pseg_noL2_pos ] = weight_noL2_A[ pseg_noL2_pos ] * yweightPenalty;
1147 
1148  if (weight_noL2_A[ pseg_noL2_pos ] < min_weight_noLx_A ) {
1149  min_weight_noLx_A = weight_noL2_A[ pseg_noL2_pos ];
1150  best_weight_noLx_B = weight_noL2_B[ pseg_noL2_pos ];
1151  best_curv_noLx_A = curv_noL2_A[ pseg_noL2_pos ];
1152  best_noLx_pseg = pseg_noL2_pos;
1153  best_Layer_noLx = 2;
1154  }
1155 
1156  }
1157 
1158  // alternative: fill map with weight and pseg (which is already ordered)? Seems a very good tool to go looking for segments from.
1159  // As I understand, the segments would be inserted according to their weight, so the list would "automatically" be sorted.
1160 
1161  }
1162  }
1163  }
1164 
1165  if ( n_layers_occupied_tot > 3 ) {
1166  if (pseg < orig_number_of_noL3_psegs && ( n_layers_processed != 3 )) {
1167  if(( Psegments_noL3[ pseg_noL3_pos ].size() > 2 )) {
1168 
1169  // looks more exciting than it is. Here the weight is calculated: it is the difference between the slope formed by the last two hits
1170  // and the slope formed by the preceding pair of hits (x differences divided by the layer separation of the corresponding hits). Please refer to twiki page XXXX or CMS Note YYY (and use layer_distance)
1171 
1172  weight_noL3_A[ pseg_noL3_pos ] += theWeight(
1173  (*(Psegments_noL3[ pseg_noL3_pos ].end()-1 ))->localPosition().x(),
1174  (*(Psegments_noL3[ pseg_noL3_pos ].end()-2 ))->localPosition().x(),
1175  (*(Psegments_noL3[ pseg_noL3_pos ].end()-3 ))->localPosition().x(),
1176  float((*(Psegments_noL3[ pseg_noL3_pos ].end()-1))->cscDetId().layer()),
1177  float((*(Psegments_noL3[ pseg_noL3_pos ].end()-2))->cscDetId().layer()),
1178  float((*(Psegments_noL3[ pseg_noL3_pos ].end()-3))->cscDetId().layer())
1179  );
1180 
1181  weight_noL3_B[ pseg_noL3_pos ] += theWeight(
1182  (*(Psegments_noL3[ pseg_noL3_pos ].end()-1 ))->localPosition().y(),
1183  (*(Psegments_noL3[ pseg_noL3_pos ].end()-2 ))->localPosition().y(),
1184  (*(Psegments_noL3[ pseg_noL3_pos ].end()-3 ))->localPosition().y(),
1185  float((*(Psegments_noL3[ pseg_noL3_pos ].end()-1))->cscDetId().layer()),
1186  float((*(Psegments_noL3[ pseg_noL3_pos ].end()-2))->cscDetId().layer()),
1187  float((*(Psegments_noL3[ pseg_noL3_pos ].end()-3))->cscDetId().layer())
1188  );
1189 
1190  //if we have picked up the last hit go looking for pseg with the lowest (and second lowest?) weight
1191 
1192  if(int(Psegments_noL3[ pseg_noL3_pos ].size()) == n_layers_occupied_tot -1 ) {
1193 
1194  curv_noL3_A[ pseg_noL3_pos ] += theWeight(
1195  (*(Psegments_noL3[ pseg_noL3_pos ].end()-1 ))->localPosition().x(),
1196  (*(Psegments_noL3[ pseg_noL3_pos ].end()-midlayer_pointer[n_layers_occupied_tot-2] ))->localPosition().x(),
1197  (*(Psegments_noL3[ pseg_noL3_pos ].end()-(n_layers_occupied_tot-1) ))->localPosition().x(),
1198  float((*(Psegments_noL3[ pseg_noL3_pos ].end()-1 ))->cscDetId().layer()),
1199  float((*(Psegments_noL3[ pseg_noL3_pos ].end()-midlayer_pointer[n_layers_occupied_tot-2] ))->cscDetId().layer()),
1200  float((*(Psegments_noL3[ pseg_noL3_pos ].end()-(n_layers_occupied_tot-1) ))->cscDetId().layer())
1201  );
1202 
1203  if (curv_noL3_A[ pseg_noL3_pos ] > curvePenaltyThreshold) weight_noL3_A[ pseg_noL3_pos ] = weight_noL3_A[ pseg_noL3_pos ] * curvePenalty;
1204 
1205  if (weight_noL3_B[ pseg_noL3_pos ] > a_yweightPenaltyThreshold[thestation][thering])
1206  weight_noL3_A[ pseg_noL3_pos ] = weight_noL3_A[ pseg_noL3_pos ] * yweightPenalty;
1207 
1208  if (weight_noL3_A[ pseg_noL3_pos ] < min_weight_noLx_A ) {
1209  min_weight_noLx_A = weight_noL3_A[ pseg_noL3_pos ];
1210  best_weight_noLx_B = weight_noL3_B[ pseg_noL3_pos ];
1211  best_curv_noLx_A = curv_noL3_A[ pseg_noL3_pos ];
1212  best_noLx_pseg = pseg_noL3_pos;
1213  best_Layer_noLx = 3;
1214  }
1215 
1216  }
1217 
1218  // alternative: fill map with weight and pseg (which is already ordered)? Seems a very good tool to go looking for segments from.
1219  // As I understand, the segments would be inserted according to their weight, so the list would "automatically" be sorted.
1220 
1221  }
1222  }
1223  }
1224 
1225  if ( n_layers_occupied_tot > 3 ) {
1226  if (pseg < orig_number_of_noL4_psegs && ( n_layers_processed != 4 )) {
1227  if(( Psegments_noL4[ pseg_noL4_pos ].size() > 2 )) {
1228 
1229  // looks more exciting than it is. Here the weight is calculated: it is the difference between the slope formed by the last two hits
1230  // and the slope formed by the preceding pair of hits (x differences divided by the layer separation of the corresponding hits). Please refer to twiki page XXXX or CMS Note YYY (and use layer_distance)
1231 
1232  weight_noL4_A[ pseg_noL4_pos ] += theWeight(
1233  (*(Psegments_noL4[ pseg_noL4_pos ].end()-1 ))->localPosition().x(),
1234  (*(Psegments_noL4[ pseg_noL4_pos ].end()-2 ))->localPosition().x(),
1235  (*(Psegments_noL4[ pseg_noL4_pos ].end()-3 ))->localPosition().x(),
1236  float((*(Psegments_noL4[ pseg_noL4_pos ].end()-1))->cscDetId().layer()),
1237  float((*(Psegments_noL4[ pseg_noL4_pos ].end()-2))->cscDetId().layer()),
1238  float((*(Psegments_noL4[ pseg_noL4_pos ].end()-3))->cscDetId().layer())
1239  );
1240 
1241  weight_noL4_B[ pseg_noL4_pos ] += theWeight(
1242  (*(Psegments_noL4[ pseg_noL4_pos ].end()-1 ))->localPosition().y(),
1243  (*(Psegments_noL4[ pseg_noL4_pos ].end()-2 ))->localPosition().y(),
1244  (*(Psegments_noL4[ pseg_noL4_pos ].end()-3 ))->localPosition().y(),
1245  float((*(Psegments_noL4[ pseg_noL4_pos ].end()-1))->cscDetId().layer()),
1246  float((*(Psegments_noL4[ pseg_noL4_pos ].end()-2))->cscDetId().layer()),
1247  float((*(Psegments_noL4[ pseg_noL4_pos ].end()-3))->cscDetId().layer())
1248  );
1249 
1250  //if we have picked up the last hit go looking for pseg with the lowest (and second lowest?) weight
1251 
1252  if(int(Psegments_noL4[ pseg_noL4_pos ].size()) == n_layers_occupied_tot -1 ) {
1253 
1254  curv_noL4_A[ pseg_noL4_pos ] += theWeight(
1255  (*(Psegments_noL4[ pseg_noL4_pos ].end()-1 ))->localPosition().x(),
1256  (*(Psegments_noL4[ pseg_noL4_pos ].end()-midlayer_pointer[n_layers_occupied_tot-2] ))->localPosition().x(),
1257  (*(Psegments_noL4[ pseg_noL4_pos ].end()-(n_layers_occupied_tot-1) ))->localPosition().x(),
1258  float((*(Psegments_noL4[ pseg_noL4_pos ].end()-1 ))->cscDetId().layer()),
1259  float((*(Psegments_noL4[ pseg_noL4_pos ].end()-midlayer_pointer[n_layers_occupied_tot-2] ))->cscDetId().layer()),
1260  float((*(Psegments_noL4[ pseg_noL4_pos ].end()-(n_layers_occupied_tot-1) ))->cscDetId().layer())
1261  );
1262 
1263  if (curv_noL4_A[ pseg_noL4_pos ] > curvePenaltyThreshold) weight_noL4_A[ pseg_noL4_pos ] = weight_noL4_A[ pseg_noL4_pos ] * curvePenalty;
1264 
1265  if (weight_noL4_B[ pseg_noL4_pos ] > a_yweightPenaltyThreshold[thestation][thering])
1266  weight_noL4_A[ pseg_noL4_pos ] = weight_noL4_A[ pseg_noL4_pos ] * yweightPenalty;
1267 
1268  if (weight_noL4_A[ pseg_noL4_pos ] < min_weight_noLx_A ) {
1269  min_weight_noLx_A = weight_noL4_A[ pseg_noL4_pos ];
1270  best_weight_noLx_B = weight_noL4_B[ pseg_noL4_pos ];
1271  best_curv_noLx_A = curv_noL4_A[ pseg_noL4_pos ];
1272  best_noLx_pseg = pseg_noL4_pos;
1273  best_Layer_noLx = 4;
1274  }
1275 
1276  }
1277 
1278  // alternative: fill map with weight and pseg (which is already ordered)? Seems a very good tool to go looking for segments from.
1279  // As I understand, the segments would be inserted according to their weight, so the list would "automatically" be sorted.
1280 
1281  }
1282  }
1283  }
1284 
1285  if ( n_layers_occupied_tot > 4 ) {
1286  if (pseg < orig_number_of_noL5_psegs && ( n_layers_processed != 5 )) {
1287  if(( Psegments_noL5[ pseg_noL5_pos ].size() > 2 )){
1288 
1289  // Same weight update as above: difference between the x-slope of the last pair of hits and the x-slope of the preceding
1290  // pair (x differences divided by the layer separation of the hits involved). Please refer to twiki page XXXX or CMS Note YYY (and use layer_distance)
1291 
1292  weight_noL5_A[ pseg_noL5_pos ] += theWeight(
1293  (*(Psegments_noL5[ pseg_noL5_pos ].end()-1 ))->localPosition().x(),
1294  (*(Psegments_noL5[ pseg_noL5_pos ].end()-2 ))->localPosition().x(),
1295  (*(Psegments_noL5[ pseg_noL5_pos ].end()-3 ))->localPosition().x(),
1296  float((*(Psegments_noL5[ pseg_noL5_pos ].end()-1))->cscDetId().layer()),
1297  float((*(Psegments_noL5[ pseg_noL5_pos ].end()-2))->cscDetId().layer()),
1298  float((*(Psegments_noL5[ pseg_noL5_pos ].end()-3))->cscDetId().layer())
1299  );
1300 
1301  weight_noL5_B[ pseg_noL5_pos ] += theWeight(
1302  (*(Psegments_noL5[ pseg_noL5_pos ].end()-1 ))->localPosition().y(),
1303  (*(Psegments_noL5[ pseg_noL5_pos ].end()-2 ))->localPosition().y(),
1304  (*(Psegments_noL5[ pseg_noL5_pos ].end()-3 ))->localPosition().y(),
1305  float((*(Psegments_noL5[ pseg_noL5_pos ].end()-1))->cscDetId().layer()),
1306  float((*(Psegments_noL5[ pseg_noL5_pos ].end()-2))->cscDetId().layer()),
1307  float((*(Psegments_noL5[ pseg_noL5_pos ].end()-3))->cscDetId().layer())
1308  );
1309 
1310  //if we have picked up the last hit go looking for pseg with the lowest (and second lowest?) weight
1311 
1312  if(int(Psegments_noL5[ pseg_noL5_pos ].size()) == n_layers_occupied_tot -1 ) {
1313 
1314  curv_noL5_A[ pseg_noL5_pos ] += theWeight(
1315  (*(Psegments_noL5[ pseg_noL5_pos ].end()-1 ))->localPosition().x(),
1316  (*(Psegments_noL5[ pseg_noL5_pos ].end()-midlayer_pointer[n_layers_occupied_tot-2] ))->localPosition().x(),
1317  (*(Psegments_noL5[ pseg_noL5_pos ].end()-(n_layers_occupied_tot-1) ))->localPosition().x(),
1318  float((*(Psegments_noL5[ pseg_noL5_pos ].end()-1 ))->cscDetId().layer()),
1319  float((*(Psegments_noL5[ pseg_noL5_pos ].end()-midlayer_pointer[n_layers_occupied_tot-2] ))->cscDetId().layer()),
1320  float((*(Psegments_noL5[ pseg_noL5_pos ].end()-(n_layers_occupied_tot-1) ))->cscDetId().layer())
1321  );
1322 
1323  if (curv_noL5_A[ pseg_noL5_pos ] > curvePenaltyThreshold) weight_noL5_A[ pseg_noL5_pos ] = weight_noL5_A[ pseg_noL5_pos ] * curvePenalty;
1324 
1325  if (weight_noL5_B[ pseg_noL5_pos ] > a_yweightPenaltyThreshold[thestation][thering])
1326  weight_noL5_A[ pseg_noL5_pos ] = weight_noL5_A[ pseg_noL5_pos ] * yweightPenalty;
1327 
1328  if (weight_noL5_A[ pseg_noL5_pos ] < min_weight_noLx_A ) {
1329  min_weight_noLx_A = weight_noL5_A[ pseg_noL5_pos ];
1330  best_weight_noLx_B = weight_noL5_B[ pseg_noL5_pos ];
1331  best_curv_noLx_A = curv_noL5_A[ pseg_noL5_pos ];
1332  best_noLx_pseg = pseg_noL5_pos;
1333  best_Layer_noLx = 5;
1334  }
1335 
1336  }
1337 
1338  // alternative: fill map with weight and pseg (which is already ordered)? Seems a very good tool to go looking for segments from.
1339  // As I understand, the segments would be inserted according to their weight, so the list would "automatically" be sorted.
1340 
1341  }
1342  }
1343  }
1344 
1345  if ( n_layers_occupied_tot > 5 ) {
1346  if (pseg < orig_number_of_noL6_psegs && ( n_layers_processed != 6 )) {
1347  if(( Psegments_noL6[ pseg_noL6_pos ].size() > 2 )){
1348 
1349  // Same weight update as above: difference between the x-slope of the last pair of hits and the x-slope of the preceding
1350  // pair (x differences divided by the layer separation of the hits involved). Please refer to twiki page XXXX or CMS Note YYY (and use layer_distance)
1351 
1352  weight_noL6_A[ pseg_noL6_pos ] += theWeight(
1353  (*(Psegments_noL6[ pseg_noL6_pos ].end()-1 ))->localPosition().x(),
1354  (*(Psegments_noL6[ pseg_noL6_pos ].end()-2 ))->localPosition().x(),
1355  (*(Psegments_noL6[ pseg_noL6_pos ].end()-3 ))->localPosition().x(),
1356  float((*(Psegments_noL6[ pseg_noL6_pos ].end()-1))->cscDetId().layer()),
1357  float((*(Psegments_noL6[ pseg_noL6_pos ].end()-2))->cscDetId().layer()),
1358  float((*(Psegments_noL6[ pseg_noL6_pos ].end()-3))->cscDetId().layer())
1359  );
1360 
1361  weight_noL6_B[ pseg_noL6_pos ] += theWeight(
1362  (*(Psegments_noL6[ pseg_noL6_pos ].end()-1 ))->localPosition().y(),
1363  (*(Psegments_noL6[ pseg_noL6_pos ].end()-2 ))->localPosition().y(),
1364  (*(Psegments_noL6[ pseg_noL6_pos ].end()-3 ))->localPosition().y(),
1365  float((*(Psegments_noL6[ pseg_noL6_pos ].end()-1))->cscDetId().layer()),
1366  float((*(Psegments_noL6[ pseg_noL6_pos ].end()-2))->cscDetId().layer()),
1367  float((*(Psegments_noL6[ pseg_noL6_pos ].end()-3))->cscDetId().layer())
1368  );
1369 
1370  //if we have picked up the last hit go looking for pseg with the lowest (and second lowest?) weight
1371 
1372  if(int(Psegments_noL6[ pseg_noL6_pos ].size()) == n_layers_occupied_tot -1 ) {
1373 
1374  curv_noL6_A[ pseg_noL6_pos ] += theWeight(
1375  (*(Psegments_noL6[ pseg_noL6_pos ].end()-1 ))->localPosition().x(),
1376  (*(Psegments_noL6[ pseg_noL6_pos ].end()-midlayer_pointer[n_layers_occupied_tot-2] ))->localPosition().x(),
1377  (*(Psegments_noL6[ pseg_noL6_pos ].end()-(n_layers_occupied_tot-1) ))->localPosition().x(),
1378  float((*(Psegments_noL6[ pseg_noL6_pos ].end()-1 ))->cscDetId().layer()),
1379  float((*(Psegments_noL6[ pseg_noL6_pos ].end()-midlayer_pointer[n_layers_occupied_tot-2] ))->cscDetId().layer()),
1380  float((*(Psegments_noL6[ pseg_noL6_pos ].end()-(n_layers_occupied_tot-1) ))->cscDetId().layer())
1381  );
1382 
1383  if (curv_noL6_A[ pseg_noL6_pos ] > curvePenaltyThreshold) weight_noL6_A[ pseg_noL6_pos ] = weight_noL6_A[ pseg_noL6_pos ] * curvePenalty;
1384 
1385  if (weight_noL6_B[ pseg_noL6_pos ] > a_yweightPenaltyThreshold[thestation][thering])
1386  weight_noL6_A[ pseg_noL6_pos ] = weight_noL6_A[ pseg_noL6_pos ] * yweightPenalty;
1387 
1388  if (weight_noL6_A[ pseg_noL6_pos ] < min_weight_noLx_A ) {
1389  min_weight_noLx_A = weight_noL6_A[ pseg_noL6_pos ];
1390  best_weight_noLx_B = weight_noL6_B[ pseg_noL6_pos ];
1391  best_curv_noLx_A = curv_noL6_A[ pseg_noL6_pos ];
1392  best_noLx_pseg = pseg_noL6_pos;
1393  best_Layer_noLx = 6;
1394  }
1395 
1396  }
1397 
1398  // alternative: fill map with weight and pseg (which is already ordered)? Seems a very good tool to go looking for segments from.
1399  // As I understand, the segments would be inserted according to their weight, so the list would "automatically" be sorted.
1400 
1401  }
1402  }
1403  }
1404 
1405  }
1406  }
1407  }
1408  }
1409 
1410  //************************************************************************;
1411  //*** End segment building *******************************************;
1412  //************************************************************************;
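  // For reference, a minimal sketch of the per-triplet weight accumulated above, consistent with the comments in the loop
  // (the actual helper is theWeight(); treat the form below as an illustration, not a verbatim copy):
  //   w(x1,x2,x3; l1,l2,l3) = | (x2 - x3)/(l2 - l3) - (x1 - x2)/(l1 - l2) |
  // i.e. the absolute change of the x-slope between the last hit pair and the preceding pair, with layer numbers as the
  // longitudinal coordinate. Worked example: hits at x = 1.0, 2.0, 3.1 on layers 1, 2, 3 (last hit on layer 3) give
  //   | (2.0 - 1.0)/(2 - 1) - (3.1 - 2.0)/(3 - 2) | = | 1.0 - 1.1 | = 0.1
  // The same form is accumulated in y for the *_B weights and, once the last hit is picked up, over the first, middle and
  // last hits for the curvature terms curv_*_A.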
1413 
1414  // Important part! Here segment(s) are actually chosen. All the good segments
1415  // could be chosen or some (best) ones only (in order to save time).
1416 
1417  // Check if there is a segment with n-1 hits that has a significantly better
1418  // weight than the best n hit segment
1419 
1420  // IBL 070828: implicit assumption here is that there will always only be one segment per
1421  // cluster - if there are >1 we will need to find out which segment the alternative n-1 hit
1422  // protosegment belongs to!
1423 
1424 
1425  float chosen_weight = min_weight_A;
1426  float chosen_ywgt = best_weight_B;
1427  float chosen_curv = best_curv_A;
1428  int chosen_nlayers = n_layers_occupied_tot;
1429  int chosen_pseg = best_pseg;
1430  if (best_pseg<0) {
1431  return segmentInChamber;
1432  }
1435 
1436  float hit_drop_limit = -999999.999;
1437 
1438  // define different weight improvement requirements depending on how many layers are in the segment candidate
1439  switch ( n_layers_processed ) {
1440  case 1 :
1441  // do nothing;
1442  break;
1443  case 2 :
1444  // do nothing;
1445  break;
1446  case 3 :
1447  // do nothing;
1448  break;
1449  case 4 :
1450  hit_drop_limit = hitDropLimit6Hits * (1./2.) * hitDropLimit4Hits;
1451  if ((best_Layer_noLx < 1) || (best_Layer_noLx > 4)) {
1452  // std::cout<<"CSCSegAlgoST: For four layers, best_Layer_noLx = "<< best_Layer_noLx << std::endl;
1453  }
1454  if ((best_Layer_noLx == 2) || (best_Layer_noLx == 3)) hit_drop_limit = hit_drop_limit * (1./2.);
1455  break;
1456  case 5 :
1457  hit_drop_limit = hitDropLimit6Hits * (2./3.) * hitDropLimit5Hits;
1458  if ((best_Layer_noLx < 1) || (best_Layer_noLx > 5)) {
1459  // std::cout<<"CSCSegAlgoST: For five layers, best_Layer_noLx = "<< best_Layer_noLx << std::endl;
1460  }
1461  if ((best_Layer_noLx == 2) || (best_Layer_noLx == 4)) hit_drop_limit = hit_drop_limit * (1./2.);
1462  if (best_Layer_noLx == 3) hit_drop_limit = hit_drop_limit * (1./3.);
1463  break;
1464  case 6 :
1465  hit_drop_limit = hitDropLimit6Hits * (3./4.);
1466  if ((best_Layer_noLx < 1) || (best_Layer_noLx > 6)) {
1467  // std::cout<<"CSCSegAlgoST: For six layers, best_Layer_noLx = "<< best_Layer_noLx << std::endl;
1468  }
1469  if ((best_Layer_noLx == 2) || (best_Layer_noLx == 5)) hit_drop_limit = hit_drop_limit * (1./2.);
1470  if ((best_Layer_noLx == 3) || (best_Layer_noLx == 4)) hit_drop_limit = hit_drop_limit * (1./3.);
1471  break;
1472 
1473  default :
1474  // Fallback - should never occur.
1475  LogDebug("CSCSegment|CSC") <<"CSCSegAlgoST: Unexpected number of layers with hits - please inform developers.";
1476  // std::cout<<"CSCSegAlgoST: Unexpected number of layers with hits - please inform developers."<<std::endl;
1477  hit_drop_limit = 0.1;
1478  }
1479 
1480  // choose the NoLx collection (the one that contains the best N-1 candidate)
1481  switch ( best_Layer_noLx ) {
1482  case 1 :
1483  Psegments_noLx.clear();
1484  Psegments_noLx = Psegments_noL1;
1485  weight_noLx_A.clear();
1486  weight_noLx_A = weight_noL1_A;
1487  break;
1488  case 2 :
1489  Psegments_noLx.clear();
1490  Psegments_noLx = Psegments_noL2;
1491  weight_noLx_A.clear();
1492  weight_noLx_A = weight_noL2_A;
1493  break;
1494  case 3 :
1495  Psegments_noLx.clear();
1496  Psegments_noLx = Psegments_noL3;
1497  weight_noLx_A.clear();
1498  weight_noLx_A = weight_noL3_A;
1499  break;
1500  case 4 :
1501  Psegments_noLx.clear();
1502  Psegments_noLx = Psegments_noL4;
1503  weight_noLx_A.clear();
1504  weight_noLx_A = weight_noL4_A;
1505  break;
1506  case 5 :
1507  Psegments_noLx.clear();
1508  Psegments_noLx = Psegments_noL5;
1509  weight_noLx_A.clear();
1510  weight_noLx_A = weight_noL5_A;
1511  break;
1512  case 6 :
1513  Psegments_noLx.clear();
1514  Psegments_noLx = Psegments_noL6;
1515  weight_noLx_A.clear();
1516  weight_noLx_A = weight_noL6_A;
1517  break;
1518 
1519  default :
1520  // Fallback - should occur only for preclusters with only 3 layers with hits.
1521  Psegments_noLx.clear();
1522  weight_noLx_A.clear();
1523  }
1524 
1525  if( min_weight_A > 0. ) {
1526  if ( min_weight_noLx_A/min_weight_A < hit_drop_limit ) {
1527  chosen_weight = min_weight_noLx_A;
1528  chosen_ywgt = best_weight_noLx_B;
1529  chosen_curv = best_curv_noLx_A;
1530  chosen_nlayers = n_layers_occupied_tot-1;
1531  chosen_pseg = best_noLx_pseg;
1532  chosen_Psegments.clear();
1533  chosen_weight_A.clear();
1534  chosen_Psegments = Psegments_noLx;
1535  chosen_weight_A = weight_noLx_A;
1536  }
1537  }
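  // Worked example of the decision above (illustrative numbers): for 6 processed layers hit_drop_limit starts at
  // hitDropLimit6Hits*(3/4) and is divided by 3 if the dropped layer is 3 or 4. Assuming hitDropLimit6Hits = 0.8
  // (a hypothetical value, not necessarily the configured default) and best_Layer_noLx = 3, hit_drop_limit = 0.8*0.75/3 = 0.2,
  // so the 5-hit candidate replaces the 6-hit one only if its weight is below 0.2 times the best 6-hit weight.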
1538 
1539  if(onlyBestSegment) {
1540  ChooseSegments2a( chosen_Psegments, chosen_pseg );
1541  }
1542  else {
1543  ChooseSegments3( chosen_Psegments, chosen_weight_A, chosen_pseg );
1544  }
1545 
1546  for(unsigned int iSegment=0; iSegment<GoodSegments.size();++iSegment){
1547  protoSegment = GoodSegments[iSegment];
1548  passCondNumber=false;
1549  passCondNumber_2 = false;
1550  protoChiUCorrection=1.0;
1551  doSlopesAndChi2();
1552  // Attempt to handle numerical instability of the fit;
1553  // Any segment with protoChi2/protoNDF>chi2Norm_3D_
1554  // considered as that one potentially suffering from
1555  // numerical instability in fit.
1556  if(correctCov_){
1557  // Call the fit with prefitting option;
1558  // First fit a straight line to X-Z coordinates
1559  // and calculate chi^2 (chiUZ in correctTheCovX(void)) for X-Z fit;
1560  // Scale up errors in X if chiUZ too big (default 20);
1561  // Refit XY-Z with the scaled up X errors
1562  if(protoChi2/protoNDF>chi2Norm_3D_){
1563  passCondNumber = true;
1564  doSlopesAndChi2();
1565  }
1566  if(protoChiUCorrection<1.00005){
1567  LogDebug("CSCSegment|segmWierd") << "Weird segment, ErrXX scaled, refit " <<std::endl;
1568  if(protoChi2/protoNDF>chi2Norm_3D_){
1569  // Call the fit with direct adjustment of condition number;
1570  // If the attempt to improve fit by scaling up X error fails
1571  // call the procedure to make the condition number of M compatible with
1572  // the precision of X and Y measurements;
1573  // Achieved by decreasing abs value of the Covariance
1574  LogDebug("CSCSegment|segmWierd") << "Weird segment, ErrXY changed to match cond. number, refit " << std::endl;
1575  passCondNumber_2=true;
1576  doSlopesAndChi2();
1577  }
1578  }
1579  // Call the pre-pruning procedure;
1580  // If the attempt to improve the fit by scaling up the X errors is successful,
1581  // but the scale factor for the X errors is too big,
1582  // Prune the recHit inducing the biggest contribution into X-Z chi^2
1583  // and refit;
1584  if((prePrun_ && (sqrt(protoChiUCorrection)>prePrunLimit_)) &&
1585  (protoSegment.size()>3)){
1586  LogDebug("CSCSegment|segmWierd") << "Scale factor protoChiUCorrection too big, pre-Prune, refit " << std::endl;
1587  protoSegment.erase(protoSegment.begin()+(maxContrIndex),
1588  protoSegment.begin()+(maxContrIndex+1));
1589  doSlopesAndChi2();
1590  }
1591  }
1592 
1593  fillLocalDirection();
1594  // calculate error matrix
1595  AlgebraicSymMatrix protoErrors = calculateError();
1596  // but reorder components to match what's required by TrackingRecHit interface
1597  // i.e. slopes first, then positions
1598  flipErrors( protoErrors );
1599  //
1600  CSCSegment temp(protoSegment, protoIntercept, protoDirection, protoErrors, protoChi2);
1601  segmentInChamber.push_back(temp);
1602  }
1603  return segmentInChamber;
1604 }
1605 
1606 void CSCSegAlgoST::ChooseSegments2a(std::vector< ChamberHitContainer > & chosen_segments, int chosen_seg) {
1607  // just return best segment
1608  GoodSegments.clear();
1609  GoodSegments.push_back( chosen_segments[chosen_seg] );
1610 }
1611 
1612 void CSCSegAlgoST::ChooseSegments3(std::vector< ChamberHitContainer > & chosen_segments, std::vector< float > & chosen_weight, int chosen_seg) {
1613 
1614  int SumCommonHits = 0;
1615  GoodSegments.clear();
1616  int nr_remaining_candidates;
1617  unsigned int nr_of_segment_candidates;
1618 
1619  nr_remaining_candidates = nr_of_segment_candidates = chosen_segments.size();
1620 
1621  // always select and return best protosegment:
1622  GoodSegments.push_back( chosen_segments[ chosen_seg ] );
1623 
1624  float chosen_weight_temp = 999999.;
1625  int chosen_seg_temp = -1;
1626 
1627  // try to find further segment candidates:
1628  while( nr_remaining_candidates > 0 ) {
1629 
1630  for(unsigned int iCand=0; iCand < nr_of_segment_candidates; ++iCand) {
1631  //only compare current best to psegs that have not been marked bad:
1632  if( chosen_weight[iCand] < 0. ) continue;
1633  SumCommonHits = 0;
1634 
1635  for( int ihits = 0; ihits < int(chosen_segments[iCand].size()); ++ihits ) { // iCand and iiCand NEED to have same nr of hits! (always have by construction)
1636  if( chosen_segments[iCand][ihits] == chosen_segments[chosen_seg][ihits]) {
1637  ++SumCommonHits;
1638  }
1639  }
1640 
1641  //mark a pseg bad:
1642  if(SumCommonHits>1) { // this threshold should become a configurable parameter (a "card"); should be investigated first
1643  chosen_weight[iCand] = -1.;
1644  nr_remaining_candidates -= 1;
1645  }
1646  else {
1647  // save the protosegment with the smallest weight
1648  if( chosen_weight[ iCand ] < chosen_weight_temp ) {
1649  chosen_weight_temp = chosen_weight[ iCand ];
1650  chosen_seg_temp = iCand ;
1651  }
1652  }
1653  }
1654 
1655  if( chosen_seg_temp > -1 ) GoodSegments.push_back( chosen_segments[ chosen_seg_temp ] );
1656 
1657  chosen_seg = chosen_seg_temp;
1658  // re-initialize temporary best parameters
1659  chosen_weight_temp = 999999;
1660  chosen_seg_temp = -1;
1661  }
1662 }
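// In short, ChooseSegments3 performs a greedy selection: the best-weight protosegment is always kept; any candidate sharing
// more than one hit with the current reference segment is marked bad (weight set to -1); of the surviving candidates the one
// with the smallest weight becomes the next accepted segment and the new reference, until no candidates remain.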
1663 
1664 void CSCSegAlgoST::ChooseSegments2(int best_seg) {
1665  // std::vector <int> CommonHits(6); // nice concept :)
1666  std::vector <unsigned int> BadCandidate;
1667  int SumCommonHits =0;
1668  GoodSegments.clear();
1669  BadCandidate.clear();
1670  for(unsigned int iCand=0;iCand<Psegments.size();++iCand) {
1671  // skip here if segment was marked bad
1672  for(unsigned int iiCand=iCand+1;iiCand<Psegments.size();++iiCand){
1673  // skip here too if segment was marked bad
1674  SumCommonHits =0;
1675  if( Psegments[iCand].size() != Psegments[iiCand].size() ) {
1676  LogDebug("CSCSegment|CSC") <<"CSCSegmentST::ChooseSegments2: ALARM!! THIS should not happen!!";
1677 // std::cout<<"CSCSegmentST::ChooseSegments2: ALARM!! THIS should not happen!!"<<std::endl;
1678  }
1679  else {
1680  for( int ihits = 0; ihits < int(Psegments[iCand].size()); ++ihits ) { // iCand and iiCand NEED to have same nr of hits! (always have by construction)
1681  if( Psegments[iCand][ihits] == Psegments[iiCand][ihits]) {
1682  ++SumCommonHits;
1683  }
1684  }
1685  }
1686  if(SumCommonHits>1) {
1687  if( weight_A[iCand]>weight_A[iiCand] ) { // use weight_A here
1688  BadCandidate.push_back(iCand);
1689  // rather mark segment bad by an array which is in sync with protosegments!! e.g. set weight = weight*1000 or have an additional array or set it to weight *= -1
1690  }
1691  else{
1692  BadCandidate.push_back(iiCand);
1693  // rather mark segment bad by an array which is in sync with protosegments!! e.g. set weight = weight*1000 or have an additional array or set it to weight *= -1
1694  }
1695  }
1696  }
1697  }
1698  bool discard;
1699  for(unsigned int isegm=0;isegm<Psegments.size();++isegm) {
1700  // For best results another iteration/comparison over Psegments
1701  //should be applied here... It would make the program much slower.
1702  discard = false;
1703  for(unsigned int ibad=0;ibad<BadCandidate.size();++ibad) {
1704  // can save this loop if we used an array in sync with Psegments!!!!
1705  if(isegm == BadCandidate[ibad]) {
1706  discard = true;
1707  }
1708  }
1709  if(!discard) {
1710  GoodSegments.push_back( Psegments[isegm] );
1711  }
1712  }
1713 }
1714 //Method doSlopesAndChi2
1715 // fitSlopes() and fillChiSquared() are always called one after the other
1716 // In fact the code is duplicated in the two functions (as we need 2 loops) -
1717 // it is much better to fix that at some point
1718 void CSCSegAlgoST::doSlopesAndChi2(void){
1719  fitSlopes();
1720  fillChiSquared();
1721 }
1722 /* Method fitSlopes
1723  *
1724  * Perform a Least Square Fit on a segment as per SK algo
1725  *
1726  */
1727 void CSCSegAlgoST::fitSlopes(void) {
1728  e_Cxx.clear();
1729  if(passCondNumber && !passCondNumber_2){
1730  correctTheCovX();
1731  if(e_Cxx.size()!=protoSegment.size()){
1732  LogDebug("CSCSegment|segmWierd") << "e_Cxx.size()!=protoSegment.size() IT IS A SERIOUS PROBLEM!!! " <<std::endl;
1733  }
1734  }
1735  CLHEP::HepMatrix M(4,4,0);
1736  CLHEP::HepVector B(4,0);
1737  ChamberHitContainer::const_iterator ih = protoSegment.begin();
1738  for (ih = protoSegment.begin(); ih != protoSegment.end(); ++ih) {
1739  const CSCRecHit2D& hit = (**ih);
1740  const CSCLayer* layer = theChamber->layer(hit.cscDetId().layer());
1741  GlobalPoint gp = layer->toGlobal(hit.localPosition());
1742  LocalPoint lp = theChamber->toLocal(gp);
1743  // ptc: Local position of hit w.r.t. chamber
1744  double u = lp.x();
1745  double v = lp.y();
1746  double z = lp.z();
1747  // ptc: Covariance matrix of local errors
1748  CLHEP::HepMatrix IC(2,2);
1749  if(passCondNumber && !passCondNumber_2){
1750  IC(1,1) = e_Cxx.at(ih-protoSegment.begin());
1751  }
1752  else{
1753  IC(1,1) = hit.localPositionError().xx();
1754  }
1755  // IC(1,1) = hit.localPositionError().xx();
1756  IC(1,2) = hit.localPositionError().xy();
1757  IC(2,2) = hit.localPositionError().yy();
1758  IC(2,1) = IC(1,2); // since Cov is symmetric
1760  if(passCondNumber_2){
1761  correctTheCovMatrix(IC);
1762  }
1763  // ptc: Invert covariance matrix (and trap if it fails!)
1764  int ierr = 0;
1765  IC.invert(ierr); // inverts in place
1766  if (ierr != 0) {
1767  LogDebug("CSCSegment|CSC") << "CSCSegment::fitSlopes: failed to invert covariance matrix=\n" << IC;
1768  // std::cout<< "CSCSegment::fitSlopes: failed to invert covariance matrix=\n" << IC << "\n"<<std::endl;
1769  }
1770 
1771  M(1,1) += IC(1,1);
1772  M(1,2) += IC(1,2);
1773  M(1,3) += IC(1,1) * z;
1774  M(1,4) += IC(1,2) * z;
1775  B(1) += u * IC(1,1) + v * IC(1,2);
1776 
1777  M(2,1) += IC(2,1);
1778  M(2,2) += IC(2,2);
1779  M(2,3) += IC(2,1) * z;
1780  M(2,4) += IC(2,2) * z;
1781  B(2) += u * IC(2,1) + v * IC(2,2);
1782 
1783  M(3,1) += IC(1,1) * z;
1784  M(3,2) += IC(1,2) * z;
1785  M(3,3) += IC(1,1) * z * z;
1786  M(3,4) += IC(1,2) * z * z;
1787  B(3) += ( u * IC(1,1) + v * IC(1,2) ) * z;
1788 
1789  M(4,1) += IC(2,1) * z;
1790  M(4,2) += IC(2,2) * z;
1791  M(4,3) += IC(2,1) * z * z;
1792  M(4,4) += IC(2,2) * z * z;
1793  B(4) += ( u * IC(2,1) + v * IC(2,2) ) * z;
1794  }
1795  CLHEP::HepVector p = solve(M, B);
1796 
1797  // Update member variables
1798  // Note that origin has local z = 0
1799  protoIntercept = LocalPoint(p(1), p(2), 0.);
1800  protoSlope_u = p(3);
1801  protoSlope_v = p(4);
1802 }
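// For reference, the loop above accumulates the weighted least-squares normal equations for the straight-line model
//   (u, v)(z) = (u0, v0) + z * (du/dz, dv/dz).
// With D = [[1, 0, z, 0], [0, 1, 0, z]] and W the inverted 2x2 hit covariance, each hit adds D^T*W*D to M and
// D^T*W*(u, v) to B; solving M*p = B gives p = (u0, v0, du/dz, dv/dz), which fills protoIntercept, protoSlope_u and protoSlope_v.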
1803 /* Method fillChiSquared
1804  *
1805  * Determine Chi^2 for the proto wire segment
1806  *
1807  */
1808 void CSCSegAlgoST::fillChiSquared(void) {
1809 
1810  double chsq = 0.;
1811 
1812  ChamberHitContainer::const_iterator ih;
1813  for (ih = protoSegment.begin(); ih != protoSegment.end(); ++ih) {
1814 
1815  const CSCRecHit2D& hit = (**ih);
1816  const CSCLayer* layer = theChamber->layer(hit.cscDetId().layer());
1817  GlobalPoint gp = layer->toGlobal(hit.localPosition());
1818  LocalPoint lp = theChamber->toLocal(gp);
1819 
1820  double u = lp.x();
1821  double v = lp.y();
1822  double z = lp.z();
1823 
1824  double du = protoIntercept.x() + protoSlope_u * z - u;
1825  double dv = protoIntercept.y() + protoSlope_v * z - v;
1826 
1827  CLHEP::HepMatrix IC(2,2);
1828  if(passCondNumber && !passCondNumber_2){
1829  IC(1,1) = e_Cxx.at(ih-protoSegment.begin());
1830  }
1831  else{
1832  IC(1,1) = hit.localPositionError().xx();
1833  }
1834  // IC(1,1) = hit.localPositionError().xx();
1835  IC(1,2) = hit.localPositionError().xy();
1836  IC(2,2) = hit.localPositionError().yy();
1837  IC(2,1) = IC(1,2);
1839  if(passCondNumber_2){
1840  correctTheCovMatrix(IC);
1841  }
1842 
1843  // Invert covariance matrix
1844  int ierr = 0;
1845  IC.invert(ierr);
1846  if (ierr != 0) {
1847  LogDebug("CSCSegment|CSC") << "CSCSegment::fillChiSquared: failed to invert covariance matrix=\n" << IC;
1848  // std::cout << "CSCSegment::fillChiSquared: failed to invert covariance matrix=\n" << IC << "\n";
1849 
1850  }
1851 
1852  chsq += du*du*IC(1,1) + 2.*du*dv*IC(1,2) + dv*dv*IC(2,2);
1853  }
1854 
1855  protoChi2 = chsq;
1856  protoNDF = 2.*protoSegment.size() - 4;
1857 }
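// For reference: the chi^2 accumulated above is sum_h r_h^T * W_h * r_h, with residuals r_h = (u0 + slope_u*z - u, v0 + slope_v*z - v)
// and W_h the inverted 2x2 hit covariance; the number of degrees of freedom is 2*nhits - 4 (two measurements per hit, four fit parameters).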
1858 /* fillLocalDirection
1859  *
1860  */
1861 void CSCSegAlgoST::fillLocalDirection(void) {
1862  // Always enforce direction of segment to point from IP outwards
1863  // (Incorrect for particles not coming from IP, of course.)
1864 
1865  double dxdz = protoSlope_u;
1866  double dydz = protoSlope_v;
1867  double dz = 1./sqrt(1. + dxdz*dxdz + dydz*dydz);
1868  double dx = dz*dxdz;
1869  double dy = dz*dydz;
1870  LocalVector localDir(dx,dy,dz);
1871 
1872  // localDir may need sign flip to ensure it points outward from IP
1873  // ptc: Examine its direction and origin in global z: to point outward
1874  // the localDir should always have same sign as global z...
1875 
1876  double globalZpos = ( theChamber->toGlobal( protoIntercept ) ).z();
1877  double globalZdir = ( theChamber->toGlobal( localDir ) ).z();
1878  double directionSign = globalZpos * globalZdir;
1879  protoDirection = (directionSign * localDir).unit();
1880 }
1881 /* weightMatrix
1882  *
1883  */
1884 AlgebraicSymMatrix CSCSegAlgoST::weightMatrix(void) const {
1885 
1886  std::vector<const CSCRecHit2D*>::const_iterator it;
1887  int nhits = protoSegment.size();
1888  AlgebraicSymMatrix matrix(2*nhits, 0);
1889  int row = 0;
1890 
1891  for (it = protoSegment.begin(); it != protoSegment.end(); ++it) {
1892 
1893  const CSCRecHit2D& hit = (**it);
1894  ++row;
1895  matrix(row, row) = protoChiUCorrection*hit.localPositionError().xx();
1896  matrix(row, row+1) = hit.localPositionError().xy();
1897  ++row;
1898  matrix(row, row-1) = hit.localPositionError().xy();
1899  matrix(row, row) = hit.localPositionError().yy();
1900  }
1901  int ierr;
1902  matrix.invert(ierr);
1903  return matrix;
1904 }
1905 
1906 
1907 /* derivativeMatrix
1908  *
1909  */
1910 CLHEP::HepMatrix CSCSegAlgoST::derivativeMatrix() const {
1911 
1912  ChamberHitContainer::const_iterator it;
1913  int nhits = protoSegment.size();
1914  CLHEP::HepMatrix matrix(2*nhits, 4);
1915  int row = 0;
1916 
1917  for(it = protoSegment.begin(); it != protoSegment.end(); ++it) {
1918 
1919  const CSCRecHit2D& hit = (**it);
1920  const CSCLayer* layer = theChamber->layer(hit.cscDetId().layer());
1921  GlobalPoint gp = layer->toGlobal(hit.localPosition());
1922  LocalPoint lp = theChamber->toLocal(gp);
1923  float z = lp.z();
1924  ++row;
1925  matrix(row, 1) = 1.;
1926  matrix(row, 3) = z;
1927  ++row;
1928  matrix(row, 2) = 1.;
1929  matrix(row, 4) = z;
1930  }
1931  return matrix;
1932 }
1933 
1934 
1935 /* calculateError
1936  *
1937  */
1938 AlgebraicSymMatrix CSCSegAlgoST::calculateError(void) const {
1939 
1940  AlgebraicSymMatrix weights = weightMatrix();
1941  AlgebraicMatrix A = derivativeMatrix();
1942 
1943  // (AT W A)^-1
1944  // from http://www.phys.ufl.edu/~avery/fitting.html, part I
1945  int ierr;
1946  AlgebraicSymMatrix result = weights.similarityT(A);
1947  result.invert(ierr);
1948 
1949  // blithely assuming the inverting never fails...
1950  return result;
1951 }
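// Note: CLHEP's HepSymMatrix::similarityT(A) computes A^T * (this) * A, so with A the 2n x 4 derivative matrix and "weights"
// the block-diagonal inverted hit covariances, "result" is A^T*W*A; inverting it yields the 4x4 covariance of the fitted
// parameters, i.e. the (A^T W A)^-1 quoted in the comment above.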
1952 
1953 
1954 void CSCSegAlgoST::flipErrors( AlgebraicSymMatrix& a ) const {
1955 
1956  // The CSCSegment needs the error matrix re-arranged to match
1957  // parameters in order (uz, vz, u0, v0) where uz, vz = slopes, u0, v0 = intercepts
1958 
1959  AlgebraicSymMatrix hold( a );
1960 
1961  // errors on slopes into upper left
1962  a(1,1) = hold(3,3);
1963  a(1,2) = hold(3,4);
1964  a(2,1) = hold(4,3);
1965  a(2,2) = hold(4,4);
1966 
1967  // errors on positions into lower right
1968  a(3,3) = hold(1,1);
1969  a(3,4) = hold(1,2);
1970  a(4,3) = hold(2,1);
1971  a(4,4) = hold(2,2);
1972 
1973  // must also interchange off-diagonal elements of off-diagonal 2x2 submatrices
1974  a(4,1) = hold(2,3);
1975  a(3,2) = hold(1,4);
1976  a(2,3) = hold(4,1); // = hold(1,4)
1977  a(1,4) = hold(3,2); // = hold(2,3)
1978 }
1979 //
1980 void CSCSegAlgoST::correctTheCovX(void){
1981  std::vector<double> uu, vv, zz;
1982  //std::vector<double> e_Cxx;
1983  e_Cxx.clear();
1984  double sum_U_err=0.0;
1985  double sum_Z_U_err=0.0;
1986  double sum_Z2_U_err=0.0;
1987  double sum_U_U_err=0.0;
1988  double sum_UZ_U_err=0.0;
1989  std::vector<double> chiUZind;
1990  std::vector<double>::iterator chiContribution;
1991  double chiUZ=0.0;
1992  ChamberHitContainer::const_iterator ih = protoSegment.begin();
1993  for (ih = protoSegment.begin(); ih != protoSegment.end(); ++ih) {
1994  const CSCRecHit2D& hit = (**ih);
1995  e_Cxx.push_back(hit.localPositionError().xx());
1996  //
1997  const CSCLayer* layer = theChamber->layer(hit.cscDetId().layer());
1998  GlobalPoint gp = layer->toGlobal(hit.localPosition());
1999  LocalPoint lp = theChamber->toLocal(gp);
2000  // ptc: Local position of hit w.r.t. chamber
2001  double u = lp.x();
2002  double v = lp.y();
2003  double z = lp.z();
2004  uu.push_back(u);
2005  vv.push_back(v);
2006  zz.push_back(z);
2008  sum_U_err += 1./e_Cxx.back();
2009  sum_Z_U_err += z/e_Cxx.back();
2010  sum_Z2_U_err += (z*z)/e_Cxx.back();
2011  sum_U_U_err += u/e_Cxx.back();
2012  sum_UZ_U_err += (u*z)/e_Cxx.back();
2013  }
2014 
2017 
2018  double denom=sum_U_err*sum_Z2_U_err-pow(sum_Z_U_err,2);
2019  double U0=(sum_Z2_U_err*sum_U_U_err-sum_Z_U_err*sum_UZ_U_err)/denom;
2020  double UZ=(sum_U_err*sum_UZ_U_err-sum_Z_U_err*sum_U_U_err)/denom;
2021 
2024 
2025  for(unsigned i=0; i<uu.size(); ++i){
2026  double uMean = U0+UZ*zz[i];
2027  chiUZind.push_back((pow((uMean-uu[i]),2))/e_Cxx[i]);
2028  chiUZ += (pow((uMean-uu[i]),2))/e_Cxx[i];
2029  }
2030  chiUZ = chiUZ/(uu.size()-2);
2031 
2032  if(chiUZ>=chi2Norm_2D_){
2033  protoChiUCorrection = chiUZ/chi2Norm_2D_;
2034  for(unsigned i=0; i<uu.size(); ++i)
2035  e_Cxx[i]=e_Cxx[i]*protoChiUCorrection;
2036  }
2037 
2039 
2040  if(sqrt(protoChiUCorrection)>prePrunLimit_){
2041  chiContribution=max_element(chiUZind.begin(),chiUZind.end());
2042  maxContrIndex = chiContribution - chiUZind.begin();
2043  /*
2044  for(unsigned i=0; i<chiUZind.size();++i){
2045  if(*chiContribution==chiUZind[i]){
2046  maxContrIndex=i;
2047  }
2048  }
2049  */
2050  }
2051  //
2052  //return e_Cxx;
2053 }
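// Worked example of the error scaling above (illustrative numbers): if the one-dimensional x-z fit gives chiUZ = 80 per degree
// of freedom while chi2Norm_2D_ = 20 (the cut quoted as "default 20" in the comments further up), then protoChiUCorrection = 4
// and every e_Cxx entry is inflated by that factor, so x is weighted less in the subsequent 3D refit. Since
// sqrt(protoChiUCorrection) = 2, the hit with the largest chiUZind contribution is also recorded in maxContrIndex whenever 2
// exceeds prePrunLimit_, making it the candidate removed by the pre-pruning block that erases the worst hit and refits.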
2054 //
2055 void CSCSegAlgoST::correctTheCovMatrix(CLHEP::HepMatrix &IC){
2056  double condNumberCorr1=0.0;
2057  double condNumberCorr2=0.0;
2058  double detCov=0.0;
2059  double diag1=0.0;
2060  double diag2=0.0;
2061  double IC_12_corr=0.0;
2062  double IC_11_corr=0.0;
2063  if(!covToAnyNumberAll_){
2064  condNumberCorr1=condSeed1_*IC(2,2);
2065  condNumberCorr2=condSeed2_*IC(2,2);
2066  diag1=IC(1,1)*IC(2,2);
2067  diag2=IC(1,2)*IC(1,2);
2068  detCov=fabs(diag1-diag2);
2069  if((diag1<condNumberCorr2)&&(diag2<condNumberCorr2)){
2070  if(covToAnyNumber_)
2071  IC(1,2)=covAnyNumber_;
2072  else{
2073  IC_11_corr=condSeed1_+fabs(IC(1,2))/IC(2,2);
2074  IC(1,1)=IC_11_corr;
2075  }
2076  }
2077 
2078  if(((detCov<condNumberCorr2)&&(diag1>condNumberCorr2))||
2079  ((diag2>condNumberCorr2)&&(detCov<condNumberCorr2)
2080  )){
2081  if(covToAnyNumber_)
2082  IC(1,2)=covAnyNumber_;
2083  else{
2084  IC_12_corr=sqrt(fabs(diag1-condNumberCorr2));
2085  if(IC(1,2)<0)
2086  IC(1,2)=(-1)*IC_12_corr;
2087  else
2088  IC(1,2)=IC_12_corr;
2089  }
2090  }
2091  }
2092  else{
2093  IC(1,2)=covAnyNumber_;
2094  }
2095 }
2096 //
2097 void CSCSegAlgoST::findDuplicates(std::vector<CSCSegment> & segments ){
2098  // this is intended for ME1/1a only - we have ghost segments because of the strips ganging
2099  // this function finds them (the rechit comparison is done via sharesInput())
2100  // if a segment shares all of its rechits with another segment it is a duplicate (even if
2101  // it has fewer rechits)
2102 
2103  for(std::vector<CSCSegment>::iterator it=segments.begin(); it != segments.end(); ++it) {
2104  std::vector<CSCSegment*> duplicateSegments;
2105  for(std::vector<CSCSegment>::iterator it2=segments.begin(); it2 != segments.end(); ++it2) {
2106  //
2107  bool allShared = true;
2108  if(it!=it2){
2109  allShared = it->sharesRecHits(*it2);
2110  }
2111  else{
2112  allShared = false;
2113  }
2114  //
2115  if(allShared){
2116  duplicateSegments.push_back(&(*it2));
2117  }
2118  }
2119  it->setDuplicateSegments(duplicateSegments);
2120  }
2121 
2122 }
2123 //
2124 