// DAClusterizerInZ.cc
// The original #include lines are not shown in this listing; the two below are
// reconstructed from the code's usage (class interface, edm::LogInfo) and may
// not match the original header list exactly.
#include "RecoVertex/PrimaryVertexProducer/interface/DAClusterizerInZ.h"
#include "FWCore/MessageLogger/interface/MessageLogger.h"
using namespace std;


namespace {

  bool recTrackLessZ1(const DAClusterizerInZ::track_t & tk1,
                      const DAClusterizerInZ::track_t & tk2)
  {
    return tk1.z < tk2.z;
  }
}


vector<DAClusterizerInZ::track_t> DAClusterizerInZ::fill(
  const vector<reco::TransientTrack> & tracks
  )const{
  // prepare track data for clustering
  vector<track_t> tks;
  for(vector<reco::TransientTrack>::const_iterator it=tracks.begin(); it!=tracks.end(); it++){
    track_t t;
    t.z=((*it).stateAtBeamLine().trackStateAtPCA()).position().z();
    double tantheta=tan(((*it).stateAtBeamLine().trackStateAtPCA()).momentum().theta());
    double phi=((*it).stateAtBeamLine().trackStateAtPCA()).momentum().phi();
    // get the beam-spot
    reco::BeamSpot beamspot=(it->stateAtBeamLine()).beamSpot();
    t.dz2= pow((*it).track().dzError(),2)   // track error
      + (pow(beamspot.BeamWidthX()*cos(phi),2)+pow(beamspot.BeamWidthY()*sin(phi),2))/pow(tantheta,2)  // beam-width induced
      + pow(vertexSize_,2);                 // intrinsic vertex size, safer for outliers and short-lived decays
    if (d0CutOff_>0){
      Measurement1D IP=(*it).stateAtBeamLine().transverseImpactParameter();  // error contains beamspot
      t.pi=1./(1.+exp(pow(IP.value()/IP.error(),2)-pow(d0CutOff_,2)));       // reduce weight for high-ip tracks
    }else{
      t.pi=1.;
    }
    t.tt=&(*it);
    t.Z=1.;
    tks.push_back(t);
  }
  return tks;
}
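// Note: the per-track quantities filled above, as read from the code:
//   dz2 = dzError^2 + (wx^2 cos^2(phi) + wy^2 sin^2(phi)) / tan^2(theta) + vertexSize_^2,
//         the squared z resolution at the beam line, including the projected
//         beam-width contribution and a minimum (intrinsic) vertex size;
//   pi  = 1 / (1 + exp((d0/sigma_d0)^2 - d0CutOff_^2)), a soft weight that
//         suppresses tracks with a large transverse impact parameter;
//   Z   = per-track normalization (partition function), initialized to 1.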



double DAClusterizerInZ::Eik(const track_t & t, const vertex_t & k)const{
  return pow(t.z-k.z,2)/t.dz2;
}
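// Note: Eik is the assignment "energy" of track i to vertex k,
//   E_ik = (z_i - z_k)^2 / dz2_i.
// In the annealing updates below, track i is assigned to vertex k with
// probability p_ik = p_k exp(-beta*E_ik) / Z_i, where Z_i sums
// p_k exp(-beta*E_ik) over all vertices (plus an outlier term when rho0 > 0).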


double DAClusterizerInZ::update(
  double beta,
  vector<track_t> & tks,
  vector<vertex_t> & y
  )const{
  // update weights and vertex positions
  // mass constrained annealing without noise
  // returns the squared sum of changes of vertex positions

  unsigned int nt=tks.size();

  // initialize sums
  double sumpi=0;
  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
    k->se=0; k->sw=0; k->swz=0;
    k->swE=0; k->Tc=0;
  }


  // loop over tracks
  for(unsigned int i=0; i<nt; i++){

    // update pik and Zi
    double Zi=0.;
    for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
      k->ei=exp(-beta*Eik(tks[i],*k));  // cache exponential for one track at a time
      Zi += k->pk * k->ei;
    }
    tks[i].Z=Zi;

    // normalization for pk
    if (tks[i].Z>0){
      sumpi += tks[i].pi;
      // accumulate weighted z and weights for vertex update
      for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
        k->se += tks[i].pi* k->ei / Zi;
        double w = k->pk * tks[i].pi* k->ei / Zi / tks[i].dz2;
        k->sw  += w;
        k->swz += w * tks[i].z;
        k->swE += w * Eik(tks[i],*k);
      }
    }else{
      sumpi += tks[i].pi;
    }

  } // end of track loop


  // now update z and pk
  double delta=0;
  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
    if ( k->sw > 0){
      double znew=k->swz/k->sw;
      delta+=pow(k->z-znew,2);
      k->z=znew;
      k->Tc=2*k->swE/k->sw;
    }else{
      edm::LogInfo("sumw") << "invalid sum of weights in fit: " << k->sw << endl;
      if(verbose_){cout << " a cluster melted away ?  pk=" << k->pk << " sumw=" << k->sw << endl;}
      k->Tc=-1;
    }

    k->pk = k->pk * k->se / sumpi;
  }

  // return how much the prototypes moved
  return delta;
}
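// Note: the update step above implements the standard deterministic-annealing
// (mean-field) equations that can be read off the code:
//   p_ik  = p_k exp(-beta*E_ik) / Z_i,   Z_i = sum_k p_k exp(-beta*E_ik)
//   z_k  <- sum_i w_ik z_i / sum_i w_ik,   with w_ik = pi_i p_ik / dz2_i
//   p_k  <- sum_i pi_i p_ik / sum_i pi_i                (mass constraint)
//   Tc_k  = 2 * sum_i w_ik E_ik / sum_i w_ik
// Tc_k estimates the critical temperature below which vertex k becomes
// unstable and should be split into two sub-clusters.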




double DAClusterizerInZ::update(
  double beta,
  vector<track_t> & tks,
  vector<vertex_t> & y,
  double & rho0
  )const{
  // MVF style, no more vertex weights, update track weights and vertex positions, with noise
  // returns the squared sum of changes of vertex positions

  unsigned int nt=tks.size();

  // initialize sums
  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
    k->se=0; k->sw=0; k->swz=0;
    k->swE=0; k->Tc=0;
  }


  // loop over tracks
  for(unsigned int i=0; i<nt; i++){

    // update pik and Zi
    double Zi=rho0*exp(-beta*dzCutOff_*dzCutOff_);  // cut-off
    for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
      k->ei=exp(-beta*Eik(tks[i],*k));  // cache exponential for one track at a time
      Zi += k->pk * k->ei;
    }
    tks[i].Z=Zi;

    // normalization
    if (tks[i].Z>0){
      // accumulate weighted z and weights for vertex update
      for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
        k->se += tks[i].pi* k->ei / Zi;
        double w = k->pk * tks[i].pi* k->ei / Zi / tks[i].dz2;
        k->sw  += w;
        k->swz += w * tks[i].z;
        k->swE += w * Eik(tks[i],*k);
      }
    }

  } // end of track loop


  // now update z
  double delta=0;
  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
    if ( k->sw > 0){
      double znew=k->swz/k->sw;
      delta+=pow(k->z-znew,2);
      k->z=znew;
      k->Tc=2*k->swE/k->sw;
    }else{
      edm::LogInfo("sumw") << "invalid sum of weights in fit: " << k->sw << endl;
      if(verbose_){cout << " a cluster melted away ?  pk=" << k->pk << " sumw=" << k->sw << endl;}
      k->Tc=0;
    }

  }

  // return how much the prototypes moved
  return delta;
}
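// Note: this overload differs from the one above only by the noise ("outlier")
// term: each track's partition function starts from
//   Z_i = rho0 * exp(-beta * dzCutOff_^2)
// instead of 0, so tracks far from every prototype keep a sizeable outlier
// probability and contribute little to any vertex. The vertex weights p_k are
// not renormalized here (no mass constraint at this stage).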




bool DAClusterizerInZ::merge(vector<vertex_t> & y, int nt)const{
  // merge clusters that collapsed or never separated, return true if vertices were merged, false otherwise

  if(y.size()<2) return false;

  for(vector<vertex_t>::iterator k=y.begin(); (k+1)!=y.end(); k++){
    if( fabs( (k+1)->z - k->z ) < 1.e-3 ){  // with fabs if only called after freeze-out (splitAll() at higher T)
      double rho=k->pk + (k+1)->pk;
      if(rho>0){
        k->z=( k->pk * k->z + (k+1)->z * (k+1)->pk)/rho;
      }else{
        k->z=0.5*(k->z + (k+1)->z);
      }
      k->pk=rho;

      y.erase(k+1);
      return true;
    }
  }

  return false;
}



bool DAClusterizerInZ::merge(vector<vertex_t> & y, double & beta)const{
  // merge clusters that collapsed or never separated,
  // only merge if the estimated critical temperature of the merged vertex is below the current temperature
  // return true if vertices were merged, false otherwise
  if(y.size()<2) return false;

  for(vector<vertex_t>::iterator k=y.begin(); (k+1)!=y.end(); k++){
    if (fabs((k+1)->z - k->z)<2.e-3){
      double rho=k->pk + (k+1)->pk;
      double swE=k->swE+(k+1)->swE - k->pk * (k+1)->pk /rho *pow((k+1)->z - k->z,2);
      double Tc=2*swE/(k->sw+(k+1)->sw);

      if(Tc*beta<1){
        if(rho>0){
          k->z=( k->pk * k->z + (k+1)->z * (k+1)->pk)/rho;
        }else{
          k->z=0.5*(k->z + (k+1)->z);
        }
        k->pk=rho;
        k->sw+=(k+1)->sw;
        k->swE=swE;
        k->Tc=Tc;
        y.erase(k+1);
        return true;
      }
    }
  }

  return false;
}
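// Note: the merge criterion above estimates the critical temperature of the
// would-be merged cluster,
//   Tc_merged = 2*swE_merged / (sw_1 + sw_2),
//   swE_merged = swE_1 + swE_2 - p_1 p_2 (z_2 - z_1)^2 / (p_1 + p_2),
// exactly as computed in the code, and accepts the merge only when
// beta*Tc_merged < 1, i.e. the merged prototype would be stable (not
// immediately critical) at the current temperature.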




bool DAClusterizerInZ::purge(vector<vertex_t> & y, vector<track_t> & tks, double & rho0, const double beta)const{
  // eliminate clusters with only one significant/unique track
  if(y.size()<2) return false;

  unsigned int nt=tks.size();
  double sumpmin=nt;
  vector<vertex_t>::iterator k0=y.end();
  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
    int nUnique=0;
    double sump=0;
    double pmax=k->pk/(k->pk+rho0*exp(-beta*dzCutOff_*dzCutOff_));
    for(unsigned int i=0; i<nt; i++){
      if(tks[i].Z>0){
        double p=k->pk * exp(-beta*Eik(tks[i],*k)) / tks[i].Z;
        sump+=p;
        if( (p > 0.9*pmax) && (tks[i].pi>0) ){ nUnique++; }
      }
    }

    if((nUnique<2)&&(sump<sumpmin)){
      sumpmin=sump;
      k0=k;
    }
  }

  if(k0!=y.end()){
    if(verbose_){cout << "eliminating prototype at " << k0->z << " with sump=" << sumpmin << endl;}
    //rho0+=k0->pk;
    y.erase(k0);
    return true;
  }else{
    return false;
  }
}
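// Note: pmax is the assignment probability a track sitting exactly on the
// vertex would have if it competed only with the outlier term. A track counts
// as "unique" when its actual probability exceeds 0.9*pmax, i.e. it is
// essentially owned by this vertex. Prototypes with fewer than two such tracks
// are elimination candidates; the one with the smallest summed assignment
// probability is removed first (one per call).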




double DAClusterizerInZ::beta0(
  double betamax,
  vector<track_t> & tks,
  vector<vertex_t> & y
  )const{

  double T0=0;  // max Tc for beta=0
  // estimate critical temperature from beta=0 (T=inf)
  unsigned int nt=tks.size();

  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){

    // vertex fit at T=inf
    double sumwz=0;
    double sumw=0;
    for(unsigned int i=0; i<nt; i++){
      double w=tks[i].pi/tks[i].dz2;
      sumwz+=w*tks[i].z;
      sumw +=w;
    }
    k->z=sumwz/sumw;

    // estimate Tcrit, eventually do this in the same loop
    double a=0, b=0;
    for(unsigned int i=0; i<nt; i++){
      double dx=tks[i].z-(k->z);
      double w=tks[i].pi/tks[i].dz2;
      a+=w*pow(dx,2)/tks[i].dz2;
      b+=w;
    }
    double Tc= 2.*a/b;  // the critical temperature of this vertex
    if(Tc>T0) T0=Tc;
  }  // vertex loop (normally there should be only one vertex at beta=0)

  if (T0>1./betamax){
    return betamax/pow(coolingFactor_, int(log(T0*betamax)/log(coolingFactor_))-1 );
  }else{
    // ensure at least one annealing step
    return betamax/coolingFactor_;
  }
}
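// Note: at T=inf every track is shared equally, so the single prototype sits at
// the weighted mean of all track z positions. Its critical temperature follows
// from the code above as
//   Tc = 2 * sum_i w_i (z_i - zbar)^2 / dz2_i / sum_i w_i,   w_i = pi_i / dz2_i,
// i.e. twice the weighted, resolution-normalized spread of the track z's.
// The returned beta0 lies on the coolingFactor_ grid such that the starting
// temperature 1/beta0 is just above T0, so the first split happens inside the
// annealing loop rather than before it.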




bool DAClusterizerInZ::split(
  double beta,
  vector<track_t> & tks,
  vector<vertex_t> & y,
  double threshold
  )const{
  // split only critical vertices (Tc >~ T=1/beta  <==>  beta*Tc >~ 1)
  // an update must have been made just before doing this (same beta, no merging)
  // returns true if at least one cluster was split

  double epsilon=1e-3;  // split all single vertices by 10 um
  bool split=false;


  // avoid left-right biases by splitting highest Tc first

  std::vector<std::pair<double, unsigned int> > critical;
  for(unsigned int ik=0; ik<y.size(); ik++){
    if (beta*y[ik].Tc > 1.){
      critical.push_back( make_pair(y[ik].Tc, ik));
    }
  }
  stable_sort(critical.begin(), critical.end(), std::greater<std::pair<double, unsigned int> >() );


  for(unsigned int ic=0; ic<critical.size(); ic++){
    unsigned int ik=critical[ic].second;
    // estimate subcluster positions and weight
    double p1=0, z1=0, w1=0;
    double p2=0, z2=0, w2=0;
    //double sumpi=0;
    for(unsigned int i=0; i<tks.size(); i++){
      if(tks[i].Z>0){
        //sumpi+=tks[i].pi;
        double p=y[ik].pk * exp(-beta*Eik(tks[i],y[ik])) / tks[i].Z*tks[i].pi;
        double w=p/tks[i].dz2;
        if(tks[i].z<y[ik].z){
          p1+=p; z1+=w*tks[i].z; w1+=w;
        }else{
          p2+=p; z2+=w*tks[i].z; w2+=w;
        }
      }
    }
    if(w1>0){ z1=z1/w1;} else{z1=y[ik].z-epsilon;}
    if(w2>0){ z2=z2/w2;} else{z2=y[ik].z+epsilon;}

    // reduce split size if there is not enough room
    if( ( ik > 0 ) && ( y[ik-1].z>=z1 ) ){ z1=0.5*(y[ik].z+y[ik-1].z); }
    if( ( ik+1 < y.size()) && ( y[ik+1].z<=z2 ) ){ z2=0.5*(y[ik].z+y[ik+1].z); }

    // split if the new subclusters are significantly separated
    if( (z2-z1)>epsilon){
      split=true;
      vertex_t vnew;
      vnew.pk = p1*y[ik].pk/(p1+p2);
      y[ik].pk= p2*y[ik].pk/(p1+p2);
      vnew.z  = z1;
      y[ik].z = z2;
      y.insert(y.begin()+ik, vnew);

      // adjust remaining pointers
      for(unsigned int jc=ic; jc<critical.size(); jc++){
        if (critical[jc].second>ik) {critical[jc].second++;}
      }
    }
  }

  //  stable_sort(y.begin(), y.end(), clusterLessZ);
  return split;
}
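// Note: a vertex is "critical" when beta*Tc > 1, i.e. the temperature has
// dropped below its estimated critical temperature and the cluster is unstable
// against splitting. The two daughter positions are seeded at the
// probability-weighted means of the tracks on either side of z_k, and the
// parent's weight p_k is shared between them in proportion to the summed
// assignment probabilities p1 and p2.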




void DAClusterizerInZ::splitAll(
  vector<vertex_t> & y
  )const{


  double epsilon=1e-3;   // split all single vertices by 10 um
  double zsep=2*epsilon; // split vertices that are isolated by at least zsep (vertices that haven't collapsed)
  vector<vertex_t> y1;

  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
    if ( ( (k==y.begin())|| (k-1)->z < k->z - zsep) && (((k+1)==y.end() )|| (k+1)->z > k->z + zsep)) {
      // isolated prototype, split
      vertex_t vnew;
      vnew.z  = k->z - epsilon;
      (*k).z  = k->z + epsilon;
      vnew.pk = 0.5* (*k).pk;
      (*k).pk = 0.5* (*k).pk;
      y1.push_back(vnew);
      y1.push_back(*k);

    }else if( y1.empty() || (y1.back().z < k->z - zsep)){
      y1.push_back(*k);
    }else{
      y1.back().z -= epsilon;
      k->z += epsilon;
      y1.push_back(*k);
    }
  }  // vertex loop

  y=y1;
}


DAClusterizerInZ::DAClusterizerInZ(const edm::ParameterSet & conf)
{
  // some defaults to avoid uninitialized variables
  verbose_ = conf.getUntrackedParameter<bool>("verbose", false);
  useTc_=true;
  betamax_=0.1;
  betastop_=1.0;
  coolingFactor_=0.8;
  maxIterations_=100;
  vertexSize_=0.05;  // 0.5 mm
  dzCutOff_=4.0;     // Adaptive Fitter uses 3.0 but that appears to be a bit tight here sometimes

  // configure

  double Tmin = conf.getParameter<double>("Tmin");
  vertexSize_ = conf.getParameter<double>("vertexSize");
  coolingFactor_ = conf.getParameter<double>("coolingFactor");
  d0CutOff_ = conf.getParameter<double>("d0CutOff");
  dzCutOff_ = conf.getParameter<double>("dzCutOff");
  maxIterations_=100;
  if (Tmin==0){
    cout << "DAClusterizerInZ: invalid Tmin " << Tmin << " reset to default " << 1./betamax_ << endl;
  }else{
    betamax_ = 1./Tmin;
  }

  // for testing, negative cooling factor: revert to old splitting scheme
  if(coolingFactor_<0){
    coolingFactor_=-coolingFactor_; useTc_=false;
  }

}
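// Note: parameters read from the ParameterSet and their roles in this file:
//   Tmin          - splitting stops once T < Tmin (betamax_ = 1/Tmin);
//   vertexSize    - minimum assumed vertex size, added in quadrature to each track's dz2;
//   coolingFactor - T is multiplied by this factor at every annealing step
//                   (a negative value selects the old splitAll scheme);
//   d0CutOff      - controls the impact-parameter down-weighting of tracks (pi);
//   dzCutOff      - z scale of the outlier (noise) term used together with rho0.
// betastop_ (T=1) and maxIterations_ are hard-coded here.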


void DAClusterizerInZ::dump(const double beta, const vector<vertex_t> & y, const vector<track_t> & tks0, int verbosity)const{

  // copy and sort for nicer printout
  vector<track_t> tks;
  for(vector<track_t>::const_iterator t=tks0.begin(); t!=tks0.end(); t++){tks.push_back(*t); }
  stable_sort(tks.begin(), tks.end(), recTrackLessZ1);

  cout << "-----DAClusterizerInZ::dump ----" << endl;
  cout << "beta=" << beta << " betamax= " << betamax_ << endl;
  cout << " z= ";
  cout.precision(4);
  for(vector<vertex_t>::const_iterator k=y.begin(); k!=y.end(); k++){
    cout << setw(8) << fixed << k->z ;
  }
  cout << endl << "T=" << setw(15) << 1./beta <<" Tc= ";
  for(vector<vertex_t>::const_iterator k=y.begin(); k!=y.end(); k++){
    cout << setw(8) << fixed << k->Tc ;
  }

  cout << endl << " pk=";
  double sumpk=0;
  for(vector<vertex_t>::const_iterator k=y.begin(); k!=y.end(); k++){
    cout << setw(8) << setprecision(3) << fixed << k->pk;
    sumpk+=k->pk;
  }
  cout << endl;

  if(verbosity>0){
    double E=0, F=0;
    cout << endl;
    cout << "---- z +/- dz ip +/-dip pt phi eta weights ----" << endl;
    cout.precision(4);
    for(unsigned int i=0; i<tks.size(); i++){
      if (tks[i].Z>0){ F-=log(tks[i].Z)/beta;}
      double tz= tks[i].z;
      cout << setw(3)<< i << ")" << setw(8) << fixed << setprecision(4)<< tz << " +/-" << setw(6)<< sqrt(tks[i].dz2);

      if(tks[i].tt->track().quality(reco::TrackBase::highPurity)){ cout << " *";}else{cout <<" ";}
      if(tks[i].tt->track().hitPattern().hasValidHitInFirstPixelBarrel()){ cout <<"+"; }else{ cout << "-"; }
      cout << setw(1) << tks[i].tt->track().hitPattern().pixelBarrelLayersWithMeasurement();  // see DataFormats/TrackReco/interface/HitPattern.h
      cout << setw(1) << tks[i].tt->track().hitPattern().pixelEndcapLayersWithMeasurement();
      cout << setw(1) << hex << tks[i].tt->track().hitPattern().trackerLayersWithMeasurement() -
        tks[i].tt->track().hitPattern().pixelLayersWithMeasurement() << dec;
      cout << "=" << setw(1) << hex << tks[i].tt->track().hitPattern().numberOfHits(reco::HitPattern::MISSING_OUTER_HITS) << dec;

      Measurement1D IP=tks[i].tt->stateAtBeamLine().transverseImpactParameter();
      cout << setw(8) << IP.value() << "+/-" << setw(6) << IP.error();
      cout << " " << setw(6) << setprecision(2) << tks[i].tt->track().pt()*tks[i].tt->track().charge();
      cout << " " << setw(5) << setprecision(2) << tks[i].tt->track().phi()
           << " " << setw(5) << setprecision(2) << tks[i].tt->track().eta() ;

      double sump=0.;
      for(vector<vertex_t>::const_iterator k=y.begin(); k!=y.end(); k++){
        if((tks[i].pi>0)&&(tks[i].Z>0)){
          //double p=pik(beta,tks[i],*k);
          double p=k->pk * exp(-beta*Eik(tks[i],*k)) / tks[i].Z;
          if( p > 0.0001){
            cout << setw(8) << setprecision(3) << p;
          }else{
            cout << " . ";
          }
          E+=p*Eik(tks[i],*k);
          sump+=p;
        }else{
          cout << " ";
        }
      }
      cout << endl;
    }
    cout << endl << "T=" << 1/beta << " E=" << E << " n="<< y.size() << " F= " << F << endl << "----------" << endl;
  }
}




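// Note: vertices() below runs the full annealing schedule that can be traced in
// the code: start at beta0 (just above the first critical temperature), then
// repeatedly cool by coolingFactor_, re-equilibrate with update(), and either
// split critical clusters (useTc_) or splitAll(); once beta reaches
// betamax_ = 1/Tmin, switch on the outlier term (rho0 = 1/nt, all pk reset to 1),
// merge collapsed prototypes, purge insignificant ones while cooling further to
// betastop_, and finally assign each track to the vertex for which its
// assignment probability exceeds 0.5.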
vector< TransientVertex >
DAClusterizerInZ::vertices(const vector<reco::TransientTrack> & tracks, const int verbosity)
const
{

  vector<track_t> tks=fill(tracks);
  unsigned int nt=tracks.size();
  double rho0=0.0;  // start with no outlier rejection

  vector< TransientVertex > clusters;
  if (tks.empty()) return clusters;

  vector<vertex_t> y;  // the vertex prototypes

  // initialize: single vertex at infinite temperature
  vertex_t vstart;
  vstart.z=0.;
  vstart.pk=1.;
  y.push_back(vstart);
  int niter=0;  // number of iterations


  // estimate first critical temperature
  double beta=beta0(betamax_, tks, y);
  niter=0; while((update(beta, tks,y)>1.e-6) && (niter++ < maxIterations_)){ }


  // annealing loop, stop when T<Tmin (i.e. beta>1/Tmin)
  while(beta<betamax_){

    if(useTc_){
      update(beta, tks,y);
      while(merge(y,beta)){update(beta, tks,y);}
      split(beta, tks,y, 1.);
      beta=beta/coolingFactor_;
    }else{
      beta=beta/coolingFactor_;
      splitAll(y);
    }


    // make sure we are not too far from equilibrium before cooling further
    niter=0; while((update(beta, tks,y)>1.e-6) && (niter++ < maxIterations_)){ }

  }

  if(useTc_){
    // last round of splitting, make sure no critical clusters are left
    update(beta, tks,y);
    while(merge(y,beta)){update(beta, tks,y);}
    unsigned int ntry=0;
    while( split(beta, tks,y,1.) && (ntry++<10) ){
      niter=0;
      while((update(beta, tks,y)>1.e-6) && (niter++ < maxIterations_)){}
      merge(y,beta);
      update(beta, tks,y);
    }
  }else{
    // merge collapsed clusters
    while(merge(y,beta)){update(beta, tks,y);}
    if(verbose_ ){ cout << "dump after 1st merging " << endl; dump(beta,y,tks,2);}
  }




  // switch on outlier rejection
  rho0=1./nt; for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){ k->pk =1.; }  // democratic
  niter=0; while((update(beta, tks,y,rho0) > 1.e-8) && (niter++ < maxIterations_)){ }
  if(verbose_ ){ cout << "rho0=" << rho0 << " niter=" << niter << endl; dump(beta,y,tks,2);}


  // merge again (some clusters split by outliers collapse here)
  while(merge(y,tks.size())){}
  if(verbose_ ){ cout << "dump after 2nd merging " << endl; dump(beta,y,tks,2);}


  // continue from freeze-out to Tstop (=1) without splitting, eliminate insignificant vertices
  while(beta<=betastop_){
    while(purge(y,tks,rho0, beta)){
      niter=0; while((update(beta, tks,y,rho0) > 1.e-6) && (niter++ < maxIterations_)){ }
    }
    beta/=coolingFactor_;
    niter=0; while((update(beta, tks,y,rho0) > 1.e-6) && (niter++ < maxIterations_)){ }
  }


  //  // new, one last round of cleaning at T=Tstop
  //  while(purge(y,tks,rho0, beta)){
  //    niter=0; while((update(beta, tks,y,rho0) > 1.e-6) && (niter++ < maxIterations_)){ }
  //  }


  if(verbose_){
    cout << "Final result, rho0=" << rho0 << endl;
    dump(beta,y,tks,2);
  }


  // select significant tracks and use a TransientVertex as a container
  GlobalError dummyError;


  // ensure correct normalization of probabilities, should make double assignment reasonably impossible
  for(unsigned int i=0; i<nt; i++){
    tks[i].Z=rho0*exp(-beta*dzCutOff_*dzCutOff_);
    for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
      tks[i].Z+=k->pk * exp(-beta*Eik(tks[i],*k));
    }
  }


  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
    GlobalPoint pos(0, 0, k->z);
    vector< reco::TransientTrack > vertexTracks;
    for(unsigned int i=0; i<nt; i++){
      if(tks[i].Z>0){
        double p=k->pk * exp(-beta*Eik(tks[i],*k)) / tks[i].Z;
        if( (tks[i].pi>0) && ( p > 0.5 ) ){ vertexTracks.push_back(*(tks[i].tt)); tks[i].Z=0; }  // setting Z=0 excludes double assignment
      }
    }
    TransientVertex v(pos, dummyError, vertexTracks, 5);
    clusters.push_back(v);
  }


  return clusters;

}




vector< vector<reco::TransientTrack> >
DAClusterizerInZ::clusterize(const vector<reco::TransientTrack> & tracks)
  const
{
  if(verbose_) {
    cout << "###################################################" << endl;
    cout << "# DAClusterizerInZ::clusterize nt="<<tracks.size() << endl;
    cout << "###################################################" << endl;
  }

  vector< vector<reco::TransientTrack> > clusters;
  vector< TransientVertex > pv=vertices(tracks);

  if(verbose_){ cout << "# DAClusterizerInZ::clusterize pv.size="<<pv.size() << endl; }
  if (pv.size()==0){ return clusters;}


  // fill into clusters and merge
  vector< reco::TransientTrack> aCluster=pv.begin()->originalTracks();

  for(vector<TransientVertex>::iterator k=pv.begin()+1; k!=pv.end(); k++){
    if ( fabs(k->position().z() - (k-1)->position().z()) > (2*vertexSize_) ){
      // close a cluster
      clusters.push_back(aCluster);
      aCluster.clear();
    }
    for(unsigned int i=0; i<k->originalTracks().size(); i++){ aCluster.push_back( k->originalTracks().at(i)); }

  }
  clusters.push_back(aCluster);


  return clusters;

}
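// Note: a minimal usage sketch, kept as comments since the concrete
// ParameterSet construction depends on the configuration framework in use:
//
//   edm::ParameterSet conf;               // must provide Tmin, vertexSize,
//                                         // coolingFactor, d0CutOff, dzCutOff
//   DAClusterizerInZ clusterizer(conf);
//   std::vector<std::vector<reco::TransientTrack> > clusters =
//       clusterizer.clusterize(transientTracks);   // or clusterizer.vertices(...)
//
// Each inner vector groups the tracks of vertices that lie within 2*vertexSize
// of each other in z.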