DAClusterizerInZT.cc
6 
7 using namespace std;
8 
9 namespace {
10  constexpr double epsilon = 1.0e-3;
11  constexpr double vertexSizeTime = 0.008;
12  constexpr double dtCutOff = 4.0;
13 
14  bool recTrackLessZ1(const DAClusterizerInZT::track_t & tk1,
15  const DAClusterizerInZT::track_t & tk2)
16  {
17  return tk1.z < tk2.z;
18  }
19 
20  double sqr(double f) { return f*f; }
21 }
22 
23 
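// fill(): convert the input TransientTracks into the lightweight track_t structs used by
// the annealing. For each track it stores the z position at the beam line, the time
// estimate t, the squared resolutions dz2 (track dz error, beam-spot contribution and
// intrinsic vertex size) and dt2 (time error plus a minimum vertex size in time), and an
// outlier down-weighting factor pi derived from the transverse impact parameter significance.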
24 vector<DAClusterizerInZT::track_t>
25 DAClusterizerInZT::fill( const vector<reco::TransientTrack> & tracks )const{
26  // prepare track data for clustering
27  vector<track_t> tks;
28  tks.reserve(tracks.size());
29  for(vector<reco::TransientTrack>::const_iterator it=tracks.begin(); it!=tracks.end(); it++){
30  track_t t;
31  t.pi = 1.;
32  auto tsPCA = (*it).stateAtBeamLine().trackStateAtPCA();
33  t.z = tsPCA.position().z();
34  t.t = it->timeExt(); // the time
35 
36  if (std::abs(t.z) > 1000.) continue;
37  auto const & t_mom = tsPCA.momentum();
38  // get the beam-spot
39  reco::BeamSpot beamspot = (*it).stateAtBeamLine().beamSpot();
40  t.dz2 =
41  sqr((*it).track().dzError()) // track error
42  + (sqr(beamspot.BeamWidthX()*t_mom.x())+sqr(beamspot.BeamWidthY()*t_mom.y()))*sqr(t_mom.z())/sqr(t_mom.perp2()) // beam spot width
43  + sqr(vertexSize_); // intrinsic vertex size, safer for outliers and short lived decays
44  //t.dz2 = 1./ t_dz2;
45 
46  t.dtz = 0.;
47  t.dt2 = sqr((*it).dtErrorExt()) + sqr(vertexSizeTime); // the injected timing error, plus a small minimum vertex size in time
48 
49  if (d0CutOff_>0){
50  Measurement1D IP = (*it).stateAtBeamLine().transverseImpactParameter();// error contains the beam spot
51  t.pi=1./(1.+std::exp(sqr(IP.value()/IP.error()) - sqr(d0CutOff_))); // reduce weight for high ip tracks
52  }else{
53  t.pi=1.;
54  }
55  t.tt=&(*it);
56  t.zi=1.;
57  if( edm::isFinite(t.t) ) { // keep only tracks with a finite time measurement
58  tks.push_back(t);
59  }
60  }
61 
62  tks.shrink_to_fit();
63  return tks;
64 }
65 
66 
67 
68 
69 
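// e_ik(): the "energy" of assigning track i to vertex prototype k, i.e. the squared
// residual normalized by the per-track resolutions,
//   e_ik = (z_i - z_k)^2 / dz2_i + (t_i - t_k)^2 / dt2_i.
// The assignment probability used below is p_ik = p_k * exp(-beta * e_ik) / Z_i.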
70 double DAClusterizerInZT::e_ik(const track_t & t, const vertex_t &k ) const {
71  return sqr(t.z-k.z)/t.dz2 + sqr(t.t - k.t)/t.dt2;
72 }
73 
74 
75 
76 
77 
78 
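// update(): one deterministic-annealing iteration at inverse temperature beta.
// E-step: for every track compute the partition function
//   Z_i = rho0 * exp(-beta * dzCutOff_^2) + sum_k p_k * exp(-beta * e_ik),
// where the rho0 term represents the noise/outlier cluster.
// M-step: move each prototype to the assignment- and resolution-weighted mean of the
// track (z, t), update its weight p_k (only while rho0 == 0) and cache its estimated
// critical temperature Tc = 2*swE/sw. Returns the summed squared movement of the prototypes.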
79 double DAClusterizerInZT::update( double beta,
80  vector<track_t> & tks,
81  vector<vertex_t> & y,
82  const double rho0 ) const {
83  // MVF style, no more vertex weights; update track weights and vertex positions, with a noise term
84  // returns the squared sum of changes of vertex positions
85 
86  unsigned int nt=tks.size();
87 
88  //initialize sums
89  double sumpi = 0.;
90  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
91  k->sw = 0.; k->swz = 0.; k->swt = 0.; k->se = 0.;
92  k->swE = 0.; k->tC=0.;
93  }
94 
95 
96  // loop over tracks
97  for(unsigned int i=0; i<nt; i++){
98 
99  // update pik and Zi and Ti
100  double Zi = rho0*std::exp(-beta*(dzCutOff_*dzCutOff_));// cut-off (eventually add finite size in time)
101  //double Ti = 0.; // dt0*std::exp(-beta*dtCutOff_);
102  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
103  k->ei = std::exp(-beta*e_ik(tks[i],*k));// cache exponential for one track at a time
104  Zi += k->pk * k->ei;
105  }
106  tks[i].zi=Zi;
107  sumpi += tks[i].pi;
108 
109  // normalization
110  if (tks[i].zi>0){
111  // accumulate weighted z and weights for vertex update
112  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
113  k->se += tks[i].pi* k->ei / Zi;
114  double w = k->pk * tks[i].pi * k->ei /( Zi * ( tks[i].dz2 * tks[i].dt2 ) );
115  k->sw += w;
116  k->swz += w * tks[i].z;
117  k->swt += w * tks[i].t;
118  k->swE += w * e_ik(tks[i],*k);
119  }
120  }
121  } // end of track loop
122 
123 
124  // now update z
125  double delta=0;
126  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
127  if ( k->sw > 0){
128  const double znew=k->swz/k->sw;
129  const double tnew=k->swt/k->sw;
130  delta += sqr(k->z-znew) + sqr(k->t-tnew);
131  k->z = znew;
132  k->t = tnew;
133  k->tC = 2*k->swE/k->sw;
134  }else{
135  edm::LogInfo("sumw") << "invalid sum of weights in fit: " << k->sw << endl;
136  if(verbose_){cout << " a cluster melted away ? pk=" << k->pk << " sumw=" << k->sw << endl;}
137  k->tC = (rho0 == 0. ? -1 : 0);
138  }
139  if(rho0 == 0.) k->pk = k->pk * k->se / sumpi;
140  }
141 
142  // return how much the prototypes moved
143  return delta;
144 }
145 
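// merge(y, nt): collapse two neighbouring prototypes whose z and t differ by less than
// epsilon into a single prototype at their pk-weighted position; intended for use after
// freeze-out. Returns true if a pair was merged.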
146 bool DAClusterizerInZT::merge(vector<vertex_t> & y, int nt)const{
147  // merge clusters that collapsed or never separated, return true if vertices were merged, false otherwise
148 
149  if(y.size()<2) return false;
150 
151  for(vector<vertex_t>::iterator k=y.begin(); (k+1)!=y.end(); k++){
152  if( std::abs( (k+1)->z - k->z ) < epsilon &&
153  std::abs( (k+1)->t - k->t ) < epsilon ){ // with fabs if only called after freeze-out (splitAll() at higher T)
154  double rho = k->pk + (k+1)->pk;
155  if(rho>0){
156  k->z = ( k->pk * k->z + (k+1)->z * (k+1)->pk)/rho;
157  k->t = ( k->pk * k->t + (k+1)->t * (k+1)->pk)/rho;
158  }else{
159  k->z = 0.5*(k->z + (k+1)->z);
160  k->t = 0.5*(k->t + (k+1)->t);
161  }
162  k->pk = rho;
163 
164  y.erase(k+1);
165  return true;
166  }
167  }
168 
169  return false;
170 }
171 
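// merge(y, beta): as above, but a pair (within 2*epsilon) is only merged if the critical
// temperature of the combined prototype is below the current temperature (Tc*beta < 1),
// i.e. if the merged cluster would not immediately split again.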
172 bool DAClusterizerInZT::merge(vector<vertex_t> & y, double & beta)const{
173  // merge clusters that collapsed or never separated,
174  // only merge if the estimated critical temperature of the merged vertex is below the current temperature
175  // return true if vertices were merged, false otherwise
176  if(y.size()<2) return false;
177 
178  for(vector<vertex_t>::iterator k=y.begin(); (k+1)!=y.end(); k++){
179  if ( std::abs((k+1)->z - k->z) < 2*epsilon &&
180  std::abs((k+1)->t - k->t) < 2*epsilon ) {
181  double rho=k->pk + (k+1)->pk;
182  double swE=k->swE+(k+1)->swE - k->pk * (k+1)->pk / rho * ( sqr((k+1)->z - k->z) +
183  sqr((k+1)->t - k->t) );
184  double Tc=2*swE/(k->sw+(k+1)->sw);
185 
186  if(Tc*beta<1){
187  if(rho>0){
188  k->z = ( k->pk * k->z + (k+1)->z * (k+1)->pk)/rho;
189  k->t = ( k->pk * k->t + (k+1)->t * (k+1)->pk)/rho;
190  }else{
191  k->z = 0.5*(k->z + (k+1)->z);
192  k->t = 0.5*(k->t + (k+1)->t);
193  }
194  k->pk = rho;
195  k->sw += (k+1)->sw;
196  k->swE = swE;
197  k->tC = Tc;
198  y.erase(k+1);
199  return true;
200  }
201  }
202  }
203 
204  return false;
205 }
206 
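// purge(): eliminate the least significant prototype: one with fewer than two tracks
// assigned at close to the maximum possible probability (p > 0.9*pmax, pmax limited by
// the outlier term rho0) and with the smallest summed assignment probability.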
207 bool DAClusterizerInZT::purge(vector<vertex_t> & y, vector<track_t> & tks, double & rho0, const double beta)const{
208  // eliminate clusters with only one significant/unique track
209  if(y.size()<2) return false;
210 
211  unsigned int nt=tks.size();
212  double sumpmin=nt;
213  vector<vertex_t>::iterator k0=y.end();
214  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
215  int nUnique=0;
216  double sump=0;
217  double pmax=k->pk/(k->pk+rho0*exp(-beta*dzCutOff_*dzCutOff_));
218  for(unsigned int i=0; i<nt; i++){
219  if(tks[i].zi > 0){
220  double p = k->pk * std::exp(-beta*e_ik(tks[i],*k)) / tks[i].zi ;
221  sump+=p;
222  if( (p > 0.9*pmax) && (tks[i].pi>0) ){ nUnique++; }
223  }
224  }
225 
226  if((nUnique<2)&&(sump<sumpmin)){
227  sumpmin=sump;
228  k0=k;
229  }
230  }
231 
232  if(k0!=y.end()){
233  if(verbose_){cout << "eliminating prototype at " << k0->z << "," << k0->t
234  << " with sump=" << sumpmin << endl;}
235  y.erase(k0);
236  return true;
237  }else{
238  return false;
239  }
240 }
241 
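// beta0(): estimate the first critical (inverse) temperature. At T = infinity all tracks
// belong to a single prototype at the weighted mean (z, t); its critical temperature is
//   Tc = 2 * sum_i w_i * ( dz_i^2/dz2_i + dt_i^2/dt2_i ) / sum_i w_i,  w_i = pi_i/(dz2_i*dt2_i).
// The returned beta is chosen on the cooling-factor grid ending at betamax so that the
// annealing starts above the highest critical temperature (with at least one cooling step
// if T0 is already below 1/betamax).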
242 double DAClusterizerInZT::beta0( double betamax,
243  vector<track_t> & tks,
244  vector<vertex_t> & y ) const {
245 
246  double T0=0; // max Tc for beta=0
247  // estimate critical temperature from beta=0 (T=inf)
248  unsigned int nt=tks.size();
249 
250  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
251 
252  // vertex fit at T=inf
253  double sumwz=0.;
254  double sumwt=0.;
255  double sumw=0.;
256  for(unsigned int i=0; i<nt; i++){
257  double w = tks[i].pi/(tks[i].dz2 * tks[i].dt2);
258  sumwz += w*tks[i].z;
259  sumwt += w*tks[i].t;
260  sumw += w;
261  }
262  k->z = sumwz/sumw;
263  k->t = sumwt/sumw;
264 
265  // estimate Tcrit, eventually do this in the same loop
266  double a=0, b=0;
267  for(unsigned int i=0; i<nt; i++){
268  double dx = tks[i].z-(k->z);
269  double dt = tks[i].t-(k->t);
270  double w = tks[i].pi/(tks[i].dz2 * tks[i].dt2);
271  a += w*(sqr(dx)/tks[i].dz2 + sqr(dt)/tks[i].dt2);
272  b += w;
273  }
274  double Tc= 2.*a/b; // the critical temperature of this vertex
275  if(Tc>T0) T0=Tc;
276  }// vertex loop (normally there should be only one vertex at beta=0)
277 
278  if (T0>1./betamax){
279  return betamax/pow(coolingFactor_, int(std::log(T0*betamax)*logCoolingFactor_)-1 );
280  }else{
281  // ensure at least one annealing step
282  return betamax/coolingFactor_;
283  }
284 }
285 
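// split(): split prototypes whose critical temperature has been reached (beta*Tc > 1),
// highest Tc first. Each critical prototype is replaced by two prototypes placed at the
// assignment-weighted (z, t) of its tracks on either side of the old z position (or at
// +/- epsilon if one side is empty), sharing the original pk in proportion to the summed
// assignment probabilities. Returns true if at least one prototype was split.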
286 bool DAClusterizerInZT::split( double beta,
287  vector<track_t> & tks,
288  vector<vertex_t> & y,
289  double threshold ) const {
290  // split only critical vertices (Tc >~ T=1/beta <==> beta*Tc>~1)
291  // an update must have been made just before doing this (same beta, no merging)
292  // returns true if at least one cluster was split
293  bool split=false;
294 
295  // avoid left-right biases by splitting highest Tc first
296 
297  std::vector<std::pair<double, unsigned int> > critical;
298  for(unsigned int ik=0; ik<y.size(); ik++){
299  if (beta*y[ik].tC > 1.){
300  critical.push_back( make_pair(y[ik].tC, ik));
301  }
302  }
303  std::stable_sort(critical.begin(), critical.end(), std::greater<std::pair<double, unsigned int> >() );
304 
305  for(unsigned int ic=0; ic<critical.size(); ic++){
306  unsigned int ik=critical[ic].second;
307  // estimate subcluster positions and weight
308  double p1=0, z1=0, t1=0, w1=0;
309  double p2=0, z2=0, t2=0, w2=0;
310  //double sumpi=0;
311  for(unsigned int i=0; i<tks.size(); i++){
312  if(tks[i].zi>0){
313  //sumpi+=tks[i].pi;
314  double p=y[ik].pk * exp(-beta*e_ik(tks[i],y[ik])) / tks[i].zi*tks[i].pi;
315  double w=p/(tks[i].dz2 * tks[i].dt2);
316  if(tks[i].z < y[ik].z){
317  p1+=p; z1+=w*tks[i].z; t1+=w*tks[i].t; w1+=w;
318  }else{
319  p2+=p; z2+=w*tks[i].z; t2+=w*tks[i].t; w2+=w;
320  }
321  }
322  }
323  if(w1>0){ z1=z1/w1; t1=t1/w1;} else{ z1=y[ik].z-epsilon; t1=y[ik].t-epsilon; }
324  if(w2>0){ z2=z2/w2; t2=t2/w2;} else{ z2=y[ik].z+epsilon; t2=y[ik].t+epsilon;}
325 
326  // reduce split size if there is not enough room
327  if( ( ik > 0 ) && ( y[ik-1].z>=z1 ) ){ z1=0.5*(y[ik].z+y[ik-1].z); t1=0.5*(y[ik].t+y[ik-1].t); }
328  if( ( ik+1 < y.size()) && ( y[ik+1].z<=z2 ) ){ z2=0.5*(y[ik].z+y[ik+1].z); t2=0.5*(y[ik].t+y[ik+1].t); }
329 
330  // split if the new subclusters are significantly separated
331  if( std::abs(z2-z1) > epsilon || std::abs(t2-t1) > epsilon ){
332  split=true;
333  vertex_t vnew;
334  vnew.pk = p1*y[ik].pk/(p1+p2);
335  y[ik].pk= p2*y[ik].pk/(p1+p2);
336  vnew.z = z1;
337  vnew.t = t1;
338  y[ik].z = z2;
339  y[ik].t = t2;
340  y.insert(y.begin()+ik, vnew);
341 
342  // adjust remaining pointers
343  for(unsigned int jc=ic; jc<critical.size(); jc++){
344  if (critical[jc].second>ik) {critical[jc].second++;}
345  }
346  }
347  }
348 
349  // stable_sort(y.begin(), y.end(), clusterLessZ);
350  return split;
351 }
352 
353 
354 
355 
356 
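// splitAll(): alternative splitting scheme used when useTc_ is false: every prototype
// that is isolated in z is replaced by a pair displaced by +/- epsilon in z and t, each
// carrying half of its pk; prototypes that are not isolated are nudged apart instead.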
357 void DAClusterizerInZT::splitAll( vector<vertex_t> & y ) const {
358 
359  constexpr double zsep=2*epsilon; // split vertices that are isolated by at least zsep (vertices that haven't collapsed)
360  constexpr double tsep=2*epsilon; // check t as well
361 
362  vector<vertex_t> y1;
363 
364  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
365  if ( ( (k==y.begin())|| (k-1)->z < k->z - zsep) && (((k+1)==y.end() )|| (k+1)->z > k->z + zsep)) {
366  // isolated prototype, split
367  vertex_t vnew;
368  vnew.z = k->z - epsilon;
369  vnew.t = k->t - epsilon;
370  (*k).z = k->z + epsilon;
371  (*k).t = k->t + epsilon;
372  vnew.pk= 0.5* (*k).pk;
373  (*k).pk= 0.5* (*k).pk;
374  y1.push_back(vnew);
375  y1.push_back(*k);
376 
377  }else if( y1.empty() || (y1.back().z < k->z -zsep) || (y1.back().t < k->t - tsep) ){
378  y1.push_back(*k);
379  }else{
380  y1.back().z -= epsilon;
381  y1.back().t -= epsilon;
382  k->z += epsilon;
383  k->t += epsilon;
384  y1.push_back(*k);
385  }
386  }// vertex loop
387 
388  y=y1;
389 }
390 
391 
392 DAClusterizerInZT::DAClusterizerInZT(const edm::ParameterSet& conf) :
393  verbose_(conf.getUntrackedParameter<bool>("verbose", false)),
394  useTc_(true),
395  vertexSize_(conf.getParameter<double>("vertexSize")),
396  maxIterations_(100),
397  coolingFactor_(std::sqrt(conf.getParameter<double>("coolingFactor"))),
398  betamax_(0.1),
399  betastop_(1.0),
400  dzCutOff_(conf.getParameter<double>("dzCutOff")),
401  d0CutOff_(conf.getParameter<double>("d0CutOff")),
402  dtCutOff_(dtCutOff)
403 {
404 
405  double Tmin = conf.getParameter<double>("Tmin")*std::sqrt(2.0);// scale up by sqrt(D=2)
406  if (Tmin==0){
407  edm::LogWarning("DAClusterizerInZT") << "DAClusterizerInZT: invalid Tmin " << Tmin << ", reset to default " << 1./betamax_ << endl;
408  }else{
409  betamax_ = 1./Tmin;
410  }
411 
412  // for testing, negative cooling factor: revert to old splitting scheme
413  if(coolingFactor_<0){
414  coolingFactor_ = -coolingFactor_; useTc_ = false; // restore the sign and fall back to splitAll()
415  }
416 
418 }
419 
420 
421 void DAClusterizerInZT::dump(const double beta, const vector<vertex_t> & y, const vector<track_t> & tks0, int verbosity)const{
422 
423  // copy and sort for nicer printout
424  vector<track_t> tks;
425  for(vector<track_t>::const_iterator t=tks0.begin(); t!=tks0.end(); t++){tks.push_back(*t); }
426  std::stable_sort(tks.begin(), tks.end(), recTrackLessZ1);
427 
428  cout << "-----DAClusterizerInZT::dump ----" << endl;
429  cout << " beta=" << beta << " betamax= " << betamax_ << endl;
430  cout << " z= ";
431  cout.precision(4);
432  for(vector<vertex_t>::const_iterator k=y.begin(); k!=y.end(); k++){
433  cout << setw(8) << fixed << k->z;
434  }
435  cout << endl << " t= ";
436  for(vector<vertex_t>::const_iterator k=y.begin(); k!=y.end(); k++){
437  cout << setw(8) << fixed << k->t;
438  }
439  cout << endl << "T=" << setw(15) << 1./beta <<" Tc= ";
440  for(vector<vertex_t>::const_iterator k=y.begin(); k!=y.end(); k++){
441  cout << setw(8) << fixed << k->tC ;
442  }
443 
444  cout << endl << " pk=";
445  double sumpk=0;
446  for(vector<vertex_t>::const_iterator k=y.begin(); k!=y.end(); k++){
447  cout << setw(8) << setprecision(3) << fixed << k->pk;
448  sumpk+=k->pk;
449  }
450  cout << endl;
451 
452  if(verbosity>0){
453  double E=0, F=0;
454  cout << endl;
455  cout << "---- z +/- dz t +/- dt ip +/-dip pt phi eta weights ----" << endl;
456  cout.precision(4);
457  for(unsigned int i=0; i<tks.size(); i++){
458  if (tks[i].zi>0){ F-=log(tks[i].zi)/beta;}
459  double tz= tks[i].z;
460  double tt= tks[i].t;
461  cout << setw (3)<< i << ")" << setw (8) << fixed << setprecision(4)<< tz << " +/-" << setw (6)<< sqrt(tks[i].dz2)
462  << setw(8) << fixed << setprecision(4) << tt << " +/-" << setw(6) << std::sqrt(tks[i].dt2) ;
463 
464  if(tks[i].tt->track().quality(reco::TrackBase::highPurity)){ cout << " *";}else{cout <<" ";}
465  if(tks[i].tt->track().hitPattern().hasValidHitInPixelLayer(PixelSubdetector::SubDetector::PixelBarrel, 1)){cout <<"+";}else{cout << "-";}
466 
467  cout << setw(1) << tks[i].tt->track().hitPattern().pixelBarrelLayersWithMeasurement(); // see DataFormats/TrackReco/interface/HitPattern.h
468  cout << setw(1) << tks[i].tt->track().hitPattern().pixelEndcapLayersWithMeasurement();
469  cout << setw(1) << hex << tks[i].tt->track().hitPattern().trackerLayersWithMeasurement()-tks[i].tt->track().hitPattern().pixelLayersWithMeasurement() <<dec;
470  cout << "=" << setw(1)<<hex <<tks[i].tt->track().hitPattern().numberOfHits(reco::HitPattern::MISSING_OUTER_HITS) << dec;
471 
472  Measurement1D IP=tks[i].tt->stateAtBeamLine().transverseImpactParameter();
473  cout << setw (8) << IP.value() << "+/-" << setw (6) << IP.error();
474  cout << " " << setw(6) << setprecision(2) << tks[i].tt->track().pt()*tks[i].tt->track().charge();
475  cout << " " << setw(5) << setprecision(2) << tks[i].tt->track().phi()
476  << " " << setw(5) << setprecision(2) << tks[i].tt->track().eta() ;
477 
478  double sump=0.;
479  for(vector<vertex_t>::const_iterator k=y.begin(); k!=y.end(); k++){
480  if((tks[i].pi>0)&&(tks[i].zi>0)){
481  //double p=pik(beta,tks[i],*k);
482  double p=k->pk * std::exp(-beta*e_ik(tks[i],*k)) / tks[i].zi;
483  if( p > 0.0001){
484  cout << setw (8) << setprecision(3) << p;
485  }else{
486  cout << " . ";
487  }
488  E+=p*e_ik(tks[i],*k);
489  sump+=p;
490  }else{
491  cout << " ";
492  }
493  }
494  cout << endl;
495  }
496  cout << endl << "T=" << 1/beta << " E=" << E << " n="<< y.size() << " F= " << F << endl << "----------" << endl;
497  }
498 }
499 
500 
501 
502 
503 
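// vertices(): the main annealing driver.
//  1. fill() the track list and start from a single prototype at T = infinity.
//  2. Estimate the first critical temperature with beta0() and thermalize with update().
//  3. Cool by coolingFactor_ towards Tmin (beta -> betamax_); at each step either
//     merge/split critical prototypes (useTc_) or splitAll(), then re-thermalize.
//  4. Switch on outlier rejection (rho0 = 1/nt) and merge prototypes that collapse.
//  5. Continue cooling to Tstop (betastop_) while purging insignificant prototypes.
//  6. Assign tracks with p_ik > 0.5 to their prototype and build the TransientVertex list.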
504 vector< TransientVertex >
505 DAClusterizerInZT::vertices(const vector<reco::TransientTrack> & tracks, const int verbosity)
506 const
507 {
508 
509  vector<track_t> tks=fill(tracks);
510  unsigned int nt=tks.size();
511  double rho0=0.0; // start with no outlier rejection
512 
513  vector< TransientVertex > clusters;
514  if (tks.empty()) return clusters;
515 
516  vector<vertex_t> y; // the vertex prototypes
517 
518  // initialize: a single vertex prototype at infinite temperature
519  vertex_t vstart;
520  vstart.z=0.;
521  vstart.t=0.;
522  vstart.pk=1.;
523  y.push_back(vstart);
524  int niter=0; // number of iterations
525 
526 
527  // estimate first critical temperature
528  double beta=beta0(betamax_, tks, y);
529  niter=0;
530  while((update(beta, tks,y)>1.e-6) && (niter++ < maxIterations_)){ }
531 
532  // annealing loop, stop when T<Tmin (i.e. beta>1/Tmin)
533  while(beta<betamax_){
534 
535  if(useTc_){
536  update(beta, tks,y);
537  while(merge(y,beta)){update(beta, tks,y);}
538  split(beta, tks,y, 1.);
539  beta=beta/coolingFactor_;
540  }else{
541  beta=beta/coolingFactor_;
542  splitAll(y);
543  }
544 
545 
546  // make sure we are not too far from equilibrium before cooling further
547  niter=0;
548  while((update(beta, tks,y)>1.e-6) && (niter++ < maxIterations_)){ }
549 
550  }
551 
552  if(useTc_){
553  // last round of splitting, make sure no critical clusters are left
554  update(beta, tks,y);
555  while(merge(y,beta)){update(beta, tks,y);}
556  unsigned int ntry=0;
557  while( split(beta, tks,y,1.) && (ntry++<10) ){
558  niter=0;
559  while((update(beta, tks,y)>1.e-6) && (niter++ < maxIterations_)){}
560  merge(y,beta);
561  update(beta, tks,y);
562  }
563  }else{
564  // merge collapsed clusters
565  while(merge(y,beta)){update(beta, tks,y);}
566  if(verbose_ ){ cout << "dump after 1st merging " << endl; dump(beta,y,tks,2);}
567  }
568 
569 
570 
571 
572  // switch on outlier rejection
573  rho0=1./nt;
574  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){ k->pk =1.; } // democratic
575  niter=0;
576  while( (update(beta, tks,y,rho0) > 1.e-8) && (niter++ < maxIterations_) ){ }
577  if(verbose_ ){ cout << "rho0=" << rho0 << " niter=" << niter << endl; dump(beta,y,tks,2);}
578 
579 
580  // merge again (some cluster split by outliers collapse here)
581  while(merge(y,tks.size())){}
582  if(verbose_ ){ cout << "dump after 2nd merging " << endl; dump(beta,y,tks,2);}
583 
584 
585  // continue from freeze-out to Tstop (=1) without splitting, eliminate insignificant vertices
586  while(beta<=betastop_){
587  while(purge(y,tks,rho0, beta)){
588  niter=0;
589  while((update(beta, tks, y, rho0) > 1.e-6) && (niter++ < maxIterations_)){ }
590  }
591  beta/=coolingFactor_;
592  niter=0;
593  while((update(beta, tks, y, rho0) > 1.e-6) && (niter++ < maxIterations_)){ }
594  }
595 
596  if(verbose_){
597  cout << "Final result, rho0=" << rho0 << endl;
598  dump(beta,y,tks,2);
599  }
600 
601  // ensure correct normalization of probabilities, should make double assignment reasonably impossible
602  for(unsigned int i=0; i<nt; i++){
603  tks[i].zi=rho0*exp(-beta*( dzCutOff_*dzCutOff_));
604  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
605  tks[i].zi += k->pk * exp(-beta*e_ik(tks[i],*k));
606  }
607  }
608 
609 
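 // build the output vertices: a track is attached to prototype k if its assignment
 // probability exceeds 0.5 (its zi is then zeroed to forbid double assignment); a crude
 // time uncertainty is estimated from the p- and 1/sigma_t-weighted spread of the
 // assigned track times and stored in the time part of the vertex error matrix.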
610  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
611  GlobalPoint pos(0, 0, k->z);
612  double time = k->t;
613  vector< reco::TransientTrack > vertexTracks;
614  //double max_track_time_err2 = 0;
615  double mean = 0.;
616  double expv_x2 = 0.;
617  double normw = 0.;
618  for(unsigned int i=0; i<nt; i++){
619  const double invdt = 1.0/std::sqrt(tks[i].dt2);
620  if(tks[i].zi>0){
621  double p = k->pk * exp(-beta*e_ik(tks[i],*k)) / tks[i].zi;
622  if( (tks[i].pi>0) && ( p > 0.5 ) ){
623  //std::cout << "pushing back " << i << ' ' << tks[i].tt << std::endl;
624  vertexTracks.push_back(*(tks[i].tt)); tks[i].zi=0;
625  mean += tks[i].t*invdt*p;
626  expv_x2 += tks[i].t*tks[i].t*invdt*p;
627  normw += invdt*p;
628  } // setting Z=0 excludes double assignment
629  }
630  }
631  mean = mean/normw;
632  expv_x2 = expv_x2/normw;
633  const double time_var = expv_x2 - mean*mean;
634  const double crappy_error_guess = std::sqrt(time_var);
635  GlobalError dummyErrorWithTime(0,
636  0,0,
637  0,0,0,
638  0,0,0,crappy_error_guess);
639  TransientVertex v(pos, time, dummyErrorWithTime, vertexTracks, 5);
640  clusters.push_back(v);
641  }
642 
643 
644  return clusters;
645 
646 }
647 
648 
649 
650 
651 
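// clusterize(): convenience wrapper around vertices(). Consecutive vertices are grouped
// into one track cluster; a new cluster is started whenever neighbouring vertices are
// separated by more than 2*vertexSize_ in z or 2*vertexSizeTime in t.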
652 vector< vector<reco::TransientTrack> >
653 DAClusterizerInZT::clusterize(const vector<reco::TransientTrack> & tracks)
654  const
655 {
656  if(verbose_) {
657  cout << "###################################################" << endl;
658  cout << "# DAClusterizerInZT::clusterize nt="<<tracks.size() << endl;
659  cout << "###################################################" << endl;
660  }
661 
662  vector< vector<reco::TransientTrack> > clusters;
663  vector< TransientVertex > pv=vertices(tracks);
664 
665  if(verbose_){ cout << "# DAClusterizerInZT::clusterize pv.size="<<pv.size() << endl; }
666  if (pv.size()==0){ return clusters;}
667 
668 
669  // fill into clusters and merge
670  vector< reco::TransientTrack> aCluster=pv.begin()->originalTracks();
671 
672  if( verbose_ ) {
673  std::cout << '\t' << 0;
674  std::cout << ' ' << pv.begin()->position() << ' ' << pv.begin()->time() << std::endl;
675  }
676 
677  for(vector<TransientVertex>::iterator k=pv.begin()+1; k!=pv.end(); k++){
678  if( verbose_ ) {
679  std::cout << '\t' << std::distance(pv.begin(),k);
680  std::cout << ' ' << k->position() << ' ' << k->time() << std::endl;
681  }
682  if ( std::abs(k->position().z() - (k-1)->position().z()) > (2*vertexSize_) ||
683  std::abs(k->time() - (k-1)->time()) > 2*vertexSizeTime ) {
684  // close a cluster
685  clusters.push_back(aCluster);
686  aCluster.clear();
687  }
688  for(unsigned int i=0; i<k->originalTracks().size(); i++){
689  aCluster.push_back( k->originalTracks().at(i));
690  }
691 
692  }
693  clusters.push_back(aCluster);
694 
695  if(verbose_) { std::cout << "# DAClusterizerInZT::clusterize clusters.size="<<clusters.size() << std::endl; }
696 
697  return clusters;
698 
699 }
700 