DAClusterizerInZ_vect.cc

#include "RecoVertex/PrimaryVertexProducer/interface/DAClusterizerInZ_vect.h"
#include "FWCore/MessageLogger/interface/MessageLogger.h"
#include "FWCore/Utilities/interface/isFinite.h"

#include <cmath>
#include <cassert>
#include <limits>
#include <iomanip>

#include "vdt/vdtMath.h"

using namespace std;

DAClusterizerInZ_vect::DAClusterizerInZ_vect(const edm::ParameterSet& conf) {

  // some defaults to avoid uninitialized variables
  verbose_ = conf.getUntrackedParameter<bool>("verbose", false);

  betamax_ = 0.1;
  betastop_ = 1.0;
  /*
  coolingFactor_ = 0.6;
  maxIterations_ = 100;
  vertexSize_ = 0.01; // 0.1 mm
  dzCutOff_ = 4.0;
  */
  // configure

  double Tmin = conf.getParameter<double>("Tmin");
  vertexSize_ = conf.getParameter<double>("vertexSize");
  coolingFactor_ = conf.getParameter<double>("coolingFactor");
  useTc_ = true;
  if (coolingFactor_ < 0) {
    coolingFactor_ = -coolingFactor_;
    useTc_ = false;
  }
  d0CutOff_ = conf.getParameter<double>("d0CutOff");
  dzCutOff_ = conf.getParameter<double>("dzCutOff");
  maxIterations_ = 100;
  if (Tmin == 0) {
    LogDebug("DAClusterizerinZ_vectorized") << "DAClusterizerInZ: invalid Tmin " << Tmin
                                            << " reset to default " << 1. / betamax_ << endl;
  } else {
    betamax_ = 1. / Tmin;
  }

}
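
// Parameter summary (as used by the annealing code below):
//   betamax_  = 1/Tmin  : inverse of the lowest annealing temperature reached before freeze-out
//   betastop_ = 1/Tstop : purging continues down to T = 1/betastop_ (= 1)
//   coolingFactor_      : T is multiplied by this factor (<1) at every annealing step; a negative
//                         configured value selects the same |factor| but disables Tc-based
//                         splitting (useTc_ = false, splitAll() is used instead)
//   vertexSize_         : resolution floor added in quadrature to every track's dz error
//   dzCutOff_, d0CutOff_: z outlier cut-off and transverse impact-parameter soft cut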

namespace {
  inline double local_exp(double const& inp) {
    return vdt::fast_exp(inp);
  }

  inline void local_exp_list(double const* __restrict__ arg_inp, double* __restrict__ arg_out, const int arg_arr_size) {
    for (int i = 0; i != arg_arr_size; ++i) arg_out[i] = vdt::fast_exp(arg_inp[i]);
  }
}
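
// local_exp / local_exp_list wrap vdt::fast_exp, an inlineable fast exponential; the
// __restrict__ qualifiers tell the compiler that input and output arrays do not alias,
// so the loop over an array of exponents can auto-vectorize.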

//todo: use r-value possibility of c++11 here
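// fill(): convert the TransientTracks into the structure-of-arrays track_t used by the kernels below.
// For every usable track it stores
//   t_z   : z of the track at its point of closest approach to the beam line
//   t_dz2 : 1/sigma_z^2, where sigma_z^2 combines the track dz error, the beam-spot width
//           projected onto z along the track direction, and the vertexSize_ resolution floor
//   t_pi  : a weight in (0,1] that smoothly suppresses tracks with a large transverse
//           impact-parameter significance (soft cut around d0CutOff_)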
DAClusterizerInZ_vect::track_t
DAClusterizerInZ_vect::fill(const vector<reco::TransientTrack> & tracks) const {

  // prepare track data for clustering
  LogDebug("DAClusterizerinZ_vectorized") << "input values";
  track_t tks;
  for (auto it = tracks.begin(); it != tracks.end(); it++) {
    if (!(*it).isValid()) continue;
    double t_pi = 1.;
    double t_z = ((*it).stateAtBeamLine().trackStateAtPCA()).position().z();
    if (std::abs(t_z) > 1000.) continue;
    auto const & t_mom = (*it).stateAtBeamLine().trackStateAtPCA().momentum();
    // get the beam-spot
    reco::BeamSpot beamspot = (it->stateAtBeamLine()).beamSpot();
    double t_dz2 =
        std::pow((*it).track().dzError(), 2)  // track error
      + (std::pow(beamspot.BeamWidthX()*t_mom.x(),2) + std::pow(beamspot.BeamWidthY()*t_mom.y(),2))*std::pow(t_mom.z(),2)/std::pow(t_mom.perp2(),2)  // beam-spot width
      + std::pow(vertexSize_, 2);  // intrinsic vertex size, safer for outliers and short-lived decays
    t_dz2 = 1./ t_dz2;
    if (edm::isNotFinite(t_dz2) || t_dz2 < std::numeric_limits<double>::min()) continue;
    if (d0CutOff_ > 0) {
      Measurement1D atIP =
          (*it).stateAtBeamLine().transverseImpactParameter();  // error contains beamspot
      t_pi = 1. / (1. + local_exp(std::pow(atIP.value() / atIP.error(), 2) - std::pow(d0CutOff_, 2)));  // reduce weight for high-ip tracks
      if (edm::isNotFinite(t_pi) || t_pi < std::numeric_limits<double>::epsilon()) continue;  // usually is > 0.99
    }
    LogTrace("DAClusterizerinZ_vectorized") << t_z << ' ' << t_dz2 << ' ' << t_pi;
    tks.AddItem(t_z, t_dz2, &(*it), t_pi);
  }
  tks.ExtractRaw();

  if (verbose_) {
    LogDebug("DAClusterizerinZ_vectorized") << "Track count " << tks.GetSize() << std::endl;
  }

  return tks;
}


namespace {
  inline
  double Eik(double t_z, double k_z, double t_dz2) {
    return std::pow(t_z - k_z, 2) * t_dz2;
  }
}

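// Eik is the assignment "energy" of track i and prototype k: E_ik = (z_i - z_k)^2 / sigma_i^2
// (tks._dz2 holds 1/sigma_i^2).  With p_ik = p_k exp(-beta E_ik) / Z_i and
// Z_i = Z_init + sum_k p_k exp(-beta E_ik), update() below performs one annealing iteration
// at fixed beta:
//   z_k(new) = sum_i pi_i p_ik z_i / sigma_i^2  /  sum_i pi_i p_ik / sigma_i^2
//   p_k(new) = p_k * sum_i pi_i exp(-beta E_ik) / Z_i  /  sum_i pi_i   (only while outlier rejection is off)
// It also accumulates the weighted spread _swE[k] = sum_i w_ik E_ik (with w_ik = pi_i p_ik / sigma_i^2),
// which enters the critical temperature Tc_k = 2*_swE[k]/_sw[k], and returns the summed squared
// movement of the prototype positions.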
double DAClusterizerInZ_vect::update(double beta, track_t & gtracks,
                                     vertex_t & gvertices, bool useRho0, double & rho0) const {

  // update weights and vertex positions
  // mass constrained annealing without noise
  // returns the squared sum of changes of vertex positions

  const unsigned int nt = gtracks.GetSize();
  const unsigned int nv = gvertices.GetSize();

  // initialize sums
  double sumpi = 0;

  // to return how much the prototypes moved
  double delta = 0;


  // initial value of the per-track partition sum Z_i (non-zero only with outlier rejection)
  double Z_init = 0;

  // define kernels
  auto kernel_calc_exp_arg = [ beta, nv ] ( const unsigned int itrack,
                                            track_t const& tracks,
                                            vertex_t const& vertices ) {
    const double track_z = tracks._z[itrack];
    const double botrack_dz2 = -beta*tracks._dz2[itrack];

    // auto-vectorized
    for ( unsigned int ivertex = 0; ivertex < nv; ++ivertex) {
      auto mult_res = track_z - vertices._z[ivertex];
      vertices._ei_cache[ivertex] = botrack_dz2 * ( mult_res * mult_res );
    }
  };

  // note: Z_init is captured by reference because it is only assigned below,
  // in the useRho0 block, after this lambda has been defined
  auto kernel_add_Z = [ nv, &Z_init ] (vertex_t const& vertices) -> double
  {
    double ZTemp = Z_init;
    for (unsigned int ivertex = 0; ivertex < nv; ++ivertex) {
      ZTemp += vertices._pk[ivertex] * vertices._ei[ivertex];
    }
    return ZTemp;
  };

  auto kernel_calc_normalization = [ beta, nv ] (const unsigned int track_num,
                                                 track_t & tks_vec,
                                                 vertex_t & y_vec ) {
    auto tmp_trk_pi = tks_vec._pi[track_num];
    auto o_trk_Z_sum = 1./tks_vec._Z_sum[track_num];
    auto o_trk_dz2 = tks_vec._dz2[track_num];
    auto tmp_trk_z = tks_vec._z[track_num];
    auto obeta = -1./beta;

    // auto-vectorized
    for (unsigned int k = 0; k < nv; ++k) {
      y_vec._se[k] += y_vec._ei[k] * (tmp_trk_pi * o_trk_Z_sum);
      auto w = y_vec._pk[k] * y_vec._ei[k] * (tmp_trk_pi * o_trk_Z_sum * o_trk_dz2);
      y_vec._sw[k]  += w;
      y_vec._swz[k] += w * tmp_trk_z;
      y_vec._swE[k] += w * y_vec._ei_cache[k] * obeta;
    }
  };


  for (auto ivertex = 0U; ivertex < nv; ++ivertex) {
    gvertices._se[ivertex] = 0.0;
    gvertices._sw[ivertex] = 0.0;
    gvertices._swz[ivertex] = 0.0;
    gvertices._swE[ivertex] = 0.0;
  }


  // independent of the track loop
  if ( useRho0 )
  {
    Z_init = rho0 * local_exp(-beta * dzCutOff_ * dzCutOff_);  // cut-off
  }

  // loop over tracks
  for (auto itrack = 0U; itrack < nt; ++itrack) {
    kernel_calc_exp_arg(itrack, gtracks, gvertices);
    local_exp_list(gvertices._ei_cache, gvertices._ei, nv);

    gtracks._Z_sum[itrack] = kernel_add_Z(gvertices);

    // used in the next major loop to follow
    if (!useRho0)
      sumpi += gtracks._pi[itrack];

    if (gtracks._Z_sum[itrack] > 0) {
      kernel_calc_normalization(itrack, gtracks, gvertices);
    }
  }

  // now update z and pk
  auto kernel_calc_z = [ sumpi, nv, this, useRho0 ] (vertex_t & vertices ) -> double {

    double delta = 0;
    // does not vectorize
    for (unsigned int ivertex = 0; ivertex < nv; ++ivertex ) {
      if (vertices._sw[ivertex] > 0) {
        auto znew = vertices._swz[ ivertex ] / vertices._sw[ ivertex ];
        // the branch prevents vectorization
        delta += std::pow( vertices._z[ ivertex ] - znew, 2 );
        vertices._z[ ivertex ] = znew;
      }
#ifdef VI_DEBUG
      else {
        edm::LogInfo("sumw") << "invalid sum of weights in fit: " << vertices._sw[ivertex] << endl;
        if (this->verbose_) {
          LogDebug("DAClusterizerinZ_vectorized") << " a cluster melted away ?  pk=" << vertices._pk[ ivertex ] << " sumw="
                                                  << vertices._sw[ivertex] << endl;
        }
      }
#endif
    }
    // don't update the vertex weights pk when outlier rejection (rho0) is active
    if ( ! useRho0 ) {
      auto osumpi = 1./sumpi;
      for (unsigned int ivertex = 0; ivertex < nv; ++ivertex )
        vertices._pk[ ivertex ] = vertices._pk[ ivertex ] * vertices._se[ ivertex ] * osumpi;
    }
    return delta;
  };

  delta += kernel_calc_z(gvertices);

  // return how much the prototypes moved
  return delta;
}


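// merge(y, beta): adjacent prototypes closer than 2.e-2 cm (200 um) are combined when the
// critical temperature of the would-be merged cluster, Tc = 2*swE / (sw_k + sw_k+1), is still
// below the current temperature 1/beta, i.e. the merged cluster would not immediately split again.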
bool DAClusterizerInZ_vect::merge(vertex_t & y, double & beta) const {
  // merge clusters that collapsed or never separated,
  // only merge if the estimated critical temperature of the merged vertex is below the current temperature
  // return true if vertices were merged, false otherwise
  const unsigned int nv = y.GetSize();

  if (nv < 2)
    return false;


  for (unsigned int k = 0; (k + 1) < nv; k++) {
    if (fabs(y._z[k + 1] - y._z[k]) < 2.e-2) {
      double rho = y._pk[k] + y._pk[k+1];
      double swE = y._swE[k] + y._swE[k+1] - y._pk[k]*y._pk[k+1] / rho * std::pow(y._z[k+1] - y._z[k], 2);
      double Tc = 2*swE / (y._sw[k] + y._sw[k+1]);

      if (Tc*beta < 1) {
        if (rho > 0) {
          y._z[k] = (y._pk[k]*y._z[k] + y._pk[k+1]*y._z[k + 1]) / rho;
        } else {
          y._z[k] = 0.5 * (y._z[k] + y._z[k + 1]);
        }
        y._pk[k] = rho;
        y._sw[k] += y._sw[k+1];
        y._swE[k] = swE;
        y.RemoveItem(k+1);
        return true;
      }
    }
  }

  return false;
}



bool DAClusterizerInZ_vect::merge(vertex_t & y) const {
  // merge clusters that collapsed or never separated, return true if vertices were merged, false otherwise

  const unsigned int nv = y.GetSize();

  if (nv < 2) return false;

  for (unsigned int k = 0; (k + 1) < nv; k++) {
    //if ((k+1)->z - k->z<1.e-2){  // note, no fabs here, maintains z-ordering (with split()+merge() at every temperature)
    if (std::abs(y._z[k + 1] - y._z[k]) < 1.e-2) {  // with fabs if only called after freeze-out (splitAll() at higher T)
      y._pk[k] += y._pk[k + 1];
      y._z[k] = 0.5 * (y._z[k] + y._z[k + 1]);

      y.RemoveItem(k + 1);

      if ( verbose_)
      {
        LogDebug("DAClusterizerinZ_vectorized") << "Merging vertices k = " << k << std::endl;
      }

      return true;
    }
  }

  return false;
}

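// purge(): pmax is the largest assignment probability a track can reach for prototype k when only
// the outlier term rho0*exp(-beta*dzCutOff_^2) competes in its partition sum.  A prototype becomes
// a candidate for elimination when fewer than two tracks (with non-zero weight) assign to it with
// p > 0.9*pmax; among the candidates, the one with the smallest summed assignment probability is
// removed, at most one per call.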
bool
DAClusterizerInZ_vect::purge(vertex_t & y, track_t & tks, double & rho0, const double beta) const {
  // eliminate clusters with only one significant/unique track
  const unsigned int nv = y.GetSize();
  const unsigned int nt = tks.GetSize();

  if (nv < 2)
    return false;

  double sumpmin = nt;
  unsigned int k0 = nv;

  for (unsigned int k = 0; k < nv; k++) {

    int nUnique = 0;
    double sump = 0;
    double pmax = y._pk[k] / (y._pk[k] + rho0 * local_exp(-beta * dzCutOff_ * dzCutOff_));

    for (unsigned int i = 0; i < nt; i++) {

      if (tks._Z_sum[i] > 0) {
        //double p=pik(beta,tks[i],*k);
        double p = y._pk[k] * local_exp(-beta * Eik(tks._z[i], y._z[k], tks._dz2[i])) / tks._Z_sum[i];
        sump += p;
        if ((p > 0.9 * pmax) && (tks._pi[i] > 0)) {
          nUnique++;
        }
      }
    }

    if ((nUnique < 2) && (sump < sumpmin)) {
      sumpmin = sump;
      k0 = k;
    }
  }

  if (k0 != nv) {
    if (verbose_) {
      LogDebug("DAClusterizerinZ_vectorized") << "eliminating prototype at " << y._z[k0] << " with sump="
                                              << sumpmin << endl;
    }
    //rho0+=k0->pk;
    y.RemoveItem(k0);
    return true;
  } else {
    return false;
  }
}

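// beta0(): at T = infinity every track is assigned to every prototype with equal probability, so the
// critical temperature of prototype k is Tc = 2 * sum_i(w_i*dx_i^2/sigma_i^2) / sum_i(w_i), with
// w_i = pi_i/sigma_i^2 and dx_i = z_i - z_k.  The returned beta corresponds to the first temperature
// on the cooling grid that lies above the largest Tc, so that the first split is not skipped.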
double
DAClusterizerInZ_vect::beta0(double betamax, track_t const & tks, vertex_t const & y) const {

  double T0 = 0;  // max Tc for beta=0
  // estimate critical temperature from beta=0 (T=inf)
  const unsigned int nt = tks.GetSize();
  const unsigned int nv = y.GetSize();

  for (unsigned int k = 0; k < nv; k++) {

    // vertex fit at T=inf
    double sumwz = 0;
    double sumw = 0;
    for (unsigned int i = 0; i < nt; i++) {
      double w = tks._pi[i] * tks._dz2[i];
      sumwz += w * tks._z[i];
      sumw += w;
    }
    y._z[k] = sumwz / sumw;

    // estimate Tcrit, eventually do this in the same loop
    double a = 0, b = 0;
    for (unsigned int i = 0; i < nt; i++) {
      double dx = tks._z[i] - y._z[k];
      double w = tks._pi[i] * tks._dz2[i];
      a += w * std::pow(dx, 2) * tks._dz2[i];
      b += w;
    }
    double Tc = 2. * a / b;  // the critical temperature of this vertex
    if (Tc > T0) T0 = Tc;
  }  // vertex loop (normally there should be only one vertex at beta=0)

  if (T0 > 1. / betamax) {
    return betamax / std::pow(coolingFactor_, int(std::log(T0 * betamax) / std::log(coolingFactor_)) - 1);
  } else {
    // ensure at least one annealing step
    return betamax / coolingFactor_;
  }
}


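// split(): a prototype is critical when beta*Tc_k > 1, i.e. the current temperature has dropped to
// its critical temperature Tc_k = 2*_swE[k]/_sw[k].  Critical prototypes are split, highest Tc first,
// into two subclusters seeded from the tracks on either side of z_k, provided the resulting
// separation is at least epsilon.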
bool
DAClusterizerInZ_vect::split(const double beta, track_t & tks, vertex_t & y) const {
  // split only critical vertices (Tc >~ T=1/beta  <==>  beta*Tc >~ 1)
  // an update must have been made just before doing this (same beta, no merging)
  // returns true if at least one cluster was split

  double epsilon = 1e-3;  // minimum split size, 10 um
  unsigned int nv = y.GetSize();

  // avoid left-right biases by splitting highest Tc first

  std::vector<std::pair<double, unsigned int> > critical;
  for (unsigned int k = 0; k < nv; k++) {
    double Tc = 2*y._swE[k]/y._sw[k];
    if (beta*Tc > 1.) {
      critical.push_back( make_pair(Tc, k));
    }
  }
  if (critical.size() == 0) return false;
  std::stable_sort(critical.begin(), critical.end(), std::greater<std::pair<double, unsigned int> >() );


  bool split = false;
  const unsigned int nt = tks.GetSize();

  for (unsigned int ic = 0; ic < critical.size(); ic++) {
    unsigned int k = critical[ic].second;
    // estimate subcluster positions and weight
    double p1 = 0, z1 = 0, w1 = 0;
    double p2 = 0, z2 = 0, w2 = 0;
    for (unsigned int i = 0; i < nt; i++) {
      if (tks._Z_sum[i] > 0) {
        double p = y._pk[k] * local_exp(-beta * Eik(tks._z[i], y._z[k], tks._dz2[i])) / tks._Z_sum[i];
        double w = p*tks._dz2[i];
        if (tks._z[i] < y._z[k]) {
          p1 += p; z1 += w*tks._z[i]; w1 += w;
        } else {
          p2 += p; z2 += w*tks._z[i]; w2 += w;
        }
      }
    }
    if (w1 > 0) { z1 = z1/w1; } else { z1 = y._z[k] - epsilon; }
    if (w2 > 0) { z2 = z2/w2; } else { z2 = y._z[k] + epsilon; }

    // reduce split size if there is not enough room
    if ( ( k > 0 ) && ( y._z[k-1] >= z1 ) ) { z1 = 0.5*(y._z[k] + y._z[k-1]); }
    if ( ( k+1 < nv ) && ( y._z[k+1] <= z2 ) ) { z2 = 0.5*(y._z[k] + y._z[k+1]); }

    // split if the new subclusters are significantly separated
    if ( (z2-z1) > epsilon ) {
      split = true;
      double pk1 = p1*y._pk[k]/(p1+p2);
      double pk2 = p2*y._pk[k]/(p1+p2);
      y._z[k] = z2;
      y._pk[k] = pk2;
      y.InsertItem(k, z1, pk1);
      nv++;

      // adjust the remaining indices
      for (unsigned int jc = ic; jc < critical.size(); jc++) {
        if (critical[jc].second > k) { critical[jc].second++; }
      }
    }
  }
  return split;
}


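// splitAll(): used on the cooling path without Tc-based splitting (useTc_ = false).  Every isolated
// prototype is split symmetrically into two halves displaced by +/- epsilon; prototypes that are
// not isolated are only nudged apart from their neighbour.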
void DAClusterizerInZ_vect::splitAll(vertex_t & y) const {

  const unsigned int nv = y.GetSize();

  double epsilon = 1e-3;      // split all single vertices by 10 um
  double zsep = 2 * epsilon;  // split vertices that are isolated by at least zsep (vertices that haven't collapsed)
  vertex_t y1;

  if (verbose_) {
    LogDebug("DAClusterizerinZ_vectorized") << "Before Split " << std::endl;
    y.DebugOut();
  }

  for (unsigned int k = 0; k < nv; k++) {
    if (
        ( (k == 0)        || ( y._z[k - 1] < (y._z[k] - zsep)) ) &&
        ( ((k + 1) == nv) || ( y._z[k + 1] > (y._z[k] + zsep)) ) )
    {
      // isolated prototype, split
      double new_z = y.z[k] - epsilon;
      y.z[k] = y.z[k] + epsilon;

      double new_pk = 0.5 * y.pk[k];
      y.pk[k] = 0.5 * y.pk[k];

      y1.AddItem(new_z, new_pk);
      y1.AddItem(y._z[k], y._pk[k]);
    }
    else if ( (y1.GetSize() == 0 ) ||
              (y1._z[y1.GetSize() - 1] < (y._z[k] - zsep) ))
    {
      y1.AddItem(y._z[k], y._pk[k]);
    }
    else
    {
      y1._z[y1.GetSize() - 1] = y1._z[y1.GetSize() - 1] - epsilon;
      y._z[k] = y._z[k] + epsilon;
      y1.AddItem( y._z[k] , y._pk[k]);
    }
  }  // vertex loop

  y = y1;
  y.ExtractRaw();

  if (verbose_) {
    LogDebug("DAClusterizerinZ_vectorized") << "After split " << std::endl;
    y.DebugOut();
  }
}


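// vertices(): the main deterministic-annealing sequence.
//  1. fill(): precompute per-track z, 1/sigma^2 and weight pi
//  2. start from a single prototype at infinite temperature and cool from beta0 towards betamax_
//     (T -> Tmin), running update() to (near) convergence at every temperature and splitting /
//     merging prototypes on the way (Tc-based split() or plain splitAll(), depending on useTc_)
//  3. switch on outlier rejection (rho0 = 1/nt), merge and purge insignificant prototypes while
//     cooling further down to betastop_ (T = 1)
//  4. assign each track to the first prototype for which its assignment probability exceeds 0.5
//     and return the prototypes as TransientVertex objects (with a dummy error matrix)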
vector<TransientVertex>
DAClusterizerInZ_vect::vertices(const vector<reco::TransientTrack> & tracks, const int verbosity) const {
  track_t && tks = fill(tracks);
  tks.ExtractRaw();

  unsigned int nt = tks.GetSize();
  double rho0 = 0.0;  // start with no outlier rejection

  vector<TransientVertex> clusters;
  if (tks.GetSize() == 0) return clusters;

  vertex_t y;  // the vertex prototypes

  // initialize: a single vertex at infinite temperature
  y.AddItem( 0, 1.0);

  int niter = 0;  // number of iterations


  // estimate the first critical temperature
  double beta = beta0(betamax_, tks, y);
  if ( verbose_) LogDebug("DAClusterizerinZ_vectorized") << "Beta0 is " << beta << std::endl;

  niter = 0;
  while ((update(beta, tks, y, false, rho0) > 1.e-6) &&
         (niter++ < maxIterations_)) {}

  // annealing loop, stop when T<Tmin (i.e. beta>1/Tmin)
  while (beta < betamax_) {
    if (useTc_) {
      update(beta, tks, y, false, rho0);
      while (merge(y, beta)) { update(beta, tks, y, false, rho0); }
      split(beta, tks, y);
      beta = beta/coolingFactor_;
    } else {
      beta = beta/coolingFactor_;
      splitAll(y);
    }

    // make sure we are not too far from equilibrium before cooling further
    niter = 0;
    while ((update(beta, tks, y, false, rho0) > 1.e-6) &&
           (niter++ < maxIterations_)) {}
  }


  if (useTc_) {
    // last round of splitting, make sure no critical clusters are left
    update(beta, tks, y, false, rho0);  // make sure Tc is up-to-date
    while (merge(y, beta)) { update(beta, tks, y, false, rho0); }
    unsigned int ntry = 0;
    while ( split(beta, tks, y) && (ntry++ < 10) ) {
      niter = 0;
      while ((update(beta, tks, y, false, rho0) > 1.e-6) && (niter++ < maxIterations_)) {}
      merge(y, beta);
      update(beta, tks, y, false, rho0);
    }
  } else {
    // merge collapsed clusters
    while (merge(y, beta)) { update(beta, tks, y, false, rho0); }
    //while(merge(y)){}  original code
  }

  if (verbose_) {
    LogDebug("DAClusterizerinZ_vectorized") << "dump after 1st merging " << endl;
    dump(beta, y, tks, 2);
  }

  // switch on outlier rejection
  rho0 = 1. / nt;

  // auto-vectorized
  for (unsigned int k = 0; k < y.GetSize(); k++) y._pk[k] = 1.;  // democratic
  niter = 0;
  while ((update(beta, tks, y, true, rho0) > 1.e-8) && (niter++ < maxIterations_)) {}
  if (verbose_) {
    LogDebug("DAClusterizerinZ_vectorized") << "rho0=" << rho0 << " niter=" << niter << endl;
    dump(beta, y, tks, 2);
  }

  // merge again (some clusters split by outliers collapse here)
  while (merge(y)) {}
  if (verbose_) {
    LogDebug("DAClusterizerinZ_vectorized") << "dump after 2nd merging " << endl;
    dump(beta, y, tks, 2);
  }

  // continue from freeze-out to Tstop (=1) without splitting, eliminate insignificant vertices
  while (beta <= betastop_) {
    while (purge(y, tks, rho0, beta)) {
      niter = 0;
      while ((update(beta, tks, y, true, rho0) > 1.e-6) && (niter++ < maxIterations_)) {}
    }
    beta /= coolingFactor_;
    niter = 0;
    while ((update(beta, tks, y, true, rho0) > 1.e-6) && (niter++ < maxIterations_)) {}
  }

  if (verbose_) {
    LogDebug("DAClusterizerinZ_vectorized") << "Final result, rho0=" << rho0 << endl;
    dump(beta, y, tks, 2);
  }

  // select significant tracks and use a TransientVertex as a container
  GlobalError dummyError;

  // ensure correct normalization of probabilities, should make double assignment reasonably impossible
  const unsigned int nv = y.GetSize();
  for (unsigned int k = 0; k < nv; k++)
    if ( edm::isNotFinite(y._pk[k]) || edm::isNotFinite(y._z[k]) ) { y._pk[k] = 0; y._z[k] = 0; }

  for (unsigned int i = 0; i < nt; i++)  // initialize
    tks._Z_sum[i] = rho0 * local_exp(-beta * dzCutOff_ * dzCutOff_);

  // improves vectorization (does not require a reduction)
  for (unsigned int k = 0; k < nv; k++) {
    for (unsigned int i = 0; i < nt; i++)
      tks._Z_sum[i] += y._pk[k] * local_exp(-beta * Eik(tks._z[i], y._z[k], tks._dz2[i]));
  }


  for (unsigned int k = 0; k < nv; k++) {
    GlobalPoint pos(0, 0, y._z[k]);

    vector<reco::TransientTrack> vertexTracks;
    for (unsigned int i = 0; i < nt; i++) {
      if (tks._Z_sum[i] > 0) {

        double p = y._pk[k] * local_exp(-beta * Eik(tks._z[i], y._z[k],
                                                    tks._dz2[i])) / tks._Z_sum[i];
        if ((tks._pi[i] > 0) && (p > 0.5)) {
          vertexTracks.push_back(*(tks.tt[i]));
          tks._Z_sum[i] = 0;
        }  // setting Z=0 excludes double assignment
      }
    }
    TransientVertex v(pos, dummyError, vertexTracks, 0);
    clusters.push_back(v);
  }

  return clusters;

}

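// clusterize(): convenience wrapper around vertices(); neighbouring vertices closer than
// 2*vertexSize_ in z are combined into one group, and the track content of each group is returned.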
vector<vector<reco::TransientTrack> > DAClusterizerInZ_vect::clusterize(
    const vector<reco::TransientTrack> & tracks) const {

  if (verbose_) {
    std::cout << "###################################################" << endl;
    std::cout << "# vectorized DAClusterizerInZ_vect::clusterize   nt=" << tracks.size() << endl;
    std::cout << "###################################################" << endl;
  }

  vector<vector<reco::TransientTrack> > clusters;
  vector<TransientVertex> && pv = vertices(tracks);

  if (verbose_) {
    LogDebug("DAClusterizerinZ_vectorized") << "# DAClusterizerInZ::clusterize   pv.size=" << pv.size()
                                            << endl;
  }
  if (pv.size() == 0) {
    return clusters;
  }

  // fill into clusters and merge
  vector<reco::TransientTrack> aCluster = pv.begin()->originalTracks();

  for (auto k = pv.begin() + 1; k != pv.end(); k++) {
    if ( std::abs(k->position().z() - (k - 1)->position().z()) > (2 * vertexSize_) ) {
      // close a cluster
      clusters.push_back(aCluster);
      aCluster.clear();
    }
    for (unsigned int i = 0; i < k->originalTracks().size(); i++) {
      aCluster.push_back(k->originalTracks()[i]);
    }

  }
  clusters.emplace_back(std::move(aCluster));

  return clusters;

}


void DAClusterizerInZ_vect::dump(const double beta, const vertex_t & y,
                                 const track_t & tks, int verbosity) const {

  const unsigned int nv = y.GetSize();
  const unsigned int nt = tks.GetSize();

  LogDebug("DAClusterizerinZ_vectorized") << "-----DAClusterizerInZ::dump ----" << endl;
  LogDebug("DAClusterizerinZ_vectorized") << "beta=" << beta << " betamax= " << betamax_ << endl;
  LogDebug("DAClusterizerinZ_vectorized") << " z= ";
  LogDebug("DAClusterizerinZ_vectorized") << setprecision(4);
  for (unsigned int ivertex = 0; ivertex < nv; ++ivertex) {
    LogDebug("DAClusterizerinZ_vectorized") << setw(8) << fixed << y._z[ivertex];
  }
  LogDebug("DAClusterizerinZ_vectorized") << endl << "T=" << setw(15) << 1. / beta
                                          << " Tc= ";
  LogDebug("DAClusterizerinZ_vectorized") << endl
                                          << " pk=";
  double sumpk = 0;
  for (unsigned int ivertex = 0; ivertex < nv; ++ivertex) {
    LogDebug("DAClusterizerinZ_vectorized") << setw(8) << setprecision(3) << fixed << y._pk[ivertex];
    sumpk += y._pk[ivertex];
  }
  LogDebug("DAClusterizerinZ_vectorized") << endl;

  if (verbosity > 0) {
    double E = 0, F = 0;
    LogDebug("DAClusterizerinZ_vectorized") << endl;
    LogDebug("DAClusterizerinZ_vectorized")
        << "----        z +/- dz        ip +/-dip       pt    phi   eta    weights  ----"
        << endl;
    LogDebug("DAClusterizerinZ_vectorized") << setprecision(4);
    for (unsigned int i = 0; i < nt; i++) {
      if (tks._Z_sum[i] > 0) {
        F -= std::log(tks._Z_sum[i]) / beta;
      }
      double tz = tks._z[i];
      LogDebug("DAClusterizerinZ_vectorized") << setw(3) << i << ")" << setw(8) << fixed << setprecision(4)
                                              << tz << " +/-" << setw(6) << sqrt(1./tks._dz2[i]);

      if (tks.tt[i]->track().quality(reco::TrackBase::highPurity)) {
        LogDebug("DAClusterizerinZ_vectorized") << " *";
      } else {
        LogDebug("DAClusterizerinZ_vectorized") << "  ";
      }
      if (tks.tt[i]->track().hitPattern().hasValidHitInFirstPixelBarrel()) {
        LogDebug("DAClusterizerinZ_vectorized") << "+";
      } else {
        LogDebug("DAClusterizerinZ_vectorized") << "-";
      }
      LogDebug("DAClusterizerinZ_vectorized") << setw(1)
          << tks.tt[i]->track().hitPattern().pixelBarrelLayersWithMeasurement();  // see DataFormats/TrackReco/interface/HitPattern.h
      LogDebug("DAClusterizerinZ_vectorized") << setw(1)
          << tks.tt[i]->track().hitPattern().pixelEndcapLayersWithMeasurement();
      LogDebug("DAClusterizerinZ_vectorized") << setw(1) << hex
          << tks.tt[i]->track().hitPattern().trackerLayersWithMeasurement()
             - tks.tt[i]->track().hitPattern().pixelLayersWithMeasurement()
          << dec;
      LogDebug("DAClusterizerinZ_vectorized") << "=" << setw(1) << hex
          << tks.tt[i]->track().hitPattern().numberOfHits(reco::HitPattern::MISSING_OUTER_HITS)
          << dec;

      Measurement1D IP =
          tks.tt[i]->stateAtBeamLine().transverseImpactParameter();
      LogDebug("DAClusterizerinZ_vectorized") << setw(8) << IP.value() << "+/-" << setw(6) << IP.error();
      LogDebug("DAClusterizerinZ_vectorized") << " " << setw(6) << setprecision(2)
          << tks.tt[i]->track().pt() * tks.tt[i]->track().charge();
      LogDebug("DAClusterizerinZ_vectorized") << " " << setw(5) << setprecision(2)
          << tks.tt[i]->track().phi() << " " << setw(5)
          << setprecision(2) << tks.tt[i]->track().eta();

      double sump = 0.;
      for (unsigned int ivertex = 0; ivertex < nv; ++ivertex) {
        if ((tks._pi[i] > 0) && (tks._Z_sum[i] > 0)) {
          //double p=pik(beta,tks[i],*k);
          double p = y._pk[ivertex] * exp(-beta * Eik(tks._z[i], y._z[ivertex], tks._dz2[i])) / tks._Z_sum[i];
          if (p > 0.0001) {
            LogDebug("DAClusterizerinZ_vectorized") << setw(8) << setprecision(3) << p;
          } else {
            LogDebug("DAClusterizerinZ_vectorized") << "    .   ";
          }
          E += p * Eik(tks._z[i], y._z[ivertex], tks._dz2[i]);
          sump += p;
        } else {
          LogDebug("DAClusterizerinZ_vectorized") << "        ";
        }
      }
      LogDebug("DAClusterizerinZ_vectorized") << endl;
    }
    LogDebug("DAClusterizerinZ_vectorized") << endl << "T=" << 1 / beta << " E=" << E << " n=" << y.GetSize()
                                            << " F= " << F << endl << "----------" << endl;
  }
}
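
// ---------------------------------------------------------------------------------------------
// Illustrative usage sketch (not part of this file): how the clusterizer could be driven from
// reconstruction code.  The parameter values below are assumptions made only for the example;
// the real values come from the primary-vertex producer configuration.
//
//   edm::ParameterSet conf;
//   conf.addUntrackedParameter<bool>("verbose", false);
//   conf.addParameter<double>("Tmin", 2.0);            // assumed; betamax_ = 1/Tmin
//   conf.addParameter<double>("vertexSize", 0.01);     // assumed, cm
//   conf.addParameter<double>("coolingFactor", 0.6);   // assumed; a negative value would disable Tc-based splitting
//   conf.addParameter<double>("d0CutOff", 3.0);        // assumed
//   conf.addParameter<double>("dzCutOff", 4.0);        // assumed, cm
//
//   DAClusterizerInZ_vect clusterizer(conf);
//   // tracks: reco::TransientTracks with the beam-spot constraint available
//   std::vector<TransientVertex> pvs = clusterizer.vertices(tracks);
//   std::vector<std::vector<reco::TransientTrack> > groups = clusterizer.clusterize(tracks);
// ---------------------------------------------------------------------------------------------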