
DAClusterizerInZ_vect.cc
5 
6 #include <cmath>
7 #include <cassert>
8 #include <limits>
9 #include <iomanip>
11 #include "vdt/vdtMath.h"
12 
13 using namespace std;
14 
15 DAClusterizerInZ_vect::DAClusterizerInZ_vect(const edm::ParameterSet& conf) {
16 
17  // hardcoded parameters
18  maxIterations_ = 100;
19  mintrkweight_ = 0.5; // conf.getParameter<double>("mintrkweight");
20 
21 
22  // configurable debug output
23  verbose_ = conf.getUntrackedParameter<bool> ("verbose", false);
24  zdumpcenter_ = conf.getUntrackedParameter<double> ("zdumpcenter", 0.);
25  zdumpwidth_ = conf.getUntrackedParameter<double> ("zdumpwidth", 20.);
26 
27  // configurable parameters
28  double Tmin = conf.getParameter<double> ("Tmin");
29  double Tpurge = conf.getParameter<double> ("Tpurge");
30  double Tstop = conf.getParameter<double> ("Tstop");
31  vertexSize_ = conf.getParameter<double> ("vertexSize");
32  coolingFactor_ = conf.getParameter<double> ("coolingFactor");
33  useTc_=true;
34  if(coolingFactor_<0){
35  coolingFactor_=-coolingFactor_;
36  useTc_=false;
37  }
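  // convention: a negative coolingFactor in the configuration selects the simpler splitAll()
  // scheme instead of the critical-temperature based split() (useTc_ = false); its absolute
  // value is used as the actual cooling factor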
38  d0CutOff_ = conf.getParameter<double> ("d0CutOff");
39  dzCutOff_ = conf.getParameter<double> ("dzCutOff");
40  uniquetrkweight_ = conf.getParameter<double>("uniquetrkweight");
41  zmerge_ = conf.getParameter<double>("zmerge");
42 
43  if(verbose_){
44  std::cout << "DAClusterizerinZ_vect: mintrkweight = " << mintrkweight_ << std::endl;
45  std::cout << "DAClusterizerinZ_vect: uniquetrkweight = " << uniquetrkweight_ << std::endl;
46  std::cout << "DAClusterizerinZ_vect: zmerge = " << zmerge_ << std::endl;
47  std::cout << "DAClusterizerinZ_vect: Tmin = " << Tmin << std::endl;
48  std::cout << "DAClusterizerinZ_vect: Tpurge = " << Tpurge << std::endl;
49  std::cout << "DAClusterizerinZ_vect: Tstop = " << Tstop << std::endl;
50  std::cout << "DAClusterizerinZ_vect: vertexSize = " << vertexSize_ << std::endl;
51  std::cout << "DAClusterizerinZ_vect: coolingFactor = " << coolingFactor_ << std::endl;
52  std::cout << "DAClusterizerinZ_vect: d0CutOff = " << d0CutOff_ << std::endl;
53  std::cout << "DAClusterizerinZ_vect: dzCutOff = " << dzCutOff_ << std::endl;
54  }
55 
56 
57  if (Tmin == 0) {
58  edm::LogWarning("DAClusterizerinZ_vectorized") << "DAClusterizerInZ: invalid Tmin " << Tmin
59  << " reset to default " << 1. / betamax_;
60  } else {
61  betamax_ = 1. / Tmin;
62  }
63 
64 
65  if ((Tpurge > Tmin) || (Tpurge == 0)) {
66  edm::LogWarning("DAClusterizerinZ_vectorized") << "DAClusterizerInZ: invalid Tpurge " << Tpurge
67  << " set to " << Tmin;
68  Tpurge = Tmin;
69  }
70  betapurge_ = 1./Tpurge;
71 
72 
73  if ((Tstop > Tpurge) || (Tstop == 0)) {
74  edm::LogWarning("DAClusterizerinZ_vectorized") << "DAClusterizerInZ: invalid Tstop " << Tstop
75  << " set to " << max(1., Tpurge);
76  Tstop = max(1., Tpurge) ;
77  }
78  betastop_ = 1./Tstop;
79 
80 }
81 
82 
83 namespace {
84  inline double local_exp( double const& inp) {
85  return vdt::fast_exp( inp );
86  }
87 
88  inline void local_exp_list( double const * __restrict__ arg_inp, double * __restrict__ arg_out, const int arg_arr_size) {
89  for(int i=0; i!=arg_arr_size; ++i ) arg_out[i]=vdt::fast_exp(arg_inp[i]);
90  }
91 
92 }
93 
94 //todo: use r-value possibility of c++11 here
95 DAClusterizerInZ_vect::track_t
96 DAClusterizerInZ_vect::fill(const vector<reco::TransientTrack> & tracks) const {
97 
98  // prepare track data for clustering
99  track_t tks;
100  for (auto it = tracks.begin(); it!= tracks.end(); it++){
101  if (!(*it).isValid()) continue;
102  double t_pi=1.;
103  double t_z = ((*it).stateAtBeamLine().trackStateAtPCA()).position().z();
104  if (std::fabs(t_z) > 1000.) continue;
105  auto const & t_mom = (*it).stateAtBeamLine().trackStateAtPCA().momentum();
106  // get the beam-spot
107  reco::BeamSpot beamspot = (it->stateAtBeamLine()).beamSpot();
108  double t_dz2 =
109  std::pow((*it).track().dzError(), 2) // track error
110  + (std::pow(beamspot.BeamWidthX()*t_mom.x(),2)+std::pow(beamspot.BeamWidthY()*t_mom.y(),2))*std::pow(t_mom.z(),2)/std::pow(t_mom.perp2(),2) // beam spot width
111  + std::pow(vertexSize_, 2); // intrinsic vertex size, safer for outliers and short lived decays
112  t_dz2 = 1./ t_dz2;
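  // from here on t_dz2 holds the inverse variance 1/sigma_z^2 and is used directly as the track weight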
113  if (edm::isNotFinite(t_dz2) || t_dz2 < std::numeric_limits<double>::min() ) continue;
114  if (d0CutOff_ > 0) {
115  Measurement1D atIP =
116  (*it).stateAtBeamLine().transverseImpactParameter();// error contains beamspot
117  t_pi = 1. / (1. + local_exp(std::pow(atIP.value() / atIP.error(), 2) - std::pow(d0CutOff_, 2))); // reduce weight for high ip tracks
118  if (edm::isNotFinite(t_pi) || t_pi < std::numeric_limits<double>::epsilon()) continue; // usually is > 0.99
119  }
120  LogTrace("DAClusterizerinZ_vectorized") << t_z <<' '<< t_dz2 <<' '<< t_pi;
121  tks.AddItem(t_z, t_dz2, &(*it), t_pi);
122  }
123  tks.ExtractRaw();
124 
125  if (verbose_) {
126  std::cout << "Track count " << tks.GetSize() << std::endl;
127  }
128 
129  return tks;
130 }
131 
132 
133 namespace {
134  inline
135  double Eik(double t_z, double k_z, double t_dz2) {
136  return std::pow(t_z - k_z, 2) * t_dz2;
137  }
138 }
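// Eik is the squared z-distance between a track and a vertex prototype, normalized by the
// track variance (t_dz2 holds 1/sigma_z^2 as prepared in fill()), i.e. a chi^2-like term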
139 
140 double DAClusterizerInZ_vect::update(double beta, track_t & gtracks,
141  vertex_t & gvertices, bool useRho0, const double & rho0) const {
142 
143  //update weights and vertex positions
144  // mass constrained annealing without noise
145  // returns the sum of squared changes of the vertex positions
146 
147  const unsigned int nt = gtracks.GetSize();
148  const unsigned int nv = gvertices.GetSize();
149 
150  //initialize sums
151  double sumpi = 0;
152 
153  // to return how much the prototype moved
154  double delta = 0;
155 
156 
157  // initial value of the partition sum Z
158  double Z_init = 0;
159  // independent of the track loop
160  if ( useRho0 )
161  {
162  Z_init = rho0 * local_exp(-beta * dzCutOff_ * dzCutOff_); // cut-off
163  }
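  // when outlier rejection is on, Z_init adds a constant "noise" term to every track's
  // partition function, so tracks far from all vertices are not forced into one of them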
164 
165  // define kernels
166  auto kernel_calc_exp_arg = [ beta, nv ] ( const unsigned int itrack,
167  track_t const& tracks,
168  vertex_t const& vertices ) {
169  const double track_z = tracks._z[itrack];
170  const double botrack_dz2 = -beta*tracks._dz2[itrack];
171 
172  // auto-vectorized
173  for ( unsigned int ivertex = 0; ivertex < nv; ++ivertex) {
174  auto mult_res = track_z - vertices._z[ivertex];
175  vertices._ei_cache[ivertex] = botrack_dz2 * ( mult_res * mult_res );
176  }
177  };
178 
179  auto kernel_add_Z = [ nv, Z_init ] (vertex_t const& vertices) -> double
180  {
181  double ZTemp = Z_init;
182  for (unsigned int ivertex = 0; ivertex < nv; ++ivertex) {
183  ZTemp += vertices._pk[ivertex] * vertices._ei[ivertex];
184  }
185  return ZTemp;
186  };
187 
188  auto kernel_calc_normalization = [ beta, nv ] (const unsigned int track_num,
189  track_t & tks_vec,
190  vertex_t & y_vec ) {
191  auto tmp_trk_pi = tks_vec._pi[track_num];
192  auto o_trk_Z_sum = 1./tks_vec._Z_sum[track_num];
193  auto o_trk_dz2 = tks_vec._dz2[track_num];
194  auto tmp_trk_z = tks_vec._z[track_num];
195  auto obeta = -1./beta;
196 
197  // auto-vectorized
198  for (unsigned int k = 0; k < nv; ++k) {
199  y_vec._se[k] += y_vec._ei[k] * (tmp_trk_pi* o_trk_Z_sum);
200  auto w = y_vec._pk[k] * y_vec._ei[k] * (tmp_trk_pi*o_trk_Z_sum *o_trk_dz2);
201  y_vec._sw[k] += w;
202  y_vec._swz[k] += w * tmp_trk_z;
203  y_vec._swE[k] += w * y_vec._ei_cache[k]*obeta;
204  }
205  };
206 
207 
208  for (auto ivertex = 0U; ivertex < nv; ++ivertex) {
209  gvertices._se[ivertex] = 0.0;
210  gvertices._sw[ivertex] = 0.0;
211  gvertices._swz[ivertex] = 0.0;
212  gvertices._swE[ivertex] = 0.0;
213  }
214 
215 
216 
217  // loop over tracks
218  for (auto itrack = 0U; itrack < nt; ++itrack) {
219  kernel_calc_exp_arg(itrack, gtracks, gvertices);
220  local_exp_list(gvertices._ei_cache, gvertices._ei, nv);
221 
222  gtracks._Z_sum[itrack] = kernel_add_Z(gvertices);
223  if (edm::isNotFinite(gtracks._Z_sum[itrack])) gtracks._Z_sum[itrack] = 0.0;
224  // used below in kernel_calc_z
225  sumpi += gtracks._pi[itrack];
226 
227  if (gtracks._Z_sum[itrack] > 1.e-100){
228  kernel_calc_normalization(itrack, gtracks, gvertices);
229  }
230  }
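  // after the track loop: _Z_sum[i] = Z_init + sum_k pk_k*exp(-beta*Eik) for every track;
  // per vertex, _se holds the accumulated soft assignments (without the pk factor),
  // _sw/_swz the 1/sigma^2-weighted assignments and their z-weighted sum for the centroid
  // update, and _swE the Eik-weighted sum used for the critical-temperature estimate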
231 
232  // now update z and pk
233  auto kernel_calc_z = [ sumpi, nv, this, useRho0 ] (vertex_t & vertices ) -> double {
234 
235  double delta=0;
236  // does not vectorize
237  for (unsigned int ivertex = 0; ivertex < nv; ++ ivertex ) {
238  if (vertices._sw[ivertex] > 0) {
239  auto znew = vertices._swz[ ivertex ] / vertices._sw[ ivertex ];
240 // the conditional above prevents vectorization
241  delta += std::pow( vertices._z[ ivertex ] - znew, 2 );
242  vertices._z[ ivertex ] = znew;
243  }
244 #ifdef VI_DEBUG
245  else {
246  edm::LogInfo("sumw") << "invalid sum of weights in fit: " << vertices._sw[ivertex] << endl;
247  if (this->verbose_) {
248  std::cout << " a cluster melted away ? pk=" << vertices._pk[ ivertex ] << " sumw="
249  << vertices._sw[ivertex] << endl;
250  }
251  }
252 #endif
253  }
254 
255  auto osumpi = 1./sumpi;
256  for (unsigned int ivertex = 0; ivertex < nv; ++ ivertex )
257  vertices._pk[ ivertex ] = vertices._pk[ ivertex ] * vertices._se[ ivertex ] * osumpi;
258 
259  return delta;
260  };
261 
262  delta += kernel_calc_z(gvertices);
263 
264  // return how much the prototypes moved
265  return delta;
266 }
267 
268 
269 
270 
271 
272 bool DAClusterizerInZ_vect::merge(vertex_t & y, double & beta)const{
273  // merge clusters that collapsed or never separated,
274  // only merge if the estimated critical temperature of the merged vertex is below the current temperature
275  // return true if vertices were merged, false otherwise
276  const unsigned int nv = y.GetSize();
277 
278  if (nv < 2)
279  return false;
280 
281  // merge the smallest distance clusters first
282  std::vector<std::pair<double, unsigned int> > critical;
283  for (unsigned int k = 0; (k + 1) < nv; k++) {
284  if (std::fabs(y._z[k + 1] - y._z[k]) < zmerge_) {
285  critical.push_back( make_pair( std::fabs(y._z[k + 1] - y._z[k]), k) );
286  }
287  }
288  if (critical.empty()) return false;
289 
290  std::stable_sort(critical.begin(), critical.end(), std::less<std::pair<double, unsigned int> >() );
291 
292 
293  for (unsigned int ik=0; ik < critical.size(); ik++){
294  unsigned int k = critical[ik].second;
295  double rho = y._pk[k]+y._pk[k+1];
296  double swE = y._swE[k]+y._swE[k+1]-y._pk[k]*y._pk[k+1] / rho*std::pow(y._z[k+1]-y._z[k],2);
297  double Tc = 2*swE / (y._sw[k]+y._sw[k+1]);
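  // Tc estimates the critical temperature of the would-be merged cluster: the combined swE
  // is corrected by the separation term pk[k]*pk[k+1]/rho * dz^2 so that it refers to the
  // common centroid; merging is done only if Tc is below the current temperature
  // (Tc*beta < 1), i.e. the merged cluster would not immediately split again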
298 
299  if(Tc*beta < 1){
300  if(verbose_){ std::cout << "merging " << y._z[k + 1] << " and " << y._z[k] << " Tc = " << Tc << " sw = " << y._sw[k]+y._sw[k+1] <<std::endl;}
301  if(rho > 0){
302  y._z[k] = (y._pk[k]*y._z[k] + y._pk[k+1]*y._z[k + 1])/rho;
303  }else{
304  y._z[k] = 0.5 * (y._z[k] + y._z[k + 1]);
305  }
306  y._pk[k] = rho;
307  y._sw[k] += y._sw[k+1];
308  y._swE[k] = swE;
309  y.RemoveItem(k+1);
310  return true;
311  }
312  }
313 
314  return false;
315 }
316 
317 
318 
319 
320 bool
321 DAClusterizerInZ_vect::purge(vertex_t & y, track_t & tks, double & rho0, const double beta) const {
322  // eliminate clusters with only one significant/unique track
323  const unsigned int nv = y.GetSize();
324  const unsigned int nt = tks.GetSize();
325 
326  if (nv < 2)
327  return false;
328 
329  double sumpmin = nt;
330  unsigned int k0 = nv;
331 
332  for (unsigned int k = 0; k < nv; k++) {
333 
334  int nUnique = 0;
335  double sump = 0;
336 
337  double pmax = y._pk[k] / (y._pk[k] + rho0 * local_exp(-beta * dzCutOff_* dzCutOff_));
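  // pmax is the largest assignment weight a track can get for this vertex (track exactly at
  // the vertex position, no competing vertices); tracks with p > uniquetrkweight_*pmax
  // count as uniquely assigned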
338  for (unsigned int i = 0; i < nt; i++) {
339  if (tks._Z_sum[i] > 1.e-100) {
340  double p = y._pk[k] * local_exp(-beta * Eik(tks._z[i], y._z[k], tks._dz2[i])) / tks._Z_sum[i];
341  sump += p;
342  if ((p > uniquetrkweight_ * pmax) && (tks._pi[i] > 0)) {
343  nUnique++;
344  }
345  }
346  }
347 
348  if ((nUnique < 2) && (sump < sumpmin)) {
349  sumpmin = sump;
350  k0 = k;
351  }
352 
353  }
354 
355  if (k0 != nv) {
356  if (verbose_) {
357  std::cout << "eliminating prototype at " << std::setw(10) << std::setprecision(4) << y._z[k0]
358  << " with sump=" << sumpmin
359  << " rho*nt =" << y._pk[k0]*nt
360  << endl;
361  }
362  y.RemoveItem(k0);
363  return true;
364  } else {
365  return false;
366  }
367 }
368 
369 
370 
371 
372 double
373 DAClusterizerInZ_vect::beta0(double betamax, track_t const & tks, vertex_t const & y) const {
374 
375  double T0 = 0; // max Tc for beta=0
376  // estimate critical temperature from beta=0 (T=inf)
377  const unsigned int nt = tks.GetSize();
378  const unsigned int nv = y.GetSize();
379 
380  for (unsigned int k = 0; k < nv; k++) {
381 
382  // vertex fit at T=inf
383  double sumwz = 0;
384  double sumw = 0;
385  for (unsigned int i = 0; i < nt; i++) {
386  double w = tks._pi[i] * tks._dz2[i];
387  sumwz += w * tks._z[i];
388  sumw += w;
389  }
390  y._z[k] = sumwz / sumw;
391 
392  // estimate Tcrit (could eventually be done in the same loop)
393  double a = 0, b = 0;
394  for (unsigned int i = 0; i < nt; i++) {
395  double dx = tks._z[i] - y._z[k];
396  double w = tks._pi[i] * tks._dz2[i];
397  a += w * std::pow(dx, 2) * tks._dz2[i];
398  b += w;
399  }
400  double Tc = 2. * a / b; // the critical temperature of this vertex
401  if (Tc > T0) T0 = Tc;
402  }// vertex loop (normally there should be only one vertex at beta=0)
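  // Tc = 2*a/b is the critical temperature at which this single prototype becomes unstable
  // and would split; annealing has to start above the largest Tc found, and the starting
  // beta returned below is chosen so that an integer number of cooling steps ends at betamax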
403 
404  if(verbose_){
405  std::cout << "DAClusterizerInZ_vect.beta0: Tc = " << T0 << std::endl;
406  int coolingsteps = 1 - int(std::log(T0 * betamax) / std::log(coolingFactor_));
407  std::cout << "DAClusterizerInZ_vect.beta0: nstep = " << coolingsteps << std::endl;
408  }
409 
410 
411  if (T0 > 1. / betamax) {
412  return betamax / std::pow(coolingFactor_, int(std::log(T0 * betamax) / std::log(coolingFactor_)) - 1);
413  } else {
414  // ensure at least one annealing step
415  return betamax * coolingFactor_;
416  }
417 }
418 
419 
420 
421 bool
422 DAClusterizerInZ_vect::split(const double beta, track_t &tks, vertex_t & y, double threshold ) const{
423  // split only critical vertices (Tc >~ T=1/beta <==> beta*Tc>~1)
424  // an update must have been made just before doing this (same beta, no merging)
425  // returns true if at least one cluster was split
426 
427  double epsilon=1e-3; // minimum split size
428  unsigned int nv = y.GetSize();
429 
430  // avoid left-right biases by splitting highest Tc first
431 
432  std::vector<std::pair<double, unsigned int> > critical;
433  for(unsigned int k=0; k<nv; k++){
434  double Tc= 2*y._swE[k]/y._sw[k];
435  if (beta*Tc > threshold){
436  critical.push_back( make_pair(Tc, k));
437  }
438  }
439  if (critical.empty()) return false;
440 
441 
442  std::stable_sort(critical.begin(), critical.end(), std::greater<std::pair<double, unsigned int> >() );
443 
444 
445  bool split=false;
446  const unsigned int nt = tks.GetSize();
447 
448  for(unsigned int ic=0; ic<critical.size(); ic++){
449  unsigned int k=critical[ic].second;
450 
451  // estimate subcluster positions and weight
452  double p1=0, z1=0, w1=0;
453  double p2=0, z2=0, w2=0;
454  for(unsigned int i=0; i<nt; i++){
455  if (tks._Z_sum[i] > 1.e-100) {
456 
457  // winner-takes-all, usually overestimates splitting
458  double tl = tks._z[i] < y._z[k] ? 1.: 0.;
459  double tr = 1. - tl;
460 
461  // soften it, especially at low T
462  double arg = (tks._z[i] - y._z[k]) * sqrt(beta * tks._dz2[i]);
463  if(std::fabs(arg) < 20){
464  double t = local_exp(-arg);
465  tl = t/(t+1.);
466  tr = 1/(t+1.);
467  }
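  // tl and tr are logistic left/right assignment fractions, tl = 1/(1+exp(arg)); for large
  // |arg| (well-separated tracks or low temperature) the hard winner-takes-all values set
  // above are kept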
468 
469  double p = y._pk[k] * tks._pi[i] * local_exp(-beta * Eik(tks._z[i], y._z[k], tks._dz2[i])) / tks._Z_sum[i];
470  double w = p*tks._dz2[i];
471  p1 += p*tl ; z1 += w*tl*tks._z[i]; w1 += w*tl;
472  p2 += p*tr; z2 += w*tr*tks._z[i]; w2 += w*tr;
473  }
474  }
475 
476  if(w1>0){z1 = z1/w1;} else {z1=y._z[k]-epsilon;}
477  if(w2>0){z2 = z2/w2;} else {z2=y._z[k]+epsilon;}
478 
479  // reduce split size if there is not enough room
480  if( ( k > 0 ) && ( z1 < (0.6*y._z[k] + 0.4*y._z[k-1])) ){ z1 = 0.6*y._z[k] + 0.4*y._z[k-1]; }
481  if( ( k+1 < nv) && ( z2 > (0.6*y._z[k] + 0.4*y._z[k+1])) ){ z2 = 0.6*y._z[k] + 0.4*y._z[k+1]; }
482 
483  if(verbose_){
484  if (std::fabs(y._z[k] - zdumpcenter_) < zdumpwidth_){
485  std::cout << " T= " << std::setw(8) << 1./beta
486  << " Tc= " << critical[ic].first
487  << " splitting " << std::fixed << std::setprecision(4) << y._z[k]
488  << " --> " << z1 << "," << z2
489  << " [" << p1 << "," << p2 << "]" ;
490  if (std::fabs(z2-z1)>epsilon){
491  std::cout << std::endl;
492  }else{
493  std::cout << " rejected " << std::endl;
494  }
495  }
496  }
497 
498  // split if the new subclusters are significantly separated
499  if( (z2-z1) > epsilon){
500  split = true;
501  double pk1 = p1*y._pk[k]/(p1+p2);
502  double pk2 = p2*y._pk[k]/(p1+p2);
503  y._z[k] = z2;
504  y._pk[k] = pk2;
505  y.InsertItem(k, z1, pk1);
506  nv++;
507 
508  // adjust the indices of the remaining critical clusters
509  for(unsigned int jc=ic; jc < critical.size(); jc++){
510  if (critical[jc].second > k) {critical[jc].second++;}
511  }
512  }
513  }
514  return split;
515 }
516 
517 
518 
519 void DAClusterizerInZ_vect::splitAll( vertex_t & y) const {
520 
521  const unsigned int nv = y.GetSize();
522 
523  double epsilon = 1e-3; // split all single vertices by 10 um
524  double zsep = 2 * epsilon; // split vertices that are isolated by at least zsep (vertices that haven't collapsed)
525  vertex_t y1;
526 
527  if (verbose_) {
528  std::cout << "Before Split "<< std::endl;
529  y.DebugOut();
530  }
531 
532  for (unsigned int k = 0; k < nv; k++) {
533  if (
534  ( (k == 0) || ( y._z[k - 1] < (y._z[k] - zsep)) ) &&
535  ( ((k + 1) == nv) || ( y._z[k + 1] > (y._z[k] + zsep)) ) )
536  {
537  // isolated prototype, split
538  double new_z = y.z[k] - epsilon;
539  y.z[k] = y.z[k] + epsilon;
540 
541  double new_pk = 0.5 * y.pk[k];
542  y.pk[k] = 0.5 * y.pk[k];
543 
544  y1.AddItem(new_z, new_pk);
545  y1.AddItem(y._z[k], y._pk[k]);
546  }
547  else if ( (y1.GetSize() == 0 ) ||
548  (y1._z[y1.GetSize() - 1] < (y._z[k] - zsep) ))
549  {
550  y1.AddItem(y._z[k], y._pk[k]);
551  }
552  else
553  {
554  y1._z[y1.GetSize() - 1] = y1._z[y1.GetSize() - 1] - epsilon;
555  y._z[k] = y._z[k] + epsilon;
556  y1.AddItem( y._z[k] , y._pk[k]);
557  }
558  }// vertex loop
559 
560  y = y1;
561  y.ExtractRaw();
562 
563  if (verbose_) {
564  std::cout << "After split " << std::endl;
565  y.DebugOut();
566  }
567 }
568 
569 
570 
571 vector<TransientVertex>
572 DAClusterizerInZ_vect::vertices(const vector<reco::TransientTrack> & tracks, const int verbosity) const {
573  track_t && tks = fill(tracks);
574  tks.ExtractRaw();
575 
576  unsigned int nt = tks.GetSize();
577  double rho0 = 0.0; // start with no outlier rejection
578 
579  vector<TransientVertex> clusters;
580  if (tks.GetSize() == 0) return clusters;
581 
582  vertex_t y; // the vertex prototypes
583 
584  // initialize: single vertex at infinite temperature
585  y.AddItem( 0, 1.0);
586 
587  int niter = 0; // number of iterations
588 
589 
590  // estimate first critical temperature
591  double beta = beta0(betamax_, tks, y);
592  if ( verbose_) std::cout << "Beta0 is " << beta << std::endl;
593 
594  niter = 0;
595  while ((update(beta, tks, y, false, rho0) > 1.e-6) &&
596  (niter++ < maxIterations_)) {}
597 
598  // annealing loop, stop when T<Tmin (i.e. beta>1/Tmin)
599 
600  double betafreeze = betamax_ * sqrt(coolingFactor_);
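  // with coolingFactor_ < 1, betafreeze < betamax_, so the annealing loop below stops within
  // half a cooling step of Tmin; the remaining cooling towards Tpurge and Tstop happens later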
601 
602  while (beta < betafreeze) {
603  if(useTc_){
604  update(beta, tks,y, false, rho0);
605  while(merge(y, beta)){update(beta, tks, y, false, rho0);}
606  split(beta, tks, y);
607  beta=beta/coolingFactor_;
608  }else{
609  beta=beta/coolingFactor_;
610  splitAll(y);
611  }
612 
613  // make sure we are not too far from equilibrium before cooling further
614  niter = 0;
615  while ((update(beta, tks, y, false, rho0) > 1.e-6) &&
616  (niter++ < maxIterations_)) {}
617 
618  if(verbose_){ dump( beta, y, tks, 0); }
619  }
620 
621 
622  if(useTc_){
623  //last round of splitting, make sure no critical clusters are left
624  if(verbose_){ std::cout << "last splitting at " << 1./beta << std::endl; }
625  update(beta, tks,y, false, rho0);// make sure Tc is up-to-date
626  while(merge(y,beta)){update(beta, tks,y, false, rho0);}
627  unsigned int ntry=0;
628  double threshold = 1.0;
629  while( split(beta, tks, y, threshold) && (ntry++<10) ){
630  niter=0;
631  while((update(beta, tks,y, false, rho0)>1.e-6) && (niter++ < maxIterations_)){}
632  while(merge(y,beta)){update(beta, tks,y, false, rho0);}
633  if(verbose_){
634  std::cout << "after final splitting, try " << ntry << std::endl;
635  dump(beta, y, tks, 2);
636  }
637  // relax splitting a bit to reduce multiple split-merge cycles of the same cluster
638  threshold *= 1.1;
639  }
640  }else{
641  // merge collapsed clusters
642  while(merge(y,beta)){update(beta, tks,y, false, rho0);}
643  }
644 
645  if (verbose_) {
646  update(beta, tks,y, false, rho0);
647  std::cout << "dump after 1st merging " << endl;
648  dump(beta, y, tks, 2);
649  }
650 
651 
652  // switch on outlier rejection at T=Tmin
653  if(dzCutOff_ > 0){
654  rho0 = 1./nt;
655  for(unsigned int a=0; a<10; a++){ update(beta, tks, y, true, a*rho0/10);} // adiabatic turn-on
656  }
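  // rho0 = 1/nt gives the outlier term roughly the weight of a single track; ramping it up
  // in ten steps avoids abrupt changes in the assignments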
657 
658  niter=0;
659  while ((update(beta, tks, y, true, rho0) > 1.e-8) && (niter++ < maxIterations_)) {};
660  if (verbose_) {
661  std::cout << "dump after noise-suppression, rho0=" << rho0 << " niter = " << niter << endl;
662  dump(beta, y, tks, 2);
663  }
664 
665  // merge again (some clusters split by outliers collapse here)
666  while (merge(y, beta)) {update(beta, tks, y, true, rho0); }
667  if (verbose_) {
668  std::cout << "dump after merging " << endl;
669  dump(beta, y, tks, 2);
670  }
671 
672  // go down to the purging temperature (if it is lower than Tmin)
673  while( beta < betapurge_ ){
674  beta = min( beta/coolingFactor_, betapurge_);
675  niter = 0;
676  while ((update(beta, tks, y, false, rho0) > 1.e-8) && (niter++ < maxIterations_)) {}
677  }
678 
679 
680  // eliminate insignificant vertices; this is more restrictive at higher T
681  while (purge(y, tks, rho0, beta)) {
682  niter = 0;
683  while ((update(beta, tks, y, true, rho0) > 1.e-6) && (niter++ < maxIterations_)) {}
684  }
685 
686  if (verbose_) {
687  update(beta, tks,y, true, rho0);
688  std::cout << " after purging " << std::endl;
689  dump(beta, y, tks, 2);
690  }
691 
692  // optionally cool some more without doing anything, to make the assignment harder
693  while( beta < betastop_ ){
694  beta = min( beta/coolingFactor_, betastop_);
695  niter =0;
696  while ((update(beta, tks, y, true, rho0) > 1.e-8) && (niter++ < maxIterations_)) {}
697  }
698 
699 
700  if (verbose_) {
701  std::cout << "Final result, rho0=" << std::scientific << rho0 << endl;
702  dump(beta, y, tks, 2);
703  }
704 
705  // select significant tracks and use a TransientVertex as a container
706  GlobalError dummyError(0.01, 0, 0.01, 0., 0., 0.01);
707 
708  // ensure correct normalization of probabilities; should make double assignment practically impossible
709  const unsigned int nv = y.GetSize();
710  for (unsigned int k = 0; k < nv; k++)
711  if ( edm::isNotFinite(y._pk[k]) || edm::isNotFinite(y._z[k]) ) { y._pk[k]=0; y._z[k]=0;}
712 
713  for (unsigned int i = 0; i < nt; i++) // initialize
714  tks._Z_sum[i] = rho0 * local_exp(-beta * dzCutOff_ * dzCutOff_);
715 
716  // loop order chosen to improve vectorization (no reduction required)
717  for (unsigned int k = 0; k < nv; k++) {
718  for (unsigned int i = 0; i < nt; i++)
719  tks._Z_sum[i] += y._pk[k] * local_exp(-beta * Eik(tks._z[i], y._z[k],tks._dz2[i]));
720  }
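  // _Z_sum[i] is now the full partition function (noise term plus all vertices); the
  // probability p computed below is the final assignment weight, and resetting _Z_sum to
  // zero once a track is taken makes the assignment exclusive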
721 
722 
723  for (unsigned int k = 0; k < nv; k++) {
724  GlobalPoint pos(0, 0, y._z[k]);
725 
726  vector<reco::TransientTrack> vertexTracks;
727  for (unsigned int i = 0; i < nt; i++) {
728  if (tks._Z_sum[i] > 1e-100) {
729 
730  double p = y._pk[k] * local_exp(-beta * Eik(tks._z[i], y._z[k],
731  tks._dz2[i])) / tks._Z_sum[i];
732  if ((tks._pi[i] > 0) && (p > mintrkweight_)) {
733  vertexTracks.push_back(*(tks.tt[i]));
734  tks._Z_sum[i] = 0; // setting Z=0 excludes double assignment
735  }
736  }
737  }
738  TransientVertex v(pos, dummyError, vertexTracks, 0);
739  clusters.push_back(v);
740  }
741 
742  return clusters;
743 
744 }
745 
746 vector<vector<reco::TransientTrack> > DAClusterizerInZ_vect::clusterize(
747  const vector<reco::TransientTrack> & tracks) const {
748 
749  if (verbose_) {
750  std::cout << "###################################################" << endl;
751  std::cout << "# vectorized DAClusterizerInZ_vect::clusterize nt=" << tracks.size() << endl;
752  std::cout << "###################################################" << endl;
753  }
754 
755  vector<vector<reco::TransientTrack> > clusters;
756  vector<TransientVertex> && pv = vertices(tracks);
757 
758  if (verbose_) {
759  std::cout << "# DAClusterizerInZ::clusterize pv.size=" << pv.size()
760  << endl;
761  }
762  if (pv.empty()) {
763  return clusters;
764  }
765 
766  // fill into clusters and merge
767  vector<reco::TransientTrack> aCluster = pv.begin()->originalTracks();
768 
769  for (auto k = pv.begin() + 1; k != pv.end(); k++) {
770  if ( std::abs(k->position().z() - (k - 1)->position().z()) > (2 * vertexSize_)) {
771  // close a cluster
772  if (aCluster.size()>1){
773  clusters.push_back(aCluster);
774  }else{
775  if(verbose_){
776  std::cout << " one track cluster at " << k->position().z() << " suppressed" << std::endl;
777  }
778  }
779  aCluster.clear();
780  }
781  for (unsigned int i = 0; i < k->originalTracks().size(); i++) {
782  aCluster.push_back(k->originalTracks()[i]);
783  }
784 
785  }
786  clusters.emplace_back(std::move(aCluster));
787 
788  return clusters;
789 
790 }
791 
792 
793 
794 void DAClusterizerInZ_vect::dump(const double beta, const vertex_t & y,
795  const track_t & tks, int verbosity) const {
796 
797  const unsigned int nv = y.GetSize();
798  const unsigned int nt = tks.GetSize();
799 
800  std::vector< unsigned int > iz;
801  for(unsigned int j=0; j<nt; j++){ iz.push_back(j); }
802  std::sort(iz.begin(), iz.end(), [tks](unsigned int a, unsigned int b){ return tks._z[a]<tks._z[b];} );
803  std::cout << std::endl;
804  std::cout << "-----DAClusterizerInZ::dump ----" << nv << " clusters " << std::endl;
805  std::cout << " z= ";
806  std::cout << setprecision(4);
807  for (unsigned int ivertex = 0; ivertex < nv; ++ ivertex) {
808  if (std::fabs(y._z[ivertex]-zdumpcenter_) < zdumpwidth_){
809  std::cout << setw(8) << fixed << y._z[ivertex];
810  }
811  }
812  std::cout << endl << "T=" << setw(15) << 1. / beta
813  << " Tmin =" << setw(10) << 1./betamax_
814  << " Tc= ";
815  for (unsigned int ivertex = 0; ivertex < nv; ++ ivertex) {
816  if (std::fabs(y._z[ivertex]-zdumpcenter_) < zdumpwidth_){
817  double Tc = 2*y._swE[ivertex]/y._sw[ivertex];
818  std::cout << setw(8) << fixed << setprecision(1) << Tc;
819  }
820  }
821  std::cout << endl;
822 
823  std::cout << " pk= ";
824  double sumpk = 0;
825  for (unsigned int ivertex = 0; ivertex < nv; ++ ivertex) {
826  sumpk += y._pk[ivertex];
827  if (std::fabs(y._z[ivertex] - zdumpcenter_) > zdumpwidth_) continue;
828  std::cout << setw(8) << setprecision(4) << fixed << y._pk[ivertex];
829  }
830  std::cout << endl;
831 
832  std::cout << " nt= ";
833  for (unsigned int ivertex = 0; ivertex < nv; ++ ivertex) {
834  sumpk += y._pk[ivertex];
835  if (std::fabs(y._z[ivertex] - zdumpcenter_) > zdumpwidth_) continue;
836  std::cout << setw(8) << setprecision(1) << fixed << y._pk[ivertex]*nt;
837  }
838  std::cout << endl;
839 
840  if (verbosity > 0) {
841  double E = 0, F = 0;
842  std::cout << endl;
843  std::cout
844  << "---- z +/- dz ip +/-dip pt phi eta weights ----"
845  << endl;
846  std::cout << setprecision(4);
847  for (unsigned int i0 = 0; i0 < nt; i0++) {
848  unsigned int i = iz[i0];
849  if (tks._Z_sum[i] > 0) {
850  F -= std::log(tks._Z_sum[i]) / beta;
851  }
852  double tz = tks._z[i];
853 
854  if( std::fabs(tz - zdumpcenter_) > zdumpwidth_) continue;
855  std::cout << setw(4) << i << ")" << setw(8) << fixed << setprecision(4)
856  << tz << " +/-" << setw(6) << sqrt(1./tks._dz2[i]);
857 
858  if (tks.tt[i]->track().quality(reco::TrackBase::highPurity)) {
859  std::cout << " *";
860  } else {
861  std::cout << " ";
862  }
863  if (tks.tt[i]->track().hitPattern().hasValidHitInPixelLayer(PixelSubdetector::SubDetector::PixelBarrel, 1)) {
864  std::cout << "+";
865  } else {
866  std::cout << "-";
867  }
868  std::cout << setw(1)
869  << tks.tt[i]->track().hitPattern().pixelBarrelLayersWithMeasurement(); // see DataFormats/TrackReco/interface/HitPattern.h
870  std::cout << setw(1)
871  << tks.tt[i]->track().hitPattern().pixelEndcapLayersWithMeasurement();
872  std::cout << setw(1) << hex
873  << tks.tt[i]->track().hitPattern().trackerLayersWithMeasurement()
874  - tks.tt[i]->track().hitPattern().pixelLayersWithMeasurement()
875  << dec;
876  std::cout << "=" << setw(1) << hex
877  << tks.tt[i]->track().hitPattern().numberOfLostHits(reco::HitPattern::MISSING_OUTER_HITS)
878  << dec;
879 
880  Measurement1D IP =
881  tks.tt[i]->stateAtBeamLine().transverseImpactParameter();
882  std::cout << setw(8) << IP.value() << "+/-" << setw(6) << IP.error();
883  std::cout << " " << setw(6) << setprecision(2)
884  << tks.tt[i]->track().pt() * tks.tt[i]->track().charge();
885  std::cout << " " << setw(5) << setprecision(2)
886  << tks.tt[i]->track().phi() << " " << setw(5)
887  << setprecision(2) << tks.tt[i]->track().eta();
888 
889  double sump = 0.;
890  for (unsigned int ivertex = 0; ivertex < nv; ++ ivertex) {
891  if (std::fabs(y._z[ivertex]-zdumpcenter_) > zdumpwidth_) continue;
892 
893  if ((tks._pi[i] > 0) && (tks._Z_sum[i] > 0)) {
894  //double p=pik(beta,tks[i],*k);
895  double p = y._pk[ivertex] * exp(-beta * Eik(tks._z[i], y._z[ivertex], tks._dz2[i])) / tks._Z_sum[i];
896  if (p > 0.0001) {
897  std::cout << setw(8) << setprecision(3) << p;
898  } else {
899  std::cout << " . ";
900  }
901  E += p * Eik(tks._z[i], y._z[ivertex], tks._dz2[i]);
902  sump += p;
903  } else {
904  std::cout << " ";
905  }
906  }
907  std::cout << endl;
908  }
909  std::cout << endl << "T=" << 1 / beta << " E=" << E << " n=" << y.GetSize()
910  << " F= " << F << endl << "----------" << endl;
911  }
912 }