CMS 3D CMS Logo

 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Properties Friends Macros Pages
DAClusterizerInZ_vect.cc
Go to the documentation of this file.
5 
6 #include <cmath>
7 #include <cassert>
8 #include <limits>
9 #include <iomanip>
10 
11 #include "vdt/vdtMath.h"
12 
13 using namespace std;
14 
16 
// NOTE(review): the opening line of this constructor (per the class interface:
// "DAClusterizerInZ_vect(const edm::ParameterSet& conf)") is missing from this
// extract; the body below is kept unchanged.
	// some defaults to avoid uninitialized variables
	verbose_ = conf.getUntrackedParameter<bool> ("verbose", false);
	use_vdt_ = conf.getUntrackedParameter<bool> ("use_vdt", false);

	betamax_ = 0.1;       // default inverse of the minimum temperature
	betastop_ = 1.0;      // annealing stops at T = 1/betastop_
	coolingFactor_ = 0.6; // T is divided by this each annealing step
	maxIterations_ = 100; // cap on update() iterations per temperature
	vertexSize_ = 0.01;   // 0.1 mm
	dzCutOff_ = 4.0;      // z distance beyond which tracks count as outliers

	// configure: overwrite the defaults from the ParameterSet

	double Tmin = conf.getParameter<double> ("Tmin");
	vertexSize_ = conf.getParameter<double> ("vertexSize");
	coolingFactor_ = conf.getParameter<double> ("coolingFactor");
	// a negative cooling factor is the configuration convention for disabling
	// critical-temperature (Tc) guided splitting; its magnitude is still used
	useTc_=true;
	if(coolingFactor_<0){
		coolingFactor_=-coolingFactor_;
		useTc_=false;
	}
	d0CutOff_ = conf.getParameter<double> ("d0CutOff");
	dzCutOff_ = conf.getParameter<double> ("dzCutOff");
	maxIterations_ = 100;
	if (Tmin == 0) {
		// Tmin == 0 would make betamax_ infinite; keep the default instead
		LogDebug("DAClusterizerinZ_vectorized") << "DAClusterizerInZ: invalid Tmin" << Tmin
				<< " reset do default " << 1. / betamax_ << endl;
	} else {
		betamax_ = 1. / Tmin;
	}

}
49 
50 inline double DAClusterizerInZ_vect::local_exp( double const& inp) const
51  {
52  if ( use_vdt_)
53  return vdt::fast_exp( inp );
54 
55  return std::exp( inp );
56  }
57 
58 inline void DAClusterizerInZ_vect::local_exp_list( double* arg_inp, double* arg_out,const int arg_arr_size) const
59  {
60  if ( use_vdt_){
61 // std::cout << "I use vdt!\n";
62  for(int i=0; i!=arg_arr_size; ++i ) arg_out[i]=vdt::fast_exp(arg_inp[i]);
63  // vdt::fast_expv(arg_arr_size, arg_inp, arg_out);
64  }
65  else
66  for(int i=0; i!=arg_arr_size; ++i ) arg_out[i]=std::exp(arg_inp[i]);
67  }
68 
//todo: use r-value possibility of c++11 here
// NOTE(review): the opening of this definition (per the class interface:
// "track_t fill(const std::vector<reco::TransientTrack>& tracks) const") is
// truncated in this extract; the visible tail and body are kept unchanged.
		reco::TransientTrack> & tracks) const {

	// prepare track data for clustering: extract z at the beam line, a z
	// variance, and an impact-parameter based weight for each track
	track_t tks;
	for (vector<reco::TransientTrack>::const_iterator it = tracks.begin(); it
			!= tracks.end(); it++)
	{
		double t_pi;
		// z of the point of closest approach to the beam line
		double t_z = ((*it).stateAtBeamLine().trackStateAtPCA()).position().z();
		double phi=((*it).stateAtBeamLine().trackStateAtPCA()).momentum().phi();
		double tantheta = tan(
				((*it).stateAtBeamLine().trackStateAtPCA()).momentum().theta());
		// get the beam-spot
		reco::BeamSpot beamspot = (it->stateAtBeamLine()).beamSpot();
		// z variance: track error + beam width projected onto z + intrinsic term
		double t_dz2 =
				pow((*it).track().dzError(), 2) // track error
		+ (pow(beamspot.BeamWidthX()*cos(phi),2)+pow(beamspot.BeamWidthY()*sin(phi),2))/pow(tantheta,2) // beam-width
		+ pow(vertexSize_, 2); // intrinsic vertex size, safer for outliers and short lived decays
		if (d0CutOff_ > 0) {
			// NOTE(review): the declaration line "Measurement1D IP =" appears to
			// be missing from this extract; the next line is its initializer.
			(*it).stateAtBeamLine().transverseImpactParameter();// error contains beamspot
			t_pi = 1. / (1. + local_exp(pow(IP.value() / IP.error(), 2) - pow(
					d0CutOff_, 2))); // reduce weight for high ip tracks
		} else {
			t_pi = 1.; // no IP-based down-weighting
		}

		tks.AddItem(t_z, t_dz2, &(*it), t_pi);
	}
	tks.ExtractRaw();

	if (verbose_) {
		LogDebug("DAClusterizerinZ_vectorized") << "Track count " << tks.GetSize() << std::endl;
	}

	return tks;
}
108 
109 
110 double DAClusterizerInZ_vect::Eik(double const& t_z, double const& k_z, double const& t_dz2) const
111 {
112  return pow(t_z - k_z, 2) / t_dz2;
113 }
114 
115 
// One annealing iteration at inverse temperature beta: recompute track-to-vertex
// assignment weights and move each vertex prototype to the weighted mean of its
// assigned tracks (mass constrained annealing without noise).
// @param beta      inverse temperature
// @param gtracks   track data (z, dz2, pi, Z_sum are read/updated)
// @param gvertices vertex prototypes (position, pk and accumulators updated)
// @param useRho0   when true, include the outlier-rejection term and freeze pk
// @param rho0      outlier density used when useRho0 is true
// @return the squared sum of changes of vertex positions
double DAClusterizerInZ_vect::update(double beta, track_t & gtracks,
		vertex_t & gvertices, bool useRho0, double & rho0) const {

	//update weights and vertex positions
	// mass constrained annealing without noise
	// returns the squared sum of changes of vertex positions

	const unsigned int nt = gtracks.GetSize();
	const unsigned int nv = gvertices.GetSize();

	//initialize sums
	double sumpi = 0;

	// to return how much the prototype moved
	double delta = 0;

	unsigned int itrack;
	unsigned int ivertex;

	// initial value of the partition-function sum (outlier term when useRho0)
	double Z_init = 0;

	// define kernels
	// fills _ei_cache[k] = -beta * (z_track - z_vertex)^2 / dz2 for all vertices
	auto kernel_calc_exp_arg = [ &beta, nv ] (
			const unsigned int itrack,
			track_t const& tracks,
			vertex_t const& vertices )
	{
		const double track_z = tracks._z[itrack];
		const double track_dz2 = tracks._dz2[itrack];

		// auto-vectorized
		for ( unsigned int ivertex = 0; ivertex < nv; ++ivertex)
		{
			double mult_res = ( track_z - vertices._z[ivertex] );
			vertices._ei_cache[ivertex] = -beta *
					( mult_res * mult_res ) / track_dz2;
		}
	};

	// auto kernel_add_Z = [ nv, &Z_init ] (vertex_t const& vertices) raises a warning
	// we declare the return type with " -> double"
	// partition function Z for one track: sum over vertices of pk * exp-term
	auto kernel_add_Z = [ nv, &Z_init ] (vertex_t const& vertices) -> double
	{
		double ZTemp = Z_init;

		for (unsigned int ivertex = 0; ivertex < nv; ++ivertex) {

			ZTemp += vertices._pk[ivertex] * vertices._ei[ivertex];
		}
		return ZTemp;
	};

	// accumulate per-vertex sums (se, sw, swz, swE) from one track's weights
	auto kernel_calc_normalization = [ &beta, nv ] (const unsigned int track_num,
			track_t & tks_vec,
			vertex_t & y_vec )
	{
		double w;

		const double tmp_trk_pi = tks_vec._pi[track_num];
		const double tmp_trk_Z_sum = tks_vec._Z_sum[track_num];
		const double tmp_trk_dz2 = tks_vec._dz2[track_num];
		const double tmp_trk_z = tks_vec._z[track_num];

		// auto-vectorized
		for (unsigned int k = 0; k < nv; ++k) {
			y_vec._se[k] += tmp_trk_pi* y_vec._ei[k] / tmp_trk_Z_sum;
			w = y_vec._pk[k] * tmp_trk_pi * y_vec._ei[k] / tmp_trk_Z_sum / tmp_trk_dz2;
			y_vec._sw[k] += w;
			y_vec._swz[k] += w * tmp_trk_z;
			// _ei_cache holds -beta*E; dividing by -beta recovers E for the Tc sums
			y_vec._swE[k] += w * y_vec._ei_cache[k]/(-beta);
		}
	};

	// reset the per-vertex accumulators (not vectorized)
	for (ivertex = 0; ivertex < nv; ++ivertex) {
		gvertices._se[ivertex] = 0.0;
		gvertices._sw[ivertex] = 0.0;
		gvertices._swz[ivertex] = 0.0;
		gvertices._swE[ivertex] = 0.0;
	}


	// independent of the track loop: constant outlier term in Z
	if ( useRho0 )
	{
		Z_init = rho0 * local_exp(-beta * dzCutOff_ * dzCutOff_); // cut-off
	}

	// loop over tracks
	for (itrack = 0; itrack < nt; ++itrack) {
		kernel_calc_exp_arg(itrack, gtracks, gvertices);
		local_exp_list(gvertices._ei_cache, gvertices._ei, nv);
		//vdt::cephes_single_exp_vect( y_vec._ei, nv );

		gtracks._Z_sum[itrack] = kernel_add_Z(gvertices);

		// used in the next major loop to follow
		if (!useRho0)
			sumpi += gtracks._pi[itrack];

		// only tracks with a positive partition function contribute
		if (gtracks._Z_sum[itrack] > 0) {
			kernel_calc_normalization(itrack, gtracks, gvertices);
		}
	}

	// now update z and pk
	auto kernel_calc_z = [ &delta, &sumpi, nv, this, useRho0 ] (vertex_t & vertices )
	{
		// does not vectorize
		for (unsigned int ivertex = 0; ivertex < nv; ++ ivertex )
		{
			if (vertices._sw[ivertex] > 0)
			{
				// new position: weighted mean of assigned track z values
				double znew = vertices._swz[ ivertex ] / vertices._sw[ ivertex ];

				// prevents from vectorizing
				delta += pow( vertices._z[ ivertex ] - znew, 2 );
				vertices._z[ ivertex ] = znew;
			}
			else {
				edm::LogInfo("sumw") << "invalid sum of weights in fit: " << vertices._sw[ivertex]
						<< endl;
				if (this->verbose_) {
					LogDebug("DAClusterizerinZ_vectorized") << " a cluster melted away ? pk=" << vertices._pk[ ivertex ] << " sumw="
							<< vertices._sw[ivertex] << endl;
				}
			}

			// don't update pk when the outlier (rho0) term is active
			if ( ! useRho0 )
			{
				vertices._pk[ ivertex ] = vertices._pk[ ivertex ] * vertices._se[ ivertex ] / sumpi;
			}
		}
	};

	kernel_calc_z(gvertices);

	// return how much the prototypes moved
	return delta;
}
258 
259 
260 
// NOTE(review): the opening line of this definition (a merge overload taking
// the vertex set and beta) is missing from this extract; the body below is
// kept unchanged.
	// merge clusters that collapsed or never separated,
	// only merge if the estimated critical temperature of the merged vertex is below the current temperature
	// return true if vertices were merged, false otherwise
	const unsigned int nv = y.GetSize();

	if (nv < 2)
		return false;


	for (unsigned int k = 0; (k + 1) < nv; k++) {
		// candidate pair: z-neighbours closer than 200 um
		if (fabs(y._z[k + 1] - y._z[k]) < 2.e-2) {
			double rho=y._pk[k] + y._pk[k+1];
			// critical temperature the merged vertex would have
			double swE=y._swE[k]+y._swE[k+1] - y._pk[k]*y._pk[k+1] /rho *pow(y._z[k+1]-y._z[k],2);
			double Tc=2*swE/(y._sw[k]+y._sw[k+1]);

			if(Tc*beta<1){
				// merged vertex is stable at the current temperature: merge
				if(rho>0){
					// pk-weighted mean position
					y._z[k] = (y._pk[k]*y._z[k] + y._pk[k+1]*y._z[k + 1])/rho;
				}else{
					y._z[k] = 0.5 * (y._z[k] + y._z[k + 1]);
				}
				y._pk[k] = rho;
				y._sw[k]+=y._sw[k+1];
				y._swE[k]=swE;
				y.RemoveItem(k+1);
				return true; // at most one merge per call
			}
		}
	}

	return false;
}
294 
295 
296 
297 
298 
// NOTE(review): the opening line of this definition (per the class interface:
// "bool merge(vertex_t&) const") is missing from this extract; the body below
// is kept unchanged.
	// merge clusters that collapsed or never separated, return true if vertices were merged, false otherwise

	const unsigned int nv = y.GetSize();

	if (nv < 2)
		return false;

	for (unsigned int k = 0; (k + 1) < nv; k++) {
		//if ((k+1)->z - k->z<1.e-2){ // note, no fabs here, maintains z-ordering (with split()+merge() at every temperature)
		if (fabs(y._z[k + 1] - y._z[k]) < 1.e-2) { // with fabs if only called after freeze-out (splitAll() at highter T)
			// merge the pair: add the weights, average the positions
			y._pk[k] += y._pk[k + 1];
			y._z[k] = 0.5 * (y._z[k] + y._z[k + 1]);

			y.RemoveItem(k + 1);

			if ( verbose_)
			{
				LogDebug("DAClusterizerinZ_vectorized") << "Merging vertices k = " << k << std::endl;
			}

			return true; // at most one merge per call
		}
	}

	return false;
}
326 
327 bool DAClusterizerInZ_vect::purge(vertex_t & y, track_t & tks, double & rho0,
328  const double beta) const {
329  // eliminate clusters with only one significant/unique track
330  const unsigned int nv = y.GetSize();
331  const unsigned int nt = tks.GetSize();
332 
333  if (nv < 2)
334  return false;
335 
336  double sumpmin = nt;
337  unsigned int k0 = nv;
338 
339  for (unsigned int k = 0; k < nv; k++) {
340 
341  int nUnique = 0;
342  double sump = 0;
343  double pmax = y._pk[k] / (y._pk[k] + rho0 * local_exp(-beta * dzCutOff_
344  * dzCutOff_));
345 
346  for (unsigned int i = 0; i < nt; i++) {
347 
348  if (tks._Z_sum[i] > 0) {
349  //double p=pik(beta,tks[i],*k);
350  double p = y._pk[k] * local_exp(-beta * Eik(tks._z[i], y._z[k],
351  tks._dz2[i])) / tks._Z_sum[i];
352  sump += p;
353  if ((p > 0.9 * pmax) && (tks._pi[i] > 0)) {
354  nUnique++;
355  }
356  }
357  }
358 
359  if ((nUnique < 2) && (sump < sumpmin)) {
360  sumpmin = sump;
361  k0 = k;
362  }
363  }
364 
365  if (k0 != nv) {
366  if (verbose_) {
367  LogDebug("DAClusterizerinZ_vectorized") << "eliminating prototype at " << y._z[k0] << " with sump="
368  << sumpmin << endl;
369  }
370  //rho0+=k0->pk;
371  y.RemoveItem(k0);
372  return true;
373  } else {
374  return false;
375  }
376 }
377 
378 double DAClusterizerInZ_vect::beta0(double betamax, track_t & tks, vertex_t & y) const {
379 
380  double T0 = 0; // max Tc for beta=0
381  // estimate critical temperature from beta=0 (T=inf)
382  const unsigned int nt = tks.GetSize();
383  const unsigned int nv = y.GetSize();
384 
385  for (unsigned int k = 0; k < nv; k++) {
386 
387  // vertex fit at T=inf
388  double sumwz = 0;
389  double sumw = 0;
390  for (unsigned int i = 0; i < nt; i++) {
391  double w = tks._pi[i] / tks._dz2[i];
392  sumwz += w * tks._z[i];
393  sumw += w;
394  }
395  y._z[k] = sumwz / sumw;
396 
397  // estimate Tcrit, eventually do this in the same loop
398  double a = 0, b = 0;
399  for (unsigned int i = 0; i < nt; i++) {
400  double dx = tks._z[i] - (y._z[k]);
401  double w = tks._pi[i] / tks._dz2[i];
402  a += w * pow(dx, 2) / tks._dz2[i];
403  b += w;
404  }
405  double Tc = 2. * a / b; // the critical temperature of this vertex
406  if (Tc > T0)
407  T0 = Tc;
408  }// vertex loop (normally there should be only one vertex at beta=0)
409 
410  if (T0 > 1. / betamax) {
411  return betamax / pow(coolingFactor_, int(log(T0 * betamax) / log(
412  coolingFactor_)) - 1);
413  } else {
414  // ensure at least one annealing step
415  return betamax / coolingFactor_;
416  }
417 }
418 
419 
// Split vertices whose critical temperature has been reached.
// @param beta current inverse temperature
// @param tks  track data (read only)
// @param y    vertex prototypes (modified in place; stays z-ordered)
// @return true if at least one cluster was split
bool DAClusterizerInZ_vect::split(const double beta, track_t &tks, vertex_t & y ) const{
	// split only critical vertices (Tc >~ T=1/beta <==> beta*Tc>~1)
	// an update must have been made just before doing this (same beta, no merging)
	// returns true if at least one cluster was split

	double epsilon=1e-3; // split all single vertices by 10 um
	unsigned int nv = y.GetSize();

	// avoid left-right biases by splitting highest Tc first

	std::vector<std::pair<double, unsigned int> > critical;
	for(unsigned int k=0; k<nv; k++){
		// critical temperature from the accumulators filled by update()
		double Tc= 2*y._swE[k]/y._sw[k];
		if (beta*Tc > 1.){
			critical.push_back( make_pair(Tc, k));
		}
	}
	if (critical.size()==0) return false;
	// highest Tc first; stable_sort keeps z-order among equal Tc
	stable_sort(critical.begin(), critical.end(), std::greater<std::pair<double, unsigned int> >() );


	bool split=false;
	const unsigned int nt = tks.GetSize();

	for(unsigned int ic=0; ic<critical.size(); ic++){
		unsigned int k=critical[ic].second;
		// estimate subcluster positions and weight
		double p1=0, z1=0, w1=0;
		double p2=0, z2=0, w2=0;
		for(unsigned int i=0; i<nt; i++){
			if (tks._Z_sum[i] > 0) {
				// assignment probability of track i to vertex k
				double p = y._pk[k] * local_exp(-beta * Eik(tks._z[i], y._z[k],
						tks._dz2[i])) / tks._Z_sum[i];

				double w=p/tks._dz2[i];
				// accumulate tracks left/right of the prototype separately
				if(tks._z[i]<y._z[k]){
					p1+=p; z1+=w*tks._z[i]; w1+=w;
				}else{
					p2+=p; z2+=w*tks._z[i]; w2+=w;
				}
			}
		}
		// fall back to a symmetric epsilon split when one side is empty
		if(w1>0){ z1=z1/w1;} else{z1=y._z[k]-epsilon;}
		if(w2>0){ z2=z2/w2;} else{z2=y._z[k]+epsilon;}

		// reduce split size if there is not enough room
		if( ( k > 0 ) && ( y._z[k-1]>=z1 ) ){ z1=0.5*(y._z[k]+y._z[k-1]); }
		if( ( k+1 < nv) && ( y._z[k+1]<=z2 ) ){ z2=0.5*(y._z[k]+y._z[k+1]); }

		// split if the new subclusters are significantly separated
		if( (z2-z1)>epsilon){
			split=true;
			// share the original pk in proportion to the subcluster weights
			double pk1=p1*y._pk[k]/(p1+p2);
			double pk2=p2*y._pk[k]/(p1+p2);
			y._z[k] = z2;
			y._pk[k] = pk2;
			y.InsertItem(k, z1, pk1);
			nv++;

			// adjust remaining indices after the insertion shifted them
			for(unsigned int jc=ic; jc<critical.size(); jc++){
				if (critical[jc].second>k) {critical[jc].second++;}
			}
		}
	}
	return split;
}
487 
488 
489 
// NOTE(review): the opening line of this definition (per the class interface:
// "void splitAll(vertex_t& y) const") is missing from this extract; the body
// below is kept unchanged.

	const unsigned int nv = y.GetSize();

	double epsilon = 1e-3; // split all single vertices by 10 um
	double zsep = 2 * epsilon; // split vertices that are isolated by at least zsep (vertices that haven't collapsed)
	vertex_t y1; // rebuilt prototype list, replaces y at the end

	if (verbose_)
	{
		LogDebug("DAClusterizerinZ_vectorized") << "Before Split "<< std::endl;
		y.DebugOut();
	}

	for (unsigned int k = 0; k < nv; k++) {

		// isolated on both sides by at least zsep?
		if (
				( (k == 0) || ( y._z[k - 1] < (y._z[k] - zsep)) ) &&
				( ((k + 1) == nv) || ( y._z[k + 1] > (y._z[k] + zsep)) ) )
		{
			// isolated prototype, split
			// NOTE(review): 'y.z[k]' / 'y.pk[k]' below differ from the
			// 'y._z' / 'y._pk' member names used everywhere else in this file;
			// likely a transcription error for 'y._z[k]' / 'y._pk[k]' —
			// confirm against the vertex_t definition.
			double new_z = y.z[k] - epsilon;
			y.z[k] = y.z[k] + epsilon;

			double new_pk = 0.5 * y.pk[k];
			y.pk[k] = 0.5 * y.pk[k];

			y1.AddItem(new_z, new_pk);
			y1.AddItem(y._z[k], y._pk[k]);

		}
		else if ( (y1.GetSize() == 0 ) ||
				(y1._z[y1.GetSize() - 1] < (y._z[k] - zsep) ))
		{
			// enough room to the previously kept prototype: keep as-is
			y1.AddItem(y._z[k], y._pk[k]);
		}
		else
		{
			// too close to the previous prototype: push the pair apart by epsilon
			y1._z[y1.GetSize() - 1] = y1._z[y1.GetSize() - 1] - epsilon;
			y._z[k] = y._z[k] + epsilon;
			y1.AddItem( y._z[k] , y._pk[k]);
		}
	}// vertex loop

	y = y1;
	y.ExtractRaw();

	if (verbose_)
	{
		LogDebug("DAClusterizerinZ_vectorized") << "After split " << std::endl;
		y.DebugOut();
	}
}
544 
545 
// Debug printout of the current annealing state: vertex positions and weights,
// and (for verbosity > 0) a per-track table with errors, hit-pattern summary,
// impact parameter, kinematics and assignment probabilities.
// Purely diagnostic; does not modify y or tks.
void DAClusterizerInZ_vect::dump(const double beta, const vertex_t & y,
		const track_t & tks, int verbosity) const {

	const unsigned int nv = y.GetSize();
	const unsigned int nt = tks.GetSize();

	LogDebug("DAClusterizerinZ_vectorized") << "-----DAClusterizerInZ::dump ----" << endl;
	LogDebug("DAClusterizerinZ_vectorized") << "beta=" << beta << " betamax= " << betamax_ << endl;
	LogDebug("DAClusterizerinZ_vectorized") << " z= ";
	LogDebug("DAClusterizerinZ_vectorized") << setprecision(4);
	for (unsigned int ivertex = 0; ivertex < nv; ++ ivertex) {
		LogDebug("DAClusterizerinZ_vectorized") << setw(8) << fixed << y._z[ivertex];
	}
	LogDebug("DAClusterizerinZ_vectorized") << endl << "T=" << setw(15) << 1. / beta
			<< " Tc= ";
	LogDebug("DAClusterizerinZ_vectorized") << endl
			<< " pk=";
	double sumpk = 0;
	for (unsigned int ivertex = 0; ivertex < nv; ++ ivertex) {
		LogDebug("DAClusterizerinZ_vectorized") << setw(8) << setprecision(3) << fixed << y._pk[ivertex];
		sumpk += y._pk[ivertex];
	}
	LogDebug("DAClusterizerinZ_vectorized") << endl;

	if (verbosity > 0) {
		// E: assignment energy, F: free energy contribution from the partition sums
		double E = 0, F = 0;
		LogDebug("DAClusterizerinZ_vectorized") << endl;
		LogDebug("DAClusterizerinZ_vectorized")
				<< "---- z +/- dz ip +/-dip pt phi eta weights ----"
				<< endl;
		LogDebug("DAClusterizerinZ_vectorized") << setprecision(4);
		for (unsigned int i = 0; i < nt; i++) {
			if (tks._Z_sum[i] > 0) {
				F -= log(tks._Z_sum[i]) / beta;
			}
			double tz = tks._z[i];
			LogDebug("DAClusterizerinZ_vectorized") << setw(3) << i << ")" << setw(8) << fixed << setprecision(4)
					<< tz << " +/-" << setw(6) << sqrt(tks._dz2[i]);

			// '*' marks high-purity tracks
			if (tks.tt[i]->track().quality(reco::TrackBase::highPurity)) {
				LogDebug("DAClusterizerinZ_vectorized") << " *";
			} else {
				LogDebug("DAClusterizerinZ_vectorized") << " ";
			}
			// '+'/'-' marks a valid hit in the first pixel barrel layer
			if (tks.tt[i]->track().hitPattern().hasValidHitInFirstPixelBarrel()) {
				LogDebug("DAClusterizerinZ_vectorized") << "+";
			} else {
				LogDebug("DAClusterizerinZ_vectorized") << "-";
			}
			LogDebug("DAClusterizerinZ_vectorized") << setw(1)
					<< tks.tt[i]->track().hitPattern().pixelBarrelLayersWithMeasurement(); // see DataFormats/TrackReco/interface/HitPattern.h
			LogDebug("DAClusterizerinZ_vectorized") << setw(1)
					<< tks.tt[i]->track().hitPattern().pixelEndcapLayersWithMeasurement();
			LogDebug("DAClusterizerinZ_vectorized") << setw(1) << hex
					<< tks.tt[i]->track().hitPattern().trackerLayersWithMeasurement()
					- tks.tt[i]->track().hitPattern().pixelLayersWithMeasurement()
					<< dec;
			LogDebug("DAClusterizerinZ_vectorized") << "=" << setw(1) << hex
					<< tks.tt[i]->track().trackerExpectedHitsOuter().numberOfHits()
					<< dec;

			Measurement1D IP =
					tks.tt[i]->stateAtBeamLine().transverseImpactParameter();
			LogDebug("DAClusterizerinZ_vectorized") << setw(8) << IP.value() << "+/-" << setw(6) << IP.error();
			LogDebug("DAClusterizerinZ_vectorized") << " " << setw(6) << setprecision(2)
					<< tks.tt[i]->track().pt() * tks.tt[i]->track().charge();
			LogDebug("DAClusterizerinZ_vectorized") << " " << setw(5) << setprecision(2)
					<< tks.tt[i]->track().phi() << " " << setw(5)
					<< setprecision(2) << tks.tt[i]->track().eta();

			// one column per vertex: the assignment probability of this track
			double sump = 0.;
			for (unsigned int ivertex = 0; ivertex < nv; ++ ivertex) {
				if ((tks._pi[i] > 0) && (tks._Z_sum[i] > 0)) {
					//double p=pik(beta,tks[i],*k);
					double p = y._pk[ivertex] * exp(-beta * Eik(tks._z[i], y._z[ivertex], tks._dz2[i])) / tks._Z_sum[i];
					if (p > 0.0001) {
						LogDebug("DAClusterizerinZ_vectorized") << setw(8) << setprecision(3) << p;
					} else {
						LogDebug("DAClusterizerinZ_vectorized") << " . ";
					}
					E += p * Eik(tks._z[i], y._z[ivertex], tks._dz2[i]);
					sump += p;
				} else {
					LogDebug("DAClusterizerinZ_vectorized") << " ";
				}
			}
			LogDebug("DAClusterizerinZ_vectorized") << endl;
		}
		LogDebug("DAClusterizerinZ_vectorized") << endl << "T=" << 1 / beta << " E=" << E << " n=" << y.GetSize()
				<< " F= " << F << endl << "----------" << endl;
	}
}
638 
// Main deterministic-annealing driver: cluster the tracks in z and return one
// TransientVertex per surviving prototype (position only; a dummy error and
// the tracks assigned with probability > 0.5).
// Phases: fill track data -> anneal from beta0 to betamax_ with split/merge ->
// switch on outlier rejection (rho0) -> merge again -> cool further to
// betastop_ while purging insignificant vertices -> assign tracks.
vector<TransientVertex> DAClusterizerInZ_vect::vertices(const vector<
		reco::TransientTrack> & tracks, const int verbosity) const {


	track_t tks = fill(tracks);
	tks.ExtractRaw();

	unsigned int nt = tracks.size();
	double rho0 = 0.0; // start with no outlier rejection

	vector<TransientVertex> clusters;
	if (tks.GetSize() == 0)
		return clusters;

	vertex_t y; // the vertex prototypes

	// initialize:single vertex at infinite temperature
	y.AddItem( 0, 1.0);

	int niter = 0; // number of iterations


	// estimate first critical temperature
	double beta = beta0(betamax_, tks, y);
	if ( verbose_)
	{
		LogDebug("DAClusterizerinZ_vectorized") << "Beta0 is " << beta << std::endl;
	}

	// thermalize at the starting temperature
	niter = 0;
	while ((update(beta, tks, y, false, rho0) > 1.e-6) && (niter++
			< maxIterations_)) {
	}

	// annealing loop, stop when T<Tmin (i.e. beta>1/Tmin)
	while (beta < betamax_) {


		if(useTc_){
			// Tc-guided: refresh accumulators, merge collapsed pairs, then split
			update(beta, tks,y, false, rho0);
			while(merge(y,beta)){update(beta, tks,y, false, rho0);}
			split(beta, tks,y);
			beta=beta/coolingFactor_;
		}else{
			// plain annealing: cool first, then split every isolated prototype
			beta=beta/coolingFactor_;
			splitAll(y);
		}

		// make sure we are not too far from equilibrium before cooling further
		niter = 0;
		while ((update(beta, tks, y, false, rho0) > 1.e-6) && (niter++
				< maxIterations_)) {
		}

	}


	if(useTc_){
		// last round of splitting, make sure no critical clusters are left
		update(beta, tks,y, false, rho0);// make sure Tc is up-to-date
		while(merge(y,beta)){update(beta, tks,y, false, rho0);}
		unsigned int ntry=0;
		while( split(beta, tks,y) && (ntry++<10) ){
			niter=0;
			while((update(beta, tks,y, false, rho0)>1.e-6) && (niter++ < maxIterations_)){}
			merge(y,beta);
			update(beta, tks,y, false, rho0);
		}
	}else{
		// merge collapsed clusters
		while(merge(y,beta)){update(beta, tks,y, false, rho0);}
		//while(merge(y)){} original code
	}

	if (verbose_) {
		LogDebug("DAClusterizerinZ_vectorized") << "dump after 1st merging " << endl;
		dump(beta, y, tks, 2);
	}

	// switch on outlier rejection
	rho0 = 1. / nt;

	// auto-vectorized
	for (unsigned int k = 0; k < y.GetSize(); k++) {
		y._pk[k] = 1.;
	} // democratic
	niter = 0;
	while ((update(beta, tks, y, true, rho0) > 1.e-8) && (niter++
			< maxIterations_)) {
	}
	if (verbose_) {
		LogDebug("DAClusterizerinZ_vectorized") << "rho0=" << rho0 << " niter=" << niter << endl;
		dump(beta, y, tks, 2);
	}

	// merge again (some cluster split by outliers collapse here)
	while (merge(y)) {
	}
	if (verbose_) {
		LogDebug("DAClusterizerinZ_vectorized") << "dump after 2nd merging " << endl;
		dump(beta, y, tks, 2);
	}

	// continue from freeze-out to Tstop (=1) without splitting, eliminate insignificant vertices
	while (beta <= betastop_) {
		while (purge(y, tks, rho0, beta)) {
			niter = 0;
			while ((update(beta, tks, y, true, rho0) > 1.e-6) && (niter++
					< maxIterations_)) {
			}
		}
		beta /= coolingFactor_;
		niter = 0;
		while ((update(beta, tks, y, true, rho0) > 1.e-6) && (niter++
				< maxIterations_)) {
		}
	}


	if (verbose_) {
		LogDebug("DAClusterizerinZ_vectorized") << "Final result, rho0=" << rho0 << endl;
		dump(beta, y, tks, 2);
	}

	// select significant tracks and use a TransientVertex as a container
	GlobalError dummyError;

	// ensure correct normalization of probabilities, should make double assignment reasonably impossible
	const unsigned int nv = y.GetSize();

	for (unsigned int i = 0; i < nt; i++) {
		tks._Z_sum[i] = rho0 * local_exp(-beta * dzCutOff_ * dzCutOff_);

		for (unsigned int k = 0; k < nv; k++) {
			tks._Z_sum[i] += y._pk[k] * local_exp(-beta * Eik(tks._z[i], y._z[k],
					tks._dz2[i]));
		}
	}

	for (unsigned int k = 0; k < nv; k++) {

		GlobalPoint pos(0, 0, y._z[k]);

		// collect the tracks assigned to this vertex with probability > 0.5
		vector<reco::TransientTrack> vertexTracks;
		for (unsigned int i = 0; i < nt; i++) {
			if (tks._Z_sum[i] > 0) {

				double p = y._pk[k] * local_exp(-beta * Eik(tks._z[i], y._z[k],
						tks._dz2[i])) / tks._Z_sum[i];
				if ((tks._pi[i] > 0) && (p > 0.5)) {

					vertexTracks.push_back(*(tks.tt[i]));
					tks._Z_sum[i] = 0;
				} // setting Z=0 excludes double assignment
			}
		}
		TransientVertex v(pos, dummyError, vertexTracks, 0);
		clusters.push_back(v);
	}


	return clusters;

}
801 
802 vector<vector<reco::TransientTrack> > DAClusterizerInZ_vect::clusterize(
803  const vector<reco::TransientTrack> & tracks) const {
804 
805  if (verbose_) {
806  std::cout << "###################################################" << endl;
807  std::cout << "# vectorized DAClusterizerInZ_vect::clusterize nt=" << tracks.size() << endl;
808  std::cout << "###################################################" << endl;
809  }
810 
811  vector<vector<reco::TransientTrack> > clusters;
812  vector<TransientVertex> pv = vertices(tracks);
813 
814  if (verbose_) {
815  LogDebug("DAClusterizerinZ_vectorized") << "# DAClusterizerInZ::clusterize pv.size=" << pv.size()
816  << endl;
817  }
818  if (pv.size() == 0) {
819  return clusters;
820  }
821 
822  // fill into clusters and merge
823  vector<reco::TransientTrack> aCluster = pv.begin()->originalTracks();
824 
825  for (vector<TransientVertex>::iterator k = pv.begin() + 1; k != pv.end(); k++) {
826  if ( fabs(k->position().z() - (k - 1)->position().z()) > (2 * vertexSize_)) {
827  // close a cluster
828  clusters.push_back(aCluster);
829  aCluster.clear();
830  }
831  for (unsigned int i = 0; i < k->originalTracks().size(); i++) {
832  aCluster.push_back(k->originalTracks().at(i));
833  }
834 
835  }
836  clusters.push_back(aCluster);
837 
838  return clusters;
839 
840 }
841 
#define LogDebug(id)
void AddItem(double new_z, double new_dz2, const reco::TransientTrack *new_tt, double new_pi)
const double beta
dbl * delta
Definition: mlp_gen.cc:36
T getParameter(std::string const &) const
void local_exp_list(double *arg_inp, double *arg_out, const int arg_arr_size) const
T getUntrackedParameter(std::string const &, T const &) const
int i
Definition: DBlmapReader.cc:9
string fill
Definition: lumiContext.py:319
common ppss p3p6s2 common epss epspn46 common const1 w2
Definition: inclppp.h:1
const double w
Definition: UKUtility.cc:23
double beta0(const double betamax, track_t &tks, vertex_t &y) const
void splitAll(vertex_t &y) const
double local_exp(double const &inp) const
void AddItem(double new_z, double new_pk)
Sin< T >::type sin(const T &t)
Definition: Sin.h:22
Definition: DDAxes.h:10
double error() const
Definition: Measurement1D.h:30
bool split(const double beta, track_t &t, vertex_t &y) const
track_t fill(const std::vector< reco::TransientTrack > &tracks) const
std::vector< const reco::TransientTrack * > tt
bool merge(vertex_t &) const
std::vector< std::vector< reco::TransientTrack > > clusterize(const std::vector< reco::TransientTrack > &tracks) const
U second(std::pair< T, U > const &p)
T sqrt(T t)
Definition: SSEVec.h:48
Cos< T >::type cos(const T &t)
Definition: Cos.h:22
tuple IP
Definition: listHistos.py:76
void InsertItem(unsigned int i, double new_z, double new_pk)
Tan< T >::type tan(const T &t)
Definition: Tan.h:22
double update(double beta, track_t &gtracks, vertex_t &gvertices, bool useRho0, double &rho0) const
double p2[4]
Definition: TauolaWrapper.h:90
tuple conf
Definition: dbtoconf.py:185
int nt
Definition: AMPTWrapper.h:32
int k[5][pyjets_maxn]
DAClusterizerInZ_vect(const edm::ParameterSet &conf)
void dump(const double beta, const vertex_t &y, const track_t &tks, const int verbosity=0) const
tuple tracks
Definition: testEve_cfg.py:39
double b
Definition: hdecay.h:120
double value() const
Definition: Measurement1D.h:28
std::vector< TransientVertex > vertices(const std::vector< reco::TransientTrack > &tracks, const int verbosity=0) const
double p1[4]
Definition: TauolaWrapper.h:89
#define update(a, b)
double a
Definition: hdecay.h:121
static int position[264][3]
Definition: ReadPGInfo.cc:509
tuple cout
Definition: gather_cfg.py:121
double Eik(double const &t_z, double const &k_z, double const &t_dz2) const
const double epsilon
static uInt32 F(BLOWFISH_CTX *ctx, uInt32 x)
Definition: blowfish.cc:281
bool purge(vertex_t &, track_t &, double &, const double) const
double split
Definition: MVATrainer.cc:139
Power< A, B >::type pow(const A &a, const B &b)
Definition: Power.h:40
Definition: DDAxes.h:10