// RecoVertex/PrimaryVertexProducer/src/DAClusterizerInZ.cc
// (extracted from the CMSSW_5_3_0 doxygen source listing)
00001 #include "RecoVertex/PrimaryVertexProducer/interface/DAClusterizerInZ.h"
00002 #include "FWCore/MessageLogger/interface/MessageLogger.h"
00003 #include "DataFormats/GeometryCommonDetAlgo/interface/Measurement1D.h"
00004 #include "RecoVertex/VertexPrimitives/interface/VertexException.h"
00005 
00006 
00007 using namespace std;
00008 
00009 
00010 namespace {
00011 
00012   bool recTrackLessZ1(const DAClusterizerInZ::track_t & tk1,
00013                      const DAClusterizerInZ::track_t & tk2)
00014   {
00015     return tk1.z < tk2.z;
00016   }
00017 }
00018 
00019 
00020 vector<DAClusterizerInZ::track_t> DAClusterizerInZ::fill(
00021                           const vector<reco::TransientTrack> & tracks
00022                           )const{
00023   // prepare track data for clustering
00024   vector<track_t> tks;
00025   for(vector<reco::TransientTrack>::const_iterator it=tracks.begin(); it!=tracks.end(); it++){
00026     track_t t;
00027     t.z=((*it).stateAtBeamLine().trackStateAtPCA()).position().z();
00028     double tantheta=tan(((*it).stateAtBeamLine().trackStateAtPCA()).momentum().theta());
00029     double phi=((*it).stateAtBeamLine().trackStateAtPCA()).momentum().phi();
00030     //  get the beam-spot
00031     reco::BeamSpot beamspot=(it->stateAtBeamLine()).beamSpot();
00032     t.dz2= pow((*it).track().dzError(),2)          // track errror
00033       + (pow(beamspot.BeamWidthX()*cos(phi),2)+pow(beamspot.BeamWidthY()*sin(phi),2))/pow(tantheta,2)  // beam-width induced
00034       + pow(vertexSize_,2);                        // intrinsic vertex size, safer for outliers and short lived decays
00035     if (d0CutOff_>0){
00036       Measurement1D IP=(*it).stateAtBeamLine().transverseImpactParameter();// error constains beamspot
00037       t.pi=1./(1.+exp(pow(IP.value()/IP.error(),2)-pow(d0CutOff_ ,2)));  // reduce weight for high ip tracks  
00038     }else{
00039       t.pi=1.;
00040     }
00041     t.tt=&(*it);
00042     t.Z=1.;
00043     tks.push_back(t);
00044   }
00045   return tks;
00046 }
00047 
00048 
00049 
00050 
00051 
00052 double DAClusterizerInZ::Eik(const track_t & t, const vertex_t &k )const{
00053   return pow(t.z-k.z,2)/t.dz2;
00054 }
00055   
00056 
00057 
00058 
00059 double DAClusterizerInZ::update(
00060                                            double beta,
00061                                            vector<track_t> & tks,
00062                                            vector<vertex_t> & y
00063                                            )const{
00064   //update weights and vertex positions
00065   // mass constrained annealing without noise
00066   // returns the squared sum of changes of vertex positions
00067 
00068   unsigned int nt=tks.size();
00069 
00070   //initialize sums
00071   double sumpi=0;
00072   for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
00073     k->se=0;    k->sw=0;   k->swz=0;
00074     k->swE=0;  k->Tc=0;
00075   }
00076 
00077 
00078   // loop over tracks
00079   for(unsigned int i=0; i<nt; i++){
00080 
00081     // update pik and Zi
00082     double Zi=0.;
00083     for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
00084       k->ei=exp(-beta*Eik(tks[i],*k));// cache exponential for one track at a time
00085       Zi += k->pk * k->ei;
00086     }
00087     tks[i].Z=Zi;
00088 
00089     // normalization for pk
00090     if (tks[i].Z>0){
00091       sumpi += tks[i].pi;
00092       // accumulate weighted z and weights for vertex update
00093       for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
00094         k->se += tks[i].pi* k->ei / Zi;
00095         double w = k->pk * tks[i].pi* k->ei / Zi / tks[i].dz2;
00096         k->sw  += w;
00097         k->swz += w * tks[i].z;
00098         k->swE+= w*Eik(tks[i],*k);
00099       }
00100     }else{
00101       sumpi += tks[i].pi;
00102     }
00103     
00104 
00105   } // end of track loop
00106 
00107 
00108   // now update z and pk
00109   double delta=0;
00110   for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
00111     if ( k->sw > 0){
00112       double znew=k->swz/k->sw; 
00113       delta+=pow(k->z-znew,2);
00114       k->z=znew;
00115       k->Tc=2*k->swE/k->sw;
00116     }else{
00117       edm::LogInfo("sumw") <<  "invalid sum of weights in fit: " << k->sw << endl;
00118       if(verbose_){cout << " a cluster melted away ?  pk=" << k->pk <<  " sumw=" << k->sw <<  endl;}
00119       k->Tc=-1;
00120     }
00121 
00122     k->pk = k->pk * k->se / sumpi;
00123   }
00124 
00125   // return how much the prototypes moved
00126   return delta;
00127 }
00128 
00129 
00130 
00131 
00132 
00133 double DAClusterizerInZ::update(
00134                                 double beta,
00135                                 vector<track_t> & tks,
00136                                 vector<vertex_t> & y,
00137                                 double & rho0
00138                                 )const{
00139   // MVF style, no more vertex weights, update tracks weights and vertex positions, with noise 
00140   // returns the squared sum of changes of vertex positions
00141 
00142   unsigned int nt=tks.size();
00143 
00144   //initialize sums
00145   for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
00146     k->se=0;    k->sw=0;   k->swz=0;
00147     k->swE=0;  k->Tc=0;
00148   }
00149 
00150 
00151   // loop over tracks
00152   for(unsigned int i=0; i<nt; i++){
00153 
00154     // update pik and Zi
00155     double Zi=rho0*exp(-beta*dzCutOff_*dzCutOff_);// cut-off
00156     for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
00157       k->ei=exp(-beta*Eik(tks[i],*k));// cache exponential for one track at a time
00158       Zi += k->pk * k->ei;
00159     }
00160     tks[i].Z=Zi;
00161 
00162     // normalization
00163     if (tks[i].Z>0){
00164        // accumulate weighted z and weights for vertex update
00165       for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
00166         k->se += tks[i].pi* k->ei / Zi;
00167         double w = k->pk * tks[i].pi* k->ei / Zi / tks[i].dz2;
00168         k->sw  += w;
00169         k->swz += w * tks[i].z;
00170         k->swE+= w*Eik(tks[i],*k);
00171       }
00172     }
00173 
00174 
00175   } // end of track loop
00176 
00177 
00178   // now update z
00179   double delta=0;
00180   for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
00181     if ( k->sw > 0){
00182       double znew=k->swz/k->sw; 
00183       delta+=pow(k->z-znew,2);
00184       k->z=znew;
00185       k->Tc=2*k->swE/k->sw;
00186     }else{
00187       edm::LogInfo("sumw") <<  "invalid sum of weights in fit: " << k->sw << endl;
00188       if(verbose_){cout << " a cluster melted away ?  pk=" << k->pk <<  " sumw=" << k->sw <<  endl;}
00189       k->Tc=0;
00190     }
00191 
00192   }
00193 
00194   // return how much the prototypes moved
00195   return delta;
00196 }
00197 
00198 
00199 
00200 
00201 
00202 bool DAClusterizerInZ::merge(vector<vertex_t> & y, int nt)const{
00203   // merge clusters that collapsed or never separated, return true if vertices were merged, false otherwise
00204 
00205   if(y.size()<2)  return false;
00206 
00207   for(vector<vertex_t>::iterator k=y.begin(); (k+1)!=y.end(); k++){
00208     if( fabs( (k+1)->z - k->z ) < 1.e-3 ){  // with fabs if only called after freeze-out (splitAll() at highter T)
00209      double rho=k->pk + (k+1)->pk;
00210       if(rho>0){
00211         k->z=( k->pk * k->z + (k+1)->z * (k+1)->pk)/rho;
00212       }else{
00213         k->z=0.5*(k->z + (k+1)->z);
00214       }
00215       k->pk=rho;
00216  
00217       y.erase(k+1);
00218       return true;  
00219     }
00220   }
00221   
00222   return false;
00223 }
00224 
00225 
00226 
00227 
00228 bool DAClusterizerInZ::merge(vector<vertex_t> & y, double & beta)const{
00229   // merge clusters that collapsed or never separated, 
00230   // only merge if the estimated critical temperature of the merged vertex is below the current temperature
00231   // return true if vertices were merged, false otherwise
00232   if(y.size()<2)  return false;
00233 
00234   for(vector<vertex_t>::iterator k=y.begin(); (k+1)!=y.end(); k++){
00235     if (fabs((k+1)->z - k->z)<2.e-3){ 
00236       double rho=k->pk + (k+1)->pk;
00237       double swE=k->swE+(k+1)->swE - k->pk * (k+1)->pk /rho *pow((k+1)->z - k->z,2);
00238       double Tc=2*swE/(k->sw+(k+1)->sw);
00239       
00240       if(Tc*beta<1){
00241         if(rho>0){
00242           k->z=( k->pk * k->z + (k+1)->z * (k+1)->pk)/rho;
00243         }else{
00244           k->z=0.5*(k->z + (k+1)->z);
00245         }
00246         k->pk=rho;
00247         k->sw+=(k+1)->sw;
00248         k->swE=swE;
00249         k->Tc=Tc;
00250         y.erase(k+1);
00251         return true; 
00252       }
00253     }
00254   }
00255 
00256   return false;
00257 }
00258 
00259 
00260 
00261 
00262 
00263 bool DAClusterizerInZ::purge(vector<vertex_t> & y, vector<track_t> & tks, double & rho0, const double beta)const{
00264   // eliminate clusters with only one significant/unique track
00265   if(y.size()<2)  return false;
00266   
00267   unsigned int nt=tks.size();
00268   double sumpmin=nt;
00269   vector<vertex_t>::iterator k0=y.end();
00270   for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){ 
00271     int nUnique=0;
00272     double sump=0;
00273     double pmax=k->pk/(k->pk+rho0*exp(-beta*dzCutOff_*dzCutOff_));
00274     for(unsigned int i=0; i<nt; i++){
00275       if(tks[i].Z>0){
00276         double p=k->pk * exp(-beta*Eik(tks[i],*k)) / tks[i].Z;
00277         sump+=p;
00278         if( (p > 0.9*pmax) && (tks[i].pi>0) ){ nUnique++; }
00279       }
00280     }
00281 
00282     if((nUnique<2)&&(sump<sumpmin)){
00283       sumpmin=sump;
00284       k0=k;
00285     }
00286   }
00287  
00288   if(k0!=y.end()){
00289     if(verbose_){cout << "eliminating prototype at " << k0->z << " with sump=" << sumpmin << endl;}
00290     //rho0+=k0->pk;
00291     y.erase(k0);
00292     return true;
00293   }else{
00294     return false;
00295   }
00296 }
00297 
00298 
00299  
00300 
00301 double DAClusterizerInZ::beta0(
00302                                double betamax,
00303                                vector<track_t> & tks,
00304                                vector<vertex_t> & y
00305                                )const{
00306   
00307   double T0=0;  // max Tc for beta=0
00308   // estimate critical temperature from beta=0 (T=inf)
00309   unsigned int nt=tks.size();
00310 
00311   for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
00312 
00313     // vertex fit at T=inf 
00314     double sumwz=0;
00315     double sumw=0;
00316     for(unsigned int i=0; i<nt; i++){
00317       double w=tks[i].pi/tks[i].dz2;
00318       sumwz+=w*tks[i].z;
00319       sumw +=w;
00320     }
00321     k->z=sumwz/sumw;
00322 
00323     // estimate Tcrit, eventually do this in the same loop
00324     double a=0, b=0;
00325     for(unsigned int i=0; i<nt; i++){
00326       double dx=tks[i].z-(k->z);
00327       double w=tks[i].pi/tks[i].dz2;
00328       a+=w*pow(dx,2)/tks[i].dz2;
00329       b+=w;
00330     }
00331     double Tc= 2.*a/b;  // the critical temperature of this vertex
00332     if(Tc>T0) T0=Tc;
00333   }// vertex loop (normally there should be only one vertex at beta=0)
00334   
00335   if (T0>1./betamax){
00336     return betamax/pow(coolingFactor_, int(log(T0*betamax)/log(coolingFactor_))-1 );
00337   }else{
00338     // ensure at least one annealing step
00339     return betamax/coolingFactor_;
00340   }
00341 }
00342 
00343 
00344 
00345 
00346 
bool DAClusterizerInZ::split(
                         double beta,
                         vector<track_t> & tks,
                         vector<vertex_t> & y,
                         double threshold
                         )const{
  // split only critical vertices (Tc >~ T=1/beta   <==>   beta*Tc>~1)
  // an update must have been made just before doing this (same beta, no merging)
  // returns true if at least one cluster was split
  // NOTE(review): the 'threshold' parameter is not used in the body; the
  // criticality cut below is hard-coded to beta*Tc > 1 -- confirm intended.

  double epsilon=1e-3;      // split all single vertices by 10 um 
  bool split=false;


  // avoid left-right biases by splitting highest Tc first
  
  std::vector<std::pair<double, unsigned int> > critical;
  for(unsigned int ik=0; ik<y.size(); ik++){
    if (beta*y[ik].Tc > 1.){
      critical.push_back( make_pair(y[ik].Tc, ik));
    }
  }
  stable_sort(critical.begin(), critical.end(), std::greater<std::pair<double, unsigned int> >() );


  for(unsigned int ic=0; ic<critical.size(); ic++){
    unsigned int ik=critical[ic].second;
    // estimate subcluster positions and weight:
    // tracks left/right of the current vertex position seed the two halves
    double p1=0, z1=0, w1=0;
    double p2=0, z2=0, w2=0;
    //double sumpi=0;
    for(unsigned int i=0; i<tks.size(); i++){
      if(tks[i].Z>0){
        //sumpi+=tks[i].pi;
        // assignment probability of track i to vertex ik, times the track weight
        double p=y[ik].pk * exp(-beta*Eik(tks[i],y[ik])) / tks[i].Z*tks[i].pi;
        double w=p/tks[i].dz2;
        if(tks[i].z<y[ik].z){
          p1+=p; z1+=w*tks[i].z; w1+=w;
        }else{
          p2+=p; z2+=w*tks[i].z; w2+=w;
        }
      }
    }
    // fall back to a fixed +-epsilon displacement if one side carries no weight
    if(w1>0){  z1=z1/w1;} else{z1=y[ik].z-epsilon;}
    if(w2>0){  z2=z2/w2;} else{z2=y[ik].z+epsilon;}

    // reduce split size if there is not enough room
    if( ( ik   > 0       ) && ( y[ik-1].z>=z1 ) ){ z1=0.5*(y[ik].z+y[ik-1].z); }
    if( ( ik+1 < y.size()) && ( y[ik+1].z<=z2 ) ){ z2=0.5*(y[ik].z+y[ik+1].z); }

    // split if the new subclusters are significantly separated
    if( (z2-z1)>epsilon){
      split=true;
      vertex_t vnew;
      // share the parent's pk between the two halves according to their track sums
      vnew.pk = p1*y[ik].pk/(p1+p2);
      y[ik].pk= p2*y[ik].pk/(p1+p2);
      vnew.z  = z1;
      y[ik].z = z2;
      y.insert(y.begin()+ik, vnew);

     // adjust remaining pointers
      // the insert above shifted every vertex index > ik up by one
      for(unsigned int jc=ic; jc<critical.size(); jc++){
        if (critical[jc].second>ik) {critical[jc].second++;}
      }
    }
  }

  //  stable_sort(y.begin(), y.end(), clusterLessZ);
  return split;
}
00417 
00418 
00419 
00420 
00421 
void DAClusterizerInZ::splitAll(
                         vector<vertex_t> & y
                         )const{
  // Unconditionally split every prototype into two half-weight copies at
  // z -+ epsilon (used by the old, non-Tc annealing scheme after cooling).

  double epsilon=1e-3;      // split all single vertices by 10 um 
  double zsep=2*epsilon;    // split vertices that are isolated by at least zsep (vertices that haven't collapsed)
  vector<vertex_t> y1;

  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
    if ( ( (k==y.begin())|| (k-1)->z < k->z - zsep) && (((k+1)==y.end()  )|| (k+1)->z > k->z + zsep)) { 
      // isolated prototype, split into two half-weight copies at z -+ epsilon
      vertex_t vnew;
      vnew.z  = k->z -epsilon;
      (*k).z  = k->z+epsilon;
      vnew.pk= 0.5* (*k).pk;
      (*k).pk= 0.5* (*k).pk;
      y1.push_back(vnew);
      y1.push_back(*k);

    }else if( y1.empty() || (y1.back().z < k->z -zsep)){
      // not isolated, but far enough from the previously emitted prototype
      y1.push_back(*k);
    }else{
      // (nearly) coincident with the previous prototype: push the pair apart
      y1.back().z -=epsilon;
      k->z+=epsilon;
      y1.push_back(*k);
    }
  }// vertex loop
  
  y=y1;
}
00453  
00454 
00455 DAClusterizerInZ::DAClusterizerInZ(const edm::ParameterSet& conf) 
00456 {
00457   // some defaults to avoid uninitialized variables
00458   verbose_= conf.getUntrackedParameter<bool>("verbose", false);
00459   useTc_=true;
00460   betamax_=0.1;
00461   betastop_  =1.0;
00462   coolingFactor_=0.8;
00463   maxIterations_=100;
00464   vertexSize_=0.05;  // 0.5 mm
00465   dzCutOff_=4.0;   // Adaptive Fitter uses 3.0 but that appears to be a bit tight here sometimes
00466 
00467   // configure
00468 
00469   double Tmin = conf.getParameter<double>("Tmin");
00470   vertexSize_ = conf.getParameter<double>("vertexSize");
00471   coolingFactor_ = conf.getParameter<double>("coolingFactor");
00472   d0CutOff_  =  conf.getParameter<double>("d0CutOff");
00473   dzCutOff_  =  conf.getParameter<double>("dzCutOff");
00474   maxIterations_=100;
00475   if (Tmin==0){
00476     cout << "DAClusterizerInZ: invalid Tmin" << Tmin << "  reset do default " << 1./betamax_ << endl;
00477   }else{
00478     betamax_ = 1./Tmin;
00479   }
00480 
00481   // for testing, negative cooling factor: revert to old splitting scheme
00482   if(coolingFactor_<0){
00483     coolingFactor_=-coolingFactor_; useTc_=false;
00484   }
00485 
00486 }
00487 
00488 
void DAClusterizerInZ::dump(const double beta, const vector<vertex_t> & y, const vector<track_t> & tks0, int verbosity)const{
  // Debug printout of the annealing state: per-vertex z, Tc and pk, and
  // (for verbosity>0) one line per track with its assignment probability
  // to each vertex prototype.

  // copy and sort for nicer printout
  vector<track_t> tks; 
  for(vector<track_t>::const_iterator t=tks0.begin(); t!=tks0.end(); t++){tks.push_back(*t); }
  stable_sort(tks.begin(), tks.end(), recTrackLessZ1);

  cout << "-----DAClusterizerInZ::dump ----" << endl;
  cout << "beta=" << beta << "   betamax= " << betamax_ << endl;
  cout << "                                                               z= ";
  cout.precision(4);
  for(vector<vertex_t>::const_iterator k=y.begin(); k!=y.end(); k++){
    cout  <<  setw(8) << fixed << k->z ;
  }
  cout << endl << "T=" << setw(15) << 1./beta <<"                                             Tc= ";
  for(vector<vertex_t>::const_iterator k=y.begin(); k!=y.end(); k++){
    cout  <<  setw(8) << fixed << k->Tc ;
  }
 
  cout << endl << "                                                               pk=";
  double sumpk=0;
  for(vector<vertex_t>::const_iterator k=y.begin(); k!=y.end(); k++){
    cout <<  setw(8) <<  setprecision(3) <<  fixed << k->pk;
    sumpk+=k->pk;
  }
  cout  << endl;

  if(verbosity>0){
    // E: assignment-weighted energy; F: free energy -sum_i log(Z_i)/beta
    double E=0, F=0;
    cout << endl;
    cout << "----       z +/- dz                ip +/-dip       pt    phi  eta    weights  ----" << endl;
    cout.precision(4);
    for(unsigned int i=0; i<tks.size(); i++){
      if (tks[i].Z>0){  F-=log(tks[i].Z)/beta;}
      double tz= tks[i].z;
      cout <<  setw (3)<< i << ")" <<  setw (8) << fixed << setprecision(4)<<  tz << " +/-" <<  setw (6)<< sqrt(tks[i].dz2);

      // quality flag and hit-pattern summary for the track
      if(tks[i].tt->track().quality(reco::TrackBase::highPurity)){ cout << " *";}else{cout <<"  ";}
      if(tks[i].tt->track().hitPattern().hasValidHitInFirstPixelBarrel()){cout <<"+";}else{cout << "-";}
      cout << setw(1) << tks[i].tt->track().hitPattern().pixelBarrelLayersWithMeasurement(); // see DataFormats/TrackReco/interface/HitPattern.h
      cout << setw(1) << tks[i].tt->track().hitPattern().pixelEndcapLayersWithMeasurement(); 
      cout << setw(1) << hex << tks[i].tt->track().hitPattern().trackerLayersWithMeasurement()-tks[i].tt->track().hitPattern().pixelLayersWithMeasurement() <<dec; 
      cout << "=" << setw(1)<<hex <<tks[i].tt->track().trackerExpectedHitsOuter().numberOfHits() << dec;

      // transverse impact parameter (error includes the beamspot)
      Measurement1D IP=tks[i].tt->stateAtBeamLine().transverseImpactParameter();
      cout << setw (8) << IP.value() << "+/-" << setw (6) << IP.error();
      cout << " " << setw(6) << setprecision(2)  << tks[i].tt->track().pt()*tks[i].tt->track().charge();
      cout << " " << setw(5) << setprecision(2) << tks[i].tt->track().phi() 
           << " "  << setw(5)  << setprecision(2)   << tks[i].tt->track().eta() ;

      // assignment probabilities of this track to each vertex prototype
      double sump=0.;
      for(vector<vertex_t>::const_iterator k=y.begin(); k!=y.end(); k++){
        if((tks[i].pi>0)&&(tks[i].Z>0)){
          //double p=pik(beta,tks[i],*k);
          double p=k->pk * exp(-beta*Eik(tks[i],*k)) / tks[i].Z; 
          if( p > 0.0001){
            cout <<  setw (8) <<  setprecision(3) << p;
          }else{
            cout << "    .   ";
          }
          E+=p*Eik(tks[i],*k);
          sump+=p;
        }else{
            cout << "        ";
        }
      }
      cout << endl;
    }
    cout << endl << "T=" << 1/beta  << " E=" << E << " n="<< y.size() << "  F= " << F <<  endl <<  "----------" << endl;
  }
}
00560 
00561 
00562 
00563 
00564 
vector< TransientVertex >
DAClusterizerInZ::vertices(const vector<reco::TransientTrack> & tracks, const int verbosity) 
const
{
  // Main entry point of the deterministic-annealing clustering in z:
  //  1) anneal from above the first critical temperature down to Tmin,
  //     splitting critical prototypes and merging collapsed ones,
  //  2) switch on outlier rejection (rho0) and merge/purge,
  //  3) continue cooling to betastop_, eliminating insignificant vertices,
  //  4) assign each track to at most one vertex and return the result.
 
  vector<track_t> tks=fill(tracks);
  unsigned int nt=tracks.size();
  double rho0=0.0;  // start with no outlier rejection

  vector< TransientVertex > clusters;
  if (tks.empty()) return clusters;

  vector<vertex_t> y; // the vertex prototypes

  // initialize:single vertex at infinite temperature
  vertex_t vstart;
  vstart.z=0.;
  vstart.pk=1.;
  y.push_back(vstart);
  int niter=0;      // number of iterations
  

  // estimate first critical temperature and equilibrate there
  double beta=beta0(betamax_, tks, y);
  niter=0; while((update(beta, tks,y)>1.e-6)  && (niter++ < maxIterations_)){ }

  
 // annealing loop, stop when T<Tmin  (i.e. beta>1/Tmin)
  while(beta<betamax_){ 

    if(useTc_){
      // Tc scheme: merge collapsed prototypes, split critical ones, cool
      update(beta, tks,y);
      while(merge(y,beta)){update(beta, tks,y);}
      split(beta, tks,y, 1.);
      beta=beta/coolingFactor_;
    }else{
      // old scheme: cool first, then split every prototype
      beta=beta/coolingFactor_;
      splitAll(y);
    }


   // make sure we are not too far from equilibrium before cooling further
   niter=0; while((update(beta, tks,y)>1.e-6)  && (niter++ < maxIterations_)){ }

  }

  if(useTc_){
    // last round of splitting, make sure no critical clusters are left
    update(beta, tks,y);
    while(merge(y,beta)){update(beta, tks,y);}
    unsigned int ntry=0;
    while( split(beta, tks,y,1.) && (ntry++<10) ){
      niter=0; 
      while((update(beta, tks,y)>1.e-6)  && (niter++ < maxIterations_)){}
      merge(y,beta);
      update(beta, tks,y);
    }
  }else{
    // merge collapsed clusters 
    while(merge(y,beta)){update(beta, tks,y);}  
    if(verbose_ ){ cout << "dump after 1st merging " << endl;  dump(beta,y,tks,2);}
  }
  



  // switch on outlier rejection
  rho0=1./nt; for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){ k->pk =1.; }  // democratic
  niter=0; while((update(beta, tks,y,rho0) > 1.e-8)  && (niter++ < maxIterations_)){  }
  if(verbose_  ){ cout << "rho0=" << rho0 <<   " niter=" << niter <<  endl; dump(beta,y,tks,2);}

  
  // merge again  (some cluster split by outliers collapse here)
  while(merge(y,tks.size())){}  
  if(verbose_  ){ cout << "dump after 2nd merging " << endl;  dump(beta,y,tks,2);}


  // continue from freeze-out to Tstop (=1) without splitting, eliminate insignificant vertices
  while(beta<=betastop_){
    while(purge(y,tks,rho0, beta)){
      niter=0; while((update(beta, tks,y,rho0) > 1.e-6)  && (niter++ < maxIterations_)){  }
    } 
    beta/=coolingFactor_;
    niter=0; while((update(beta, tks,y,rho0) > 1.e-6)  && (niter++ < maxIterations_)){  }
  }


//   // new, one last round of cleaning at T=Tstop
//   while(purge(y,tks,rho0, beta)){
//     niter=0; while((update(beta, tks,y,rho0) > 1.e-6)  && (niter++ < maxIterations_)){  }
//   } 


  if(verbose_){
   cout << "Final result, rho0=" << rho0 << endl;
   dump(beta,y,tks,2);
  }


  // select significant tracks and use a TransientVertex as a container
  GlobalError dummyError; 


  // ensure correct normalization of probabilities, should make double assginment reasonably impossible
  for(unsigned int i=0; i<nt; i++){  
    tks[i].Z=rho0*exp(-beta*dzCutOff_*dzCutOff_);
    for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){ 
      tks[i].Z+=k->pk * exp(-beta*Eik(tks[i],*k));
    }
  }


  // build one TransientVertex per prototype from its p>0.5 tracks
  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){ 
    GlobalPoint pos(0, 0, k->z);
    vector< reco::TransientTrack > vertexTracks;
    for(unsigned int i=0; i<nt; i++){
      if(tks[i].Z>0){
        double p=k->pk * exp(-beta*Eik(tks[i],*k)) / tks[i].Z;
        if( (tks[i].pi>0) && ( p > 0.5 ) ){ vertexTracks.push_back(*(tks[i].tt)); tks[i].Z=0; } // setting Z=0 excludes double assignment
      }
    }
    TransientVertex v(pos, dummyError, vertexTracks, 5);
    clusters.push_back(v);
  }


  return clusters;

}
00694 
00695 
00696 
00697 
00698 
00699 vector< vector<reco::TransientTrack> >
00700 DAClusterizerInZ::clusterize(const vector<reco::TransientTrack> & tracks)
00701   const
00702 {
00703   if(verbose_) {
00704     cout << "###################################################" << endl;
00705     cout << "# DAClusterizerInZ::clusterize   nt="<<tracks.size() << endl;
00706     cout << "###################################################" << endl;
00707   }
00708 
00709   vector< vector<reco::TransientTrack> > clusters;
00710   vector< TransientVertex > pv=vertices(tracks);
00711 
00712   if(verbose_){ cout << "# DAClusterizerInZ::clusterize   pv.size="<<pv.size() << endl;  }
00713   if (pv.size()==0){ return clusters;}
00714 
00715 
00716   // fill into clusters and merge
00717   vector< reco::TransientTrack>  aCluster=pv.begin()->originalTracks();
00718   
00719   for(vector<TransientVertex>::iterator k=pv.begin()+1; k!=pv.end(); k++){
00720     if ( fabs(k->position().z() - (k-1)->position().z()) > (2*vertexSize_) ){
00721       // close a cluster
00722       clusters.push_back(aCluster);
00723       aCluster.clear();
00724     }
00725     for(unsigned int i=0; i<k->originalTracks().size(); i++){ aCluster.push_back( k->originalTracks().at(i)); }
00726     
00727   }
00728   clusters.push_back(aCluster);
00729   
00730   
00731   return clusters;
00732 
00733 }
00734