
CMSSW_4_4_3_patch1/src/RecoVertex/PrimaryVertexProducer/src/DAClusterizerInZ.cc

#include "RecoVertex/PrimaryVertexProducer/interface/DAClusterizerInZ.h"
#include "FWCore/MessageLogger/interface/MessageLogger.h"
#include "DataFormats/GeometryCommonDetAlgo/interface/Measurement1D.h"
#include "RecoVertex/VertexPrimitives/interface/VertexException.h"


using namespace std;


namespace {

  bool recTrackLessZ1(const DAClusterizerInZ::track_t & tk1,
                      const DAClusterizerInZ::track_t & tk2)
  {
    return tk1.z < tk2.z;
  }
}

vector<DAClusterizerInZ::track_t> DAClusterizerInZ::fill(
                          const vector<reco::TransientTrack> & tracks
                          )const{
  // prepare track data for clustering
  vector<track_t> tks;
  for(vector<reco::TransientTrack>::const_iterator it=tracks.begin(); it!=tracks.end(); it++){
    track_t t;
    t.z=((*it).stateAtBeamLine().trackStateAtPCA()).position().z();
    double tantheta=tan(((*it).stateAtBeamLine().trackStateAtPCA()).momentum().theta());
    // get the beam-spot
    reco::BeamSpot beamspot=(it->stateAtBeamLine()).beamSpot();
    t.dz2= pow((*it).track().dzError(),2)          // track error
      + (pow(beamspot.BeamWidthX(),2)+pow(beamspot.BeamWidthY(),2))/pow(tantheta,2)  // beam-width induced
      + pow(vertexSize_,2);                        // intrinsic vertex size, safer for outliers and short-lived decays
    if (d0CutOff_>0){
      Measurement1D IP=(*it).stateAtBeamLine().transverseImpactParameter();// error contains the beamspot
      t.pi=1./(1.+exp(pow(IP.value()/IP.error(),2)-pow(d0CutOff_ ,2)));  // reduce the weight of high-IP tracks
    }else{
      t.pi=1.;
    }
    t.tt=&(*it);
    t.Z=1.;
    tks.push_back(t);
  }
  return tks;
}
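
// In formulas (with the names used above): each track i enters the clustering
// with an effective z-resolution
//   dz2_i = dzError_i^2 + (BeamWidthX^2 + BeamWidthY^2)/tan^2(theta_i) + vertexSize_^2
// and an overall weight
//   pi_i  = 1 / (1 + exp( (IP_i/sigma_IP_i)^2 - d0CutOff_^2 ))    (pi_i = 1 if d0CutOff_ <= 0),
// so tracks with a large transverse impact-parameter significance are smoothly down-weighted.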




// double DAClusterizerInZ::pik(const double beta, const track_t & t, const vertex_t &k )const{
//   //note: t.Z = sum_k exp(-beta*Eik ) is assumed to be valid, this is done in updateAndFit
//   //      then we have sum_k pik(beta, t, k) = 1
//   //      the last call of updateAndFit must have been made with the same temperature !
//   //      at low T, the vertex position must be quite accurate (iterated)
//   double Eik=pow(t.z-k.z,2)/t.dz2;
//   double o= k.pk*exp(-beta*Eik);
//   if(isnan(o)){ cout << "NaN in pik " << k.pk << " " << beta << " " << Eik << " Z=" << t.Z << "   beta Eik=" << beta*Eik << endl;}
//   if (t.Z>0) return k.pk*exp(-beta*Eik)/t.Z;
//   return 0;
// }


double DAClusterizerInZ::Eik(const track_t & t, const vertex_t &k )const{
  return pow(t.z-k.z,2)/t.dz2;
}



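// The two update() methods below perform the deterministic-annealing update with
// the quantities defined in this file:
//   assignment probability   p_ik = pk_k * exp(-beta*Eik(i,k)) / Z_i
//   partition function       Z_i  = sum_k pk_k * exp(-beta*Eik(i,k))   (plus an outlier term in the second variant)
//   new vertex position      z_k  = sum_i pi_i*p_ik*z_i/dz2_i / sum_i pi_i*p_ik/dz2_i
//   new vertex weight        pk_k = sum_i pi_i*p_ik / sum_i pi_i       (first, mass-constrained variant only)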
double DAClusterizerInZ::update(
                                           double beta,
                                           vector<track_t> & tks,
                                           vector<vertex_t> & y
                                           )const{
  // update weights and vertex positions
  // mass constrained annealing without noise
  // returns the squared sum of changes of vertex positions

  //cout << endl << endl << "DAClusterizerInZ::update" << endl;
  unsigned int nt=tks.size();

  //initialize sums
  double sumpi=0;
  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
    k->se=0;    k->sw=0;   k->swz=0;
  }


  // loop over tracks
  for(unsigned int i=0; i<nt; i++){

    // update pik and Zi
    double Zi=0.;
    for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
      k->ei=exp(-beta*Eik(tks[i],*k));// cache exponential for one track at a time
      Zi += k->pk * k->ei;
    }
    tks[i].Z=Zi;

    // normalization for pk
    if (tks[i].Z>0){
      sumpi += tks[i].pi;
      // accumulate weighted z and weights for vertex update
      for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
        k->se += tks[i].pi* k->ei / Zi;
        double w = k->pk * tks[i].pi* k->ei / Zi / tks[i].dz2;
        k->sw  += w;
        k->swz += w * tks[i].z;
      }
    }else{
      sumpi += tks[i].pi;
    }


  } // end of track loop


  // now update z and pk
  double delta=0;
  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
    if ( k->sw > 0){
      double znew=k->swz/k->sw;
      delta+=pow(k->z-znew,2);
      k->z=znew;
    }else{
      edm::LogInfo("sumw") <<  "invalid sum of weights in fit: " << k->sw << endl;
      if(verbose_){cout << " a cluster melted away ?  pk=" << k->pk <<  " sumw=" << k->sw <<  endl;}
    }

    k->pk = k->pk * k->se / sumpi;
  }

  // return how much the prototypes moved
  return delta;
}



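// Variant with outlier rejection ("noise"): the partition function of each track
// contains an extra constant term rho0*exp(-beta*dzCutOff_^2), so a track that is
// far from every prototype is mostly assigned to the noise and barely pulls on the
// vertex positions; the vertex weights pk are not updated in this variant.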
double DAClusterizerInZ::update(
                                double beta,
                                vector<track_t> & tks,
                                vector<vertex_t> & y,
                                double & rho0
                                )const{
  // MVF style, no more vertex weights; update track weights and vertex positions, with noise
  // returns the squared sum of changes of vertex positions

  unsigned int nt=tks.size();

  //initialize sums
  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
    k->se=0;    k->sw=0;   k->swz=0;
  }


  // loop over tracks
  for(unsigned int i=0; i<nt; i++){

    // update pik and Zi
    double Zi=rho0*exp(-beta*dzCutOff_*dzCutOff_);// cut-off
    for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
      k->ei=exp(-beta*Eik(tks[i],*k));// cache exponential for one track at a time
      Zi += k->pk * k->ei;
    }
    tks[i].Z=Zi;

    // normalization
    if (tks[i].Z>0){
      //double pi0=exp(-beta*dzCutOff_*dzCutOff_)/tks[i].Z;
      //sumpi += tks[i].pi*(1.-pi0*rho0);  // exclude rho0 from the normalization of the cluster "mass"
      // accumulate weighted z and weights for vertex update
      for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
        k->se += tks[i].pi* k->ei / Zi;
        double w = k->pk * tks[i].pi* k->ei / Zi / tks[i].dz2;
        k->sw  += w;
        k->swz += w * tks[i].z;
      }
    }
    //else{  sumpi += tks[i].pi;  }


  } // end of track loop


  // now update z (pk is not updated in this variant)
  double delta=0;
  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
    if ( k->sw > 0){
      double znew=k->swz/k->sw;
      delta+=pow(k->z-znew,2);
      k->z=znew;
    }else{
      edm::LogInfo("sumw") <<  "invalid sum of weights in fit: " << k->sw << endl;
      if(verbose_){cout << " a cluster melted away ?  pk=" << k->pk <<  " sumw=" << k->sw <<  endl;}
    }

    //k->pk *= k->se / sumpi;
  }

  // return how much the prototypes moved
  return delta;
}




bool DAClusterizerInZ::merge(vector<vertex_t> & y, int nt)const{
  // merge clusters that collapsed or never separated, return true if vertices were merged, false otherwise

  if(y.size()<2)  return false;

  for(vector<vertex_t>::iterator k=y.begin(); (k+1)!=y.end(); k++){
    //if ((k+1)->z - k->z<1.e-2){  // note, no fabs here, maintains z-ordering  (with split()+merge() at every temperature)
    if (fabs((k+1)->z - k->z)<1.e-2){  // with fabs if only called after freeze-out (splitAll() at higher T)
      k->pk += (k+1)->pk;
      k->z=0.5*(k->z+(k+1)->z);
      y.erase(k+1);
      return true;
    }
  }

  return false;
}


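// purge() drops the least significant prototype: for each vertex it counts the
// tracks whose assignment probability comes within 10% of the maximum achievable
// value pmax = pk/(pk + rho0*exp(-beta*dzCutOff_^2)); among vertices with fewer
// than two such "unique" tracks, the one with the smallest summed assignment
// probability is removed, if any.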
bool DAClusterizerInZ::purge(vector<vertex_t> & y, vector<track_t> & tks, double & rho0, const double beta)const{
  // eliminate clusters with only one significant/unique track
  if(y.size()<2)  return false;

  unsigned int nt=tks.size();
  double sumpmin=nt;
  vector<vertex_t>::iterator k0=y.end();
  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
    int nUnique=0;
    double sump=0;
    double pmax=k->pk/(k->pk+rho0*exp(-beta*dzCutOff_*dzCutOff_));
    for(unsigned int i=0; i<nt; i++){
      if(tks[i].Z>0){
        //double p=pik(beta,tks[i],*k);
        double p=k->pk * exp(-beta*Eik(tks[i],*k)) / tks[i].Z;
        sump+=p;
        if( (p > 0.9*pmax) && (tks[i].pi>0) ){ nUnique++; }
      }
    }

    if((nUnique<2)&&(sump<sumpmin)){
      sumpmin=sump;
      k0=k;
    }
  }

  if(k0!=y.end()){
    if(verbose_){cout << "eliminating prototype at " << k0->z << " with sump=" << sumpmin << endl;}
    //rho0+=k0->pk;
    y.erase(k0);
    return true;
  }else{
    return false;
  }
}



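// beta0() performs a single vertex fit at infinite temperature (beta=0), where all
// assignments are equal, and estimates the first critical temperature as
//   Tc = 2 * sum_i w_i*(z_i - z_vertex)^2/dz2_i / sum_i w_i ,   with  w_i = pi_i/dz2_i .
// In the usual deterministic-annealing picture this is the temperature below which
// the single prototype becomes unstable against splitting; the returned beta sits
// just below 1/Tc on the cooling schedule (within about one cooling step), so the
// annealing starts at a temperature slightly above Tc.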
double DAClusterizerInZ::beta0(
                               double betamax,
                               vector<track_t> & tks,
                               vector<vertex_t> & y
                               )const{

  double T0=0;  // max Tc for beta=0
  // estimate critical temperature from beta=0 (T=inf)
  unsigned int nt=tks.size();

  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){

    // vertex fit at T=inf
    double sumwz=0;
    double sumw=0;
    for(unsigned int i=0; i<nt; i++){
      double w=tks[i].pi/tks[i].dz2;
      sumwz+=w*tks[i].z;
      sumw +=w;
    }
    k->z=sumwz/sumw;

    // estimate Tcrit (this could eventually be done in the same loop)
    double a=0, b=0;
    for(unsigned int i=0; i<nt; i++){
      double dx=tks[i].z-(k->z);
      double w=tks[i].pi/tks[i].dz2;
      a+=w*pow(dx,2)/tks[i].dz2;
      b+=w;
    }
    double Tc= 2.*a/b;  // the critical temperature of this vertex
    if(Tc>T0) T0=Tc;
  }// vertex loop (normally there should be only one vertex at beta=0)

  if (T0>1./betamax){
    return betamax/pow(coolingFactor_, int(log(T0*betamax)/log(coolingFactor_))-1 );
  }else{
    // ensure at least one annealing step
    return betamax/coolingFactor_;
  }
}



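// splitAll() seeds candidates for the next cooling step: every isolated prototype
// is replaced by a pair at z -/+ epsilon, each carrying half of the weight, while
// non-isolated prototypes are kept and, where they crowd their already-placed
// neighbour within zsep, pushed apart by epsilon.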
void DAClusterizerInZ::splitAll(
                         vector<track_t> & tks,
                         vector<vertex_t> & y
                         )const{


  double epsilon=1e-3;      // split all single vertices by 10 um
  double zsep=2*epsilon;    // split vertices that are isolated by at least zsep (vertices that haven't collapsed)
  vector<vertex_t> y1;

  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
    if ( ( (k==y.begin())|| (k-1)->z < k->z - zsep) && (((k+1)==y.end()  )|| (k+1)->z > k->z + zsep)) {
      // isolated prototype, split
      vertex_t vnew;
      vnew.z  = k->z -epsilon;
      (*k).z  = k->z+epsilon;
      vnew.pk= 0.5* (*k).pk;
      (*k).pk= 0.5* (*k).pk;
      y1.push_back(vnew);
      y1.push_back(*k);

    }else if( y1.empty() || (y1.back().z < k->z -zsep)){
      y1.push_back(*k);
    }else{
      y1.back().z -=epsilon;
      k->z+=epsilon;
      y1.push_back(*k);
    }
  }// vertex loop

  y=y1;
}

DAClusterizerInZ::DAClusterizerInZ(const edm::ParameterSet& conf)
{
  // some defaults to avoid uninitialized variables
  verbose_= conf.getUntrackedParameter<bool>("verbose", false);
  betamax_=0.1;
  betastop_  =1.0;
  coolingFactor_=0.8;
  maxIterations_=100;
  vertexSize_=0.05;  // 0.5 mm
  dzCutOff_=4.0;   // the Adaptive Fitter uses 3.0, but that appears to be a bit tight here sometimes

  // configure

  double Tmin = conf.getParameter<double>("Tmin");
  vertexSize_ = conf.getParameter<double>("vertexSize");
  coolingFactor_ = conf.getParameter<double>("coolingFactor");
  d0CutOff_  =  conf.getParameter<double>("d0CutOff");
  dzCutOff_  =  conf.getParameter<double>("dzCutOff");
  maxIterations_=100;
  if (Tmin==0){
    cout << "DAClusterizerInZ: invalid Tmin " << Tmin << ", reset to default " << 1./betamax_ << endl;
  }else{
    betamax_ = 1./Tmin;
  }

}

void DAClusterizerInZ::dump(const double beta, const vector<vertex_t> & y, const vector<track_t> & tks0, int verbosity)const{

  // copy and sort for nicer printout
  vector<track_t> tks;
  for(vector<track_t>::const_iterator t=tks0.begin(); t!=tks0.end(); t++){tks.push_back(*t); }
  stable_sort(tks.begin(), tks.end(), recTrackLessZ1);

  cout << "-----DAClusterizerInZ::dump ----" << endl;
  cout << "beta=" << beta << "   betamax= " << betamax_ << endl;
  cout << "                                                               z= ";
  cout.precision(4);
  for(vector<vertex_t>::const_iterator k=y.begin(); k!=y.end(); k++){
    cout  <<  setw(8) << fixed << k->z ;
  }
  cout << endl << "T=" << setw(15) << 1./beta <<"                                            Tc= ";
  cout << endl << "                                                               pk=";
  double sumpk=0;
  for(vector<vertex_t>::const_iterator k=y.begin(); k!=y.end(); k++){
    cout <<  setw(8) <<  setprecision(3) <<  fixed << k->pk;
    sumpk+=k->pk;
  }
  cout  << endl;

  if(verbosity>0){
    double E=0, F=0;
    cout << endl;
    cout << "----       z +/- dz                ip +/-dip       pt    phi  eta    weights  ----" << endl;
    cout.precision(4);
    for(unsigned int i=0; i<tks.size(); i++){
      if (tks[i].Z>0){  F-=log(tks[i].Z)/beta;}
      double tz= tks[i].z;
      cout <<  setw (3)<< i << ")" <<  setw (8) << fixed << setprecision(4)<<  tz << " +/-" <<  setw (6)<< sqrt(tks[i].dz2);

      if(tks[i].tt->track().quality(reco::TrackBase::highPurity)){ cout << " *";}else{cout <<"  ";}
      if(tks[i].tt->track().hitPattern().hasValidHitInFirstPixelBarrel()){cout <<"+";}else{cout << "-";}
      cout << setw(1) << tks[i].tt->track().hitPattern().pixelBarrelLayersWithMeasurement(); // see DataFormats/TrackReco/interface/HitPattern.h
      cout << setw(1) << tks[i].tt->track().hitPattern().pixelEndcapLayersWithMeasurement();
      cout << setw(1) << hex << tks[i].tt->track().hitPattern().trackerLayersWithMeasurement()-tks[i].tt->track().hitPattern().pixelLayersWithMeasurement() <<dec;
      cout << "=" << setw(1)<<hex <<tks[i].tt->track().trackerExpectedHitsOuter().numberOfHits() << dec;

      Measurement1D IP=tks[i].tt->stateAtBeamLine().transverseImpactParameter();
      cout << setw (8) << IP.value() << "+/-" << setw (6) << IP.error();
      cout << " " << setw(6) << setprecision(2)  << tks[i].tt->track().pt()*tks[i].tt->track().charge();
      cout << " " << setw(5) << setprecision(2) << tks[i].tt->track().phi()
           << " "  << setw(5)  << setprecision(2)   << tks[i].tt->track().eta() ;

      double sump=0.;
      for(vector<vertex_t>::const_iterator k=y.begin(); k!=y.end(); k++){
        if((tks[i].pi>0)&&(tks[i].Z>0)){
          //double p=pik(beta,tks[i],*k);
          double p=k->pk * exp(-beta*Eik(tks[i],*k)) / tks[i].Z;
          if( p > 0.0001){
            cout <<  setw (8) <<  setprecision(3) << p;
          }else{
            cout << "    .   ";
          }
          E+=p*Eik(tks[i],*k);
          sump+=p;
        }else{
            cout << "        ";
        }
      }
      cout << endl;
    }
    cout << endl << "T=" << 1/beta  << " E=" << E << " n="<< y.size() << "  F= " << F <<  endl <<  "----------" << endl;
  }
}




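// vertices() runs the full annealing sequence: fill the track list, start from the
// first critical temperature found by beta0(), cool by coolingFactor_ (splitting
// and re-thermalizing at each step) until T<Tmin, merge collapsed prototypes,
// switch on outlier rejection via rho0, merge again, then keep cooling down to
// betastop_ while purging insignificant vertices, and finally assign each track to
// a vertex for which its assignment probability exceeds 0.5.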
vector< TransientVertex >
DAClusterizerInZ::vertices(const vector<reco::TransientTrack> & tracks, const int verbosity)
const
{

  vector<track_t> tks=fill(tracks);
  unsigned int nt=tracks.size();
  double rho0=0.0;  // start with no outlier rejection

  vector< TransientVertex > clusters;
  if (tks.empty()) return clusters;

  vector<vertex_t> y; // the vertex prototypes

  // initialize: single vertex at infinite temperature
  vertex_t vstart;
  vstart.z=0.;
  vstart.pk=1.;
  y.push_back(vstart);
  int niter=0;      // number of iterations


  // estimate first critical temperature
  double beta=beta0(betamax_, tks, y);
  niter=0; while((update(beta, tks,y)>1.e-6)  && (niter++ < maxIterations_)){ }


  // annealing loop, stop when T<Tmin  (i.e. beta>1/Tmin)
  while(beta<betamax_){

    beta=beta/coolingFactor_;
    splitAll(tks,y);

    // make sure we are not too far from equilibrium before cooling further
    niter=0; while((update(beta, tks,y)>1.e-6)  && (niter++ < maxIterations_)){ }

  }


  // merge collapsed clusters
  while(merge(y,tks.size())){}
  if(verbose_  ){ cout << "dump after 1st merging " << endl;  dump(beta,y,tks,2);}



  // switch on outlier rejection
  //rho0=exp(beta*dzCutOff_*dzCutOff_)/nt; if(rho0>0.1) rho0=0.1;
  rho0=1./nt; for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){ k->pk =1.; }  // democratic
  niter=0; while((update(beta, tks,y,rho0) > 1.e-8)  && (niter++ < maxIterations_)){  }
  if(verbose_  ){ cout << "rho0=" << rho0 <<   " niter=" << niter <<  endl; dump(beta,y,tks,2);}


  // merge again  (some clusters split by outliers collapse here)
  while(merge(y,tks.size())){}
  if(verbose_  ){ cout << "dump after 2nd merging " << endl;  dump(beta,y,tks,2);}


  // continue from freeze-out to Tstop (=1) without splitting, eliminate insignificant vertices
  while(beta<=betastop_){
    while(purge(y,tks,rho0, beta)){
      niter=0; while((update(beta, tks,y,rho0) > 1.e-6)  && (niter++ < maxIterations_)){  }
    }
    beta/=coolingFactor_;
    niter=0; while((update(beta, tks,y,rho0) > 1.e-6)  && (niter++ < maxIterations_)){  }
  }

  if(verbose_){
   cout << "Final result, rho0=" << rho0 << endl;
   dump(beta,y,tks,2);
  }


  // select significant tracks and use a TransientVertex as a container
  GlobalError dummyError;


  // ensure correct normalization of probabilities, should make double assignment practically impossible
  for(unsigned int i=0; i<nt; i++){
    tks[i].Z=rho0*exp(-beta*dzCutOff_*dzCutOff_);
    for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
      tks[i].Z+=k->pk * exp(-beta*Eik(tks[i],*k));
    }
  }


  for(vector<vertex_t>::iterator k=y.begin(); k!=y.end(); k++){
    GlobalPoint pos(0, 0, k->z);
    vector< reco::TransientTrack > vertexTracks;
    for(unsigned int i=0; i<nt; i++){
      if(tks[i].Z>0){
        double p=k->pk * exp(-beta*Eik(tks[i],*k)) / tks[i].Z;
        if( (tks[i].pi>0) && ( p > 0.5 ) ){ vertexTracks.push_back(*(tks[i].tt)); tks[i].Z=0; } // setting Z=0 excludes double assignment
      }
    }
    TransientVertex v(pos, dummyError, vertexTracks, 0);
    clusters.push_back(v);
  }


  return clusters;

}




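// clusterize() is a thin wrapper around vertices(): vertices whose z positions are
// within 2*vertexSize_ of each other are merged into a single cluster of tracks
// before the clusters are returned.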
vector< vector<reco::TransientTrack> >
DAClusterizerInZ::clusterize(const vector<reco::TransientTrack> & tracks)
  const
{
  if(verbose_) {
    cout << "###################################################" << endl;
    cout << "# DAClusterizerInZ::clusterize   nt="<<tracks.size() << endl;
    cout << "###################################################" << endl;
  }

  vector< vector<reco::TransientTrack> > clusters;
  vector< TransientVertex > pv=vertices(tracks);

  if(verbose_){ cout << "# DAClusterizerInZ::clusterize   pv.size="<<pv.size() << endl;  }
  if (pv.size()==0){ return clusters;}


  // fill into clusters and merge
  vector< reco::TransientTrack>  aCluster=pv.begin()->originalTracks();

  for(vector<TransientVertex>::iterator k=pv.begin()+1; k!=pv.end(); k++){
    if ( k->position().z() - (k-1)->position().z()> (2*vertexSize_) ){
      // close a cluster
      clusters.push_back(aCluster);
      aCluster.clear();
    }
    for(unsigned int i=0; i<k->originalTracks().size(); i++){ aCluster.push_back( k->originalTracks().at(i)); }

  }
  clusters.push_back(aCluster);


  return clusters;

}