11 #include "vdt/vdtMath.h"
13 #include <Math/SMatrix.h>
24 maxIterations_ = 1000;
37 coolingFactor_ = conf.
getParameter<
double>(
"coolingFactor");
40 uniquetrkweight_ = conf.
getParameter<
double>(
"uniquetrkweight");
43 convergence_mode_ = conf.
getParameter<
int>(
"convergence_mode");
48 std::cout <<
"DAClusterizerinZ_vect: mintrkweight = " << mintrkweight_ << std::endl;
49 std::cout <<
"DAClusterizerinZ_vect: uniquetrkweight = " << uniquetrkweight_ << std::endl;
50 std::cout <<
"DAClusterizerinZ_vect: zmerge = " << zmerge_ << std::endl;
51 std::cout <<
"DAClusterizerinZ_vect: Tmin = " <<
Tmin << std::endl;
53 std::cout <<
"DAClusterizerinZ_vect: Tstop = " <<
Tstop << std::endl;
54 std::cout <<
"DAClusterizerinZ_vect: vertexSize = " << vertexSize_ << std::endl;
55 std::cout <<
"DAClusterizerinZ_vect: coolingFactor = " << coolingFactor_ << std::endl;
56 std::cout <<
"DAClusterizerinZ_vect: d0CutOff = " << d0CutOff_ << std::endl;
57 std::cout <<
"DAClusterizerinZ_vect: dzCutOff = " << dzCutOff_ << std::endl;
58 std::cout <<
"DAClusterizerInZ_vect: zrange = " << sel_zrange_ << std::endl;
59 std::cout <<
"DAClusterizerinZ_vect: convergence mode = " << convergence_mode_ << std::endl;
60 std::cout <<
"DAClusterizerinZ_vect: delta_highT = " << delta_highT_ << std::endl;
61 std::cout <<
"DAClusterizerinZ_vect: delta_lowT = " << delta_lowT_ << std::endl;
64 std::cout <<
"DAClusterizerinZ_vect: DEBUGLEVEL " << DEBUGLEVEL << std::endl;
67 if (convergence_mode_ > 1) {
69 <<
"DAClusterizerInZ_vect: invalid convergence_mode" << convergence_mode_ <<
" reset to default " << 0;
70 convergence_mode_ = 0;
75 <<
"DAClusterizerInZ_vect: invalid Tmin" <<
Tmin <<
" reset to default " << 1. / betamax_;
82 <<
"DAClusterizerInZ_vect: invalid Tpurge" <<
Tpurge <<
" set to " <<
Tmin;
89 <<
"DAClusterizerInZ_vect: invalid Tstop" <<
Tstop <<
" set to " <<
max(1.,
Tpurge);
92 betastop_ = 1. /
Tstop;
96 inline double local_exp(
double const& inp) {
return vdt::fast_exp(inp); }
98 inline void local_exp_list(
double const* __restrict__ arg_inp,
double* __restrict__ arg_out,
const int arg_arr_size) {
99 for (
auto i = 0;
i != arg_arr_size; ++
i)
100 arg_out[
i] = vdt::fast_exp(arg_inp[
i]);
103 inline void local_exp_list_range(
double const* __restrict__ arg_inp,
104 double* __restrict__ arg_out,
107 for (
auto i = kmin;
i != kmax; ++
i)
108 arg_out[
i] = vdt::fast_exp(arg_inp[
i]);
114 if (!(nv == 999999)) {
120 if (!(
nt == 999999)) {
129 assert(
v.ei_cache.size() == nv);
137 assert(
v.ei_cache_ptr == &
v.ei_cache.front());
138 assert(
v.swz_ptr == &
v.swz.front());
140 assert(
v.swE_ptr == &
v.swE.front());
142 for (
unsigned int k = 0;
k < nv - 1;
k++) {
143 if (
v.z[
k] <=
v.z[
k + 1])
145 cout <<
" Z, cluster z-ordering assertion failure z[" <<
k <<
"] =" <<
v.z[
k] <<
" z[" <<
k + 1
146 <<
"] =" <<
v.z[
k + 1] << endl;
165 for (
unsigned int i = 0;
i <
nt;
i++) {
168 cout <<
"track vertex range assertion failure" <<
i <<
"/" <<
nt <<
" kmin,kmax=" << tks.
kmin[
i] <<
", "
169 << tks.
kmax[
i] <<
" nv=" << nv << endl;
172 for (
unsigned int i = 0;
i <
nt;
i++) {
181 for (
auto it =
tracks.begin(); it !=
tracks.end(); it++) {
182 if (!(*it).isValid())
185 double t_z = ((*it).stateAtBeamLine().trackStateAtPCA()).
position().z();
186 if (std::fabs(
t_z) > 1000.)
188 auto const& t_mom = (*it).stateAtBeamLine().trackStateAtPCA().momentum();
191 double t_dz2 =
std::pow((*it).track().dzError(), 2)
205 LogTrace(
"DAClusterizerinZ_vect") <<
t_z <<
' ' << t_dz2 <<
' ' << t_pi;
211 if (DEBUGLEVEL > 0) {
// Assignment "energy" of a track-vertex pair in the deterministic-annealing
// formalism: the squared z-distance between track position t_z and vertex
// position k_z, weighted by the track's inverse squared z-resolution t_dz2.
// The explicit product replaces std::pow(x, 2): pow is a general
// transcendental routine and is far slower for small integer exponents,
// and this function sits on the clusterizer's hot path.
inline double Eik(double t_z, double k_z, double t_dz2) {
  const double dz = t_z - k_z;
  return dz * dz * t_dz2;
}
224 const unsigned int nv = gvertices.
getSize();
225 const unsigned int nt = gtracks.
getSize();
228 edm::LogWarning(
"DAClusterizerinZ_vect") <<
"empty cluster list in set_vtx_range";
232 for (
auto itrack = 0
U; itrack <
nt; ++itrack) {
236 unsigned int kmin =
min(nv - 1, gtracks.
kmin[itrack]);
239 while ((kmin > 0) && (gvertices.
z_ptr[kmin - 1] >
zmin)) {
243 while ((kmin < (nv - 1)) && (gvertices.
z_ptr[kmin] <
zmin)) {
249 unsigned int kmax =
min(nv - 1, gtracks.
kmax[itrack] - 1);
253 while ((kmax < (nv - 1)) && (gvertices.
z_ptr[kmax + 1] <
zmax)) {
257 while ((kmax > 0) && (gvertices.
z_ptr[kmax] >
zmax)) {
263 gtracks.
kmin[itrack] = kmin;
264 gtracks.
kmax[itrack] = kmax + 1;
267 gtracks.
kmax[itrack] =
min(nv,
max(kmin, kmax) + 1);
273 const unsigned int nt = gtracks.
getSize();
274 const unsigned int nv = gvertices.
getSize();
275 for (
auto itrack = 0
U; itrack <
nt; ++itrack) {
276 gtracks.
kmin[itrack] = 0;
277 gtracks.
kmax[itrack] = nv;
287 const unsigned int nt = gtracks.
getSize();
288 const unsigned int nv = gvertices.
getSize();
300 Z_init = rho0 * local_exp(-
beta * dzCutOff_ * dzCutOff_);
304 auto kernel_calc_exp_arg_range = [
beta](
const unsigned int itrack,
307 const unsigned int kmin,
308 const unsigned int kmax) {
309 const double track_z =
tracks.z_ptr[itrack];
310 const double botrack_dz2 = -
beta *
tracks.dz2_ptr[itrack];
313 for (
unsigned int ivertex = kmin; ivertex < kmax; ++ivertex) {
314 auto mult_res = track_z -
vertices.z_ptr[ivertex];
315 vertices.ei_cache_ptr[ivertex] = botrack_dz2 * (mult_res * mult_res);
319 auto kernel_add_Z_range = [Z_init](
320 vertex_t const&
vertices,
const unsigned int kmin,
const unsigned int kmax) ->
double {
321 double ZTemp = Z_init;
322 for (
unsigned int ivertex = kmin; ivertex < kmax; ++ivertex) {
328 auto kernel_calc_normalization_range = [](
const unsigned int track_num,
331 const unsigned int kmin,
332 const unsigned int kmax) {
333 auto tmp_trk_pi = tks_vec.pi_ptr[track_num];
334 auto o_trk_Z_sum = 1. / tks_vec.Z_sum_ptr[track_num];
335 auto o_trk_dz2 = tks_vec.dz2_ptr[track_num];
336 auto tmp_trk_z = tks_vec.z_ptr[track_num];
339 for (
unsigned int k = kmin;
k < kmax; ++
k) {
340 y_vec.se_ptr[
k] += y_vec.ei_ptr[
k] * (tmp_trk_pi * o_trk_Z_sum);
341 auto w = y_vec.pk_ptr[
k] * y_vec.ei_ptr[
k] * (tmp_trk_pi * o_trk_Z_sum * o_trk_dz2);
342 y_vec.sw_ptr[
k] +=
w;
343 y_vec.swz_ptr[
k] +=
w * tmp_trk_z;
347 for (
auto ivertex = 0
U; ivertex < nv; ++ivertex) {
348 gvertices.
se_ptr[ivertex] = 0.0;
349 gvertices.
sw_ptr[ivertex] = 0.0;
350 gvertices.
swz_ptr[ivertex] = 0.0;
354 for (
auto itrack = 0
U; itrack <
nt; ++itrack) {
355 unsigned int kmin = gtracks.
kmin[itrack];
356 unsigned int kmax = gtracks.
kmax[itrack];
359 assert((kmin < kmax) && (kmax <= nv));
363 kernel_calc_exp_arg_range(itrack, gtracks, gvertices, kmin, kmax);
365 gtracks.
Z_sum_ptr[itrack] = kernel_add_Z_range(gvertices, kmin, kmax);
370 sumpi += gtracks.
pi_ptr[itrack];
372 if (gtracks.
Z_sum_ptr[itrack] > 1.e-100) {
373 kernel_calc_normalization_range(itrack, gtracks, gvertices, kmin, kmax);
381 for (
unsigned int ivertex = 0; ivertex < nv; ++ivertex) {
389 auto osumpi = 1. / sumpi;
390 for (
unsigned int ivertex = 0; ivertex < nv; ++ivertex)
396 delta = kernel_calc_z(gvertices);
406 const unsigned int nt = gtracks.
getSize();
407 const unsigned int nv = gvertices.
getSize();
418 Z_init = rho0 * local_exp(-
beta * dzCutOff_ * dzCutOff_);
422 auto kernel_calc_exp_arg_range = [
beta](
const unsigned int itrack,
425 const unsigned int kmin,
426 const unsigned int kmax) {
427 const double track_z =
tracks.z_ptr[itrack];
428 const double botrack_dz2 = -
beta *
tracks.dz2_ptr[itrack];
431 for (
unsigned int ivertex = kmin; ivertex < kmax; ++ivertex) {
432 auto mult_res = track_z -
vertices.z_ptr[ivertex];
433 vertices.ei_cache_ptr[ivertex] = botrack_dz2 * (mult_res * mult_res);
437 auto kernel_add_Z_range = [Z_init](
438 vertex_t const&
vertices,
const unsigned int kmin,
const unsigned int kmax) ->
double {
439 double ZTemp = Z_init;
440 for (
unsigned int ivertex = kmin; ivertex < kmax; ++ivertex) {
446 auto kernel_calc_normalization_range = [
beta](
const unsigned int track_num,
449 const unsigned int kmin,
450 const unsigned int kmax) {
451 auto tmp_trk_pi = tks_vec.pi_ptr[track_num];
452 auto o_trk_Z_sum = 1. / tks_vec.Z_sum_ptr[track_num];
453 auto o_trk_dz2 = tks_vec.dz2_ptr[track_num];
454 auto tmp_trk_z = tks_vec.z_ptr[track_num];
455 auto obeta = -1. /
beta;
458 for (
unsigned int k = kmin;
k < kmax; ++
k) {
459 y_vec.se_ptr[
k] += y_vec.ei_ptr[
k] * (tmp_trk_pi * o_trk_Z_sum);
460 auto w = y_vec.pk_ptr[
k] * y_vec.ei_ptr[
k] * (tmp_trk_pi * o_trk_Z_sum * o_trk_dz2);
461 y_vec.sw_ptr[
k] +=
w;
462 y_vec.swz_ptr[
k] +=
w * tmp_trk_z;
463 y_vec.swE_ptr[
k] +=
w * y_vec.ei_cache_ptr[
k] * obeta;
467 for (
auto ivertex = 0
U; ivertex < nv; ++ivertex) {
468 gvertices.
se_ptr[ivertex] = 0.0;
469 gvertices.
sw_ptr[ivertex] = 0.0;
470 gvertices.
swz_ptr[ivertex] = 0.0;
471 gvertices.
swE_ptr[ivertex] = 0.0;
475 for (
auto itrack = 0
U; itrack <
nt; ++itrack) {
476 unsigned int kmin = gtracks.
kmin[itrack];
477 unsigned int kmax = gtracks.
kmax[itrack];
479 kernel_calc_exp_arg_range(itrack, gtracks, gvertices, kmin, kmax);
481 gtracks.
Z_sum_ptr[itrack] = kernel_add_Z_range(gvertices, kmin, kmax);
486 sumpi += gtracks.
pi_ptr[itrack];
488 if (gtracks.
Z_sum_ptr[itrack] > 1.e-100) {
489 kernel_calc_normalization_range(itrack, gtracks, gvertices, kmin, kmax);
497 for (
unsigned int ivertex = 0; ivertex < nv; ++ivertex) {
506 auto osumpi = 1. / sumpi;
507 for (
unsigned int ivertex = 0; ivertex < nv; ++ivertex)
513 delta = kernel_calc_z(gvertices);
522 auto nv =
v.getSize();
524 for (
auto i = 0
U;
i <
nt;
i++) {
526 for (
auto k = 0u;
k < nv;
k++) {
527 double Eik = (tks.
z[
k] -
v.z[
i]) * (tks.
z[
k] -
v.z[
i]) * tks.
dz2[
i];
528 if ((
beta * Eik) < 30) {
529 Z +=
v.pk[
k] * local_exp(-
beta * Eik);
542 unsigned int niter = 0;
544 double delta_max = delta_lowT_;
546 if (convergence_mode_ == 0) {
547 delta_max = delta_max0;
548 }
else if (convergence_mode_ == 1) {
552 set_vtx_range(
beta, tks,
v);
553 double delta_sum_range = 0;
554 std::vector<double>
z0 =
v.z;
556 while (niter++ < maxIterations_) {
558 delta_sum_range +=
delta;
560 if (delta_sum_range > zrange_min_) {
561 for (
unsigned int k = 0;
k <
v.getSize();
k++) {
563 set_vtx_range(
beta, tks,
v);
571 if (
delta < delta_max) {
577 if (DEBUGLEVEL > 0) {
578 std::cout <<
"DAClusterizerInZ_vect.thermalize niter = " << niter <<
" at T = " << 1 /
beta
579 <<
" nv = " <<
v.getSize() << std::endl;
592 const unsigned int nv = y.getSize();
598 std::vector<std::pair<double, unsigned int> >
critical;
599 for (
unsigned int k = 0; (
k + 1) < nv;
k++) {
600 if (std::fabs(y.z_ptr[
k + 1] - y.z_ptr[
k]) < zmerge_) {
601 critical.push_back(make_pair(std::fabs(y.z_ptr[
k + 1] - y.z_ptr[
k]),
k));
607 std::stable_sort(
critical.begin(),
critical.end(), std::less<std::pair<double, unsigned int> >());
609 for (
unsigned int ik = 0; ik <
critical.size(); ik++) {
611 double rho = y.pk_ptr[
k] + y.pk_ptr[
k + 1];
612 double swE = y.swE_ptr[
k] + y.swE_ptr[
k + 1] -
613 y.pk_ptr[
k] * y.pk_ptr[
k + 1] / rho *
std::pow(y.z_ptr[
k + 1] - y.z_ptr[
k], 2);
614 double Tc = 2 * swE / (y.sw_ptr[
k] + y.sw_ptr[
k + 1]);
619 if (DEBUGLEVEL > 1) {
620 std::cout <<
"merging " <<
fixed << setprecision(4) << y.z_ptr[
k + 1] <<
" and " << y.z_ptr[
k]
621 <<
" Tc = " << Tc <<
" sw = " << y.sw_ptr[
k] + y.sw_ptr[
k + 1] << std::endl;
626 y.z_ptr[
k] = (y.pk_ptr[
k] * y.z_ptr[
k] + y.pk_ptr[
k + 1] * y.z_ptr[
k + 1]) / rho;
628 y.z_ptr[
k] = 0.5 * (y.z_ptr[
k] + y.z_ptr[
k + 1]);
631 y.sw_ptr[
k] += y.sw_ptr[
k + 1];
633 y.removeItem(
k + 1, tks);
634 set_vtx_range(
beta, tks, y);
644 constexpr
double eps = 1.e-100;
646 const unsigned int nv = y.getSize();
653 unsigned int k0 = nv;
655 std::vector<double> inverse_zsums(
nt), arg_cache(
nt), eik_cache(
nt), pcut_cache(nv);
656 double* __restrict__ pinverse_zsums;
657 double* __restrict__ parg_cache;
658 double* __restrict__ peik_cache;
659 double* __restrict__ ppcut_cache;
660 pinverse_zsums = inverse_zsums.data();
661 parg_cache = arg_cache.data();
662 peik_cache = eik_cache.data();
663 ppcut_cache = pcut_cache.data();
664 for (
unsigned i = 0;
i <
nt; ++
i) {
667 const auto rhoconst = rho0 * local_exp(-
beta * dzCutOff_ * dzCutOff_);
668 for (
unsigned int k = 0;
k < nv;
k++) {
669 const double pmax = y.pk_ptr[
k] / (y.pk_ptr[
k] + rhoconst);
670 ppcut_cache[
k] = uniquetrkweight_ * pmax;
673 for (
unsigned int k = 0;
k < nv;
k++) {
674 for (
unsigned int i = 0;
i <
nt; ++
i) {
675 const auto track_z = tks.
z_ptr[
i];
677 const auto mult_resz = track_z - y.z_ptr[
k];
678 parg_cache[
i] = botrack_dz2 * (mult_resz * mult_resz);
680 local_exp_list(parg_cache, peik_cache,
nt);
684 for (
unsigned int i = 0;
i <
nt; ++
i) {
685 const auto p = y.pk_ptr[
k] * peik_cache[
i] * pinverse_zsums[
i];
687 nUnique += ((
p > ppcut_cache[
k]) & (tks.
pi_ptr[
i] > 0)) ? 1 : 0;
690 if ((nUnique < 2) && (sump < sumpmin)) {
699 if (DEBUGLEVEL > 1) {
700 std::cout <<
"eliminating prototype at " << std::setw(10) << std::setprecision(4) << y.z_ptr[
k0]
701 <<
" with sump=" << sumpmin <<
" rho*nt =" << y.pk_ptr[
k0] *
nt << endl;
705 y.removeItem(
k0, tks);
706 set_vtx_range(
beta, tks, y);
717 const unsigned int nv = y.getSize();
719 for (
unsigned int k = 0;
k < nv;
k++) {
723 for (
unsigned int i = 0;
i <
nt;
i++) {
729 y.z_ptr[
k] = sumwz / sumw;
733 for (
unsigned int i = 0;
i <
nt;
i++) {
739 double Tc = 2. *
a /
b;
747 if (DEBUGLEVEL > 0) {
748 std::cout <<
"DAClusterizerInZ_vect.beta0: Tc = " << T0 << std::endl;
750 std::cout <<
"DAClusterizerInZ_vect.beta0: nstep = " << coolingsteps << std::endl;
754 if (T0 > 1. / betamax) {
757 return betamax *
std::pow(coolingFactor_, coolingsteps);
760 return betamax * coolingFactor_;
770 unsigned int nv = y.getSize();
774 std::vector<std::pair<double, unsigned int> >
critical;
775 for (
unsigned int k = 0;
k < nv;
k++) {
776 double Tc = 2 * y.swE_ptr[
k] / y.sw_ptr[
k];
784 std::stable_sort(
critical.begin(),
critical.end(), std::greater<std::pair<double, unsigned int> >());
789 for (
unsigned int ic = 0; ic <
critical.size(); ic++) {
793 double p1 = 0, z1 = 0, w1 = 0;
794 double p2 = 0,
z2 = 0,
w2 = 0;
795 for (
unsigned int i = 0;
i <
nt;
i++) {
798 double tl = tks.
z_ptr[
i] < y.z_ptr[
k] ? 1. : 0.;
803 if (std::fabs(
arg) < 20) {
804 double t = local_exp(-
arg);
833 if ((
k > 0) && (z1 < (0.6 * y.z_ptr[
k] + 0.4 * y.z_ptr[
k - 1]))) {
834 z1 = 0.6 * y.z_ptr[
k] + 0.4 * y.z_ptr[
k - 1];
836 if ((
k + 1 < nv) && (
z2 > (0.6 * y.z_ptr[
k] + 0.4 * y.z_ptr[
k + 1]))) {
837 z2 = 0.6 * y.z_ptr[
k] + 0.4 * y.z_ptr[
k + 1];
842 if (DEBUGLEVEL > 1) {
843 if (std::fabs(y.z_ptr[
k] - zdumpcenter_) < zdumpwidth_) {
845 <<
std::fixed << std::setprecision(4) << y.z_ptr[
k] <<
" --> " << z1 <<
"," <<
z2 <<
" [" <<
p1
859 double pk1 =
p1 * y.pk_ptr[
k] / (
p1 +
p2);
860 double pk2 =
p2 * y.pk_ptr[
k] / (
p1 +
p2);
863 y.insertItem(
k, z1, pk1, tks);
870 for (
unsigned int jc = ic; jc <
critical.size(); jc++) {
897 clear_vtx_range(tks, y);
900 double beta = beta0(betamax_, tks, y);
906 thermalize(
beta, tks, y, delta_highT_);
910 double betafreeze = betamax_ *
sqrt(coolingFactor_);
912 while (
beta < betafreeze) {
913 updateTc(
beta, tks, y, rho0);
915 updateTc(
beta, tks, y, rho0);
920 set_vtx_range(
beta, tks, y);
921 thermalize(
beta, tks, y, delta_highT_);
927 if (DEBUGLEVEL > 0) {
928 std::cout <<
"DAClusterizerInZ_vect::vertices :"
929 <<
"last round of splitting" << std::endl;
933 set_vtx_range(
beta, tks, y);
934 updateTc(
beta, tks, y, rho0);
937 set_vtx_range(
beta, tks, y);
938 updateTc(
beta, tks, y, rho0);
941 unsigned int ntry = 0;
944 set_vtx_range(
beta, tks, y);
945 thermalize(
beta, tks, y, delta_highT_, 0.);
946 updateTc(
beta, tks, y, rho0);
948 updateTc(
beta, tks, y, rho0);
957 if (DEBUGLEVEL > 0) {
958 std::cout <<
"DAClusterizerInZ_vect::vertices :"
959 <<
"turning on outlier rejection at T=" << 1 /
beta << std::endl;
966 for (
unsigned int a = 0;
a < 5;
a++) {
971 thermalize(
beta, tks, y, delta_lowT_, rho0);
975 if (DEBUGLEVEL > 0) {
976 std::cout <<
"DAClusterizerInZ_vect::vertices :"
977 <<
"merging with outlier rejection at T=" << 1 /
beta << std::endl;
985 set_vtx_range(
beta, tks, y);
991 if (DEBUGLEVEL > 0) {
992 std::cout <<
"DAClusterizerInZ_vect::vertices :"
993 <<
"after merging with outlier rejection at T=" << 1 /
beta << std::endl;
1000 while (
beta < betapurge_) {
1002 set_vtx_range(
beta, tks, y);
1003 thermalize(
beta, tks, y, delta_lowT_, rho0);
1008 if (DEBUGLEVEL > 0) {
1009 std::cout <<
"DAClusterizerInZ_vect::vertices :"
1010 <<
"purging at T=" << 1 /
beta << std::endl;
1015 while (purge(y, tks, rho0,
beta)) {
1016 thermalize(
beta, tks, y, delta_lowT_, rho0);
1021 if (DEBUGLEVEL > 0) {
1022 std::cout <<
"DAClusterizerInZ_vect::vertices :"
1023 <<
"last cooling T=" << 1 /
beta << std::endl;
1028 while (
beta < betastop_) {
1030 thermalize(
beta, tks, y, delta_lowT_, rho0);
1035 if (DEBUGLEVEL > 0) {
1036 std::cout <<
"DAClusterizerInZ_vect::vertices :"
1037 <<
"stop cooling at T=" << 1 /
beta << std::endl;
1044 GlobalError dummyError(0.01, 0, 0.01, 0., 0., 0.01);
1047 const unsigned int nv = y.getSize();
1048 for (
unsigned int k = 0;
k < nv;
k++)
1054 const auto z_sum_init = rho0 * local_exp(-
beta * dzCutOff_ * dzCutOff_);
1055 for (
unsigned int i = 0;
i <
nt;
i++)
1059 for (
unsigned int k = 0;
k < nv;
k++) {
1060 for (
unsigned int i = 0;
i <
nt;
i++)
1064 for (
unsigned int k = 0;
k < nv;
k++) {
1067 vector<reco::TransientTrack> vertexTracks;
1068 for (
unsigned int i = 0;
i <
nt;
i++) {
1071 if ((tks.
pi_ptr[
i] > 0) && (
p > mintrkweight_)) {
1072 vertexTracks.push_back(*(tks.
tt[
i]));
1085 const vector<reco::TransientTrack>&
tracks)
const {
1086 vector<vector<reco::TransientTrack> >
clusters;
1090 if (DEBUGLEVEL > 0) {
1091 std::cout <<
"###################################################" << endl;
1092 std::cout <<
"# vectorized DAClusterizerInZ_vect::clusterize nt=" <<
tracks.size() << endl;
1093 std::cout <<
"# DAClusterizerInZ_vect::clusterize pv.size=" <<
pv.size() << endl;
1094 std::cout <<
"###################################################" << endl;
1103 vector<reco::TransientTrack> aCluster =
pv.begin()->originalTracks();
1105 for (
auto k =
pv.begin() + 1;
k !=
pv.end();
k++) {
1108 if (aCluster.size() > 1) {
1113 std::cout <<
" one track cluster at " <<
k->position().z() <<
" suppressed" << std::endl;
1118 for (
unsigned int i = 0;
i <
k->originalTracks().size();
i++) {
1119 aCluster.push_back(
k->originalTracks()[
i]);
1129 const unsigned int nv = y.getSize();
1132 std::vector<unsigned int> iz;
1133 for (
unsigned int j = 0;
j <
nt;
j++) {
1136 std::sort(iz.begin(), iz.end(), [tks](
unsigned int a,
unsigned int b) {
return tks.
z_ptr[
a] < tks.
z_ptr[
b]; });
1138 std::cout <<
"-----DAClusterizerInZ::dump ----" << nv <<
" clusters " << std::endl;
1140 for (
unsigned int ivertex = 0; ivertex < nv; ++ivertex) {
1141 if (std::fabs(y.z_ptr[ivertex] - zdumpcenter_) < zdumpwidth_) {
1142 std::cout <<
" " << setw(3) << ivertex <<
" ";
1148 for (
unsigned int ivertex = 0; ivertex < nv; ++ivertex) {
1149 if (std::fabs(y.z_ptr[ivertex] - zdumpcenter_) < zdumpwidth_) {
1154 <<
"T=" << setw(15) << 1. /
beta <<
" Tmin =" << setw(10) << 1. / betamax_
1156 for (
unsigned int ivertex = 0; ivertex < nv; ++ivertex) {
1157 if (std::fabs(y.z_ptr[ivertex] - zdumpcenter_) < zdumpwidth_) {
1158 double Tc = 2 * y.swE_ptr[ivertex] / y.sw_ptr[ivertex];
1166 for (
unsigned int ivertex = 0; ivertex < nv; ++ivertex) {
1167 sumpk += y.pk_ptr[ivertex];
1168 if (std::fabs(y.z_ptr[ivertex] - zdumpcenter_) > zdumpwidth_)
1170 std::cout << setw(8) << setprecision(4) <<
fixed << y.pk_ptr[ivertex];
1175 for (
unsigned int ivertex = 0; ivertex < nv; ++ivertex) {
1176 if (std::fabs(y.z_ptr[ivertex] - zdumpcenter_) > zdumpwidth_)
1178 std::cout << setw(8) << setprecision(1) <<
fixed << y.pk_ptr[ivertex] *
nt;
1183 double E = 0,
F = 0;
1185 std::cout <<
"---- z +/- dz ip +/-dip pt phi eta weights ----" << endl;
1187 for (
unsigned int i0 = 0; i0 <
nt; i0++) {
1188 unsigned int i = iz[i0];
1192 double tz = tks.
z_ptr[
i];
1194 if (std::fabs(tz - zdumpcenter_) > zdumpwidth_)
1196 std::cout << setw(4) <<
i <<
")" << setw(8) <<
fixed << setprecision(4) << tz <<
" +/-" << setw(6)
1198 if ((tks.
tt[
i] ==
nullptr)) {
1215 .pixelBarrelLayersWithMeasurement();
1216 std::cout << setw(1) << tks.
tt[
i]->track().hitPattern().pixelEndcapLayersWithMeasurement();
1218 << tks.
tt[
i]->track().hitPattern().trackerLayersWithMeasurement() -
1219 tks.
tt[
i]->track().hitPattern().pixelLayersWithMeasurement()
1225 std::cout << setw(8) <<
IP.value() <<
"+/-" << setw(6) <<
IP.error();
1226 std::cout <<
" " << setw(6) << setprecision(2) << tks.
tt[
i]->track().pt() * tks.
tt[
i]->track().charge();
1227 std::cout <<
" " << setw(5) << setprecision(2) << tks.
tt[
i]->track().phi() <<
" " << setw(5) << setprecision(2)
1228 << tks.
tt[
i]->track().eta();
1232 for (
unsigned int ivertex = 0; ivertex < nv; ++ivertex) {
1233 if (std::fabs(y.z_ptr[ivertex] - zdumpcenter_) > zdumpwidth_)
1238 double p = y.pk_ptr[ivertex] * local_exp(-
beta * Eik(tks.
z_ptr[
i], y.z_ptr[ivertex], tks.
dz2_ptr[
i])) /
1241 std::cout << setw(8) << setprecision(3) <<
p;
1251 std::cout <<
" ( " << std::setw(3) << tks.
kmin[
i] <<
"," << std::setw(3) << tks.
kmax[
i] - 1 <<
" ) ";
1255 for (
unsigned int ivertex = 0; ivertex < nv; ++ivertex) {
1256 if (std::fabs(y.z_ptr[ivertex] - zdumpcenter_) < zdumpwidth_) {
1257 std::cout <<
" " << setw(3) << ivertex <<
" ";
1263 for (
unsigned int ivertex = 0; ivertex < nv; ++ivertex) {
1264 if (std::fabs(y.z_ptr[ivertex] - zdumpcenter_) < zdumpwidth_) {
1270 <<
"T=" << 1 /
beta <<
" E=" << E <<
" n=" << y.getSize() <<
" F= " <<
F << endl
1271 <<
"----------" << endl;