#include "vdt/vdtMath.h"
#include <Math/SMatrix.h>
maxIterations_ = 1000;
coolingFactor_ = conf.getParameter<double>("coolingFactor");
uniquetrkweight_ = conf.getParameter<double>("uniquetrkweight");
convergence_mode_ = conf.getParameter<int>("convergence_mode");
std::cout << "DAClusterizerinZ_vect: mintrkweight = " << mintrkweight_ << std::endl;
std::cout << "DAClusterizerinZ_vect: uniquetrkweight = " << uniquetrkweight_ << std::endl;
std::cout << "DAClusterizerinZ_vect: zmerge = " << zmerge_ << std::endl;
std::cout << "DAClusterizerinZ_vect: Tmin = " << Tmin << std::endl;
std::cout << "DAClusterizerinZ_vect: Tstop = " << Tstop << std::endl;
std::cout << "DAClusterizerinZ_vect: vertexSize = " << vertexSize_ << std::endl;
std::cout << "DAClusterizerinZ_vect: coolingFactor = " << coolingFactor_ << std::endl;
std::cout << "DAClusterizerinZ_vect: d0CutOff = " << d0CutOff_ << std::endl;
std::cout << "DAClusterizerinZ_vect: dzCutOff = " << dzCutOff_ << std::endl;
std::cout << "DAClusterizerInZ_vect: zrange = " << sel_zrange_ << std::endl;
std::cout << "DAClusterizerinZ_vect: convergence mode = " << convergence_mode_ << std::endl;
std::cout << "DAClusterizerinZ_vect: delta_highT = " << delta_highT_ << std::endl;
std::cout << "DAClusterizerinZ_vect: delta_lowT = " << delta_lowT_ << std::endl;
std::cout << "DAClusterizerinZ_vect: DEBUGLEVEL " << DEBUGLEVEL << std::endl;
if (convergence_mode_ > 1) {
  edm::LogWarning("DAClusterizerinZ_vect")
      << "DAClusterizerInZ_vect: invalid convergence_mode" << convergence_mode_ << " reset to default " << 0;
  convergence_mode_ = 0;
}
75 <<
"DAClusterizerInZ_vect: invalid Tmin" <<
Tmin <<
" reset to default " << 1. / betamax_;
82 <<
"DAClusterizerInZ_vect: invalid Tpurge" <<
Tpurge <<
" set to " <<
Tmin;
89 <<
"DAClusterizerInZ_vect: invalid Tstop" <<
Tstop <<
" set to " <<
max(1.,
Tpurge);
92 betastop_ = 1. /
Tstop;
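// The configured temperatures are kept internally as inverse temperatures: betastop_ = 1/Tstop
// above, and apparently betamax_ = 1/Tmin and betapurge_ = 1/Tpurge as well (betamax_ shows up in
// the Tmin fallback message and betapurge_ in the purging loop further down).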
inline double local_exp(double const& inp) { return vdt::fast_exp(inp); }
inline void local_exp_list_range(double const* __restrict__ arg_inp,
                                 double* __restrict__ arg_out,
                                 const unsigned int kmin,
                                 const unsigned int kmax) {
  for (auto i = kmin; i != kmax; ++i)
    arg_out[i] = vdt::fast_exp(arg_inp[i]);
}
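// local_exp / local_exp_list_range wrap vdt::fast_exp so that the Boltzmann factors for a
// contiguous range of vertex indices [kmin, kmax) can be evaluated in one tight, vectorizable
// loop instead of calling std::exp element by element.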
if (!(nv == 999999)) {
  // ...
}
if (!(nt == 999999)) {
  // ...
}

assert(v.ei_cache.size() == nv);
// ...
assert(v.ei_cache_ptr == &v.ei_cache.front());
assert(v.swz_ptr == &v.swz.front());
// ...
assert(v.swE_ptr == &v.swE.front());

for (unsigned int k = 0; k < nv - 1; k++) {
  if (v.z[k] <= v.z[k + 1])
    continue;
  cout << " Z, cluster z-ordering assertion failure z[" << k << "] =" << v.z[k] << " z[" << k + 1
       << "] =" << v.z[k + 1] << endl;
}
for (unsigned int i = 0; i < nt; i++) {
  // ...
  cout << "track vertex range assertion failure" << i << "/" << nt << " kmin,kmax=" << tks.kmin[i] << ", "
       << tks.kmax[i] << " nv=" << nv << endl;
}

for (unsigned int i = 0; i < nt; i++) {
  // ...
}
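// The loop below (presumably from fill()) converts the input TransientTracks into the
// structure-of-arrays track_t: for each valid track it stores the z of the state at the beam
// line, a squared z resolution t_dz2 starting from the track's dzError (further terms are
// elided here), and a per-track weight t_pi.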
for (auto it = tracks.begin(); it != tracks.end(); it++) {
  if (!(*it).isValid())
    continue;
  // ...
  double t_z = ((*it).stateAtBeamLine().trackStateAtPCA()).position().z();
  if (std::fabs(t_z) > 1000.)
    continue;
  // ...
  auto const& t_mom = (*it).stateAtBeamLine().trackStateAtPCA().momentum();
  // ...
  double t_dz2 = std::pow((*it).track().dzError(), 2)
                 // ... (additional resolution terms elided)
  // ...
  LogTrace("DAClusterizerinZ_vect") << t_z << ' ' << t_dz2 << ' ' << t_pi;
  // ...
}
if (DEBUGLEVEL > 0) {
  // ...
}
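// Eik(t_z, k_z, t_dz2) = (t_z - k_z)^2 * t_dz2 is the deterministic-annealing assignment
// "energy" of a track at t_z with respect to a vertex candidate at k_z; beta * Eik is the
// argument of the corresponding Boltzmann factor.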
inline double Eik(double t_z, double k_z, double t_dz2) { return std::pow(t_z - k_z, 2) * t_dz2; }
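// set_vtx_range: for each track, narrow the window of vertex candidates [kmin, kmax) to those
// whose z lies inside the track's search range (zmin/zmax, presumably derived from sel_zrange_
// and the track resolution).  The update kernels below then loop only over this window.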
const unsigned int nv = gvertices.getSize();
const unsigned int nt = gtracks.getSize();
// ...
edm::LogWarning("DAClusterizerinZ_vect") << "empty cluster list in set_vtx_range";
// ...
for (auto itrack = 0U; itrack < nt; ++itrack) {
  // ...
  unsigned int kmin = min(nv - 1, gtracks.kmin[itrack]);
  // ...
  while ((kmin > 0) && (gvertices.z_ptr[kmin - 1] > zmin)) {
    kmin--;
  }
  // ...
  while ((kmin < (nv - 1)) && (gvertices.z_ptr[kmin] < zmin)) {
    kmin++;
  }
  // ...
  unsigned int kmax = min(nv - 1, gtracks.kmax[itrack] - 1);
  // ...
  while ((kmax < (nv - 1)) && (gvertices.z_ptr[kmax + 1] < zmax)) {
    kmax++;
  }
  // ...
  while ((kmax > 0) && (gvertices.z_ptr[kmax] > zmax)) {
    kmax--;
  }
  // ...
  gtracks.kmin[itrack] = kmin;
  gtracks.kmax[itrack] = kmax + 1;
  // ...
  gtracks.kmax[itrack] = min(nv, max(kmin, kmax) + 1);
}
const unsigned int nt = gtracks.getSize();
const unsigned int nv = gvertices.getSize();
for (auto itrack = 0U; itrack < nt; ++itrack) {
  gtracks.kmin[itrack] = 0;
  gtracks.kmax[itrack] = nv;
}
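// update(beta, ...) without Tc accumulation: one annealing E/M pass at inverse temperature beta.
// Per track it fills the exponent arguments -beta*Eik (kernel_calc_exp_arg_range), sums the
// partition function Z_i seeded with the outlier term Z_init (kernel_add_Z_range), and
// accumulates the per-vertex sums se/sw/swz from which the vertex weights and positions are
// recomputed; delta, presumably the resulting vertex displacement, is returned via kernel_calc_z.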
const unsigned int nt = gtracks.getSize();
const unsigned int nv = gvertices.getSize();
// ...
Z_init = rho0 * local_exp(-beta * dzCutOff_ * dzCutOff_);
// ...
auto kernel_calc_exp_arg_range = [beta](const unsigned int itrack,
                                        track_t const& tracks,
                                        vertex_t const& vertices,
                                        const unsigned int kmin,
                                        const unsigned int kmax) {
  const double track_z = tracks.z_ptr[itrack];
  const double botrack_dz2 = -beta * tracks.dz2_ptr[itrack];
  // ...
  for (unsigned int ivertex = kmin; ivertex < kmax; ++ivertex) {
    auto mult_res = track_z - vertices.z_ptr[ivertex];
    vertices.ei_cache_ptr[ivertex] = botrack_dz2 * (mult_res * mult_res);
  }
};
auto kernel_add_Z_range = [Z_init](
                              vertex_t const& vertices, const unsigned int kmin, const unsigned int kmax) -> double {
  double ZTemp = Z_init;
  for (unsigned int ivertex = kmin; ivertex < kmax; ++ivertex) {
    // ...
  }
  return ZTemp;
};
auto kernel_calc_normalization_range = [](const unsigned int track_num,
                                          track_t& tks_vec,
                                          vertex_t& y_vec,
                                          const unsigned int kmin,
                                          const unsigned int kmax) {
  auto tmp_trk_pi = tks_vec.pi_ptr[track_num];
  auto o_trk_Z_sum = 1. / tks_vec.Z_sum_ptr[track_num];
  auto o_trk_dz2 = tks_vec.dz2_ptr[track_num];
  auto tmp_trk_z = tks_vec.z_ptr[track_num];
  // ...
  for (unsigned int k = kmin; k < kmax; ++k) {
    y_vec.se_ptr[k] += y_vec.ei_ptr[k] * (tmp_trk_pi * o_trk_Z_sum);
    auto w = y_vec.pk_ptr[k] * y_vec.ei_ptr[k] * (tmp_trk_pi * o_trk_Z_sum * o_trk_dz2);
    y_vec.sw_ptr[k] += w;
    y_vec.swz_ptr[k] += w * tmp_trk_z;
  }
};
for (auto ivertex = 0U; ivertex < nv; ++ivertex) {
  gvertices.se_ptr[ivertex] = 0.0;
  gvertices.sw_ptr[ivertex] = 0.0;
  gvertices.swz_ptr[ivertex] = 0.0;
}
// ...
for (auto itrack = 0U; itrack < nt; ++itrack) {
  unsigned int kmin = gtracks.kmin[itrack];
  unsigned int kmax = gtracks.kmax[itrack];
  // ...
  assert((kmin < kmax) && (kmax <= nv));
  // ...
  kernel_calc_exp_arg_range(itrack, gtracks, gvertices, kmin, kmax);
  // ...
  gtracks.Z_sum_ptr[itrack] = kernel_add_Z_range(gvertices, kmin, kmax);
  // ...
  sumpi += gtracks.pi_ptr[itrack];
  // ...
  if (gtracks.Z_sum_ptr[itrack] > 1.e-100) {
    kernel_calc_normalization_range(itrack, gtracks, gvertices, kmin, kmax);
  }
}
// ...
for (unsigned int ivertex = 0; ivertex < nv; ++ivertex) {
  // ...
}
// ...
auto osumpi = 1. / sumpi;
for (unsigned int ivertex = 0; ivertex < nv; ++ivertex) {
  // ...
}
// ...
delta = kernel_calc_z(gvertices);
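// Second update variant (presumably the worker behind updateTc): identical to the one above,
// except that the normalization kernel also accumulates swE = sum_i w_ik * Eik (ei_cache holds
// -beta*Eik, hence the factor obeta = -1/beta), which is needed for the critical temperature
// Tc = 2*swE/sw of each vertex candidate.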
const unsigned int nt = gtracks.getSize();
const unsigned int nv = gvertices.getSize();
// ...
Z_init = rho0 * local_exp(-beta * dzCutOff_ * dzCutOff_);
// ...
auto kernel_calc_exp_arg_range = [beta](const unsigned int itrack,
                                        track_t const& tracks,
                                        vertex_t const& vertices,
                                        const unsigned int kmin,
                                        const unsigned int kmax) {
  const double track_z = tracks.z_ptr[itrack];
  const double botrack_dz2 = -beta * tracks.dz2_ptr[itrack];
  // ...
  for (unsigned int ivertex = kmin; ivertex < kmax; ++ivertex) {
    auto mult_res = track_z - vertices.z_ptr[ivertex];
    vertices.ei_cache_ptr[ivertex] = botrack_dz2 * (mult_res * mult_res);
  }
};
auto kernel_add_Z_range = [Z_init](
                              vertex_t const& vertices, const unsigned int kmin, const unsigned int kmax) -> double {
  double ZTemp = Z_init;
  for (unsigned int ivertex = kmin; ivertex < kmax; ++ivertex) {
    // ...
  }
  return ZTemp;
};
auto kernel_calc_normalization_range = [beta](const unsigned int track_num,
                                              track_t& tks_vec,
                                              vertex_t& y_vec,
                                              const unsigned int kmin,
                                              const unsigned int kmax) {
  auto tmp_trk_pi = tks_vec.pi_ptr[track_num];
  auto o_trk_Z_sum = 1. / tks_vec.Z_sum_ptr[track_num];
  auto o_trk_dz2 = tks_vec.dz2_ptr[track_num];
  auto tmp_trk_z = tks_vec.z_ptr[track_num];
  auto obeta = -1. / beta;
  // ...
  for (unsigned int k = kmin; k < kmax; ++k) {
    y_vec.se_ptr[k] += y_vec.ei_ptr[k] * (tmp_trk_pi * o_trk_Z_sum);
    auto w = y_vec.pk_ptr[k] * y_vec.ei_ptr[k] * (tmp_trk_pi * o_trk_Z_sum * o_trk_dz2);
    y_vec.sw_ptr[k] += w;
    y_vec.swz_ptr[k] += w * tmp_trk_z;
    y_vec.swE_ptr[k] += w * y_vec.ei_cache_ptr[k] * obeta;
  }
};
for (auto ivertex = 0U; ivertex < nv; ++ivertex) {
  gvertices.se_ptr[ivertex] = 0.0;
  gvertices.sw_ptr[ivertex] = 0.0;
  gvertices.swz_ptr[ivertex] = 0.0;
  gvertices.swE_ptr[ivertex] = 0.0;
}
// ...
for (auto itrack = 0U; itrack < nt; ++itrack) {
  unsigned int kmin = gtracks.kmin[itrack];
  unsigned int kmax = gtracks.kmax[itrack];
  // ...
  kernel_calc_exp_arg_range(itrack, gtracks, gvertices, kmin, kmax);
  // ...
  gtracks.Z_sum_ptr[itrack] = kernel_add_Z_range(gvertices, kmin, kmax);
  // ...
  sumpi += gtracks.pi_ptr[itrack];
  // ...
  if (gtracks.Z_sum_ptr[itrack] > 1.e-100) {
    kernel_calc_normalization_range(itrack, gtracks, gvertices, kmin, kmax);
  }
}
// ...
for (unsigned int ivertex = 0; ivertex < nv; ++ivertex) {
  // ...
}
// ...
auto osumpi = 1. / sumpi;
for (unsigned int ivertex = 0; ivertex < nv; ++ivertex) {
  // ...
}
// ...
delta = kernel_calc_z(gvertices);
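// The block below (possibly evalF, a free-energy style cross-check used for debugging) loops
// over all track/vertex pairs explicitly; the exponential is only evaluated when beta*Eik < 30,
// since larger arguments would contribute a negligible, underflowing term.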
auto nv = v.getSize();
// ...
for (auto i = 0U; i < nt; i++) {
  // ...
  for (auto k = 0u; k < nv; k++) {
    double Eik = (tks.z[i] - v.z[k]) * (tks.z[i] - v.z[k]) * tks.dz2[i];
    if ((beta * Eik) < 30) {
      // ...
    }
  }
}
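// thermalize: iterate update() at fixed beta until the vertex movement drops below delta_max
// (delta_max0 in convergence_mode 0; the mode-1 limit is set in the branch elided above) or
// maxIterations_ is reached.  Once the accumulated movement exceeds zrange_min_, the per-track
// windows are refreshed with set_vtx_range.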
unsigned int niter = 0;
// ...
double delta_max = delta_lowT_;
// ...
if (convergence_mode_ == 0) {
  delta_max = delta_max0;
} else if (convergence_mode_ == 1) {
  // ...
}
// ...
set_vtx_range(beta, tks, v);
double delta_sum_range = 0;
std::vector<double> z0 = v.z;
// ...
while (niter++ < maxIterations_) {
  // ...
  delta_sum_range += delta;
  // ...
  if (delta_sum_range > zrange_min_) {
    for (unsigned int k = 0; k < v.getSize(); k++) {
      // ...
      set_vtx_range(beta, tks, v);
      // ...
    }
  }
  // ...
  if (delta < delta_max) {
    // ...
  }
}
// ...
if (DEBUGLEVEL > 0) {
  std::cout << "DAClusterizerInZ_vect.thermalize niter = " << niter << " at T = " << 1 / beta
            << " nv = " << v.getSize() << std::endl;
}
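// merge: collect pairs of neighbouring prototypes closer than zmerge_, sort them by separation,
// and collapse each pair into a single prototype at the pk-weighted mean z; the combined swE is
// evaluated so the critical temperature of the merged candidate can be reported.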
const unsigned int nv = y.getSize();
// ...
std::vector<std::pair<double, unsigned int> > critical;
for (unsigned int k = 0; (k + 1) < nv; k++) {
  if (std::fabs(y.z_ptr[k + 1] - y.z_ptr[k]) < zmerge_) {
    critical.push_back(make_pair(std::fabs(y.z_ptr[k + 1] - y.z_ptr[k]), k));
  }
}
// ...
std::stable_sort(critical.begin(), critical.end(), std::less<std::pair<double, unsigned int> >());
// ...
for (unsigned int ik = 0; ik < critical.size(); ik++) {
  unsigned int k = critical[ik].second;
  double rho = y.pk_ptr[k] + y.pk_ptr[k + 1];
  double swE = y.swE_ptr[k] + y.swE_ptr[k + 1] -
               y.pk_ptr[k] * y.pk_ptr[k + 1] / rho * std::pow(y.z_ptr[k + 1] - y.z_ptr[k], 2);
  double Tc = 2 * swE / (y.sw_ptr[k] + y.sw_ptr[k + 1]);
  // ...
  if (DEBUGLEVEL > 1) {
    std::cout << "merging " << fixed << setprecision(4) << y.z_ptr[k + 1] << " and " << y.z_ptr[k]
              << " Tc = " << Tc << " sw = " << y.sw_ptr[k] + y.sw_ptr[k + 1] << std::endl;
  }
  // ...
  if (rho > 0) {
    y.z_ptr[k] = (y.pk_ptr[k] * y.z_ptr[k] + y.pk_ptr[k + 1] * y.z_ptr[k + 1]) / rho;
  } else {
    y.z_ptr[k] = 0.5 * (y.z_ptr[k] + y.z_ptr[k + 1]);
  }
  // ...
  y.sw_ptr[k] += y.sw_ptr[k + 1];
  // ...
  y.removeItem(k + 1, tks);
  set_vtx_range(beta, tks, y);
  // ...
}
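// purge: a prototype survives only if at least two tracks are assigned to it "uniquely", i.e.
// with probability above uniquetrkweight_ * pmax, where pmax is the best achievable assignment
// given the outlier term rho0 * exp(-beta * dzCutOff_^2).  Among the failing candidates the one
// with the smallest summed assignment (sumpmin) is removed.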
const unsigned int nv = y.getSize();
// ...
unsigned int k0 = nv;
// ...
for (unsigned int k = 0; k < nv; k++) {
  // ...
  double pmax = y.pk_ptr[k] / (y.pk_ptr[k] + rho0 * local_exp(-beta * dzCutOff_ * dzCutOff_));
  for (unsigned int i = 0; i < nt; i++) {
    // ...
    if ((p > uniquetrkweight_ * pmax) && (tks.pi_ptr[i] > 0)) {
      // ...
    }
  }
  // ...
  if ((nUnique < 2) && (sump < sumpmin)) {
    // ...
  }
}
// ...
if (DEBUGLEVEL > 1) {
  std::cout << "eliminating prototype at " << std::setw(10) << std::setprecision(4) << y.z_ptr[k0]
            << " with sump=" << sumpmin << " rho*nt =" << y.pk_ptr[k0] * nt << endl;
}
// ...
y.removeItem(k0, tks);
set_vtx_range(beta, tks, y);
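// beta0: estimate the critical temperature of the starting configuration.  Each candidate's
// position is set to the weighted mean sumwz/sumw and Tc = 2*a/b is formed (a and b are
// presumably the weighted second moment and the weight sum); the returned starting beta is then
// placed a few cooling steps above 1/Tc, bounded by betamax.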
const unsigned int nv = y.getSize();
// ...
for (unsigned int k = 0; k < nv; k++) {
  // ...
  for (unsigned int i = 0; i < nt; i++) {
    // ...
  }
  // ...
  y.z_ptr[k] = sumwz / sumw;
  // ...
  for (unsigned int i = 0; i < nt; i++) {
    // ...
  }
  // ...
  double Tc = 2. * a / b;
  // ...
}
// ...
if (DEBUGLEVEL > 0) {
  std::cout << "DAClusterizerInZ_vect.beta0: Tc = " << T0 << std::endl;
  // ...
  std::cout << "DAClusterizerInZ_vect.beta0: nstep = " << coolingsteps << std::endl;
}
// ...
if (T0 > 1. / betamax) {
  // ...
  return betamax * std::pow(coolingFactor_, coolingsteps);
}
// ...
return betamax * coolingFactor_;
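// split: candidates whose critical temperature Tc = 2*swE/sw is reached (sorted in descending
// Tc) are split in two.  The assigned tracks are divided by z into "left"/"right" populations
// (p1,z1,w1 and p2,z2,w2), the new positions are kept from crossing 60/40 mixtures with the
// neighbouring candidates, and the parent weight pk is shared in proportion to p1 and p2 before
// insertItem adds the second prototype.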
unsigned int nv = y.getSize();
// ...
std::vector<std::pair<double, unsigned int> > critical;
for (unsigned int k = 0; k < nv; k++) {
  double Tc = 2 * y.swE_ptr[k] / y.sw_ptr[k];
  // ...
}
// ...
std::stable_sort(critical.begin(), critical.end(), std::greater<std::pair<double, unsigned int> >());
// ...
for (unsigned int ic = 0; ic < critical.size(); ic++) {
  unsigned int k = critical[ic].second;
  // ...
  double p1 = 0, z1 = 0, w1 = 0;
  double p2 = 0, z2 = 0, w2 = 0;
  for (unsigned int i = 0; i < nt; i++) {
    // ...
    double tl = tks.z_ptr[i] < y.z_ptr[k] ? 1. : 0.;
    // ...
    if (std::fabs(arg) < 20) {
      double t = local_exp(-arg);
      // ...
    }
    // ...
  }
  // ...
  if ((k > 0) && (z1 < (0.6 * y.z_ptr[k] + 0.4 * y.z_ptr[k - 1]))) {
    z1 = 0.6 * y.z_ptr[k] + 0.4 * y.z_ptr[k - 1];
  }
  if ((k + 1 < nv) && (z2 > (0.6 * y.z_ptr[k] + 0.4 * y.z_ptr[k + 1]))) {
    z2 = 0.6 * y.z_ptr[k] + 0.4 * y.z_ptr[k + 1];
  }
  // ...
  if (DEBUGLEVEL > 1) {
    if (std::fabs(y.z_ptr[k] - zdumpcenter_) < zdumpwidth_) {
      // ...
          << std::fixed << std::setprecision(4) << y.z_ptr[k] << " --> " << z1 << "," << z2 << " [" << p1
          // ...
    }
  }
  // ...
  double pk1 = p1 * y.pk_ptr[k] / (p1 + p2);
  double pk2 = p2 * y.pk_ptr[k] / (p1 + p2);
  // ...
  y.insertItem(k, z1, pk1, tks);
  // ...
  for (unsigned int jc = ic; jc < critical.size(); jc++) {
    // ...
  }
}
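// vertices(): the main annealing driver.  Starting from a single prototype at beta = beta0(...),
// the system is cooled by coolingFactor_ while updateTc/split/merge adapt the number of
// prototypes; at T = Tmin a last round of splitting is done, outlier rejection is switched on
// (rho0 > 0), then merging, purging down to Tpurge, and a final cooling to Tstop follow before
// the surviving prototypes are turned into the output vertices below.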
clear_vtx_range(tks, y);
// ...
double beta = beta0(betamax_, tks, y);
// ...
thermalize(beta, tks, y, delta_highT_);
// ...
double betafreeze = betamax_ * sqrt(coolingFactor_);
// ...
while (beta < betafreeze) {
  updateTc(beta, tks, y, rho0);
  // ...
  updateTc(beta, tks, y, rho0);
  // ...
  set_vtx_range(beta, tks, y);
  thermalize(beta, tks, y, delta_highT_);
}
// ...
if (DEBUGLEVEL > 0) {
  std::cout << "DAClusterizerInZ_vect::vertices :"
            << "last round of splitting" << std::endl;
}
set_vtx_range(beta, tks, y);
updateTc(beta, tks, y, rho0);
// ...
set_vtx_range(beta, tks, y);
updateTc(beta, tks, y, rho0);
// ...
unsigned int ntry = 0;
// ...
set_vtx_range(beta, tks, y);
thermalize(beta, tks, y, delta_highT_, 0.);
updateTc(beta, tks, y, rho0);
// ...
updateTc(beta, tks, y, rho0);
// ...
if (DEBUGLEVEL > 0) {
  std::cout << "DAClusterizerInZ_vect::vertices :"
            << "turning on outlier rejection at T=" << 1 / beta << std::endl;
}
for (unsigned int a = 0; a < 5; a++) {
  // ...
  thermalize(beta, tks, y, delta_lowT_, rho0);
  // ...
}
// ...
if (DEBUGLEVEL > 0) {
  std::cout << "DAClusterizerInZ_vect::vertices :"
            << "merging with outlier rejection at T=" << 1 / beta << std::endl;
}
// ...
set_vtx_range(beta, tks, y);
// ...
if (DEBUGLEVEL > 0) {
  std::cout << "DAClusterizerInZ_vect::vertices :"
            << "after merging with outlier rejection at T=" << 1 / beta << std::endl;
}
while (beta < betapurge_) {
  // ...
  set_vtx_range(beta, tks, y);
  thermalize(beta, tks, y, delta_lowT_, rho0);
}
// ...
if (DEBUGLEVEL > 0) {
  std::cout << "DAClusterizerInZ_vect::vertices :"
            << "purging at T=" << 1 / beta << std::endl;
}
// ...
while (purge(y, tks, rho0, beta)) {
  thermalize(beta, tks, y, delta_lowT_, rho0);
  // ...
}
// ...
if (DEBUGLEVEL > 0) {
  std::cout << "DAClusterizerInZ_vect::vertices :"
            << "last cooling T=" << 1 / beta << std::endl;
}
// ...
while (beta < betastop_) {
  // ...
  thermalize(beta, tks, y, delta_lowT_, rho0);
  // ...
}
// ...
if (DEBUGLEVEL > 0) {
  std::cout << "DAClusterizerInZ_vect::vertices :"
            << "stop cooling at T=" << 1 / beta << std::endl;
}
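// Assemble the output: after the final thermalization the tracks are re-assigned to the
// prototypes, with rho0 * exp(-beta * dzCutOff_^2) acting as the outlier channel in Z_sum; a
// track joins a vertex's track list only if its weight pi is positive and its assignment
// probability p exceeds mintrkweight_.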
GlobalError dummyError(0.01, 0, 0.01, 0., 0., 0.01);
// ...
const unsigned int nv = y.getSize();
for (unsigned int k = 0; k < nv; k++) {
  // ...
}
// ...
for (unsigned int i = 0; i < nt; i++)
  tks.Z_sum_ptr[i] = rho0 * local_exp(-beta * dzCutOff_ * dzCutOff_);
// ...
for (unsigned int k = 0; k < nv; k++) {
  for (unsigned int i = 0; i < nt; i++) {
    // ...
  }
}
// ...
for (unsigned int k = 0; k < nv; k++) {
  // ...
  vector<reco::TransientTrack> vertexTracks;
  for (unsigned int i = 0; i < nt; i++) {
    // ...
    if ((tks.pi_ptr[i] > 0) && (p > mintrkweight_)) {
      vertexTracks.push_back(*(tks.tt[i]));
      // ...
    }
  }
  // ...
}
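// clusterize: convenience interface that runs the annealing on the input tracks (pv is
// presumably the vertex list returned by vertices()) and then groups the original
// TransientTracks of neighbouring vertices into clusters, suppressing one-track clusters.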
vector<vector<reco::TransientTrack> > DAClusterizerInZ_vect::clusterize(
    const vector<reco::TransientTrack>& tracks) const {
  vector<vector<reco::TransientTrack> > clusters;
  // ...
  if (DEBUGLEVEL > 0) {
    std::cout << "###################################################" << endl;
    std::cout << "# vectorized DAClusterizerInZ_vect::clusterize nt=" << tracks.size() << endl;
    std::cout << "# DAClusterizerInZ_vect::clusterize pv.size=" << pv.size() << endl;
    std::cout << "###################################################" << endl;
  }
  // ...
  vector<reco::TransientTrack> aCluster = pv.begin()->originalTracks();
  // ...
  for (auto k = pv.begin() + 1; k != pv.end(); k++) {
    // ...
    if (aCluster.size() > 1) {
      // ...
    }
    // ...
    std::cout << " one track cluster at " << k->position().z() << " suppressed" << std::endl;
    // ...
    for (unsigned int i = 0; i < k->originalTracks().size(); i++) {
      aCluster.push_back(k->originalTracks()[i]);
      // ...
    }
    // ...
  }
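// dump: verbose debug printout.  Tracks are listed in z order via the index array iz; for each
// vertex within zdumpwidth_ of zdumpcenter_ the position, critical temperature, weight pk and
// expected track count pk*nt are printed, followed by per-track assignment probabilities and
// each track's kmin/kmax window.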
const unsigned int nv = y.getSize();
// ...
std::vector<unsigned int> iz;
for (unsigned int j = 0; j < nt; j++) {
  // ...
}
// ...
std::sort(iz.begin(), iz.end(), [tks](unsigned int a, unsigned int b) { return tks.z_ptr[a] < tks.z_ptr[b]; });
// ...
std::cout << "-----DAClusterizerInZ::dump ----" << nv << " clusters " << std::endl;
for (unsigned int ivertex = 0; ivertex < nv; ++ivertex) {
  if (std::fabs(y.z_ptr[ivertex] - zdumpcenter_) < zdumpwidth_) {
    std::cout << " " << setw(3) << ivertex << " ";
  }
}
// ...
for (unsigned int ivertex = 0; ivertex < nv; ++ivertex) {
  if (std::fabs(y.z_ptr[ivertex] - zdumpcenter_) < zdumpwidth_) {
    // ...
  }
}
// ...
    << "T=" << setw(15) << 1. / beta << " Tmin =" << setw(10) << 1. / betamax_
    // ...
for (unsigned int ivertex = 0; ivertex < nv; ++ivertex) {
  if (std::fabs(y.z_ptr[ivertex] - zdumpcenter_) < zdumpwidth_) {
    double Tc = 2 * y.swE_ptr[ivertex] / y.sw_ptr[ivertex];
    // ...
  }
}
// ...
for (unsigned int ivertex = 0; ivertex < nv; ++ivertex) {
  sumpk += y.pk_ptr[ivertex];
  if (std::fabs(y.z_ptr[ivertex] - zdumpcenter_) > zdumpwidth_)
    continue;
  std::cout << setw(8) << setprecision(4) << fixed << y.pk_ptr[ivertex];
}
// ...
for (unsigned int ivertex = 0; ivertex < nv; ++ivertex) {
  if (std::fabs(y.z_ptr[ivertex] - zdumpcenter_) > zdumpwidth_)
    continue;
  std::cout << setw(8) << setprecision(1) << fixed << y.pk_ptr[ivertex] * nt;
}
// ...
double E = 0, F = 0;
// ...
std::cout << "---- z +/- dz ip +/-dip pt phi eta weights ----" << endl;
for (unsigned int i0 = 0; i0 < nt; i0++) {
  unsigned int i = iz[i0];
  // ...
  double tz = tks.z_ptr[i];
  // ...
  if (std::fabs(tz - zdumpcenter_) > zdumpwidth_)
    continue;
  std::cout << setw(4) << i << ")" << setw(8) << fixed << setprecision(4) << tz << " +/-" << setw(6)
      // ...
  if ((tks.tt[i] == nullptr)) {
    // ...
  }
  // ...
      .pixelBarrelLayersWithMeasurement();
  std::cout << setw(1) << tks.tt[i]->track().hitPattern().pixelEndcapLayersWithMeasurement();
  // ...
      << tks.tt[i]->track().hitPattern().trackerLayersWithMeasurement() -
             tks.tt[i]->track().hitPattern().pixelLayersWithMeasurement()
      // ...
  std::cout << setw(8) << IP.value() << "+/-" << setw(6) << IP.error();
  std::cout << " " << setw(6) << setprecision(2) << tks.tt[i]->track().pt() * tks.tt[i]->track().charge();
  std::cout << " " << setw(5) << setprecision(2) << tks.tt[i]->track().phi() << " " << setw(5) << setprecision(2)
            << tks.tt[i]->track().eta();
  // ...
  for (unsigned int ivertex = 0; ivertex < nv; ++ivertex) {
    if (std::fabs(y.z_ptr[ivertex] - zdumpcenter_) > zdumpwidth_)
      continue;
    // ...
    std::cout << setw(8) << setprecision(3) << p;
    // ...
  }
  // ...
  std::cout << " ( " << std::setw(3) << tks.kmin[i] << "," << std::setw(3) << tks.kmax[i] - 1 << " ) ";
  // ...
}
for (unsigned int ivertex = 0; ivertex < nv; ++ivertex) {
  if (std::fabs(y.z_ptr[ivertex] - zdumpcenter_) < zdumpwidth_) {
    std::cout << " " << setw(3) << ivertex << " ";
  }
}
// ...
for (unsigned int ivertex = 0; ivertex < nv; ++ivertex) {
  if (std::fabs(y.z_ptr[ivertex] - zdumpcenter_) < zdumpwidth_) {
    // ...
  }
}
// ...
    << "T=" << 1 / beta << " E=" << E << " n=" << y.getSize() << " F= " << F << endl
    << "----------" << endl;