PixelCPEFast.cc
1 #include <cuda_runtime.h>
2 
12 
13 // Services
14 // this is needed to get errors from templates
15 
16 namespace {
17  constexpr float micronsToCm = 1.0e-4;
18 }
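// Unit convention used below: the GenError machinery returns errors in microns,
// while CMSSW local positions and errors are expressed in cm, so every template
// error is scaled by micronsToCm (e.g. 12 um -> 12e-4 cm) before being stored.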
19 
20 //-----------------------------------------------------------------------------
21 //!  The constructor.
22 //-----------------------------------------------------------------------------
23 PixelCPEFast::PixelCPEFast(edm::ParameterSet const& conf,
24  const MagneticField* mag,
25  const TrackerGeometry& geom,
26  const TrackerTopology& ttopo,
27  const SiPixelLorentzAngle* lorentzAngle,
28  const SiPixelGenErrorDBObject* genErrorDBObject,
29  const SiPixelLorentzAngle* lorentzAngleWidth)
30  : PixelCPEGenericBase(conf, mag, geom, ttopo, lorentzAngle, genErrorDBObject, lorentzAngleWidth) {
31  // Use errors from templates or from GenError
32  if (useErrorsFromTemplates_) {
33  if (!SiPixelGenError::pushfile(*genErrorDBObject_, thePixelGenError_))
34  throw cms::Exception("InvalidCalibrationLoaded")
35  << "ERROR: GenErrors not filled correctly. Check the sqlite file. Using SiPixelTemplateDBObject version "
36  << (*genErrorDBObject_).version();
37  }
38 
39  isPhase2_ = conf.getParameter<bool>("isPhase2");
40 
41  fillParamsForGpu();
42 
43  cpuData_ = {
44  &commonParamsGPU_,
45  detParamsGPU_.data(),
46  &layerGeometry_,
47  &averageGeometry_,
48  };
49 }
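// cpuData_ simply aggregates pointers to the host-side parameter structs filled by
// fillParamsForGpu(), exposing the same ParamsOnGPU layout to CPU code that
// getGPUProductAsync() later materializes on the device.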
50 
51 const pixelCPEforGPU::ParamsOnGPU* PixelCPEFast::getGPUProductAsync(cudaStream_t cudaStream) const {
52  const auto& data = gpuData_.dataForCurrentDeviceAsync(cudaStream, [this](GPUData& data, cudaStream_t stream) {
53  // and now copy to device...
54 
55  cudaCheck(cudaMalloc((void**)&data.paramsOnGPU_h.m_commonParams, sizeof(pixelCPEforGPU::CommonParams)));
56  cudaCheck(cudaMalloc((void**)&data.paramsOnGPU_h.m_detParams,
57  this->detParamsGPU_.size() * sizeof(pixelCPEforGPU::DetParams)));
58  cudaCheck(cudaMalloc((void**)&data.paramsOnGPU_h.m_averageGeometry, sizeof(pixelCPEforGPU::AverageGeometry)));
59  cudaCheck(cudaMalloc((void**)&data.paramsOnGPU_h.m_layerGeometry, sizeof(pixelCPEforGPU::LayerGeometry)));
60  cudaCheck(cudaMalloc((void**)&data.paramsOnGPU_d, sizeof(pixelCPEforGPU::ParamsOnGPU)));
61  cudaCheck(cudaMemcpyAsync(
62  data.paramsOnGPU_d, &data.paramsOnGPU_h, sizeof(pixelCPEforGPU::ParamsOnGPU), cudaMemcpyDefault, stream));
63  cudaCheck(cudaMemcpyAsync((void*)data.paramsOnGPU_h.m_commonParams,
64  &this->commonParamsGPU_,
65  sizeof(pixelCPEforGPU::CommonParams),
66  cudaMemcpyDefault,
67  stream));
68  cudaCheck(cudaMemcpyAsync((void*)data.paramsOnGPU_h.m_averageGeometry,
69  &this->averageGeometry_,
70  sizeof(pixelCPEforGPU::AverageGeometry),
71  cudaMemcpyDefault,
72  stream));
73  cudaCheck(cudaMemcpyAsync((void*)data.paramsOnGPU_h.m_layerGeometry,
74  &this->layerGeometry_,
75  sizeof(pixelCPEforGPU::LayerGeometry),
76  cudaMemcpyDefault,
77  stream));
78  cudaCheck(cudaMemcpyAsync((void*)data.paramsOnGPU_h.m_detParams,
79  this->detParamsGPU_.data(),
80  this->detParamsGPU_.size() * sizeof(pixelCPEforGPU::DetParams),
81  cudaMemcpyDefault,
82  stream));
83  });
84  return data.paramsOnGPU_d;
85 }
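// A hypothetical usage sketch (the names `cpe` and `ctx` are illustrative, not part
// of this file): a CUDA producer would fetch the device-side parameters lazily,
// once per device, the first time any stream asks for them:
//
//   // inside acquire()/produce() of a stream producer, with a cms::cuda context:
//   const pixelCPEforGPU::ParamsOnGPU* params = cpe->getGPUProductAsync(ctx.stream());
//   // `params` is a device pointer; pass it to kernels launched on that device.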
86 
87 void PixelCPEFast::fillParamsForGpu() {
88  //
89  // this code executes only once per job, computation inefficiency is not an issue
90  // many code blocks are repeated: better keep the computation local and self consistent as blocks may in future move around, be deleted ...
91  // It is valid only for Phase1 and the version of GenError in DB used in late 2018 and in 2021
92 
93  commonParamsGPU_.theThicknessB = m_DetParams.front().theThickness;
94  commonParamsGPU_.theThicknessE = m_DetParams.back().theThickness;
95  commonParamsGPU_.thePitchX = m_DetParams[0].thePitchX;
96  commonParamsGPU_.thePitchY = m_DetParams[0].thePitchY;
97 
101 
102  LogDebug("PixelCPEFast") << "pitch & thickness " << commonParamsGPU_.thePitchX << ' ' << commonParamsGPU_.thePitchY
103  << "  " << commonParamsGPU_.theThicknessB << ' ' << commonParamsGPU_.theThicknessE;
104 
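// Note: theThicknessB/theThicknessE are taken from the first and last entries of
// m_DetParams, i.e. this relies on the det list being ordered with barrel modules
// first and endcap modules last; the pitch is assumed identical for all modules
// (enforced by the asserts in the loop below).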
105  // zero average geometry
106  memset(&averageGeometry_, 0, sizeof(pixelCPEforGPU::AverageGeometry));
107 
108  uint32_t oldLayer = 0;
109  uint32_t oldLadder = 0;
110  float rl = 0;
111  float zl = 0;
112  float miz = 500, mxz = 0;
113  float pl = 0;
114  int nl = 0;
115  detParamsGPU_.resize(m_DetParams.size());
116 
117  for (auto i = 0U; i < m_DetParams.size(); ++i) {
118  auto& p = m_DetParams[i];
119  auto& g = detParamsGPU_[i];
120 
121  if (!isPhase2_) {
122  g.nRowsRoc = phase1PixelTopology::numRowsInRoc;
123  g.nColsRoc = phase1PixelTopology::numColsInRoc;
124  g.nRows = phase1PixelTopology::numRowsInModule;
125  g.nCols = phase1PixelTopology::numColsInModule;
126 
127  g.numPixsInModule = g.nRows * g.nCols;
128 
129  } else {
130  g.nRowsRoc = p.theDet->specificTopology().rowsperroc();
131  g.nColsRoc = p.theDet->specificTopology().colsperroc();
132  g.nRows = p.theDet->specificTopology().rocsX() * g.nRowsRoc;
133  g.nCols = p.theDet->specificTopology().rocsY() * g.nColsRoc;
134 
135  g.numPixsInModule = g.nRows * g.nCols;
136  }
137 
138  assert(p.theDet->index() == int(i));
139  assert(commonParamsGPU_.thePitchY == p.thePitchY);
140  assert(commonParamsGPU_.thePitchX == p.thePitchX);
141 
142  g.isBarrel = GeomDetEnumerators::isBarrel(p.thePart);
143  g.isPosZ = p.theDet->surface().position().z() > 0;
144  g.layer = ttopo_.layer(p.theDet->geographicalId());
145  g.index = i; // better be!
146  g.rawId = p.theDet->geographicalId();
147  auto thickness = g.isBarrel ? commonParamsGPU_.theThicknessB : commonParamsGPU_.theThicknessE;
148  assert(thickness == p.theThickness);
149 
150  auto ladder = ttopo_.pxbLadder(p.theDet->geographicalId());
151  if (oldLayer != g.layer) {
152  oldLayer = g.layer;
153  LogDebug("PixelCPEFast") << "new layer at " << i << (g.isBarrel ? " B " : (g.isPosZ ? " E+ " : " E- "))
154  << g.layer << " starting at " << g.rawId << '\n'
155  << "old layer had " << nl << " ladders";
156  nl = 0;
157  }
158  if (oldLadder != ladder) {
159  oldLadder = ladder;
160  LogDebug("PixelCPEFast") << "new ladder at " << i << (g.isBarrel ? " B " : (g.isPosZ ? " E+ " : " E- "))
161  << ladder << " starting at " << g.rawId << '\n'
162  << "old ladder ave z,r,p mz " << zl / 8.f << " " << rl / 8.f << " " << pl / 8.f << ' '
163  << miz << ' ' << mxz;
164  rl = 0;
165  zl = 0;
166  pl = 0;
167  miz = isPhase2_ ? 500 : 90;
168  mxz = 0;
169  nl++;
170  }
171 
172  g.shiftX = 0.5f * p.lorentzShiftInCmX;
173  g.shiftY = 0.5f * p.lorentzShiftInCmY;
174  g.chargeWidthX = p.lorentzShiftInCmX * p.widthLAFractionX;
175  g.chargeWidthY = p.lorentzShiftInCmY * p.widthLAFractionY;
176 
177  g.x0 = p.theOrigin.x();
178  g.y0 = p.theOrigin.y();
179  g.z0 = p.theOrigin.z();
180 
181  auto vv = p.theDet->surface().position();
182  auto rr = pixelCPEforGPU::Rotation(p.theDet->surface().rotation());
183  g.frame = pixelCPEforGPU::Frame(vv.x(), vv.y(), vv.z(), rr);
184 
185  zl += vv.z();
186  miz = std::min(miz, std::abs(vv.z()));
187  mxz = std::max(mxz, std::abs(vv.z()));
188  rl += vv.perp();
189  pl += vv.phi(); // (not obvious)
190 
191  // errors .....
192  ClusterParamGeneric cp;
193 
194  cp.with_track_angle = false;
195 
196  auto lape = p.theDet->localAlignmentError();
197  if (lape.invalid())
198  lape = LocalError(); // zero....
199 
200  g.apeXX = lape.xx();
201  g.apeYY = lape.yy();
202 
203  auto toMicron = [&](float x) { return std::min(511, int(x * 1.e4f + 0.5f)); };
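// toMicron converts a length from cm to integer microns, rounding to nearest and
// clamping at 511 -- presumably because the sampled errors are stored in a narrow
// (9-bit-sized) integer field of DetParams.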
204 
205  // average angle
206  auto gvx = p.theOrigin.x() + 40.f * commonParamsGPU_.thePitchX;
207  auto gvy = p.theOrigin.y();
208  auto gvz = 1.f / p.theOrigin.z();
209  //--- Note that the normalization is not required as only the ratio is used
210 
211  {
212  // calculate angles (fed into errorFromTemplates)
213  cp.cotalpha = gvx * gvz;
214  cp.cotbeta = gvy * gvz;
215 
216  if (!isPhase2_)
217  errorFromTemplates(p, cp, 20000.);
218  else
219  cp.qBin_ = 0.f;
220  }
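// In other words: the direction from the nominal origin to a point ~40 pixels away
// from the module corner is used as an "average track"; since gvz holds 1/z0, the
// products above are just the slopes cot(alpha) ~ x/z and cot(beta) ~ y/z, which is
// why no normalization of the direction vector is needed.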
221 
222 #ifdef EDM_ML_DEBUG
223  auto m = 10000.f;
224  for (float qclus = 15000; qclus < 35000; qclus += 15000) {
225  errorFromTemplates(p, cp, qclus);
226  LogDebug("PixelCPEFast") << i << ' ' << qclus << ' ' << cp.pixmx << ' ' << m * cp.sigmax << ' ' << m * cp.sx1
227  << ' ' << m * cp.sx2 << ' ' << m * cp.sigmay << ' ' << m * cp.sy1 << ' ' << m * cp.sy2;
228  }
229  LogDebug("PixelCPEFast") << i << ' ' << m * std::sqrt(lape.xx()) << ' ' << m * std::sqrt(lape.yy());
230 #endif // EDM_ML_DEBUG
231 
232  g.pixmx = std::max(0, cp.pixmx);
233  g.sx2 = toMicron(cp.sx2);
234  g.sy1 = std::max(21, toMicron(cp.sy1)); // for some angles sy1 is very small
235  g.sy2 = std::max(55, toMicron(cp.sy2)); // sometimes sy2 is smaller than others (due to angle?)
236 
237  // sample xerr as function of position
239 
240  for (int ix = 0; ix < CPEFastParametrisation::kNumErrorBins; ++ix) {
241  auto x = xoff * (1.f - (0.5f + float(ix)) / 8.f);
242  auto gvx = p.theOrigin.x() - x;
243  auto gvy = p.theOrigin.y();
244  auto gvz = 1.f / p.theOrigin.z();
245  cp.cotbeta = gvy * gvz;
246  cp.cotalpha = gvx * gvz;
247  errorFromTemplates(p, cp, 20000.f);
248  g.sigmax[ix] = toMicron(cp.sigmax);
249  g.sigmax1[ix] = toMicron(cp.sx1);
250  LogDebug("PixelCPEFast") << "sigmax vs x " << i << ' ' << x << ' ' << cp.cotalpha << ' ' << int(g.sigmax[ix])
251  << ' ' << int(g.sigmax1[ix]) << ' ' << 10000.f * cp.sigmay << std::endl;
252  }
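// sigmax[] / sigmax1[] now tabulate the GenError x error (in microns) at
// kNumErrorBins positions across the module, so that the error can later be picked
// by local x with a table lookup rather than re-running the template code per cluster.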
253 #ifdef EDM_ML_DEBUG
254  // sample yerr as function of position
256  for (int ix = 0; ix < CPEFastParametrisation::kNumErrorBins; ++ix) {
257  auto y = yoff * (1.f - (0.5f + float(ix)) / 8.f);
258  auto gvx = p.theOrigin.x() + 40.f * commonParamsGPU_.thePitchY;
259  auto gvy = p.theOrigin.y() - y;
260  auto gvz = 1.f / p.theOrigin.z();
261  cp.cotbeta = gvy * gvz;
262  cp.cotalpha = gvx * gvz;
263  errorFromTemplates(p, cp, 20000.f);
264  LogDebug("PixelCPEFast") << "sigmay vs y " << i << ' ' << y << ' ' << cp.cotbeta << ' ' << 10000.f * cp.sigmay
265  << std::endl;
266  }
267 #endif // EDM_ML_DEBUG
268 
269  // calculate angles (repeated)
270  cp.cotalpha = gvx * gvz;
271  cp.cotbeta = gvy * gvz;
272  auto aveCB = cp.cotbeta;
273 
274  // sample x by charge
275  int qbin = CPEFastParametrisation::kGenErrorQBins; // low charge
276  int k = 0;
277  for (int qclus = 1000; qclus < 200000; qclus += 1000) {
278  errorFromTemplates(p, cp, qclus);
279  if (cp.qBin_ == qbin)
280  continue;
281  qbin = cp.qBin_;
282  g.xfact[k] = cp.sigmax;
283  g.yfact[k] = cp.sigmay;
284  g.minCh[k++] = qclus;
285 #ifdef EDM_ML_DEBUG
286  LogDebug("PixelCPEFast") << i << ' ' << g.rawId << ' ' << cp.cotalpha << ' ' << qclus << ' ' << cp.qBin_ << ' '
287  << cp.pixmx << ' ' << m * cp.sigmax << ' ' << m * cp.sx1 << ' ' << m * cp.sx2 << ' '
288  << m * cp.sigmay << ' ' << m * cp.sy1 << ' ' << m * cp.sy2 << std::endl;
289 #endif // EDM_ML_DEBUG
290  }
291 
293 
294  // fill the rest (sometimes bin 4 is missing)
295  for (int kk = k; kk < CPEFastParametrisation::kGenErrorQBins; ++kk) {
296  g.xfact[kk] = g.xfact[k - 1];
297  g.yfact[kk] = g.yfact[k - 1];
298  g.minCh[kk] = g.minCh[k - 1];
299  }
300  auto detx = 1.f / g.xfact[0];
301  auto dety = 1.f / g.yfact[0];
302  for (int kk = 0; kk < CPEFastParametrisation::kGenErrorQBins; ++kk) {
303  g.xfact[kk] *= detx;
304  g.yfact[kk] *= dety;
305  }
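// The loop above scans the cluster charge in 1000-electron steps and records, in
// minCh[k], the lowest charge at which GenError switches to a new charge bin;
// xfact/yfact store the corresponding errors, normalized to the first bin so they
// act as charge-dependent scale factors.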
306  // sample y in "angle" (estimated from cluster size)
307  float ys = 8.f - 4.f; // apparent bias of half pixel (see plot)
308  // plot: https://indico.cern.ch/event/934821/contributions/3974619/attachments/2091853/3515041/DigilessReco.pdf page 25
309  // sample yerr as function of "size"
310  for (int iy = 0; iy < CPEFastParametrisation::kNumErrorBins; ++iy) {
311  ys += 1.f; // first bin 0 is for size 9 (and size is in fixed point 2^3)
312  if (CPEFastParametrisation::kNumErrorBins - 1 == iy)
313  ys += 8.f; // last bin for "overflow"
314  // cp.cotalpha = ys*(commonParamsGPU_.thePitchX/(8.f*thickness)); // use this to print sampling in "x" (and comment the line below)
315  cp.cotbeta = std::copysign(ys * (commonParamsGPU_.thePitchY / (8.f * thickness)), aveCB);
316  errorFromTemplates(p, cp, 20000.f);
317  g.sigmay[iy] = toMicron(cp.sigmay);
318  LogDebug("PixelCPEFast") << "sigmax/sigmay " << i << ' ' << (ys + 4.f) / 8.f << ' ' << cp.cotalpha << '/'
319  << cp.cotbeta << ' ' << 10000.f * cp.sigmax << '/' << int(g.sigmay[iy]) << std::endl;
320  }
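// ys encodes the sampled cluster size in the same 2^3 fixed-point convention used
// at lookup time (see the comments above), and std::copysign keeps the sign of the
// average cot(beta) so the template is probed on the physically relevant side of
// the module.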
321  } // loop over det
322 
323  const int numberOfModulesInLadder =
324  isPhase2_ ? phase2PixelTopology::numberOfModulesInLadder : phase1PixelTopology::numberOfModulesInLadder;
325  const int numberOfModulesInBarrel =
326  isPhase2_ ? phase2PixelTopology::numberOfModulesInBarrel : phase1PixelTopology::numberOfModulesInBarrel;
327  const int numberOfLaddersInBarrel = numberOfModulesInBarrel / numberOfModulesInLadder;
328 
329  const int firstEndcapPos = 4, firstEndcapNeg = isPhase2_ ? 16 : 7;
330  const float ladderFactor = 1.f / float(numberOfModulesInLadder);
331 
332  // compute ladder barycenter (only in global z) for the barrel
333  //
334  auto& aveGeom = averageGeometry_;
335  int il = 0;
336  for (int im = 0, nm = numberOfModulesInBarrel; im < nm; ++im) {
337  auto const& g = detParamsGPU_[im];
338  il = im / numberOfModulesInLadder;
339  assert(il < int(numberOfLaddersInBarrel));
340  auto z = g.frame.z();
341  aveGeom.ladderZ[il] += ladderFactor * z;
342  aveGeom.ladderMinZ[il] = std::min(aveGeom.ladderMinZ[il], z);
343  aveGeom.ladderMaxZ[il] = std::max(aveGeom.ladderMaxZ[il], z);
344  aveGeom.ladderX[il] += ladderFactor * g.frame.x();
345  aveGeom.ladderY[il] += ladderFactor * g.frame.y();
346  aveGeom.ladderR[il] += ladderFactor * sqrt(g.frame.x() * g.frame.x() + g.frame.y() * g.frame.y());
347  }
348  assert(il + 1 == int(numberOfLaddersInBarrel));
349  // add half_module and tolerance
350  const float module_length = isPhase2_ ? 4.345f : 6.7f;
351  constexpr float module_tolerance = 0.2f;
352  for (int il = 0, nl = numberOfLaddersInBarrel; il < nl; ++il) {
353  aveGeom.ladderMinZ[il] -= (0.5f * module_length - module_tolerance);
354  aveGeom.ladderMaxZ[il] += (0.5f * module_length - module_tolerance);
355  }
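// After this loop each barrel ladder has its average x/y/z/r and a z window
// [ladderMinZ, ladderMaxZ] widened by half a module length (minus a small
// tolerance), so the window covers the full ladder extent and not just the
// module centers.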
356 
357  // compute "max z" for first layer in endcap (should we restrict to the outermost ring?)
358  if (!isPhase2_) {
359  for (auto im = phase1PixelTopology::layerStart[firstEndcapPos];
360  im < phase1PixelTopology::layerStart[firstEndcapPos + 1];
361  ++im) {
362  auto const& g = detParamsGPU_[im];
363  aveGeom.endCapZ[0] = std::max(aveGeom.endCapZ[0], g.frame.z());
364  }
365  for (auto im = phase1PixelTopology::layerStart[firstEndcapNeg];
366  im < phase1PixelTopology::layerStart[firstEndcapNeg + 1];
367  ++im) {
368  auto const& g = detParamsGPU_[im];
369  aveGeom.endCapZ[1] = std::min(aveGeom.endCapZ[1], g.frame.z());
370  }
371  // correct for outer ring being closer
372  aveGeom.endCapZ[0] -= 1.5f;
373  aveGeom.endCapZ[1] += 1.5f;
374  } else {
375  for (auto im = phase2PixelTopology::layerStart[firstEndcapPos];
376  im < phase2PixelTopology::layerStart[firstEndcapPos + 1];
377  ++im) {
378  auto const& g = detParamsGPU_[im];
379  aveGeom.endCapZ[0] = std::max(aveGeom.endCapZ[0], g.frame.z());
380  }
381  for (auto im = phase2PixelTopology::layerStart[firstEndcapNeg];
382  im < phase2PixelTopology::layerStart[firstEndcapNeg + 1];
383  ++im) {
384  auto const& g = detParamsGPU_[im];
385  aveGeom.endCapZ[1] = std::min(aveGeom.endCapZ[1], g.frame.z());
386  }
387  }
388 #ifdef EDM_ML_DEBUG
389  for (int jl = 0, nl = numberOfLaddersInBarrel; jl < nl; ++jl) {
390  LogDebug("PixelCPEFast") << jl << ':' << aveGeom.ladderR[jl] << '/'
391  << std::sqrt(aveGeom.ladderX[jl] * aveGeom.ladderX[jl] +
392  aveGeom.ladderY[jl] * aveGeom.ladderY[jl])
393  << ',' << aveGeom.ladderZ[jl] << ',' << aveGeom.ladderMinZ[jl] << ','
394  << aveGeom.ladderMaxZ[jl] << '\n';
395  }
396  LogDebug("PixelCPEFast") << aveGeom.endCapZ[0] << ' ' << aveGeom.endCapZ[1];
397 #endif // EDM_ML_DEBUG
398 
399  // fill Layer and ladders geometry
400  memset(&layerGeometry_, 0, sizeof(pixelCPEforGPU::LayerGeometry));
401  if (!isPhase2_) {
405  } else {
409  }
410 }
411 
412 PixelCPEFast::GPUData::~GPUData() {
413  if (paramsOnGPU_d != nullptr) {
414  cudaFree((void*)paramsOnGPU_h.m_commonParams);
415  cudaFree((void*)paramsOnGPU_h.m_detParams);
416  cudaFree((void*)paramsOnGPU_h.m_averageGeometry);
417  cudaFree((void*)paramsOnGPU_h.m_layerGeometry);
418  cudaFree(paramsOnGPU_d);
419  }
420 }
421 
422 void PixelCPEFast::errorFromTemplates(DetParam const& theDetParam,
423  ClusterParamGeneric& theClusterParam,
424  float qclus) const {
425  float locBz = theDetParam.bz;
426  float locBx = theDetParam.bx;
427  LogDebug("PixelCPEFast") << "PixelCPEFast::localPosition(...) : locBz = " << locBz;
428 
429  theClusterParam.pixmx = std::numeric_limits<int>::max(); // max pixel charge for truncation of 2-D cluster
430 
431  theClusterParam.sigmay = -999.9; // CPE Generic y-error for multi-pixel cluster
432  theClusterParam.sigmax = -999.9; // CPE Generic x-error for multi-pixel cluster
433  theClusterParam.sy1 = -999.9; // CPE Generic y-error for single single-pixel cluster
434  theClusterParam.sy2 = -999.9; // CPE Generic y-error for single double-pixel cluster
435  theClusterParam.sx1 = -999.9; // CPE Generic x-error for single single-pixel cluster
436  theClusterParam.sx2 = -999.9; // CPE Generic x-error for single double-pixel cluster
437 
438  float dummy;
439 
440  SiPixelGenError gtempl(thePixelGenError_);
441  int gtemplID = theDetParam.detTemplateId;
442 
443  theClusterParam.qBin_ = gtempl.qbin(gtemplID,
444  theClusterParam.cotalpha,
445  theClusterParam.cotbeta,
446  locBz,
447  locBx,
448  qclus,
449  false,
450  theClusterParam.pixmx,
451  theClusterParam.sigmay,
452  dummy,
453  theClusterParam.sigmax,
454  dummy,
455  theClusterParam.sy1,
456  dummy,
457  theClusterParam.sy2,
458  dummy,
459  theClusterParam.sx1,
460  dummy,
461  theClusterParam.sx2,
462  dummy);
463 
464  theClusterParam.sigmax = theClusterParam.sigmax * micronsToCm;
465  theClusterParam.sx1 = theClusterParam.sx1 * micronsToCm;
466  theClusterParam.sx2 = theClusterParam.sx2 * micronsToCm;
467 
468  theClusterParam.sigmay = theClusterParam.sigmay * micronsToCm;
469  theClusterParam.sy1 = theClusterParam.sy1 * micronsToCm;
470  theClusterParam.sy2 = theClusterParam.sy2 * micronsToCm;
471 }
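// Note: SiPixelGenError::qbin() returns its errors in microns, hence the final
// conversion with micronsToCm; qBin_ is the charge-bin classification that the
// calling code uses to pick the appropriate error scaling.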
472 
473 //-----------------------------------------------------------------------------
474 //! Hit position in the local frame (in cm). Unlike other CPE's, this
475 //! one converts everything from the measurement frame (in channel numbers)
476 //! into the local frame (in centimeters).
477 //-----------------------------------------------------------------------------
478 LocalPoint PixelCPEFast::localPosition(DetParam const& theDetParam, ClusterParam& theClusterParamBase) const {
479  ClusterParamGeneric& theClusterParam = static_cast<ClusterParamGeneric&>(theClusterParamBase);
480 
481  assert(!theClusterParam.with_track_angle);
482 
483  if (useErrorsFromTemplates_) {
484  errorFromTemplates(theDetParam, theClusterParam, theClusterParam.theCluster->charge());
485  } else {
486  theClusterParam.qBin_ = 0;
487  }
488 
489  int q_f_X;
490  int q_l_X;
491  int q_f_Y;
492  int q_l_Y;
493  collect_edge_charges(theClusterParam, q_f_X, q_l_X, q_f_Y, q_l_Y, useErrorsFromTemplates_ && truncatePixelCharge_);
494 
494 
495  // do GPU like ...
496  pixelCPEforGPU::ClusParams cp;
497 
498  cp.minRow[0] = theClusterParam.theCluster->minPixelRow();
499  cp.maxRow[0] = theClusterParam.theCluster->maxPixelRow();
500  cp.minCol[0] = theClusterParam.theCluster->minPixelCol();
501  cp.maxCol[0] = theClusterParam.theCluster->maxPixelCol();
502 
503  cp.q_f_X[0] = q_f_X;
504  cp.q_l_X[0] = q_l_X;
505  cp.q_f_Y[0] = q_f_Y;
506  cp.q_l_Y[0] = q_l_Y;
507 
508  cp.charge[0] = theClusterParam.theCluster->charge();
509 
510  auto ind = theDetParam.theDet->index();
511  pixelCPEforGPU::position(commonParamsGPU_, detParamsGPU_[ind], cp, 0);
512  auto xPos = cp.xpos[0];
513  auto yPos = cp.ypos[0];
514 
515  // set the error (mind the APE ...)
516  pixelCPEforGPU::errorFromDB(commonParamsGPU_, detParamsGPU_[ind], cp, 0);
517  theClusterParam.sigmax = cp.xerr[0];
518  theClusterParam.sigmay = cp.yerr[0];
519 
520  LogDebug("PixelCPEFast") << " in PixelCPEFast:localPosition - pos = " << xPos << " " << yPos << " size "
521  << cp.maxRow[0] - cp.minRow[0] << ' ' << cp.maxCol[0] - cp.minCol[0];
522 
523  //--- Now put the two together
524  LocalPoint pos_in_local(xPos, yPos);
525  return pos_in_local;
526 }
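// Deliberately "GPU like": the hit position (and, below, the error) is computed with
// the same pixelCPEforGPU::position()/errorFromDB() routines that the CUDA kernels
// use, so the legacy CPU path and the GPU path share one implementation.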
527 
528 //============== INFLATED ERROR AND ERRORS FROM DB BELOW ================
529 
530 //-------------------------------------------------------------------------
531 // Hit error in the local frame
532 //-------------------------------------------------------------------------
533 LocalError PixelCPEFast::localError(DetParam const& theDetParam, ClusterParam& theClusterParamBase) const {
534  ClusterParamGeneric& theClusterParam = static_cast<ClusterParamGeneric&>(theClusterParamBase);
535 
536  auto xerr = theClusterParam.sigmax;
537  auto yerr = theClusterParam.sigmay;
538 
539  LogDebug("PixelCPEFast") << " errors " << xerr << " " << yerr;
540 
541  auto xerr_sq = xerr * xerr;
542  auto yerr_sq = yerr * yerr;
543 
544  return LocalError(xerr_sq, 0, yerr_sq);
545 }
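// The x/y errors were already filled by localPosition() (via errorFromDB, which also
// folds in the alignment position errors), so here they are only squared and
// returned with a vanishing xy correlation term.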
546 
547 void PixelCPEFast::fillPSetDescription(edm::ParameterSetDescription& desc) {
548  // call PixelCPEGenericBase fillPSetDescription to add common rechit errors
549  PixelCPEGenericBase::fillPSetDescription(desc);
550  desc.add<bool>("isPhase2", false);
551 }
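// The isPhase2 flag added here (default false) is the one read back in the
// constructor above to switch between the Phase-1 and Phase-2 pixel topologies.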