CMS 3D CMS Logo

 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Properties Friends Macros Groups Pages
CAHitNtupletGeneratorOnGPU.cc
Go to the documentation of this file.
1 //
2 // Original Author: Felice Pantaleo, CERN
3 //
4 
5 // #define GPU_DEBUG
6 
7 #include <array>
8 #include <cassert>
9 #include <functional>
10 #include <vector>
11 
22 
24 
25 namespace {
26 
27  template <typename T>
28  T sqr(T x) {
29  return x * x;
30  }
31 
  // Translate the "trackQualityCuts" ParameterSet into the QualityCuts struct
  // consumed by the CA ntuplet kernels.
  // NOTE(review): this Doxygen scrape dropped the function signature (orig. line 32)
  // and the exception statement (orig. line 36) that emits the message below —
  // confirm against the original file before editing.
33  auto coeff = pset.getParameter<std::vector<double>>("chi2Coeff");
34  auto ptMax = pset.getParameter<double>("chi2MaxPt");
  // Exactly two coefficients are expected: the chi2 cut value at 1 GeV and at ptMax.
35  if (coeff.size() != 2) {
37  "CAHitNtupletGeneratorOnGPU.trackQualityCuts.chi2Coeff must have 2 elements");
38  }
  // Re-express the second coefficient as a slope: (chi2@ptMax - chi2@1GeV)/log2(ptMax).
  // Presumably the cut is later evaluated as coeff[0] + coeff[1]*log2(pT) — verify
  // against the kernel code that consumes QualityCuts.
39  coeff[1] = (coeff[1] - coeff[0]) / log2(ptMax);
40  return cAHitNtupletGenerator::QualityCuts{// polynomial coefficients for the pT-dependent chi2 cut
41  {(float)coeff[0], (float)coeff[1], 0.f, 0.f},
42  // max pT used to determine the chi2 cut
43  (float)ptMax,
44  // chi2 scale factor: 8 for broken line fit, ?? for Riemann fit
45  (float)pset.getParameter<double>("chi2Scale"),
46  // regional cuts for triplets
47  {(float)pset.getParameter<double>("tripletMaxTip"),
48  (float)pset.getParameter<double>("tripletMinPt"),
49  (float)pset.getParameter<double>("tripletMaxZip")},
50  // regional cuts for quadruplets
51  {(float)pset.getParameter<double>("quadrupletMaxTip"),
52  (float)pset.getParameter<double>("quadrupletMinPt"),
53  (float)pset.getParameter<double>("quadrupletMaxZip")}};
54  }
55 
56 } // namespace
57 
58 using namespace std;
59 
61  : m_params(cfg.getParameter<bool>("onGPU"),
62  cfg.getParameter<unsigned int>("minHitsPerNtuplet"),
63  cfg.getParameter<unsigned int>("maxNumberOfDoublets"),
64  cfg.getParameter<unsigned int>("minHitsForSharingCut"),
65  cfg.getParameter<bool>("useRiemannFit"),
66  cfg.getParameter<bool>("fitNas4"),
67  cfg.getParameter<bool>("includeJumpingForwardDoublets"),
68  cfg.getParameter<bool>("earlyFishbone"),
69  cfg.getParameter<bool>("lateFishbone"),
70  cfg.getParameter<bool>("idealConditions"),
71  cfg.getParameter<bool>("fillStatistics"),
72  cfg.getParameter<bool>("doClusterCut"),
73  cfg.getParameter<bool>("doZ0Cut"),
74  cfg.getParameter<bool>("doPtCut"),
75  cfg.getParameter<bool>("doSharedHitCut"),
76  cfg.getParameter<bool>("dupPassThrough"),
77  cfg.getParameter<bool>("useSimpleTripletCleaner"),
78  cfg.getParameter<double>("ptmin"),
79  cfg.getParameter<double>("CAThetaCutBarrel"),
80  cfg.getParameter<double>("CAThetaCutForward"),
81  cfg.getParameter<double>("hardCurvCut"),
82  cfg.getParameter<double>("dcaCutInnerTriplet"),
83  cfg.getParameter<double>("dcaCutOuterTriplet"),
84  makeQualityCuts(cfg.getParameterSet("trackQualityCuts"))) {
85 #ifdef DUMP_GPU_TK_TUPLES
86  printf("TK: %s %s % %s %s %s %s %s %s %s %s %s %s %s %s %s\n",
87  "tid",
88  "qual",
89  "nh",
90  "charge",
91  "pt",
92  "eta",
93  "phi",
94  "tip",
95  "zip",
96  "chi2",
97  "h1",
98  "h2",
99  "h3",
100  "h4",
101  "h5");
102 #endif
103 
104  if (m_params.onGPU_) {
105  // allocate pinned host memory only if CUDA is available
107  if (cs and cs->enabled()) {
108  cudaCheck(cudaMalloc(&m_counters, sizeof(Counters)));
109  cudaCheck(cudaMemset(m_counters, 0, sizeof(Counters)));
110  }
111  } else {
112  m_counters = new Counters();
113  memset(m_counters, 0, sizeof(Counters));
114  }
115 }
116 
  // Destructor: optionally dumps statistics, then releases the counters with the
  // deallocator matching the constructor's allocation path (cudaFree vs delete).
  // NOTE(review): this scrape is missing the destructor signature (orig. line 117),
  // the CUDA service lookup declaring `cs` (orig. line 120), and the
  // printCounters(...) calls inside the two doStats_ branches (orig. lines 124, 130)
  // — confirm against the original file before editing.
118  if (m_params.onGPU_) {
119  // print the gpu statistics and free pinned host memory only if CUDA is available
121  if (cs and cs->enabled()) {
122  if (m_params.doStats_) {
123  // crash on multi-gpu processes
125  }
  // Matches the cudaMalloc in the constructor's GPU branch.
126  cudaFree(m_counters);
127  }
128  } else {
129  if (m_params.doStats_) {
131  }
  // Matches the `new Counters()` in the constructor's CPU branch.
132  delete m_counters;
133  }
134 }
135 
  // Describe all configuration parameters and their defaults, including the
  // nested "trackQualityCuts" ParameterSetDescription consumed by makeQualityCuts.
  // NOTE(review): the function signature (orig. line 136) is missing from this
  // scrape — presumably `static void fillDescriptions(edm::ParameterSetDescription&)`
  // per the index at the bottom of the page; confirm before editing.
  // NOTE(review): several defaults use float-suffixed literals (0.9f, 0.002f, ...)
  // for parameters declared as double; 0.9f != 0.9 exactly, so "fixing" the
  // suffixes would subtly change the recorded defaults — leave as-is.
137  // 87 cm/GeV = 1/(3.8T * 0.3)
138  // take less than radius given by the hardPtCut and reject everything below
139  // auto hardCurvCut = 1.f/(0.35 * 87.f);
140  desc.add<double>("ptmin", 0.9f)->setComment("Cut on minimum pt");
141  desc.add<double>("CAThetaCutBarrel", 0.002f)->setComment("Cut on RZ alignement for Barrel");
142  desc.add<double>("CAThetaCutForward", 0.003f)->setComment("Cut on RZ alignment for Forward");
143  desc.add<double>("hardCurvCut", 1.f / (0.35 * 87.f))->setComment("Cut on minimum curvature");
144  desc.add<double>("dcaCutInnerTriplet", 0.15f)->setComment("Cut on origin radius when the inner hit is on BPix1");
145  desc.add<double>("dcaCutOuterTriplet", 0.25f)->setComment("Cut on origin radius when the outer hit is on BPix1");
146  desc.add<bool>("earlyFishbone", true);
147  desc.add<bool>("lateFishbone", false);
148  desc.add<bool>("idealConditions", true);
149  desc.add<bool>("fillStatistics", false);
150  desc.add<unsigned int>("minHitsPerNtuplet", 4);
151  desc.add<unsigned int>("maxNumberOfDoublets", caConstants::maxNumberOfDoublets);
152  desc.add<unsigned int>("minHitsForSharingCut", 10)
153  ->setComment("Maximum number of hits in a tuple to clean also if the shared hit is on bpx1");
154  desc.add<bool>("includeJumpingForwardDoublets", false);
155  desc.add<bool>("fitNas4", false)->setComment("fit only 4 hits out of N");
156  desc.add<bool>("doClusterCut", true);
157  desc.add<bool>("doZ0Cut", true);
158  desc.add<bool>("doPtCut", true);
159  desc.add<bool>("useRiemannFit", false)->setComment("true for Riemann, false for BrokenLine");
160  desc.add<bool>("doSharedHitCut", true)->setComment("Sharing hit nTuples cleaning");
161  desc.add<bool>("dupPassThrough", false)->setComment("Do not reject duplicate");
162  desc.add<bool>("useSimpleTripletCleaner", true)->setComment("use alternate implementation");
163 
  // Nested description: the quality cuts applied after the track fit.
  // These keys must stay in sync with the getParameter calls in makeQualityCuts.
164  edm::ParameterSetDescription trackQualityCuts;
165  trackQualityCuts.add<double>("chi2MaxPt", 10.)->setComment("max pT used to determine the pT-dependent chi2 cut");
166  trackQualityCuts.add<std::vector<double>>("chi2Coeff", {0.9, 1.8})->setComment("chi2 at 1GeV and at ptMax above");
167  trackQualityCuts.add<double>("chi2Scale", 8.)
168  ->setComment(
169  "Factor to multiply the pT-dependent chi2 cut (currently: 8 for the broken line fit, ?? for the Riemann "
170  "fit)");
171  trackQualityCuts.add<double>("tripletMinPt", 0.5)->setComment("Min pT for triplets, in GeV");
172  trackQualityCuts.add<double>("tripletMaxTip", 0.3)->setComment("Max |Tip| for triplets, in cm");
173  trackQualityCuts.add<double>("tripletMaxZip", 12.)->setComment("Max |Zip| for triplets, in cm");
174  trackQualityCuts.add<double>("quadrupletMinPt", 0.3)->setComment("Min pT for quadruplets, in GeV");
175  trackQualityCuts.add<double>("quadrupletMaxTip", 0.5)->setComment("Max |Tip| for quadruplets, in cm");
176  trackQualityCuts.add<double>("quadrupletMaxZip", 12.)->setComment("Max |Zip| for quadruplets, in cm");
177  desc.add<edm::ParameterSetDescription>("trackQualityCuts", trackQualityCuts)
178  ->setComment(
179  "Quality cuts based on the results of the track fit:\n - apply a pT-dependent chi2 cut;\n - apply \"region "
180  "cuts\" based on the fit results (pT, Tip, Zip).");
181 }
182 
  // GPU path: build CA ntuplets asynchronously on `stream` and return the track
  // SoA in device memory wrapped in PixelTrackHeterogeneous.
  // NOTE(review): this scrape is missing the start of the signature (orig. line 183;
  // the index below shows makeTuplesAsync(TrackingRecHit2DGPU const&, float,
  // cudaStream_t) const), the CAHitNtupletGeneratorKernelsGPU construction
  // (orig. line 191), and the Riemann/BrokenLine launch calls in the two fit
  // branches (orig. lines 201, 203) — confirm against the original file.
184  float bfield,
185  cudaStream_t stream) const {
  // Allocate the output SoA in device memory, stream-ordered.
186  PixelTrackHeterogeneous tracks(cms::cuda::make_device_unique<pixelTrack::TrackSoA>(stream));
187 
188  auto* soa = tracks.get();
189  assert(soa);
190 
192  kernels.setCounters(m_counters);
193  kernels.allocateOnGPU(hits_d.nHits(), stream);
194 
  // Doublet building and CA cell connection/ntuplet finding on the given stream.
195  kernels.buildDoublets(hits_d, stream);
196  kernels.launchKernels(hits_d, soa, stream);
197 
  // Fit the found tuples; the Riemann/BrokenLine choice comes from configuration.
198  HelixFitOnGPU fitter(bfield, m_params.fitNas4_);
199  fitter.allocateOnGPU(&(soa->hitIndices), kernels.tupleMultiplicity(), soa);
200  if (m_params.useRiemannFit_) {
202  } else {
204  }
  // Apply quality cuts / duplicate cleaning to the fitted tuples.
205  kernels.classifyTuples(hits_d, soa, stream);
206 
207 #ifdef GPU_DEBUG
  // Debug-only: synchronize so any asynchronous kernel error surfaces here.
208  cudaDeviceSynchronize();
209  cudaCheck(cudaGetLastError());
210  std::cout << "finished building pixel tracks on GPU" << std::endl;
211 #endif
212 
213  return tracks;
214 }
215 
  // CPU path: same pipeline as makeTuplesAsync but with host allocations and a
  // null stream (the index below shows makeTuples(TrackingRecHit2DCPU const&,
  // float) const).
  // NOTE(review): this scrape is missing the signature (orig. line 216), the
  // CAHitNtupletGeneratorKernelsCPU construction (orig. line 222), and the
  // *KernelsOnCPU calls in the two fit branches (orig. lines 237, 239) —
  // confirm against the original file.
217  PixelTrackHeterogeneous tracks(std::make_unique<pixelTrack::TrackSoA>());
218 
219  auto* soa = tracks.get();
220  assert(soa);
221 
223  kernels.setCounters(m_counters);
224  kernels.allocateOnGPU(hits_d.nHits(), nullptr);
225 
226  kernels.buildDoublets(hits_d, nullptr);
227  kernels.launchKernels(hits_d, soa, nullptr);
228 
  // Early return for empty events. NOTE(review): this check sits AFTER
  // buildDoublets/launchKernels, unlike a guard before them — presumably those
  // calls are no-ops for zero hits; verify before reordering.
229  if (0 == hits_d.nHits())
230  return tracks;
231 
232  // now fit
233  HelixFitOnGPU fitter(bfield, m_params.fitNas4_);
234  fitter.allocateOnGPU(&(soa->hitIndices), kernels.tupleMultiplicity(), soa);
235 
236  if (m_params.useRiemannFit_) {
238  } else {
240  }
241 
  // Apply quality cuts / duplicate cleaning to the fitted tuples.
242  kernels.classifyTuples(hits_d, soa, nullptr);
243 
244 #ifdef GPU_DEBUG
245  std::cout << "finished building pixel tracks on CPU" << std::endl;
246 #endif
247 
248  return tracks;
249 }
void launchKernels(HitsOnCPU const &hh, TkSoA *tuples_d, cudaStream_t cudaStream)
void setComment(std::string const &value)
tuple cfg
Definition: looper.py:296
void launchBrokenLineKernelsOnCPU(HitsView const *hv, uint32_t nhits, uint32_t maxNumberOfTuples)
void buildDoublets(HitsOnCPU const &hh, cudaStream_t stream)
unique_ptr< ClusterSequence > cs
constexpr uint32_t maxNumberOfQuadruplets
Definition: CAConstants.h:42
void classifyTuples(HitsOnCPU const &hh, TkSoA *tuples_d, cudaStream_t cudaStream)
ParameterSet const & getParameterSet(ParameterSetID const &id)
auto const & tracks
cannot be loose
uint32_t T const *__restrict__ uint32_t const *__restrict__ int32_t int Histo::index_type cudaStream_t stream
int sqr(const T &t)
assert(be >=bs)
static void printCounters(Counters const *counters)
auto const * get() const
TupleMultiplicity const * tupleMultiplicity() const
ZVertexSoA * soa
void allocateOnGPU(int32_t nHits, cudaStream_t stream)
bool enabled() const
Definition: CUDAService.h:22
printf("params %d %f %f %f\n", minT, eps, errmax, chi2max)
static void fillDescriptions(edm::ParameterSetDescription &desc)
uint16_t const *__restrict__ x
Definition: gpuClustering.h:39
cAHitNtupletGenerator::Counters Counters
ParameterDescriptionBase * add(U const &iLabel, T const &value)
constexpr uint32_t maxNumberOfDoublets
Definition: CAConstants.h:37
void launchRiemannKernelsOnCPU(HitsView const *hv, uint32_t nhits, uint32_t maxNumberOfTuples)
void launchRiemannKernels(HitsView const *hv, uint32_t nhits, uint32_t maxNumberOfTuples, cudaStream_t cudaStream)
CAHitNtupletGeneratorOnGPU(const edm::ParameterSet &cfg, edm::ConsumesCollector &&iC)
T getParameter(std::string const &) const
Definition: ParameterSet.h:303
void launchBrokenLineKernels(HitsView const *hv, uint32_t nhits, uint32_t maxNumberOfTuples, cudaStream_t cudaStream)
void allocateOnGPU(Tuples const *tuples, TupleMultiplicity const *tupleMultiplicity, OutputSoA *outputSoA)
Definition: HelixFitOnGPU.cc:4
PixelTrackHeterogeneous makeTuples(TrackingRecHit2DCPU const &hits_d, float bfield) const
tuple cout
Definition: gather_cfg.py:144
#define cudaCheck(ARG,...)
Definition: cudaCheck.h:69
long double T
PixelTrackHeterogeneous makeTuplesAsync(TrackingRecHit2DGPU const &hits_d, float bfield, cudaStream_t stream) const