CAHitNtupletGeneratorOnGPU.cc
//
// Original Author: Felice Pantaleo, CERN
//

// #define GPU_DEBUG

#include <array>
#include <cassert>
#include <functional>
#include <vector>

// (CMSSW framework, CUDA service and local plugin includes elided in this listing)

namespace {

  template <typename T>
  T sqr(T x) {
    return x * x;
  }

  cAHitNtupletGenerator::QualityCuts makeQualityCuts(edm::ParameterSet const& pset) {
    auto coeff = pset.getParameter<std::vector<double>>("chi2Coeff");
    auto ptMax = pset.getParameter<double>("chi2MaxPt");
    if (coeff.size() != 2) {
      throw edm::Exception(edm::errors::Configuration,
                           "CAHitNtupletGeneratorOnGPU.trackQualityCuts.chi2Coeff must have 2 elements");
    }
    coeff[1] = (coeff[1] - coeff[0]) / log2(ptMax);
    return cAHitNtupletGenerator::QualityCuts{// polynomial coefficients for the pT-dependent chi2 cut
                                              {(float)coeff[0], (float)coeff[1], 0.f, 0.f},
                                              // max pT used to determine the chi2 cut
                                              (float)ptMax,
                                              // chi2 scale factor: 8 for broken line fit, ?? for Riemann fit
                                              (float)pset.getParameter<double>("chi2Scale"),
                                              // regional cuts for triplets
                                              {(float)pset.getParameter<double>("tripletMaxTip"),
                                               (float)pset.getParameter<double>("tripletMinPt"),
                                               (float)pset.getParameter<double>("tripletMaxZip")},
                                              // regional cuts for quadruplets
                                              {(float)pset.getParameter<double>("quadrupletMaxTip"),
                                               (float)pset.getParameter<double>("quadrupletMinPt"),
                                               (float)pset.getParameter<double>("quadrupletMaxZip")}};
  }

}  // namespace
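
// Illustration (not part of the original source): with the defaults declared in
// fillDescriptions() below -- chi2Coeff = {0.9, 1.8}, chi2MaxPt = 10 GeV, chi2Scale = 8 --
// the normalization above rescales the second coefficient to (1.8 - 0.9) / log2(10) ~ 0.27.
// The log2(ptMax) factor implies a cut of the form
//   chi2Cut(pt) = chi2Scale * (coeff[0] + coeff[1] * log2(min(pt, chi2MaxPt)))
// which evaluates to 8 * 0.9 = 7.2 at 1 GeV and 8 * 1.8 = 14.4 at and above 10 GeV.
// The cut itself is applied later, in the tuple classification kernels, not in this file.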

using namespace std;

CAHitNtupletGeneratorOnGPU::CAHitNtupletGeneratorOnGPU(const edm::ParameterSet& cfg, edm::ConsumesCollector&& iC)
    : m_params(cfg.getParameter<bool>("onGPU"),
               cfg.getParameter<unsigned int>("minHitsPerNtuplet"),
               cfg.getParameter<unsigned int>("maxNumberOfDoublets"),
               cfg.getParameter<unsigned int>("minHitsForSharingCut"),
               cfg.getParameter<bool>("useRiemannFit"),
               cfg.getParameter<bool>("fitNas4"),
               cfg.getParameter<bool>("includeJumpingForwardDoublets"),
               cfg.getParameter<bool>("earlyFishbone"),
               cfg.getParameter<bool>("lateFishbone"),
               cfg.getParameter<bool>("idealConditions"),
               cfg.getParameter<bool>("fillStatistics"),
               cfg.getParameter<bool>("doClusterCut"),
               cfg.getParameter<bool>("doZ0Cut"),
               cfg.getParameter<bool>("doPtCut"),
               cfg.getParameter<bool>("doSharedHitCut"),
               cfg.getParameter<bool>("dupPassThrough"),
               cfg.getParameter<bool>("useSimpleTripletCleaner"),
               cfg.getParameter<double>("ptmin"),
               cfg.getParameter<double>("CAThetaCutBarrel"),
               cfg.getParameter<double>("CAThetaCutForward"),
               cfg.getParameter<double>("hardCurvCut"),
               cfg.getParameter<double>("dcaCutInnerTriplet"),
               cfg.getParameter<double>("dcaCutOuterTriplet"),
               makeQualityCuts(cfg.getParameterSet("trackQualityCuts"))) {
#ifdef DUMP_GPU_TK_TUPLES
  printf("TK: %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s\n",
         "tid",
         "qual",
         "nh",
         "nl",
         "charge",
         "pt",
         "eta",
         "phi",
         "tip",
         "zip",
         "chi2",
         "h1",
         "h2",
         "h3",
         "h4",
         "h5",
         "hn");
#endif

  if (m_params.onGPU_) {
    // allocate the device memory for the statistics counters only if CUDA is available
    edm::Service<CUDAService> cs;
    if (cs and cs->enabled()) {
      cudaCheck(cudaMalloc(&m_counters, sizeof(Counters)));
      cudaCheck(cudaMemset(m_counters, 0, sizeof(Counters)));
    }
  } else {
    m_counters = new Counters();
    memset(m_counters, 0, sizeof(Counters));
  }
}

CAHitNtupletGeneratorOnGPU::~CAHitNtupletGeneratorOnGPU() {
  if (m_params.onGPU_) {
    // print the GPU statistics and free the device memory only if CUDA is available
    edm::Service<CUDAService> cs;
    if (cs and cs->enabled()) {
      if (m_params.doStats_) {
        // crash on multi-gpu processes
        CAHitNtupletGeneratorKernelsGPU::printCounters(m_counters);
      }
      cudaFree(m_counters);
    }
  } else {
    if (m_params.doStats_) {
      CAHitNtupletGeneratorKernelsCPU::printCounters(m_counters);
    }
    delete m_counters;
  }
}

void CAHitNtupletGeneratorOnGPU::fillDescriptions(edm::ParameterSetDescription& desc) {
  // 87 cm/GeV = 1/(3.8T * 0.3)
  // take less than the radius given by the hardPtCut and reject everything below
  // auto hardCurvCut = 1.f/(0.35 * 87.f);
  desc.add<double>("ptmin", 0.9f)->setComment("Cut on minimum pt");
  desc.add<double>("CAThetaCutBarrel", 0.002f)->setComment("Cut on RZ alignment for Barrel");
  desc.add<double>("CAThetaCutForward", 0.003f)->setComment("Cut on RZ alignment for Forward");
  desc.add<double>("hardCurvCut", 1.f / (0.35 * 87.f))->setComment("Cut on minimum curvature");
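  // Illustration (not in the original source): 1.f / (0.35 * 87.f) ~ 0.033 cm^-1, i.e. the
  // curvature of a track of roughly 0.35 GeV in the 3.8 T field, using R [cm] ~ 87 * pT [GeV]
  // from pT = 0.3 * B * R as noted at the top of this function.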
  desc.add<double>("dcaCutInnerTriplet", 0.15f)->setComment("Cut on origin radius when the inner hit is on BPix1");
  desc.add<double>("dcaCutOuterTriplet", 0.25f)->setComment("Cut on origin radius when the outer hit is on BPix1");
  desc.add<bool>("earlyFishbone", true);
  desc.add<bool>("lateFishbone", false);
  desc.add<bool>("idealConditions", true);
  desc.add<bool>("fillStatistics", false);
  desc.add<unsigned int>("minHitsPerNtuplet", 4);
  desc.add<unsigned int>("maxNumberOfDoublets", caConstants::maxNumberOfDoublets);
  desc.add<unsigned int>("minHitsForSharingCut", 10)
      ->setComment("Maximum number of hits in a tuple to clean also if the shared hit is on bpx1");
  desc.add<bool>("includeJumpingForwardDoublets", false);
  desc.add<bool>("fitNas4", false)->setComment("fit only 4 hits out of N");
  desc.add<bool>("doClusterCut", true);
  desc.add<bool>("doZ0Cut", true);
  desc.add<bool>("doPtCut", true);
  desc.add<bool>("useRiemannFit", false)->setComment("true for Riemann, false for BrokenLine");
  desc.add<bool>("doSharedHitCut", true)->setComment("Shared-hit ntuplet cleaning");
  desc.add<bool>("dupPassThrough", false)->setComment("Do not reject duplicates");
  desc.add<bool>("useSimpleTripletCleaner", true)->setComment("use alternate implementation");

  edm::ParameterSetDescription trackQualityCuts;
  trackQualityCuts.add<double>("chi2MaxPt", 10.)->setComment("max pT used to determine the pT-dependent chi2 cut");
  trackQualityCuts.add<std::vector<double>>("chi2Coeff", {0.9, 1.8})
      ->setComment("chi2 at 1 GeV and at ptMax and above");
  trackQualityCuts.add<double>("chi2Scale", 8.)
      ->setComment(
          "Factor to multiply the pT-dependent chi2 cut (currently: 8 for the broken line fit, ?? for the Riemann "
          "fit)");
  trackQualityCuts.add<double>("tripletMinPt", 0.5)->setComment("Min pT for triplets, in GeV");
  trackQualityCuts.add<double>("tripletMaxTip", 0.3)->setComment("Max |Tip| for triplets, in cm");
  trackQualityCuts.add<double>("tripletMaxZip", 12.)->setComment("Max |Zip| for triplets, in cm");
  trackQualityCuts.add<double>("quadrupletMinPt", 0.3)->setComment("Min pT for quadruplets, in GeV");
  trackQualityCuts.add<double>("quadrupletMaxTip", 0.5)->setComment("Max |Tip| for quadruplets, in cm");
  trackQualityCuts.add<double>("quadrupletMaxZip", 12.)->setComment("Max |Zip| for quadruplets, in cm");
  desc.add<edm::ParameterSetDescription>("trackQualityCuts", trackQualityCuts)
      ->setComment(
          "Quality cuts based on the results of the track fit:\n - apply a pT-dependent chi2 cut;\n - apply \"region "
          "cuts\" based on the fit results (pT, Tip, Zip).");
}

PixelTrackHeterogeneous CAHitNtupletGeneratorOnGPU::makeTuplesAsync(TrackingRecHit2DGPU const& hits_d,
                                                                    float bfield,
                                                                    cudaStream_t stream) const {
  PixelTrackHeterogeneous tracks(cms::cuda::make_device_unique<pixelTrack::TrackSoA>(stream));

  auto* soa = tracks.get();
  assert(soa);

  CAHitNtupletGeneratorKernelsGPU kernels(m_params);
  kernels.setCounters(m_counters);
  kernels.allocateOnGPU(hits_d.nHits(), stream);

  kernels.buildDoublets(hits_d, stream);
  kernels.launchKernels(hits_d, soa, stream);

  HelixFitOnGPU fitter(bfield, m_params.fitNas4_);
  fitter.allocateOnGPU(&(soa->hitIndices), kernels.tupleMultiplicity(), soa);
  if (m_params.useRiemannFit_) {
    fitter.launchRiemannKernels(hits_d.view(), hits_d.nHits(), caConstants::maxNumberOfQuadruplets, stream);
  } else {
    fitter.launchBrokenLineKernels(hits_d.view(), hits_d.nHits(), caConstants::maxNumberOfQuadruplets, stream);
  }
  kernels.classifyTuples(hits_d, soa, stream);

#ifdef GPU_DEBUG
  cudaDeviceSynchronize();
  cudaCheck(cudaGetLastError());
  std::cout << "finished building pixel tracks on GPU" << std::endl;
#endif

  return tracks;
}

PixelTrackHeterogeneous CAHitNtupletGeneratorOnGPU::makeTuples(TrackingRecHit2DCPU const& hits_d, float bfield) const {
  PixelTrackHeterogeneous tracks(std::make_unique<pixelTrack::TrackSoA>());

  auto* soa = tracks.get();
  assert(soa);

  CAHitNtupletGeneratorKernelsCPU kernels(m_params);
  kernels.setCounters(m_counters);
  kernels.allocateOnGPU(hits_d.nHits(), nullptr);

  kernels.buildDoublets(hits_d, nullptr);
  kernels.launchKernels(hits_d, soa, nullptr);

  if (0 == hits_d.nHits())
    return tracks;

  // now fit
  HelixFitOnGPU fitter(bfield, m_params.fitNas4_);
  fitter.allocateOnGPU(&(soa->hitIndices), kernels.tupleMultiplicity(), soa);

  if (m_params.useRiemannFit_) {
    fitter.launchRiemannKernelsOnCPU(hits_d.view(), hits_d.nHits(), caConstants::maxNumberOfQuadruplets);
  } else {
    fitter.launchBrokenLineKernelsOnCPU(hits_d.view(), hits_d.nHits(), caConstants::maxNumberOfQuadruplets);
  }

  kernels.classifyTuples(hits_d, soa, nullptr);

#ifdef GPU_DEBUG
  std::cout << "finished building pixel tracks on CPU" << std::endl;
#endif

  return tracks;
}