
pixelClustering::ClusterChargeCut< TrackerTraits > Struct Template Reference

#include <ClusterChargeCut.h>

Public Member Functions

template<typename TAcc >
ALPAKA_FN_ACC void operator() (TAcc const &acc, SiPixelDigisSoAView digi_view, SiPixelClustersSoAView clus_view, SiPixelClusterThresholds clusterThresholds, const uint32_t numElements) const
 

Detailed Description

template<typename TrackerTraits>
struct pixelClustering::ClusterChargeCut< TrackerTraits >

Definition at line 20 of file ClusterChargeCut.h.

Member Function Documentation

◆ operator()()

template<typename TrackerTraits >
template<typename TAcc >
ALPAKA_FN_ACC void pixelClustering::ClusterChargeCut< TrackerTraits >::operator() ( TAcc const &  acc,
SiPixelDigisSoAView  digi_view,
SiPixelClustersSoAView  clus_view,
SiPixelClusterThresholds  clusterThresholds,
const uint32_t  numElements 
) const
inline

Definition at line 22 of file ClusterChargeCut.h.

References gpuClustering::adc, ALPAKA_ACCELERATOR_NAMESPACE::caPixelDoublets::ALPAKA_ASSERT_ACC(), cms::cudacompat::atomicAdd(), cms::alpakatools::blockPrefixScan(), ALCARECOTkAlJpsiMuMu_cff::charge, DMR_cfg::chargeCut, ALPAKA_ACCELERATOR_NAMESPACE::brokenline::constexpr(), gpuClustering::endModule, SiPixelClusterThresholds::getThresholdForLayerOnCondition(), caHitNtupletGeneratorKernels::good, mps_fire::i, cms::alpakatools::independent_group_elements(), cms::alpakatools::independent_groups(), pixelClustering::invalidModuleId, phase1PixelTopology::layerStart, pixelClustering::maxNumClustersPerModules, pixelClustering::maxNumModules, SiStripPI::min, callgraph::module, gpuClustering::moduleId, gpuClustering::newclusId, phase1PixelTopology::numberOfModules, gpuClustering::numElements, hltrates_dqm_sourceclient-live_cfg::offset, convertSQLiteXML::ok, cms::alpakatools::once_per_block(), cms::alpakatools::once_per_grid(), Digi_cff::pdigi, gpuClustering::startBPIX2, and ALPAKA_ACCELERATOR_NAMESPACE::vertexFinder::ws.
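
The functor is a device-side kernel: it is not called directly, but enqueued on an alpaka queue with a one-dimensional work division in which each group (block) processes one pixel module. The sketch below is not part of ClusterChargeCut.h; the helper name enqueueClusterChargeCut, the 256 elements per group, and the assumption that the SoA view and threshold headers are already included are illustrative only.

// Hypothetical host-side helper (illustration, not part of CMSSW):
// enqueue the charge-cut kernel with one group (block) per pixel module.
// Assumes ClusterChargeCut.h and the SoA view / threshold headers are included.
#include <cstdint>
#include "HeterogeneousCore/AlpakaInterface/interface/config.h"
#include "HeterogeneousCore/AlpakaInterface/interface/workdivision.h"

template <typename TrackerTraits>
void enqueueClusterChargeCut(ALPAKA_ACCELERATOR_NAMESPACE::Queue& queue,
                             SiPixelDigisSoAView digiView,
                             SiPixelClustersSoAView clusView,
                             SiPixelClusterThresholds clusterThresholds,
                             uint32_t nDigis) {
  using namespace ALPAKA_ACCELERATOR_NAMESPACE;
  // one group per module; 256 elements per group is an illustrative choice
  const auto workDiv = cms::alpakatools::make_workdiv<Acc1D>(TrackerTraits::numberOfModules, 256);
  alpaka::exec<Acc1D>(queue,
                      workDiv,
                      pixelClustering::ClusterChargeCut<TrackerTraits>{},
                      digiView,
                      clusView,
                      clusterThresholds,
                      nDigis);
}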

{
#ifdef GPU_DEBUG
  if (cms::alpakatools::once_per_grid(acc)) {
    printf("All digis before cut: \n");
    for (uint32_t i = 0; i < numElements; i++)
      printf("%d %d %d %d %d \n",
             i,
             digi_view[i].rawIdArr(),
             digi_view[i].clus(),
             digi_view[i].pdigi(),
             digi_view[i].adc());
  }
#endif

  auto& charge = alpaka::declareSharedVar<int32_t[maxNumClustersPerModules], __COUNTER__>(acc);
  auto& ok = alpaka::declareSharedVar<uint8_t[maxNumClustersPerModules], __COUNTER__>(acc);
  auto& newclusId = alpaka::declareSharedVar<uint16_t[maxNumClustersPerModules], __COUNTER__>(acc);

  // first module of BPIX layer 2: modules below this index use the layer-1 threshold
  constexpr int32_t startBPIX2 = TrackerTraits::layerStart[1];

  [[maybe_unused]] constexpr int32_t nMaxModules = TrackerTraits::numberOfModules;
  ALPAKA_ASSERT_ACC(nMaxModules < maxNumModules);

  auto endModule = clus_view[0].moduleStart();

  // one alpaka group (block) per pixel module
  for (auto module : cms::alpakatools::independent_groups(acc, endModule)) {
    auto firstPixel = clus_view[1 + module].moduleStart();
    auto thisModuleId = digi_view[firstPixel].moduleId();
    while (thisModuleId == invalidModuleId and firstPixel < numElements) {
      // skip invalid or duplicate pixels
      ++firstPixel;
      thisModuleId = digi_view[firstPixel].moduleId();
    }
    if (firstPixel >= numElements) {
      // reached the end of the input while skipping the invalid pixels, nothing left to do
      break;
    }
    if (thisModuleId != clus_view[module].moduleId()) {
      // reached the end of the module while skipping the invalid pixels, skip this module
      continue;
    }
    ALPAKA_ASSERT_ACC(thisModuleId < nMaxModules);

    uint32_t nclus = clus_view[thisModuleId].clusInModule();
    if (nclus == 0)
      return;

    if (cms::alpakatools::once_per_block(acc) && nclus > maxNumClustersPerModules)
      printf("Warning: too many clusters in module %u in block %u: %u > %d\n",
             thisModuleId,
             module,
             nclus,
             maxNumClustersPerModules);

    if (nclus > maxNumClustersPerModules) {
      // remove excess  FIXME find a way to cut charge first....
      for (auto i : cms::alpakatools::independent_group_elements(acc, firstPixel, numElements)) {
        if (digi_view[i].moduleId() == invalidModuleId)
          continue;  // not valid
        if (digi_view[i].moduleId() != thisModuleId)
          break;  // end of module
        if (digi_view[i].clus() >= maxNumClustersPerModules) {
          digi_view[i].moduleId() = invalidModuleId;
          digi_view[i].clus() = invalidModuleId;
        }
      }
      nclus = maxNumClustersPerModules;
    }

#ifdef GPU_DEBUG
    if (thisModuleId % 100 == 1)
      if (cms::alpakatools::once_per_block(acc))
        printf("start cluster charge cut for module %d in block %d\n", thisModuleId, module);
#endif

    // accumulate the charge of each cluster from the ADC counts of its pixels
    for (auto i : cms::alpakatools::independent_group_elements(acc, nclus)) {
      charge[i] = 0;
    }
    alpaka::syncBlockThreads(acc);

    for (auto i : cms::alpakatools::independent_group_elements(acc, firstPixel, numElements)) {
      if (digi_view[i].moduleId() == invalidModuleId)
        continue;  // not valid
      if (digi_view[i].moduleId() != thisModuleId)
        break;  // end of module
      alpaka::atomicAdd(acc,
                        &charge[digi_view[i].clus()],
                        static_cast<int32_t>(digi_view[i].adc()),
                        alpaka::hierarchy::Threads{});
    }
    alpaka::syncBlockThreads(acc);

    // pick the layer-1 or outer-layer threshold and flag the clusters that pass it
    auto chargeCut = clusterThresholds.getThresholdForLayerOnCondition(thisModuleId < startBPIX2);

    bool good = true;
    for (auto i : cms::alpakatools::independent_group_elements(acc, nclus)) {
      newclusId[i] = ok[i] = (charge[i] >= chargeCut) ? 1 : 0;
      if (0 == ok[i])
        good = false;
#ifdef GPU_DEBUG
      printf("Cutting pix %d in module %d newId %d ok? %d charge %d cut %d -> good %d \n",
             i,
             thisModuleId,
             newclusId[i],
             ok[i],
             charge[i],
             chargeCut,
             good);
#endif
    }
    // if all clusters are above threshold, do nothing
    if (alpaka::syncBlockThreadsPredicate<alpaka::BlockAnd>(acc, good))
      continue;

    // renumber the surviving clusters with a block-wide inclusive prefix scan of the flags
    auto& ws = alpaka::declareSharedVar<uint16_t[32], __COUNTER__>(acc);
    // FIXME this value should come from cms::alpakatools::blockPrefixScan itself
    constexpr uint32_t maxThreads = 1024;
    auto minClust = std::min(nclus, maxThreads);

    cms::alpakatools::blockPrefixScan(acc, newclusId, minClust, ws);
    if constexpr (maxNumClustersPerModules > maxThreads)  // only if needed
    {
      for (uint32_t offset = maxThreads; offset < nclus; offset += maxThreads) {
        cms::alpakatools::blockPrefixScan(acc, newclusId + offset, nclus - offset, ws);
        for (uint32_t i : cms::alpakatools::independent_group_elements(acc, offset, nclus)) {
          uint32_t prevBlockEnd = (i / maxThreads) * maxThreads - 1;
          newclusId[i] += newclusId[prevBlockEnd];
        }
        alpaka::syncBlockThreads(acc);
      }
    }

    ALPAKA_ASSERT_ACC(nclus >= newclusId[nclus - 1]);

    clus_view[thisModuleId].clusInModule() = newclusId[nclus - 1];

    // reassign id
    for (auto i : cms::alpakatools::independent_group_elements(acc, firstPixel, numElements)) {
      if (digi_view[i].moduleId() == invalidModuleId)
        continue;  // not valid
      if (digi_view[i].moduleId() != thisModuleId)
        break;  // end of module
      if (0 == ok[digi_view[i].clus()])
        digi_view[i].moduleId() = digi_view[i].clus() = invalidModuleId;
      else
        digi_view[i].clus() = newclusId[digi_view[i].clus()] - 1;
    }

    // done
    alpaka::syncBlockThreads(acc);
#ifdef GPU_DEBUG
    if (cms::alpakatools::once_per_grid(acc)) {
      printf("All digis AFTER cut: \n");
      for (uint32_t i = 0; i < numElements; i++)
        printf("%d %d %d %d %d \n",
               i,
               digi_view[i].rawIdArr(),
               digi_view[i].clus(),
               digi_view[i].pdigi(),
               digi_view[i].adc());
    }
#endif
  }
}
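
The renumbering step turns the per-cluster pass/fail flags into compact new ids via an inclusive prefix scan: a surviving cluster with old id c gets new id newclusId[c] - 1, and newclusId[nclus - 1] becomes the new cluster count for the module. The following is a minimal, sequential host-side sketch of the same mapping (plain C++, for illustration only; the example flag values are arbitrary):

// Standalone illustration of the renumbering logic (not part of the kernel).
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  // "ok" flags: 1 if the cluster charge passed the cut, 0 otherwise
  std::vector<uint8_t> ok = {1, 0, 1, 1, 0, 1};
  std::vector<uint16_t> newclusId(ok.size());

  // inclusive prefix sum over the flags
  // (the kernel does this block-wide with cms::alpakatools::blockPrefixScan)
  uint16_t running = 0;
  for (size_t c = 0; c < ok.size(); ++c) {
    running += ok[c];
    newclusId[c] = running;
  }

  // the last entry is the new number of clusters in the module
  std::printf("clusters kept: %u of %zu\n", static_cast<unsigned>(newclusId.back()), ok.size());

  // a surviving cluster with old id c is renumbered to newclusId[c] - 1
  for (size_t c = 0; c < ok.size(); ++c) {
    if (ok[c])
      std::printf("old cluster %zu -> new cluster %u\n", c, static_cast<unsigned>(newclusId[c] - 1));
    else
      std::printf("old cluster %zu removed\n", c);
  }
  return 0;
}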