CMS 3D CMS Logo

 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Properties Friends Macros Groups Pages
gpuClusterChargeCut.h
Go to the documentation of this file.
1 #ifndef RecoLocalTracker_SiPixelClusterizer_plugins_gpuClusterChargeCut_h
2 #define RecoLocalTracker_SiPixelClusterizer_plugins_gpuClusterChargeCut_h
3 
4 #include <cstdint>
5 #include <cstdio>
6 
11 
12 // local include(s)
14 
15 namespace gpuClustering {
16 
// NOTE(review): this text is a Doxygen-rendered extraction of the header; the
// number at the start of each line is the original file's line number, not
// code.  Several original lines are absent from this extraction:
//   - lines 16-18: the kernel's opening, i.e. the "__global__ void
//     clusterChargeCut(..." signature line(s) that precede the
//     "clusterThresholds" parameter below;
//   - line 31: the initialization of `firstModule` used by the module loop;
//   - line 48: the final printf argument (presumably maxNumClustersPerModules
//     -- TODO confirm against upstream);
//   - line 64: a statement inside the overflow branch (presumably clamping
//     `nclus` to maxNumClustersPerModules -- TODO confirm);
//   - line 73: unknown (likely a comment).
// Recover the pristine upstream source before attempting to compile this.
//
// Kernel purpose (from the visible code): for each pixel module, sum the ADC
// charge of every cluster, drop clusters whose total charge falls below a
// per-layer threshold, and renumber the surviving clusters so their ids stay
// dense.  One CUDA block processes one module at a time; blocks stride over
// modules via gridDim.x.
19  clusterThresholds, // charge cut on cluster in electrons (for layer 1 and for other layers)
20  uint16_t* __restrict__ id, // module id of each pixel (modified if bad cluster)
21  uint16_t const* __restrict__ adc, // charge of each pixel
22  uint32_t const* __restrict__ moduleStart, // index of the first pixel of each module
23  uint32_t* __restrict__ nClustersInModule, // modified: number of clusters found in each module
24  uint32_t const* __restrict__ moduleId, // module id of each module
25  int32_t* __restrict__ clusterId, // modified: cluster id of each pixel
26  uint32_t numElements) {
// Per-block scratch for the module currently being processed: total charge,
// keep/drop flag, and the compacted id of each cluster.
27  __shared__ int32_t charge[maxNumClustersPerModules];
28  __shared__ uint8_t ok[maxNumClustersPerModules];
29  __shared__ uint16_t newclusId[maxNumClustersPerModules];
30 
// moduleStart[0] holds the number of modules; entries [1..] hold the index of
// each module's first pixel (see the indexing at original line 34).
32  auto endModule = moduleStart[0];
// NOTE(review): `firstModule` is initialized on the missing original line 31
// (presumably blockIdx.x -- TODO confirm); blocks then grid-stride over modules.
33  for (auto module = firstModule; module < endModule; module += gridDim.x) {
34  auto firstPixel = moduleStart[1 + module];
35  auto thisModuleId = id[firstPixel];
36  assert(thisModuleId < maxNumModules);
37  assert(thisModuleId == moduleId[module]);
38 
39  auto nclus = nClustersInModule[thisModuleId];
40  if (nclus == 0)
41  continue;
42 
// Only thread 0 reports the overflow, to avoid duplicated output per block.
43  if (threadIdx.x == 0 && nclus > maxNumClustersPerModules)
44  printf("Warning too many clusters in module %d in block %d: %d > %d\n",
45  thisModuleId,
46  blockIdx.x,
47  nclus,
// NOTE(review): the printf's last argument (original line 48) is missing here.
49 
50  auto first = firstPixel + threadIdx.x;
51 
// Overflow handling: invalidate pixels whose cluster id does not fit in the
// fixed-size shared arrays declared above.
52  if (nclus > maxNumClustersPerModules) {
53  // remove excess FIXME find a way to cut charge first....
54  for (auto i = first; i < numElements; i += blockDim.x) {
55  if (id[i] == invalidModuleId)
56  continue; // not valid
57  if (id[i] != thisModuleId)
58  break; // end of module (pixels of one module are stored contiguously)
59  if (clusterId[i] >= maxNumClustersPerModules) {
60  id[i] = invalidModuleId;
61  clusterId[i] = invalidModuleId;
62  }
63  }
// NOTE(review): original line 64 is missing -- presumably
// "nclus = maxNumClustersPerModules;" so the per-cluster loops below stay in
// bounds of the shared arrays; TODO confirm against upstream.
65  }
66 
67 #ifdef GPU_DEBUG
68  if (thisModuleId % 100 == 1)
69  if (threadIdx.x == 0)
70  printf("start cluster charge cut for module %d in block %d\n", thisModuleId, blockIdx.x);
71 #endif
72 
// Zero the per-cluster charge accumulators; barrier before accumulation so no
// thread adds into a slot another thread has not cleared yet.
74  for (auto i = threadIdx.x; i < nclus; i += blockDim.x) {
75  charge[i] = 0;
76  }
77  __syncthreads();
78 
// Accumulate each pixel's ADC into its cluster's total; atomicAdd because
// several threads may target the same cluster concurrently.
79  for (auto i = first; i < numElements; i += blockDim.x) {
80  if (id[i] == invalidModuleId)
81  continue; // not valid
82  if (id[i] != thisModuleId)
83  break; // end of module
84  atomicAdd(&charge[clusterId[i]], adc[i]);
85  }
86  __syncthreads();
87 
// The threshold depends on whether the module sits in the innermost layer
// (thisModuleId < layerStart[1]) or anywhere else.
88  auto chargeCut =
89  clusterThresholds.getThresholdForLayerOnCondition(thisModuleId < phase1PixelTopology::layerStart[1]);
90 
// Flag each cluster as kept (1) or dropped (0); newclusId doubles as the
// input to the prefix scan used for renumbering below.
91  bool good = true;
92  for (auto i = threadIdx.x; i < nclus; i += blockDim.x) {
93  newclusId[i] = ok[i] = charge[i] >= chargeCut ? 1 : 0;
94  if (0 == ok[i])
95  good = false;
96  }
97 
98  // if all clusters above threshold do nothing
99  if (__syncthreads_and(good))
100  continue;
101 
102  // renumber
// Inclusive prefix scan over the keep flags: newclusId[i] becomes the count of
// surviving clusters with index <= i, so newclusId[i] - 1 is cluster i's new
// (dense) id when it survives.
103  __shared__ uint16_t ws[32];
104  cms::cuda::blockPrefixScan(newclusId, nclus, ws);
105 
// At least one cluster was cut above, so the survivor count must be < nclus.
106  assert(nclus > newclusId[nclus - 1]);
107 
108  nClustersInModule[thisModuleId] = newclusId[nclus - 1];
109 
110  // reassign id
// Pixels of dropped clusters are invalidated; survivors get their compacted id.
111  for (auto i = first; i < numElements; i += blockDim.x) {
112  if (id[i] == invalidModuleId)
113  continue; // not valid
114  if (id[i] != thisModuleId)
115  break; // end of module
116  if (0 == ok[clusterId[i]])
117  clusterId[i] = id[i] = invalidModuleId;
118  else
119  clusterId[i] = newclusId[clusterId[i]] - 1;
120  }
121 
122  //done
// Barrier before the next module iteration reuses the shared arrays.
123  __syncthreads();
124  } // loop on modules
125  }
126 
127 } // namespace gpuClustering
128 
129 #endif // RecoLocalTracker_SiPixelClusterizer_plugins_gpuClusterChargeCut_h
const dim3 threadIdx
Definition: cudaCompat.h:29
__shared__ uint8_t ok[maxNumClustersPerModules]
const dim3 gridDim
Definition: cudaCompat.h:33
uint16_t *__restrict__ uint16_t const *__restrict__ uint32_t const *__restrict__ uint32_t *__restrict__ uint32_t const *__restrict__ int32_t *__restrict__ clusterId
#define __global__
Definition: cudaCompat.h:19
const dim3 blockDim
Definition: cudaCompat.h:30
float clusterChargeCut(const edm::ParameterSet &conf, const char *name="clusterChargeCut")
assert(be >=bs)
constexpr int32_t getThresholdForLayerOnCondition(bool isLayer1) const noexcept
uint16_t *__restrict__ uint16_t const *__restrict__ uint32_t const *__restrict__ uint32_t *__restrict__ nClustersInModule
auto &__restrict__ ws
printf("params %d %f %f %f\n", minT, eps, errmax, chi2max)
uint16_t *__restrict__ uint16_t const *__restrict__ uint32_t const *__restrict__ moduleStart
constexpr int32_t maxNumClustersPerModules
constexpr uint16_t maxNumModules
uint16_t *__restrict__ uint16_t const *__restrict__ uint32_t const *__restrict__ uint32_t *__restrict__ uint32_t const *__restrict__ int32_t *__restrict__ uint32_t numElements
const dim3 blockIdx
Definition: cudaCompat.h:32
constexpr uint16_t invalidModuleId
uint16_t *__restrict__ uint16_t const *__restrict__ uint32_t const *__restrict__ uint32_t *__restrict__ uint32_t const *__restrict__ moduleId
auto const good
min quality of good
__shared__ uint16_t newclusId[maxNumClustersPerModules]
void __syncthreads()
Definition: cudaCompat.h:108
constexpr uint32_t layerStart[numberOfLayers+1]
bool __syncthreads_and(bool x)
Definition: cudaCompat.h:111
T1 atomicAdd(T1 *a, T2 b)
Definition: cudaCompat.h:61
tuple module
Definition: callgraph.py:69
uint16_t *__restrict__ uint16_t const *__restrict__ adc