unsigned int n_detectors = dus.size();
// restrict the count to pixel modules: stop at the first sub-detector made of strips
for (unsigned int i = 1; i < 7; ++i) {
  // ... (offset = offsetDU of tkDetEnum[i])
  if (offset != dus.size() && dus[offset]->type().isTrackerStrip()) {
    // ...
  }
}

LogDebug("SiPixelGainCalibrationForHLTGPU")
    << "caching calibs for " << n_detectors << " pixel detectors of size " << gains.data().size() << '\n';

// ... (encoding parameters)
auto nBinsToUseForEncoding = 253;

// ...
LogDebug("SiPixelGainCalibrationForHLTGPU") /* ... */;
// ... (ind = gains.getIndexes(), the sorted per-module index)
LogDebug("SiPixelGainCalibrationForHLTGPU") << ind.size() << " " << n_detectors;

// fill the per-module index: for each pixel detector unit, locate its
// calibration payload in the sorted index
for (auto i = 0U; i < n_detectors; ++i) {
  // ... (p = lower_bound lookup of dus[i]->geographicalId() in ind)
  assert(p != ind.end() && p->detid == dus[i]->geographicalId());
  // ...
  if (ind[i].detid != dus[i]->geographicalId())
    LogDebug("SiPixelGainCalibrationForHLTGPU") << ind[i].detid << "!=" << dus[i]->geographicalId();
}
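For orientation, here is a minimal standalone sketch of the linear 8-bit encoding that nBinsToUseForEncoding = 253 implies: 253 usable codes spanning the low/high pedestal and gain values, with the remaining codes kept as sentinels. The struct, the sentinel values 254/255, and the numeric ranges below are illustrative assumptions, not the CMSSW classes (the real decoding lives in SiPixelGainForHLTonGPU).

// Sketch only: illustrative stand-in for the encoding set up by the constructor.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdio>

struct GainCalibSketch {
  float minPed, maxPed, minGain, maxGain;
  float pedPrecision, gainPrecision;
  static constexpr unsigned int nBins = 253;  // codes 0..252 encode the range
  static constexpr uint8_t noisyFlag = 254;   // assumed sentinel
  static constexpr uint8_t deadFlag = 255;    // assumed sentinel

  GainCalibSketch(float p0, float p1, float g0, float g1)
      : minPed(p0), maxPed(p1), minGain(g0), maxGain(g1),
        pedPrecision((p1 - p0) / nBins), gainPrecision((g1 - g0) / nBins) {}

  uint8_t encodePed(float ped) const {
    int bin = static_cast<int>((ped - minPed) / pedPrecision);
    return static_cast<uint8_t>(std::clamp(bin, 0, static_cast<int>(nBins) - 1));
  }
  float decodePed(uint8_t bin) const { return bin * pedPrecision + minPed; }

  uint8_t encodeGain(float gain) const {
    int bin = static_cast<int>((gain - minGain) / gainPrecision);
    return static_cast<uint8_t>(std::clamp(bin, 0, static_cast<int>(nBins) - 1));
  }
  float decodeGain(uint8_t bin) const { return bin * gainPrecision + minGain; }
};

int main() {
  GainCalibSketch calib(0.f, 50.f, 0.f, 10.f);  // made-up ranges
  uint8_t p = calib.encodePed(12.3f);
  uint8_t g = calib.encodeGain(2.5f);
  assert(p != GainCalibSketch::noisyFlag && p != GainCalibSketch::deadFlag);
  std::printf("ped %u -> %.3f, gain %u -> %.3f\n",
              static_cast<unsigned>(p), calib.decodePed(p),
              static_cast<unsigned>(g), calib.decodeGain(g));
}

The point of the fixed bin count is that pedestal and gain each fit in one byte of the DecodingStructure while keeping a uniform precision of (max - min) / 253 over the calibrated range.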
SiPixelGainForHLTonGPU * gainForHLTonGPU
std::vector< DetRegistry > const & getIndexes() const
const SiPixelGainCalibrationForHLT * gains_
DecodingStructure * v_pedestals_
std::vector< char > const & data() const
std::pair< uint32_t, uint32_t > Range
~SiPixelGainCalibrationForHLTGPU()
const DetContainer & detUnits() const override
Return a vector of all GeomDet.
SiPixelGainForHLTonGPU_DecodingStructure DecodingStructure
cms::cuda::ESProduct< GPUData > gpuData_
unsigned int offsetDU(SubDetector sid) const
SiPixelGainForHLTonGPU * gainForHLTonHost_
float getGainHigh() const
unsigned int numberOfRowsAveragedOver_
std::pair< Range, int > rangeAndCols_[gpuClustering::maxNumModules]
const SiPixelGainForHLTonGPU * getGPUProductAsync(cudaStream_t cudaStream) const
char data[epos_bytes_allocation]
SiPixelGainForHLTonGPU_DecodingStructure * gainDataOnGPU
__host__ __device__ constexpr RandomIt lower_bound(RandomIt first, RandomIt last, const T &value, Compare comp={})
SiPixelGainCalibrationForHLTGPU(const SiPixelGainCalibrationForHLT &gains, const TrackerGeometry &geom)
#define cudaCheck(ARG,...)
constexpr SubDetector tkDetEnum[8]
unsigned int nBinsToUseForEncoding_
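The index-filling loop in the excerpt pairs each pixel detector unit with its slice of the calibration payload through a lower_bound search over the registry returned by getIndexes(), which is sorted by DetId. The sketch below shows that lookup pattern in isolation; RegistrySketch, the comparator, and all values are made-up stand-ins for the real registry type and its ordering functor.

// Sketch only: lower_bound lookup of per-module calibration ranges in a sorted registry.
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

struct RegistrySketch {
  uint32_t detid;   // module identifier, registry is sorted on this key
  uint32_t ibegin;  // first byte of this module's payload
  uint32_t iend;    // one past the last byte
  int ncols;        // number of columns for this module
};

using Range = std::pair<uint32_t, uint32_t>;

int main() {
  // registry sorted by detid; values are invented for illustration
  std::vector<RegistrySketch> ind = {{100, 0, 416, 2}, {101, 416, 832, 2}, {105, 832, 1248, 2}};
  std::vector<uint32_t> moduleIds = {100, 101, 105};  // stand-in for dus[i]->geographicalId()

  std::vector<std::pair<Range, int>> rangeAndCols(moduleIds.size());
  for (std::size_t i = 0; i < moduleIds.size(); ++i) {
    auto p = std::lower_bound(ind.begin(), ind.end(), moduleIds[i],
                              [](RegistrySketch const& r, uint32_t id) { return r.detid < id; });
    assert(p != ind.end() && p->detid == moduleIds[i]);  // every module must have an entry
    rangeAndCols[i] = std::make_pair(Range(p->ibegin, p->iend), p->ncols);
  }
}

Because the registry is sorted, each lookup is O(log n), and the resulting rangeAndCols_ table lets GPU code jump straight to a module's encoded pedestals and gains without searching at run time.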