ALPAKA_ACCELERATOR_NAMESPACE Namespace Reference

Namespaces

 alpakatest
 
 brokenline
 
 caHitNtupletGenerator
 
 caHitNtupletGeneratorKernels
 
 caPixelDoublets
 
 detail
 
 device
 
 ecal
 
 global
 
 hcal
 
 HeterogeneousTestAlpakaDevicePlugins
 
 HeterogeneousTestAlpakaKernelPlugins
 
 hgcal
 
 particleFlowRecHitProducer
 
 pixelClustering
 
 pixelDetails
 
 pixelgpudetails
 
 pixelRecHits
 
 pixelTrack
 
 portabletest
 
 reco
 
 riemannFit
 
 stream
 
 test
 
 vertexFinder
 

Classes

class  AlpakaBackendProducer
 
class  AlpakaService
 
class  AlpakaTestDeviceAdditionModule
 
class  AlpakaTestKernelAdditionModule
 
class  AlpakaTestOpaqueAdditionModule
 
class  AlpakaTestWrapperAdditionModule
 
class  BeamSpotDeviceProducer
 
class  CACellT
 
class  CAHitNtupletAlpaka
 
class  CAHitNtupletGenerator
 
class  CAHitNtupletGeneratorKernels
 
class  CaloRecHitSoAProducer
 
class  EcalElectronicsMappingHostESProducer
 
class  EcalMultifitConditionsHostESProducer
 
class  EcalMultifitParametersHostESProducer
 
class  EcalRawToDigiPortable
 
class  EcalUncalibRecHitProducerPortable
 
class  ECLCCCompute1
 
class  ECLCCFlatten
 
class  ECLCCInit
 
class  EDMetadata
 
class  ESDeviceProduct
 
class  ESProducer
 
class  FastCluster
 
class  FastClusterExotic
 
class  FillRhfIndex
 
class  HBHERecHitProducerPortable
 
class  HcalDigisSoAProducer
 
class  HcalMahiConditionsESProducer
 
class  HcalMahiPulseOffsetsESProducer
 
class  HcalRecoParamWithPulseShapeESProducer
 
class  HcalSiPMCharacteristicsESProducer
 
class  HelixFit
 
class  HGCalLayerClustersAlgoWrapper
 
class  HGCalLayerClustersSoAAlgoKernelEnergy
 
class  HGCalLayerClustersSoAAlgoKernelPositionByHits
 
class  HGCalLayerClustersSoAAlgoKernelPositionByHits2
 
class  HGCalLayerClustersSoAAlgoKernelPositionByHits3
 
class  HGCalLayerClustersSoAAlgoWrapper
 
class  HGCalSoALayerClustersProducer
 
class  HGCalSoARecHitsLayerClustersProducer
 
class  HGCalSoARecHitsProducer
 
class  Kernel_BLFastFit
 
struct  Kernel_BLFit
 
class  Kernel_CircleFit
 
class  Kernel_FastFit
 
class  Kernel_LineFit
 
class  PFClusterParamsESProducer
 
class  PFClusterProducerKernel
 
class  PFClusterSoAProducer
 
class  PFRecHitECALParamsESProducer
 
class  PFRecHitHCALParamsESProducer
 
class  PFRecHitProducerKernel
 
struct  PFRecHitProducerKernelConstruct
 
struct  PFRecHitProducerKernelTopology
 
class  PFRecHitSoAProducer
 
class  PFRecHitTopologyESProducer
 
class  PixelCPEFastParamsESProducerAlpaka
 
class  PixelVertexProducerAlpaka
 
class  PrepareTopoInputs
 
class  ProducerBase
 
class  ProducerBaseAdaptor
 
class  SeedingTopoThresh
 
class  setHitsLayerStart
 
class  SiPixelCablingSoAESProducer
 
class  SiPixelGainCalibrationForHLTSoAESProducer
 
struct  SiPixelMappingUtilities
 
class  SiPixelPhase2DigiToCluster
 
class  SiPixelRawToCluster
 
class  SiPixelRecHitAlpaka
 
class  TestAlgo
 
class  TestAlgoKernel
 
class  TestAlgoKernelUpdate
 
class  TestAlgoKernelUpdateMulti2
 
class  TestAlgoKernelUpdateMulti3
 
class  TestAlgoMultiKernel2
 
class  TestAlgoMultiKernel3
 
class  TestAlgoStructKernel
 
class  TestAlpakaESProducerA
 
class  TestAlpakaESProducerB
 
class  TestAlpakaESProducerC
 
class  TestAlpakaESProducerD
 
class  TestAlpakaESProducerE
 
class  TestAlpakaESProducerNull
 
class  TestAlpakaGlobalProducer
 
class  TestAlpakaGlobalProducerE
 
class  TestAlpakaGlobalProducerNoOutput
 
class  TestAlpakaGlobalProducerNullES
 
class  TestAlpakaGlobalProducerOffset
 
class  TestAlpakaProducer
 
class  TestAlpakaStreamProducer
 
class  TestAlpakaStreamSynchronizingProducer
 
class  TestAlpakaStreamSynchronizingProducerToDevice
 
class  TestHelperClass
 
class  TopoClusterContraction
 

Typedefs

using AlpakaESTestDataADevice = PortableCollection< cms::alpakatest::AlpakaESTestSoAA >
 
using AlpakaESTestDataAHost = cms::alpakatest::AlpakaESTestDataAHost
 
using AlpakaESTestDataCDevice = PortableCollection< cms::alpakatest::AlpakaESTestSoAC >
 
using AlpakaESTestDataCHost = cms::alpakatest::AlpakaESTestDataCHost
 
using AlpakaESTestDataDDevice = PortableCollection< cms::alpakatest::AlpakaESTestSoAD >
 
using AlpakaESTestDataDHost = cms::alpakatest::AlpakaESTestDataDHost
 
using AlpakaESTestDataEDevice = cms::alpakatest::AlpakaESTestDataE< Device >
 
using AlpakaESTestDataEHost = cms::alpakatest::AlpakaESTestDataEHost
 
using BeamSpotDevice = PortableObject< BeamSpotPOD >
 
using CAHitNtupletAlpakaHIonPhase1 = CAHitNtupletAlpaka< pixelTopology::HIonPhase1 >
 
using CAHitNtupletAlpakaPhase1 = CAHitNtupletAlpaka< pixelTopology::Phase1 >
 
using CAHitNtupletAlpakaPhase2 = CAHitNtupletAlpaka< pixelTopology::Phase2 >
 
using EcalDigiDeviceCollection = PortableCollection< EcalDigiSoA >
 
using EcalDigiPhase2DeviceCollection = PortableCollection< EcalDigiPhase2SoA >
 
using EcalElectronicsMappingDevice = PortableCollection< EcalElectronicsMappingSoA >
 
using EcalMultifitConditionsDevice = PortableCollection< EcalMultifitConditionsSoA >
 
using EcalMultifitParametersDevice = PortableCollection< EcalMultifitParametersSoA >
 
using EcalUncalibratedRecHitDeviceCollection = PortableCollection< EcalUncalibratedRecHitSoA >
 
using HCALRecHitSoAProducer = CaloRecHitSoAProducer< HCAL >
 
using HGCalSoAClustersDeviceCollection = PortableCollection< HGCalSoAClusters >
 
using HGCalSoAClustersExtraDeviceCollection = PortableCollection< HGCalSoAClustersExtra >
 
using HGCalSoARecHitsDeviceCollection = PortableCollection< HGCalSoARecHits >
 
using HGCalSoARecHitsExtraDeviceCollection = PortableCollection< HGCalSoARecHitsExtra >
 
using PFRecHitECALTopologyESProducer = PFRecHitTopologyESProducer< ECAL >
 
using PFRecHitHCALTopologyESProducer = PFRecHitTopologyESProducer< HCAL >
 
using PFRecHitSoAProducerECAL = PFRecHitSoAProducer< ECAL >
 
using PFRecHitSoAProducerHCAL = PFRecHitSoAProducer< HCAL >
 
template<typename TrackerTraits >
using PixelCPEFastParams = std::conditional_t< std::is_same_v< Device, alpaka::DevCpu >, PixelCPEFastParamsHost< TrackerTraits >, PixelCPEFastParamsDevice< Device, TrackerTraits > >
 
using PixelCPEFastParamsESProducerAlpakaHIonPhase1 = PixelCPEFastParamsESProducerAlpaka< pixelTopology::HIonPhase1 >
 
using PixelCPEFastParamsESProducerAlpakaPhase1 = PixelCPEFastParamsESProducerAlpaka< pixelTopology::Phase1 >
 
using PixelCPEFastParamsESProducerAlpakaPhase2 = PixelCPEFastParamsESProducerAlpaka< pixelTopology::Phase2 >
 
using PixelCPEFastParamsHIonPhase1 = PixelCPEFastParams< pixelTopology::HIonPhase1 >
 
using PixelCPEFastParamsPhase1 = PixelCPEFastParams< pixelTopology::Phase1 >
 
using PixelCPEFastParamsPhase2 = PixelCPEFastParams< pixelTopology::Phase2 >
 
using PixelVertexProducerAlpakaHIonPhase1 = PixelVertexProducerAlpaka< pixelTopology::HIonPhase1 >
 
using PixelVertexProducerAlpakaPhase1 = PixelVertexProducerAlpaka< pixelTopology::Phase1 >
 
using PixelVertexProducerAlpakaPhase2 = PixelVertexProducerAlpaka< pixelTopology::Phase2 >
 
template<typename T >
using PortableCollection = ::PortableCollection< T, Device >
 
template<typename T0 , typename T1 >
using PortableCollection2 = ::PortableMultiCollection< Device, T0, T1 >
 
template<typename T0 , typename T1 , typename T2 >
using PortableCollection3 = ::PortableMultiCollection< Device, T0, T1, T2 >
 
template<typename T0 , typename T1 , typename T2 , typename T3 >
using PortableCollection4 = ::PortableMultiCollection< Device, T0, T1, T2, T3 >
 
template<typename T0 , typename T1 , typename T2 , typename T3 , typename T4 >
using PortableCollection5 = ::PortableMultiCollection< Device, T0, T1, T2, T3, T4 >
 
template<typename T >
using PortableObject = ::PortableObject< T, Device >
 
using SiPixelClustersSoACollection = std::conditional_t< std::is_same_v< Device, alpaka::DevCpu >, SiPixelClustersHost, SiPixelClustersDevice< Device > >
 
using SiPixelDigiErrorsSoACollection = std::conditional_t< std::is_same_v< Device, alpaka::DevCpu >, SiPixelDigiErrorsHost, SiPixelDigiErrorsDevice< Device > >
 
using SiPixelDigisSoACollection = std::conditional_t< std::is_same_v< Device, alpaka::DevCpu >, SiPixelDigisHost, SiPixelDigisDevice< Device > >
 
using SiPixelGainCalibrationForHLTDevice = PortableCollection< SiPixelGainCalibrationForHLTSoA >
 
using SiPixelMappingDevice = PortableCollection< SiPixelMappingSoA >
 
using SiPixelRawToClusterHIonPhase1 = SiPixelRawToCluster< pixelTopology::HIonPhase1 >
 
using SiPixelRawToClusterPhase1 = SiPixelRawToCluster< pixelTopology::Phase1 >
 
using SiPixelRecHitAlpakaHIonPhase1 = SiPixelRecHitAlpaka< pixelTopology::HIonPhase1 >
 
using SiPixelRecHitAlpakaPhase1 = SiPixelRecHitAlpaka< pixelTopology::Phase1 >
 
using SiPixelRecHitAlpakaPhase2 = SiPixelRecHitAlpaka< pixelTopology::Phase2 >
 
using TrackingRecHitSoAHIonPhase1 = TrackingRecHitsSoACollection< pixelTopology::HIonPhase1 >
 
using TrackingRecHitSoAPhase1 = TrackingRecHitsSoACollection< pixelTopology::Phase1 >
 
using TrackingRecHitSoAPhase2 = TrackingRecHitsSoACollection< pixelTopology::Phase2 >
 
template<typename TrackerTraits >
using TrackingRecHitsSoACollection = std::conditional_t< std::is_same_v< Device, alpaka::DevCpu >, TrackingRecHitHost< TrackerTraits >, TrackingRecHitDevice< TrackerTraits, Device > >
 
template<typename TrackerTraits >
using TracksSoACollection = std::conditional_t< std::is_same_v< Device, alpaka::DevCpu >, TracksHost< TrackerTraits >, TracksDevice< TrackerTraits, Device > >
 
using ZVertexSoACollection = std::conditional_t< std::is_same_v< Device, alpaka::DevCpu >, ZVertexHost, ZVertexDevice< Device > >
 

Functions

ALPAKA_FN_ACC static ALPAKA_FN_INLINE float dR2 (Position4 pos1, Position4 pos2)
 
static ALPAKA_FN_ACC auto getRhFrac (reco::PFClusteringVarsDeviceCollection::View pfClusteringVars, int topoSeedBegin, reco::PFRecHitFractionDeviceCollection::View fracView, int seedNum, int rhNum)
 
static ALPAKA_FN_ACC auto getSeedRhIdx (int *seeds, int seedNum)
 
template<bool debug = false, typename TAcc , typename = std::enable_if_t<alpaka::isAccelerator<TAcc>>>
static ALPAKA_FN_ACC void hcalFastCluster_exotic (const TAcc &acc, reco::PFClusterParamsDeviceCollection::ConstView pfClusParams, const reco::PFRecHitHCALTopologyDeviceCollection::ConstView topology, int topoId, int nSeeds, int nRHTopo, reco::PFRecHitDeviceCollection::ConstView pfRecHits, reco::PFClusteringVarsDeviceCollection::View pfClusteringVars, reco::PFClusterDeviceCollection::View clusterView, reco::PFRecHitFractionDeviceCollection::View fracView, Position4 *__restrict__ globalClusterPos, Position4 *__restrict__ globalPrevClusterPos, float *__restrict__ globalClusterEnergy, float *__restrict__ globalRhFracSum, int *__restrict__ globalSeeds, int *__restrict__ globalRechits)
 
template<bool debug = false, typename TAcc , typename = std::enable_if_t<alpaka::isAccelerator<TAcc>>>
static ALPAKA_FN_ACC void hcalFastCluster_multiSeedIterative (const TAcc &acc, reco::PFClusterParamsDeviceCollection::ConstView pfClusParams, const reco::PFRecHitHCALTopologyDeviceCollection::ConstView topology, int topoId, int nSeeds, int nRHTopo, reco::PFRecHitDeviceCollection::ConstView pfRecHits, reco::PFClusteringVarsDeviceCollection::View pfClusteringVars, reco::PFClusterDeviceCollection::View clusterView, reco::PFRecHitFractionDeviceCollection::View fracView)
 
template<bool debug = false, typename TAcc , typename = std::enable_if_t<alpaka::isAccelerator<TAcc>>>
static ALPAKA_FN_ACC void hcalFastCluster_multiSeedParallel (const TAcc &acc, reco::PFClusterParamsDeviceCollection::ConstView pfClusParams, const reco::PFRecHitHCALTopologyDeviceCollection::ConstView topology, int topoId, int nSeeds, int nRHTopo, reco::PFRecHitDeviceCollection::ConstView pfRecHits, reco::PFClusteringVarsDeviceCollection::View pfClusteringVars, reco::PFClusterDeviceCollection::View clusterView, reco::PFRecHitFractionDeviceCollection::View fracView)
 
template<bool debug = false, typename TAcc , typename = std::enable_if_t<alpaka::isAccelerator<TAcc>>>
static ALPAKA_FN_ACC void hcalFastCluster_singleSeed (const TAcc &acc, reco::PFClusterParamsDeviceCollection::ConstView pfClusParams, const reco::PFRecHitHCALTopologyDeviceCollection::ConstView topology, int topoId, int nRHTopo, reco::PFRecHitDeviceCollection::ConstView pfRecHits, reco::PFClusteringVarsDeviceCollection::View pfClusteringVars, reco::PFClusterDeviceCollection::View clusterView, reco::PFRecHitFractionDeviceCollection::View fracView)
 
ALPAKA_FN_ACC int representative (const int idx, reco::PFClusteringVarsDeviceCollection::View pfClusteringVars)
 
AlpakaESTestDataDDevice testESAlgoAsync (Queue &queue, AlpakaESTestDataADevice const &dataA, cms::alpakatest::AlpakaESTestDataB< Device > const &dataB)
 
template<bool debug = false>
static ALPAKA_FN_ACC void updateClusterPos (reco::PFClusterParamsDeviceCollection::ConstView pfClusParams, Position4 &pos4, float frac, int rhInd, reco::PFRecHitDeviceCollection::ConstView pfRecHits, float rhENormInv)
 

Variables

static constexpr uint32_t blocksForExoticClusters = 4
 
static constexpr float cutoffDistance = 100.
 
static constexpr float cutoffFraction = 0.9999
 
static constexpr uint32_t kHBHalf = 1296
 
static constexpr uint32_t maxTopoInput = 2 * kHBHalf
 
static constexpr int threadsPerBlockForClustering = 512
 

Typedef Documentation

◆ AlpakaESTestDataADevice

Definition at line 13 of file AlpakaESTestData.h.

◆ AlpakaESTestDataAHost

Definition at line 12 of file AlpakaESTestData.h.

◆ AlpakaESTestDataCDevice

Definition at line 16 of file AlpakaESTestData.h.

◆ AlpakaESTestDataCHost

Definition at line 15 of file AlpakaESTestData.h.

◆ AlpakaESTestDataDDevice

Definition at line 19 of file AlpakaESTestData.h.

◆ AlpakaESTestDataDHost

Definition at line 18 of file AlpakaESTestData.h.

◆ AlpakaESTestDataEDevice

Definition at line 22 of file AlpakaESTestData.h.

◆ AlpakaESTestDataEHost

Definition at line 21 of file AlpakaESTestData.h.

◆ BeamSpotDevice

Definition at line 12 of file BeamSpotDevice.h.

◆ CAHitNtupletAlpakaHIonPhase1

Definition at line 89 of file CAHitNtuplet.cc.

◆ CAHitNtupletAlpakaPhase1

Definition at line 88 of file CAHitNtuplet.cc.

◆ CAHitNtupletAlpakaPhase2

Definition at line 90 of file CAHitNtuplet.cc.

◆ EcalDigiDeviceCollection

Definition at line 11 of file EcalDigiDeviceCollection.h.

◆ EcalDigiPhase2DeviceCollection

Definition at line 11 of file EcalDigiPhase2DeviceCollection.h.

◆ EcalElectronicsMappingDevice

Definition at line 13 of file EcalElectronicsMappingDevice.h.

◆ EcalMultifitConditionsDevice

Definition at line 13 of file EcalMultifitConditionsDevice.h.

◆ EcalMultifitParametersDevice

Definition at line 13 of file EcalMultifitParametersDevice.h.

◆ EcalUncalibratedRecHitDeviceCollection

Definition at line 11 of file EcalUncalibratedRecHitDeviceCollection.h.

◆ HCALRecHitSoAProducer

Definition at line 100 of file CaloRecHitSoAProducer.cc.

◆ HGCalSoAClustersDeviceCollection

Definition at line 10 of file HGCalSoAClustersDeviceCollection.h.

◆ HGCalSoAClustersExtraDeviceCollection

Definition at line 10 of file HGCalSoAClustersExtraDeviceCollection.h.

◆ HGCalSoARecHitsDeviceCollection

Definition at line 11 of file HGCalSoARecHitsDeviceCollection.h.

◆ HGCalSoARecHitsExtraDeviceCollection

Definition at line 11 of file HGCalSoARecHitsExtraDeviceCollection.h.

◆ PFRecHitECALTopologyESProducer

Definition at line 248 of file PFRecHitTopologyESProducer.cc.

◆ PFRecHitHCALTopologyESProducer

Definition at line 249 of file PFRecHitTopologyESProducer.cc.

◆ PFRecHitSoAProducerECAL

Definition at line 83 of file PFRecHitSoAProducer.cc.

◆ PFRecHitSoAProducerHCAL

Definition at line 82 of file PFRecHitSoAProducer.cc.

◆ PixelCPEFastParams

template<typename TrackerTraits >
using ALPAKA_ACCELERATOR_NAMESPACE::PixelCPEFastParams = std::conditional_t<std::is_same_v<Device, alpaka::DevCpu>, PixelCPEFastParamsHost<TrackerTraits>, PixelCPEFastParamsDevice<Device, TrackerTraits> >

Definition at line 20 of file PixelCPEFastParamsCollection.h.
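
The alias resolves at compile time: on the serial CPU backend (where Device is alpaka::DevCpu) it names the host-side parameter type, on any other backend the device-side one. A minimal self-contained sketch of the pattern, using stand-in types rather than the real CMSSW classes:

#include <type_traits>

struct DevCpu {};   // stand-in for alpaka::DevCpu
struct DevCuda {};  // stand-in for a GPU device type

template <typename TrackerTraits> struct ParamsHost {};
template <typename TDev, typename TrackerTraits> struct ParamsDevice {};

template <typename TDev, typename TrackerTraits>
using Params = std::conditional_t<std::is_same_v<TDev, DevCpu>,
                                  ParamsHost<TrackerTraits>,
                                  ParamsDevice<TDev, TrackerTraits>>;

struct Phase1 {};
static_assert(std::is_same_v<Params<DevCpu, Phase1>, ParamsHost<Phase1>>);
static_assert(std::is_same_v<Params<DevCuda, Phase1>, ParamsDevice<DevCuda, Phase1>>);

int main() {}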

◆ PixelCPEFastParamsESProducerAlpakaHIonPhase1

Definition at line 116 of file PixelCPEFastParamsESProducerAlpaka.cc.

◆ PixelCPEFastParamsESProducerAlpakaPhase1

Definition at line 115 of file PixelCPEFastParamsESProducerAlpaka.cc.

◆ PixelCPEFastParamsESProducerAlpakaPhase2

Definition at line 117 of file PixelCPEFastParamsESProducerAlpaka.cc.

◆ PixelCPEFastParamsHIonPhase1

Definition at line 23 of file PixelCPEFastParamsCollection.h.

◆ PixelCPEFastParamsPhase1

Definition at line 22 of file PixelCPEFastParamsCollection.h.

◆ PixelCPEFastParamsPhase2

Definition at line 24 of file PixelCPEFastParamsCollection.h.

◆ PixelVertexProducerAlpakaHIonPhase1

Definition at line 104 of file PixelVertexProducerAlpaka.cc.

◆ PixelVertexProducerAlpakaPhase1

Definition at line 102 of file PixelVertexProducerAlpaka.cc.

◆ PixelVertexProducerAlpakaPhase2

Definition at line 103 of file PixelVertexProducerAlpaka.cc.

◆ PortableCollection

template<typename T >
using ALPAKA_ACCELERATOR_NAMESPACE::PortableCollection = ::PortableCollection<T, Device>

Definition at line 17 of file PortableCollection.h.
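
Inside ALPAKA_ACCELERATOR_NAMESPACE the alias lets module code name only the element type; the backend's Device is supplied implicitly. A hedged sketch of typical use in a producer, modeled on the portabletest modules listed above (MySoA, nElements, and token_ are illustrative placeholders, not CMSSW names):

// Sketch only: assumes the (size, queue) constructor and the device::Event
// interface used by the portabletest producers.
using MyDeviceCollection = PortableCollection<MySoA>;  // == ::PortableCollection<MySoA, Device>

void produce(device::Event& event, device::EventSetup const&) {
  MyDeviceCollection collection(nElements, event.queue());  // allocated for this event's device
  // ... launch kernels on event.queue() to fill the collection ...
  event.emplace(token_, std::move(collection));  // framework schedules any host/device copy
}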

◆ PortableCollection2

template<typename T0 , typename T1 >
using ALPAKA_ACCELERATOR_NAMESPACE::PortableCollection2 = ::PortableMultiCollection<Device, T0, T1>

Definition at line 23 of file PortableCollection.h.

◆ PortableCollection3

template<typename T0 , typename T1 , typename T2 >
using ALPAKA_ACCELERATOR_NAMESPACE::PortableCollection3 = ::PortableMultiCollection<Device, T0, T1, T2>

Definition at line 26 of file PortableCollection.h.

◆ PortableCollection4

template<typename T0 , typename T1 , typename T2 , typename T3 >
using ALPAKA_ACCELERATOR_NAMESPACE::PortableCollection4 = ::PortableMultiCollection<Device, T0, T1, T2, T3>

Definition at line 29 of file PortableCollection.h.

◆ PortableCollection5

template<typename T0 , typename T1 , typename T2 , typename T3 , typename T4 >
using ALPAKA_ACCELERATOR_NAMESPACE::PortableCollection5 = ::PortableMultiCollection<Device, T0, T1, T2, T3, T4>

Definition at line 32 of file PortableCollection.h.

◆ PortableObject

template<typename T >
using ALPAKA_ACCELERATOR_NAMESPACE::PortableObject = ::PortableObject<T, Device>

Definition at line 17 of file PortableObject.h.

◆ SiPixelClustersSoACollection

using ALPAKA_ACCELERATOR_NAMESPACE::SiPixelClustersSoACollection = std::conditional_t<std::is_same_v<Device, alpaka::DevCpu>, SiPixelClustersHost, SiPixelClustersDevice<Device> >

Definition at line 15 of file SiPixelClustersSoACollection.h.

◆ SiPixelDigiErrorsSoACollection

using ALPAKA_ACCELERATOR_NAMESPACE::SiPixelDigiErrorsSoACollection = std::conditional_t<std::is_same_v<Device, alpaka::DevCpu>, SiPixelDigiErrorsHost, SiPixelDigiErrorsDevice<Device> >

Definition at line 18 of file SiPixelDigiErrorsSoACollection.h.

◆ SiPixelDigisSoACollection

using ALPAKA_ACCELERATOR_NAMESPACE::SiPixelDigisSoACollection = std::conditional_t<std::is_same_v<Device, alpaka::DevCpu>, SiPixelDigisHost, SiPixelDigisDevice<Device> >

Definition at line 17 of file SiPixelDigisSoACollection.h.

◆ SiPixelGainCalibrationForHLTDevice

Definition at line 10 of file SiPixelGainCalibrationForHLTDevice.h.

◆ SiPixelMappingDevice

Definition at line 12 of file SiPixelMappingDevice.h.

◆ SiPixelRawToClusterHIonPhase1

Definition at line 283 of file SiPixelRawToCluster.cc.

◆ SiPixelRawToClusterPhase1

Definition at line 282 of file SiPixelRawToCluster.cc.

◆ SiPixelRecHitAlpakaHIonPhase1

Definition at line 95 of file SiPixelRecHitAlpaka.cc.

◆ SiPixelRecHitAlpakaPhase1

Definition at line 94 of file SiPixelRecHitAlpaka.cc.

◆ SiPixelRecHitAlpakaPhase2

Definition at line 96 of file SiPixelRecHitAlpaka.cc.

◆ TrackingRecHitSoAHIonPhase1

Definition at line 27 of file TrackingRecHitsSoACollection.h.

◆ TrackingRecHitSoAPhase1

Definition at line 25 of file TrackingRecHitsSoACollection.h.

◆ TrackingRecHitSoAPhase2

Definition at line 26 of file TrackingRecHitsSoACollection.h.

◆ TrackingRecHitsSoACollection

template<typename TrackerTraits >
using ALPAKA_ACCELERATOR_NAMESPACE::TrackingRecHitsSoACollection = std::conditional_t<std::is_same_v<Device, alpaka::DevCpu>, TrackingRecHitHost<TrackerTraits>, TrackingRecHitDevice<TrackerTraits, Device> >

Definition at line 22 of file TrackingRecHitsSoACollection.h.

◆ TracksSoACollection

template<typename TrackerTraits >
using ALPAKA_ACCELERATOR_NAMESPACE::TracksSoACollection = std::conditional_t<std::is_same_v<Device, alpaka::DevCpu>, TracksHost<TrackerTraits>, TracksDevice<TrackerTraits, Device> >

Definition at line 25 of file TracksSoACollection.h.

◆ ZVertexSoACollection

using ALPAKA_ACCELERATOR_NAMESPACE::ZVertexSoACollection = std::conditional_t<std::is_same_v<Device, alpaka::DevCpu>, ZVertexHost, ZVertexDevice<Device> >

Definition at line 18 of file ZVertexSoACollection.h.

Function Documentation

◆ dR2()

ALPAKA_FN_ACC static ALPAKA_FN_INLINE float ALPAKA_ACCELERATOR_NAMESPACE::dR2 (Position4 pos1, Position4 pos2)

Definition at line 31 of file PFClusterSoAProducerKernel.dev.cc.

References funct::abs(), ALPAKA_ACCELERATOR_NAMESPACE::brokenline::constexpr(), HLT_2024v14_cff::eta1, HLT_2024v14_cff::eta2, f, M_PI, mag2(), ALPAKA_ACCELERATOR_NAMESPACE::reco::pfClustering::Position4::x, ALPAKA_ACCELERATOR_NAMESPACE::reco::pfClustering::Position4::y, and ALPAKA_ACCELERATOR_NAMESPACE::reco::pfClustering::Position4::z.

Referenced by TkAlCaRecoMonitor::analyze(), l1ct::L1EGPuppiIsoAlgo::calcIso(), reco::helper::JetMuonHitsIDHelper::calculate(), PhotonMVABasedHaloTagger::calmatchedHBHECoordForBothHypothesis(), PhotonMVABasedHaloTagger::calphoClusCoordinECAL(), l1ct::PFTkEGAlgoEmulator::compute_sumPt(), EcalPFClusterIsolation< T1 >::computedRVeto(), PhotonIDValueMapProducer::computeWorstPFChargedIsolation(), egammaisolation::EgammaTowerExtractor::deposit(), PFRecoTauDiscriminationByHPSSelection::discriminate(), HGCalClusterTools::energyInCone(), HLTDQMTagAndProbeEff< TagType, TagCollType, ProbeType, ProbeCollType >::fill(), EcalRegressionData::fill(), DeepTauIdBase< TritonEDProducer<> >::fillGrids(), DisappearingMuonsSkimming::filter(), findBestMatchingLepton(), DeepTauIdBase< TritonEDProducer<> >::findMatchedElectron(), HcalGeometry::getCells(), EcalEndcapGeometry::getCells(), CaloSubdetectorGeometry::getCells(), EcalBarrelGeometry::getCells(), CaloSubdetectorGeometry::getClosestCell(), egamma::getClosestCtfToGsf(), EcalClusterIsoCalculator::getEcalClusterIso(), HcalRechitIsoCalculator::getHcalRechitIso(), HcalPFClusterIsolation< T1 >::getSum(), TrackIsoCalculator::getTrackIso(), EgammaHcalIsolation::goodHitEnergy(), BaseTnPEfficiencyTask::hasTrigger(), hcalFastCluster_exotic(), hcalFastCluster_multiSeedIterative(), hcalFastCluster_multiSeedParallel(), hcalFastCluster_singleSeed(), HLTHcalNoiseFilter::hltFilter(), HLTDisplacedEgammaFilter::hltFilter(), HLTMuonTrimuonL3Filter::hltFilter(), HLTMuonL3PreFilter::hltFilter(), HLTMuonDimuonL3Filter::hltFilter(), reco::isodeposit::OtherJetConstituentsDeltaRVeto::initialize(), ZGammaplusJetsMonitor::isMatched(), heppy::IsolationComputer::isoSumNeutralsWeighted(), heppy::IsolationComputer::isoSumRaw(), PFB::match(), JetPlusTrackCorrector::matchElectrons(), reco::tau::cone::DeltaRFilter< typename CandType::value_type >::operator()(), EleTkIsolFromCands::operator()(), EgammaL1TkIsolation::passTrkSel(), JetTracksAssociationDRVertex::produce(), JetTracksAssociationDRVertexAssigned::produce(), JetVetoedTracksAssociationDRVertex::produce(), JetTracksAssociationDRCalo::produce(), PATTauHybridProducer::produce(), GenVisTauProducer::produce(), ShiftedPFCandidateProducerByMatchedObject::produce(), ShiftedJetProducerByMatchedObjectT< T >::produce(), NoPileUpPFMEtProducer::produce(), ShiftedPFCandidateProducerForPFMVAMEt::produce(), HLTMuonL2SelectorForL3IO::produce(), TrackDistanceValueMapProducer::produce(), HLTScoutingCaloProducer::produce(), L1PrefiringWeightProducer::produce(), PFClusterMatchedToPhotonsSelector::produce(), HLTScoutingPFProducer::produce(), HLTScoutingMuonProducer::produce(), PhotonIDValueMapProducer::produce(), pat::PATMuonProducer::puppiCombinedIsolation(), JetPlusTrackCorrector::rebuildJta(), and TtFullHadSignalSel::TtFullHadSignalSel().

31  {
32  float mag1 = sqrtf(pos1.x * pos1.x + pos1.y * pos1.y + pos1.z * pos1.z);
33  float cosTheta1 = mag1 > 0.0 ? pos1.z / mag1 : 1.0;
34  float eta1 = 0.5f * logf((1.0f + cosTheta1) / (1.0f - cosTheta1));
35  float phi1 = atan2f(pos1.y, pos1.x);
36 
37  float mag2 = sqrtf(pos2.x * pos2.x + pos2.y * pos2.y + pos2.z * pos2.z);
38  float cosTheta2 = mag2 > 0.0 ? pos2.z / mag2 : 1.0;
39  float eta2 = 0.5f * logf((1.0f + cosTheta2) / (1.0f - cosTheta2));
40  float phi2 = atan2f(pos2.y, pos2.x);
41 
42  float deta = eta2 - eta1;
43  constexpr const float fPI = M_PI;
44  float dphi = std::abs(std::abs(phi2 - phi1) - fPI) - fPI;
45  return (deta * deta + dphi * dphi);
46  }
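
Note on the listing above: eta is obtained as atanh(cosTheta) via the half-log identity (equivalent to -ln tan(theta/2)), and the folded expression on line 44 yields the negative of the wrapped delta-phi; since dR2 only uses its square, the sign is irrelevant. A small self-contained check of that folding (an illustration, not CMSSW code):

#include <algorithm>
#include <cassert>
#include <cmath>

int main() {
  const float fPI = static_cast<float>(M_PI);
  for (float d = 0.f; d < 2.f * fPI; d += 0.01f) {  // d = |phi2 - phi1|, in [0, 2*pi)
    float folded = std::abs(d - fPI) - fPI;         // expression from dR2, given d >= 0
    float wrapped = std::min(d, 2.f * fPI - d);     // canonical wrapped |dphi| in [0, pi]
    assert(std::abs(folded * folded - wrapped * wrapped) < 1e-3f);
  }
  return 0;
}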

◆ getRhFrac()

static ALPAKA_FN_ACC auto ALPAKA_ACCELERATOR_NAMESPACE::getRhFrac (reco::PFClusteringVarsDeviceCollection::View pfClusteringVars, int topoSeedBegin, reco::PFRecHitFractionDeviceCollection::View fracView, int seedNum, int rhNum)

Definition at line 52 of file PFClusterSoAProducerKernel.dev.cc.

Referenced by hcalFastCluster_exotic(), hcalFastCluster_multiSeedIterative(), and hcalFastCluster_multiSeedParallel().

56  {
57  int seedIdx = pfClusteringVars[topoSeedBegin + seedNum].topoSeedList();
58  return fracView[pfClusteringVars[seedIdx].seedFracOffsets() + rhNum].frac();
59  }
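
The indexing goes through two levels: seedNum is first mapped to a rechit index via topoSeedList(), whose seedFracOffsets() marks the start of that seed's contiguous window of fraction rows; rhNum is the offset within that window (slot 0 being the seed itself). A toy model of the flattened layout, collapsing the topoSeedList indirection (an illustration of the indexing scheme, not CMSSW code):

#include <cstdio>
#include <vector>

int main() {
  // Two seeds; each owns a window of 4 fraction rows (itself + 3 non-seed rechits).
  std::vector<int> seedFracOffsets = {0, 4};  // start row of each seed's window
  std::vector<float> frac(8, -1.f);           // the frac() column of fracView
  frac[seedFracOffsets[1] + 2] = 0.25f;       // seed 1, window slot 2

  auto getRhFrac = [&](int seedNum, int rhNum) { return frac[seedFracOffsets[seedNum] + rhNum]; };
  std::printf("%.2f\n", getRhFrac(1, 2));     // prints 0.25
  return 0;
}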

◆ getSeedRhIdx()

static ALPAKA_FN_ACC auto ALPAKA_ACCELERATOR_NAMESPACE::getSeedRhIdx (int *seeds, int seedNum)
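
No listing is reproduced for this helper on this page. Judging from the call sites in the clustering kernels below, it is a plain indexed lookup into the per-block seed list; a sketch of the assumed body (an assumption, not quoted source):

// Assumed body, inferred from the call sites; seeds[] holds the
// per-topo-cluster seed rechit indices.
static ALPAKA_FN_ACC auto getSeedRhIdx(int* seeds, int seedNum) { return seeds[seedNum]; }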

◆ hcalFastCluster_exotic()

template<bool debug = false, typename TAcc , typename = std::enable_if_t<alpaka::isAccelerator<TAcc>>>
static ALPAKA_FN_ACC void ALPAKA_ACCELERATOR_NAMESPACE::hcalFastCluster_exotic (const TAcc &acc, reco::PFClusterParamsDeviceCollection::ConstView pfClusParams, const reco::PFRecHitHCALTopologyDeviceCollection::ConstView topology, int topoId, int nSeeds, int nRHTopo, reco::PFRecHitDeviceCollection::ConstView pfRecHits, reco::PFClusteringVarsDeviceCollection::View pfClusteringVars, reco::PFClusterDeviceCollection::View clusterView, reco::PFRecHitFractionDeviceCollection::View fracView, Position4 *__restrict__ globalClusterPos, Position4 *__restrict__ globalPrevClusterPos, float *__restrict__ globalClusterEnergy, float *__restrict__ globalRhFracSum, int *__restrict__ globalSeeds, int *__restrict__ globalRechits)

Definition at line 546 of file PFClusterSoAProducerKernel.dev.cc.

References atomicMaxF(), cms::cudacompat::blockIdx, ALPAKA_ACCELERATOR_NAMESPACE::brokenline::constexpr(), cutoffFraction, debug, dR2(), hcalRecHitTable_cff::energy, f, DivergingColor::frac, HLT_2024v14_cff::fraction, getRhFrac(), getSeedRhIdx(), PFLayer::HCAL_BARREL1, PFLayer::HCAL_ENDCAP, mps_fire::i, dqmiolumiharvest::j, maxTopoInput, dqmiodumpmetadata::n, cms::alpakatools::once_per_block(), HLT_2024v14_cff::pfRecHits, alignCSCRings::s, HLT_2024v14_cff::seeds, riemannFit::stride, HLT_2024v14_cff::topology, updateClusterPos(), w(), ALPAKA_ACCELERATOR_NAMESPACE::reco::pfClustering::Position4::x, x, ALPAKA_ACCELERATOR_NAMESPACE::reco::pfClustering::Position4::y, and ALPAKA_ACCELERATOR_NAMESPACE::reco::pfClustering::Position4::z.

Referenced by ALPAKA_ACCELERATOR_NAMESPACE::FastClusterExotic::operator()().

561  {
562  int& nRHNotSeed = alpaka::declareSharedVar<int, __COUNTER__>(acc);
563  int& blockIdx = alpaka::declareSharedVar<int, __COUNTER__>(acc);
564  int& topoSeedBegin = alpaka::declareSharedVar<int, __COUNTER__>(acc);
565  int& stride = alpaka::declareSharedVar<int, __COUNTER__>(acc);
566  int& iter = alpaka::declareSharedVar<int, __COUNTER__>(acc);
567  float& tol = alpaka::declareSharedVar<float, __COUNTER__>(acc);
568  float& diff2 = alpaka::declareSharedVar<float, __COUNTER__>(acc);
569  float& rhENormInv = alpaka::declareSharedVar<float, __COUNTER__>(acc);
570  bool& notDone = alpaka::declareSharedVar<bool, __COUNTER__>(acc);
571 
572  blockIdx = maxTopoInput * alpaka::getIdx<alpaka::Grid, alpaka::Blocks>(acc)[0u];
573  Position4* clusterPos = globalClusterPos + blockIdx;
574  Position4* prevClusterPos = globalPrevClusterPos + blockIdx;
575  float* clusterEnergy = globalClusterEnergy + blockIdx;
576  float* rhFracSum = globalRhFracSum + blockIdx;
577  int* seeds = globalSeeds + blockIdx;
578  int* rechits = globalRechits + blockIdx;
579 
580  if (once_per_block(acc)) {
581  nRHNotSeed = nRHTopo - nSeeds + 1; // 1 + (# rechits per topoId that are NOT seeds)
582  topoSeedBegin = pfClusteringVars[topoId].topoSeedOffsets();
583  tol = pfClusParams.stoppingTolerance() *
584  powf(fmaxf(1.0, nSeeds - 1), 2.0); // stopping tolerance * tolerance scaling
585  stride = alpaka::getWorkDiv<alpaka::Block, alpaka::Threads>(acc)[0u];
586  iter = 0;
587  notDone = true;
588 
589  int i = pfClusteringVars[topoSeedBegin].topoSeedList();
590 
591  if (topology.cutsFromDB()) {
592  rhENormInv = (1.f / topology[pfRecHits[i].denseId()].noiseThreshold());
593  } else {
594  if (pfRecHits[i].layer() == PFLayer::HCAL_BARREL1)
595  rhENormInv = pfClusParams.recHitEnergyNormInvHB_vec()[pfRecHits[i].depth() - 1];
596  else if (pfRecHits[i].layer() == PFLayer::HCAL_ENDCAP)
597  rhENormInv = pfClusParams.recHitEnergyNormInvHE_vec()[pfRecHits[i].depth() - 1];
598  else {
599  rhENormInv = 0.;
600  printf("Rechit %d has invalid layer %d!\n", i, pfRecHits[i].layer());
601  }
602  }
603  }
604  alpaka::syncBlockThreads(acc); // all threads call sync
605 
606  for (int n = alpaka::getIdx<alpaka::Block, alpaka::Threads>(acc)[0u]; n < nRHTopo; n += stride) {
607  if (n < nSeeds)
608  seeds[n] = pfClusteringVars[topoSeedBegin + n].topoSeedList();
609  if (n < nRHNotSeed - 1)
610  rechits[n] =
611  fracView[pfClusteringVars[pfClusteringVars[topoSeedBegin].topoSeedList()].seedFracOffsets() + n + 1]
612  .pfrhIdx();
613  }
614  alpaka::syncBlockThreads(acc); // all threads call sync
615 
616  if constexpr (debug) {
617  if (once_per_block(acc)) {
618  printf("\n===========================================================================================\n");
619  printf("Processing topo cluster %d with nSeeds = %d nRHTopo = %d and seeds (", topoId, nSeeds, nRHTopo);
620  for (int s = 0; s < nSeeds; s++) {
621  if (s != 0)
622  printf(", ");
623  printf("%d", getSeedRhIdx(seeds, s));
624  }
625  if (nRHTopo == nSeeds) {
626  printf(")\n\n");
627  } else {
628  printf(") and other rechits (");
629  for (int r = 1; r < nRHNotSeed; r++) {
630  if (r != 1)
631  printf(", ");
632  if (r <= 0) {
633  printf("Invalid rhNum (%d) for get RhFracIdx!\n", r);
634  }
635  printf("%d", rechits[r - 1]);
636  }
637  printf(")\n\n");
638  }
639  }
640  alpaka::syncBlockThreads(acc); // all (or none) threads call sync
641  }
642 
643  // Set initial cluster position (energy) to seed rechit position (energy)
644  for (int s = alpaka::getIdx<alpaka::Block, alpaka::Threads>(acc)[0u]; s < nSeeds; s += stride) {
645  int i = seeds[s];
646  clusterPos[s] = Position4{pfRecHits[i].x(), pfRecHits[i].y(), pfRecHits[i].z(), 1.0};
647  prevClusterPos[s] = clusterPos[s];
648  clusterEnergy[s] = pfRecHits[i].energy();
649  for (int r = 0; r < (nRHNotSeed - 1); r++) {
650  fracView[pfClusteringVars[i].seedFracOffsets() + r + 1].pfrhIdx() = rechits[r];
651  fracView[pfClusteringVars[i].seedFracOffsets() + r + 1].frac() = -1.;
652  }
653  }
654  alpaka::syncBlockThreads(acc); // all threads call sync
655 
656  do {
657  if constexpr (debug) {
658  if (once_per_block(acc))
659  printf("\n--- Now on iter %d for topoId %d ---\n", iter, topoId);
660  }
661 
662  if (once_per_block(acc))
663  diff2 = -1;
664  // Reset rhFracSum
665  for (int tid = alpaka::getIdx<alpaka::Block, alpaka::Threads>(acc)[0u]; tid < nRHNotSeed - 1; tid += stride) {
666  rhFracSum[tid] = 0.;
667  int rhThreadIdx = rechits[tid];
668  Position4 rhThreadPos =
669  Position4{pfRecHits[rhThreadIdx].x(), pfRecHits[rhThreadIdx].y(), pfRecHits[rhThreadIdx].z(), 1.};
670  for (int s = 0; s < nSeeds; s++) {
671  float dist2 = (clusterPos[s].x - rhThreadPos.x) * (clusterPos[s].x - rhThreadPos.x) +
672  (clusterPos[s].y - rhThreadPos.y) * (clusterPos[s].y - rhThreadPos.y) +
673  (clusterPos[s].z - rhThreadPos.z) * (clusterPos[s].z - rhThreadPos.z);
674 
675  float d2 = dist2 / pfClusParams.showerSigma2();
676  float fraction = clusterEnergy[s] * rhENormInv * expf(-0.5f * d2);
677 
678  rhFracSum[tid] += fraction;
679  }
680  }
681  alpaka::syncBlockThreads(acc); // all threads call sync
682 
683  for (int tid = alpaka::getIdx<alpaka::Block, alpaka::Threads>(acc)[0u]; tid < nRHNotSeed - 1; tid += stride) {
684  int rhThreadIdx = rechits[tid];
685  Position4 rhThreadPos =
686  Position4{pfRecHits[rhThreadIdx].x(), pfRecHits[rhThreadIdx].y(), pfRecHits[rhThreadIdx].z(), 1.};
687  for (int s = 0; s < nSeeds; s++) {
688  int i = seeds[s];
689  float dist2 = (clusterPos[s].x - rhThreadPos.x) * (clusterPos[s].x - rhThreadPos.x) +
690  (clusterPos[s].y - rhThreadPos.y) * (clusterPos[s].y - rhThreadPos.y) +
691  (clusterPos[s].z - rhThreadPos.z) * (clusterPos[s].z - rhThreadPos.z);
692 
693  float d2 = dist2 / pfClusParams.showerSigma2();
694  float fraction = clusterEnergy[s] * rhENormInv * expf(-0.5f * d2);
695 
696  if (rhFracSum[tid] > pfClusParams.minFracTot()) {
697  float fracpct = fraction / rhFracSum[tid];
698  if (fracpct > cutoffFraction || (d2 < cutoffDistance && fracpct > pfClusParams.minFracToKeep())) {
699  fracView[pfClusteringVars[i].seedFracOffsets() + tid + 1].frac() = fracpct;
700  } else {
701  fracView[pfClusteringVars[i].seedFracOffsets() + tid + 1].frac() = -1;
702  }
703  } else {
704  fracView[pfClusteringVars[i].seedFracOffsets() + tid + 1].frac() = -1;
705  }
706  }
707  }
708  alpaka::syncBlockThreads(acc); // all threads call sync
709 
710  if constexpr (debug) {
711  if (once_per_block(acc))
712  printf("Computing cluster position for topoId %d\n", topoId);
713  }
714 
715  // Reset cluster position and energy
716  for (int s = alpaka::getIdx<alpaka::Block, alpaka::Threads>(acc)[0u]; s < nSeeds; s += stride) {
717  int seedRhIdx = getSeedRhIdx(seeds, s);
718  float norm = logf(pfRecHits[seedRhIdx].energy() * rhENormInv);
719  clusterPos[s] = Position4{
720  pfRecHits[seedRhIdx].x() * norm, pfRecHits[seedRhIdx].y() * norm, pfRecHits[seedRhIdx].z() * norm, norm};
721  clusterEnergy[s] = pfRecHits[seedRhIdx].energy();
722  if constexpr (debug) {
723  printf("Cluster %d (seed %d) has energy %f\tpos = (%f, %f, %f, %f)\n",
724  s,
725  seeds[s],
726  clusterEnergy[s],
727  clusterPos[s].x,
728  clusterPos[s].y,
729  clusterPos[s].z,
730  clusterPos[s].w);
731  }
732  }
733  alpaka::syncBlockThreads(acc); // all threads call sync
734 
735  // Recalculate position
736  for (int s = alpaka::getIdx<alpaka::Block, alpaka::Threads>(acc)[0u]; s < nSeeds; s += stride) {
737  int seedRhIdx = getSeedRhIdx(seeds, s);
738  for (int r = 0; r < nRHNotSeed - 1; r++) {
739  int j = rechits[r];
740  float frac = getRhFrac(pfClusteringVars, topoSeedBegin, fracView, s, r + 1);
741 
742  if (frac > -0.5) {
743  clusterEnergy[s] += frac * pfRecHits[j].energy();
744 
745  if (nSeeds == 1 || j == pfRecHits[seedRhIdx].neighbours()(0) || j == pfRecHits[seedRhIdx].neighbours()(1) ||
746  j == pfRecHits[seedRhIdx].neighbours()(2) || j == pfRecHits[seedRhIdx].neighbours()(3))
747  updateClusterPos(pfClusParams, clusterPos[s], frac, j, pfRecHits, rhENormInv);
748  }
749  }
750  }
751  alpaka::syncBlockThreads(acc); // all threads call sync
752 
753  // Position normalization
754  for (int s = alpaka::getIdx<alpaka::Block, alpaka::Threads>(acc)[0u]; s < nSeeds; s += stride) {
755  if (clusterPos[s].w >= pfClusParams.minAllowedNormalization()) {
756  // Divide by position norm
757  clusterPos[s].x /= clusterPos[s].w;
758  clusterPos[s].y /= clusterPos[s].w;
759  clusterPos[s].z /= clusterPos[s].w;
760 
761  if constexpr (debug)
762  printf("\tCluster %d (seed %d) energy = %f\tposition = (%f, %f, %f)\n",
763  s,
764  seeds[s],
765  clusterEnergy[s],
766  clusterPos[s].x,
767  clusterPos[s].y,
768  clusterPos[s].z);
769  } else {
770  if constexpr (debug)
771  printf("\tCluster %d (seed %d) position norm (%f) less than minimum (%f)\n",
772  s,
773  seeds[s],
774  clusterPos[s].w,
775  pfClusParams.minAllowedNormalization());
776  clusterPos[s].x = 0.0;
777  clusterPos[s].y = 0.0;
778  clusterPos[s].z = 0.0;
779  }
780  }
781  alpaka::syncBlockThreads(acc); // all threads call sync
782 
783  for (int s = alpaka::getIdx<alpaka::Block, alpaka::Threads>(acc)[0u]; s < nSeeds; s += stride) {
784  float delta2 = dR2(prevClusterPos[s], clusterPos[s]);
785  if constexpr (debug)
786  printf("\tCluster %d (seed %d) has delta2 = %f\n", s, seeds[s], delta2);
787  atomicMaxF(acc, &diff2, delta2);
788  prevClusterPos[s] = clusterPos[s]; // Save clusterPos
789  }
790  alpaka::syncBlockThreads(acc); // all threads call sync
791 
792  if (once_per_block(acc)) {
793  float tol2 = tol * tol;
794  iter++;
795  notDone = (diff2 > tol2) && ((unsigned int)iter < pfClusParams.maxIterations());
796  if constexpr (debug) {
797  if (diff2 > tol2)
798  printf("\tTopoId %d has diff2 = %f greater than squared tolerance %f (continuing)\n", topoId, diff2, tol2);
799  else if constexpr (debug)
800  printf("\tTopoId %d has diff2 = %f LESS than squared tolerance %f (terminating!)\n", topoId, diff2, tol2);
801  }
802  }
803  alpaka::syncBlockThreads(acc); // all threads call sync
804  } while (notDone); // shared variable ensures synchronization is well defined
805  if (once_per_block(acc))
806  for (int s = alpaka::getIdx<alpaka::Block, alpaka::Threads>(acc)[0u]; s < nSeeds; s += stride) {
807  int rhIdx = pfClusteringVars[s + pfClusteringVars[topoId].topoSeedOffsets()].topoSeedList();
808  int seedIdx = pfClusteringVars[rhIdx].rhIdxToSeedIdx();
809  clusterView[seedIdx].energy() = pfRecHits[s].energy();
810  clusterView[seedIdx].x() = pfRecHits[s].x();
811  clusterView[seedIdx].y() = pfRecHits[s].y();
812  clusterView[seedIdx].z() = pfRecHits[s].z();
813  }
814  alpaka::syncBlockThreads(acc); // all threads call sync
815  }
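
The four hcalFastCluster_* variants differ mainly in where their per-seed scratch arrays live: multiSeedParallel uses block-shared arrays sized for 100 seeds and threadsPerBlockForClustering rechits, multiSeedIterative enlarges them to 400 seeds and 1500 rechits, and the exotic variant above spills to caller-provided global buffers strided by maxTopoInput per block. A hedged sketch of a dispatch consistent with those capacities (the actual selection lives in the FastCluster and FastClusterExotic kernels; the thresholds here are inferred from the array sizes, not quoted from the source, and "..." stands for the argument lists shown above):

// Inferred dispatch sketch, per topo cluster.
if (nSeeds == 1)
  hcalFastCluster_singleSeed(...);                 // one seed: no per-seed arrays needed
else if (nSeeds <= 100 && nRHTopo - nSeeds < threadsPerBlockForClustering)
  hcalFastCluster_multiSeedParallel(...);          // fits the small shared-memory arrays
else if (nSeeds <= 400 && nRHTopo - nSeeds <= 1500)
  hcalFastCluster_multiSeedIterative(...);         // fits the large shared-memory arrays
else
  hcalFastCluster_exotic(..., globalSeeds, globalRechits);  // falls back to global scratch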

◆ hcalFastCluster_multiSeedIterative()

template<bool debug = false, typename TAcc , typename = std::enable_if_t<alpaka::isAccelerator<TAcc>>>
static ALPAKA_FN_ACC void ALPAKA_ACCELERATOR_NAMESPACE::hcalFastCluster_multiSeedIterative (const TAcc &acc, reco::PFClusterParamsDeviceCollection::ConstView pfClusParams, const reco::PFRecHitHCALTopologyDeviceCollection::ConstView topology, int topoId, int nSeeds, int nRHTopo, reco::PFRecHitDeviceCollection::ConstView pfRecHits, reco::PFClusteringVarsDeviceCollection::View pfClusteringVars, reco::PFClusterDeviceCollection::View clusterView, reco::PFRecHitFractionDeviceCollection::View fracView)

Definition at line 820 of file PFClusterSoAProducerKernel.dev.cc.

References atomicMaxF(), ALPAKA_ACCELERATOR_NAMESPACE::brokenline::constexpr(), cutoffFraction, debug, dR2(), hcalRecHitTable_cff::energy, f, DivergingColor::frac, HLT_2024v14_cff::fraction, getRhFrac(), getSeedRhIdx(), PFLayer::HCAL_BARREL1, PFLayer::HCAL_ENDCAP, mps_fire::i, dqmiolumiharvest::j, dqmiodumpmetadata::n, cms::alpakatools::once_per_block(), HLT_2024v14_cff::pfRecHits, alignCSCRings::s, HLT_2024v14_cff::seeds, riemannFit::stride, HLT_2024v14_cff::topology, updateClusterPos(), w(), ALPAKA_ACCELERATOR_NAMESPACE::reco::pfClustering::Position4::x, x, ALPAKA_ACCELERATOR_NAMESPACE::reco::pfClustering::Position4::y, and ALPAKA_ACCELERATOR_NAMESPACE::reco::pfClustering::Position4::z.

Referenced by ALPAKA_ACCELERATOR_NAMESPACE::FastCluster::operator()().

830  {
831  int& nRHNotSeed = alpaka::declareSharedVar<int, __COUNTER__>(acc);
832  int& topoSeedBegin = alpaka::declareSharedVar<int, __COUNTER__>(acc);
833  int& stride = alpaka::declareSharedVar<int, __COUNTER__>(acc);
834  int& iter = alpaka::declareSharedVar<int, __COUNTER__>(acc);
835  float& tol = alpaka::declareSharedVar<float, __COUNTER__>(acc);
836  float& diff2 = alpaka::declareSharedVar<float, __COUNTER__>(acc);
837  float& rhENormInv = alpaka::declareSharedVar<float, __COUNTER__>(acc);
838  bool& notDone = alpaka::declareSharedVar<bool, __COUNTER__>(acc);
839 
840  auto& clusterPos = alpaka::declareSharedVar<Position4[400], __COUNTER__>(acc);
841  auto& prevClusterPos = alpaka::declareSharedVar<Position4[400], __COUNTER__>(acc);
842  auto& clusterEnergy = alpaka::declareSharedVar<float[400], __COUNTER__>(acc);
843  auto& rhFracSum = alpaka::declareSharedVar<float[1500], __COUNTER__>(acc);
844  auto& seeds = alpaka::declareSharedVar<int[400], __COUNTER__>(acc);
845  auto& rechits = alpaka::declareSharedVar<int[1500], __COUNTER__>(acc);
846 
847  if (once_per_block(acc)) {
848  nRHNotSeed = nRHTopo - nSeeds + 1; // 1 + (# rechits per topoId that are NOT seeds)
849  topoSeedBegin = pfClusteringVars[topoId].topoSeedOffsets();
850  tol = pfClusParams.stoppingTolerance() * // stopping tolerance * tolerance scaling
851  powf(fmaxf(1.0, nSeeds - 1), 2.0);
852  stride = alpaka::getWorkDiv<alpaka::Block, alpaka::Threads>(acc)[0u];
853  iter = 0;
854  notDone = true;
855 
856  int i = pfClusteringVars[topoSeedBegin].topoSeedList();
857 
858  if (topology.cutsFromDB()) {
859  rhENormInv = (1.f / topology[pfRecHits[i].denseId()].noiseThreshold());
860  } else {
861  if (pfRecHits[i].layer() == PFLayer::HCAL_BARREL1)
862  rhENormInv = pfClusParams.recHitEnergyNormInvHB_vec()[pfRecHits[i].depth() - 1];
863  else if (pfRecHits[i].layer() == PFLayer::HCAL_ENDCAP)
864  rhENormInv = pfClusParams.recHitEnergyNormInvHE_vec()[pfRecHits[i].depth() - 1];
865  else {
866  rhENormInv = 0.;
867  printf("Rechit %d has invalid layer %d!\n", i, pfRecHits[i].layer());
868  }
869  }
870  }
871  alpaka::syncBlockThreads(acc); // all threads call sync
872 
873  for (int n = alpaka::getIdx<alpaka::Block, alpaka::Threads>(acc)[0u]; n < nRHTopo; n += stride) {
874  if (n < nSeeds)
875  seeds[n] = pfClusteringVars[topoSeedBegin + n].topoSeedList();
876  if (n < nRHNotSeed - 1)
877  rechits[n] =
878  fracView[pfClusteringVars[pfClusteringVars[topoSeedBegin].topoSeedList()].seedFracOffsets() + n + 1]
879  .pfrhIdx();
880  }
881  alpaka::syncBlockThreads(acc); // all threads call sync
882 
883  if constexpr (debug) {
884  if (once_per_block(acc)) {
885  printf("\n===========================================================================================\n");
886  printf("Processing topo cluster %d with nSeeds = %d nRHTopo = %d and seeds (", topoId, nSeeds, nRHTopo);
887  for (int s = 0; s < nSeeds; s++) {
888  if (s != 0)
889  printf(", ");
890  printf("%d", getSeedRhIdx(seeds, s));
891  }
892  if (nRHTopo == nSeeds) {
893  printf(")\n\n");
894  } else {
895  printf(") and other rechits (");
896  for (int r = 1; r < nRHNotSeed; r++) {
897  if (r != 1)
898  printf(", ");
899  if (r <= 0) {
900  printf("Invalid rhNum (%d) for get RhFracIdx!\n", r);
901  }
902  printf("%d", rechits[r - 1]);
903  }
904  printf(")\n\n");
905  }
906  }
907  alpaka::syncBlockThreads(acc); // all (or none) threads call sync
908  }
909 
910  // Set initial cluster position (energy) to seed rechit position (energy)
911  for (int s = alpaka::getIdx<alpaka::Block, alpaka::Threads>(acc)[0u]; s < nSeeds; s += stride) {
912  int i = seeds[s];
913  clusterPos[s] = Position4{pfRecHits[i].x(), pfRecHits[i].y(), pfRecHits[i].z(), 1.0};
914  prevClusterPos[s] = clusterPos[s];
915  clusterEnergy[s] = pfRecHits[i].energy();
916  for (int r = 0; r < (nRHNotSeed - 1); r++) {
917  fracView[pfClusteringVars[i].seedFracOffsets() + r + 1].pfrhIdx() = rechits[r];
918  fracView[pfClusteringVars[i].seedFracOffsets() + r + 1].frac() = -1.;
919  }
920  }
921  alpaka::syncBlockThreads(acc); // all threads call sync
922 
923  do {
924  if constexpr (debug) {
925  if (once_per_block(acc))
926  printf("\n--- Now on iter %d for topoId %d ---\n", iter, topoId);
927  }
928 
929  if (once_per_block(acc))
930  diff2 = -1;
931  // Reset rhFracSum
932  for (int tid = alpaka::getIdx<alpaka::Block, alpaka::Threads>(acc)[0u]; tid < nRHNotSeed - 1; tid += stride) {
933  rhFracSum[tid] = 0.;
934  int rhThreadIdx = rechits[tid];
935  Position4 rhThreadPos =
936  Position4{pfRecHits[rhThreadIdx].x(), pfRecHits[rhThreadIdx].y(), pfRecHits[rhThreadIdx].z(), 1.};
937  for (int s = 0; s < nSeeds; s++) {
938  float dist2 = (clusterPos[s].x - rhThreadPos.x) * (clusterPos[s].x - rhThreadPos.x) +
939  (clusterPos[s].y - rhThreadPos.y) * (clusterPos[s].y - rhThreadPos.y) +
940  (clusterPos[s].z - rhThreadPos.z) * (clusterPos[s].z - rhThreadPos.z);
941 
942  float d2 = dist2 / pfClusParams.showerSigma2();
943  float fraction = clusterEnergy[s] * rhENormInv * expf(-0.5f * d2);
944 
945  rhFracSum[tid] += fraction;
946  }
947  }
948  alpaka::syncBlockThreads(acc); // all threads call sync
949 
950  for (int tid = alpaka::getIdx<alpaka::Block, alpaka::Threads>(acc)[0u]; tid < nRHNotSeed - 1; tid += stride) {
951  int rhThreadIdx = rechits[tid];
952  Position4 rhThreadPos =
953  Position4{pfRecHits[rhThreadIdx].x(), pfRecHits[rhThreadIdx].y(), pfRecHits[rhThreadIdx].z(), 1.};
954  for (int s = 0; s < nSeeds; s++) {
955  int i = seeds[s];
956  float dist2 = (clusterPos[s].x - rhThreadPos.x) * (clusterPos[s].x - rhThreadPos.x) +
957  (clusterPos[s].y - rhThreadPos.y) * (clusterPos[s].y - rhThreadPos.y) +
958  (clusterPos[s].z - rhThreadPos.z) * (clusterPos[s].z - rhThreadPos.z);
959 
960  float d2 = dist2 / pfClusParams.showerSigma2();
961  float fraction = clusterEnergy[s] * rhENormInv * expf(-0.5f * d2);
962 
963  if (rhFracSum[tid] > pfClusParams.minFracTot()) {
964  float fracpct = fraction / rhFracSum[tid];
965  if (fracpct > cutoffFraction || (d2 < cutoffDistance && fracpct > pfClusParams.minFracToKeep())) {
966  fracView[pfClusteringVars[i].seedFracOffsets() + tid + 1].frac() = fracpct;
967  } else {
968  fracView[pfClusteringVars[i].seedFracOffsets() + tid + 1].frac() = -1;
969  }
970  } else {
971  fracView[pfClusteringVars[i].seedFracOffsets() + tid + 1].frac() = -1;
972  }
973  }
974  }
975  alpaka::syncBlockThreads(acc); // all threads call sync
976 
977  if constexpr (debug) {
978  if (once_per_block(acc))
979  printf("Computing cluster position for topoId %d\n", topoId);
980  }
981 
982  // Reset cluster position and energy
983  for (int s = alpaka::getIdx<alpaka::Block, alpaka::Threads>(acc)[0u]; s < nSeeds; s += stride) {
984  int seedRhIdx = getSeedRhIdx(seeds, s);
985  float norm = logf(pfRecHits[seedRhIdx].energy() * rhENormInv);
986  clusterPos[s] = Position4{
987  pfRecHits[seedRhIdx].x() * norm, pfRecHits[seedRhIdx].y() * norm, pfRecHits[seedRhIdx].z() * norm, norm};
988  clusterEnergy[s] = pfRecHits[seedRhIdx].energy();
989  if constexpr (debug) {
990  printf("Cluster %d (seed %d) has energy %f\tpos = (%f, %f, %f, %f)\n",
991  s,
992  seeds[s],
993  clusterEnergy[s],
994  clusterPos[s].x,
995  clusterPos[s].y,
996  clusterPos[s].z,
997  clusterPos[s].w);
998  }
999  }
1000  alpaka::syncBlockThreads(acc); // all threads call sync
1001 
1002  // Recalculate position
1003  for (int s = alpaka::getIdx<alpaka::Block, alpaka::Threads>(acc)[0u]; s < nSeeds; s += stride) {
1004  int seedRhIdx = getSeedRhIdx(seeds, s);
1005  for (int r = 0; r < nRHNotSeed - 1; r++) {
1006  int j = rechits[r];
1007  float frac = getRhFrac(pfClusteringVars, topoSeedBegin, fracView, s, r + 1);
1008 
1009  if (frac > -0.5) {
1010  clusterEnergy[s] += frac * pfRecHits[j].energy();
1011 
1012  if (nSeeds == 1 || j == pfRecHits[seedRhIdx].neighbours()(0) || j == pfRecHits[seedRhIdx].neighbours()(1) ||
1013  j == pfRecHits[seedRhIdx].neighbours()(2) || j == pfRecHits[seedRhIdx].neighbours()(3))
1014  updateClusterPos(pfClusParams, clusterPos[s], frac, j, pfRecHits, rhENormInv);
1015  }
1016  }
1017  }
1018  alpaka::syncBlockThreads(acc); // all threads call sync
1019 
1020  // Position normalization
1021  for (int s = alpaka::getIdx<alpaka::Block, alpaka::Threads>(acc)[0u]; s < nSeeds; s += stride) {
1022  if (clusterPos[s].w >= pfClusParams.minAllowedNormalization()) {
1023  // Divide by position norm
1024  clusterPos[s].x /= clusterPos[s].w;
1025  clusterPos[s].y /= clusterPos[s].w;
1026  clusterPos[s].z /= clusterPos[s].w;
1027 
1028  if constexpr (debug)
1029  printf("\tCluster %d (seed %d) energy = %f\tposition = (%f, %f, %f)\n",
1030  s,
1031  seeds[s],
1032  clusterEnergy[s],
1033  clusterPos[s].x,
1034  clusterPos[s].y,
1035  clusterPos[s].z);
1036  } else {
1037  if constexpr (debug)
1038  printf("\tCluster %d (seed %d) position norm (%f) less than minimum (%f)\n",
1039  s,
1040  seeds[s],
1041  clusterPos[s].w,
1042  pfClusParams.minAllowedNormalization());
1043  clusterPos[s].x = 0.0;
1044  clusterPos[s].y = 0.0;
1045  clusterPos[s].z = 0.0;
1046  }
1047  }
1048  alpaka::syncBlockThreads(acc); // all threads call sync
1049 
1050  for (int s = alpaka::getIdx<alpaka::Block, alpaka::Threads>(acc)[0u]; s < nSeeds; s += stride) {
1051  float delta2 = dR2(prevClusterPos[s], clusterPos[s]);
1052  if constexpr (debug)
1053  printf("\tCluster %d (seed %d) has delta2 = %f\n", s, seeds[s], delta2);
1054  atomicMaxF(acc, &diff2, delta2);
1055  prevClusterPos[s] = clusterPos[s]; // Save clusterPos
1056  }
1057  alpaka::syncBlockThreads(acc); // all threads call sync
1058 
1059  if (once_per_block(acc)) {
1060  float tol2 = tol * tol;
1061  iter++;
1062  notDone = (diff2 > tol2) && ((unsigned int)iter < pfClusParams.maxIterations());
1063  if constexpr (debug) {
1064  if (diff2 > tol2)
1065  printf("\tTopoId %d has diff2 = %f greater than tolerance %f (continuing)\n", topoId, diff2, tol2);
1066  else if constexpr (debug)
1067  printf("\tTopoId %d has diff2 = %f LESS than tolerance %f (terminating!)\n", topoId, diff2, tol2);
1068  }
1069  }
1070  alpaka::syncBlockThreads(acc); // all threads call sync
1071  } while (notDone); // shared variable ensures synchronization is well defined
1072  if (once_per_block(acc))
1073  for (int s = alpaka::getIdx<alpaka::Block, alpaka::Threads>(acc)[0u]; s < nSeeds; s += stride) {
1074  int rhIdx = pfClusteringVars[s + pfClusteringVars[topoId].topoSeedOffsets()].topoSeedList();
1075  int seedIdx = pfClusteringVars[rhIdx].rhIdxToSeedIdx();
1076  clusterView[seedIdx].energy() = pfRecHits[s].energy();
1077  clusterView[seedIdx].x() = pfRecHits[s].x();
1078  clusterView[seedIdx].y() = pfRecHits[s].y();
1079  clusterView[seedIdx].z() = pfRecHits[s].z();
1080  }
1081  }

◆ hcalFastCluster_multiSeedParallel()

template<bool debug = false, typename TAcc , typename = std::enable_if_t<alpaka::isAccelerator<TAcc>>>
static ALPAKA_FN_ACC void ALPAKA_ACCELERATOR_NAMESPACE::hcalFastCluster_multiSeedParallel (const TAcc &acc, reco::PFClusterParamsDeviceCollection::ConstView pfClusParams, const reco::PFRecHitHCALTopologyDeviceCollection::ConstView topology, int topoId, int nSeeds, int nRHTopo, reco::PFRecHitDeviceCollection::ConstView pfRecHits, reco::PFClusteringVarsDeviceCollection::View pfClusteringVars, reco::PFClusterDeviceCollection::View clusterView, reco::PFRecHitFractionDeviceCollection::View fracView)

Definition at line 259 of file PFClusterSoAProducerKernel.dev.cc.

References atomicMaxF(), ALPAKA_ACCELERATOR_NAMESPACE::brokenline::constexpr(), cutoffFraction, debug, dR2(), f, DivergingColor::frac, HLT_2024v14_cff::fraction, getRhFrac(), getSeedRhIdx(), PFLayer::HCAL_BARREL1, PFLayer::HCAL_ENDCAP, mps_fire::i, dqmiolumiharvest::j, cms::alpakatools::once_per_block(), HLT_2024v14_cff::pfRecHits, alignCSCRings::s, HLT_2024v14_cff::seeds, riemannFit::stride, HLT_2024v14_cff::topology, updateClusterPos(), ALPAKA_ACCELERATOR_NAMESPACE::reco::pfClustering::Neighbours4::w, w(), ALPAKA_ACCELERATOR_NAMESPACE::reco::pfClustering::Position4::x, ALPAKA_ACCELERATOR_NAMESPACE::reco::pfClustering::Neighbours4::x, x, ALPAKA_ACCELERATOR_NAMESPACE::reco::pfClustering::Position4::y, ALPAKA_ACCELERATOR_NAMESPACE::reco::pfClustering::Neighbours4::y, ALPAKA_ACCELERATOR_NAMESPACE::reco::pfClustering::Position4::z, and ALPAKA_ACCELERATOR_NAMESPACE::reco::pfClustering::Neighbours4::z.

Referenced by ALPAKA_ACCELERATOR_NAMESPACE::FastCluster::operator()().

269  {
270  int tid = alpaka::getIdx<alpaka::Block,
271  alpaka::Threads>( // Thread index corresponds to a single rechit of the topo cluster
272  acc)[0u];
273 
274  int& nRHNotSeed = alpaka::declareSharedVar<int, __COUNTER__>(acc);
275  int& topoSeedBegin = alpaka::declareSharedVar<int, __COUNTER__>(acc);
276  int& stride = alpaka::declareSharedVar<int, __COUNTER__>(acc);
277  int& iter = alpaka::declareSharedVar<int, __COUNTER__>(acc);
278  float& tol = alpaka::declareSharedVar<float, __COUNTER__>(acc);
279  float& diff2 = alpaka::declareSharedVar<float, __COUNTER__>(acc);
280  float& rhENormInv = alpaka::declareSharedVar<float, __COUNTER__>(acc);
281  bool& notDone = alpaka::declareSharedVar<bool, __COUNTER__>(acc);
282  auto& clusterPos = alpaka::declareSharedVar<Position4[100], __COUNTER__>(acc);
283  auto& prevClusterPos = alpaka::declareSharedVar<Position4[100], __COUNTER__>(acc);
284  auto& clusterEnergy = alpaka::declareSharedVar<float[100], __COUNTER__>(acc);
285  auto& rhFracSum = alpaka::declareSharedVar<float[threadsPerBlockForClustering], __COUNTER__>(acc);
286  auto& seeds = alpaka::declareSharedVar<int[100], __COUNTER__>(acc);
287  auto& rechits = alpaka::declareSharedVar<int[threadsPerBlockForClustering], __COUNTER__>(acc);
288 
289  if (once_per_block(acc)) {
290  nRHNotSeed = nRHTopo - nSeeds + 1; // 1 + (# rechits per topoId that are NOT seeds)
291  topoSeedBegin = pfClusteringVars[topoId].topoSeedOffsets();
292  tol = pfClusParams.stoppingTolerance() *
293  powf(fmaxf(1.0, nSeeds - 1), 2.0); // stopping tolerance * tolerance scaling
294  stride = alpaka::getWorkDiv<alpaka::Block, alpaka::Threads>(acc)[0u];
295  iter = 0;
296  notDone = true;
297 
298  int i = pfClusteringVars[topoSeedBegin].topoSeedList();
299 
300  if (topology.cutsFromDB()) {
301  rhENormInv = (1.f / topology[pfRecHits[i].denseId()].noiseThreshold());
302  } else {
303  if (pfRecHits[i].layer() == PFLayer::HCAL_BARREL1)
304  rhENormInv = pfClusParams.recHitEnergyNormInvHB_vec()[pfRecHits[i].depth() - 1];
305  else if (pfRecHits[i].layer() == PFLayer::HCAL_ENDCAP)
306  rhENormInv = pfClusParams.recHitEnergyNormInvHE_vec()[pfRecHits[i].depth() - 1];
307  else {
308  rhENormInv = 0.;
309  printf("Rechit %d has invalid layer %d!\n", i, pfRecHits[i].layer());
310  }
311  }
312  }
313  alpaka::syncBlockThreads(acc); // all threads call sync
314 
315  if (tid < nSeeds)
316  seeds[tid] = pfClusteringVars[topoSeedBegin + tid].topoSeedList();
317  if (tid < nRHNotSeed - 1)
318  rechits[tid] =
319  fracView[pfClusteringVars[pfClusteringVars[topoSeedBegin].topoSeedList()].seedFracOffsets() + tid + 1]
320  .pfrhIdx();
321 
322  alpaka::syncBlockThreads(acc); // all threads call sync
323 
324  if constexpr (debug) {
325  if (once_per_block(acc)) {
326  printf("\n===========================================================================================\n");
327  printf("Processing topo cluster %d with nSeeds = %d nRHTopo = %d and seeds (", topoId, nSeeds, nRHTopo);
328  for (int s = 0; s < nSeeds; s++) {
329  if (s != 0)
330  printf(", ");
331  printf("%d", getSeedRhIdx(seeds, s));
332  }
333  if (nRHTopo == nSeeds) {
334  printf(")\n\n");
335  } else {
336  printf(") and other rechits (");
337  for (int r = 1; r < nRHNotSeed; r++) {
338  if (r != 1)
339  printf(", ");
340  if (r <= 0) {
341  printf("Invalid rhNum (%d) for get RhFracIdx!\n", r);
342  }
343  printf("%d", rechits[r - 1]);
344  }
345  printf(")\n\n");
346  }
347  }
348  alpaka::syncBlockThreads(acc); // all (or none) threads call sync
349  }
350 
351  // Set initial cluster position (energy) to seed rechit position (energy)
352  if (tid < nSeeds) {
353  int i = getSeedRhIdx(seeds, tid);
354  clusterPos[tid] = Position4{pfRecHits[i].x(), pfRecHits[i].y(), pfRecHits[i].z(), 1.0};
355  prevClusterPos[tid] = clusterPos[tid];
356  clusterEnergy[tid] = pfRecHits[i].energy();
357  for (int r = 0; r < (nRHNotSeed - 1); r++) {
358  fracView[pfClusteringVars[i].seedFracOffsets() + r + 1].pfrhIdx() = rechits[r];
359  fracView[pfClusteringVars[i].seedFracOffsets() + r + 1].frac() = -1.;
360  }
361  }
362  alpaka::syncBlockThreads(acc); // all threads call sync
363 
364  int rhThreadIdx = -1;
365  Position4 rhThreadPos;
366  if (tid < (nRHNotSeed - 1)) {
367  rhThreadIdx = rechits[tid]; // Index when thread represents rechit
368  rhThreadPos = Position4{pfRecHits[rhThreadIdx].x(), pfRecHits[rhThreadIdx].y(), pfRecHits[rhThreadIdx].z(), 1.};
369  }
370 
371  // Neighbors when threadIdx represents seed
372  int seedThreadIdx = -1;
373  Neighbours4 seedNeighbors = Neighbours4{-9, -9, -9, -9};
374  float seedEnergy = -1.;
375  Position4 seedInitClusterPos = Position4{0., 0., 0., 0.};
376  if (tid < nSeeds) {
377  if constexpr (debug)
378  printf("tid: %d\n", tid);
379  seedThreadIdx = getSeedRhIdx(seeds, tid);
380  seedNeighbors = Neighbours4{pfRecHits[seedThreadIdx].neighbours()(0),
381  pfRecHits[seedThreadIdx].neighbours()(1),
382  pfRecHits[seedThreadIdx].neighbours()(2),
383  pfRecHits[seedThreadIdx].neighbours()(3)};
384  seedEnergy = pfRecHits[seedThreadIdx].energy();
385 
386  // Compute initial cluster position shift for seed
387  updateClusterPos(pfClusParams, seedInitClusterPos, 1., seedThreadIdx, pfRecHits, rhENormInv);
388  }
389 
390  do {
391  if constexpr (debug) {
392  if (once_per_block(acc))
393  printf("\n--- Now on iter %d for topoId %d ---\n", iter, topoId);
394  }
395 
396  // Reset rhFracSum
397  rhFracSum[tid] = 0.;
398  if (once_per_block(acc))
399  diff2 = -1;
400 
401  if (tid < (nRHNotSeed - 1)) {
402  for (int s = 0; s < nSeeds; s++) {
403  float dist2 = (clusterPos[s].x - rhThreadPos.x) * (clusterPos[s].x - rhThreadPos.x) +
404  (clusterPos[s].y - rhThreadPos.y) * (clusterPos[s].y - rhThreadPos.y) +
405  (clusterPos[s].z - rhThreadPos.z) * (clusterPos[s].z - rhThreadPos.z);
406 
407  float d2 = dist2 / pfClusParams.showerSigma2();
408  float fraction = clusterEnergy[s] * rhENormInv * expf(-0.5f * d2);
409 
410  rhFracSum[tid] += fraction;
411  }
412  }
413  alpaka::syncBlockThreads(acc); // all threads call sync
414 
415  if (tid < (nRHNotSeed - 1)) {
416  for (int s = 0; s < nSeeds; s++) {
417  int i = seeds[s];
418  float dist2 = (clusterPos[s].x - rhThreadPos.x) * (clusterPos[s].x - rhThreadPos.x) +
419  (clusterPos[s].y - rhThreadPos.y) * (clusterPos[s].y - rhThreadPos.y) +
420  (clusterPos[s].z - rhThreadPos.z) * (clusterPos[s].z - rhThreadPos.z);
421 
422  float d2 = dist2 / pfClusParams.showerSigma2();
423  float fraction = clusterEnergy[s] * rhENormInv * expf(-0.5f * d2);
424 
425  if (rhFracSum[tid] > pfClusParams.minFracTot()) {
426  float fracpct = fraction / rhFracSum[tid];
427  if (fracpct > cutoffFraction || (d2 < cutoffDistance && fracpct > pfClusParams.minFracToKeep())) {
428  fracView[pfClusteringVars[i].seedFracOffsets() + tid + 1].frac() = fracpct;
429  } else {
430  fracView[pfClusteringVars[i].seedFracOffsets() + tid + 1].frac() = -1;
431  }
432  } else {
433  fracView[pfClusteringVars[i].seedFracOffsets() + tid + 1].frac() = -1;
434  }
435  }
436  }
437  alpaka::syncBlockThreads(acc); // all threads call sync
438 
439  if constexpr (debug) {
440  if (once_per_block(acc))
441  printf("Computing cluster position for topoId %d\n", topoId);
442  }
443 
444  // Reset cluster position and energy
445  if (tid < nSeeds) {
446  clusterPos[tid] = seedInitClusterPos;
447  clusterEnergy[tid] = seedEnergy;
448  if constexpr (debug) {
449  printf("Cluster %d (seed %d) has energy %f\tpos = (%f, %f, %f, %f)\n",
450  tid,
451  seeds[tid],
452  clusterEnergy[tid],
453  clusterPos[tid].x,
454  clusterPos[tid].y,
455  clusterPos[tid].z,
456  clusterPos[tid].w);
457  }
458  }
459  alpaka::syncBlockThreads(acc); // all threads call sync
460 
461  // Recalculate position
462  if (tid < nSeeds) {
463  for (int r = 0; r < nRHNotSeed - 1; r++) {
464  int j = rechits[r];
465  float frac = getRhFrac(pfClusteringVars, topoSeedBegin, fracView, tid, r + 1);
466 
467  if (frac > -0.5) {
468  clusterEnergy[tid] += frac * pfRecHits[j].energy();
469 
470  if (nSeeds == 1 || j == seedNeighbors.x || j == seedNeighbors.y || j == seedNeighbors.z ||
471  j == seedNeighbors.w)
472  updateClusterPos(pfClusParams, clusterPos[tid], frac, j, pfRecHits, rhENormInv);
473  }
474  }
475  }
476  alpaka::syncBlockThreads(acc); // all threads call sync
477 
478  // Position normalization
479  if (tid < nSeeds) {
480  if (clusterPos[tid].w >= pfClusParams.minAllowedNormalization()) {
481  // Divide by position norm
482  clusterPos[tid].x /= clusterPos[tid].w;
483  clusterPos[tid].y /= clusterPos[tid].w;
484  clusterPos[tid].z /= clusterPos[tid].w;
485 
486  if constexpr (debug)
487  printf("\tCluster %d (seed %d) energy = %f\tposition = (%f, %f, %f)\n",
488  tid,
489  seedThreadIdx,
490  clusterEnergy[tid],
491  clusterPos[tid].x,
492  clusterPos[tid].y,
493  clusterPos[tid].z);
494  } else {
495  if constexpr (debug)
496  printf("\tCluster %d (seed %d) position norm (%f) less than minimum (%f)\n",
497  tid,
498  seedThreadIdx,
499  clusterPos[tid].w,
500  pfClusParams.minAllowedNormalization());
501  clusterPos[tid].x = 0.0;
502  clusterPos[tid].y = 0.0;
503  clusterPos[tid].z = 0.0;
504  }
505  }
506  alpaka::syncBlockThreads(acc); // all threads call sync
507 
508  if (tid < nSeeds) {
509  float delta2 = dR2(prevClusterPos[tid], clusterPos[tid]);
510  if constexpr (debug)
511  printf("\tCluster %d (seed %d) has delta2 = %f\n", tid, seeds[tid], delta2);
512  atomicMaxF(acc, &diff2, delta2);
513  prevClusterPos[tid] = clusterPos[tid]; // Save clusterPos
514  }
515  alpaka::syncBlockThreads(acc); // all threads call sync
516 
517  if (once_per_block(acc)) {
518  float tol2 = tol * tol;
519  iter++;
520  notDone = (diff2 > tol2) && ((unsigned int)iter < pfClusParams.maxIterations());
521  if constexpr (debug) {
522  if (diff2 > tol2)
523  printf("\tTopoId %d has diff2 = %f greater than squared tolerance %f (continuing)\n", topoId, diff2, tol2);
524  else if constexpr (debug)
525  printf("\tTopoId %d has diff2 = %f LESS than squared tolerance %f (terminating!)\n", topoId, diff2, tol2);
526  }
527  }
528  alpaka::syncBlockThreads(acc); // all threads call sync
529  } while (notDone); // shared variable condition ensures synchronization is well defined
530  if (once_per_block(acc))
531  // Fill PFCluster-level info
532  if (tid < nSeeds) {
533  int rhIdx = pfClusteringVars[tid + pfClusteringVars[topoId].topoSeedOffsets()].topoSeedList();
534  int seedIdx = pfClusteringVars[rhIdx].rhIdxToSeedIdx();
535  clusterView[seedIdx].energy() = clusterEnergy[tid];
536  clusterView[seedIdx].x() = clusterPos[tid].x;
537  clusterView[seedIdx].y() = clusterPos[tid].y;
538  clusterView[seedIdx].z() = clusterPos[tid].z;
539  }
540  }
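
Each pass of the do-while loop above recomputes the rechit fractions from the current cluster positions, rebuilds every cluster's energy and position from those fractions, and reduces the largest per-seed movement into diff2 via atomicMaxF; the block exits once that movement is at most the squared tolerance or maxIterations is reached. A minimal host-side sketch of just this stopping rule (Pos, dR2 and iterateUntilConverged are illustrative stand-ins, not kernel code):

#include <algorithm>
#include <cstddef>
#include <vector>

struct Pos { float x, y, z; };

static float dR2(Pos a, Pos b) {
  float dx = a.x - b.x, dy = a.y - b.y, dz = a.z - b.z;
  return dx * dx + dy * dy + dz * dz;
}

// Stop once the largest squared movement of any cluster position is at
// most tol^2, or after maxIterations -- the same rule the kernel
// evaluates once per block after reducing delta2 with atomicMaxF.
void iterateUntilConverged(std::vector<Pos>& cur, float tol, unsigned maxIterations) {
  std::vector<Pos> prev = cur;
  const float tol2 = tol * tol;
  for (unsigned iter = 0; iter < maxIterations; ++iter) {
    // ... recompute fractions, energies and positions into cur here ...
    float diff2 = -1.f;
    for (std::size_t s = 0; s < cur.size(); ++s)
      diff2 = std::max(diff2, dR2(prev[s], cur[s]));
    prev = cur;
    if (diff2 <= tol2)
      break;
  }
}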

◆ hcalFastCluster_singleSeed()

template<bool debug = false, typename TAcc , typename = std::enable_if_t<alpaka::isAccelerator<TAcc>>>
static ALPAKA_FN_ACC void ALPAKA_ACCELERATOR_NAMESPACE::hcalFastCluster_singleSeed ( const TAcc &  acc,
reco::PFClusterParamsDeviceCollection::ConstView  pfClusParams,
const reco::PFRecHitHCALTopologyDeviceCollection::ConstView  topology,
int  topoId,
int  nRHTopo,
reco::PFRecHitDeviceCollection::ConstView  pfRecHits,
reco::PFClusteringVarsDeviceCollection::View  pfClusteringVars,
reco::PFClusterDeviceCollection::View  clusterView,
reco::PFRecHitFractionDeviceCollection::View  fracView 
)
static

Definition at line 91 of file PFClusterSoAProducerKernel.dev.cc.

References cms::cudacompat::atomicAdd(), ALPAKA_ACCELERATOR_NAMESPACE::brokenline::constexpr(), cutoffDistance, debug, dR2(), f, HLT_2024v14_cff::fraction, PFLayer::HCAL_BARREL1, PFLayer::HCAL_ENDCAP, mps_fire::i, dqmiolumiharvest::j, cms::alpakatools::once_per_block(), HLT_2024v14_cff::pfRecHits, HLT_2024v14_cff::topology, ALPAKA_ACCELERATOR_NAMESPACE::reco::pfClustering::Position4::x, ALPAKA_ACCELERATOR_NAMESPACE::reco::pfClustering::Position4::y, and ALPAKA_ACCELERATOR_NAMESPACE::reco::pfClustering::Position4::z.

Referenced by ALPAKA_ACCELERATOR_NAMESPACE::FastCluster::operator()().

100  {
101  int tid = alpaka::getIdx<alpaka::Block, alpaka::Threads>(acc)[0u]; // thread index is rechit number
102  // Declaration of shared variables
103  int& i = alpaka::declareSharedVar<int, __COUNTER__>(acc);
104  int& nRHOther = alpaka::declareSharedVar<int, __COUNTER__>(acc);
105  unsigned int& iter = alpaka::declareSharedVar<unsigned int, __COUNTER__>(acc);
106  float& tol = alpaka::declareSharedVar<float, __COUNTER__>(acc);
107  float& clusterEnergy = alpaka::declareSharedVar<float, __COUNTER__>(acc);
108  float& rhENormInv = alpaka::declareSharedVar<float, __COUNTER__>(acc);
109  float& seedEnergy = alpaka::declareSharedVar<float, __COUNTER__>(acc);
110  Position4& clusterPos = alpaka::declareSharedVar<Position4, __COUNTER__>(acc);
111  Position4& prevClusterPos = alpaka::declareSharedVar<Position4, __COUNTER__>(acc);
112  Position4& seedPos = alpaka::declareSharedVar<Position4, __COUNTER__>(acc);
113  bool& notDone = alpaka::declareSharedVar<bool, __COUNTER__>(acc);
114  if (once_per_block(acc)) {
115  i = pfClusteringVars[pfClusteringVars[topoId].topoSeedOffsets()].topoSeedList(); // i is the seed rechit index
116  nRHOther = nRHTopo - 1; // number of non-seed rechits
117  seedPos = Position4{pfRecHits[i].x(), pfRecHits[i].y(), pfRecHits[i].z(), 1.};
118  clusterPos = seedPos; // Initial cluster position is just the seed
119  prevClusterPos = seedPos;
120  seedEnergy = pfRecHits[i].energy();
121  clusterEnergy = seedEnergy;
 122  tol = pfClusParams.stoppingTolerance(); // stopping tolerance (no multi-seed scaling for a single seed)
123 
124  if (topology.cutsFromDB()) {
125  rhENormInv = (1.f / topology[pfRecHits[i].denseId()].noiseThreshold());
 126  } else {
 127  if (pfRecHits[i].layer() == PFLayer::HCAL_BARREL1)
 128  rhENormInv = pfClusParams.recHitEnergyNormInvHB_vec()[pfRecHits[i].depth() - 1];
129  else if (pfRecHits[i].layer() == PFLayer::HCAL_ENDCAP)
130  rhENormInv = pfClusParams.recHitEnergyNormInvHE_vec()[pfRecHits[i].depth() - 1];
131  else {
132  rhENormInv = 0.;
133  printf("Rechit %d has invalid layer %d!\n", i, pfRecHits[i].layer());
134  }
135  }
136 
137  iter = 0;
138  notDone = true;
139  }
140  alpaka::syncBlockThreads(acc); // all threads call sync
141 
142  int j = -1; // j is the rechit index for this thread
143  int rhFracOffset = -1;
144  Position4 rhPos;
145  float rhEnergy = -1., rhPosNorm = -1.;
146 
147  if (tid < nRHOther) {
148  rhFracOffset =
149  pfClusteringVars[i].seedFracOffsets() + tid + 1; // Offset for this rechit in pcrhfrac, pcrhfracidx arrays
150  j = fracView[rhFracOffset].pfrhIdx(); // rechit index for this thread
151  rhPos = Position4{pfRecHits[j].x(), pfRecHits[j].y(), pfRecHits[j].z(), 1.};
152  rhEnergy = pfRecHits[j].energy();
153  rhPosNorm = fmaxf(0., logf(rhEnergy * rhENormInv));
154  }
155  alpaka::syncBlockThreads(acc); // all threads call sync
156 
157  do {
158  if constexpr (debug) {
159  if (once_per_block(acc))
 160  printf("\n--- Now on iter %u for topoId %d ---\n", iter, topoId);
161  }
162  float dist2 = -1., d2 = -1., fraction = -1.;
163  if (tid < nRHOther) {
164  // Rechit distance calculation
165  dist2 = (clusterPos.x - rhPos.x) * (clusterPos.x - rhPos.x) +
166  (clusterPos.y - rhPos.y) * (clusterPos.y - rhPos.y) +
167  (clusterPos.z - rhPos.z) * (clusterPos.z - rhPos.z);
168 
169  d2 = dist2 / pfClusParams.showerSigma2();
170  fraction = clusterEnergy * rhENormInv * expf(-0.5f * d2);
171 
172  // For single seed clusters, rechit fraction is either 1 (100%) or -1 (not included)
173  if (fraction > pfClusParams.minFracTot() && d2 < cutoffDistance)
174  fraction = 1.;
175  else
176  fraction = -1.;
177  fracView[rhFracOffset].frac() = fraction;
178  }
179  alpaka::syncBlockThreads(acc); // all threads call sync
180 
181  if constexpr (debug) {
182  if (once_per_block(acc))
183  printf("Computing cluster position for topoId %d\n", topoId);
184  }
185 
186  if (once_per_block(acc)) {
187  // Reset cluster position and energy
188  clusterPos = seedPos;
189  clusterEnergy = seedEnergy;
190  }
191  alpaka::syncBlockThreads(acc); // all threads call sync
192 
193  // Recalculate cluster position and energy
194  if (fraction > -0.5) {
195  alpaka::atomicAdd(acc, &clusterEnergy, rhEnergy, alpaka::hierarchy::Threads{});
196  alpaka::atomicAdd(acc, &clusterPos.x, rhPos.x * rhPosNorm, alpaka::hierarchy::Threads{});
197  alpaka::atomicAdd(acc, &clusterPos.y, rhPos.y * rhPosNorm, alpaka::hierarchy::Threads{});
198  alpaka::atomicAdd(acc, &clusterPos.z, rhPos.z * rhPosNorm, alpaka::hierarchy::Threads{});
199  alpaka::atomicAdd(acc, &clusterPos.w, rhPosNorm, alpaka::hierarchy::Threads{}); // position_norm
200  }
201  alpaka::syncBlockThreads(acc); // all threads call sync
202 
203  if (once_per_block(acc)) {
 204  // Normalize the seed position
205  if (clusterPos.w >= pfClusParams.minAllowedNormalization()) {
206  // Divide by position norm
207  clusterPos.x /= clusterPos.w;
208  clusterPos.y /= clusterPos.w;
209  clusterPos.z /= clusterPos.w;
210 
211  if constexpr (debug)
212  printf("\tPF cluster (seed %d) energy = %f\tposition = (%f, %f, %f)\n",
213  i,
214  clusterEnergy,
215  clusterPos.x,
216  clusterPos.y,
217  clusterPos.z);
218  } else {
219  if constexpr (debug)
220  printf("\tPF cluster (seed %d) position norm (%f) less than minimum (%f)\n",
221  i,
222  clusterPos.w,
223  pfClusParams.minAllowedNormalization());
224  clusterPos.x = 0.;
225  clusterPos.y = 0.;
226  clusterPos.z = 0.;
227  }
228  float diff2 = dR2(prevClusterPos, clusterPos);
229  if constexpr (debug)
230  printf("\tPF cluster (seed %d) has diff2 = %f\n", i, diff2);
231  prevClusterPos = clusterPos; // Save clusterPos
232 
233  float tol2 = tol * tol;
234  iter++;
235  notDone = (diff2 > tol2) && (iter < pfClusParams.maxIterations());
236  if constexpr (debug) {
237  if (diff2 > tol2)
238  printf("\tTopoId %d has diff2 = %f greater than squared tolerance %f (continuing)\n", topoId, diff2, tol2);
239  else if constexpr (debug)
240  printf("\tTopoId %d has diff2 = %f LESS than squared tolerance %f (terminating!)\n", topoId, diff2, tol2);
241  }
242  }
243  alpaka::syncBlockThreads(acc); // all threads call sync
244  } while (notDone); // shared variable condition ensures synchronization is well defined
 245  if (once_per_block(acc)) { // Cluster is finalized, assign cluster information to the SoA
246  int rhIdx =
247  pfClusteringVars[pfClusteringVars[topoId].topoSeedOffsets()].topoSeedList(); // i is the seed rechit index
248  int seedIdx = pfClusteringVars[rhIdx].rhIdxToSeedIdx();
249  clusterView[seedIdx].energy() = clusterEnergy;
250  clusterView[seedIdx].x() = clusterPos.x;
251  clusterView[seedIdx].y() = clusterPos.y;
252  clusterView[seedIdx].z() = clusterPos.z;
253  }
254  }
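
With a single seed there is no competition between clusters, so the Gaussian fraction in the listing above collapses to an all-or-nothing assignment. A scalar sketch of that rule (singleSeedFraction is a hypothetical helper; the parameters mirror pfClusParams):

#include <cmath>

// A rechit joins a single-seed cluster outright (fraction 1) when its
// Gaussian weight exceeds minFracTot and it sits within cutoffDistance
// (d2 is the squared distance in units of showerSigma2); otherwise it
// is dropped (fraction -1).
float singleSeedFraction(float dist2, float clusterEnergy, float rhENormInv,
                         float showerSigma2, float minFracTot, float cutoffDistance) {
  const float d2 = dist2 / showerSigma2;
  const float fraction = clusterEnergy * rhENormInv * std::exp(-0.5f * d2);
  return (fraction > minFracTot && d2 < cutoffDistance) ? 1.f : -1.f;
}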

◆ representative()

ALPAKA_FN_ACC int ALPAKA_ACCELERATOR_NAMESPACE::representative ( const int  idx,
reco::PFClusteringVarsDeviceCollection::View  pfClusteringVars 
)
inline

Definition at line 65 of file PFClusterECLCC.h.

References heavyIonCSV_trainingSettings::idx, and GetRecoTauVFromDQM_MC_cff::next.

Referenced by ALPAKA_ACCELERATOR_NAMESPACE::ECLCCCompute1::operator()().

66  {
67  int curr = pfClusteringVars[idx].pfrh_topoId();
68  if (curr != idx) {
69  int next, prev = idx;
70  while (curr > (next = pfClusteringVars[curr].pfrh_topoId())) {
71  pfClusteringVars[prev].pfrh_topoId() = next;
72  prev = curr;
73  curr = next;
74  }
75  }
76  return curr;
77  }
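
representative() follows the pfrh_topoId parent links until they stop decreasing (the root), re-pointing each visited node past its parent as it goes; this is the pointer-jumping (path compression) step of the ECL-CC connected-components algorithm. The same traversal on a bare parent array (a hypothetical standalone version for illustration):

// Follow parent links toward smaller indices until they stop
// decreasing (the root), compressing the path so that subsequent
// queries traverse fewer links.
int representative(int idx, int* parent) {
  int curr = parent[idx];
  if (curr != idx) {
    int next, prev = idx;
    while (curr > (next = parent[curr])) {
      parent[prev] = next;  // path compression
      prev = curr;
      curr = next;
    }
  }
  return curr;
}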

◆ testESAlgoAsync()

AlpakaESTestDataDDevice ALPAKA_ACCELERATOR_NAMESPACE::testESAlgoAsync ( Queue &  queue,
AlpakaESTestDataADevice const &  dataA,
cms::alpakatest::AlpakaESTestDataB< Device > const &  dataB 
)

Definition at line 4 of file testESAlgoAsync.dev.cc.

References a, b, DummyCfis::c, cms::alpakatest::AlpakaESTestDataB< TDev >::data(), mps_fire::i, SiStripPI::min, createBeamHaloJobs::queue, runTheMatrix::ret, cms::alpakatest::AlpakaESTestDataB< TDev >::size(), and riemannFit::stride.

Referenced by ALPAKA_ACCELERATOR_NAMESPACE::TestAlpakaESProducerD::produce().

6  {
7  auto const size = std::min(dataA->metadata().size(), static_cast<int>(dataB.size()));
 8  AlpakaESTestDataDDevice ret(size, queue);
 9 
10  auto const& deviceProperties = alpaka::getAccDevProps<Acc1D>(alpaka::getDev(queue));
11  uint32_t maxThreadsPerBlock = deviceProperties.m_blockThreadExtentMax[0];
12 
13  uint32_t threadsPerBlock = maxThreadsPerBlock;
14  uint32_t blocksPerGrid = (size + threadsPerBlock - 1) / threadsPerBlock;
15  uint32_t elementsPerThread = 1;
16  auto workDiv = WorkDiv1D{blocksPerGrid, threadsPerBlock, elementsPerThread};
17 
18  alpaka::exec<Acc1D>(
19  queue,
20  workDiv,
21  [] ALPAKA_FN_ACC(Acc1D const& acc,
22  AlpakaESTestDataADevice::ConstView a,
23  int const* b,
 24  AlpakaESTestDataDDevice::View c,
 25  int size) {
26  const int32_t thread = alpaka::getIdx<alpaka::Grid, alpaka::Threads>(acc)[0u];
27  const int32_t stride = alpaka::getWorkDiv<alpaka::Grid, alpaka::Threads>(acc)[0u];
28  for (auto i = thread; i < size; i += stride) {
29  c[i] = a.z()[i] + b[i];
30  }
31  },
32  dataA.view(),
33  dataB.data(),
34  ret.view(),
35  size);
36 
37  return ret;
38  }
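
The work division in the listing above is plain ceiling division (e.g. size = 1000 with threadsPerBlock = 1024 gives blocksPerGrid = (1000 + 1023) / 1024 = 1), and the kernel body is a grid-stride loop, so the result stays correct for any launch size. A host-side sketch of both pieces (blocksFor and addWithStride are hypothetical helper names):

#include <cstdint>

// Ceiling division: the smallest grid for which
// blocksPerGrid * threadsPerBlock >= size.
uint32_t blocksFor(uint32_t size, uint32_t threadsPerBlock) {
  return (size + threadsPerBlock - 1) / threadsPerBlock;
}

// Serial analogue of the kernel's grid-stride loop: every index
// i < size is visited exactly once, however many threads the grid
// provides.
void addWithStride(const int* a, const int* b, int* c, int size, int nThreads) {
  for (int thread = 0; thread < nThreads; ++thread)
    for (int i = thread; i < size; i += nThreads)
      c[i] = a[i] + b[i];
}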

◆ updateClusterPos()

template<bool debug = false>
static ALPAKA_FN_ACC void ALPAKA_ACCELERATOR_NAMESPACE::updateClusterPos ( reco::PFClusterParamsDeviceCollection::ConstView  pfClusParams,
Position4 pos4,
float  frac,
int  rhInd,
reco::PFRecHitDeviceCollection::ConstView  pfRecHits,
float  rhENormInv 
)
static

Definition at line 63 of file PFClusterSoAProducerKernel.dev.cc.

References ALPAKA_ACCELERATOR_NAMESPACE::brokenline::constexpr(), debug, f, DivergingColor::frac, SiStripPI::max, HLT_2024v14_cff::pfRecHits, ALPAKA_ACCELERATOR_NAMESPACE::reco::pfClustering::Position4::w, ALPAKA_ACCELERATOR_NAMESPACE::reco::pfClustering::Position4::x, ALPAKA_ACCELERATOR_NAMESPACE::reco::pfClustering::Position4::y, and ALPAKA_ACCELERATOR_NAMESPACE::reco::pfClustering::Position4::z.

Referenced by hcalFastCluster_exotic(), hcalFastCluster_multiSeedIterative(), and hcalFastCluster_multiSeedParallel().

68  {
69  Position4 rechitPos = Position4{pfRecHits[rhInd].x(), pfRecHits[rhInd].y(), pfRecHits[rhInd].z(), 1.0};
70  const auto rh_energy = pfRecHits[rhInd].energy() * frac;
71  const auto norm = (frac < pfClusParams.minFracInCalc() ? 0.0f : std::max(0.0f, logf(rh_energy * rhENormInv)));
72  if constexpr (debug)
73  printf("\t\t\trechit %d: norm = %f\tfrac = %f\trh_energy = %f\tpos = (%f, %f, %f)\n",
74  rhInd,
75  norm,
76  frac,
77  rh_energy,
78  rechitPos.x,
79  rechitPos.y,
80  rechitPos.z);
81 
82  pos4.x += rechitPos.x * norm;
83  pos4.y += rechitPos.y * norm;
84  pos4.z += rechitPos.z * norm;
85  pos4.w += norm; // position_norm
86  }
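
updateClusterPos() accumulates a log-energy-weighted centroid: each rechit contributes its position with weight max(0, log(E_rh * frac * rhENormInv)), zeroed when frac is below minFracInCalc, while pos4.w collects the total weight used for the later normalization. A scalar sketch of the weight alone (positionWeight is a hypothetical helper):

#include <algorithm>
#include <cmath>

// Weight carried by one rechit in the centroid sum: zero when its
// fraction is negligible or its fractional energy is at or below the
// energy norm (log <= 0), so soft hits never pull the position.
float positionWeight(float rhEnergy, float frac, float rhENormInv, float minFracInCalc) {
  if (frac < minFracInCalc)
    return 0.0f;
  return std::max(0.0f, std::log(rhEnergy * frac * rhENormInv));
}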

Variable Documentation

◆ blocksForExoticClusters

constexpr uint32_t ALPAKA_ACCELERATOR_NAMESPACE::blocksForExoticClusters = 4
static

◆ cutoffDistance

constexpr float ALPAKA_ACCELERATOR_NAMESPACE::cutoffDistance = 100.
static

Definition at line 24 of file PFClusterSoAProducerKernel.dev.cc.

Referenced by hcalFastCluster_singleSeed().

◆ cutoffFraction

constexpr float ALPAKA_ACCELERATOR_NAMESPACE::cutoffFraction = 0.9999
static

◆ kHBHalf

constexpr uint32_t ALPAKA_ACCELERATOR_NAMESPACE::kHBHalf = 1296
static

Definition at line 27 of file PFClusterSoAProducerKernel.dev.cc.

◆ maxTopoInput

constexpr uint32_t ALPAKA_ACCELERATOR_NAMESPACE::maxTopoInput = 2 * kHBHalf
static

Definition at line 28 of file PFClusterSoAProducerKernel.dev.cc.

Referenced by hcalFastCluster_exotic().
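
With kHBHalf = 1296, this evaluates to maxTopoInput = 2 × 1296 = 2592 rechits.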

◆ threadsPerBlockForClustering

constexpr int ALPAKA_ACCELERATOR_NAMESPACE::threadsPerBlockForClustering = 512
static