
SiPixelDigiErrorsSoAFromCUDA.cc
// NOTE: the include list is reconstructed from the types used below; the
// original file's exact set of headers may differ slightly.
#include "CUDADataFormats/Common/interface/Product.h"
#include "CUDADataFormats/SiPixelDigi/interface/SiPixelDigiErrorsCUDA.h"
#include "DataFormats/SiPixelRawData/interface/SiPixelErrorsSoA.h"
#include "FWCore/Framework/interface/Event.h"
#include "FWCore/Framework/interface/EventSetup.h"
#include "FWCore/Framework/interface/MakerMacros.h"
#include "FWCore/Framework/interface/stream/EDProducer.h"
#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h"
#include "FWCore/ParameterSet/interface/ParameterSet.h"
#include "FWCore/ParameterSet/interface/ParameterSetDescription.h"
#include "HeterogeneousCore/CUDACore/interface/ScopedContext.h"
#include "HeterogeneousCore/CUDAUtilities/interface/host_unique_ptr.h"

class SiPixelDigiErrorsSoAFromCUDA : public edm::stream::EDProducer<edm::ExternalWork> {
public:
  explicit SiPixelDigiErrorsSoAFromCUDA(const edm::ParameterSet& iConfig);
  ~SiPixelDigiErrorsSoAFromCUDA() override = default;

  static void fillDescriptions(edm::ConfigurationDescriptions& descriptions);

private:
  void acquire(const edm::Event& iEvent,
               const edm::EventSetup& iSetup,
               edm::WaitingTaskWithArenaHolder waitingTaskHolder) override;
  void produce(edm::Event& iEvent, const edm::EventSetup& iSetup) override;

  // token for the GPU-side error product and token for the host SoA put into the event
  edm::EDGetTokenT<cms::cuda::Product<SiPixelDigiErrorsCUDA>> digiErrorGetToken_;
  edm::EDPutTokenT<SiPixelErrorsSoA> digiErrorPutToken_;

  // pinned host buffer owning the transferred error words, and a non-owning
  // SimpleVector view (size + pointer) into it
  cms::cuda::host::unique_ptr<SiPixelErrorCompact[]> data_;
  cms::cuda::SimpleVector<SiPixelErrorCompact> error_ = cms::cuda::make_SimpleVector<SiPixelErrorCompact>(0, nullptr);
  const SiPixelFormatterErrors* formatterErrors_ = nullptr;
};

SiPixelDigiErrorsSoAFromCUDA::SiPixelDigiErrorsSoAFromCUDA(const edm::ParameterSet& iConfig)
    : digiErrorGetToken_(
          consumes<cms::cuda::Product<SiPixelDigiErrorsCUDA>>(iConfig.getParameter<edm::InputTag>("src"))),
      digiErrorPutToken_(produces<SiPixelErrorsSoA>()) {}

void SiPixelDigiErrorsSoAFromCUDA::fillDescriptions(edm::ConfigurationDescriptions& descriptions) {
  edm::ParameterSetDescription desc;
  desc.add<edm::InputTag>("src", edm::InputTag("siPixelClustersCUDA"));
  descriptions.addWithDefaultLabel(desc);
}

void SiPixelDigiErrorsSoAFromCUDA::acquire(const edm::Event& iEvent,
                                           const edm::EventSetup& iSetup,
                                           edm::WaitingTaskWithArenaHolder waitingTaskHolder) {
  // Do the transfer in a CUDA stream parallel to the computation CUDA stream
  cms::cuda::ScopedContextAcquire ctx{iEvent.streamID(), std::move(waitingTaskHolder)};

  const auto& gpuDigiErrors = ctx.get(iEvent, digiErrorGetToken_);
  formatterErrors_ = &(gpuDigiErrors.formatterErrors());

  // nothing to copy if no error words were produced on the GPU
  if (gpuDigiErrors.nErrorWords() == 0)
    return;

  // asynchronous device-to-host copy: the pair holds the SimpleVector view
  // and the pinned host buffer that owns the data
  auto tmp = gpuDigiErrors.dataErrorToHostAsync(ctx.stream());
  error_ = tmp.first;
  data_ = std::move(tmp.second);
}

void SiPixelDigiErrorsSoAFromCUDA::produce(edm::Event& iEvent, const edm::EventSetup& iSetup) {
  // The following line copies the data from the pinned host memory to
  // regular host memory. In principle that feels unnecessary (why not
  // just use the pinned host memory?). There are a few arguments for
  // doing it though:
  // - The pinned host memory can now be released back to the (caching)
  //   allocator
  //   * if we'd like to keep the pinned memory, we'd need to also keep
  //     the CUDA stream around as long as that, or allow pinned host
  //     memory to be allocated without a CUDA stream
  // - What if a CPU algorithm were to produce the same SoA? We can't use
  //   cudaMallocHost without a GPU...
  iEvent.emplace(digiErrorPutToken_, error_.size(), error_.data(), formatterErrors_);

  // release the transient buffers until the next event
  error_ = cms::cuda::make_SimpleVector<SiPixelErrorCompact>(0, nullptr);
  data_.reset();
  formatterErrors_ = nullptr;
}

// define as framework plugin
DEFINE_FWK_MODULE(SiPixelDigiErrorsSoAFromCUDA);
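
A minimal, hypothetical consumer sketch (not part of this file) showing how the SiPixelErrorsSoA emplaced in produce() could be read back in another stream module. The analyzer name, the "src" parameter, and the size() accessor are assumptions made for illustration only.

#include "DataFormats/SiPixelRawData/interface/SiPixelErrorsSoA.h"
#include "FWCore/Framework/interface/Event.h"
#include "FWCore/Framework/interface/EventSetup.h"
#include "FWCore/Framework/interface/stream/EDAnalyzer.h"
#include "FWCore/MessageLogger/interface/MessageLogger.h"
#include "FWCore/ParameterSet/interface/ParameterSet.h"

// Hypothetical example module; not part of SiPixelDigiErrorsSoAFromCUDA.cc
class SiPixelErrorsSoAInspector : public edm::stream::EDAnalyzer<> {
public:
  explicit SiPixelErrorsSoAInspector(const edm::ParameterSet& iConfig)
      : errorsToken_(consumes<SiPixelErrorsSoA>(iConfig.getParameter<edm::InputTag>("src"))) {}

  void analyze(const edm::Event& iEvent, const edm::EventSetup&) override {
    const auto& errors = iEvent.get(errorsToken_);
    // size() is assumed to mirror the nErrors argument passed to emplace() above
    edm::LogInfo("SiPixelErrorsSoAInspector") << "pixel error words: " << errors.size();
  }

private:
  edm::EDGetTokenT<SiPixelErrorsSoA> errorsToken_;
};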