
EventProcessor.cc
8 
45 
47 
55 
60 
65 
78 
79 #include "MessageForSource.h"
80 #include "MessageForParent.h"
82 #include "RunProcessingStatus.h"
83 
84 #include "boost/range/adaptor/reversed.hpp"
85 
86 #include <cassert>
87 #include <exception>
88 #include <iomanip>
89 #include <iostream>
90 #include <utility>
91 #include <sstream>
92 
93 #include <sys/ipc.h>
94 #include <sys/msg.h>
95 
96 #include "oneapi/tbb/task.h"
97 
98 //Used for CPU affinity
99 #ifndef __APPLE__
100 #include <sched.h>
101 #endif
102 
103 namespace {
104  class PauseQueueSentry {
105  public:
106  PauseQueueSentry(edm::SerialTaskQueue& queue) : queue_(queue) { queue_.pause(); }
107  ~PauseQueueSentry() { queue_.resume(); }
108 
109  private:
110  edm::SerialTaskQueue& queue_;
111  };
112 } // namespace
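 // Illustrative sketch, not part of EventProcessor.cc: PauseQueueSentry is an RAII
 // guard that keeps a SerialTaskQueue paused for the lifetime of a scope, so work
 // pushed while the sentry is alive only starts once the sentry is destroyed and the
 // queue resumes. The helper below is hypothetical and only demonstrates the idiom.
 namespace {
  [[maybe_unused]] void pauseQueueSentrySketch(edm::SerialTaskQueue& queue, oneapi::tbb::task_group& group) {
   PauseQueueSentry sentry(queue);  // pauses the queue in the constructor
   queue.push(group, [] { /* deferred until the sentry resumes the queue */ });
  }
 }  // namespace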
113 
114 namespace edm {
115 
116  namespace chain = waiting_task::chain;
117 
118  // ---------------------------------------------------------------
119  std::unique_ptr<InputSource> makeInput(unsigned int moduleIndex,
121  CommonParams const& common,
122  std::shared_ptr<ProductRegistry> preg,
123  std::shared_ptr<BranchIDListHelper> branchIDListHelper,
124  std::shared_ptr<ProcessBlockHelper> const& processBlockHelper,
125  std::shared_ptr<ThinnedAssociationsHelper> thinnedAssociationsHelper,
126  std::shared_ptr<ActivityRegistry> areg,
127  std::shared_ptr<ProcessConfiguration const> processConfiguration,
128  PreallocationConfiguration const& allocations) {
129  ParameterSet* main_input = params.getPSetForUpdate("@main_input");
130  if (main_input == nullptr) {
132  << "There must be exactly one source in the configuration.\n"
133  << "It is missing (or there are sufficient syntax errors such that it is not recognized as the source)\n";
134  }
135 
136  std::string modtype(main_input->getParameter<std::string>("@module_type"));
137 
138  std::unique_ptr<ParameterSetDescriptionFillerBase> filler(
140  ConfigurationDescriptions descriptions(filler->baseType(), modtype);
141  filler->fill(descriptions);
142 
143  try {
144  convertException::wrap([&]() { descriptions.validate(*main_input, std::string("source")); });
145  } catch (cms::Exception& iException) {
146  std::ostringstream ost;
147  ost << "Validating configuration of input source of type " << modtype;
148  iException.addContext(ost.str());
149  throw;
150  }
151 
152  main_input->registerIt();
153 
154  // Fill in "ModuleDescription", in case the input source produces
155  // any EDProducts, which would be registered in the ProductRegistry.
156  // Also fill in the process history item for this process.
157  // There is no module label for the unnamed input source, so
158  // just use "source".
159  // Only the tracked parameters belong in the process configuration.
160  ModuleDescription md(main_input->id(),
161  main_input->getParameter<std::string>("@module_type"),
162  "source",
163  processConfiguration.get(),
164  moduleIndex);
165 
166  InputSourceDescription isdesc(md,
167  preg,
168  branchIDListHelper,
169  processBlockHelper,
170  thinnedAssociationsHelper,
171  areg,
172  common.maxEventsInput_,
173  common.maxLumisInput_,
174  common.maxSecondsUntilRampdown_,
175  allocations);
176 
177  areg->preSourceConstructionSignal_(md);
178  std::unique_ptr<InputSource> input;
179  try {
180  //even if we have an exception, send the signal
181  std::shared_ptr<int> sentry(nullptr, [areg, &md](void*) { areg->postSourceConstructionSignal_(md); });
182  convertException::wrap([&]() {
183  input = std::unique_ptr<InputSource>(InputSourceFactory::get()->makeInputSource(*main_input, isdesc).release());
184  input->preEventReadFromSourceSignal_.connect(std::cref(areg->preEventReadFromSourceSignal_));
185  input->postEventReadFromSourceSignal_.connect(std::cref(areg->postEventReadFromSourceSignal_));
186  });
187  } catch (cms::Exception& iException) {
188  std::ostringstream ost;
189  ost << "Constructing input source of type " << modtype;
190  iException.addContext(ost.str());
191  throw;
192  }
193  return input;
194  }
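  // Illustrative sketch, not part of EventProcessor.cc: makeInput() above emits the
  // post-construction signal from the deleter of a null std::shared_ptr, a scope-guard
  // idiom that fires on both the normal and the exceptional exit path. The generic
  // helper below is hypothetical and only restates that idiom.
  namespace {
   template <typename Body, typename Pre, typename Post>
   void runBracketedBySignals(Body&& body, Pre&& pre, Post&& post) {
    std::forward<Pre>(pre)();
    std::shared_ptr<void> guard(nullptr, [&post](void*) { post(); });  // deleter runs even if body() throws
    std::forward<Body>(body)();
   }
  }  // namespace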
195 
196  // ---------------------------------------------------------------
197  std::shared_ptr<EDLooperBase> fillLooper(eventsetup::EventSetupsController& esController,
200  std::vector<std::string> const& loopers) {
201  std::shared_ptr<EDLooperBase> vLooper;
202 
203  assert(1 == loopers.size());
204 
205  for (auto const& looperName : loopers) {
206  ParameterSet* providerPSet = params.getPSetForUpdate(looperName);
207  // Unlikely we would ever need the ModuleTypeResolver in Looper
208  vLooper = eventsetup::LooperFactory::get()->addTo(esController, cp, *providerPSet, nullptr);
209  }
210  return vLooper;
211  }
212 
213  // ---------------------------------------------------------------
214  EventProcessor::EventProcessor(std::unique_ptr<ParameterSet> parameterSet, //std::string const& config,
215  ServiceToken const& iToken,
217  std::vector<std::string> const& defaultServices,
218  std::vector<std::string> const& forcedServices)
219  : actReg_(),
220  preg_(),
221  branchIDListHelper_(),
222  serviceToken_(),
223  input_(),
224  moduleTypeResolverMaker_(makeModuleTypeResolverMaker(*parameterSet)),
225  espController_(std::make_unique<eventsetup::EventSetupsController>(moduleTypeResolverMaker_.get())),
226  esp_(),
227  act_table_(),
228  processConfiguration_(),
229  schedule_(),
230  subProcesses_(),
231  historyAppender_(new HistoryAppender),
232  fb_(),
233  looper_(),
234  deferredExceptionPtrIsSet_(false),
235  sourceResourcesAcquirer_(SharedResourcesRegistry::instance()->createAcquirerForSourceDelayedReader().first),
236  sourceMutex_(SharedResourcesRegistry::instance()->createAcquirerForSourceDelayedReader().second),
237  principalCache_(),
238  beginJobCalled_(false),
239  shouldWeStop_(false),
240  fileModeNoMerge_(false),
241  exceptionMessageFiles_(),
242  exceptionMessageRuns_(false),
243  exceptionMessageLumis_(false),
244  forceLooperToEnd_(false),
245  looperBeginJobRun_(false),
246  forceESCacheClearOnNewRun_(false),
247  eventSetupDataToExcludeFromPrefetching_() {
248  auto processDesc = std::make_shared<ProcessDesc>(std::move(parameterSet));
249  processDesc->addServices(defaultServices, forcedServices);
250  init(processDesc, iToken, iLegacy);
251  }
252 
253  EventProcessor::EventProcessor(std::unique_ptr<ParameterSet> parameterSet, //std::string const& config,
254  std::vector<std::string> const& defaultServices,
255  std::vector<std::string> const& forcedServices)
256  : actReg_(),
257  preg_(),
258  branchIDListHelper_(),
259  serviceToken_(),
260  input_(),
261  moduleTypeResolverMaker_(makeModuleTypeResolverMaker(*parameterSet)),
262  espController_(std::make_unique<eventsetup::EventSetupsController>(moduleTypeResolverMaker_.get())),
263  esp_(),
264  act_table_(),
265  processConfiguration_(),
266  schedule_(),
267  subProcesses_(),
268  historyAppender_(new HistoryAppender),
269  fb_(),
270  looper_(),
271  deferredExceptionPtrIsSet_(false),
272  sourceResourcesAcquirer_(SharedResourcesRegistry::instance()->createAcquirerForSourceDelayedReader().first),
273  sourceMutex_(SharedResourcesRegistry::instance()->createAcquirerForSourceDelayedReader().second),
274  principalCache_(),
275  beginJobCalled_(false),
276  shouldWeStop_(false),
277  fileModeNoMerge_(false),
278  exceptionMessageFiles_(),
279  exceptionMessageRuns_(false),
280  exceptionMessageLumis_(false),
281  forceLooperToEnd_(false),
282  looperBeginJobRun_(false),
283  forceESCacheClearOnNewRun_(false),
284  eventSetupDataToExcludeFromPrefetching_() {
285  auto processDesc = std::make_shared<ProcessDesc>(std::move(parameterSet));
286  processDesc->addServices(defaultServices, forcedServices);
288  }
289 
290  EventProcessor::EventProcessor(std::shared_ptr<ProcessDesc> processDesc,
291  ServiceToken const& token,
293  : actReg_(),
294  preg_(),
295  branchIDListHelper_(),
296  serviceToken_(),
297  input_(),
298  moduleTypeResolverMaker_(makeModuleTypeResolverMaker(*processDesc->getProcessPSet())),
299  espController_(std::make_unique<eventsetup::EventSetupsController>(moduleTypeResolverMaker_.get())),
300  esp_(),
301  act_table_(),
302  processConfiguration_(),
303  schedule_(),
304  subProcesses_(),
305  historyAppender_(new HistoryAppender),
306  fb_(),
307  looper_(),
308  deferredExceptionPtrIsSet_(false),
309  sourceResourcesAcquirer_(SharedResourcesRegistry::instance()->createAcquirerForSourceDelayedReader().first),
310  sourceMutex_(SharedResourcesRegistry::instance()->createAcquirerForSourceDelayedReader().second),
311  principalCache_(),
312  beginJobCalled_(false),
313  shouldWeStop_(false),
314  fileModeNoMerge_(false),
315  exceptionMessageFiles_(),
316  exceptionMessageRuns_(false),
317  exceptionMessageLumis_(false),
318  forceLooperToEnd_(false),
319  looperBeginJobRun_(false),
320  forceESCacheClearOnNewRun_(false),
321  eventSetupDataToExcludeFromPrefetching_() {
322  init(processDesc, token, legacy);
323  }
324 
325  void EventProcessor::init(std::shared_ptr<ProcessDesc>& processDesc,
326  ServiceToken const& iToken,
328  //std::cerr << processDesc->dump() << std::endl;
329 
330  // register the empty parentage vector, once and for all
332 
333  // register the empty parameter set, once and for all.
335 
336  std::shared_ptr<ParameterSet> parameterSet = processDesc->getProcessPSet();
337 
338  // If there are subprocesses, pop the subprocess parameter sets out of the process parameter set
339  auto subProcessVParameterSet = popSubProcessVParameterSet(*parameterSet);
340  bool const hasSubProcesses = !subProcessVParameterSet.empty();
341 
342  // Validates the parameters in the 'options', 'maxEvents', 'maxLuminosityBlocks',
343  // and 'maxSecondsUntilRampdown' top level parameter sets. Default values are also
344  // set in here if the parameters were not explicitly set.
346 
347  // Now set some parameters specific to the main process.
348  ParameterSet const& optionsPset(parameterSet->getUntrackedParameterSet("options"));
349  auto const& fileMode = optionsPset.getUntrackedParameter<std::string>("fileMode");
350  if (fileMode != "NOMERGE" and fileMode != "FULLMERGE") {
351  throw Exception(errors::Configuration, "Illegal fileMode parameter value: ")
352  << fileMode << ".\n"
353  << "Legal values are 'NOMERGE' and 'FULLMERGE'.\n";
354  } else {
355  fileModeNoMerge_ = (fileMode == "NOMERGE");
356  }
357  forceESCacheClearOnNewRun_ = optionsPset.getUntrackedParameter<bool>("forceEventSetupCacheClearOnNewRun");
359 
360  //threading
361  unsigned int nThreads = optionsPset.getUntrackedParameter<unsigned int>("numberOfThreads");
362 
363  // Even if numberOfThreads was set to zero in the Python configuration, the code
364  // in cmsRun.cpp should have reset it to something else.
365  assert(nThreads != 0);
366 
367  unsigned int nStreams = optionsPset.getUntrackedParameter<unsigned int>("numberOfStreams");
368  if (nStreams == 0) {
369  nStreams = nThreads;
370  }
371  unsigned int nConcurrentLumis =
372  optionsPset.getUntrackedParameter<unsigned int>("numberOfConcurrentLuminosityBlocks");
373  if (nConcurrentLumis == 0) {
374  nConcurrentLumis = 2;
375  }
376  if (nConcurrentLumis > nStreams) {
377  nConcurrentLumis = nStreams;
378  }
379  unsigned int nConcurrentRuns = optionsPset.getUntrackedParameter<unsigned int>("numberOfConcurrentRuns");
380  if (nConcurrentRuns == 0 || nConcurrentRuns > nConcurrentLumis) {
381  nConcurrentRuns = nConcurrentLumis;
382  }
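  // Worked example (editor's illustration, values hypothetical): with numberOfThreads=8
  // and the other three options left at 0, the defaulting above gives
  //   nStreams         = nThreads = 8
  //   nConcurrentLumis = 2 (default), then capped at nStreams -> 2
  //   nConcurrentRuns  = 0 -> clamped to nConcurrentLumis -> 2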
383  std::vector<std::string> loopers = parameterSet->getParameter<std::vector<std::string>>("@all_loopers");
384  if (!loopers.empty()) {
385  //For now loopers make us run only 1 transition at a time
386  if (nStreams != 1 || nConcurrentLumis != 1 || nConcurrentRuns != 1) {
387  edm::LogWarning("ThreadStreamSetup") << "There is a looper, so the number of streams, the number "
388  "of concurrent runs, and the number of concurrent lumis "
389  "are all being reset to 1. Loopers cannot currently support "
390  "values greater than 1.";
391  nStreams = 1;
392  nConcurrentLumis = 1;
393  nConcurrentRuns = 1;
394  }
395  }
396  bool dumpOptions = optionsPset.getUntrackedParameter<bool>("dumpOptions");
397  if (dumpOptions) {
398  dumpOptionsToLogFile(nThreads, nStreams, nConcurrentLumis, nConcurrentRuns);
399  } else {
400  if (nThreads > 1 or nStreams > 1) {
401  edm::LogInfo("ThreadStreamSetup") << "setting # threads " << nThreads << "\nsetting # streams " << nStreams;
402  }
403  }
404 
405  // The number of concurrent IOVs is configured individually for each record in
406  // the class NumberOfConcurrentIOVs to values less than or equal to this.
407  // This maximum simplifies to being equal to nConcurrentLumis if nConcurrentRuns is 1.
408  // Considering endRun, beginRun, and beginLumi we might need 3 concurrent IOVs per
409  // concurrent run past the first in use cases where IOVs change within a run.
410  unsigned int maxConcurrentIOVs =
411  3 * nConcurrentRuns - 2 + ((nConcurrentLumis > nConcurrentRuns) ? (nConcurrentLumis - nConcurrentRuns) : 0);
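  // Worked example (editor's illustration): with nConcurrentRuns = 2 and nConcurrentLumis = 4,
  //   maxConcurrentIOVs = 3*2 - 2 + (4 - 2) = 6
  // and with nConcurrentRuns = 1 the bound reduces to nConcurrentLumis, e.g.
  //   3*1 - 2 + (4 - 1) = 4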
412 
413  IllegalParameters::setThrowAnException(optionsPset.getUntrackedParameter<bool>("throwIfIllegalParameter"));
414 
415  printDependencies_ = optionsPset.getUntrackedParameter<bool>("printDependencies");
417  optionsPset.getUntrackedParameter<bool>("deleteNonConsumedUnscheduledModules");
418  //for now, if we have a SubProcess, don't allow early delete
419  // In the future we should use the SubProcess's 'keep list' to decide what can be kept
420  if (not hasSubProcesses) {
421  branchesToDeleteEarly_ = optionsPset.getUntrackedParameter<std::vector<std::string>>("canDeleteEarly");
422  }
423  if (not branchesToDeleteEarly_.empty()) {
424  auto referencePSets =
425  optionsPset.getUntrackedParameter<std::vector<edm::ParameterSet>>("holdsReferencesToDeleteEarly");
426  for (auto const& pset : referencePSets) {
427  auto product = pset.getParameter<std::string>("product");
428  auto references = pset.getParameter<std::vector<std::string>>("references");
429  for (auto const& ref : references) {
430  referencesToBranches_.emplace(product, ref);
431  }
432  }
434  optionsPset.getUntrackedParameter<std::vector<std::string>>("modulesToIgnoreForDeleteEarly");
435  }
436 
437  // Now do general initialization
439 
440  //initialize the services
441  auto& serviceSets = processDesc->getServicesPSets();
442  ServiceToken token = items.initServices(serviceSets, *parameterSet, iToken, iLegacy, true);
443  serviceToken_ = items.addCPRandTNS(*parameterSet, token);
444 
445  //make the services available
447 
448  CMS_SA_ALLOW try {
449  if (nThreads > 1) {
451  handler->willBeUsingThreads();
452  }
453 
454  // initialize miscellaneous items
455  std::shared_ptr<CommonParams> common(items.initMisc(*parameterSet));
456 
457  // initialize the event setup provider
458  ParameterSet const& eventSetupPset(optionsPset.getUntrackedParameterSet("eventSetup"));
459  esp_ = espController_->makeProvider(
460  *parameterSet, items.actReg_.get(), &eventSetupPset, maxConcurrentIOVs, dumpOptions);
461 
462  // initialize the looper, if any
463  if (!loopers.empty()) {
465  looper_->setActionTable(items.act_table_.get());
466  looper_->attachTo(*items.actReg_);
467 
468  // in the presence of a looper do not delete modules
470  }
471 
472  preallocations_ = PreallocationConfiguration{nThreads, nStreams, nConcurrentLumis, nConcurrentRuns};
473 
474  runQueue_ = std::make_unique<LimitedTaskQueue>(nConcurrentRuns);
475  lumiQueue_ = std::make_unique<LimitedTaskQueue>(nConcurrentLumis);
476  streamQueues_.resize(nStreams);
477  streamRunStatus_.resize(nStreams);
478  streamLumiStatus_.resize(nStreams);
479 
480  processBlockHelper_ = std::make_shared<ProcessBlockHelper>();
481 
482  {
483  std::optional<ScheduleItems::MadeModules> madeModules;
484 
485  //set up input and modules concurrently
486  tbb::task_group group;
487 
488  // initialize the input source
489  auto tempReg = std::make_shared<ProductRegistry>();
490  auto sourceID = ModuleDescription::getUniqueID();
491 
492  group.run([&, this]() {
493  // initialize the Schedule
496  madeModules =
498  });
499 
500  group.run([&, this, tempReg]() {
502  input_ = makeInput(sourceID,
503  *parameterSet,
504  *common,
505  /*items.preg(),*/ tempReg,
506  items.branchIDListHelper(),
508  items.thinnedAssociationsHelper(),
509  items.actReg_,
510  items.processConfiguration(),
512  });
513 
514  group.wait();
515  items.preg()->addFromInput(*tempReg);
516  input_->switchTo(items.preg());
517 
518  {
520  schedule_ = items.finishSchedule(std::move(*madeModules),
521  *parameterSet,
522  tns,
523  hasSubProcesses,
527  }
528  }
529 
530  // set the data members
531  act_table_ = std::move(items.act_table_);
532  actReg_ = items.actReg_;
533  preg_ = items.preg();
535  branchIDListHelper_ = items.branchIDListHelper();
536  thinnedAssociationsHelper_ = items.thinnedAssociationsHelper();
537  processConfiguration_ = items.processConfiguration();
539 
540  FDEBUG(2) << parameterSet << std::endl;
541 
543  for (unsigned int index = 0; index < preallocations_.numberOfStreams(); ++index) {
544  // Reusable event principal
545  auto ep = std::make_shared<EventPrincipal>(preg(),
549  historyAppender_.get(),
550  index,
551  true /*primary process*/,
554  }
555 
556  for (unsigned int index = 0; index < preallocations_.numberOfRuns(); ++index) {
557  auto rp = std::make_unique<RunPrincipal>(
560  }
561 
562  for (unsigned int index = 0; index < preallocations_.numberOfLuminosityBlocks(); ++index) {
563  auto lp =
564  std::make_unique<LuminosityBlockPrincipal>(preg(), *processConfiguration_, historyAppender_.get(), index);
566  }
567 
568  {
569  auto pb = std::make_unique<ProcessBlockPrincipal>(preg(), *processConfiguration_);
571 
572  auto pbForInput = std::make_unique<ProcessBlockPrincipal>(preg(), *processConfiguration_);
574  }
575 
576  // fill the subprocesses, if there are any
577  subProcesses_.reserve(subProcessVParameterSet.size());
578  for (auto& subProcessPSet : subProcessVParameterSet) {
579  subProcesses_.emplace_back(subProcessPSet,
580  *parameterSet,
581  preg(),
587  *actReg_,
588  token,
593  }
594  } catch (...) {
595  //in case of an exception, make sure Services are available
596  // during the following destructors
597  espController_ = nullptr;
598  esp_ = nullptr;
599  schedule_ = nullptr;
600  input_ = nullptr;
601  looper_ = nullptr;
602  actReg_ = nullptr;
603  throw;
604  }
605  }
606 
608  // Make the services available while everything is being deleted.
611 
612  // manually destroy all these things that may need the services around
613  // propagate_const<T> has no reset() function
614  espController_ = nullptr;
615  esp_ = nullptr;
616  schedule_ = nullptr;
617  input_ = nullptr;
618  looper_ = nullptr;
619  actReg_ = nullptr;
620 
623  }
624 
628  task.waitNoThrow();
629  assert(task.done());
630  }
631 
633  if (beginJobCalled_)
634  return;
635  beginJobCalled_ = true;
636  bk::beginJob();
637 
638  // StateSentry toerror(this); // should we add this ?
639  //make the services available
641 
646  actReg_->preallocateSignal_(bounds);
647  schedule_->convertCurrentProcessAlias(processConfiguration_->processName());
649 
650  std::vector<ModuleProcessName> consumedBySubProcesses;
652  [&consumedBySubProcesses, deleteModules = deleteNonConsumedUnscheduledModules_](auto& subProcess) {
653  auto c = subProcess.keepOnlyConsumedUnscheduledModules(deleteModules);
654  if (consumedBySubProcesses.empty()) {
655  consumedBySubProcesses = std::move(c);
656  } else if (not c.empty()) {
657  std::vector<ModuleProcessName> tmp;
658  tmp.reserve(consumedBySubProcesses.size() + c.size());
659  std::merge(consumedBySubProcesses.begin(),
660  consumedBySubProcesses.end(),
661  c.begin(),
662  c.end(),
663  std::back_inserter(tmp));
664  std::swap(consumedBySubProcesses, tmp);
665  }
666  });
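  // Illustrative sketch, not part of EventProcessor.cc: the lambda above keeps
  // consumedBySubProcesses sorted by merging each SubProcess's (sorted) list into it,
  // which is the precondition std::merge places on both inputs. A minimal analogue
  // with ints (hypothetical data):
  //   std::vector<int> merged{1, 3, 5}, incoming{2, 3, 6}, tmp;
  //   tmp.reserve(merged.size() + incoming.size());
  //   std::merge(merged.begin(), merged.end(), incoming.begin(), incoming.end(), std::back_inserter(tmp));
  //   std::swap(merged, tmp);  // merged == {1, 2, 3, 3, 5, 6}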
667 
668  // Note: all these may throw
671  if (auto const unusedModules = nonConsumedUnscheduledModules(pathsAndConsumesOfModules_, consumedBySubProcesses);
672  not unusedModules.empty()) {
674 
675  edm::LogInfo("DeleteModules").log([&unusedModules](auto& l) {
676  l << "Following modules are not in any Path or EndPath, nor is their output consumed by any other module, "
677  "and "
678  "therefore they are deleted before beginJob transition.";
679  for (auto const& description : unusedModules) {
680  l << "\n " << description->moduleLabel();
681  }
682  });
683  for (auto const& description : unusedModules) {
684  schedule_->deleteModule(description->moduleLabel(), actReg_.get());
685  }
686  }
687  }
688  // Initialize after the deletion of non-consumed unscheduled
689  // modules, to avoid non-consumed, never-run modules keeping the
690  // products alive unnecessarily
691  if (not branchesToDeleteEarly_.empty()) {
692  auto modulesToSkip = std::move(modulesToIgnoreForDeleteEarly_);
693  auto branchesToDeleteEarly = std::move(branchesToDeleteEarly_);
694  auto referencesToBranches = std::move(referencesToBranches_);
695  schedule_->initializeEarlyDelete(branchesToDeleteEarly, referencesToBranches, modulesToSkip, *preg_);
696  }
697 
700  }
701  if (preallocations_.numberOfRuns() > 1) {
703  }
705 
706  //NOTE: This implementation assumes 'Job' means one call
707  // to EventProcessor::run
708  // If it really means once per 'application' then this code will
709  // have to be changed.
710  // Also have to deal with the case where 'run' is called, then a new Module
711  // is added, and 'run' is called
712  // again. In that case the newly added Module needs its 'beginJob'
713  // to be called.
714 
715  //NOTE: in future we should have a beginOfJob for looper that takes no arguments
716  // For now we delay calling beginOfJob until first beginOfRun
717  //if(looper_) {
718  // looper_->beginOfJob(es);
719  //}
720  espController_->finishConfiguration();
721  actReg_->eventSetupConfigurationSignal_(esp_->recordsToResolverIndices(), processContext_);
722  actReg_->preBeginJobSignal_(pathsAndConsumesOfModules_, processContext_);
723  try {
724  convertException::wrap([&]() { input_->doBeginJob(); });
725  } catch (cms::Exception& ex) {
726  ex.addContext("Calling beginJob for the source");
727  throw;
728  }
729 
730  schedule_->beginJob(*preg_, esp_->recordsToResolverIndices(), *processBlockHelper_);
731  if (looper_) {
732  constexpr bool mustPrefetchMayGet = true;
733  auto const processBlockLookup = preg_->productLookup(InProcess);
734  auto const runLookup = preg_->productLookup(InRun);
735  auto const lumiLookup = preg_->productLookup(InLumi);
736  auto const eventLookup = preg_->productLookup(InEvent);
737  looper_->updateLookup(InProcess, *processBlockLookup, mustPrefetchMayGet);
738  looper_->updateLookup(InRun, *runLookup, mustPrefetchMayGet);
739  looper_->updateLookup(InLumi, *lumiLookup, mustPrefetchMayGet);
740  looper_->updateLookup(InEvent, *eventLookup, mustPrefetchMayGet);
741  looper_->updateLookup(esp_->recordsToResolverIndices());
742  }
743  // toerror.succeeded(); // should we add this?
744  for_all(subProcesses_, [](auto& subProcess) { subProcess.doBeginJob(); });
745  actReg_->postBeginJobSignal_();
746 
747  oneapi::tbb::task_group group;
749  using namespace edm::waiting_task::chain;
750  first([this](auto nextTask) {
751  for (unsigned int i = 0; i < preallocations_.numberOfStreams(); ++i) {
752  first([i, this](auto nextTask) {
754  schedule_->beginStream(i);
755  }) | ifThen(not subProcesses_.empty(), [this, i](auto nextTask) {
757  for_all(subProcesses_, [i](auto& subProcess) { subProcess.doBeginStream(i); });
758  }) | lastTask(nextTask);
759  }
761  last.wait();
762  }
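  // Illustrative sketch, not part of EventProcessor.cc: beginJob() above composes its
  // asynchronous steps with edm::waiting_task::chain. Each step receives a holder for the
  // next step; when the lambda finishes and releases that holder the next link runs, and
  // failures are forwarded via doneWaiting. Minimal shape of that composition, using only
  // the helpers seen above ("finalHolder" is a hypothetical edm::WaitingTaskHolder):
  //   using namespace edm::waiting_task::chain;
  //   first([](auto nextTask) {
  //     // step 1
  //   }) | then([](auto nextTask) {
  //     // step 2, runs only after step 1 has finished
  //   }) | runLast(finalHolder);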
763 
765  // Collects exceptions, so we don't throw before all operations are performed.
767  "Multiple exceptions were thrown while executing endJob. An exception message follows for each.\n");
768 
769  //make the services available
771 
772  using namespace edm::waiting_task::chain;
773 
774  oneapi::tbb::task_group group;
775  edm::FinalWaitingTask waitTask{group};
776 
777  {
778  //handle endStream transitions
779  edm::WaitingTaskHolder taskHolder(group, &waitTask);
780  std::mutex collectorMutex;
781  for (unsigned int i = 0; i < preallocations_.numberOfStreams(); ++i) {
782  first([this, i, &c, &collectorMutex](auto nextTask) {
783  std::exception_ptr ep;
784  try {
786  this->schedule_->endStream(i);
787  } catch (...) {
788  ep = std::current_exception();
789  }
790  if (ep) {
791  std::lock_guard<std::mutex> l(collectorMutex);
792  c.call([&ep]() { std::rethrow_exception(ep); });
793  }
794  }) | then([this, i, &c, &collectorMutex](auto nextTask) {
795  for (auto& subProcess : subProcesses_) {
796  first([this, i, &c, &collectorMutex, &subProcess](auto nextTask) {
797  std::exception_ptr ep;
798  try {
800  subProcess.doEndStream(i);
801  } catch (...) {
802  ep = std::current_exception();
803  }
804  if (ep) {
805  std::lock_guard<std::mutex> l(collectorMutex);
806  c.call([&ep]() { std::rethrow_exception(ep); });
807  }
808  }) | lastTask(nextTask);
809  }
810  }) | lastTask(taskHolder);
811  }
812  }
813  waitTask.waitNoThrow();
814 
815  auto actReg = actReg_.get();
816  c.call([actReg]() { actReg->preEndJobSignal_(); });
817  schedule_->endJob(c);
818  for (auto& subProcess : subProcesses_) {
819  c.call(std::bind(&SubProcess::doEndJob, &subProcess));
820  }
821  c.call(std::bind(&InputSource::doEndJob, input_.get()));
822  if (looper_) {
823  c.call(std::bind(&EDLooperBase::endOfJob, looper()));
824  }
825  c.call([actReg]() { actReg->postEndJobSignal_(); });
826  if (c.hasThrown()) {
827  c.rethrow();
828  }
829  }
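  // Illustrative sketch, not part of EventProcessor.cc: endJob() above funnels every
  // teardown step through an ExceptionCollector so that one failure does not skip the
  // remaining steps; whatever was collected is rethrown only at the end. Minimal shape
  // of that pattern (stepOne/stepTwo and the message are hypothetical):
  //   ExceptionCollector collector("Multiple exceptions were thrown during cleanup.\n");
  //   collector.call([&]() { stepOne(); });
  //   collector.call([&]() { stepTwo(); });  // still runs even if stepOne() threw
  //   if (collector.hasThrown()) {
  //     collector.rethrow();
  //   }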
830 
832 
833  std::vector<ModuleDescription const*> EventProcessor::getAllModuleDescriptions() const {
834  return schedule_->getAllModuleDescriptions();
835  }
836 
837  int EventProcessor::totalEvents() const { return schedule_->totalEvents(); }
838 
839  int EventProcessor::totalEventsPassed() const { return schedule_->totalEventsPassed(); }
840 
841  int EventProcessor::totalEventsFailed() const { return schedule_->totalEventsFailed(); }
842 
843  void EventProcessor::clearCounters() { schedule_->clearCounters(); }
844 
845  namespace {
846 #include "TransitionProcessors.icc"
847  }
848 
850  bool returnValue = false;
851 
852  // Look for a shutdown signal
853  if (shutdown_flag.load(std::memory_order_acquire)) {
854  returnValue = true;
855  edm::LogSystem("ShutdownSignal") << "an external signal was sent to shutdown the job early.";
857  jr->reportShutdownSignal();
859  }
860  return returnValue;
861  }
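  // Illustrative sketch, not part of EventProcessor.cc: checkForAsyncStopRequest() polls
  // shutdown_flag with std::memory_order_acquire so that anything written before the flag
  // was set (presumably with release ordering or stronger) is visible here. Self-contained
  // analogue with a local flag (hypothetical names):
  //   std::atomic<bool> stopRequested{false};
  //   // signalling thread:  stopRequested.store(true, std::memory_order_release);
  //   // polling thread:     if (stopRequested.load(std::memory_order_acquire)) { /* wind down */ }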
862 
865  InputSource::ItemType itemType;
866  //For now, do nothing with InputSource::IsSynchronize
867  do {
868  itemType = input_->nextItemType();
869  } while (itemType == InputSource::IsSynchronize);
870 
871  lastSourceTransition_ = itemType;
872  sentry.completedSuccessfully();
873 
875 
877  actReg_->preSourceEarlyTerminationSignal_(TerminationOrigin::ExternalSignal);
879  }
880 
881  return lastSourceTransition_;
882  }
883 
885  beginJob(); //make sure this was called
886 
887  // make the services available
889  actReg_->beginProcessingSignal_();
890  auto endSignal = [](ActivityRegistry* iReg) { iReg->endProcessingSignal_(); };
891  std::unique_ptr<ActivityRegistry, decltype(endSignal)> guard(actReg_.get(), endSignal);
892  try {
893  FilesProcessor fp(fileModeNoMerge_);
894 
895  convertException::wrap([&]() {
896  bool firstTime = true;
897  do {
898  if (not firstTime) {
900  rewindInput();
901  } else {
902  firstTime = false;
903  }
904  startingNewLoop();
905 
906  auto trans = fp.processFiles(*this);
907 
908  fp.normalEnd();
909 
910  if (deferredExceptionPtrIsSet_.load()) {
911  std::rethrow_exception(deferredExceptionPtr_);
912  }
913  if (trans != InputSource::IsStop) {
914  //problem with the source
915  doErrorStuff();
916 
917  throw cms::Exception("BadTransition") << "Unexpected transition change " << trans;
918  }
919  } while (not endOfLoop());
920  }); // convertException::wrap
921 
922  } // Try block
923  catch (cms::Exception& e) {
925  std::string message(
926  "Another exception was caught while trying to clean up lumis after the primary fatal exception.");
927  e.addAdditionalInfo(message);
928  if (e.alreadyPrinted()) {
929  LogAbsolute("Additional Exceptions") << message;
930  }
931  }
932  if (exceptionMessageRuns_) {
933  std::string message(
934  "Another exception was caught while trying to clean up runs after the primary fatal exception.");
935  e.addAdditionalInfo(message);
936  if (e.alreadyPrinted()) {
937  LogAbsolute("Additional Exceptions") << message;
938  }
939  }
940  if (!exceptionMessageFiles_.empty()) {
941  e.addAdditionalInfo(exceptionMessageFiles_);
942  if (e.alreadyPrinted()) {
943  LogAbsolute("Additional Exceptions") << exceptionMessageFiles_;
944  }
945  }
946  throw;
947  }
948  return epSuccess;
949  }
950 
952  FDEBUG(1) << " \treadFile\n";
953  size_t size = preg_->size();
955 
956  if (streamRunActive_ > 0) {
957  streamRunStatus_[0]->runPrincipal()->preReadFile();
958  streamRunStatus_[0]->runPrincipal()->adjustIndexesAfterProductRegistryAddition();
959  }
960 
961  if (streamLumiActive_ > 0) {
962  streamLumiStatus_[0]->lumiPrincipal()->adjustIndexesAfterProductRegistryAddition();
963  }
964 
965  fb_ = input_->readFile();
966  if (size < preg_->size()) {
968  }
971  fb_->setNotFastClonable(FileBlock::ParallelProcesses);
972  }
973  sentry.completedSuccessfully();
974  }
975 
976  void EventProcessor::closeInputFile(bool cleaningUpAfterException) {
977  if (fileBlockValid()) {
979  input_->closeFile(fb_.get(), cleaningUpAfterException);
980  sentry.completedSuccessfully();
981  }
982  FDEBUG(1) << "\tcloseInputFile\n";
983  }
984 
986  if (fileBlockValid()) {
987  schedule_->openOutputFiles(*fb_);
988  for_all(subProcesses_, [this](auto& subProcess) { subProcess.openOutputFiles(*fb_); });
989  }
990  FDEBUG(1) << "\topenOutputFiles\n";
991  }
992 
994  schedule_->closeOutputFiles();
995  for_all(subProcesses_, [](auto& subProcess) { subProcess.closeOutputFiles(); });
996  processBlockHelper_->clearAfterOutputFilesClose();
997  FDEBUG(1) << "\tcloseOutputFiles\n";
998  }
999 
1001  if (fileBlockValid()) {
1003  [this](auto& subProcess) { subProcess.updateBranchIDListHelper(branchIDListHelper_->branchIDLists()); });
1004  schedule_->respondToOpenInputFile(*fb_);
1005  for_all(subProcesses_, [this](auto& subProcess) { subProcess.respondToOpenInputFile(*fb_); });
1006  }
1007  FDEBUG(1) << "\trespondToOpenInputFile\n";
1008  }
1009 
1011  if (fileBlockValid()) {
1012  schedule_->respondToCloseInputFile(*fb_);
1013  for_all(subProcesses_, [this](auto& subProcess) { subProcess.respondToCloseInputFile(*fb_); });
1014  }
1015  FDEBUG(1) << "\trespondToCloseInputFile\n";
1016  }
1017 
1019  shouldWeStop_ = false;
1020  //NOTE: for first loop, need to delay calling 'doStartingNewLoop'
1021  // until after we've called beginOfJob
1022  if (looper_ && looperBeginJobRun_) {
1023  looper_->doStartingNewLoop();
1024  }
1025  FDEBUG(1) << "\tstartingNewLoop\n";
1026  }
1027 
1029  if (looper_) {
1030  ModuleChanger changer(schedule_.get(), preg_.get(), esp_->recordsToResolverIndices());
1031  looper_->setModuleChanger(&changer);
1032  EDLooperBase::Status status = looper_->doEndOfLoop(esp_->eventSetupImpl());
1033  looper_->setModuleChanger(nullptr);
1035  return true;
1036  else
1037  return false;
1038  }
1039  FDEBUG(1) << "\tendOfLoop\n";
1040  return true;
1041  }
1042 
1044  input_->repeat();
1045  input_->rewind();
1046  FDEBUG(1) << "\trewind\n";
1047  }
1048 
1050  looper_->prepareForNextLoop(esp_.get());
1051  FDEBUG(1) << "\tprepareForNextLoop\n";
1052  }
1053 
1055  FDEBUG(1) << "\tshouldWeCloseOutput\n";
1056  if (!subProcesses_.empty()) {
1057  for (auto const& subProcess : subProcesses_) {
1058  if (subProcess.shouldWeCloseOutput()) {
1059  return true;
1060  }
1061  }
1062  return false;
1063  }
1064  return schedule_->shouldWeCloseOutput();
1065  }
1066 
1068  FDEBUG(1) << "\tdoErrorStuff\n";
1069  LogError("StateMachine") << "The EventProcessor state machine encountered an unexpected event\n"
1070  << "and went to the error state\n"
1071  << "Will attempt to terminate processing normally\n"
1072  << "(IF using the looper the next loop will be attempted)\n"
1073  << "This likely indicates a bug in an input module or corrupted input or both\n";
1074  }
1075 
1076  void EventProcessor::beginProcessBlock(bool& beginProcessBlockSucceeded) {
1077  ProcessBlockPrincipal& processBlockPrincipal = principalCache_.processBlockPrincipal();
1078  processBlockPrincipal.fillProcessBlockPrincipal(processConfiguration_->processName());
1079 
1081  FinalWaitingTask globalWaitTask{taskGroup_};
1082 
1083  ProcessBlockTransitionInfo transitionInfo(processBlockPrincipal);
1084  beginGlobalTransitionAsync<Traits>(
1085  WaitingTaskHolder(taskGroup_, &globalWaitTask), *schedule_, transitionInfo, serviceToken_, subProcesses_);
1086 
1087  globalWaitTask.wait();
1088  beginProcessBlockSucceeded = true;
1089  }
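  // Illustrative sketch, not part of EventProcessor.cc: beginProcessBlock() above shows the
  // synchronous wrapper used around asynchronous transitions throughout this file: launch the
  // work with a WaitingTaskHolder tied to a FinalWaitingTask, then block on it. wait() lets a
  // failure propagate as an exception; waitNoThrow() is the non-throwing variant used elsewhere.
  //   FinalWaitingTask done{taskGroup_};
  //   someTransitionAsync(WaitingTaskHolder(taskGroup_, &done), /* ... */);  // hypothetical call
  //   done.wait();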
1090 
1092  input_->fillProcessBlockHelper();
1094  while (input_->nextProcessBlock(processBlockPrincipal)) {
1095  readProcessBlock(processBlockPrincipal);
1096 
1098  FinalWaitingTask globalWaitTask{taskGroup_};
1099 
1100  ProcessBlockTransitionInfo transitionInfo(processBlockPrincipal);
1101  beginGlobalTransitionAsync<Traits>(
1102  WaitingTaskHolder(taskGroup_, &globalWaitTask), *schedule_, transitionInfo, serviceToken_, subProcesses_);
1103 
1104  globalWaitTask.wait();
1105 
1106  FinalWaitingTask writeWaitTask{taskGroup_};
1108  writeWaitTask.wait();
1109 
1110  processBlockPrincipal.clearPrincipal();
1111  for (auto& s : subProcesses_) {
1112  s.clearProcessBlockPrincipal(ProcessBlockType::Input);
1113  }
1114  }
1115  }
1116 
1117  void EventProcessor::endProcessBlock(bool cleaningUpAfterException, bool beginProcessBlockSucceeded) {
1118  ProcessBlockPrincipal& processBlockPrincipal = principalCache_.processBlockPrincipal();
1119 
1121  FinalWaitingTask globalWaitTask{taskGroup_};
1122 
1123  ProcessBlockTransitionInfo transitionInfo(processBlockPrincipal);
1124  endGlobalTransitionAsync<Traits>(WaitingTaskHolder(taskGroup_, &globalWaitTask),
1125  *schedule_,
1126  transitionInfo,
1127  serviceToken_,
1128  subProcesses_,
1129  cleaningUpAfterException);
1130  globalWaitTask.wait();
1131 
1132  if (beginProcessBlockSucceeded) {
1133  FinalWaitingTask writeWaitTask{taskGroup_};
1135  writeWaitTask.wait();
1136  }
1137 
1138  processBlockPrincipal.clearPrincipal();
1139  for (auto& s : subProcesses_) {
1140  s.clearProcessBlockPrincipal(ProcessBlockType::New);
1141  }
1142  }
1143 
1145  FinalWaitingTask waitTask{taskGroup_};
1147  if (streamRunActive_ == 0) {
1148  assert(streamLumiActive_ == 0);
1149 
1150  beginRunAsync(IOVSyncValue(EventID(input_->run(), 0, 0), input_->runAuxiliary()->beginTime()),
1151  WaitingTaskHolder{taskGroup_, &waitTask});
1152  } else {
1154 
1155  auto runStatus = streamRunStatus_[0];
1156 
1157  while (lastTransitionType() == InputSource::IsRun and runStatus->runPrincipal()->run() == input_->run() and
1158  runStatus->runPrincipal()->reducedProcessHistoryID() == input_->reducedProcessHistoryID()) {
1159  readAndMergeRun(*runStatus);
1161  }
1162 
1163  WaitingTaskHolder holder{taskGroup_, &waitTask};
1164  runStatus->setHolderOfTaskInProcessRuns(holder);
1165  if (streamLumiActive_ > 0) {
1167  continueLumiAsync(std::move(holder));
1168  } else {
1170  }
1171  }
1172  waitTask.wait();
1173  return lastTransitionType();
1174  }
1175 
1177  if (iHolder.taskHasFailed()) {
1178  return;
1179  }
1180 
1181  actReg_->esSyncIOVQueuingSignal_.emit(iSync);
1182 
1183  auto status = std::make_shared<RunProcessingStatus>(preallocations_.numberOfStreams(), iHolder);
1184 
1185  chain::first([this, &status, iSync](auto nextTask) {
1186  espController_->runOrQueueEventSetupForInstanceAsync(iSync,
1187  nextTask,
1188  status->endIOVWaitingTasks(),
1189  status->eventSetupImpls(),
1191  actReg_.get(),
1192  serviceToken_,
1194  }) | chain::then([this, status](std::exception_ptr const* iException, auto nextTask) {
1195  CMS_SA_ALLOW try {
1196  if (iException) {
1197  WaitingTaskHolder copyHolder(nextTask);
1198  copyHolder.doneWaiting(*iException);
1199  // Finish handling the exception in the task pushed to runQueue_
1200  }
1202 
1203  runQueue_->pushAndPause(
1204  *nextTask.group(),
1205  [this, postRunQueueTask = nextTask, status](edm::LimitedTaskQueue::Resumer iResumer) mutable {
1206  CMS_SA_ALLOW try {
1207  if (postRunQueueTask.taskHasFailed()) {
1208  status->resetBeginResources();
1210  return;
1211  }
1212 
1213  status->setResumer(std::move(iResumer));
1214 
1216  *postRunQueueTask.group(), [this, postSourceTask = postRunQueueTask, status]() mutable {
1217  CMS_SA_ALLOW try {
1219 
1220  if (postSourceTask.taskHasFailed()) {
1221  status->resetBeginResources();
1223  status->resumeGlobalRunQueue();
1224  return;
1225  }
1226 
1227  status->setRunPrincipal(readRun());
1228 
1229  RunPrincipal& runPrincipal = *status->runPrincipal();
1230  {
1232  input_->doBeginRun(runPrincipal, &processContext_);
1233  sentry.completedSuccessfully();
1234  }
1235 
1236  EventSetupImpl const& es = status->eventSetupImpl(esp_->subProcessIndex());
1237  if (looper_ && looperBeginJobRun_ == false) {
1238  looper_->copyInfo(ScheduleInfo(schedule_.get()));
1239 
1240  oneapi::tbb::task_group group;
1241  FinalWaitingTask waitTask{group};
1242  using namespace edm::waiting_task::chain;
1243  chain::first([this, &es](auto nextTask) {
1244  looper_->esPrefetchAsync(nextTask, es, Transition::BeginRun, serviceToken_);
1245  }) | then([this, &es](auto nextTask) mutable {
1246  looper_->beginOfJob(es);
1247  looperBeginJobRun_ = true;
1248  looper_->doStartingNewLoop();
1249  }) | runLast(WaitingTaskHolder(group, &waitTask));
1250  waitTask.wait();
1251  }
1252 
1253  using namespace edm::waiting_task::chain;
1254  chain::first([this, status](auto nextTask) mutable {
1255  CMS_SA_ALLOW try { readAndMergeRunEntriesAsync(std::move(status), nextTask); } catch (...) {
1256  status->setStopBeforeProcessingRun(true);
1257  nextTask.doneWaiting(std::current_exception());
1258  }
1259  }) | then([this, status, &es](auto nextTask) {
1260  if (status->stopBeforeProcessingRun()) {
1261  return;
1262  }
1263  RunTransitionInfo transitionInfo(*status->runPrincipal(), es, &status->eventSetupImpls());
1265  beginGlobalTransitionAsync<Traits>(
1266  nextTask, *schedule_, transitionInfo, serviceToken_, subProcesses_);
1267  }) | then([status](auto nextTask) mutable {
1268  if (status->stopBeforeProcessingRun()) {
1269  return;
1270  }
1271  status->globalBeginDidSucceed();
1272  }) | ifThen(looper_, [this, status, &es](auto nextTask) {
1273  if (status->stopBeforeProcessingRun()) {
1274  return;
1275  }
1276  looper_->prefetchAsync(
1277  nextTask, serviceToken_, Transition::BeginRun, *status->runPrincipal(), es);
1278  }) | ifThen(looper_, [this, status, &es](auto nextTask) {
1279  if (status->stopBeforeProcessingRun()) {
1280  return;
1281  }
1282  ServiceRegistry::Operate operateLooper(serviceToken_);
1283  looper_->doBeginRun(*status->runPrincipal(), es, &processContext_);
1284  }) | then([this, status](std::exception_ptr const* iException, auto holder) mutable {
1285  bool precedingTasksSucceeded = true;
1286  if (iException) {
1287  precedingTasksSucceeded = false;
1288  WaitingTaskHolder copyHolder(holder);
1289  copyHolder.doneWaiting(*iException);
1290  }
1291 
1292  if (status->stopBeforeProcessingRun()) {
1293  // We just quit now if there was a failure when merging runs
1294  status->resetBeginResources();
1296  status->resumeGlobalRunQueue();
1297  return;
1298  }
1299  CMS_SA_ALLOW try {
1300  // Under normal circumstances, this task runs after endRun has completed for all streams
1301  // and global endLumi has completed for all lumis contained in this run
1302  auto globalEndRunTask =
1303  edm::make_waiting_task([this, status](std::exception_ptr const*) mutable {
1304  WaitingTaskHolder taskHolder = status->holderOfTaskInProcessRuns();
1305  status->holderOfTaskInProcessRuns().doneWaiting(std::exception_ptr{});
1307  });
1308  status->setGlobalEndRunHolder(WaitingTaskHolder{*holder.group(), globalEndRunTask});
1309  } catch (...) {
1310  status->resetBeginResources();
1312  status->resumeGlobalRunQueue();
1313  holder.doneWaiting(std::current_exception());
1314  return;
1315  }
1316 
1317  // After this point we are committed to end the run via endRunAsync
1318 
1320 
1321  // The only purpose of the pause is to cause stream begin run to execute before
1322  // global begin lumi in the single threaded case (maintains consistency with
1323  // the order that existed before concurrent runs were implemented).
1324  PauseQueueSentry pauseQueueSentry(streamQueuesInserter_);
1325 
1326  CMS_SA_ALLOW try {
1328  *holder.group(), [this, status, precedingTasksSucceeded, holder]() mutable {
1329  for (unsigned int i = 0; i < preallocations_.numberOfStreams(); ++i) {
1330  CMS_SA_ALLOW try {
1331  streamQueues_[i].push(
1332  *holder.group(),
1333  [this, i, status, precedingTasksSucceeded, holder]() mutable {
1335  i, std::move(status), precedingTasksSucceeded, std::move(holder));
1336  });
1337  } catch (...) {
1338  if (status->streamFinishedBeginRun()) {
1339  WaitingTaskHolder copyHolder(holder);
1340  copyHolder.doneWaiting(std::current_exception());
1341  status->resetBeginResources();
1344  }
1345  }
1346  }
1347  });
1348  } catch (...) {
1349  WaitingTaskHolder copyHolder(holder);
1350  copyHolder.doneWaiting(std::current_exception());
1351  status->resetBeginResources();
1354  }
1356  }) | runLast(postSourceTask);
1357  } catch (...) {
1358  status->resetBeginResources();
1360  status->resumeGlobalRunQueue();
1361  postSourceTask.doneWaiting(std::current_exception());
1362  }
1363  }); // task in sourceResourcesAcquirer
1364  } catch (...) {
1365  status->resetBeginResources();
1367  status->resumeGlobalRunQueue();
1368  postRunQueueTask.doneWaiting(std::current_exception());
1369  }
1370  }); // task in runQueue
1371  } catch (...) {
1372  status->resetBeginResources();
1374  nextTask.doneWaiting(std::current_exception());
1375  }
1376  }) | chain::runLast(std::move(iHolder));
1377  }
1378 
1379  void EventProcessor::streamBeginRunAsync(unsigned int iStream,
1380  std::shared_ptr<RunProcessingStatus> status,
1381  bool precedingTasksSucceeded,
1382  WaitingTaskHolder iHolder) {
1383  // These shouldn't throw
1384  streamQueues_[iStream].pause();
1385  ++streamRunActive_;
1386  streamRunStatus_[iStream] = std::move(status);
1387 
1388  CMS_SA_ALLOW try {
1389  using namespace edm::waiting_task::chain;
1390  chain::first([this, iStream, precedingTasksSucceeded](auto nextTask) {
1391  if (precedingTasksSucceeded) {
1392  RunProcessingStatus& rs = *streamRunStatus_[iStream];
1393  RunTransitionInfo transitionInfo(
1394  *rs.runPrincipal(), rs.eventSetupImpl(esp_->subProcessIndex()), &rs.eventSetupImpls());
1396  beginStreamTransitionAsync<Traits>(
1397  std::move(nextTask), *schedule_, iStream, transitionInfo, serviceToken_, subProcesses_);
1398  }
1399  }) | then([this, iStream](std::exception_ptr const* exceptionFromBeginStreamRun, auto nextTask) {
1400  if (exceptionFromBeginStreamRun) {
1401  nextTask.doneWaiting(*exceptionFromBeginStreamRun);
1402  }
1403  releaseBeginRunResources(iStream);
1404  }) | runLast(iHolder);
1405  } catch (...) {
1406  releaseBeginRunResources(iStream);
1407  iHolder.doneWaiting(std::current_exception());
1408  }
1409  }
1410 
1411  void EventProcessor::releaseBeginRunResources(unsigned int iStream) {
1412  auto& status = streamRunStatus_[iStream];
1413  if (status->streamFinishedBeginRun()) {
1414  status->resetBeginResources();
1416  }
1417  streamQueues_[iStream].resume();
1418  }
1419 
1420  void EventProcessor::endRunAsync(std::shared_ptr<RunProcessingStatus> iRunStatus, WaitingTaskHolder iHolder) {
1421  RunPrincipal& runPrincipal = *iRunStatus->runPrincipal();
1422  iRunStatus->setEndTime();
1423  IOVSyncValue ts(
1425  runPrincipal.endTime());
1426  CMS_SA_ALLOW try { actReg_->esSyncIOVQueuingSignal_.emit(ts); } catch (...) {
1427  WaitingTaskHolder copyHolder(iHolder);
1428  copyHolder.doneWaiting(std::current_exception());
1429  }
1430 
1431  chain::first([this, &iRunStatus, &ts](auto nextTask) {
1432  espController_->runOrQueueEventSetupForInstanceAsync(ts,
1433  nextTask,
1434  iRunStatus->endIOVWaitingTasksEndRun(),
1435  iRunStatus->eventSetupImplsEndRun(),
1437  actReg_.get(),
1438  serviceToken_);
1439  }) | chain::then([this, iRunStatus](std::exception_ptr const* iException, auto nextTask) {
1440  if (iException) {
1441  iRunStatus->setEndingEventSetupSucceeded(false);
1442  handleEndRunExceptions(*iException, nextTask);
1443  }
1445  streamQueuesInserter_.push(*nextTask.group(), [this, nextTask]() mutable {
1446  for (unsigned int i = 0; i < preallocations_.numberOfStreams(); ++i) {
1447  CMS_SA_ALLOW try {
1448  streamQueues_[i].push(*nextTask.group(), [this, i, nextTask]() mutable {
1449  streamQueues_[i].pause();
1450  streamEndRunAsync(std::move(nextTask), i);
1451  });
1452  } catch (...) {
1453  WaitingTaskHolder copyHolder(nextTask);
1454  copyHolder.doneWaiting(std::current_exception());
1455  }
1456  }
1457  });
1458 
1460  CMS_SA_ALLOW try {
1461  beginRunAsync(IOVSyncValue(EventID(input_->run(), 0, 0), input_->runAuxiliary()->beginTime()), nextTask);
1462  } catch (...) {
1463  WaitingTaskHolder copyHolder(nextTask);
1464  copyHolder.doneWaiting(std::current_exception());
1465  }
1466  }
1467  }) | chain::runLast(std::move(iHolder));
1468  }
1469 
1470  void EventProcessor::handleEndRunExceptions(std::exception_ptr iException, WaitingTaskHolder const& holder) {
1471  if (holder.taskHasFailed()) {
1473  } else {
1474  WaitingTaskHolder tmp(holder);
1475  tmp.doneWaiting(iException);
1476  }
1477  }
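  // Illustrative sketch, not part of EventProcessor.cc: failures from the asynchronous
  // end-of-run work are handed to the WaitingTaskHolder rather than thrown; only the first
  // failure is propagated (the already-failed branch above does bookkeeping elided in this
  // listing). Minimal shape of that pattern:
  //   void propagateFirstFailure(std::exception_ptr ex, edm::WaitingTaskHolder const& holder) {
  //     if (not holder.taskHasFailed()) {
  //       edm::WaitingTaskHolder copy(holder);
  //       copy.doneWaiting(ex);  // marks the chain as failed with this exception
  //     }
  //   }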
1478 
1479  void EventProcessor::globalEndRunAsync(WaitingTaskHolder iTask, std::shared_ptr<RunProcessingStatus> iRunStatus) {
1480  auto& runPrincipal = *(iRunStatus->runPrincipal());
1481  bool didGlobalBeginSucceed = iRunStatus->didGlobalBeginSucceed();
1482  bool cleaningUpAfterException = iRunStatus->cleaningUpAfterException() || iTask.taskHasFailed();
1483  EventSetupImpl const& es = iRunStatus->eventSetupImplEndRun(esp_->subProcessIndex());
1484  std::vector<std::shared_ptr<const EventSetupImpl>> const* eventSetupImpls = &iRunStatus->eventSetupImplsEndRun();
1485  bool endingEventSetupSucceeded = iRunStatus->endingEventSetupSucceeded();
1486 
1487  MergeableRunProductMetadata* mergeableRunProductMetadata = runPrincipal.mergeableRunProductMetadata();
1488  using namespace edm::waiting_task::chain;
1489  chain::first([this, &runPrincipal, &es, &eventSetupImpls, cleaningUpAfterException, endingEventSetupSucceeded](
1490  auto nextTask) {
1491  if (endingEventSetupSucceeded) {
1492  RunTransitionInfo transitionInfo(runPrincipal, es, eventSetupImpls);
1494  endGlobalTransitionAsync<Traits>(
1495  std::move(nextTask), *schedule_, transitionInfo, serviceToken_, subProcesses_, cleaningUpAfterException);
1496  }
1497  }) |
1498  ifThen(looper_ && endingEventSetupSucceeded,
1499  [this, &runPrincipal, &es](auto nextTask) {
1500  looper_->prefetchAsync(std::move(nextTask), serviceToken_, Transition::EndRun, runPrincipal, es);
1501  }) |
1502  ifThen(looper_ && endingEventSetupSucceeded,
1503  [this, &runPrincipal, &es](auto nextTask) {
1505  looper_->doEndRun(runPrincipal, es, &processContext_);
1506  }) |
1507  ifThen(didGlobalBeginSucceed && endingEventSetupSucceeded,
1508  [this, mergeableRunProductMetadata, &runPrincipal = runPrincipal](auto nextTask) {
1509  mergeableRunProductMetadata->preWriteRun();
1510  writeRunAsync(nextTask, runPrincipal, mergeableRunProductMetadata);
1511  }) |
1512  then([status = std::move(iRunStatus),
1513  this,
1514  didGlobalBeginSucceed,
1515  mergeableRunProductMetadata,
1516  endingEventSetupSucceeded](std::exception_ptr const* iException, auto nextTask) mutable {
1517  if (didGlobalBeginSucceed && endingEventSetupSucceeded) {
1518  mergeableRunProductMetadata->postWriteRun();
1519  }
1520  if (iException) {
1521  handleEndRunExceptions(*iException, nextTask);
1522  }
1524 
1525  std::exception_ptr ptr;
1526 
1527  // Try hard to clean up resources so the
1528  // process can terminate in a controlled
1529  // fashion even after exceptions have occurred.
1530  CMS_SA_ALLOW try { clearRunPrincipal(*status); } catch (...) {
1531  if (not ptr) {
1532  ptr = std::current_exception();
1533  }
1534  }
1535  CMS_SA_ALLOW try {
1536  status->resumeGlobalRunQueue();
1538  } catch (...) {
1539  if (not ptr) {
1540  ptr = std::current_exception();
1541  }
1542  }
1543  CMS_SA_ALLOW try {
1544  status->resetEndResources();
1545  status.reset();
1546  } catch (...) {
1547  if (not ptr) {
1548  ptr = std::current_exception();
1549  }
1550  }
1551 
1552  if (ptr && !iException) {
1553  handleEndRunExceptions(ptr, nextTask);
1554  }
1555  }) |
1556  runLast(std::move(iTask));
1557  }
1558 
1559  void EventProcessor::streamEndRunAsync(WaitingTaskHolder iTask, unsigned int iStreamIndex) {
1560  CMS_SA_ALLOW try {
1561  if (!streamRunStatus_[iStreamIndex]) {
1562  if (exceptionRunStatus_->streamFinishedRun()) {
1563  exceptionRunStatus_->globalEndRunHolder().doneWaiting(std::exception_ptr());
1564  exceptionRunStatus_.reset();
1565  }
1566  return;
1567  }
1568 
1569  auto runDoneTask =
1570  edm::make_waiting_task([this, iTask, iStreamIndex](std::exception_ptr const* iException) mutable {
1571  if (iException) {
1572  handleEndRunExceptions(*iException, iTask);
1573  }
1574 
1575  auto runStatus = streamRunStatus_[iStreamIndex];
1576 
1577  //reset status before releasing the queue, else we get a race condition
1578  if (runStatus->streamFinishedRun()) {
1579  runStatus->globalEndRunHolder().doneWaiting(std::exception_ptr());
1580  }
1581  streamRunStatus_[iStreamIndex].reset();
1582  --streamRunActive_;
1583  streamQueues_[iStreamIndex].resume();
1584  });
1585 
1586  WaitingTaskHolder runDoneTaskHolder{*iTask.group(), runDoneTask};
1587 
1588  auto runStatus = streamRunStatus_[iStreamIndex].get();
1589 
1590  if (runStatus->didGlobalBeginSucceed() && runStatus->endingEventSetupSucceeded()) {
1591  EventSetupImpl const& es = runStatus->eventSetupImplEndRun(esp_->subProcessIndex());
1592  auto eventSetupImpls = &runStatus->eventSetupImplsEndRun();
1593  bool cleaningUpAfterException = runStatus->cleaningUpAfterException() || iTask.taskHasFailed();
1594 
1595  auto& runPrincipal = *runStatus->runPrincipal();
1597  RunTransitionInfo transitionInfo(runPrincipal, es, eventSetupImpls);
1598  endStreamTransitionAsync<Traits>(std::move(runDoneTaskHolder),
1599  *schedule_,
1600  iStreamIndex,
1601  transitionInfo,
1602  serviceToken_,
1603  subProcesses_,
1604  cleaningUpAfterException);
1605  }
1606  } catch (...) {
1607  handleEndRunExceptions(std::current_exception(), iTask);
1608  }
1609  }
1610 
1611  void EventProcessor::endUnfinishedRun(bool cleaningUpAfterException) {
1612  if (streamRunActive_ > 0) {
1613  FinalWaitingTask waitTask{taskGroup_};
1614 
1615  auto runStatus = streamRunStatus_[0].get();
1616  runStatus->setCleaningUpAfterException(cleaningUpAfterException);
1617  WaitingTaskHolder holder{taskGroup_, &waitTask};
1618  runStatus->setHolderOfTaskInProcessRuns(holder);
1620  endRunAsync(streamRunStatus_[0], std::move(holder));
1621  waitTask.wait();
1622  }
1623  }
1624 
1626  std::shared_ptr<RunProcessingStatus> iRunStatus,
1627  edm::WaitingTaskHolder iHolder) {
1628  actReg_->esSyncIOVQueuingSignal_.emit(iSync);
1629 
1630  auto status = std::make_shared<LuminosityBlockProcessingStatus>(preallocations_.numberOfStreams());
1631  chain::first([this, &iSync, &status](auto nextTask) {
1632  espController_->runOrQueueEventSetupForInstanceAsync(iSync,
1633  nextTask,
1634  status->endIOVWaitingTasks(),
1635  status->eventSetupImpls(),
1637  actReg_.get(),
1638  serviceToken_);
1639  }) | chain::then([this, status, iRunStatus](std::exception_ptr const* iException, auto nextTask) {
1640  CMS_SA_ALLOW try {
1641  //the call to doneWaiting will cause the count to decrement
1642  if (iException) {
1643  WaitingTaskHolder copyHolder(nextTask);
1644  copyHolder.doneWaiting(*iException);
1645  }
1646 
1647  lumiQueue_->pushAndPause(
1648  *nextTask.group(),
1649  [this, postLumiQueueTask = nextTask, status, iRunStatus](edm::LimitedTaskQueue::Resumer iResumer) mutable {
1650  CMS_SA_ALLOW try {
1651  if (postLumiQueueTask.taskHasFailed()) {
1652  status->resetResources();
1654  endRunAsync(iRunStatus, postLumiQueueTask);
1655  return;
1656  }
1657 
1658  status->setResumer(std::move(iResumer));
1659 
1661  *postLumiQueueTask.group(),
1662  [this, postSourceTask = postLumiQueueTask, status, iRunStatus]() mutable {
1663  CMS_SA_ALLOW try {
1665 
1666  if (postSourceTask.taskHasFailed()) {
1667  status->resetResources();
1669  endRunAsync(iRunStatus, postSourceTask);
1670  return;
1671  }
1672 
1673  status->setLumiPrincipal(readLuminosityBlock(iRunStatus->runPrincipal()));
1674 
1675  LuminosityBlockPrincipal& lumiPrincipal = *status->lumiPrincipal();
1676  {
1678  input_->doBeginLumi(lumiPrincipal, &processContext_);
1679  sentry.completedSuccessfully();
1680  }
1681 
1683  if (rng.isAvailable()) {
1684  LuminosityBlock lb(lumiPrincipal, ModuleDescription(), nullptr, false);
1685  rng->preBeginLumi(lb);
1686  }
1687 
1688  EventSetupImpl const& es = status->eventSetupImpl(esp_->subProcessIndex());
1689 
1690  using namespace edm::waiting_task::chain;
1691  chain::first([this, status](auto nextTask) mutable {
1693  firstItemAfterLumiMerge_ = true;
1694  }) | then([this, status, &es, &lumiPrincipal](auto nextTask) {
1695  LumiTransitionInfo transitionInfo(lumiPrincipal, es, &status->eventSetupImpls());
1697  beginGlobalTransitionAsync<Traits>(
1698  nextTask, *schedule_, transitionInfo, serviceToken_, subProcesses_);
1699  }) | ifThen(looper_, [this, status, &es](auto nextTask) {
1700  looper_->prefetchAsync(
1701  nextTask, serviceToken_, Transition::BeginLuminosityBlock, *(status->lumiPrincipal()), es);
1702  }) | ifThen(looper_, [this, status, &es](auto nextTask) {
1703  status->globalBeginDidSucceed();
1704  ServiceRegistry::Operate operateLooper(serviceToken_);
1705  looper_->doBeginLuminosityBlock(*(status->lumiPrincipal()), es, &processContext_);
1706  }) | then([this, status, iRunStatus](std::exception_ptr const* iException, auto holder) mutable {
1707  if (iException) {
1708  status->resetResources();
1710  WaitingTaskHolder copyHolder(holder);
1711  copyHolder.doneWaiting(*iException);
1712  endRunAsync(iRunStatus, holder);
1713  } else {
1714  if (not looper_) {
1715  status->globalBeginDidSucceed();
1716  }
1717 
1718  status->setGlobalEndRunHolder(iRunStatus->globalEndRunHolder());
1719 
1720  EventSetupImpl const& es = status->eventSetupImpl(esp_->subProcessIndex());
1722 
1723  streamQueuesInserter_.push(*holder.group(), [this, status, holder, &es]() mutable {
1724  for (unsigned int i = 0; i < preallocations_.numberOfStreams(); ++i) {
1725  streamQueues_[i].push(*holder.group(), [this, i, status, holder, &es]() mutable {
1726  streamQueues_[i].pause();
1727 
1728  auto& event = principalCache_.eventPrincipal(i);
1729  //We need to be sure that 'status' and its internal shared_ptr<LuminosityBlockPrincipal> are only
1730  // held by the container, because the tasks this lambda spawns may start running
1731  // before the lambda itself has finished executing.
1732  auto eventSetupImpls = &status->eventSetupImpls();
1733  auto lp = status->lumiPrincipal().get();
1736  event.setLuminosityBlockPrincipal(lp);
1737  LumiTransitionInfo transitionInfo(*lp, es, eventSetupImpls);
1738  using namespace edm::waiting_task::chain;
1739  chain::first([this, i, &transitionInfo](auto nextTask) {
1740  beginStreamTransitionAsync<Traits>(std::move(nextTask),
1741  *schedule_,
1742  i,
1743  transitionInfo,
1744  serviceToken_,
1745  subProcesses_);
1746  }) |
1747  then([this, i](std::exception_ptr const* exceptionFromBeginStreamLumi,
1748  auto nextTask) {
1749  if (exceptionFromBeginStreamLumi) {
1750  WaitingTaskHolder copyHolder(nextTask);
1751  copyHolder.doneWaiting(*exceptionFromBeginStreamLumi);
1752  }
1754  }) |
1755  runLast(std::move(holder));
1756  });
1757  } // end for loop over streams
1758  });
1759  }
1760  }) | runLast(postSourceTask);
1761  } catch (...) {
1762  status->resetResources();
1764  WaitingTaskHolder copyHolder(postSourceTask);
1765  copyHolder.doneWaiting(std::current_exception());
1766  endRunAsync(iRunStatus, postSourceTask);
1767  }
1768  }); // task in sourceResourcesAcquirer
1769  } catch (...) {
1770  status->resetResources();
1772  WaitingTaskHolder copyHolder(postLumiQueueTask);
1773  copyHolder.doneWaiting(std::current_exception());
1774  endRunAsync(iRunStatus, postLumiQueueTask);
1775  }
1776  }); // task in lumiQueue
1777  } catch (...) {
1778  status->resetResources();
1780  WaitingTaskHolder copyHolder(nextTask);
1781  copyHolder.doneWaiting(std::current_exception());
1782  endRunAsync(iRunStatus, nextTask);
1783  }
1784  }) | chain::runLast(std::move(iHolder));
1785  }
1786 
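The comment inside the per-stream loop of beginLumiAsync above (holding 'status' and its LuminosityBlockPrincipal only in the container) reflects the usual ownership rule for tasks that may outlive the scope that created them. A minimal standalone sketch of that rule, using plain std::shared_ptr and std::thread rather than the framework's own types:

// Sketch only: the type and names are illustrative, not the real framework classes.
#include <iostream>
#include <memory>
#include <thread>

struct LumiPrincipalLike {
  int lumiNumber = 42;
};

int main() {
  auto principal = std::make_shared<LumiPrincipalLike>();
  std::thread task([keepAlive = principal] {  // value capture extends the lifetime
    std::cout << "lumi " << keepAlive->lumiNumber << " is still valid inside the task\n";
  });
  principal.reset();  // the caller drops its reference; the task's copy keeps the object alive
  task.join();
  return 0;
}
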
1788  chain::first([this](auto nextTask) {
1789  //all streams are sharing the same status at the moment
1790  auto status = streamLumiStatus_[0]; //read from streamLumiActive_ happened in calling routine
1792 
1793  while (lastTransitionType() == InputSource::IsLumi and
1794  status->lumiPrincipal()->luminosityBlock() == input_->luminosityBlock()) {
1797  }
1798  firstItemAfterLumiMerge_ = true;
1799  }) | chain::then([this](auto nextTask) mutable {
1800  unsigned int streamIndex = 0;
1801  oneapi::tbb::task_arena arena{oneapi::tbb::task_arena::attach()};
1802  for (; streamIndex < preallocations_.numberOfStreams() - 1; ++streamIndex) {
1803  arena.enqueue([this, streamIndex, h = nextTask]() { handleNextEventForStreamAsync(h, streamIndex); });
1804  }
1805  nextTask.group()->run(
1806  [this, streamIndex, h = std::move(nextTask)]() { handleNextEventForStreamAsync(h, streamIndex); });
1807  }) | chain::runLast(std::move(iHolder));
1808  }
1809 
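continueLumiAsync above hands the first numberOfStreams()-1 streams to the scheduler and runs the last one on the calling thread. A minimal sketch of that fan-out using only oneTBB's task_group (the real code additionally enqueues into an attached task_arena; 'work' below is an illustrative stand-in for handleNextEventForStreamAsync):

#include <cstdio>
#include <oneapi/tbb/task_group.h>

int main() {
  constexpr unsigned nStreams = 4;
  auto work = [](unsigned streamIndex) { std::printf("handle next event for stream %u\n", streamIndex); };

  oneapi::tbb::task_group group;
  unsigned streamIndex = 0;
  for (; streamIndex < nStreams - 1; ++streamIndex) {
    group.run([&work, streamIndex] { work(streamIndex); });  // scheduled asynchronously
  }
  work(streamIndex);  // the last stream is handled on the current thread
  group.wait();       // block until the scheduled streams are done
  return 0;
}
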
1810  void EventProcessor::handleEndLumiExceptions(std::exception_ptr iException, WaitingTaskHolder const& holder) {
1811  if (holder.taskHasFailed()) {
1813  } else {
1814  WaitingTaskHolder tmp(holder);
1815  tmp.doneWaiting(iException);
1816  }
1817  }
1818 
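handleEndLumiExceptions above keeps only the first failure: if the holder has already failed, later exceptions are dropped; otherwise the new exception is attached. A minimal standalone sketch of that policy, with FirstExceptionHolder as an illustrative stand-in for WaitingTaskHolder:

#include <exception>
#include <iostream>
#include <stdexcept>
#include <string>

class FirstExceptionHolder {
public:
  bool taskHasFailed() const noexcept { return static_cast<bool>(ptr_); }
  void doneWaiting(std::exception_ptr p) {
    if (p && !ptr_) ptr_ = p;  // keep only the first failure
  }
  void rethrowIfFailed() const {
    if (ptr_) std::rethrow_exception(ptr_);
  }

private:
  std::exception_ptr ptr_;
};

int main() {
  FirstExceptionHolder holder;
  for (int i = 0; i < 3; ++i) {
    try {
      throw std::runtime_error("end-lumi failure " + std::to_string(i));
    } catch (...) {
      if (!holder.taskHasFailed()) {  // same check as handleEndLumiExceptions
        holder.doneWaiting(std::current_exception());
      }
    }
  }
  try {
    holder.rethrowIfFailed();
  } catch (std::exception const& e) {
    std::cout << e.what() << '\n';  // prints "end-lumi failure 0"
  }
  return 0;
}
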
1820  std::shared_ptr<LuminosityBlockProcessingStatus> iLumiStatus) {
1821  // Get some needed info out of the status object before moving
1822  // it into finalTaskForThisLumi.
1823  auto& lp = *(iLumiStatus->lumiPrincipal());
1824  bool didGlobalBeginSucceed = iLumiStatus->didGlobalBeginSucceed();
1825  bool cleaningUpAfterException = iLumiStatus->cleaningUpAfterException() || iTask.taskHasFailed();
1826  EventSetupImpl const& es = iLumiStatus->eventSetupImpl(esp_->subProcessIndex());
1827  std::vector<std::shared_ptr<const EventSetupImpl>> const* eventSetupImpls = &iLumiStatus->eventSetupImpls();
1828 
1829  using namespace edm::waiting_task::chain;
1830  chain::first([this, &lp, &es, &eventSetupImpls, cleaningUpAfterException](auto nextTask) {
1831  IOVSyncValue ts(EventID(lp.run(), lp.luminosityBlock(), EventID::maxEventNumber()), lp.beginTime());
1832 
1833  LumiTransitionInfo transitionInfo(lp, es, eventSetupImpls);
1835  endGlobalTransitionAsync<Traits>(
1836  std::move(nextTask), *schedule_, transitionInfo, serviceToken_, subProcesses_, cleaningUpAfterException);
1837  }) | then([this, didGlobalBeginSucceed, &lumiPrincipal = lp](auto nextTask) {
1838  //Only call writeLumi if beginLumi succeeded
1839  if (didGlobalBeginSucceed) {
1840  writeLumiAsync(std::move(nextTask), lumiPrincipal);
1841  }
1842  }) | ifThen(looper_, [this, &lp, &es](auto nextTask) {
1843  looper_->prefetchAsync(std::move(nextTask), serviceToken_, Transition::EndLuminosityBlock, lp, es);
1844  }) | ifThen(looper_, [this, &lp, &es](auto nextTask) {
1845  //any thrown exception automatically propagates to nextTask via the chain
1847  looper_->doEndLuminosityBlock(lp, es, &processContext_);
1848  }) | then([status = std::move(iLumiStatus), this](std::exception_ptr const* iException, auto nextTask) mutable {
1849  if (iException) {
1850  handleEndLumiExceptions(*iException, nextTask);
1851  }
1853 
1854  std::exception_ptr ptr;
1855 
1856  // Try hard to clean up resources so the
1857  // process can terminate in a controlled
1858  // fashion even after exceptions have occurred.
1859  // Caught exception is passed to handleEndLumiExceptions()
1860  CMS_SA_ALLOW try { clearLumiPrincipal(*status); } catch (...) {
1861  if (not ptr) {
1862  ptr = std::current_exception();
1863  }
1864  }
1865  // Caught exception is passed to handleEndLumiExceptions()
1866  CMS_SA_ALLOW try { queueWhichWaitsForIOVsToFinish_.resume(); } catch (...) {
1867  if (not ptr) {
1868  ptr = std::current_exception();
1869  }
1870  }
1871  // Caught exception is passed to handleEndLumiExceptions()
1872  CMS_SA_ALLOW try {
1873  status->resetResources();
1874  status->globalEndRunHolderDoneWaiting();
1875  status.reset();
1876  } catch (...) {
1877  if (not ptr) {
1878  ptr = std::current_exception();
1879  }
1880  }
1881 
1882  if (ptr && !iException) {
1883  handleEndLumiExceptions(ptr, nextTask);
1884  }
1885  }) | runLast(std::move(iTask));
1886  }
1887 
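The tail of globalEndLumiAsync above runs every cleanup step even when some of them throw, remembers only the first exception, and reports it at the end. A minimal sketch of that pattern with illustrative step bodies:

#include <exception>
#include <iostream>
#include <stdexcept>

void cleanupAfterLumi() {
  std::exception_ptr first;
  auto tryStep = [&first](auto&& step) {
    try {
      step();
    } catch (...) {
      if (!first) {
        first = std::current_exception();  // later failures are ignored
      }
    }
  };
  tryStep([] { /* clearLumiPrincipal(...) analogue: succeeds */ });
  tryStep([] { throw std::runtime_error("resume failed"); });
  tryStep([] { throw std::runtime_error("resetResources failed"); });
  if (first) {
    std::rethrow_exception(first);
  }
}

int main() {
  try {
    cleanupAfterLumi();
  } catch (std::exception const& e) {
    std::cout << e.what() << '\n';  // prints "resume failed", the first failure
  }
  return 0;
}
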
1888  void EventProcessor::streamEndLumiAsync(edm::WaitingTaskHolder iTask, unsigned int iStreamIndex) {
1889  auto t = edm::make_waiting_task([this, iStreamIndex, iTask](std::exception_ptr const* iException) mutable {
1890  auto status = streamLumiStatus_[iStreamIndex];
1891  if (iException) {
1892  handleEndLumiExceptions(*iException, iTask);
1893  }
1894 
1895  // reset the status before releasing the queue, otherwise we get a race condition
1896  streamLumiStatus_[iStreamIndex].reset();
1898  streamQueues_[iStreamIndex].resume();
1899 
1900  //are we the last one?
1901  if (status->streamFinishedLumi()) {
1903  }
1904  });
1905 
1906  edm::WaitingTaskHolder lumiDoneTask{*iTask.group(), t};
1907 
1908  // Need to be sure the lumi status is released before lumiDoneTask can ever be called,
1909  // therefore we do not want to hold the shared_ptr
1910  auto lumiStatus = streamLumiStatus_[iStreamIndex].get();
1911  lumiStatus->setEndTime();
1912 
1913  EventSetupImpl const& es = lumiStatus->eventSetupImpl(esp_->subProcessIndex());
1914  auto eventSetupImpls = &lumiStatus->eventSetupImpls();
1915  bool cleaningUpAfterException = lumiStatus->cleaningUpAfterException() || iTask.taskHasFailed();
1916 
1917  if (lumiStatus->didGlobalBeginSucceed()) {
1918  auto& lumiPrincipal = *lumiStatus->lumiPrincipal();
1920  LumiTransitionInfo transitionInfo(lumiPrincipal, es, eventSetupImpls);
1921  endStreamTransitionAsync<Traits>(std::move(lumiDoneTask),
1922  *schedule_,
1923  iStreamIndex,
1924  transitionInfo,
1925  serviceToken_,
1926  subProcesses_,
1927  cleaningUpAfterException);
1928  }
1929  }
1930 
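The ordering in streamEndLumiAsync above matters: the per-stream status slot is cleared before the stream queue is resumed, so a task released by resume() never sees the old status. A minimal sketch of that ordering with illustrative stand-in types:

#include <cassert>
#include <functional>
#include <memory>

struct LumiStatus {
  bool finished = true;
};

struct StreamSlot {
  std::shared_ptr<LumiStatus> status;
  std::function<void()> pendingTask;  // task waiting for the paused queue
  void resumeQueue() {
    if (pendingTask) pendingTask();  // runs as soon as the queue is resumed
  }
};

int main() {
  StreamSlot slot;
  slot.status = std::make_shared<LumiStatus>();
  slot.pendingTask = [&slot] { assert(!slot.status); };  // the next lumi expects an empty slot

  auto status = slot.status;  // keep what we still need locally
  slot.status.reset();        // 1) release the slot first
  slot.resumeQueue();         // 2) only then let the next lumi's task run
  assert(status->finished);
  return 0;
}
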
1931  void EventProcessor::endUnfinishedLumi(bool cleaningUpAfterException) {
1932  if (streamRunActive_ == 0) {
1933  assert(streamLumiActive_ == 0);
1934  } else {
1936  if (streamLumiActive_ > 0) {
1937  FinalWaitingTask globalWaitTask{taskGroup_};
1939  streamLumiStatus_[0]->setCleaningUpAfterException(cleaningUpAfterException);
1940  for (unsigned int i = 0; i < preallocations_.numberOfStreams(); ++i) {
1941  streamEndLumiAsync(WaitingTaskHolder{taskGroup_, &globalWaitTask}, i);
1942  }
1943  globalWaitTask.wait();
1944  }
1945  }
1946  }
1947 
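endUnfinishedLumi above is one of the few synchronous points: it launches an end-lumi task per stream and then blocks until all of them have completed. A minimal sketch of that wait, with std::latch standing in for FinalWaitingTask and the task group, and illustrative work per stream (requires C++20):

#include <cstdio>
#include <latch>
#include <thread>
#include <vector>

int main() {
  constexpr unsigned nStreams = 4;
  std::latch allStreamsDone(nStreams);

  std::vector<std::thread> workers;
  for (unsigned i = 0; i < nStreams; ++i) {
    workers.emplace_back([i, &allStreamsDone] {
      std::printf("ending lumi on stream %u\n", i);  // streamEndLumiAsync analogue
      allStreamsDone.count_down();
    });
  }
  allStreamsDone.wait();  // like globalWaitTask.wait(): block until every stream is done
  for (auto& w : workers) {
    w.join();
  }
  return 0;
}
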
1950  input_->readProcessBlock(processBlockPrincipal);
1951  sentry.completedSuccessfully();
1952  }
1953 
1954  std::shared_ptr<RunPrincipal> EventProcessor::readRun() {
1956  assert(rp);
1957  rp->setAux(*input_->runAuxiliary());
1958  {
1960  input_->readRun(*rp, *historyAppender_);
1961  sentry.completedSuccessfully();
1962  }
1963  assert(input_->reducedProcessHistoryID() == rp->reducedProcessHistoryID());
1964  return rp;
1965  }
1966 
1968  RunPrincipal& runPrincipal = *iStatus.runPrincipal();
1969 
1970  bool runOK = runPrincipal.adjustToNewProductRegistry(*preg_);
1971  assert(runOK);
1972  runPrincipal.mergeAuxiliary(*input_->runAuxiliary());
1973  {
1975  input_->readAndMergeRun(runPrincipal);
1976  sentry.completedSuccessfully();
1977  }
1978  }
1979 
1980  std::shared_ptr<LuminosityBlockPrincipal> EventProcessor::readLuminosityBlock(std::shared_ptr<RunPrincipal> rp) {
1982  assert(lbp);
1983  lbp->setAux(*input_->luminosityBlockAuxiliary());
1984  {
1986  input_->readLuminosityBlock(*lbp, *historyAppender_);
1987  sentry.completedSuccessfully();
1988  }
1989  lbp->setRunPrincipal(std::move(rp));
1990  return lbp;
1991  }
1992 
1994  auto& lumiPrincipal = *iStatus.lumiPrincipal();
1995  assert(lumiPrincipal.aux().sameIdentity(*input_->luminosityBlockAuxiliary()) or
1996  input_->processHistoryRegistry().reducedProcessHistoryID(lumiPrincipal.aux().processHistoryID()) ==
1997  input_->processHistoryRegistry().reducedProcessHistoryID(
1998  input_->luminosityBlockAuxiliary()->processHistoryID()));
1999  bool lumiOK = lumiPrincipal.adjustToNewProductRegistry(*preg());
2000  assert(lumiOK);
2001  lumiPrincipal.mergeAuxiliary(*input_->luminosityBlockAuxiliary());
2002  {
2004  input_->readAndMergeLumi(*iStatus.lumiPrincipal());
2005  sentry.completedSuccessfully();
2006  }
2007  }
2008 
2010  using namespace edm::waiting_task;
2011  chain::first([&](auto nextTask) {
2013  schedule_->writeProcessBlockAsync(
2014  nextTask, principalCache_.processBlockPrincipal(processBlockType), &processContext_, actReg_.get());
2015  }) | chain::ifThen(not subProcesses_.empty(), [this, processBlockType](auto nextTask) {
2017  for (auto& s : subProcesses_) {
2018  s.writeProcessBlockAsync(nextTask, processBlockType);
2019  }
2020  }) | chain::runLast(std::move(task));
2021  }
2022 
2024  RunPrincipal const& runPrincipal,
2025  MergeableRunProductMetadata const* mergeableRunProductMetadata) {
2026  using namespace edm::waiting_task;
2027  if (runPrincipal.shouldWriteRun() != RunPrincipal::kNo) {
2028  chain::first([&](auto nextTask) {
2030  schedule_->writeRunAsync(nextTask, runPrincipal, &processContext_, actReg_.get(), mergeableRunProductMetadata);
2031  }) | chain::ifThen(not subProcesses_.empty(), [this, &runPrincipal, mergeableRunProductMetadata](auto nextTask) {
2033  for (auto& s : subProcesses_) {
2034  s.writeRunAsync(nextTask, runPrincipal, mergeableRunProductMetadata);
2035  }
2036  }) | chain::runLast(std::move(task));
2037  }
2038  }
2039 
2041  for (auto& s : subProcesses_) {
2042  s.clearRunPrincipal(*iStatus.runPrincipal());
2043  }
2044  iStatus.runPrincipal()->setShouldWriteRun(RunPrincipal::kUninitialized);
2045  iStatus.runPrincipal()->clearPrincipal();
2046  }
2047 
2049  using namespace edm::waiting_task;
2050  if (lumiPrincipal.shouldWriteLumi() != LuminosityBlockPrincipal::kNo) {
2051  chain::first([&](auto nextTask) {
2053 
2054  lumiPrincipal.runPrincipal().mergeableRunProductMetadata()->writeLumi(lumiPrincipal.luminosityBlock());
2055  schedule_->writeLumiAsync(nextTask, lumiPrincipal, &processContext_, actReg_.get());
2056  }) | chain::ifThen(not subProcesses_.empty(), [this, &lumiPrincipal](auto nextTask) {
2058  for (auto& s : subProcesses_) {
2059  s.writeLumiAsync(nextTask, lumiPrincipal);
2060  }
2062  }
2063  }
2064 
2066  for (auto& s : subProcesses_) {
2067  s.clearLumiPrincipal(*iStatus.lumiPrincipal());
2068  }
2069  iStatus.lumiPrincipal()->setRunPrincipal(std::shared_ptr<RunPrincipal>());
2070  iStatus.lumiPrincipal()->setShouldWriteLumi(LuminosityBlockPrincipal::kUninitialized);
2071  iStatus.lumiPrincipal()->clearPrincipal();
2072  }
2073 
2074  void EventProcessor::readAndMergeRunEntriesAsync(std::shared_ptr<RunProcessingStatus> iRunStatus,
2075  WaitingTaskHolder iHolder) {
2076  auto group = iHolder.group();
2078  *group, [this, status = std::move(iRunStatus), holder = std::move(iHolder)]() mutable {
2079  CMS_SA_ALLOW try {
2081 
2082  std::lock_guard<std::recursive_mutex> guard(*(sourceMutex_.get()));
2083 
2085  while (lastTransitionType() == InputSource::IsRun and status->runPrincipal()->run() == input_->run() and
2086  status->runPrincipal()->reducedProcessHistoryID() == input_->reducedProcessHistoryID()) {
2087  if (status->holderOfTaskInProcessRuns().taskHasFailed()) {
2088  status->setStopBeforeProcessingRun(true);
2089  return;
2090  }
2093  }
2094  } catch (...) {
2095  status->setStopBeforeProcessingRun(true);
2096  holder.doneWaiting(std::current_exception());
2097  }
2098  });
2099  }
2100 
2101  void EventProcessor::readAndMergeLumiEntriesAsync(std::shared_ptr<LuminosityBlockProcessingStatus> iLumiStatus,
2102  WaitingTaskHolder iHolder) {
2103  auto group = iHolder.group();
2105  *group, [this, iLumiStatus = std::move(iLumiStatus), holder = std::move(iHolder)]() mutable {
2106  CMS_SA_ALLOW try {
2108 
2109  std::lock_guard<std::recursive_mutex> guard(*(sourceMutex_.get()));
2110 
2112  while (lastTransitionType() == InputSource::IsLumi and
2113  iLumiStatus->lumiPrincipal()->luminosityBlock() == input_->luminosityBlock()) {
2114  readAndMergeLumi(*iLumiStatus);
2116  }
2117  } catch (...) {
2118  holder.doneWaiting(std::current_exception());
2119  }
2120  });
2121  }
2122 
2123  void EventProcessor::handleNextItemAfterMergingRunEntries(std::shared_ptr<RunProcessingStatus> iRunStatus,
2124  WaitingTaskHolder iHolder) {
2126  iRunStatus->holderOfTaskInProcessRuns().doneWaiting(std::exception_ptr{});
2127  iHolder.doneWaiting(std::exception_ptr{});
2128  } else if (lastTransitionType() == InputSource::IsLumi && !iHolder.taskHasFailed()) {
2129  CMS_SA_ALLOW try {
2130  beginLumiAsync(IOVSyncValue(EventID(input_->run(), input_->luminosityBlock(), 0),
2131  input_->luminosityBlockAuxiliary()->beginTime()),
2132  iRunStatus,
2133  iHolder);
2134  } catch (...) {
2135  WaitingTaskHolder copyHolder(iHolder);
2136  iHolder.doneWaiting(std::current_exception());
2137  endRunAsync(std::move(iRunStatus), std::move(iHolder));
2138  }
2139  } else {
2140  // Note that endRunAsync will call beginRunAsync for the following run
2141  // if appropriate.
2142  endRunAsync(std::move(iRunStatus), std::move(iHolder));
2143  }
2144  }
2145 
2147  unsigned int iStreamIndex,
2149  // This function returns true if it successfully reads an event for the stream and that
2150  // requires both that an event is next and there are no problems or requests to stop.
2151 
2152  if (iTask.taskHasFailed()) {
2153  // We want all streams to stop or all streams to pause. If we are already in the
2154  // middle of pausing streams, then finish pausing all of them and the lumi will be
2155  // ended later. Otherwise, just end it now.
2158  }
2159  return false;
2160  }
2161 
2162  // Did another stream already stop or pause this lumi?
2164  return false;
2165  }
2166 
2167  // Are output modules or the looper requesting we stop?
2168  if (shouldWeStop()) {
2171  return false;
2172  }
2173 
2175 
2176  // need to use lock in addition to the serial task queue because
2177  // of delayed provenance reading and reading data in response to
2178  // edm::Refs etc
2179  std::lock_guard<std::recursive_mutex> guard(*(sourceMutex_.get()));
2180 
2181  // If we didn't already call nextTransitionType while merging lumis, call it here.
2182  // This asks the input source what is next and also checks for signals.
2183 
2185  firstItemAfterLumiMerge_ = false;
2186 
2187  if (InputSource::IsEvent != itemType) {
2188  // IsFile may continue processing the lumi and
2189  // looper_ can cause the input source to declare a new IsRun which is actually
2190  // just a continuation of the previous run
2191  if (InputSource::IsStop == itemType or InputSource::IsLumi == itemType or
2192  (InputSource::IsRun == itemType and
2193  (iStatus.lumiPrincipal()->run() != input_->run() or
2194  iStatus.lumiPrincipal()->runPrincipal().reducedProcessHistoryID() != input_->reducedProcessHistoryID()))) {
2196  } else {
2198  }
2199  return false;
2200  }
2201  readEvent(iStreamIndex);
2202  return true;
2203  }
2204 
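The lock taken in readNextEventForStream above is a std::recursive_mutex on top of the serial task queue because reading an event can trigger nested reads (delayed provenance, edm::Ref resolution) on the same thread. A minimal sketch of why the recursive lock is needed, with illustrative function names:

#include <iostream>
#include <mutex>

std::recursive_mutex sourceMutex;

void delayedProvenanceRead() {
  std::lock_guard<std::recursive_mutex> guard(sourceMutex);  // re-entrant lock, no deadlock
  std::cout << "nested read while the event is being read\n";
}

void readOneEvent() {
  std::lock_guard<std::recursive_mutex> guard(sourceMutex);
  std::cout << "reading event\n";
  delayedProvenanceRead();  // with a plain std::mutex this second lock would deadlock
}

int main() {
  readOneEvent();
  return 0;
}
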
2205  void EventProcessor::handleNextEventForStreamAsync(WaitingTaskHolder iTask, unsigned int iStreamIndex) {
2206  auto group = iTask.group();
2207  sourceResourcesAcquirer_.serialQueueChain().push(*group, [this, iTask = std::move(iTask), iStreamIndex]() mutable {
2208  CMS_SA_ALLOW try {
2209  auto status = streamLumiStatus_[iStreamIndex].get();
2211 
2212  if (readNextEventForStream(iTask, iStreamIndex, *status)) {
2213  auto recursionTask =
2214  make_waiting_task([this, iTask, iStreamIndex](std::exception_ptr const* iEventException) mutable {
2215  if (iEventException) {
2216  WaitingTaskHolder copyHolder(iTask);
2217  copyHolder.doneWaiting(*iEventException);
2218  // Intentionally, we don't return here. The recursive call to
2219  // handleNextEvent takes care of immediately ending the run properly
2220  // using the same code it uses to end the run in other situations.
2221  }
2222  handleNextEventForStreamAsync(std::move(iTask), iStreamIndex);
2223  });
2224 
2225  processEventAsync(WaitingTaskHolder(*iTask.group(), recursionTask), iStreamIndex);
2226  } else {
2227  // the stream will stop processing this lumi now
2229  if (not status->haveStartedNextLumiOrEndedRun()) {
2230  status->startNextLumiOrEndRun();
2231  if (lastTransitionType() == InputSource::IsLumi && !iTask.taskHasFailed()) {
2232  CMS_SA_ALLOW try {
2233  beginLumiAsync(IOVSyncValue(EventID(input_->run(), input_->luminosityBlock(), 0),
2234  input_->luminosityBlockAuxiliary()->beginTime()),
2235  streamRunStatus_[iStreamIndex],
2236  iTask);
2237  } catch (...) {
2238  WaitingTaskHolder copyHolder(iTask);
2239  copyHolder.doneWaiting(std::current_exception());
2240  endRunAsync(streamRunStatus_[iStreamIndex], iTask);
2241  }
2242  } else {
2243  // If appropriate, this will also start the next run.
2244  endRunAsync(streamRunStatus_[iStreamIndex], iTask);
2245  }
2246  }
2247  streamEndLumiAsync(iTask, iStreamIndex);
2248  } else {
2249  assert(status->eventProcessingState() ==
2251  auto runStatus = streamRunStatus_[iStreamIndex].get();
2252 
2253  if (runStatus->holderOfTaskInProcessRuns().hasTask()) {
2254  runStatus->holderOfTaskInProcessRuns().doneWaiting(std::exception_ptr{});
2255  }
2256  }
2257  }
2258  } catch (...) {
2259  WaitingTaskHolder copyHolder(iTask);
2260  copyHolder.doneWaiting(std::current_exception());
2261  handleNextEventForStreamAsync(std::move(iTask), iStreamIndex);
2262  }
2263  });
2264  }
2265 
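handleNextEventForStreamAsync above loops by re-posting itself: it processes one event and then schedules another call to itself, stopping when the source has nothing left for this lumi. A minimal single-threaded sketch of that control flow, with a plain std::deque of callables standing in for the TBB scheduler:

#include <deque>
#include <functional>
#include <iostream>

int main() {
  std::deque<std::function<void()>> scheduler;
  int eventsLeft = 3;

  std::function<void()> handleNextEvent = [&] {
    if (eventsLeft == 0) {
      std::cout << "no more events: end the lumi\n";
      return;
    }
    std::cout << "processing event, " << --eventsLeft << " left\n";
    scheduler.push_back(handleNextEvent);  // recurse by re-posting, not by calling directly
  };

  scheduler.push_back(handleNextEvent);
  while (!scheduler.empty()) {  // drain the "task queue"
    auto task = std::move(scheduler.front());
    scheduler.pop_front();
    task();
  }
  return 0;
}
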
2266  void EventProcessor::readEvent(unsigned int iStreamIndex) {
2267  //TODO this will have to become per stream
2268  auto& event = principalCache_.eventPrincipal(iStreamIndex);
2269  StreamContext streamContext(event.streamID(), &processContext_);
2270 
2272  input_->readEvent(event, streamContext);
2273 
2274  streamRunStatus_[iStreamIndex]->updateLastTimestamp(input_->timestamp());
2275  streamLumiStatus_[iStreamIndex]->updateLastTimestamp(input_->timestamp());
2276  sentry.completedSuccessfully();
2277 
2278  FDEBUG(1) << "\treadEvent\n";
2279  }
2280 
2281  void EventProcessor::processEventAsync(WaitingTaskHolder iHolder, unsigned int iStreamIndex) {
2282  iHolder.group()->run([this, iHolder, iStreamIndex]() { processEventAsyncImpl(iHolder, iStreamIndex); });
2283  }
2284 
2285  void EventProcessor::processEventAsyncImpl(WaitingTaskHolder iHolder, unsigned int iStreamIndex) {
2286  auto pep = &(principalCache_.eventPrincipal(iStreamIndex));
2287 
2290  if (rng.isAvailable()) {
2291  Event ev(*pep, ModuleDescription(), nullptr);
2292  rng->postEventRead(ev);
2293  }
2294 
2295  EventSetupImpl const& es = streamLumiStatus_[iStreamIndex]->eventSetupImpl(esp_->subProcessIndex());
2296  using namespace edm::waiting_task::chain;
2297  chain::first([this, &es, pep, iStreamIndex](auto nextTask) {
2298  EventTransitionInfo info(*pep, es);
2299  schedule_->processOneEventAsync(std::move(nextTask), iStreamIndex, info, serviceToken_);
2300  }) | ifThen(not subProcesses_.empty(), [this, pep, iStreamIndex](auto nextTask) {
2301  for (auto& subProcess : boost::adaptors::reverse(subProcesses_)) {
2302  subProcess.doEventAsync(nextTask, *pep, &streamLumiStatus_[iStreamIndex]->eventSetupImpls());
2303  }
2304  }) | ifThen(looper_, [this, iStreamIndex, pep](auto nextTask) {
2305  //NOTE: behavior change. Previously, if an exception happened, the looper was still called; now it will not be called
2306  ServiceRegistry::Operate operateLooper(serviceToken_);
2307  processEventWithLooper(*pep, iStreamIndex);
2308  }) | then([pep](auto nextTask) {
2309  FDEBUG(1) << "\tprocessEvent\n";
2310  pep->clearEventPrincipal();
2311  }) | runLast(iHolder);
2312  }
2313 
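processEventAsyncImpl above composes the event path with edm::waiting_task::chain: a first step, optional steps guarded by ifThen, and a final runLast. A plain-C++ analogue of that conditional composition (not the framework's implementation) is sketched below:

#include <functional>
#include <iostream>
#include <vector>

int main() {
  bool haveSubProcesses = false;
  bool haveLooper = true;

  std::vector<std::function<void()>> chain;
  chain.push_back([] { std::cout << "schedule: process one event\n"; });  // first(...)
  if (haveSubProcesses) {                                                 // ifThen(not subProcesses_.empty(), ...)
    chain.push_back([] { std::cout << "sub-processes: process event\n"; });
  }
  if (haveLooper) {                                                       // ifThen(looper_, ...)
    chain.push_back([] { std::cout << "looper: process event\n"; });
  }
  chain.push_back([] { std::cout << "clear event principal\n"; });        // then(...)

  for (auto const& step : chain) {                                        // runLast(...)
    step();
  }
  return 0;
}
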
2314  void EventProcessor::processEventWithLooper(EventPrincipal& iPrincipal, unsigned int iStreamIndex) {
2315  bool randomAccess = input_->randomAccess();
2316  ProcessingController::ForwardState forwardState = input_->forwardState();
2317  ProcessingController::ReverseState reverseState = input_->reverseState();
2318  ProcessingController pc(forwardState, reverseState, randomAccess);
2319 
2321  do {
2322  StreamContext streamContext(iPrincipal.streamID(), &processContext_);
2323  EventSetupImpl const& es = streamLumiStatus_[iStreamIndex]->eventSetupImpl(esp_->subProcessIndex());
2324  status = looper_->doDuringLoop(iPrincipal, es, pc, &streamContext);
2325 
2326  bool succeeded = true;
2327  if (randomAccess) {
2329  input_->skipEvents(-2);
2331  succeeded = input_->goToEvent(pc.specifiedEventTransition());
2332  }
2333  }
2335  } while (!pc.lastOperationSucceeded());
2337  shouldWeStop_ = true;
2338  }
2339  }
2340 
2342  FDEBUG(1) << "\tshouldWeStop\n";
2343  if (shouldWeStop_)
2344  return true;
2345  if (!subProcesses_.empty()) {
2346  for (auto const& subProcess : subProcesses_) {
2347  if (subProcess.terminate()) {
2348  return true;
2349  }
2350  }
2351  return false;
2352  }
2353  return schedule_->terminate();
2354  }
2355 
2357 
2359 
2361 
2362  bool EventProcessor::setDeferredException(std::exception_ptr iException) {
2363  bool expected = false;
2364  if (deferredExceptionPtrIsSet_.compare_exchange_strong(expected, true)) {
2365  deferredExceptionPtr_ = iException;
2366  return true;
2367  }
2368  return false;
2369  }
2370 
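setDeferredException above uses compare_exchange_strong so that, when several streams fail concurrently, exactly one exception is stored and the rest are dropped. A minimal standalone sketch of the same idea as a complete program:

#include <atomic>
#include <exception>
#include <iostream>
#include <stdexcept>
#include <string>
#include <thread>
#include <vector>

std::atomic<bool> deferredExceptionPtrIsSet{false};
std::exception_ptr deferredExceptionPtr;

bool setDeferredException(std::exception_ptr iException) {
  bool expected = false;
  if (deferredExceptionPtrIsSet.compare_exchange_strong(expected, true)) {
    deferredExceptionPtr = iException;  // only the winning thread writes
    return true;
  }
  return false;  // someone else reported first; drop this one
}

int main() {
  std::vector<std::thread> threads;
  for (int i = 0; i < 4; ++i) {
    threads.emplace_back([i] {
      try {
        throw std::runtime_error("failure from thread " + std::to_string(i));
      } catch (...) {
        setDeferredException(std::current_exception());
      }
    });
  }
  for (auto& t : threads) {
    t.join();  // join gives the happens-before needed to read the pointer safely
  }
  try {
    std::rethrow_exception(deferredExceptionPtr);
  } catch (std::exception const& e) {
    std::cout << "first reported failure: " << e.what() << '\n';
  }
  return 0;
}
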
2372  cms::Exception ex("ModulesSynchingOnLumis");
2373  ex << "The framework is configured to use at least two streams, but the following modules\n"
2374  << "require synchronizing on LuminosityBlock boundaries:";
2375  bool found = false;
2376  for (auto worker : schedule_->allWorkers()) {
2377  if (worker->wantsGlobalLuminosityBlocks() and worker->globalLuminosityBlocksQueue()) {
2378  found = true;
2379  ex << "\n " << worker->description()->moduleName() << " " << worker->description()->moduleLabel();
2380  }
2381  }
2382  if (found) {
2383  ex << "\n\nThe situation can be fixed by either\n"
2384  << " * modifying the modules to support concurrent LuminosityBlocks (preferred), or\n"
2385  << " * setting 'process.options.numberOfConcurrentLuminosityBlocks = 1' in the configuration file";
2386  throw ex;
2387  }
2388  }
2389 
2391  std::unique_ptr<LogSystem> s;
2392  for (auto worker : schedule_->allWorkers()) {
2393  if (worker->wantsGlobalRuns() and worker->globalRunsQueue()) {
2394  if (not s) {
2395  s = std::make_unique<LogSystem>("ModulesSynchingOnRuns");
2396  (*s) << "The following modules require synchronizing on Run boundaries:";
2397  }
2398  (*s) << "\n " << worker->description()->moduleName() << " " << worker->description()->moduleLabel();
2399  }
2400  }
2401  }
2402 
2404  std::unique_ptr<LogSystem> s;
2405  for (auto worker : schedule_->allWorkers()) {
2406  if (worker->moduleConcurrencyType() == Worker::kLegacy) {
2407  if (not s) {
2408  s = std::make_unique<LogSystem>("LegacyModules");
2409  (*s) << "The following legacy modules are configured. Support for legacy modules\n"
2410  "is going to end soon. These modules need to be converted to have type\n"
2411  "edm::global, edm::stream, edm::one, or in rare cases edm::limited.";
2412  }
2413  (*s) << "\n " << worker->description()->moduleName() << " " << worker->description()->moduleLabel();
2414  }
2415  }
2416  }
2417 } // namespace edm