#!/usr/bin/env python
"""
_Scenario_

Standard cmsRun Process building interface used for data processing
for a particular data scenario.
A scenario is a macro data-taking setting such as cosmic running,
beam halo running, or particular validation tests.

This class defines the interfaces used by the Tier 0 and Tier 1
processing to wrap calls to ConfigBuilder in order to retrieve all the
configurations for the various types of jobs. An illustrative subclass
sketch appears at the end of this file.

"""

import FWCore.ParameterSet.Config as cms
from Configuration.DataProcessing.Merge import mergeProcess
from Configuration.DataProcessing.Repack import repackProcess
class Scenario(object):
    """
    _Scenario_

    """
    def __init__(self):
        pass

    def promptReco(self, globalTag, **options):
        """
        _promptReco_

        Given a skeleton process object and references to the output
        modules for the products it produces, install the standard reco
        sequences and event content for this scenario.

        """
        msg = "Scenario Implementation %s\n" % self.__class__.__name__
        msg += "Does not contain an implementation for promptReco"
        raise NotImplementedError(msg)

    def expressProcessing(self, globalTag, **options):
        """
        _expressProcessing_

        Build an express processing configuration for this scenario.

        Express processing runs conversion, reco and alca reco on each
        streamer file in the express stream and writes out RAW, RECO and
        a combined ALCA file that gets mergepacked in a later step.

        writeTiers is the list of tiers to write out, not including ALCA.

        datasets is the list of datasets to split into for each tier
        written out. Should always be one dataset.

        alcaDataset - if set, the combined ALCA file is written out with
        no dataset splitting and is assigned straight to the dataset
        provided.

        """
        msg = "Scenario Implementation %s\n" % self.__class__.__name__
        msg += "Does not contain an implementation for expressProcessing"
        raise NotImplementedError(msg)

    def alcaSkim(self, skims, **options):
        """
        _alcaSkim_

        Given a skeleton process, install the skim splitting for the
        given skims.

        """
        msg = "Scenario Implementation %s\n" % self.__class__.__name__
        msg += "Does not contain an implementation for alcaSkim"
        raise NotImplementedError(msg)

    def alcaReco(self, *skims, **options):
        """
        _alcaReco_

        Given a skeleton process, install the skim production for the
        given skims.

        """
        msg = "Scenario Implementation %s\n" % self.__class__.__name__
        msg += "Does not contain an implementation for alcaReco"
        raise NotImplementedError(msg)

    def dqmHarvesting(self, datasetName, runNumber, globalTag, **options):
        """
        _dqmHarvesting_

        Build a DQM Harvesting configuration.

        Arguments:

        datasetName - aka the workflow name for the DQM server; the name
        of the dataset containing the harvested run
        runNumber - the run being harvested
        globalTag - the global tag being used
        inputFiles - the list of LFNs being harvested

        """
        msg = "Scenario Implementation %s\n" % self.__class__.__name__
        msg += "Does not contain an implementation for dqmHarvesting"
        raise NotImplementedError(msg)

    def alcaHarvesting(self, globalTag, datasetName, **options):
        """
        _alcaHarvesting_

        Build an AlCa Harvesting configuration.

        Arguments:

        globalTag - the global tag being used
        inputFiles - the list of LFNs being harvested

        """
        msg = "Scenario Implementation %s\n" % self.__class__.__name__
        msg += "Does not contain an implementation for alcaHarvesting"
        raise NotImplementedError(msg)

    def skimming(self, skims, **options):
        """
        _skimming_

        Given a process, install the sequences for Tier 1 skimming
        and the appropriate output modules.

        """
        msg = "Scenario Implementation %s\n" % self.__class__.__name__
        msg += "Does not contain an implementation for skimming"
        raise NotImplementedError(msg)

    def merge(self, *inputFiles, **options):
        """
        _merge_

        Builds a merge configuration and returns the corresponding
        process, delegating to mergeProcess.

        """
        return mergeProcess(*inputFiles, **options)

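    # Example call (illustrative only; the LFNs below are hypothetical):
    #
    #   process = Scenario().merge(
    #       "/store/data/Run1/RAW/file1.root",
    #       "/store/data/Run1/RAW/file2.root")
    #
    # Keyword options are passed through unchanged to mergeProcess.
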
    def repack(self, **options):
        """
        _repack_

        Builds a repack configuration and returns the corresponding
        process, delegating to repackProcess.

        """
        return repackProcess(**options)

    #
    # helper methods
    #

    def dropOutputModule(self, processRef, moduleName):
        """
        _dropOutputModule_

        Util to prune an unwanted output module

        """
        # cms.Process stores its output modules in a private dict; reach
        # through the name-mangled attribute to remove the entry.
        del processRef._Process__outputmodules[moduleName]
        return
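

#
# Illustrative sketch (not part of the original interface): a minimal
# concrete Scenario. Real scenarios in Configuration/DataProcessing build
# their processes via ConfigBuilder; the hand-built cms.Process below is a
# placeholder used only to show the expected return contract of promptReco.
#
class ExampleScenario(Scenario):
    """
    _ExampleScenario_

    Hypothetical scenario illustrating how a concrete implementation is
    expected to return a fully configured cms.Process.

    """
    def promptReco(self, globalTag, **options):
        process = cms.Process("RECO")
        # A real implementation would install the standard reco sequences,
        # event content and output modules here, and apply globalTag to
        # the conditions setup.
        process.source = cms.Source("PoolSource",
                                    fileNames=cms.untracked.vstring())
        return process


# Expected call pattern from the Tier 0/Tier 1 tooling (hedged sketch;
# the global tag string is a placeholder):
#
#   scenario = ExampleScenario()
#   process = scenario.promptReco("EXAMPLE_GT::All")
#
# dropOutputModule can then prune an unwanted output module from a
# configured process, e.g. (module name hypothetical):
#
#   scenario.dropOutputModule(process, "RECOoutput")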