"""
_Scenario_

Standard cmsRun process building interface used for data processing
for a particular data-taking scenario.

A scenario is a macro data-taking setting such as cosmic running,
beam-halo running, or a particular validation test.

This class defines the interfaces used by the Tier 0 and Tier 1
processing to wrap calls to ConfigBuilder in order to retrieve all the
configurations for the various types of job.

"""

import FWCore.ParameterSet.Config as cms
from Configuration.DataProcessing.Merge import mergeProcess
from Configuration.DataProcessing.Repack import repackProcess

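# Concrete scenarios are normally looked up by name rather than
# instantiated directly. A hedged sketch of the usual lookup pattern
# (the scenario name and the promptReco options are illustrative
# assumptions; getScenario is provided by
# Configuration.DataProcessing.GetScenario):
#
#   from Configuration.DataProcessing.GetScenario import getScenario
#   scenario = getScenario("cosmics")
#   process = scenario.promptReco("GLOBALTAG::All", writeTiers=["RECO"])
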
class Scenario(object):
    """
    _Scenario_

    Base class for data-processing scenarios. Concrete scenarios
    override the job-building methods below; each unimplemented method
    raises NotImplementedError.

    """
    def __init__(self):
        pass


    def promptReco(self, globalTag, **options):
        """
        _promptReco_

        Given a skeleton process object and references to the output
        modules for the products it produces, install the standard
        reconstruction sequences and event content for this scenario.

        """
        msg = "Scenario Implementation %s\n" % self.__class__.__name__
        msg += "Does not contain an implementation for promptReco"
        raise NotImplementedError(msg)


    def expressProcessing(self, globalTag, **options):
        """
        _expressProcessing_

        Build an express processing configuration for this scenario.

        Express processing runs conversion, reconstruction and AlCa
        reconstruction on each streamer file in the express stream and
        writes out RAW, RECO and a combined ALCA file that gets
        mergepacked in a later step.

        writeTiers is the list of tiers to write out, not including ALCA.

        datasets is the list of datasets to split into for each tier
        written out. It should always contain a single dataset.

        alcaDataset - if set, the combined ALCA file is written out with
        no dataset splitting and is assigned directly to the dataset
        provided.

        """
        msg = "Scenario Implementation %s\n" % self.__class__.__name__
        msg += "Does not contain an implementation for expressProcessing"
        raise NotImplementedError(msg)


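    # A hedged usage sketch (not executed): a Tier 0 caller would invoke
    # a concrete scenario roughly like this. The option values are
    # illustrative assumptions; only the option names come from the
    # docstring above.
    #
    #   process = scenario.expressProcessing(
    #       "GLOBALTAG::All",                 # conditions global tag
    #       writeTiers=["RAW", "RECO"],       # tiers to write, ALCA excluded
    #       datasets=["StreamExpress"],       # single dataset per tier
    #       alcaDataset="StreamExpressAlca",  # combined ALCA output dataset
    #   )
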
    def alcaSkim(self, skims, **options):
        """
        _alcaSkim_

        Given a skeleton process, install the skim splitting for the
        given skims.

        """
        msg = "Scenario Implementation %s\n" % self.__class__.__name__
        msg += "Does not contain an implementation for alcaSkim"
        raise NotImplementedError(msg)


    def alcaReco(self, *skims, **options):
        """
        _alcaReco_

        Given a skeleton process, install the skim production for the
        given skims.

        """
        msg = "Scenario Implementation %s\n" % self.__class__.__name__
        msg += "Does not contain an implementation for alcaReco"
        raise NotImplementedError(msg)


    def dqmHarvesting(self, datasetName, runNumber, globalTag, **options):
        """
        _dqmHarvesting_

        Build a DQM harvesting configuration.

        Arguments:

        datasetName - aka workflow name for DQMServer; the name of the
        dataset containing the harvested run
        runNumber - the run being harvested
        globalTag - the global tag being used
        inputFiles - the list of LFNs being harvested

        """
        msg = "Scenario Implementation %s\n" % self.__class__.__name__
        msg += "Does not contain an implementation for dqmHarvesting"
        raise NotImplementedError(msg)


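    # A hedged usage sketch (not executed): the harvesting call as
    # described by the docstring above. The dataset, run number, global
    # tag and file values are illustrative assumptions.
    #
    #   process = scenario.dqmHarvesting(
    #       "/Cosmics/Run2010A-v1/RECO",         # datasetName, aka workflow name
    #       142000,                              # runNumber
    #       "GLOBALTAG::All",                    # globalTag
    #       inputFiles=["/store/.../DQM.root"],  # LFNs to harvest
    #   )
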
    def alcaHarvesting(self, globalTag, datasetName, **options):
        """
        _alcaHarvesting_

        Build an AlCa harvesting configuration.

        Arguments:

        globalTag - the global tag being used
        inputFiles - the list of LFNs being harvested

        """
        msg = "Scenario Implementation %s\n" % self.__class__.__name__
        msg += "Does not contain an implementation for alcaHarvesting"
        raise NotImplementedError(msg)


    def skimming(self, skims, **options):
        """
        _skimming_

        Given a process, install the sequences for Tier 1 skimming and
        the appropriate output modules.

        """
        msg = "Scenario Implementation %s\n" % self.__class__.__name__
        msg += "Does not contain an implementation for skimming"
        raise NotImplementedError(msg)


    def merge(self, *inputFiles, **options):
        """
        _merge_

        Build a merge configuration.

        """
        return mergeProcess(*inputFiles, **options)


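    # A hedged usage sketch (not executed): merge delegates to
    # mergeProcess from Configuration.DataProcessing.Merge. The LFNs and
    # the output_file option shown here are illustrative assumptions
    # about mergeProcess's keyword options.
    #
    #   process = scenario.merge(
    #       "/store/data/run1_1.root",
    #       "/store/data/run1_2.root",
    #       output_file="Merged.root",
    #   )
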
    def repack(self, **options):
        """
        _repack_

        Build a repack configuration.

        """
        return repackProcess(**options)


    def dropOutputModule(self, processRef, moduleName):
        """
        _dropOutputModule_

        Utility to prune an unwanted output module from a process.

        """
        # Remove the module from the process's private output-module
        # registry; cms.Process exposes no public API for this.
        del processRef._Process__outputmodules[moduleName]
        return
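

# ---------------------------------------------------------------------------
# A minimal sketch of how a concrete scenario is expected to specialise
# this interface. This class is purely illustrative and not part of the
# module's API; real implementations live in
# Configuration/DataProcessing/python/Impl/ and typically build their
# processes via ConfigBuilder rather than by hand.

class _ExampleScenario(Scenario):
    """
    Hypothetical scenario used only to illustrate the override pattern.

    """
    def promptReco(self, globalTag, **options):
        # Build a bare skeleton process; a real scenario would install
        # the full reconstruction sequences, event content and output
        # modules here for the given global tag.
        process = cms.Process("RECO")
        process.source = cms.Source(
            "PoolSource",
            fileNames=cms.untracked.vstring()
        )
        return process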