/data/doxygen/doxygen-1.7.3/gen/CMSSW_4_2_8/src/Configuration/DataProcessing/python/Scenario.py
#!/usr/bin/env python
"""
_Scenario_

Standard cmsRun Process building interface used for data processing
for a particular data scenario.
A scenario is a macro-data-taking setting such as cosmic running,
beam halo running, or particular validation tests.

This class defines the interfaces used by the Tier 0 and Tier 1
processing to wrap calls to ConfigBuilder in order to retrieve all the
configurations for the various types of jobs.

"""

import FWCore.ParameterSet.Config as cms
from Configuration.DataProcessing.Merge import mergeProcess

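# Usage sketch (illustrative, not part of this module): Tier 0/Tier 1 code
# typically resolves a concrete Scenario subclass by name and asks it for a
# configuration. The getScenario helper and the names below are assumptions
# for illustration only:
#
#   from Configuration.DataProcessing.GetScenario import getScenario
#   scenario = getScenario("cosmics")
#   process = scenario.promptReco("GLOBALTAG::All", writeTiers=['RECO'])
#   # 'process' is a cms.Process ready to be dumped to a cmsRun config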
class Scenario(object):
    """
    _Scenario_

    """
    def __init__(self):
        pass


    def promptReco(self, globalTag, writeTiers = ['RECO'], **options):
        """
        _promptReco_

        Given a skeleton process object and references
        to the output modules for the products it produces,
        install the standard reco sequences and event content for this
        scenario.

        """
        msg = "Scenario Implementation %s\n" % self.__class__.__name__
        msg += "Does not contain an implementation for promptReco"
        raise NotImplementedError(msg)

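    # Sketch of what a concrete override might look like (a hypothetical
    # subclass; the sequences installed in a real scenario come from
    # ConfigBuilder and are omitted here):
    #
    #   class Cosmics(Scenario):
    #       def promptReco(self, globalTag, writeTiers=['RECO'], **options):
    #           process = cms.Process("RECO")
    #           # ... configure conditions from globalTag, install reco
    #           # sequences, and add one output module per tier in writeTiers
    #           return process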

    def expressProcessing(self, globalTag, writeTiers = [], **options):
        """
        _expressProcessing_

        Build an express processing configuration for this scenario.

        Express processing runs conversion, reco and alca reco on each
        streamer file in the express stream and writes out RAW, RECO and
        a combined ALCA file that gets mergepacked in a later step.

        writeTiers is the list of tiers to write out, not including ALCA.

        datasets is the list of datasets to split into for each tier
        written out. Should always be one dataset.

        alcaDataset - if set, the combined ALCA file is written out with no
        dataset splitting and gets assigned straight to the dataset provided.

        """
        msg = "Scenario Implementation %s\n" % self.__class__.__name__
        msg += "Does not contain an implementation for expressProcessing"
        raise NotImplementedError(msg)

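    # Call sketch (illustrative): the option names datasets and alcaDataset
    # come from the docstring above; all values are placeholders:
    #
    #   process = scenario.expressProcessing(
    #       "GLOBALTAG::All",
    #       writeTiers=['RAW', 'RECO'],
    #       datasets=['StreamExpress'],
    #       alcaDataset='StreamExpressALCA')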

    def alcaSkim(self, skims, **options):
        """
        _alcaSkim_

        Given a skeleton process, install the skim splitting for the given skims.

        """
        msg = "Scenario Implementation %s\n" % self.__class__.__name__
        msg += "Does not contain an implementation for alcaSkim"
        raise NotImplementedError(msg)


    def alcaReco(self, *skims, **options):
        """
        _alcaReco_

        Given a skeleton process, install the skim production for the given skims.

        """
        msg = "Scenario Implementation %s\n" % self.__class__.__name__
        msg += "Does not contain an implementation for alcaReco"
        raise NotImplementedError(msg)


    def dqmHarvesting(self, datasetName, runNumber, globalTag, **options):
        """
        _dqmHarvesting_

        Build a DQM Harvesting configuration.

        Arguments:

        datasetName - aka workflow name for DQMServer, this is the name of
        the dataset containing the harvested run
        runNumber - the run being harvested
        globalTag - the global tag being used
        inputFiles - the list of LFNs being harvested

        """
        msg = "Scenario Implementation %s\n" % self.__class__.__name__
        msg += "Does not contain an implementation for dqmHarvesting"
        raise NotImplementedError(msg)

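    # Call sketch (illustrative): inputFiles travels through **options; all
    # values below are placeholders:
    #
    #   process = scenario.dqmHarvesting(
    #       "/MinimumBias/Run2011A-Express-v1/DQM",
    #       167676,
    #       "GLOBALTAG::All",
    #       inputFiles=["/store/express/.../DQM_1.root"])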

    def alcaHarvesting(self, globalTag, datasetName, **options):
        """
        _alcaHarvesting_

        Build an AlCa Harvesting configuration.

        Arguments:

        globalTag - the global tag being used
        inputFiles - the list of LFNs being harvested

        """
        msg = "Scenario Implementation %s\n" % self.__class__.__name__
        msg += "Does not contain an implementation for alcaHarvesting"
        raise NotImplementedError(msg)


    def skimming(self, skims, **options):
        """
        _skimming_

        Given a process, install the sequences for Tier 1 skimming
        and the appropriate output modules.

        """
        msg = "Scenario Implementation %s\n" % self.__class__.__name__
        msg += "Does not contain an implementation for skimming"
        raise NotImplementedError(msg)


    def merge(self, *inputFiles, **options):
        """
        _merge_

        Builds a merge configuration.

        """
        return mergeProcess(*inputFiles, **options)

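    # merge is the one interface method with a working default: it delegates
    # to mergeProcess. A call might look like this (the output_file keyword
    # is an assumption about mergeProcess options; LFNs are placeholders):
    #
    #   process = scenario.merge(
    #       "/store/data/run1_file1.root",
    #       "/store/data/run1_file2.root",
    #       output_file="Merged.root")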

    #
    # helper methods
    #

    def dropOutputModule(self, processRef, moduleName):
        """
        _dropOutputModule_

        Util to prune an unwanted output module.

        """
        del processRef._Process__outputmodules[moduleName]
        return

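    # Usage sketch: inside a scenario implementation, an unwanted output
    # module installed by the standard configuration can be pruned by name
    # ("writeRECO" is a hypothetical module name):
    #
    #   self.dropOutputModule(process, "writeRECO")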

    def addExpressOutputModules(self, process, tiers, datasets):
        """
        _addExpressOutputModules_

        Util method to unpack and install the set of data tier output
        modules corresponding to the lists of tiers and datasets provided.

        """
        for tier in tiers:
            for dataset in datasets:
                moduleName = "write%s%s" % (tier, dataset)
                contentName = "%sEventContent" % tier
                contentAttr = getattr(process, contentName)
                setattr(process, moduleName,
                        cms.OutputModule(
                            "PoolOutputModule",
                            fileName = cms.untracked.string('%s.root' % moduleName),
                            dataset = cms.untracked.PSet(
                                dataTier = cms.untracked.string(tier),
                            ),
                            eventContent = contentAttr
                        )
                )
        return

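    # Usage sketch: with tiers=['RAW', 'RECO'] and datasets=['Express'], this
    # installs output modules process.writeRAWExpress and
    # process.writeRECOExpress, writing writeRAWExpress.root and
    # writeRECOExpress.root with event content taken from
    # process.RAWEventContent and process.RECOEventContent respectively.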