"""
_Test_

Test Scenario implementation for unit-test/development purposes.

Not for use with data taking.

"""

from Configuration.DataProcessing.Scenario import Scenario
# ConfigBuilder and defaultOptions are used by dqmHarvesting() below; the
# import path assumes the PyReleaseValidation layout of this release series.
from Configuration.PyReleaseValidation.ConfigBuilder import ConfigBuilder, defaultOptions
import FWCore.ParameterSet.Config as cms
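
# A minimal usage sketch. The GetScenario lookup helper and the global tag
# value are assumptions for illustration, not something this module defines:
#
#   from Configuration.DataProcessing.GetScenario import getScenario
#   scenario = getScenario("Test")
#   process = scenario.promptReco("GLOBALTAG::ALL")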

class Test(Scenario):
    """
    _Test_

    Test Scenario

    """

    def promptReco(self, globalTag, skims = [], writeTiers = ['RECO', 'ALCARECO']):
        """
        _promptReco_

        Given a skeleton process object and references
        to the output modules for the products it produces,
        install the standard reco sequences and event content for this
        scenario.

        """
        return cms.Process("RECO")

    def alcaSkim(self, skims):
        """
        _alcaSkim_

        Given a skeleton process, install the ALCARECO sequences and
        skims. For each skim name in the list of skims, install the
        appropriate output module with the name of the skim.

        """
        return cms.Process("ALCARECO")
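
    # A hedged sketch of the behaviour the docstring describes for a real
    # scenario; the "SKIM_<name>" module label and the ALCARECO path name
    # are illustrative assumptions, not part of this stub:
    #
    #   for skim in skims:
    #       output = cms.OutputModule(
    #           "PoolOutputModule",
    #           fileName = cms.untracked.string("%s.root" % skim),
    #           SelectEvents = cms.untracked.PSet(
    #               SelectEvents = cms.vstring("pathALCARECO%s" % skim)
    #           )
    #       )
    #       setattr(process, "SKIM_%s" % skim, output)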

    def dqmHarvesting(self, datasetName, runNumber, globalTag, **args):
        """
        _dqmHarvesting_

        Build a DQM harvesting configuration.

        This method can be used to test an extra scenario; all the
        ConfigBuilder options can be overridden via **args, which is
        useful for testing with real jobs.

        Arguments:

        datasetName - aka workflow name for DQMServer, this is the name of
                      the dataset containing the harvested run
        runNumber   - the run being harvested
        globalTag   - the global tag being used
        inputFiles  - the list of LFNs being harvested

        """
        # Start from the cmsDriver default options and tailor them
        # for harvesting
        options = defaultOptions
        options.scenario = "cosmics"
        options.step = "HARVESTING:dqmHarvesting"
        options.isMC = False
        options.isData = True
        options.beamspot = None
        options.eventcontent = None
        options.name = "EDMtoMEConvert"
        options.conditions = "FrontierConditions_GlobalTag,%s" % globalTag
        options.arguments = ""
        options.evt_type = ""
        options.filein = []

        # Any keyword argument overrides the defaults set above
        options.__dict__.update(args)

        process = cms.Process("HARVESTING")
        process.source = cms.Source("PoolSource")
        configBuilder = ConfigBuilder(options, process = process)
        configBuilder.prepare()

        # Customise the source and the DQM saver for harvesting
        process.source.processingMode = cms.untracked.string('RunsAndLumis')
        process.source.fileNames = cms.untracked.vstring()
        process.maxEvents.input = -1
        process.dqmSaver.workflow = datasetName
        if args.get('saveByLumiSection', ''):
            process.dqmSaver.saveByLumiSection = int(args['saveByLumiSection'])
        if args.get('referenceFile', ''):
            process.DQMStore.referenceFileName = \
                cms.untracked.string(args['referenceFile'])

        return process
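
    # Example call (a sketch; the dataset name, run number, and tag are
    # placeholder values):
    #
    #   process = Test().dqmHarvesting(
    #       "/MinimumBias/Test-v1/DQM", 123456, "GLOBALTAG::ALL",
    #       saveByLumiSection = 1)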

    def expressProcessing(self, globalTag):
        """
        _expressProcessing_

        Build an express processing configuration for this scenario.

        Express processing runs conversion, reco, and alca reco on each
        streamer file in the express stream, and writes out RAW, RECO, and
        a combined ALCA file that gets mergepacked in a later step.

        """
        return cms.Process("Express")

    def expressMergepacking(self, *outputModules):
        """
        _expressMergepacking_

        Build/customise a mergepacking configuration.

        """
        return cms.Process("MPack")

    def skimming(self, *skims):
        """
        _skimming_

        Given a process, install the sequences for Tier 1 skimming
        and the appropriate output modules.

        """
        return cms.Process("Skimming")