import bisect
import json
import os
import re
import das_client
from FWCore.PythonUtilities.LumiList import LumiList
from TkAlExceptions import AllInOneError
class Dataset:
    def __init__( self, datasetName, dasLimit = 0 ):
        self.__name = datasetName
        self.__dasLimit = dasLimit
        # A name of the form /<primary>/<processed>/<tier> refers to a DAS
        # dataset; anything else is treated as a predefined _cff.py fragment.
        if re.match( r'/.+/.+/.+', self.__name ):
            self.__predefined = False
        else:
            fileName = self.__name + "_cff.py"
            searchPath1 = os.path.join( os.environ["CMSSW_BASE"], "python",
                                        "Alignment", "OfflineValidation",
                                        fileName )
            searchPath2 = os.path.join( os.environ["CMSSW_BASE"], "src",
                                        "Alignment", "OfflineValidation",
                                        "python", fileName )
            searchPath3 = os.path.join( os.environ["CMSSW_RELEASE_BASE"],
                                        "python", "Alignment",
                                        "OfflineValidation", fileName )
            if os.path.exists( searchPath1 ):
                pass
            elif os.path.exists( searchPath2 ):
                msg = ("The predefined dataset '%s' does exist in '%s', but "
                       "you need to run 'scram b' first."
                       %( self.__name, searchPath2 ))
                raise AllInOneError( msg )
            elif os.path.exists( searchPath3 ):
                pass
            else:
                msg = ("The predefined dataset '%s' does not exist. Please "
                       "create it first or check for typos."%( self.__name ))
                raise AllInOneError( msg )
            self.__predefined = True
52 """ Yield successive n-sized chunks from theList.
54 for i
in xrange( 0, len( theList ), n ):
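    # Example (sketch): list( self.__chunks( range( 5 ), 2 ) ) gives
    # [[0, 1], [2, 3], [4]]. Used below to split the generated extend()
    # calls into blocks of 255 entries, apparently to stay below the
    # vstring argument limit of the CMSSW python configuration.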
    def __createSnippet( self, jsonPath = None, begin = None, end = None,
                         firstRun = None, lastRun = None, repMap = None ):
        if firstRun:
            firstRun = int( firstRun )
        if lastRun:
            lastRun = int( lastRun )
        # 'begin'/'end' (timestamps) and 'firstRun'/'lastRun' (run numbers)
        # are mutually exclusive ways to give the same range limit.
        if ( begin and firstRun ) or ( end and lastRun ):
            msg = ( "The Usage of "
                    + "'begin' & 'firstRun' " * int( bool( begin and
                                                           firstRun ) )
                    + "and " * int( bool( ( begin and firstRun ) and
                                          ( end and lastRun ) ) )
                    + "'end' & 'lastRun' " * int( bool( end and
                                                        lastRun ) )
                    + "is ambiguous." )
            raise AllInOneError( msg )
        if begin or end:
            firstRun, lastRun = self.convertTimeToRun(
                begin = begin, end = end, firstRun = firstRun,
                lastRun = lastRun )
        if ( firstRun and lastRun ) and ( firstRun > lastRun ):
            msg = ( "The lower time/runrange limit ('begin'/'firstRun') "
                    "chosen is greater than the upper time/runrange limit "
                    "('end'/'lastRun')." )
            raise AllInOneError( msg )
        goodLumiSecStr = ""
        lumiStr = ""
        lumiSecExtend = ""
        if firstRun or lastRun:
            goodLumiSecStr = ( "lumiSecs = cms.untracked."
                               "VLuminosityBlockRange()\n" )
            lumiStr = "                    lumisToProcess = lumiSecs,\n"
            if not jsonPath:
                selectedRunList = self.__getRunList()
                if firstRun:
                    selectedRunList = [ run for run in selectedRunList \
                                        if run["run_number"] >= firstRun ]
                if lastRun:
                    selectedRunList = [ run for run in selectedRunList \
                                        if run["run_number"] <= lastRun ]
                lumiList = [ str( run["run_number"] ) + ":1-" \
                             + str( run["run_number"] ) + ":max" \
                             for run in selectedRunList ]
                splitLumiList = list( self.__chunks( lumiList, 255 ) )
            else:
                theLumiList = LumiList( filename = jsonPath )
                allRuns = theLumiList.getRuns()
                runsToRemove = []
                for run in allRuns:
                    if firstRun and int( run ) < firstRun:
                        runsToRemove.append( run )
                    if lastRun and int( run ) > lastRun:
                        runsToRemove.append( run )
                theLumiList.removeRuns( runsToRemove )
                splitLumiList = list( self.__chunks(
                    theLumiList.getCMSSWString().split(','), 255 ) )
            if not len(splitLumiList[0][0]) == 0:
                lumiSecStr = [ "',\n'".join( lumis ) \
                               for lumis in splitLumiList ]
                lumiSecStr = [ "lumiSecs.extend( [\n'" + lumis + "'\n] )" \
                               for lumis in lumiSecStr ]
                lumiSecExtend = "\n".join( lumiSecStr )
        elif jsonPath:
            goodLumiSecStr = ( "goodLumiSecs = LumiList.LumiList(filename"
                               "= '%(json)s').getCMSSWString().split(',')\n"
                               "lumiSecs = cms.untracked"
                               ".VLuminosityBlockRange()\n" )
            lumiStr = "                    lumisToProcess = lumiSecs,\n"
            lumiSecExtend = "lumiSecs.extend(goodLumiSecs)\n"
        splitFileList = list( self.__chunks( self.fileList(), 255 ) )
        fileStr = [ "',\n'".join( files ) for files in splitFileList ]
        fileStr = [ "readFiles.extend( [\n'" + files + "'\n] )" \
                    for files in fileStr ]
        files = "\n".join( fileStr )

        theMap = repMap
        theMap["files"] = files
        theMap["json"] = jsonPath
        theMap["lumiStr"] = lumiStr
        theMap["goodLumiSecStr"] = goodLumiSecStr%( theMap )
        theMap["lumiSecExtend"] = lumiSecExtend
        dataset_snippet = self.__source_template%( theMap )
        return dataset_snippet
    __dummy_source_template = (
        "%(process)smaxEvents = cms.untracked.PSet( "
        "input = cms.untracked.int32(%(nEvents)s) )\n"
        "readFiles = cms.untracked.vstring()\n"
        "secFiles = cms.untracked.vstring()\n"
        "%(process)ssource = cms.Source(\"PoolSource\",\n"
        "%(tab)s                    secondaryFileNames = secFiles,\n"
        "%(tab)s                    fileNames = readFiles\n"
        ")\n"
        "readFiles.extend(['dummy_File.root'])\n")
    def __find_lt( self, a, x ):
        'Find rightmost value less than x'
        i = bisect.bisect_left( a, x )
        if i:
            return i-1
        raise ValueError

    def __find_ge( self, a, x ):
        'Find leftmost item greater than or equal to x'
        i = bisect.bisect_left( a, x )
        if i != len( a ):
            return i
        raise ValueError
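    # Example (sketch): with a = [ 1, 4, 9 ], bisect_left( a, 5 ) is 2, so
    # __find_lt( a, 5 ) returns index 1 (value 4) and __find_ge( a, 5 )
    # returns index 2 (value 9). Both return *indices*, matching their use
    # as runList[runIndex] in convertTimeToRun() below, and raise ValueError
    # when no qualifying element exists.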
    def __getData( self, dasQuery, dasLimit = 0 ):
        dasData = das_client.get_data( 'https://cmsweb.cern.ch',
                                       dasQuery, 0, dasLimit, False )
        jsondict = json.loads( dasData )
        # Check, if the DAS query succeeded.
        if jsondict["status"] != 'ok':
            msg = "Status not 'ok', but: %s"%( jsondict["status"] )
            raise AllInOneError( msg )
        return jsondict["data"]
    def __getDataType( self ):
        dasQuery_type = ( 'dataset dataset=%s | grep dataset.datatype,'
                          'dataset.name'%( self.__name ) )
        data = self.__getData( dasQuery_type )
        return data[0]["dataset"][0]["datatype"]
    def __getFileInfoList( self, dasLimit ):
        dasQuery_files = ( 'file dataset=%s | grep file.name, file.nevents, '
                           'file.creation_time, '
                           'file.modification_time'%( self.__name ) )
        print "Requesting file information for '%s' from DAS..."%( self.__name ),
        data = self.__getData( dasQuery_files, dasLimit )
        print "Done."
        data = [ entry["file"] for entry in data ]
        if len( data ) == 0:
            msg = ("No files are available for the dataset '%s'. This can be "
                   "due to a typo or due to a DAS problem. Please check the "
                   "spelling of the dataset and/or retry to run "
                   "'validateAlignments.py'."%( self.name() ))
            raise AllInOneError( msg )
        fileInformationList = []
        for file in data:
            fileName = file[0]["name"]
            fileCreationTime = file[0]["creation_time"]
            # Not every DAS record carries 'nevents' in the same sub-entry,
            # so probe the entries until one provides it (sketch of the
            # elided lookup).
            for ii in range( len( file ) ):
                try:
                    fileNEvents = file[ii]["nevents"]
                except KeyError:
                    continue
                break
            fileDict = { "name": fileName,
                         "creation_time": fileCreationTime,
                         "nevents": fileNEvents
                         }
            fileInformationList.append( fileDict )
        fileInformationList.sort( key=lambda info: info["name"] )
        return fileInformationList
    def __getRunList( self ):
        dasQuery_runs = ( 'run dataset=%s | grep run.run_number,'
                          'run.creation_time'%( self.__name ) )
        print "Requesting run information for '%s' from DAS..."%( self.__name ),
        data = self.__getData( dasQuery_runs )
        print "Done."
        data = [ entry["run"][0] for entry in data ]
        data.sort( key = lambda run: run["creation_time"] )
        return data
    __source_template = (
        "%(importCms)s"
        "import FWCore.PythonUtilities.LumiList as LumiList\n\n"
        "%(goodLumiSecStr)s"
        "%(process)smaxEvents = cms.untracked.PSet( "
        "input = cms.untracked.int32(%(nEvents)s) )\n"
        "readFiles = cms.untracked.vstring()\n"
        "secFiles = cms.untracked.vstring()\n"
        "%(process)ssource = cms.Source(\"PoolSource\",\n"
        "%(lumiStr)s"
        "%(tab)s                    secondaryFileNames = secFiles,\n"
        "%(tab)s                    fileNames = readFiles\n"
        ")\n"
        "%(files)s\n\n"
        "%(lumiSecExtend)s\n")
    def convertTimeToRun( self, begin = None, end = None,
                          firstRun = None, lastRun = None,
                          shortTuple = True ):
        if ( begin and firstRun ) or ( end and lastRun ):
            msg = ( "The Usage of "
                    + "'begin' & 'firstRun' " * int( bool( begin and
                                                           firstRun ) )
                    + "and " * int( bool( ( begin and firstRun ) and
                                          ( end and lastRun ) ) )
                    + "'end' & 'lastRun' " * int( bool( end and
                                                        lastRun ) )
                    + "is ambiguous." )
            raise AllInOneError( msg )

        runList = [ run["run_number"] for run in self.__getRunList() ]
        runTimeList = [ run["creation_time"] for run in self.__getRunList() ]

        if begin:
            try:
                runIndex = self.__find_ge( runTimeList, begin )
            except ValueError:
                msg = ( "Your 'begin' is after the creation time of the last "
                        "run in the dataset\n'%s'"%( self.__name ) )
                raise AllInOneError( msg )
            firstRun = runList[runIndex]
            begin = None
        if end:
            try:
                runIndex = self.__find_lt( runTimeList, end )
            except ValueError:
                msg = ( "Your 'end' is before the creation time of the first "
                        "run in the dataset\n'%s'"%( self.__name ) )
                raise AllInOneError( msg )
            lastRun = runList[runIndex]
            end = None
        if shortTuple:
            return firstRun, lastRun
        else:
            return begin, end, firstRun, lastRun
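    # Example (sketch, requires DAS access): mapping a date range onto run
    # numbers for this dataset:
    #   firstRun, lastRun = dataset.convertTimeToRun(
    #       begin = "2012-11-01 00:00:00", end = "2012-11-28 00:00:00" )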
    def datasetSnippet( self, jsonPath = None, begin = None, end = None,
                        firstRun = None, lastRun = None, nEvents = None ):
        if self.__predefined:
            return ("process.load(\"Alignment.OfflineValidation.%s_cff\")\n"
                    "process.maxEvents = cms.untracked.PSet(\n"
                    "    input = cms.untracked.int32(%s)\n"
                    ")"
                    %( self.__name, nEvents ))
        theMap = { "process": "process.",
                   "tab": " " * len( "process." ),
                   "nEvents": str( nEvents ),
                   "importCms": ""
                   }
        datasetSnippet = self.__createSnippet( jsonPath = jsonPath,
                                               begin = begin,
                                               end = end,
                                               firstRun = firstRun,
                                               lastRun = lastRun,
                                               repMap = theMap )
        return datasetSnippet
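    # Usage (sketch): for a predefined dataset this returns a plain
    # process.load() fragment; otherwise the snippet is expanded from
    # __source_template, e.g.
    #   snippet = dataset.datasetSnippet( nEvents = 100,
    #                                     firstRun = 190456,
    #                                     lastRun = 207898 )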
    def dump_cff( self, outName = None, jsonPath = None, begin = None,
                  end = None, firstRun = None, lastRun = None ):
        if outName == None:
            outName = "Dataset"
        packageName = os.path.join( "Alignment", "OfflineValidation" )
        if not os.path.exists( os.path.join(
            os.environ["CMSSW_BASE"], "src", packageName ) ):
            msg = ("You try to store the predefined dataset '%s'.\n"
                   "For that you need to check out the package '%s' to your "
                   "private release area in\n"%( outName, packageName )
                   + os.environ["CMSSW_BASE"] )
            raise AllInOneError( msg )
        theMap = { "process": "",
                   "tab": "",
                   "nEvents": str( -1 ),
                   "importCms": "import FWCore.ParameterSet.Config as cms\n" }
        dataset_cff = self.__createSnippet( jsonPath = jsonPath,
                                            begin = begin,
                                            end = end,
                                            firstRun = firstRun,
                                            lastRun = lastRun,
                                            repMap = theMap )
        filePath = os.path.join( os.environ["CMSSW_BASE"], "src", packageName,
                                 "python", outName + "_cff.py" )
        if os.path.exists( filePath ):
            existMsg = "The predefined dataset '%s' already exists.\n"%( outName )
            askString = "Do you want to overwrite it? [y/n]\n"
            inputQuery = existMsg + askString
            while True:
                userInput = raw_input( inputQuery ).lower()
                if userInput == "y":
                    break
                elif userInput == "n":
                    return
                else:
                    inputQuery = askString
        print ( "The predefined dataset '%s' will be stored in the file\n"
                %( outName )
                + filePath +
                "\nFor future use you have to do 'scram b'." )
        theFile = open( filePath, "w" )
        theFile.write( dataset_cff )
        theFile.close()
    def fileList( self ):
        fileList = [ fileInfo["name"] \
                     for fileInfo in self.__getFileInfoList( self.__dasLimit ) ]
        return fileList
if __name__ == '__main__':
    print "Start testing..."
    datasetName = '/MinimumBias/Run2012D-TkAlMinBias-v1/ALCARECO'
    jsonFile = ( '/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/'
                 'Collisions12/8TeV/Prompt/'
                 'Cert_190456-207898_8TeV_PromptReco_Collisions12_JSON.txt' )
    dataset = Dataset( datasetName )
    print dataset.datasetSnippet( nEvents = 100, jsonPath = jsonFile,
                                  end = "2012-11-28 00:00:00" )
    dataset.dump_cff( outName = "Dataset_Test_TkAlMinBias_Run2012D",
                      end = "2012-11-28 00:00:00" )