import bisect
import json
import os
import re

import das_client
from FWCore.PythonUtilities.LumiList import LumiList
from TkAlExceptions import AllInOneError

class Dataset(object):
    def __init__( self, datasetName, dasLimit = 0 ):
        self.__name = datasetName
        # Check whether the name follows the CMS dataset naming scheme
        # /<primary dataset>/<processed dataset>/<data tier>.
        if re.match( r'/.+/.+/.+', self.__name ):
            self.__dataType = self.__getDataType()
            self.__predefined = False
        else:
            fileName = self.__name + "_cff.py"
            searchPath1 = os.path.join( os.environ["CMSSW_BASE"], "python",
                                        "Alignment", "OfflineValidation",
                                        fileName )
            searchPath2 = os.path.join( os.environ["CMSSW_BASE"], "src",
                                        "Alignment", "OfflineValidation",
                                        "python", fileName )
            searchPath3 = os.path.join( os.environ["CMSSW_RELEASE_BASE"],
                                        "python", "Alignment",
                                        "OfflineValidation", fileName )
            if os.path.exists( searchPath1 ):
                pass
            elif os.path.exists( searchPath2 ):
                msg = ("The predefined dataset '%s' does exist in '%s', but "
                       "you need to run 'scram b' first."
                       %( self.__name, searchPath2 ))
                raise AllInOneError( msg )
            elif os.path.exists( searchPath3 ):
                pass
            else:
                msg = ("The predefined dataset '%s' does not exist. Please "
                       "create it first or check for typos."%( self.__name ))
                raise AllInOneError( msg )
            self.__predefined = True
        self.__dasLimit = dasLimit
52 """ Yield successive n-sized chunks from theList.
54 for i
in xrange( 0, len( theList ), n ):
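
    # Illustration (an assumption for clarity, not from the original
    # file; `d` is a hypothetical Dataset instance): chunking a
    # six-element list with n = 4,
    #   >>> list( d._Dataset__chunks( range( 6 ), 4 ) )
    #   [[0, 1, 2, 3], [4, 5]]
    # The snippet builders below use this to split file and lumi lists
    # into blocks of at most 255 entries per generated extend(...) call.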

    def __createSnippet( self, jsonPath = None, begin = None, end = None,
                         firstRun = None, lastRun = None, repMap = None,
                         crab = False ):
        if firstRun:
            firstRun = int( firstRun )
        if lastRun:
            lastRun = int( lastRun )
        if ( begin and firstRun ) or ( end and lastRun ):
            msg = ( "The usage of "
                    + "'begin' & 'firstRun' " * int( bool( begin and
                                                           firstRun ) )
                    + "and " * int( bool( ( begin and firstRun ) and
                                          ( end and lastRun ) ) )
                    + "'end' & 'lastRun' " * int( bool( end and
                                                        lastRun ) )
                    + "is ambiguous." )
            raise AllInOneError( msg )
        if begin or end:
            ( firstRun, lastRun ) = self.convertTimeToRun(
                begin = begin, end = end, firstRun = firstRun,
                lastRun = lastRun )
        if ( firstRun and lastRun ) and ( firstRun > lastRun ):
            msg = ( "The lower time/runrange limit ('begin'/'firstRun') "
                    "chosen is greater than the upper time/runrange limit "
                    "('end'/'lastRun')." )
            raise AllInOneError( msg )
        goodLumiSecStr = ""
        lumiStr = ""
        lumiSecExtend = ""
        if firstRun or lastRun:
            goodLumiSecStr = ( "lumiSecs = cms.untracked."
                               "VLuminosityBlockRange()\n" )
            lumiStr = " lumisToProcess = lumiSecs,\n"
            if not jsonPath:
                selectedRunList = self.__getRunList()
                if firstRun:
                    selectedRunList = [ run for run in selectedRunList \
                                        if run["run_number"] >= firstRun ]
                if lastRun:
                    selectedRunList = [ run for run in selectedRunList \
                                        if run["run_number"] <= lastRun ]
                lumiList = [ str( run["run_number"] ) + ":1-" \
                             + str( run["run_number"] ) + ":max" \
                             for run in selectedRunList ]
                splitLumiList = list( self.__chunks( lumiList, 255 ) )
            else:
                theLumiList = LumiList( filename = jsonPath )
                allRuns = theLumiList.getRuns()
                runsToRemove = []
                for run in allRuns:
                    if firstRun and int( run ) < firstRun:
                        runsToRemove.append( run )
                    if lastRun and int( run ) > lastRun:
                        runsToRemove.append( run )
                theLumiList.removeRuns( runsToRemove )
                splitLumiList = list( self.__chunks(
                    theLumiList.getCMSSWString().split(','), 255 ) )
            if not len(splitLumiList[0][0]) == 0:
                lumiSecStr = [ "',\n'".join( lumis ) \
                               for lumis in splitLumiList ]
                lumiSecStr = [ "lumiSecs.extend( [\n'" + lumis + "'\n] )" \
                               for lumis in lumiSecStr ]
                lumiSecExtend = "\n".join( lumiSecStr )
        elif jsonPath:
            goodLumiSecStr = ( "goodLumiSecs = LumiList.LumiList(filename"
                               "= '%(json)s').getCMSSWString().split(',')\n"
                               "lumiSecs = cms.untracked"
                               ".VLuminosityBlockRange()\n" )
            lumiStr = " lumisToProcess = lumiSecs,\n"
            lumiSecExtend = "lumiSecs.extend(goodLumiSecs)\n"
        if crab:
            files = ""
        else:
            splitFileList = list( self.__chunks( self.fileList(), 255 ) )
            fileStr = [ "',\n'".join( files ) for files in splitFileList ]
            fileStr = [ "readFiles.extend( [\n'" + files + "'\n] )" \
                        for files in fileStr ]
            files = "\n".join( fileStr )
        theMap = repMap
        theMap["files"] = files
        theMap["json"] = jsonPath
        theMap["lumiStr"] = lumiStr
        theMap["goodLumiSecStr"] = goodLumiSecStr%( theMap )
        theMap["lumiSecExtend"] = lumiSecExtend
        if crab:
            dataset_snippet = self.__dummy_source_template%( theMap )
        else:
            dataset_snippet = self.__source_template%( theMap )
        return dataset_snippet
    __dummy_source_template = (
        "%(process)smaxEvents = cms.untracked.PSet( "
        "input = cms.untracked.int32(%(nEvents)s) )\n"
        "readFiles = cms.untracked.vstring()\n"
        "secFiles = cms.untracked.vstring()\n"
        "%(process)ssource = cms.Source(\"PoolSource\",\n"
        "%(tab)s secondaryFileNames = secFiles,\n"
        "%(tab)s fileNames = readFiles\n"
        ")\n"
        "readFiles.extend(['dummy_File.root'])\n")
    def __find_lt( self, a, x ):
        'Find rightmost value less than x'
        i = bisect.bisect_left( a, x )
        if i:
            return i-1
        raise ValueError

    def __find_ge( self, a, x ):
        'Find leftmost item greater than or equal to x'
        i = bisect.bisect_left( a, x )
        if i != len( a ):
            return i
        raise ValueError
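
    # Both helpers are the standard bisect recipes from the Python
    # library documentation. A sketch (the values are made up) with a
    # sorted list of creation-time strings:
    #   a = [ "2012-11-01", "2012-11-10", "2012-11-20" ]
    #   __find_lt( a, "2012-11-15" )  ->  1   (rightmost value < x)
    #   __find_ge( a, "2012-11-10" )  ->  1   (leftmost value >= x)
    # convertTimeToRun() below uses them to map 'begin'/'end' timestamps
    # onto entries of the run-creation-time list.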
    def __getData( self, dasQuery, dasLimit = 0 ):
        dasData = das_client.get_data( 'https://cmsweb.cern.ch',
                                       dasQuery, 0, dasLimit, False )
        if isinstance(dasData, str):
            jsondict = json.loads( dasData )
        else:
            jsondict = dasData
        if jsondict["status"] != 'ok':
            msg = "Status not 'ok', but '%s'."%( jsondict["status"] )
            raise AllInOneError( msg )
        return jsondict["data"]
    def __getDataType( self ):
        dasQuery_type = ( 'dataset dataset=%s | grep dataset.datatype,'
                          'dataset.name'%( self.__name ) )
        data = self.__getData( dasQuery_type )
        for a in data[0]["dataset"]:
            if "datatype" in a:
                return a["datatype"]
        msg = ("Cannot find the datatype of the dataset '%s'"%( self.name() ))
        raise AllInOneError( msg )
    def __getFileInfoList( self, dasLimit ):
        dasQuery_files = ( 'file dataset=%s | grep file.name, file.nevents, '
                           'file.creation_time, '
                           'file.modification_time'%( self.__name ) )
        print "Requesting file information for '%s' from DAS..."%( self.__name ),
        data = self.__getData( dasQuery_files, dasLimit )
        print "Done."
        data = [ entry["file"] for entry in data ]
        if len( data ) == 0:
            msg = ("No files are available for the dataset '%s'. This can be "
                   "due to a typo or due to a DAS problem. Please check the "
                   "spelling of the dataset and/or retry to run "
                   "'validateAlignments.py'."%( self.name() ))
            raise AllInOneError( msg )
        fileInformationList = []
        for file in data:
            fileName = file[0]["name"]
            fileCreationTime = file[0]["creation_time"]
            # The event count may show up in any of the DAS services'
            # records for a file, so scan them until it is found.
            fileNEvents = 0
            for ii in range( len( file ) ):
                if "nevents" in file[ii]:
                    fileNEvents = file[ii]["nevents"]
                    break
            fileDict = { "name": fileName,
                         "creation_time": fileCreationTime,
                         "nevents": fileNEvents
                         }
            fileInformationList.append( fileDict )
        fileInformationList.sort( key=lambda info: info["name"] )
        return fileInformationList
    def __getRunList( self ):
        dasQuery_runs = ( 'run dataset=%s | grep run.run_number,'
                          'run.creation_time'%( self.__name ) )
        print "Requesting run information for '%s' from DAS..."%( self.__name ),
        data = self.__getData( dasQuery_runs )
        print "Done."
        data = [ entry["run"][0] for entry in data ]
        data.sort( key = lambda run: run["creation_time"] )
        return data
    __source_template = (
        "%(importCms)s"
        "import FWCore.PythonUtilities.LumiList as LumiList\n\n"
        "%(goodLumiSecStr)s"
        "%(process)smaxEvents = cms.untracked.PSet( "
        "input = cms.untracked.int32(%(nEvents)s) )\n"
        "readFiles = cms.untracked.vstring()\n"
        "secFiles = cms.untracked.vstring()\n"
        "%(process)ssource = cms.Source(\"PoolSource\",\n"
        "%(lumiStr)s"
        "%(tab)s secondaryFileNames = secFiles,\n"
        "%(tab)s fileNames = readFiles\n"
        ")\n"
        "%(files)s\n"
        "%(lumiSecExtend)s\n")

    def convertTimeToRun( self, begin = None, end = None,
                          firstRun = None, lastRun = None,
                          shortTuple = True ):
        if ( begin and firstRun ) or ( end and lastRun ):
            msg = ( "The usage of "
                    + "'begin' & 'firstRun' " * int( bool( begin and
                                                           firstRun ) )
                    + "and " * int( bool( ( begin and firstRun ) and
                                          ( end and lastRun ) ) )
                    + "'end' & 'lastRun' " * int( bool( end and
                                                        lastRun ) )
                    + "is ambiguous." )
            raise AllInOneError( msg )
        runList = [ run["run_number"]
                    for run in self.__getRunList() ]
        runTimeList = [ run["creation_time"]
                        for run in self.__getRunList() ]
        if begin:
            try:
                runIndex = self.__find_ge( runTimeList, begin )
            except ValueError:
                msg = ( "Your 'begin' is after the creation time of the last "
                        "run in the dataset\n'%s'"%( self.__name ) )
                raise AllInOneError( msg )
            firstRun = runList[runIndex]
            begin = None
        if end:
            try:
                runIndex = self.__find_lt( runTimeList, end )
            except ValueError:
                msg = ( "Your 'end' is before the creation time of the first "
                        "run in the dataset\n'%s'"%( self.__name ) )
                raise AllInOneError( msg )
            lastRun = runList[runIndex]
            end = None
        if shortTuple:
            return firstRun, lastRun
        else:
            return begin, end, firstRun, lastRun
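
    # Example (hypothetical run numbers and creation times): if the
    # dataset contains run 207800 created on 2012-11-26 and run 207900
    # created on 2012-11-27, then
    #   self.convertTimeToRun( begin = "2012-11-27 00:00:00" )
    # returns ( 207900, None ): 'begin' is replaced by the first run
    # created at or after that time, so downstream code only has to
    # deal with run numbers.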
    def datasetSnippet( self, jsonPath = None, begin = None, end = None,
                        firstRun = None, lastRun = None, nEvents = None,
                        crab = False ):
        if self.__predefined:
            return ("process.load(\"Alignment.OfflineValidation.%s_cff\")\n"
                    "process.maxEvents = cms.untracked.PSet(\n"
                    "    input = cms.untracked.int32(%s)\n"
                    ")"
                    %( self.__name, nEvents ))
        theMap = { "process": "process.",
                   "tab": " " * len( "process." ),
                   "nEvents": str( nEvents ),
                   "importCms": ""
                   }
        datasetSnippet = self.__createSnippet( jsonPath = jsonPath,
                                               begin = begin,
                                               end = end,
                                               firstRun = firstRun,
                                               lastRun = lastRun,
                                               repMap = theMap,
                                               crab = crab )
        return datasetSnippet
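
    # Typical use (mirrors the self-test at the bottom of this file; the
    # values are examples): the returned string is meant to be pasted
    # into a cfg file, e.g.
    #   snippet = dataset.datasetSnippet( nEvents = 100,
    #                                     jsonPath = jsonFile,
    #                                     end = "2012-11-28 00:00:00" )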

    def dump_cff( self, outName = None, jsonPath = None, begin = None,
                  end = None, firstRun = None, lastRun = None ):
        if outName == None:
            outName = "Dataset"
        packageName = os.path.join( "Alignment", "OfflineValidation" )
        if not os.path.exists( os.path.join(
            os.environ["CMSSW_BASE"], "src", packageName ) ):
            msg = ("You try to store the predefined dataset '%s'.\n"
                   "For that you need to check out the package '%s' to your "
                   "private release area in\n"%( outName, packageName )
                   + os.environ["CMSSW_BASE"] )
            raise AllInOneError( msg )
        theMap = { "process": "",
                   "tab": "",
                   "nEvents": str( -1 ),
                   "importCms": "import FWCore.ParameterSet.Config as cms\n" }
        dataset_cff = self.__createSnippet( jsonPath = jsonPath,
                                            begin = begin,
                                            end = end,
                                            firstRun = firstRun,
                                            lastRun = lastRun,
                                            repMap = theMap )
        filePath = os.path.join( os.environ["CMSSW_BASE"], "src", packageName,
                                 "python", outName + "_cff.py" )
        if os.path.exists( filePath ):
            existMsg = "The predefined dataset '%s' already exists.\n"%( outName )
            askString = "Do you want to overwrite it? [y/n]\n"
            inputQuery = existMsg + askString
            while True:
                userInput = raw_input( inputQuery ).lower()
                if userInput == "y":
                    break
                elif userInput == "n":
                    return
                else:
                    inputQuery = askString
        print ( "The predefined dataset '%s' will be stored in the file\n"%( outName )
                + filePath +
                "\nFor future use you have to do 'scram b'." )
        theFile = open( filePath, "w" )
        theFile.write( dataset_cff )
        theFile.close()
        return

    def fileList( self ):
        fileList = [ fileInfo["name"] \
                     for fileInfo in self.__getFileInfoList( self.__dasLimit ) ]
        return fileList

    def name( self ):
        return self.__name

if __name__ == '__main__':
    print "Start testing..."
    datasetName = '/MinimumBias/Run2012D-TkAlMinBias-v1/ALCARECO'
    jsonFile = ( '/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/'
                 'Collisions12/8TeV/Prompt/'
                 'Cert_190456-207898_8TeV_PromptReco_Collisions12_JSON.txt' )
    dataset = Dataset( datasetName )
    print dataset.datasetSnippet( nEvents = 100, jsonPath = jsonFile,
                                  end = "2012-11-28 00:00:00" )
    dataset.dump_cff( outName = "Dataset_Test_TkAlMinBias_Run2012D",
                      jsonPath = jsonFile,
                      end = "2012-11-28 00:00:00" )