from __future__ import print_function
from __future__ import absolute_import
from builtins import range
# standard-library modules used further down (os.environ, re.match, json.loads,
# bisect.bisect_left, datetime.date)
import bisect
import datetime
import json
import os
import re
import Utilities.General.cmssw_das_client as das_client
from FWCore.PythonUtilities.LumiList import LumiList
from .helperFunctions import cache
from .TkAlExceptions import AllInOneError
def __init__( self, datasetName, dasLimit = 0, tryPredefinedFirst = True,
              cmssw = os.environ["CMSSW_BASE"], cmsswrelease = os.environ["CMSSW_RELEASE_BASE"],
              magneticfield = None, dasinstance = None ):
    if re.match( r'/.+/.+/.+', self.__name ):
        fileName = "Dataset" + self.__name.replace("/", "_") + "_cff.py"
    else:
        fileName = self.__name + "_cff.py"
    searchPath1 = os.path.join( self.__cmssw, "python",
                                "Alignment", "OfflineValidation",
                                fileName )
    searchPath2 = os.path.join( self.__cmssw, "src",
                                "Alignment", "OfflineValidation",
                                "python", fileName )
    searchPath3 = os.path.join( self.__cmsswrelease,
                                "python", "Alignment",
                                "OfflineValidation", fileName )
    elif os.path.exists( searchPath1 ):
        ...
    elif os.path.exists( searchPath2 ):
        msg = ("The predefined dataset '%s' does exist in '%s', but "
               "you need to run 'scram b' first."
               %( self.__name, searchPath2 ))
        ...
        print("Getting the data from DAS again.  To go faster next time, run scram b.")
    elif os.path.exists( searchPath3 ):
        ...
    else:
        msg = ("The predefined dataset '%s' does not exist. Please "
               "create it first or check for typos."%( self.__name ))
        raise AllInOneError( msg )
    self.__name = "Dataset" + self.__name.replace("/", "_")

    if magneticfield is not None:
        try:
            magneticfield = float(magneticfield)
        except ValueError:
            raise AllInOneError("Bad magneticfield {} which can't be converted to float".format(magneticfield))
90 """ Yield successive n-sized chunks from theList. 92 for i
in range( 0, len( theList ), n ):
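# Hedged usage sketch (not part of the original file): a standalone version of
# the chunking idea above.  File and lumi lists are later split into groups of
# 255 entries so that each generated readFiles.extend(...) or lumiSecs.extend(...)
# call stays a manageable size.  The file names below are invented.
def _chunks_sketch(theList, n):
    """Yield successive n-sized chunks from theList (standalone illustration)."""
    for i in range(0, len(theList), n):
        yield theList[i:i + n]

_example_files = ["file_%i.root" % i for i in range(600)]
assert [len(c) for c in _chunks_sketch(_example_files, 255)] == [255, 255, 90]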
__source_template = ("%(header)s"
                     "import FWCore.PythonUtilities.LumiList as LumiList\n\n"
                     "readFiles = cms.untracked.vstring()\n"
                     "secFiles = cms.untracked.vstring()\n"
                     "%(process)ssource = cms.Source(\"PoolSource\",\n"
                     "%(tab)s secondaryFileNames ="
                     "%(tab)s fileNames = readFiles\n"
                     "%(lumiSecExtend)s\n"
                     "%(process)smaxEvents = cms.untracked.PSet( "
                     "input = cms.untracked.int32(%(nEvents)s) )\n"
                     "%(skipEventsString)s\n")

__dummy_source_template = ("readFiles = cms.untracked.vstring()\n"
                           "secFiles = cms.untracked.vstring()\n"
                           "%(process)ssource = cms.Source(\"PoolSource\",\n"
                           "%(tab)s secondaryFileNames ="
                           "%(tab)s fileNames = readFiles\n"
                           "readFiles.extend(['dummy_File.root'])\n"
                           "%(process)smaxEvents = cms.untracked.PSet( "
                           "input = cms.untracked.int32(%(nEvents)s) )\n"
                           "%(skipEventsString)s\n")
def __lumiSelectionSnippet( self, jsonPath = None, firstRun = None, lastRun = None ):
    lumiSecExtend = ""
    if firstRun or lastRun or jsonPath:
        if not jsonPath:
            # no JSON file: restrict the DAS run list of the dataset
            # (selectedRunList) to the requested run range
            if firstRun:
                selectedRunList = [ run for run in selectedRunList \
                                    if self.__findInJson(run, "run_number") >= firstRun ]
            if lastRun:
                selectedRunList = [ run for run in selectedRunList \
                                    if self.__findInJson(run, "run_number") <= lastRun ]
            lumiList = [ str( self.__findInJson(run, "run_number") ) + ":1-"
                         + str( self.__findInJson(run, "run_number") ) + ":max"
                         for run in selectedRunList ]
            splitLumiList = list( self.__chunks( lumiList, 255 ) )
        else:
            try:
                theLumiList = LumiList( filename = jsonPath )
            except ValueError:
                theLumiList = None

            if theLumiList is not None:
                allRuns = theLumiList.getRuns()
                runsToRemove = []
                for run in allRuns:
                    if firstRun and int( run ) < firstRun:
                        runsToRemove.append( run )
                    if lastRun and int( run ) > lastRun:
                        runsToRemove.append( run )
                theLumiList.removeRuns( runsToRemove )
                splitLumiList = list( self.__chunks(
                    theLumiList.getCMSSWString().split(','), 255 ) )
                if not (splitLumiList and splitLumiList[0] and splitLumiList[0][0]):
                    splitLumiList = None
            else:
                with open(jsonPath) as f:
                    jsoncontents = f.read()
                    if "process.source.lumisToProcess" in jsoncontents:
                        msg = "%s is not a json file, but it seems to be a CMSSW lumi selection cff snippet.  Trying to use it" % jsonPath
                        if firstRun or lastRun:
                            msg += ("\n  (after applying firstRun and/or lastRun)")
                        msg += ".\nPlease note that, depending on the format of this file, it may not work as expected."
                        msg += "\nCheck your config file to make sure that it worked properly."
                        print(msg)

                        if firstRun or lastRun:
                            jsoncontents = re.sub(r"\d+:(\d+|max)(-\d+:(\d+|max))?",
                                                  self.getForceRunRangeFunction(firstRun, lastRun),
                                                  jsoncontents)
                            jsoncontents = (jsoncontents.replace("'',\n", "")
                                                        .replace("''\n", ""))
                        lumiSecExtend = jsoncontents
                        splitLumiList = None
                    else:
                        raise AllInOneError("%s is not a valid json file!" % jsonPath)

        if splitLumiList and splitLumiList[0] and splitLumiList[0][0]:
            lumiSecStr = [ "',\n'".join( lumis )
                           for lumis in splitLumiList ]
            lumiSecStr = [ "lumiSecs.extend( [\n'" + lumis + "'\n] )"
                           for lumis in lumiSecStr ]
            lumiSecExtend = "\n".join( lumiSecStr )
        elif not lumiSecExtend:
            msg = "You are trying to run a validation without any runs!  Check that:"
            if firstRun or lastRun:
                msg += "\n - firstRun/begin and lastRun/end are correct for this dataset, and there are runs in between containing data"
            if jsonPath:
                msg += "\n - your JSON file is correct for this dataset, and the runs contain data"
            if (firstRun or lastRun) and jsonPath:
                msg += "\n - firstRun/begin and lastRun/end are consistent with your JSON file"
            raise AllInOneError(msg)

    return lumiSecExtend

def __fileListSnippet(self, crab=False, parent=False, firstRun=None, lastRun=None, forcerunselection=False):
    splitFileList = list( self.__chunks( self.fileList(firstRun=firstRun, lastRun=lastRun, forcerunselection=forcerunselection), 255 ) )
    if not splitFileList:
        raise AllInOneError("No files found for dataset {}.  Check the spelling, or maybe specify another das instance?".format(self.__name))
    fileStr = [ "',\n'".join( files )
                for files in splitFileList ]
    fileStr = [ "readFiles.extend( [\n'" + files + "'\n] )"
                for files in fileStr ]
    files = "\n".join( fileStr )
    if parent:
        splitParentFileList = list( self.__chunks( self.fileList(parent=True, firstRun=firstRun, lastRun=lastRun, forcerunselection=forcerunselection), 255 ) )
        parentFileStr = [ "',\n'".join( parentFiles )
                          for parentFiles in splitParentFileList ]
        parentFileStr = [ "secFiles.extend( [\n'" + parentFiles + "'\n] )"
                          for parentFiles in parentFileStr ]
        parentFiles = "\n".join( parentFileStr )
        files += "\n\n" + parentFiles
def __createSnippet( self, jsonPath = None, begin = None, end = None,
                     firstRun = None, lastRun = None, repMap = None,
                     crab = False, parent = False ):
    if firstRun:
        firstRun = int( firstRun )
    if lastRun:
        lastRun = int( lastRun )
    if ( begin and firstRun ) or ( end and lastRun ):
        msg = ( "The Usage of "
                + "'begin' & 'firstRun' " * int( bool( begin and firstRun ) )
                + "and " * int( bool( ( begin and firstRun ) and
                                      ( end and lastRun ) ) )
                + "'end' & 'lastRun' " * int( bool( end and lastRun ) ) )
        # (the tail of this message is not shown in this excerpt)
        raise AllInOneError( msg )
    if begin or end:
        ( firstRun, lastRun ) = self.convertTimeToRun(
            begin = begin, end = end, firstRun = firstRun,
            lastRun = lastRun )
    if ( firstRun and lastRun ) and ( firstRun > lastRun ):
        msg = ( "The lower time/runrange limit ('begin'/'firstRun') "
                "chosen is greater than the upper time/runrange limit "
                "('end'/'lastRun').")
        raise AllInOneError( msg )
    lumiSecExtend = self.__lumiSelectionSnippet(jsonPath=jsonPath, firstRun=firstRun, lastRun=lastRun)
    lumiStr = goodLumiSecStr = ""
    if lumiSecExtend:
        goodLumiSecStr = "lumiSecs = cms.untracked.VLuminosityBlockRange()\n"
        lumiStr = " lumisToProcess = lumiSecs,\n"

    files = self.__fileListSnippet(crab=crab, parent=parent, firstRun=firstRun, lastRun=lastRun, forcerunselection=False)

    theMap = repMap
    theMap["files"] = files
    theMap["json"] = jsonPath
    theMap["lumiStr"] = lumiStr
    theMap["goodLumiSecStr"] = goodLumiSecStr%( theMap )
    theMap["lumiSecExtend"] = lumiSecExtend
    if crab:
        dataset_snippet = self.__dummy_source_template%( theMap )
    else:
        dataset_snippet = self.__source_template%( theMap )
    return dataset_snippet
def __find_lt( self, a, x ):
    'Find rightmost value less than x'
    i = bisect.bisect_left( a, x )
    if i:
        return a[i-1]
    raise ValueError

def __find_ge( self, a, x ):
    'Find leftmost item greater than or equal to x'
    i = bisect.bisect_left( a, x )
    if i != len( a ):
        return a[i]
    raise ValueError
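# Hedged sketch (not from the original file): the two helpers above follow the
# standard bisect recipes.  With a sorted run list, find_lt gives the last run
# strictly below x and find_ge the first run at or above x; convertTimeToRun
# uses them to map a date window onto a run range.
import bisect as _bisect_sketch
_sketch_runs = [190456, 194050, 199021, 207898]
assert _sketch_runs[_bisect_sketch.bisect_left(_sketch_runs, 195000) - 1] == 194050   # find_lt
assert _sketch_runs[_bisect_sketch.bisect_left(_sketch_runs, 195000)] == 199021       # find_ge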
def __findInJson(self, jsondict, strings):
    if isinstance(strings, str):
        strings = [ strings ]

    if len(strings) == 0:
        return jsondict
    if isinstance(jsondict, dict):
        if strings[0] in jsondict:
            try:
                return self.__findInJson(jsondict[strings[0]], strings[1:])
            except (TypeError, KeyError):
                pass
    # (a similar branch that walks lists of dicts is elided here)
    raise KeyError("Can't find " + strings[0])
327 """s must be in the format run1:lum1-run2:lum2""" 329 run1 = s.split(
"-")[0].
split(
":")[0]
330 lum1 = s.split(
"-")[0].
split(
":")[1]
332 run2 = s.split(
"-")[1].
split(
":")[0]
333 lum2 = s.split(
"-")[1].
split(
":")[1]
337 if int(run2) < firstRun
or int(run1) > lastRun:
339 if int(run1) < firstRun
or firstRun < 0:
342 if int(run2) > lastRun:
349 return "%s:%s-%s:%s" % (run1, lum1, run2, lum2)
def getForceRunRangeFunction(self, firstRun, lastRun):
    def forcerunrangefunction(s):
        return self.forcerunrange(firstRun, lastRun, s)
    return forcerunrangefunction
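# Hedged sketch (not from the original file): how the two methods above combine.
# re.sub finds every "run:lumi" or "run:lumi-run:lumi" token in a lumi-selection
# snippet and clamps it to [firstRun, lastRun]; ranges entirely outside the
# window become empty strings.  The clamping function below is a simplified
# stand-in for forcerunrange.
import re as _re_sketch

def _clamp_range_sketch(firstRun, lastRun):
    def repl(match):
        s = match.group()
        parts = s.split("-")
        run1, lum1 = parts[0].split(":")
        run2, lum2 = parts[-1].split(":")
        if int(run2) < firstRun or int(run1) > lastRun:
            return ""
        if int(run1) < firstRun:
            run1, lum1 = firstRun, 1
        if int(run2) > lastRun:
            run2, lum2 = lastRun, "max"
        return "%s:%s-%s:%s" % (run1, lum1, run2, lum2)
    return repl

_sketch_lumis = "190456:1-190456:max 200000:1-210000:max"
assert _re_sketch.sub(r"\d+:(\d+|max)(-\d+:(\d+|max))?",
                      _clamp_range_sketch(195000, 207898),
                      _sketch_lumis) == " 200000:1-207898:max"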
def __getData( self, dasQuery, dasLimit = 0 ):
    dasData = das_client.get_data(dasQuery, dasLimit)
    if isinstance(dasData, str):
        jsondict = json.loads( dasData )
    else:
        jsondict = dasData
    # check whether the DAS query succeeded
    try:
        error = self.__findInJson(jsondict, ["data", "error"])
    except KeyError:
        error = None
    if error or self.__findInJson(jsondict, "status") != 'ok' or "data" not in jsondict:
        jsonstr = str(jsondict)
        if len(jsonstr) > 10000:
            jsonfile = "das_query_output_%i.txt"
            i = 0
            while os.path.lexists(jsonfile % i):
                i += 1
            jsonfile = jsonfile % i
            theFile = open( jsonfile, "w" )
            theFile.write( jsonstr )
            theFile.close()
            msg = "The DAS query returned an error.  The output is very long, and has been stored in:\n" + jsonfile
        else:
            msg = "The DAS query returned an error.  Here is the output\n" + jsonstr
        msg += "\nIt's possible that this was a server error.  If so, it may work if you try again later"
        raise AllInOneError(msg)
    return self.__findInJson(jsondict, "data")

# excerpt: determining the data type ("data" or "mc"), either from the
# "#data type: " header comment of a predefined _cff.py file or from DAS
    with open(self.__fileName) as f:
        for line in f.readlines():
            if line.startswith("#data type: "):
                if datatype is not None:
                    ...   # duplicate "#data type" lines are treated as an error
                datatype = line.replace("#data type: ", "").replace("\n", "")

    dasQuery_type = ( 'dataset dataset=%s instance=%s detail=true | grep dataset.datatype,'
                      'dataset.name'%( self.__name, self.__dasinstance ) )
    ...
    print ("Cannot find the datatype of the dataset '%s'\n"
           "It may not be possible to automatically find the magnetic field,\n"
           "and you will not be able to run in CRAB mode"%( self.__name ))

# excerpt: a failing DAS query elsewhere raises an error that includes the raw output
    raise AllInOneError("Here is the DAS output:\n" + str(jsondict) +
                        "\nIt's possible that this was a server error.  If so, it may work if you try again later")
def __getMagneticField( self ):
    Bfieldlocation = os.path.join( self.__cmssw, "python", "Configuration", "StandardSequences" )
    if not os.path.isdir(Bfieldlocation):
        Bfieldlocation = os.path.join( self.__cmsswrelease, "python", "Configuration", "StandardSequences" )
    Bfieldlist = [ f.replace("_cff.py", '')
                   for f in os.listdir(Bfieldlocation)
                   if f.startswith("MagneticField_") and f.endswith("_cff.py") ]
    Bfieldlist.sort( key = lambda Bfield: -len(Bfield) )   # longest names first
434 return "MagneticField" 436 return "MagneticField_0T" 444 for line
in f.readlines():
445 if line.startswith(
"#data type: "):
446 if datatype
is not None:
448 datatype = line.replace(
"#data type: ",
"").
replace(
"\n",
"")
449 datatype = datatype.split(
"#")[0].
strip()
450 if line.startswith(
"#magnetic field: "):
451 if Bfield
is not None:
453 Bfield = line.replace(
"#magnetic field: ",
"").
replace(
"\n",
"")
454 Bfield = Bfield.split(
"#")[0].
strip()
455 if Bfield
is not None:
456 Bfield = Bfield.split(
",")[0]
457 if Bfield
in Bfieldlist
or Bfield ==
"unknown":
460 print(
"Your dataset has magnetic field '%s', which does not exist in your CMSSW version!" % Bfield)
461 print(
"Using Bfield='unknown' - this will revert to the default")
463 elif datatype ==
"data":
464 return "MagneticField" 469 return "MagneticField" 477 Bfield = self.
__findInJson(data, [
"dataset",
"mcm",
"sequences",
"magField"])
478 if Bfield
in Bfieldlist:
480 elif Bfield ==
"38T" or Bfield ==
"38T_PostLS1":
481 return "MagneticField" 482 elif "MagneticField_" + Bfield
in Bfieldlist:
483 return "MagneticField_" + Bfield
487 print(
"Your dataset has magnetic field '%s', which does not exist in your CMSSW version!" % Bfield)
488 print(
"Using Bfield='unknown' - this will revert to the default magnetic field")
493 for possibleB
in Bfieldlist:
494 if (possibleB !=
"MagneticField" 495 and possibleB.replace(
"MagneticField_",
"")
in self.__name.replace(
"TkAlCosmics0T",
"")):
498 if possibleB ==
"MagneticField_38T" or possibleB ==
"MagneticField_38T_PostLS1":
499 return "MagneticField" 505 """For MC, this returns the same as the previous function. 506 For data, it gets the magnetic field from the runs. This is important for 507 deciding which template to use for offlinevalidation 514 Bfield = self.__magneticField.split(
"T")[0].
replace(
"MagneticField_",
"")
516 return float(Bfield) / 10.0
    if self.__predefined:
        # for a predefined dataset the numeric field value is part of the
        # "#magnetic field: <name>,<value>" header line
        with open(self.__fileName) as f:
            Bfield = None
            for line in f.readlines():
                if line.startswith("#magnetic field: ") and "," in line:
                    if Bfield is not None:
                        ...   # duplicate lines are treated as an error
                    ...

    if run > 0:
        dasQuery = ('run=%s instance=%s detail=true'%(run, self.__dasinstance))
        data = self.__getData(dasQuery)
        try:
            return self.__findInJson(data, ["run", "bfield"])
        except KeyError:
            return "unknown Can't get the magnetic field for run %s from DAS" % run
540 return "unknown Can't get the exact magnetic field for the dataset until data has been retrieved from DAS." 544 if abs(firstrunB - lastrunB) <= tolerance:
545 return .5*(firstrunB + lastrunB)
546 print(firstrunB, lastrunB, tolerance)
547 return (
"unknown The beginning and end of your run range for %s\n" 548 "have different magnetic fields (%s, %s)!\n" 549 "Try limiting the run range using firstRun, lastRun, begin, end, or JSON,\n" 550 "or increasing the tolerance (in dataset.py) from %s.") % (self.
__name, firstrunB, lastrunB, tolerance)
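# Hedged illustration (not from the original file) of two behaviours of the
# magnetic-field logic above: a field name like "MagneticField_38T" maps to
# 3.8 tesla, and two per-run values that agree within the default tolerance
# of 0.5 are simply averaged.
assert float("MagneticField_38T".split("T")[0].replace("MagneticField_", "")) / 10.0 == 3.8
_firstrunB, _lastrunB, _tolerance = 3.79, 3.81, 0.5
assert abs(_firstrunB - _lastrunB) <= _tolerance
assert abs(.5 * (_firstrunB + _lastrunB) - 3.8) < 1e-9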
553 if "unknown" in firstrunB:
564 extendstring =
"secFiles.extend" 566 extendstring =
"readFiles.extend" 567 with open(self.__fileName)
as f:
570 for line
in f.readlines():
574 files.append({name: line.translate(
None,
"', " +
'"')})
575 if extendstring
in line
and "[" in line
and "]" not in line:
    searchdataset = self.__name
    dasQuery_files = ( 'file dataset=%s instance=%s detail=true | grep file.name, file.nevents, '
                       'file.creation_time, '
                       'file.modification_time'%( searchdataset, self.__dasinstance ) )
    print("Requesting file information for '%s' from DAS..."%( searchdataset ), end=' ')
    data = self.__getData( dasQuery_files, dasLimit )
    data = [ self.__findInJson(entry, "file") for entry in data ]
    if len( data ) == 0:
        msg = ("No files are available for the dataset '%s'. This can be "
               "due to a typo or due to a DAS problem. Please check the "
               "spelling of the dataset and/or retry to run "
               "'validateAlignments.py'."%( self.name() ))
        raise AllInOneError( msg )
    fileInformationList = []
    for file in data:
        fileName = self.__findInJson(file, "name")
        fileCreationTime = self.__findInJson(file, "creation_time")
        try:
            fileNEvents = self.__findInJson(file, "nevents")
        except KeyError:
            print(("DAS query gives bad output for file '%s'.  Skipping it.\n"
                   "It may work if you try again later.") % fileName)
            continue
        fileDict = { "name": fileName,
                     "creation_time": fileCreationTime,
                     "nevents": fileNEvents
                   }
        fileInformationList.append( fileDict )
    fileInformationList.sort( key=lambda info: self.__findInJson(info, "name") )
    return fileInformationList
# excerpt: querying the list of runs in the dataset from DAS
    dasQuery_runs = ( 'run dataset=%s instance=%s | grep run.run_number,'
                      'run.creation_time'%( self.__name, self.__dasinstance ) )
    print("Requesting run information for '%s' from DAS..."%( self.__name ), end=' ')
    data = self.__getData( dasQuery_runs )
    data = [ self.__findInJson(entry, "run") for entry in data ]
    data.sort( key = lambda run: self.__findInJson(run, "run_number") )
    return data
def __datetime(self, stringForDas):
    if len(stringForDas) != 8:
        raise AllInOneError(stringForDas + " is not a valid date string.\n"
                            + "DAS accepts dates in the form 'yyyymmdd'")
    year = stringForDas[:4]
    month = stringForDas[4:6]
    day = stringForDas[6:8]
    return datetime.date(int(year), int(month), int(day))

def __dateString(self, date):
    return str(date.year) + str(date.month).zfill(2) + str(date.day).zfill(2)
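# Hedged illustration (not from the original file): round trip between the
# 'yyyymmdd' strings DAS expects and datetime.date objects, as done by the two
# helpers above.
import datetime as _dt_sketch
_sketch_date = _dt_sketch.date(2012, 7, 4)
assert str(_sketch_date.year) + str(_sketch_date.month).zfill(2) + str(_sketch_date.day).zfill(2) == "20120704"
assert _dt_sketch.date(int("20120704"[:4]), int("20120704"[4:6]), int("20120704"[6:8])) == _sketch_date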
def convertTimeToRun( self, begin = None, end = None,
                      firstRun = None, lastRun = None,
                      shortTuple = True ):
    if ( begin and firstRun ) or ( end and lastRun ):
        msg = ( "The Usage of "
                + "'begin' & 'firstRun' " * int( bool( begin and firstRun ) )
                + "and " * int( bool( ( begin and firstRun ) and
                                      ( end and lastRun ) ) )
                + "'end' & 'lastRun' " * int( bool( end and lastRun ) ) )
        # (the tail of this message is not shown in this excerpt)
        raise AllInOneError( msg )
    # runList below is the sorted list of run numbers in the dataset
    if begin:
        lastdate = begin
        for delta in [ 1, 5, 10, 20, 30 ]:   # search up to ~2 months after 'begin'
            firstdate = lastdate
            lastdate = self.__dateString(self.__datetime(firstdate) + datetime.timedelta(delta))
            dasQuery_begin = "run date between[%s,%s] instance=%s" % (firstdate, lastdate, self.__dasinstance)
            begindata = self.__getData(dasQuery_begin)
            if len(begindata) > 0:
                begindata.sort(key = lambda run: self.__findInJson(run, ["run", "run_number"]))
                try:
                    runIndex = self.__find_ge( runList, self.__findInJson(begindata[0], ["run", "run_number"]))
                except ValueError:
                    msg = ( "Your 'begin' is after the creation time of the last "
                            "run in the dataset\n'%s'"%( self.__name ) )
                    raise AllInOneError( msg )
                firstRun = runList[runIndex]
                begin = None
                break

    if begin:
        raise AllInOneError("No runs within a reasonable time interval after your 'begin'."
                            "Try using a 'begin' that has runs soon after it (within 2 months at most)")
    if end:
        firstdate = end
        for delta in [ 1, 5, 10, 20, 30 ]:   # search up to ~2 months before 'end'
            lastdate = firstdate
            firstdate = self.__dateString(self.__datetime(lastdate) - datetime.timedelta(delta))
            dasQuery_end = "run date between[%s,%s] instance=%s" % (firstdate, lastdate, self.__dasinstance)
            enddata = self.__getData(dasQuery_end)
            if len(enddata) > 0:
                enddata.sort(key = lambda run: self.__findInJson(run, ["run", "run_number"]))
                try:
                    runIndex = self.__find_lt( runList, self.__findInJson(enddata[-1], ["run", "run_number"]))
                except ValueError:
                    msg = ( "Your 'end' is before the creation time of the first "
                            "run in the dataset\n'%s'"%( self.__name ) )
                    raise AllInOneError( msg )
                lastRun = runList[runIndex]
                end = None
                break

    if end:
        raise AllInOneError("No runs within a reasonable time interval before your 'end'."
                            "Try using an 'end' that has runs soon before it (within 2 months at most)")

    if shortTuple:
        return firstRun, lastRun
    else:
        return begin, end, firstRun, lastRun
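# Hedged illustration (not from the original file): the kind of DAS query string
# convertTimeToRun builds for a 5-day window after a 'begin' date of 20121128
# (the instance name "prod/global" is invented for this example).
assert ("run date between[%s,%s] instance=%s" % ("20121128", "20121203", "prod/global")
        == "run date between[20121128,20121203] instance=prod/global")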
def datasetSnippet( self, jsonPath = None, begin = None, end = None,
                    firstRun = None, lastRun = None, crab = False, parent = False ):
    if not firstRun: firstRun = None
    if not lastRun: lastRun = None
    if not begin: begin = None
    if not end: end = None
    if self.__predefined and (jsonPath or begin or end or firstRun or lastRun):
        msg = ( "The parameters 'JSON', 'begin', 'end', 'firstRun', and 'lastRun' "
                "only work for official datasets, not predefined _cff.py files" )
        raise AllInOneError( msg )
    if self.__predefined and parent:
        with open(self.__fileName) as f:
            if "secFiles.extend" not in f.read():
                msg = ( "The predefined dataset '%s' does not contain secondary files, "
                        "which your validation requires!" ) % self.__name
                ...
                print ("Retrieving the files from DAS.  You will be asked if you want "
                       "to overwrite the old dataset.\n"
                       "It will still be compatible with validations that don't need secondary files.")
    if self.__predefined:
        snippet = ( "process.load(\"Alignment.OfflineValidation.%s_cff\")\n"
                    "process.maxEvents = cms.untracked.PSet(\n"
                    "    input = cms.untracked.int32(.oO[nEvents]Oo. / .oO[parallelJobs]Oo.)\n"
                    ")\n"
                    "process.source.skipEvents=cms.untracked.uint32(.oO[nIndex]Oo.*.oO[nEvents]Oo./.oO[parallelJobs]Oo.)"
                    %( self.__name ))
        if not parent:
            with open(self.__fileName) as f:
                if "secFiles.extend" in f.read():
                    snippet += "\nprocess.source.secondaryFileNames = cms.untracked.vstring()"
        return snippet
    theMap = { "process": "process.",
               "tab": " " * len( "process." ),
               "nEvents": ".oO[nEvents]Oo. / .oO[parallelJobs]Oo.",
               "skipEventsString": "process.source.skipEvents=cms.untracked.uint32(.oO[nIndex]Oo.*.oO[nEvents]Oo./.oO[parallelJobs]Oo.)\n",
             }
    datasetSnippet = self.__createSnippet( jsonPath = jsonPath,
                                           begin = begin, end = end,
                                           firstRun = firstRun, lastRun = lastRun,
                                           repMap = theMap, crab = crab, parent = parent )
    if jsonPath == "" and begin == "" and end == "" and firstRun == "" and lastRun == "":
        try:
            self.dump_cff(parent = parent)
        except AllInOneError as e:
            print("Can't store the dataset as a cff:")
            print(e)
            print("This may be inconvenient in the future, but will not cause a problem for this validation.")
    return datasetSnippet
def dump_cff( self, outName = None, jsonPath = None, begin = None,
              end = None, firstRun = None, lastRun = None, parent = False ):
    if outName is None:
        outName = "Dataset" + self.__name.replace("/", "_")
    packageName = os.path.join( "Alignment", "OfflineValidation" )
    if not os.path.exists( os.path.join(
        self.__cmssw, "src", packageName ) ):
        msg = ("You try to store the predefined dataset '%s'.\n"
               "For that you need to check out the package '%s' to your "
               "private release area in\n"%( outName, packageName )
               + self.__cmssw )
        raise AllInOneError( msg )
    theMap = { "process": "",
               "nEvents": str( -1 ),
               "skipEventsString": "",
               "importCms": "import FWCore.ParameterSet.Config as cms\n",
               "header": "#Do not delete or (unless you know what you're doing) change these comments\n"
                         "#data type: %(dataType)s\n"
                         "#magnetic field: .oO[magneticField]Oo.\n"
             }
    ...   # the dataset snippet (dataset_cff) is built from theMap here
    magneticField = self.__magneticField
    if magneticField == "MagneticField":
        magneticField = "%s, %s     #%s" % (magneticField,
                                            str(self.__getMagneticFieldForRun()).split(",")[0].strip(),
                                            "Use MagneticField_cff.py; the number is for determining which track selection to use.")
    dataset_cff = dataset_cff.replace(".oO[magneticField]Oo.", magneticField)
    filePath = os.path.join( self.__cmssw, "src", packageName,
                             "python", outName + "_cff.py" )
    if os.path.exists( filePath ):
        existMsg = "The predefined dataset '%s' already exists.\n"%( outName )
        askString = "Do you want to overwrite it? [y/n]\n"
        inputQuery = existMsg + askString
        while True:
            userInput = raw_input( inputQuery ).lower()
            if userInput == "y":
                break
            elif userInput == "n":
                return
            else:
                inputQuery = askString
    print ( "The predefined dataset '%s' will be stored in the file\n"%( outName )
            + filePath +
            "\nFor future use you have to do 'scram b'." )
    theFile = open( filePath, "w" )
    theFile.write( dataset_cff )
    theFile.close()
def createdatasetfile_hippy(self, filename, filesperjob, firstrun, lastrun):
    with open(filename, "w") as f:
        for job in self.__chunks(self.fileList(firstRun=firstrun, lastRun=lastrun, forcerunselection=True), filesperjob):
            f.write(",".join("'{}'".format(file) for file in job) + "\n")
def getrunnumberfromfilename(filename):
    parts = filename.split("/")
    result = error = None
    if parts[0] != "" or parts[1] != "store":
        error = "does not start with /store"
    elif parts[2] in ["mc", "relval"]:
        result = 1
    elif not parts[-1].endswith(".root"):
        error = "does not end with something.root"
    elif len(parts) != 12:
        error = "should be exactly 11 slashes counting the first one"
    else:
        runnumberparts = parts[-5:-2]
        if not all(len(part) == 3 for part in runnumberparts):
            error = "the 3 directories {} do not have length 3 each".format("/".join(runnumberparts))
        try:
            result = int("".join(runnumberparts))
        except ValueError:
            error = "the 3 directories {} do not form an integer".format("/".join(runnumberparts))

    if error:
        error = "could not figure out which run number this file is from:\n{}\n{}".format(filename, error)
        raise AllInOneError(error)
    return result
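# Hedged illustration (not from the original file): for a data LFN the run
# number is encoded in three 3-digit directories, e.g. .../000/207/898/...
# maps to run 207898.  The path below is invented but follows that layout.
_sketch_lfn = "/store/data/Run2012D/MinimumBias/ALCARECO/TkAlMinBias-v1/000/207/898/00000/ABCD1234.root"
_sketch_parts = _sketch_lfn.split("/")
assert len(_sketch_parts) == 12 and _sketch_parts[1] == "store"
assert int("".join(_sketch_parts[-5:-2])) == 207898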
def fileList(self, parent=False, firstRun=None, lastRun=None, forcerunselection=False):
    fileList = [ self.__findInJson(fileInfo, "name")
                 for fileInfo in self.fileInfoList(parent) ]

    if firstRun or lastRun:
        if not firstRun: firstRun = -1
        if not lastRun: lastRun = float('infinity')
        unknownfilenames, reasons = [], set()
        for filename in fileList[:]:
            try:
                if not firstRun <= self.getrunnumberfromfilename(filename) <= lastRun:
                    fileList.remove(filename)
            except AllInOneError as e:
                if forcerunselection: raise
                unknownfilenames.append(e.message.split("\n")[1])
                reasons.add(e.message.split("\n")[2])
        if reasons:
            if len(unknownfilenames) == len(fileList):
                print("Could not figure out the run numbers of any of the filenames for the following reason(s):")
            else:
                print("Could not figure out the run numbers of the following filenames:")
                for filename in unknownfilenames:
                    print("    " + filename)
                print("for the following reason(s):")
            for reason in reasons:
                print("    " + reason)
            print("Using the files anyway.  The runs will be filtered at the CMSSW level.")
    return fileList
if __name__ == '__main__':
    print("Start testing...")
    datasetName = '/MinimumBias/Run2012D-TkAlMinBias-v1/ALCARECO'
    jsonFile = ( '/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/'
                 'Collisions12/8TeV/Prompt/'
                 'Cert_190456-207898_8TeV_PromptReco_Collisions12_JSON.txt' )
    dataset = Dataset( datasetName )
    print(dataset.datasetSnippet( jsonPath = jsonFile ))
    dataset.dump_cff( outName = "Dataset_Test_TkAlMinBias_Run2012D" )
def __lumiSelectionSnippet(self, jsonPath=None, firstRun=None, lastRun=None)
def __getFileInfoList(self, dasLimit, parent=False)
def __getMagneticFieldForRun(self, run=-1, tolerance=0.5)
def datasetSnippet(self, jsonPath=None, begin=None, end=None, firstRun=None, lastRun=None, crab=False, parent=False)
def __createSnippet(self, jsonPath=None, begin=None, end=None, firstRun=None, lastRun=None, repMap=None, crab=False, parent=False)
def __findInJson(self, jsondict, strings)
def magneticFieldForRun(self, run=-1)
def replace(string, replacements)
def fileInfoList(self, parent=False)
def __getMagneticField(self)
def forcerunrange(self, firstRun, lastRun, s)
def getrunnumberfromfilename(filename)
def getForceRunRangeFunction(self, firstRun, lastRun)
def createdatasetfile_hippy(self, filename, filesperjob, firstrun, lastrun)
def convertTimeToRun(self, begin=None, end=None, firstRun=None, lastRun=None, shortTuple=True)
def __find_lt(self, a, x)
def __datetime(self, stringForDas)
tuple __dummy_source_template
def __dateString(self, date)
def dump_cff(self, outName=None, jsonPath=None, begin=None, end=None, firstRun=None, lastRun=None, parent=False)
def __chunks(self, theList, n)
def __getParentDataset(self)
def __fileListSnippet(self, crab=False, parent=False, firstRun=None, lastRun=None, forcerunselection=False)
def __find_ge(self, a, x)
def __getData(self, dasQuery, dasLimit=0)
def fileList(self, parent=False, firstRun=None, lastRun=None, forcerunselection=False)