from __future__ import print_function

import os, sys, optparse, DLFCN, xmlrpclib

from pluginCondDBPyInterface import *
from CondCore.Utilities import iovInspector as inspect

try:
    import simplejson as json
except ImportError:
    print("Please use lxplus or set an environment (for example crab) with json lib available")
    sys.exit()
print("### command line:")
copyargs = sys.argv[:]
for i in range(len(copyargs)):
    # quote arguments that contain spaces so the echoed command line stays valid
    if copyargs[i].find(" ") != -1:
        copyargs[i] = "\"%s\"" % copyargs[i]
commandline = " ".join(copyargs)
print(commandline)

infotofile = ["### %s\n" % commandline]
usage = '%prog [options]\n\n' +\
        'Creates a Python configuration file with filenames for runs in specified run range, with certain min B field and data quality requirements.'

parser = optparse.OptionParser(usage)

parser.add_option("-d", "--alcaDataset",
                  help="[REQUIRED] Name of the input AlCa dataset to get filenames from.")

parser.add_option("-m", "--isMC",
                  help="Whether sample is MC (true) or real data (false).")

parser.add_option("-s", "--startRun",
                  help="First run number in range.")

parser.add_option("-e", "--endRun",
                  help="Last run number in range.")

parser.add_option("-b", "--minB",
                  help="Lower limit on minimal B field for a run.")

parser.add_option("--maxB",
                  help="Upper limit on B field for a run.")

parser.add_option("-r", "--runRegistry",
                  help="If present, use the RunRegistry API for the B field and data quality query")

parser.add_option("-j", "--json",
                  help="If present with a JSON file as argument, use the JSON file for the good runs and ignore B field and --runRegistry options. "+\
                       "The latest JSON file is available at /afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions10/7TeV/StreamExpress/")

parser.add_option("-t", "--dbTag",
                  help="Runinfo DB tag to use.",
                  default="runinfo_31X_hlt")

parser.add_option("--printTags",
                  help="If present, the only thing the script will do is print the list of tags in the DB")

parser.add_option("--dbName",
                  help="RunInfo DB name to use. The default one is "+\
                       "'oracle://cms_orcoff_prod/CMS_COND_31X_RUN_INFO'",
                  default="oracle://cms_orcoff_prod/CMS_COND_31X_RUN_INFO")

parser.add_option("--dqDataset",
                  help="Dataset name to query for good data quality runs. "+\
                       "If this option is not used, dqDataset=alcaDataset is automatically set. "+\
                       "If alcaDataset does not have DQ information, use /Cosmics/Commissioning08-v1/RAW for CRAFT08 "+\
                       "and /Cosmics/CRAFT09-v1/RAW for CRAFT09")

parser.add_option("-c", "--dqCriteria",
                  help="Set of DQ criteria to use with the -dq flag of dbs.\n"+\
                       "An example of a really strict condition:\n"
                       "'DT_Shift_Offline=GOOD&CSC_Shift_Offline=GOOD&SiStrip_Shift_Offline=GOOD&Pixel_Shift_Offline=GOOD' "
                       "NOTE: if --runRegistry is used, the DQ criteria syntax must follow the Run Registry advanced query syntax, e.g.: "
                       "\"{cmpDt}='GOOD' and {cmpCsc}='GOOD' and {cmpStrip}='GOOD' and {cmpPix}='GOOD'\"")

parser.add_option("-o", "--outputFile",
                  help="Name for the output file (please include the .py suffix)",
                  default="filelist.py")

parser.add_option("-v", "--verbose",
                  help="Degree of debug info verbosity")

options, args = parser.parse_args()
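# A hedged usage sketch.  The script name shown here (findQualityFiles.py) is assumed,
# and the dataset, run range and field window are placeholders, not values taken from
# this file:
#
#   findQualityFiles.py --alcaDataset /Cosmics/Commissioning08-v1/RAW --isMC false \
#       --startRun 66000 --endRun 70000 --minB 3.7 --maxB 3.9 \
#       --dqCriteria "DT_Shift_Offline=GOOD&CSC_Shift_Offline=GOOD" --outputFile filelist.py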
if options.alcaDataset == '' and not options.printTags:
    print("--alcaDataset /your/dataset/name is required!")
    sys.exit()

if options.dqDataset == '':
    options.dqDataset = options.alcaDataset

if not (options.isMC == 'true' or options.isMC == 'false'):
    print("--isMC option can have only 'true' or 'false' arguments")
    sys.exit()

v = options.verbose

# translate the B field limits into the solenoid current limits stored in the RunInfo DB
minI = options.minB*18160/3.8
maxI = options.maxB*18160/3.8
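# The conversion above uses the mapping hard-coded in this script: a solenoid current
# of 18160 A corresponds to a 3.8 T field, scaled linearly.  A worked example with
# illustrative limits:
#   minB = 3.7 T  ->  minI = 3.7*18160/3.8 ~ 17682 A
#   maxB = 3.9 T  ->  maxI = 3.9*18160/3.8 ~ 18638 A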
rr = ''
if options.runRegistry: rr = ' --runRegistry'

jj = ''
if options.json != '': jj = ' --json ' + options.json

allOptions = '### ' + copyargs[0] + ' --alcaDataset ' + options.alcaDataset + ' --isMC ' + options.isMC + \
             ' --startRun ' + str(options.startRun) + ' --endRun ' + str(options.endRun) + \
             ' --minB ' + str(options.minB) + ' --maxB ' + str(options.maxB) + rr + jj + \
             ' --dbTag ' + options.dbTag + ' --dqDataset ' + options.dqDataset + ' --dqCriteria "' + options.dqCriteria + '"' \
             ' --outputFile ' + options.outputFile

print("### all options, including default:")
print(allOptions)
######## functions definitions ########

def getGoodBRuns():
    """Obtain the list of runs with good B field from the RunInfo DB."""

    runs_b_on = []

    sys.setdlopenflags(DLFCN.RTLD_GLOBAL+DLFCN.RTLD_LAZY)

    rdbms = RDBMS("/afs/cern.ch/cms/DB/conddb")
    db = rdbms.getDB(options.dbName)

    if options.printTags:
        print("\nOverview of all tags in "+options.dbName+" :\n")
        print(db.allTags())
        sys.exit()

    tag = options.dbTag
    what = {}

    try:
        iov = inspect.Iov(db, tag)

        print("######## summaries ########")
        for x in iov.summaries():
            print(x[0], x[1], x[2], x[3])

        print("###(start_current,stop_current,avg_current,max_current,min_current,run_interval_micros) vs runnumber###")
        print(iov.trend(what))

        print("######## trends ########")
        for x in iov.trendinrange(what, options.startRun-1, options.endRun+1):
            if v > 0 or x[0] == 67647 or x[0] == 66893 or x[0] == 67264:
                print(x[0], x[1], x[2], x[2][4], x[2][3])
            # keep the run if its minimal (x[2][4]) and maximal (x[2][3]) currents
            # are within the requested window
            if x[2][4] >= minI and x[2][3] <= maxI:
                runs_b_on.append(int(x[0]))

    except Exception as er:
        print(er)

    print("### runs with good B field ###")
    print(runs_b_on)

    return runs_b_on
def getGoodQRuns():
    """Obtain the list of good data quality runs from DBS."""

    runs_good_dq = []

    dbs_quiery = "find run where dataset="+options.dqDataset+" and dq="+options.dqCriteria
    print('dbs search --noheader --query="'+dbs_quiery+'" | sort')

    os.system('python $DBSCMD_HOME/dbsCommandLine.py -c search --noheader --query="'+dbs_quiery+'" | sort > /tmp/runs_full_of_pink_bunnies')

    ff = open('/tmp/runs_full_of_pink_bunnies', "r")
    line = ff.readline()
    while line and line != '':
        runs_good_dq.append(int(line))
        line = ff.readline()
    ff.close()

    os.system('rm /tmp/runs_full_of_pink_bunnies')

    print("### runs with good quality ###")
    print(runs_good_dq)

    return runs_good_dq
def getRunRegistryGoodRuns():
    """Obtain the list of good B field and data quality runs from the Run Registry."""

    server = xmlrpclib.ServerProxy('http://pccmsdqm04.cern.ch/runregistry/xmlrpc')

    rr_quiery = "{runNumber}>="+str(options.startRun)+" and {runNumber}<="+str(options.endRun)+\
                " and {bfield}>="+str(options.minB)+" and {bfield}<="+str(options.maxB)
    if options.dqCriteria != "": rr_quiery += " and "+options.dqCriteria

    rrstr = server.RunDatasetTable.export('GLOBAL', 'chart_runs_cum_evs_vs_bfield', rr_quiery)
    # the server returns a string; quote the keys so it can be evaluated into a dict
    rrstr = rrstr.replace("bfield", "'bfield'")
    rrstr = rrstr.replace("events", "'events'")
    rrdata = eval(rrstr)

    runs_good = []
    for rr in rrdata['events']: runs_good.append(rr[0])

    return runs_good
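# For illustration, with startRun=135000, endRun=137000, minB=3.7, maxB=3.9 and the
# advanced-syntax criteria quoted in the --dqCriteria help (all values are
# placeholders), rr_quiery would read:
#   {runNumber}>=135000 and {runNumber}<=137000 and {bfield}>=3.7 and {bfield}<=3.9
#   and {cmpDt}='GOOD' and {cmpCsc}='GOOD' and {cmpStrip}='GOOD' and {cmpPix}='GOOD'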
def getJSONGoodRuns():
    """Obtain the list of good runs from a JSON file."""

    jsonfile = open(options.json, 'r')
    jsondict = json.load(jsonfile)

    runs_good = []
    for run in jsondict.keys(): runs_good.append(int(run))

    return runs_good
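# Only the dictionary keys (run numbers) are used above.  A minimal sketch of such a
# certification JSON file (contents are illustrative, not taken from this script):
#   {"132440": [[1, 100]], "132442": [[1, 50], [60, 80]]}
# i.e. run numbers as string keys mapped to luminosity-section ranges, which this
# script ignores.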
# get the good B field runs (data only, unless the RunRegistry or a JSON file is used)
runs_b_on = []
if options.isMC == 'false' and not options.runRegistry and options.json == '':
    runs_b_on = getGoodBRuns()

    infotofile.append("### runs with good B field ###\n")
    infotofile.append("### %s\n" % str(runs_b_on))
# get the good data quality runs and intersect them with the good B field runs
runs_good_dq = []
runs_good = []
if options.isMC == 'false' and not options.runRegistry and options.json == '':
    runs_good_dq = getGoodQRuns()

    infotofile.append("### runs with good quality ###\n")
    infotofile.append("### %s\n" % str(runs_good_dq))

    # runs with both good B field and good data quality
    runs_good = [val for val in runs_b_on if val in runs_good_dq]

    print("### runs with good B field and quality ###")
    print(runs_good)

    infotofile.append("### runs with good B field and quality ###\n")
    infotofile.append("### %s\n" % str(runs_good))
# use the Run Registry API if requested
if options.isMC == 'false' and options.runRegistry and options.json == '':
    runs_good = getRunRegistryGoodRuns()
    print("### runs with good B field and quality ###")
    print(runs_good)

# use the JSON file if one was given
if options.isMC == 'false' and options.json != '':
    runs_good = getJSONGoodRuns()
    print("### good runs from JSON file ###")
    print(runs_good)
# find the files (with run number and number of events) for the good runs in the dataset
dbs_quiery = "find run, file.numevents, file where dataset="+options.alcaDataset+\
             " and run>="+str(options.startRun)+" and run<="+str(options.endRun)+" and file.numevents>0"

os.system('python $DBSCMD_HOME/dbsCommandLine.py -c search --noheader --query="'+dbs_quiery+'" | sort > /tmp/runs_and_files_full_of_pink_bunnies')

list_of_files = []
list_of_runs = []
list_of_numevents = []
total_numevents = 0

ff = open('/tmp/runs_and_files_full_of_pink_bunnies', 'r')
for line in ff:
    (run, numevents, fname) = line.split(' ')
    # for data, keep only files from the good-runs list
    if options.isMC == 'false' and (int(run) not in runs_good):
        continue
    fname = fname.rstrip('\n')
    list_of_files.append(fname)
    list_of_runs.append(int(run))
    list_of_numevents.append(numevents)
    total_numevents += int(numevents)
ff.close()
uniq_list_of_runs = sorted(set(list_of_runs))

print("### list of runs with good B field and quality in the dataset: ###")
print(uniq_list_of_runs)
infotofile.append("### list of runs with good B field and quality in the dataset: ###\n")
infotofile.append("### %s\n" % str(uniq_list_of_runs))
# remove duplicate files while keeping the file list and event counts in sync,
# e.g. [('a.root','10'), ('b.root','20'), ('a.root','10')] -> two unique pairs
files_events = list(zip(list_of_files, list_of_numevents))
unique_files_events = list(set(files_events))
list_of_files, list_of_numevents = map(list, list(zip(*unique_files_events)))
total_numevents = sum(map(int, list_of_numevents))

print("### total number of events in those "+str(len(uniq_list_of_runs))+" runs = "+str(total_numevents))

infotofile.append("### total number of events in those "+str(len(uniq_list_of_runs))+" runs = "+str(total_numevents))
size = len(list_of_files)

ff = open(options.outputFile, 'w')
ff.write("".join(infotofile))
ff.write("\nfileNames = [\n")
for i in range(0, size):
    comma = ","
    if i == size-1:
        comma = ""
    ff.write("    '"+ list_of_files[i] +"'"+comma+" # "+ list_of_numevents[i] +"\n")
ff.write("]\n")
ff.close()
Functions defined in this script:

def getRunRegistryGoodRuns() - obtain the list of good B field and data quality runs from the Run Registry,
                               https://twiki.cern.ch/twiki/bin/view/CMS/...
def getGoodQRuns()           - obtain the list of good data quality runs
def getGoodBRuns()           - obtain the list of runs with good B field (from the RunInfo DB)
def getJSONGoodRuns()        - obtain the list of good runs from a JSON file