from __future__ import print_function
from builtins import range
from pluginCondDBPyInterface import *
from CondCore.Utilities import iovInspector as inspect
import simplejson as json
print("Please use lxplus or set an environment (for example crab) with json lib available")
print("### command line:")
copyargs = sys.argv[:]
for i in range(len(copyargs)):
    if copyargs[i].find(" ") != -1:
        copyargs[i] = "\"%s\"" % copyargs[i]
commandline = " ".join(copyargs)

infotofile = ["### %s\n" % commandline]
usage = '%prog [options]\n\n' + \
        'Creates a Python configuration file with filenames for runs in specified run range, with certain min B field and data quality requirements.'
parser = optparse.OptionParser(usage)

parser.add_option("-d", "--alcaDataset",
                  help="[REQUIRED] Name of the input AlCa dataset to get filenames from.")

parser.add_option("-m", "--isMC",
                  help="Whether sample is MC (true) or real data (false).")

parser.add_option("-s", "--startRun",
                  help="First run number in range.")

parser.add_option("-e", "--endRun",
                  help="Last run number in range.")

parser.add_option("-b", "--minB",
                  help="Lower limit on minimal B field for a run.")

parser.add_option("--maxB",
                  help="Upper limit on B field for a run.")

parser.add_option("-r", "--runRegistry",
                  help="If present, use RunRegistry API for B field and data quality query")

parser.add_option("-j", "--json",
                  help="If present with JSON file as argument, use JSON file for the good runs and ignore B field and --runRegistry options. " + \
                       "The latest JSON file is available at /afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions10/7TeV/StreamExpress/")

parser.add_option("-t", "--dbTag",
                  help="RunInfo DB tag to use.",
                  default="runinfo_31X_hlt")

parser.add_option("--printTags",
                  help="If present, the only thing the script will do is print the list of tags in the DB")

parser.add_option("--dbName",
                  help="RunInfo DB name to use. The default one is " + \
                       "'oracle://cms_orcoff_prod/CMS_COND_31X_RUN_INFO'",
                  default="oracle://cms_orcoff_prod/CMS_COND_31X_RUN_INFO")

parser.add_option("--dqDataset",
                  help="Dataset name to query for good data quality runs. " + \
                       "If this option is not used, dqDataset=alcaDataset is automatically set. " + \
                       "If alcaDataset does not have DQ information use /Cosmics/Commissioning08-v1/RAW for CRAFT08 " + \
                       "and use /Cosmics/CRAFT09-v1/RAW for CRAFT09")

parser.add_option("-c", "--dqCriteria",
                  help="Set of DQ criteria to use with -dq flag of dbs.\n" + \
                       "An example of a really strict condition:\n" + \
                       "'DT_Shift_Offline=GOOD&CSC_Shift_Offline=GOOD&SiStrip_Shift_Offline=GOOD&Pixel_Shift_Offline=GOOD'\n" + \
                       "NOTE: if --runRegistry is used, the DQ criteria syntax should follow the Advanced query syntax for RR, e.g.:\n" + \
                       "\"{cmpDt}='GOOD' and {cmpCsc}='GOOD' and {cmpStrip}='GOOD' and {cmpPix}='GOOD'\"")

parser.add_option("-o", "--outputFile",
                  help="Name for output file (please include the .py suffix)",
                  default="filelist.py")

parser.add_option("-v", "--verbose",
                  help="Degree of debug info verbosity")

options, args = parser.parse_args()
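# Example invocation (a sketch only; the script name, dataset, and run range below are placeholders):
#   python thisScript.py -d /YourAlCaDataset/Run2010A-v1/RAW -m false -s 132440 -e 135000 \
#       -b 3.7 --maxB 3.9 -c "DT_Shift_Offline=GOOD&CSC_Shift_Offline=GOOD" -o filelist.py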
if options.alcaDataset == '' and not options.printTags:
    print("--alcaDataset /your/dataset/name is required!")

if options.dqDataset == '':
    options.dqDataset = options.alcaDataset

if not (options.isMC == 'true' or options.isMC == 'false'):
    print("--isMC option can have only 'true' or 'false' arguments")
minI = options.minB*18160/3.8
maxI = options.maxB*18160/3.8
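# The RunInfo trend queried below is in solenoid current (A), not field (T): 18160 A corresponds
# to the nominal 3.8 T, so e.g. --minB 3.7 translates to minI = 3.7*18160/3.8, about 17682 A.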
if options.runRegistry: rr = ' --runRegistry'
if options.json != '': jj = ' --json ' + options.json

allOptions = '### ' + copyargs[0] + ' --alcaDataset ' + options.alcaDataset + ' --isMC ' + options.isMC + \
             ' --startRun ' + str(options.startRun) + ' --endRun ' + str(options.endRun) + \
             ' --minB ' + str(options.minB) + ' --maxB ' + str(options.maxB) + rr + jj + \
             ' --dbTag ' + options.dbTag + ' --dqDataset ' + options.dqDataset + ' --dqCriteria "' + options.dqCriteria + '"' + \
             ' --outputFile ' + options.outputFile

print("### all options, including default:")
sys.setdlopenflags(DLFCN.RTLD_GLOBAL + DLFCN.RTLD_LAZY)

rdbms = RDBMS("/afs/cern.ch/cms/DB/conddb")

db = rdbms.getDB(options.dbName)

if options.printTags:
    print("\nOverview of all tags in " + options.dbName + " :\n")
iov = inspect.Iov(db, tag)

print("######## summaries ########")
for x in iov.summaries():
    print(x[0], x[1], x[2], x[3])

print("###(start_current,stop_current,avg_current,max_current,min_current,run_interval_micros) vs runnumber###")
print(iov.trend(what))

print("######## trends ########")
for x in iov.trendinrange(what, options.startRun-1, options.endRun+1):
    if v > 0 or x[0] == 67647 or x[0] == 66893 or x[0] == 67264:
        print(x[0], x[1], x[2], x[2][4], x[2][3])
    if x[2][4] >= minI and x[2][3] <= maxI:
        runs_b_on.append(int(x[0]))
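# Per the trend header printed above, x[2][3] is the run's maximum current and x[2][4] its
# minimum current, so a run is added to runs_b_on only if the current stayed within
# [minI, maxI] for the whole run.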
except Exception as er:

print("### runs with good B field ###")
dbs_quiery = "find run where dataset=" + options.dqDataset + " and dq=" + options.dqCriteria
print('dbs search --noheader --query="' + dbs_quiery + '" | sort')

os.system('python $DBSCMD_HOME/dbsCommandLine.py -c search --noheader --query="' + dbs_quiery + '" | sort > /tmp/runs_full_of_pink_bunnies')

ff = open('/tmp/runs_full_of_pink_bunnies', "r")
line = ff.readline()
while line and line != '':
    runs_good_dq.append(int(line))
    line = ff.readline()
ff.close()

os.system('rm /tmp/runs_full_of_pink_bunnies')
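# For illustration, with the dqDataset and the strict criteria quoted in the --dqCriteria help
# above, dbs_quiery would read (values are examples only):
#   find run where dataset=/Cosmics/Commissioning08-v1/RAW and dq=DT_Shift_Offline=GOOD&CSC_Shift_Offline=GOOD&SiStrip_Shift_Offline=GOOD&Pixel_Shift_Offline=GOOD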
print("### runs with good quality ###")
server = xmlrpclib.ServerProxy('http://pccmsdqm04.cern.ch/runregistry/xmlrpc')

rr_quiery = "{runNumber}>=" + str(options.startRun) + " and {runNumber}<=" + str(options.endRun) + \
            " and {bfield}>=" + str(options.minB) + " and {bfield}<=" + str(options.maxB)
if options.dqCriteria != "": rr_quiery += " and " + options.dqCriteria

rrstr = server.RunDatasetTable.export('GLOBAL', 'chart_runs_cum_evs_vs_bfield', rr_quiery)
rrstr = rrstr.replace("bfield", "'bfield'")
rrstr = rrstr.replace("events", "'events'")
for rr in rrdata['events']: runs_good.append(rr[0])
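# rrdata, iterated above, is the parsed form of rrstr; the parsing line is not part of this
# listing. A minimal sketch, assuming the exported string is a Python literal once its keys
# have been quoted by the replace() calls:
#   import ast
#   rrdata = ast.literal_eval(rrstr)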
jsonfile = open(options.json, 'r')
jsondict = json.load(jsonfile)

for run in jsondict.keys(): runs_good.append(int(run))
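# A certification JSON maps run numbers (string keys) to lists of good luminosity-section
# ranges, e.g. {"132440": [[1, 99], [102, 210]]} (values illustrative); only the run keys
# matter here, the lumi ranges are ignored.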
if options.isMC == 'false' and not options.runRegistry and options.json == '':

    infotofile.append("### runs with good B field ###\n")
    infotofile.append("### %s\n" % str(runs_b_on))
if options.isMC == 'false' and not options.runRegistry and options.json == '':

    infotofile.append("### runs with good quality ###\n")
    infotofile.append("### %s\n" % str(runs_good_dq))

    runs_good = [val for val in runs_b_on if val in runs_good_dq]

    print("### runs with good B field and quality ###")

    infotofile.append("### runs with good B field and quality ###\n")
    infotofile.append("### %s\n" % str(runs_good))
if options.isMC == 'false' and options.runRegistry and options.json == '':

    print("### runs with good B field and quality ###")
if options.isMC == 'false' and options.json != '':

    print("### good runs from JSON file ###")
dbs_quiery = "find run, file.numevents, file where dataset=" + options.alcaDataset + " and run>=" + str(options.startRun) + " and run<=" + str(options.endRun) + " and file.numevents>0"

os.system('python $DBSCMD_HOME/dbsCommandLine.py -c search --noheader --query="' + dbs_quiery + '" | sort > /tmp/runs_and_files_full_of_pink_bunnies')

list_of_numevents = []
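# Each sorted line of the DBS output is expected to look like "<run> <numevents> <filename>",
# which is what the split(' ') below relies on, e.g. (hypothetical values):
#   132440 25431 /store/data/.../000/132/440/ABCD1234.root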
ff = open('/tmp/runs_and_files_full_of_pink_bunnies', 'r')
for line in ff:
    (run, numevents, fname) = line.split(' ')
    if options.isMC == 'false' and (int(run) not in runs_good):
        continue
    fname = fname.rstrip('\n')
    list_of_files.append(fname)
    list_of_runs.append(int(run))
    list_of_numevents.append(numevents)
    total_numevents += int(numevents)
uniq_list_of_runs = sorted(set(list_of_runs))

print("### list of runs with good B field and quality in the dataset: ###")
print(uniq_list_of_runs)
infotofile.append("### list of runs with good B field and quality in the dataset: ###\n")
infotofile.append("### %s\n" % str(uniq_list_of_runs))
files_events = list(zip(list_of_files, list_of_numevents))
unique_files_events = list(set(files_events))
list_of_files, list_of_numevents = map(list, list(zip(*unique_files_events)))
total_numevents = sum(map(int, list_of_numevents))
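# The zip/set/zip* round trip removes duplicate (file, numevents) pairs (the query above can
# return the same file once per selected run it contains), and total_numevents is then
# recomputed from the de-duplicated list.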
print("### total number of events in those " + str(len(uniq_list_of_runs)) + " runs = " + str(total_numevents))

infotofile.append("### total number of events in those " + str(len(uniq_list_of_runs)) + " runs = " + str(total_numevents))
size = len(list_of_files)

ff = open(options.outputFile, 'w')
ff.write("".join(infotofile))
ff.write("\nfileNames = [\n")

for i in range(0, size):
    ff.write(" '" + list_of_files[i] + "'" + comma + " # " + list_of_numevents[i] + "\n")
def getRunRegistryGoodRuns()
    obtain the list of good B field and good quality runs from the Run Registry, https://twiki.cern.ch/twiki/bin/view/CMS/...
def getGoodQRuns()
    obtain the list of good quality runs from DBS
def getGoodBRuns()
    obtain the list of runs with good B field from the RunInfo DB
def getJSONGoodRuns()
    obtain the list of good runs from a JSON file