import os                      # used throughout the excerpt below
from re import search          # used by the filename/blacklist helpers below
from optparse import OptionParser
from subprocess import call,PIPE
from multiprocessing import Pool

if os.environ.has_key("RELMON_SA"):
  import definitions as definitions
  from dqm_interfaces import DirWalkerFile,string2blacklist,DirWalkerFile_thread_wrapper
  from dirstructure import Directory
  from directories2html import directory2html,make_summary_table
  from utils import ask_ok, unpickler, make_files_pairs
else:
  from Utilities.RelMon.dqm_interfaces import DirWalkerFile,string2blacklist,DirWalkerFile_thread_wrapper
  from Utilities.RelMon.dirstructure import Directory
  from Utilities.RelMon.directories2html import directory2html,make_summary_table
  from Utilities.RelMon.utils import ask_ok, unpickler, make_files_pairs
namebase=os.path.basename(filename)
return namebase.split("__")[1]

namebase=os.path.basename(filename)
return namebase.split("__")[2]

namebase=os.path.basename(filename)
return namebase.split("__")[0].split("_")[2]

skim = skim[:skim.rfind('-v')]
return "%s_%s"%(run,skim)
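# Hedged aside (not part of the original script): the helpers above slice the
# sample name, the CMSSW version and the run token out of standard DQM
# harvesting filenames. The filename below is made up, but follows the
# DQM_V<NNNN>_R<run>__<sample>__<CMSSW version>__<tier>.root pattern they assume.
name = "DQM_V0001_R000123456__RelValZMM__CMSSW_5_3_0-START53_V9-v1__DQM.root"
tokens = name.split("__")
print(tokens[1])                     # sample:        RelValZMM
print(tokens[2])                     # CMSSW version: CMSSW_5_3_0-START53_V9-v1
print(tokens[0].split("_")[2])       # run token:     R000123456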
if len(ref_filenames)*len(test_filenames)==0:
  print "Empty reference and test filenames lists!"

for ref,test in zip(map(os.path.basename,ref_filenames),map(os.path.basename,test_filenames)):

  print " ## sample 1: %s vs sample 2: %s"%(ref_sample,test_sample)

  if ref_sample!=test_sample:
    print "Files %s and %s do not seem to refer to the same sample." %(ref,test)

  if search("20[01]",ref_version)!=None:
    ref_sample+=ref_version.split("_")[-1]
  samples.append(ref_sample)

  ref_versions.append(ref_version)
  test_versions.append(test_version)

ref_versions=list(set(ref_versions))
test_versions=list(set(test_versions))

cmssw_version1=ref_versions[0]
cmssw_version2=test_versions[0]

return samples,cmssw_version1,cmssw_version2
for name in names_list:
  if not name.endswith(".root"):
    print "File %s does not seem to be a rootfile. Please check." %name
int_pattern=pattern.strip()
if int_pattern[0]=='!':
  int_pattern=int_pattern[1:]

condition = search(int_pattern,target)!=None
# (the match is inverted when the pattern carried a leading '!')
condition = not condition

blacklist+=blist_piece
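# Hedged aside (not part of the original script): a self-contained sketch of the
# '!'-negation used above. The helper name 'pattern_matches' is made up.
from re import search

def pattern_matches(pattern, target):
    pattern = pattern.strip()
    negate = pattern.startswith('!')
    if negate:
        pattern = pattern[1:]
    condition = search(pattern, target) is not None
    return not condition if negate else condition

print(pattern_matches("2010+|2011+", "CMSSW_4_2_3_2011A"))   # True: a data era is present
print(pattern_matches("!2010+|2011+", "CMSSW_5_3_0"))        # True: no data era, negated match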
"""Build a blacklist for each sample according to a set of rules
...
"""

for sample in samples:
  blacklists[sample]="FED@1,AlcaBeamMonitor@1,HLT@1,AlCaReco@1"

  blacklists[sample]+=",AlCaEcalPi0@2"
  if not search("2010+|2011+",ver1):
    print "We are treating MC files for the HLT"
    for pattern,blist in definitions.hlt_mc_pattern_blist_pairs:
      blacklists[sample]=add_to_blacklist(blacklists[sample],pattern,sample,blist)
  else:
    print "We are treating Data files for the HLT"

  if not search("2010+|2011+",ver1):
    print "We are treating MC files"

    for pattern,blist in definitions.mc_pattern_blist_pairs:
      blacklists[sample]=add_to_blacklist(blacklists[sample],pattern,sample,blist)
  else:
    print "We are treating Data files:"
    blacklists[sample]+=",By__Lumi__Section@-1,AlCaReco@1"
    for pattern,blist in definitions.data_pattern_blist_pairs:
files_list = filter(lambda s: s.endswith(".root"), os.listdir(directory))
files_list_path=map(lambda s: os.path.join(directory,s), files_list)

return files_list_path
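# Hedged aside (not part of the original script): an equivalent listing of the
# .root files in a directory using glob; "some_dir" is a placeholder.
import glob, os
files_list_path = sorted(glob.glob(os.path.join("some_dir", "*.root")))
print(files_list_path)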
if len(files_list)==0:
  print "Zero files found in directory %s!" %all_samples

for name in files_list:

if len(files_list)%2!=0:
  print "The number of files is not even... Trying to recover a catastrophe."

for iname in xrange(len(files_list)):
  filename=files_list[iname]

  ref_filenames.append(filename)

  test_filenames.append(filename)

print "The guess would be the following:"
for ref,test in zip(ref_filenames,test_filenames):
  refbasedir=os.path.dirname(ref)
  testbasedir=os.path.dirname(test)
  dir_to_print=refbasedir
  if refbasedir!=testbasedir:
    dir_to_print="%s and %s" %(refbasedir,testbasedir)
  print "* Directory: %s " %dir_to_print
  refname=os.path.basename(ref)
  testname=os.path.basename(test)
  print "  o %s" %refname
  print "  o %s" %testname

return ref_filenames,test_filenames
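# Hedged aside (not part of the original script): one way the alternating
# reference/test split sketched above can work, assuming the pool of files has
# been sorted so the two files of each sample end up adjacent. Names are made up.
files_list = sorted([
    "TTbar__CMSSW_5_3_0__DQM.root", "TTbar__CMSSW_5_3_1__DQM.root",
    "ZMM__CMSSW_5_3_0__DQM.root", "ZMM__CMSSW_5_3_1__DQM.root",
])
ref_filenames, test_filenames = [], []
for iname in range(len(files_list)):
    if iname % 2 == 0:
        ref_filenames.append(files_list[iname])    # even index -> reference
    else:
        test_filenames.append(files_list[iname])   # odd index  -> test
print(list(zip(ref_filenames, test_filenames)))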
ref_filenames=map(lambda s:s.strip(),ref_samples.split(","))
test_filenames=map(lambda s:s.strip(),test_samples.split(","))

if len(ref_filenames)!=len(test_filenames):
  print "The number of reference and test files does not seem to be the same. Please check."

return ref_filenames,test_filenames
return len(filter(lambda p: p.returncode==None,p_list))
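# Hedged aside (not part of the original script): returncode stays None until
# the child has been polled or waited for, so a caller typically calls poll()
# on its Popen handles before counting the live ones. The command is illustrative.
from subprocess import Popen

procs = [Popen(["sleep", "1"]) for _ in range(3)]
for p in procs:
    p.poll()                                             # refresh returncode without blocking
print(len([p for p in procs if p.returncode is None]))   # number of still-running children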
"""Creates the shell command that compares two files with the
compare_using_files.py script, and calls it."""
sample, ref_filename, test_filename, options = args

command = " compare_using_files.py "
command+= "%s %s " %(ref_filename,test_filename)

command+= " -o %s " %sample

command+= " --specify_run "
command+= " -t %s " %options.test_threshold
command+= " -s %s " %options.stat_test

if options.hash_name:
  command += " --hash_name "

if options.blacklist_file:
  command += " --use_black_file "

if options.standalone:
  command += " --standalone "
if len(blacklists[sample]) >0:
  command+= '-B %s ' %blacklists[sample]
print "\nExecuting -- %s" %command

# empty tokens from the repeated spaces are dropped before handing the list to call()
process=call(filter(lambda x: len(x)>0,command.split(" ")))
n_processes= int(options.n_processes)

if len(options.all_samples)>0:

ref_filenames=map(os.path.abspath,ref_filenames)
test_filenames=map(os.path.abspath,test_filenames)

samples,cmssw_version1,cmssw_version2=guess_params(ref_filenames,test_filenames)

print "No Samples found... Quitting"

original_dir=os.getcwd()

outdir=options.out_dir

print "Creating automatic outdir:",
outdir="%sVS%s" %(cmssw_version1,cmssw_version2)

if len(options.input_dir)==0:
  print "Creating automatic indir:",
  options.input_dir=outdir
  print options.input_dir

if not os.path.exists(outdir):

n_comparisons=len(ref_filenames)
if n_comparisons < n_processes:
  print "Fewer comparisons than possible processes: reducing n processes to",
  n_processes=n_comparisons

if search("20[01]",cmssw_version1)!=None:
  skim_name=cmssw_version1.split("_")[-1]

running_subprocesses=[]

pool = Pool(n_processes)
args_iterable = [list(args) + [options] for args in zip(samples, ref_filenames, test_filenames)]
pool.map(call_compare_using_files, args_iterable)

os.system("mv */*pkl .")

pkl_list=filter(lambda x: ".pkl" in x, os.listdir("./"))
running_subprocesses=[]
n_processes=int(options.n_processes)

for pklfilename in pkl_list:
  command = "compare_using_files.py "

  command+="-P %s " %pklfilename
  command+="-o %s " %pklfilename[:-4]
  print "Executing %s" %command
  process=call(filter(lambda x: len(x)>0,command.split(" ")))

  running_subprocesses.append(process)
  if process_counter>=n_processes:

    for p in running_subprocesses:
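# Hedged aside (not part of the original script): the Pool.map fan-out above in
# miniature, with a stand-in worker so the example is self-contained. The worker
# and the argument values are made up.
from multiprocessing import Pool

def fake_compare(args):
    sample, ref_filename, test_filename, options = args
    return "%s: %s vs %s (options=%s)" % (sample, ref_filename, test_filename, options)

if __name__ == "__main__":
    samples = ["RelValZMM", "RelValTTbar"]
    ref_filenames = ["ZMM_ref.root", "TTbar_ref.root"]
    test_filenames = ["ZMM_test.root", "TTbar_test.root"]
    options = "KS"    # stands in for the real options object
    args_iterable = [list(args) + [options] for args in zip(samples, ref_filenames, test_filenames)]
    pool = Pool(2)
    print(pool.map(fake_compare, args_iterable))
    pool.close()
    pool.join()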
def do_html(options, hashing_flag, standalone):

  print "Preparing reports for the single files..."

  aggregation_rules_twiki={}

  print "Aggregating directories according to HLT rules"
  aggregation_rules=definitions.aggr_pairs_dict['HLT']
  aggregation_rules_twiki=definitions.aggr_pairs_twiki_dict['HLT']

  aggregation_rules=definitions.aggr_pairs_dict['reco']
  aggregation_rules_twiki=definitions.aggr_pairs_twiki_dict['reco']

  table_html = make_summary_table(options.input_dir,aggregation_rules,aggregation_rules_twiki, hashing_flag, standalone)

  ofile = open("RelMonSummary.html","w")
  ofile.write(table_html)
if __name__ == "__main__":

  test_threshold=0.00001

  parser = OptionParser(usage="usage: %prog [options]")

  parser.add_option("-R","--ref_samples",
                    help="The samples that act as reference (comma separated list)")

  parser.add_option("-T","--test_samples",
                    default=test_samples,
                    help="The samples to be tested (comma separated list)")

  parser.add_option("-a","--all_samples",
                    help="EXPERIMENTAL: Try to sort all samples selected (wildcards) and organise a comparison")

  parser.add_option("-o","--out_dir",
                    help="The outdir other than <Version1>VS<Version2>")

  parser.add_option("-p","--do_pngs",
                    help="EXPERIMENTAL!!! Do the pngs of the comparison (takes 50%% of the total running time) \n(default is %s)" %False)

  parser.add_option("-r","--run",
                    help="The run to be checked \n(default is %s)" %run)

  parser.add_option("-t","--test_threshold",
                    dest="test_threshold",
                    default=test_threshold,
                    help="Threshold for the statistical test \n(default is %s)" %test_threshold)

  parser.add_option("-s","--stat_test",
                    help="Statistical test (KS or Chi2) \n(default is %s)" %stat_test)

  parser.add_option("-N","--numberOfProcesses",
                    help="Number of parallel processes to be run. Be polite! \n(default is %s)" %n_processes)

  parser.add_option("--HLT",
                    help="Analyse HLT histograms\n(default is %s)" %hlt)

  parser.add_option("-i","--input_dir",
                    help="Input directory for html creation \n(default is %s)" %in_dir)

  parser.add_option("--reports",
                    help="Do the reports for the pickles \n(default is %s)" %in_dir)

  parser.add_option("--hash_name",
                    help="Set if you want to minimize & hash the output HTML files.")

  parser.add_option("--use_black_file",
                    dest="blacklist_file",
                    help="Use a blacklist file of histograms located in /RelMon/data")

  parser.add_option("--standalone",
                    help="Declare that RelMon is used in standalone mode. Makes CSS files accessible over HTTP")

  (options, args) = parser.parse_args()

  if len(options.test_samples)*len(options.ref_samples)+len(options.all_samples)==0 and len(options.input_dir)==0:
    print "No samples given as input."

  if len(options.all_samples)>0 or (len(options.ref_samples)*len(options.test_samples)>0):

  if len(options.input_dir)>0:
    do_html(options, options.hash_name, options.standalone)
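# Hedged aside (not part of the original script): what a typical set of options
# could look like, exercised here by feeding optparse an argument list directly.
# The option subset, file names and values are made up for illustration.
from optparse import OptionParser

parser = OptionParser(usage="usage: %prog [options]")
parser.add_option("-R", "--ref_samples", dest="ref_samples", default="")
parser.add_option("-T", "--test_samples", dest="test_samples", default="")
parser.add_option("-N", "--numberOfProcesses", dest="n_processes", default=1)
parser.add_option("-s", "--stat_test", dest="stat_test", default="KS")

argv = ["-R", "ZMM_ref.root,TTbar_ref.root",
        "-T", "ZMM_test.root,TTbar_test.root",
        "-N", "4", "-s", "Chi2"]
options, args = parser.parse_args(argv)
print(options.ref_samples.split(","))
print(options.n_processes, options.stat_test)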