cmsPerfSuiteHarvest.py
#!/usr/bin/env python
#A script to "harvest" PerfSuite work directories, producing an xml file with all the data ready to be uploaded to the PerfSuiteDB DB.
from __future__ import print_function
import sys, os, re
import getopt

# NB: the listing elides a few import lines here; the ones below are reconstructed
# so that the helpers used in this file (minidom, loadTimeLog, getJobID_from*,
# read_SimulationCandles, xml_export_Sequences, write_xml, ...) resolve.
# The exact original import lines may differ slightly.
from xml.dom import minidom  # used to build the results XML document
from Validation.Performance.parserTimingReport import *
import Validation.Performance.cmssw_exportdb_xml as cmssw_exportdb_xml
from Validation.Performance.cmssw_exportdb_xml import xml_export_Sequences, write_xml
from Validation.Performance.parserPerfsuiteMetadata import parserPerfsuiteMetadata
from Validation.Performance.FileNamesHelper import *
import Validation.Performance.parserEdmSize as parserEdmSize

import glob
from commands import getstatusoutput  # Python 2 stdlib (subprocess.getstatusoutput in Python 3)
17 """ indicates whether the CMSSW is [use False] available or not. on our testing machine it's not [use True] """
18 _TEST_RUN = False
19 
20 """ global variables """
21 test_timing_report_log = re.compile("TimingReport.log$", re.IGNORECASE)
22 test_igprof_report_log = re.compile("^(.*)(IgProfMem|IgProfPerf)\.gz", re.IGNORECASE)
23 test_memcheck_report_log = re.compile("^(.*)memcheck_vlgd.xml", re.IGNORECASE)
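# e.g. (illustrative) these match report files named like
# "<...>TimingReport.log", "<...>IgProfMem.gz" / "<...>IgProfPerf.gz"
# and "<...>memcheck_vlgd.xml"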


xmldoc = minidom.Document()
release = None
steps = {}
candles = {}
pileups = {}
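# steps/candles/pileups act as sets: every job processed below registers the
# step, candle and pileup type it saw, and the collected keys are later joined
# into the human-readable output filename (see file_name in __main__).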

def usage(argv):
    script = argv[0]
    return """
    Usage: %(script)s [-v cmssw_version] [--version=cmssw_version]

    if the cmssw version is in the system's environment (after running cmsenv):
    $ %(script)s

    otherwise one must specify the cmssw version:
    $ %(script)s --version=CMSSW_3_2_0
    $ %(script)s -v CMSSW_3_2_0

    """ % locals()

def get_params(argv):
    """
    Returns the version of CMSSW to be used, which is taken from:
    * the command line parameter, or
    * the environment variable
    in case of error returns None

    And also the directory to put the xml files to: if none --> returns ""
    """

    """ try to get the version from the command line arguments """
    #print argv
    #FIXME: this should be rewritten using getopt properly
    version = None
    #xml_dir = "cmsperfvm:/data/projects/conf/PerfSuiteDB/xml_dropbox" #Set this as default (assume change in write_xml to write to remote machines)
    #NB write_xml is in Validation/Performance/python/cmssw_exportdb_xml.py
    #Setting the default to write to a local directory:
    xml_dir = "PerfSuiteDBData"
    try:
        opts, args = getopt.getopt(argv[1:], "v:", ["version=", "outdir="])
    except getopt.GetoptError as e:
        # bail out here: 'opts' would be unbound below if parsing failed
        print(e)
        print(usage(argv))
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-v", "--version"):
            version = arg
        if opt == "--outdir":
            xml_dir = arg

    """ if not, get it from the environment """
    if not version:
        try:
            version = os.environ["CMSSW_VERSION"]
        except KeyError:
            pass

    return (version, xml_dir)
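# minimal usage sketch (script name assumed):
#   get_params(["cmsPerfSuiteHarvest.py", "-v", "CMSSW_3_2_0"]) -> ("CMSSW_3_2_0", "PerfSuiteDBData")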

def _eventContent_DEBUG(edm_report):
    # for testing / information
    EC_count = {}
    if not _TEST_RUN:
        # count the products in event-content's
        for prod in edm_report:
            # (line restored from the elided listing: look up the ECs for each product)
            ecs = parseEventContent.List_ECs_forProduct(prod)
            for ec in ecs:
                if ec not in EC_count:
                    EC_count[ec] = []
                EC_count[ec].append(prod)
        #print out the statistics
        for (ec, prods) in EC_count.items():
            print("==== %s EVENT CONTENT: have %d items, the listing is: ===" % (ec, len(prods)))
            # list of products
            print("\n *".join(["%(cpp_type)s_%(module_name)s_%(module_label)s" % prod for prod in prods]))


def assign_event_content_for_product(product):
    """ returns modified product by adding the event content relationship """

    if not _TEST_RUN:
        product["event_content"] = ",".join(parseEventContent.List_ECs_forProduct(product))
    return product
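# illustrative: a product dict such as {"cpp_type": ..., "module_name": ..., "module_label": ...}
# comes back with an extra "event_content" key, e.g. "RECOSIM,AODSIM" (EC names assumed)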


def get_modules_sequences_relationships():
    (sequenceWithModules, sequenceWithModulesString) = ModuleToSequenceAssign.assignModulesToSeqs()
    return [{"name": seq, "modules": ",".join(modules)} for (seq, modules) in sequenceWithModulesString.items()]
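# illustrative return value (sequence/module names assumed):
#   [{"name": "reconstruction", "modules": "moduleA,moduleB"}, ...]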


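# The three export* functions below share one pattern: find the TestResults entry
# (parsed from cmsPerfSuite.log) whose candle, pileup_type, conditions and
# event_content match the parsed job, and attach the report to that entry's
# "jobs" list; jobs that cannot be matched go to runinfo['unrecognized_jobs'].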
def exportIgProfReport(path, igProfReport, igProfType, runinfo):
    jobID = igProfReport["jobID"]
    #print jobID
    candleLong = os.path.split(path)[1].replace("_IgProf_Perf", "").replace("_IgProf_Mem", "").replace("_PU", "")
    found = False
    #print igProfType
    if igProfType in runinfo['TestResults']:
        for result in runinfo['TestResults'][igProfType]:
            if candleLong == result["candle"] and jobID["pileup_type"] == result['pileup_type'] and jobID["conditions"] == result['conditions'] and jobID["event_content"] == result['event_content']:
                jobID["candle"] = jobID["candle"].upper()
                if "jobs" not in result:
                    result['jobs'] = []
                result['jobs'].append(igProfReport)
                found = True
                break

    if not found:
        print("============ (almost) ERROR: NOT FOUND THE ENTRY in cmsPerfSuite.log, exporting as separate entry ======== ")
        print("JOB ID: %s " % str(jobID))
        print(" ====================== ")
        runinfo['unrecognized_jobs'].append(igProfReport)
        #export_xml(xml_doc = xmldoc, **igProfReport)


def exportTimeSizeJob(path, timeSizeReport, runinfo):
    candleLong = os.path.split(path)[1].replace("_TimeSize", "").replace("_PU", "")
    jobID = timeSizeReport["jobID"]

    #search for a run Test to which our JOB could belong
    found = False
    if 'TimeSize' in runinfo['TestResults']:
        for result in runinfo['TestResults']['TimeSize']:
            #print result
            """ If this is the testResult which fits the TimeSize job """
            #TODO: we do not check the step when assigning because of the different names; check if this is really OK. Make a decision later which step name to use, the long or the short one
            #and jobID["step"] in result['steps'].split(parserPerfsuiteMetadata._LINE_SEPARATOR)
            if result['candle'] == candleLong and jobID["pileup_type"] == result['pileup_type'] and jobID["conditions"] == result['conditions'] and jobID["event_content"] == result['event_content']:
                #print result
                if "jobs" not in result:
                    result['jobs'] = []
                result['jobs'].append(timeSizeReport)
                found = True
                break

    if not found:
        print("============ (almost) ERROR: NOT FOUND THE ENTRY in cmsPerfSuite.log, exporting as separate entry ======== ")
        print("JOB ID: %s " % str(jobID))
        print(" ====================== ")
        runinfo['unrecognized_jobs'].append(timeSizeReport)
        #export_xml(xml_doc = xmldoc, **timeSizeReport)

def exportMemcheckReport(path, MemcheckReport, runinfo):
    candleLong = os.path.split(path)[1].replace("_Memcheck", "").replace("_PU", "")
    jobID = MemcheckReport["jobID"]

    #search for a run Test to which our JOB could belong
    found = False
    if 'Memcheck' in runinfo['TestResults']:
        for result in runinfo['TestResults']['Memcheck']:
            #print result
            #print jobID
            """ If this is the testResult which fits the Memcheck job """
            #TODO: we do not check the step when assigning because of the different names; check if this is really OK. Make a decision later which step name to use, the long or the short one
            #and jobID["step"] in result['steps'].split(parserPerfsuiteMetadata._LINE_SEPARATOR)
            if result['candle'] == candleLong and jobID["pileup_type"] == result['pileup_type'] and jobID["conditions"] == result['conditions'] and jobID["event_content"] == result['event_content']:
                #print result
                if "jobs" not in result:
                    result['jobs'] = []
                result['jobs'].append(MemcheckReport)
                found = True
                break

    if not found:
        print("============ (almost) ERROR: NOT FOUND THE ENTRY in cmsPerfSuite.log, exporting as separate entry ======== ")
        print("JOB ID: %s " % str(jobID))
        print(" ====================== ")
        runinfo['unrecognized_jobs'].append(MemcheckReport)

def process_timesize_dir(path, runinfo):
    global release, event_content, conditions
    """ if the release is not provided explicitly we take it from the Simulation candles file """
    if (not release):
        release_fromlogfile = read_SimulationCandles(path)
        release = release_fromlogfile
        print("release from simulation candles: %s" % release)

    if (not release):
        raise Exception("the release was not found!")


    """ process the TimingReport log files """

    # get the file list
    files = os.listdir(path)
    timing_report_files = [os.path.join(path, f) for f in files
                           if test_timing_report_log.search(f)
                           and os.path.isfile(os.path.join(path, f))]

    # print timing_report_files
    for timelog_f in timing_report_files:
        print("\nProcessing file: %s" % timelog_f)
        print("------- ")

        jobID = getJobID_fromTimeReportLogName(os.path.join(path, timelog_f))
        print("jobID: %s" % str(jobID))
        (candle, step, pileup_type, conditions, event_content) = jobID
        jobID = dict(list(zip(("candle", "step", "pileup_type", "conditions", "event_content"), jobID)))
        print("Dictionary based jobID %s: " % str(jobID))
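        # illustrative jobID (field values assumed):
        #   {"candle": "TTbar", "step": "GEN,SIM", "pileup_type": "",
        #    "conditions": "MC_31X_V3", "event_content": "RAWSIM"}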

        #if any of the jobID fields except pileup_type is empty we discard the job, as those are the jobID keys and we must have them all
        discard = len([key for key, value in jobID.items() if key != "pileup_type" and not value])
        if discard:
            print(" ====================== The job HAS BEEN DISCARDED =============== ")
            print(" NOT ALL DATA WAS AVAILABLE ")
            print(" JOB ID = %s " % str(jobID))
            print(" ======================= end ===================================== ")
            continue

        # TODO: automatically detect the type of report file!!!
        (mod_timelog, evt_timelog, rss_data, vsize_data) = loadTimeLog(timelog_f)

        mod_timelog = processModuleTimeLogData(mod_timelog, groupBy = "module_name")
        print("Number of modules grouped by (module_label+module_name): %s" % len(mod_timelog))

        # add to the list to generate the readable filename :)
        steps[step] = 1
        candles[candle] = 1
        if pileup_type == "":
            pileups["NoPileUp"] = 1
        else:
            pileups[pileup_type] = 1

        # root file size (number)
        root_file_size = getRootFileSize(path = path, candle = candle, step = step.replace(':', '='))
        # number of events
        num_events = read_ConfigurationFromSimulationCandles(path = path, step = step, is_pileup = pileup_type)["num_events"]

        #EdmSize
        edm_report = parserEdmSize.getEdmReport(path = path, candle = candle, step = step)
        if edm_report is not False:
            try:
                # add event content data
                edm_report = map(assign_event_content_for_product, edm_report)
                # for testing / information
                _eventContent_DEBUG(edm_report)
            except Exception as e:
                print(e)

        timeSizeReport = {
            "jobID": jobID,
            "release": release,
            "timelog_result": (mod_timelog, evt_timelog, rss_data, vsize_data),
            "metadata": {"testname": "TimeSize", "root_file_size": root_file_size, "num_events": num_events},
            "edmSize_result": edm_report
        }

        # export to xml: actually exporting gets suspended and put into runinfo
        exportTimeSizeJob(path, timeSizeReport, runinfo)

def process_memcheck_dir(path, runinfo):
    global release, event_content, conditions
    """ if the release is not provided explicitly we take it from the Simulation candles file """
    if (not release):
        release_fromlogfile = read_SimulationCandles(path)
        release = release_fromlogfile
        print("release from simulation candles: %s" % release)

    if (not release):
        raise Exception("the release was not found!")

    """ process the vlgd files """

    # get the file list
    files = os.listdir(path)
    memcheck_files = [os.path.join(path, f) for f in files
                      if test_memcheck_report_log.search(f)
                      and os.path.isfile(os.path.join(path, f))]

    if len(memcheck_files) == 0: # Fast protection for old runs, where the _vlgd files were not created...
        print("No _vlgd files found!")
    else:
        for file in memcheck_files:
            jobID = getJobID_fromMemcheckLogName(os.path.join(path, file))

            (candle, step, pileup_type, conditions, event_content) = jobID

            print("jobID: %s" % str(jobID))
            jobID = dict(list(zip(("candle", "step", "pileup_type", "conditions", "event_content"), jobID)))

            print("Dictionary based jobID %s: " % str(jobID))

            #if any of the jobID fields except pileup_type is empty we discard the job, as those are the jobID keys and we must have them all
            discard = len([key for key, value in jobID.items() if key != "pileup_type" and not value])
            if discard:
                print(" ====================== The job HAS BEEN DISCARDED =============== ")
                print(" NOT ALL DATA WAS AVAILABLE ")
                print(" JOB ID = %s " % str(jobID))
                print(" ======================= end ===================================== ")
                continue

            # add to the list to generate the readable filename :)
            steps[step] = 1
            candles[candle.upper()] = 1
            if pileup_type == "":
                pileups["NoPileUp"] = 1
            else:
                pileups[pileup_type] = 1

            memerror = getMemcheckError(path)

            MemcheckReport = {
                "jobID": jobID,
                "release": release,
                "memcheck_errors": {"error_num": memerror},
                "metadata": {"testname": "Memcheck"},
            }

            # export to xml: actually exporting gets suspended and put into runinfo
            exportMemcheckReport(path, MemcheckReport, runinfo)
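
# Count the Valgrind <error> tags over all *memcheck_vlgd.xml files in path
# (shells out to grep | wc -l per file).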
def getMemcheckError(path):
    globbed = glob.glob(os.path.join(path, "*memcheck_vlgd.xml"))

    errnum = 0

    for f in globbed:
        #print f
        cmd = "grep '<error>' " + f + " | wc -l "
        p = os.popen(cmd, 'r')
        errnum += int(p.readlines()[0])

    return errnum


def process_igprof_dir(path, runinfo):
    global release, event_content, conditions
    """ if the release is not provided explicitly we take it from the Simulation candles file """
    if (not release):
        release_fromlogfile = read_SimulationCandles(path)
        release = release_fromlogfile
        print("release from simulation candles: %s" % release)

    if (not release):
        raise Exception("the release was not found!")

    """ process the IgProf sql3 files """

    # get the file list
    files = os.listdir(path)
    igprof_files = [os.path.join(path, f) for f in files
                    if test_igprof_report_log.search(f)
                    and os.path.isfile(os.path.join(path, f))]

    if len(igprof_files) == 0: # No files...
        print("No igprof files found!")
    else:
        for file in igprof_files:
            jobID = getJobID_fromIgProfLogName(file)

            (candle, step, pileup_type, conditions, event_content) = jobID

            print("jobID: %s" % str(jobID))
            jobID = dict(list(zip(("candle", "step", "pileup_type", "conditions", "event_content"), jobID)))

            print("Dictionary based jobID %s: " % str(jobID))

            igProfType = path.split("/")[-1].replace("TTbar_", "").replace("MinBias_", "").replace("PU_", "")

            #if any of the jobID fields except pileup_type is empty we discard the job, as those are the jobID keys and we must have them all
            discard = len([key for key, value in jobID.items() if key != "pileup_type" and not value])
            if discard:
                print(" ====================== The job HAS BEEN DISCARDED =============== ")
                print(" NOT ALL DATA WAS AVAILABLE ")
                print(" JOB ID = %s " % str(jobID))
                print(" ======================= end ===================================== ")
                continue

            # add to the list to generate the readable filename :)
            steps[step] = 1
            candles[candle.upper()] = 1
            if pileup_type == "":
                pileups["NoPileUp"] = 1
            else:
                pileups[pileup_type] = 1

            igs = getIgSummary(path)
            #print igs

            igProfReport = {
                "jobID": jobID,
                "release": release,
                "igprof_result": igs,
                "metadata": {"testname": igProfType},
            }

            # print igProfReport
            # export to xml: actually exporting gets suspended and put into runinfo
            exportIgProfReport(path, igProfReport, igProfType, runinfo)

#get IgProf summary information from the sql3 files
def getIgSummary(path):
    igresult = []
    globbed = glob.glob(os.path.join(path, "*.sql3"))

    for f in globbed:
        #print f
        profileInfo = getSummaryInfo(f)
        if not profileInfo:
            continue
        cumCounts, cumCalls = profileInfo
        dump, architecture, release, rest = f.rsplit("/", 3)
        candle, sequence, pileup, conditions, process, counterType, events = rest.split("___")
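        # the sql3 filename encodes the job metadata, e.g. (illustrative):
        #   TTbar___GEN,SIM___NoPileUp___MC_31X_V3___RECO___IgProfMemTotal___1.sql3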
        events = events.replace(".sql3", "")
        igresult.append({"counter_type": counterType, "event": events, "cumcounts": cumCounts, "cumcalls": cumCalls})

    #fail-safe (nasty) fix for "diff" entries: recompute their counts as the difference
    #between the two referenced events (even if it gets fixed in the sql3 files, this won't break again...)
    for ig in igresult:
        if 'diff' in ig['event']:
            eventLast, eventOne = ig['event'].split('_diff_')
            for part in igresult:
                if part['counter_type'] == ig['counter_type'] and part['event'] == eventOne:
                    cumcountsOne = part['cumcounts']
                    cumcallsOne = part['cumcalls']
                if part['counter_type'] == ig['counter_type'] and part['event'] == eventLast:
                    cumcountsLast = part['cumcounts']
                    cumcallsLast = part['cumcalls']
            ig['cumcounts'] = cumcountsLast - cumcountsOne
            ig['cumcalls'] = cumcallsLast - cumcallsOne

    return igresult

def getSummaryInfo(database):
    summary_query = """SELECT counter, total_count, total_freq, tick_period
                       FROM summary;"""
    error, output = doQuery(summary_query, database)
    if error or not output or output.count("\n") > 1:
        return None
    counter, total_count, total_freq, tick_period = output.split("@@@")
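    # e.g. (illustrative) output "PERF_TICKS@@@3391567634@@@132@@@1e-09"
    # -> (3391567634 * 1e-09, 132); for other counters the raw count is returned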
    if counter == "PERF_TICKS":
        return float(tick_period) * float(total_count), int(total_freq)
    else:
        return int(total_count), int(total_freq)

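# Pipe the query into the sqlite3 CLI (falling back to an AFS-hosted binary);
# returns getstatusoutput's (status, output), with columns joined by "@@@".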
def doQuery(query, database):
    if os.path.exists("/usr/bin/sqlite3"):
        sqlite = "/usr/bin/sqlite3"
    else:
        sqlite = "/afs/cern.ch/user/e/eulisse/www/bin/sqlite"
    return getstatusoutput("echo '%s' | %s -separator @@@ %s" % (query, sqlite, database))

#TimeSize
def searchTimeSizeFiles(runinfo):
    """ so far we will use the current dir to search in """
    path = os.getcwd()
    #print path
    print('full path =', os.path.abspath(path))

    files = os.listdir(path)

    test_timeSizeDirs = re.compile("_TimeSize$", re.IGNORECASE)
    timesize_dirs = [os.path.join(path, f) for f in files if test_timeSizeDirs.search(f) and os.path.isdir(os.path.join(path, f))]

    for timesize_dir in timesize_dirs:
        # print timesize_dir
        process_timesize_dir(timesize_dir, runinfo)

#Memcheck
def searchMemcheckFiles(runinfo):
    """ so far we will use the current dir to search in """
    path = os.getcwd()
    #print path
    print('full path =', os.path.abspath(path))

    files = os.listdir(path)

    test_MemcheckDirs = re.compile("_Memcheck(.*)$", re.IGNORECASE)
    memcheck_dirs = [os.path.join(path, f) for f in files if test_MemcheckDirs.search(f) and os.path.isdir(os.path.join(path, f))]

    for memcheck_dir in memcheck_dirs:
        print(memcheck_dir)
        process_memcheck_dir(memcheck_dir, runinfo)

#IgProf
def searchIgProfFiles(runinfo):
    """ so far we will use the current dir to search in """
    path = os.getcwd()
    #print path
    print('full path =', os.path.abspath(path))

    files = os.listdir(path)

    test_IgProfDirs = re.compile("_IgProf(.*)$", re.IGNORECASE)
    igprof_dirs = [os.path.join(path, f) for f in files if test_IgProfDirs.search(f) and os.path.isdir(os.path.join(path, f))]

    for igprof_dir in igprof_dirs:
        print(igprof_dir)
        process_igprof_dir(igprof_dir, runinfo)

def exportSequences():
    """ Exports the sequences to the XML doc """
    try:
        env_cmssw_version = os.environ["CMSSW_VERSION"]
    except KeyError:
        print("<<<<< ====== Error: cannot get CMSSW version [just integrity check for sequences]. \
               Is the CMSSW environment initialized? (use cmsenv) ==== >>>>")
        env_cmssw_version = None

    print(" ==== exporting the sequences. loading files for currently loaded CMSSW version: %s, while the CMSSW we are currently harvesting is %s ===" % (env_cmssw_version, release))
    xml_export_Sequences(xml_doc = xmldoc, sequences = get_modules_sequences_relationships(), release = release)


if __name__ == "__main__":
    #searchFiles()
    #TO DO:
    #Use option parser! This is messy.

    (release, output_dir) = get_params(sys.argv)

    if not release:
        """ print usage(sys.argv)
        sys.exit(2) """
        print("The version was not provided explicitly, will try to get one from the SimulationCandles file")


    # Export the metadata from cmsPerfSuite.log (in current working directory!)
    print("Parsing cmsPerfSuite.log: getting all the metadata concerning the run")
    p = parserPerfsuiteMetadata(os.getcwd())
    run_info = p.parseAll()

    print("Loading Sequences and Event-Content(s). Please wait...")

    Sequences_OK = False
    EventContents_OK = False

    if not _TEST_RUN:
        try:
            import Validation.Performance.ModuleToSequenceAssign as ModuleToSequenceAssign
            Sequences_OK = True
        except Exception as e:
            print(e)
        try:
            import Validation.Performance.parseEventContent as parseEventContent
            EventContents_OK = True
        except Exception as e:
            print(e)

    print("Parsing TimeSize report")
    # Search for TimeSize files: EdmSize, TimingReport
    searchTimeSizeFiles(run_info)
    print("Parsing IgProf report")
    # Search for IgProf files
    searchIgProfFiles(run_info)
    print("Parsing Memcheck report")
    # Search for Memcheck files
    searchMemcheckFiles(run_info)
    #print run_info

    print("Exporting sequences and event-content rules")
    if not _TEST_RUN:
        """ for testing on a laptop we have no CMSSW """
        # export sequences (for currently loaded CMSSW)
        if Sequences_OK:
            exportSequences()

        if EventContents_OK:
            # export event's content rules
            eventContentRules = parseEventContent.getTxtEventContentRules()
            cmssw_exportdb_xml.exportECRules(xmldoc, eventContentRules)


    cmssw_exportdb_xml.exportRunInfo(xmldoc, run_info, release = release)
    #save the XML file, TODO: change fileName after introducing the JobID
    import datetime
    now = datetime.datetime.now()
    #Changing slightly the XML filename format
    #FIXME: review this convention and archive the xml in a separate CASTOR xml directory for quick recovery of DB...
    file_name = "%s___%s___%s___%s___%s___%s___%s.xml" % (release, "_".join(steps.keys()), "_".join(candles.keys()), "_".join(pileups.keys()), event_content, conditions, now.isoformat())
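    # e.g. (illustrative):
    #   CMSSW_3_2_0___GEN,SIM_RECO___TTBAR___NoPileUp___RAWSIM___MC_31X_V3___2009-07-01T12:00:00.123456.xml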
    print("Writing the output to: %s " % file_name)

    write_xml(xmldoc, output_dir, file_name) #change this function to be able to handle directories on remote machines (via tar pipes for now; could always revert to rsync later)
    #NB write_xml is in Validation/Performance/python/cmssw_exportdb_xml.py