CMS 3D CMS Logo

Functions | Variables
cmsPerfSuiteHarvest Namespace Reference

Functions

def _eventContent_DEBUG (edm_report)
 
def assign_event_content_for_product (product)
 
def doQuery (query, database)
 
def exportIgProfReport (path, igProfReport, igProfType, runinfo)
 
def exportMemcheckReport (path, MemcheckReport, runinfo)
 
def exportSequences ()
 
def exportTimeSizeJob (path, timeSizeReport, runinfo)
 
def get_modules_sequences_relationships ()
 
def get_params (argv)
 
def getIgSummary (path)
 
def getMemcheckError (path)
 
def getSummaryInfo (database)
 
def process_igprof_dir (path, runinfo)
 
def process_memcheck_dir (path, runinfo)
 
def process_timesize_dir (path, runinfo)
 
def searchIgProfFiles (runinfo)
 
def searchMemcheckFiles (runinfo)
 
def searchTimeSizeFiles (runinfo)
 
def usage (argv)
 

Variables

 _TEST_RUN
 
 candles
 
 eventContentRules
 
 EventContents_OK
 
 file_name
 
 now
 
 output_dir
 
 p
 
 pileups
 
 release
 
 run_info
 
 Sequences_OK
 
 steps
 
 test_igprof_report_log
 
 test_memcheck_report_log
 
 test_timing_report_log
 
 xmldoc
 

Function Documentation

def cmsPerfSuiteHarvest._eventContent_DEBUG (   edm_report)
private

Definition at line 83 of file cmsPerfSuiteHarvest.py.

Referenced by process_timesize_dir().

83 def _eventContent_DEBUG(edm_report):
84  # for testing / information
85  EC_count = {}
86  if not _TEST_RUN:
87  # count the products in event-content's
88  for prod in edm_report:
90  for ec in ecs:
91  if ec not in EC_count:
92  EC_count[ec] = []
93  EC_count[ec].append(prod)
94  #print out the statistics
95  for (ec, prods) in EC_count.items():
96  print("==== %s EVENT CONTENT: have %d items, the listing is: ===" % (ec, len(prods)))
97  # list of products
98  print("\n *".join(["%(cpp_type)s_%(module_name)s_%(module_label)s" % prod for prod in prods]))
99 
100 
S & print(S &os, JobReport::InputFile const &f)
Definition: JobReport.cc:66
def List_ECs_forProduct(product)
static std::string join(char **cmd)
Definition: RemoteFile.cc:18
def _eventContent_DEBUG(edm_report)
def cmsPerfSuiteHarvest.assign_event_content_for_product (   product)
returns modified product by adding the event content relationship 

Definition at line 101 of file cmsPerfSuiteHarvest.py.

References join(), and parseEventContent.List_ECs_forProduct().

102  """ returns modified product by adding the event content relationship """
103 
104  if not _TEST_RUN:
105  product["event_content"] = ",".join(parseEventContent.List_ECs_forProduct(product))
106  return product
107 
108 
def assign_event_content_for_product(product)
def List_ECs_forProduct(product)
static std::string join(char **cmd)
Definition: RemoteFile.cc:18
def cmsPerfSuiteHarvest.doQuery (   query,
  database 
)

Definition at line 460 of file cmsPerfSuiteHarvest.py.

Referenced by getSummaryInfo().

460 def doQuery(query, database):
461  if os.path.exists("/usr/bin/sqlite3"):
462  sqlite="/usr/bin/sqlite3"
463  else:
464  sqlite="/afs/cern.ch/user/e/eulisse/www/bin/sqlite"
465  return getstatusoutput("echo '%s' | %s -separator @@@ %s" % (query, sqlite, database))
466 
467 #TimeSize
def doQuery(query, database)
def cmsPerfSuiteHarvest.exportIgProfReport (   path,
  igProfReport,
  igProfType,
  runinfo 
)

Definition at line 114 of file cmsPerfSuiteHarvest.py.

References mps_setup.append, edm.print(), python.rootplot.root2matplotlib.replace(), str, and pfDeepBoostedJetPreprocessParams_cfi.upper.

Referenced by process_igprof_dir().

114 def exportIgProfReport(path, igProfReport, igProfType, runinfo):
115  jobID = igProfReport["jobID"]
116  #print jobID
117  candleLong = os.path.split(path)[1].replace("_IgProf_Perf", "").replace("_IgProf_Mem", "").replace("_PU", "")
118  found = False
119  #print igProfType
120  if igProfType in runinfo['TestResults']:
121  for result in runinfo['TestResults'][igProfType]:
122  if candleLong == result["candle"] and jobID["pileup_type"] == result['pileup_type'] and jobID["conditions"] == result['conditions'] and jobID["event_content"] == result['event_content']:
123  jobID["candle"] = jobID["candle"].upper()
124  if "jobs" not in result:
125  result['jobs'] = []
126  result['jobs'].append(igProfReport)
127  found = True
128  break
129 
130  if not found:
131  print("============ (almost) ERROR: NOT FOUND THE ENTRY in cmsPerfSuite.log, exporting as separate entry ======== ")
132  print("JOB ID: %s " % str(jobID))
133  print(" ====================== ")
134  runinfo['unrecognized_jobs'].append(igProfReport)
135  #export_xml(xml_doc = xmldoc, **igProfReport)
136 
137 
def exportIgProfReport(path, igProfReport, igProfType, runinfo)
def replace(string, replacements)
S & print(S &os, JobReport::InputFile const &f)
Definition: JobReport.cc:66
#define str(s)
def cmsPerfSuiteHarvest.exportMemcheckReport (   path,
  MemcheckReport,
  runinfo 
)

Definition at line 165 of file cmsPerfSuiteHarvest.py.

References mps_setup.append, edm.print(), python.rootplot.root2matplotlib.replace(), and str.

Referenced by process_memcheck_dir().

165 def exportMemcheckReport(path, MemcheckReport, runinfo):
166  candleLong = os.path.split(path)[1].replace("_Memcheck", "").replace("_PU", "")
167  jobID = MemcheckReport["jobID"]
168 
169  #search for a run Test to which could belong our JOB
170  found = False
171  if 'Memcheck' in runinfo['TestResults']:
172  for result in runinfo['TestResults']['Memcheck']:
173  #print result
174  #print jobID
175  """ If this is the testResult which fits Memcheck job """
176  #TODO: we do not check the step when assigning because of the different names, check if this is really OK. make a decision which step name to use later, long or short one
177  #and jobID["step"] in result['steps'].split(parserPerfsuiteMetadata._LINE_SEPARATOR)
178  if result['candle'] == candleLong and jobID["pileup_type"] == result['pileup_type'] and jobID["conditions"] == result['conditions'] and jobID["event_content"] == result['event_content']:
179  #print result
180  if "jobs" not in result:
181  result['jobs'] = []
182  result['jobs'].append(MemcheckReport)
183  found = True
184  break
185 
186  if not found:
187  print("============ (almost) ERROR: NOT FOUND THE ENTRY in cmsPerfSuite.log, exporting as separate entry ======== ")
188  print("JOB ID: %s " % str(jobID))
189  print(" ====================== ")
190  runinfo['unrecognized_jobs'].append(MemcheckReport)
191 
def replace(string, replacements)
S & print(S &os, JobReport::InputFile const &f)
Definition: JobReport.cc:66
#define str(s)
def exportMemcheckReport(path, MemcheckReport, runinfo)
def cmsPerfSuiteHarvest.exportSequences ( )
Exports the sequences to XML Doc 

Definition at line 515 of file cmsPerfSuiteHarvest.py.

References get_modules_sequences_relationships(), edm.print(), and cmssw_exportdb_xml.xml_export_Sequences().

516  """ Exports the sequences to XML Doc """
517  try:
518  env_cmssw_version = os.environ["CMSSW_VERSION"]
519  except KeyError:
520  print("<<<<< ====== Error: cannot get CMSSW version [just integrity check for sequences]. \
521  Is the CMSSW environment initialized? (use cmsenv) ==== >>>>")
522  env_cmssw_version = None
523 
524  print(" ==== exporting the sequences. loading files for currently loaded CMSSW version: %s, while the CMSSW we are currently harversting is %s ===" %(env_cmssw_version, release))
525  xml_export_Sequences(xml_doc = xmldoc, sequences = get_modules_sequences_relationships(), release=release)
526 
527 
528 
def xml_export_Sequences(xml_doc, sequences, release)
S & print(S &os, JobReport::InputFile const &f)
Definition: JobReport.cc:66
def cmsPerfSuiteHarvest.exportTimeSizeJob (   path,
  timeSizeReport,
  runinfo 
)

Definition at line 138 of file cmsPerfSuiteHarvest.py.

References mps_setup.append, edm.print(), python.rootplot.root2matplotlib.replace(), and str.

Referenced by process_timesize_dir().

138 def exportTimeSizeJob(path, timeSizeReport, runinfo):
139  candleLong = os.path.split(path)[1].replace("_TimeSize", "").replace("_PU", "")
140  jobID = timeSizeReport["jobID"]
141 
142  #search for a run Test to which could belong our JOB
143  found = False
144  if 'TimeSize' in runinfo['TestResults']:
145  for result in runinfo['TestResults']['TimeSize']:
146  #print result
147  """ If this is the testResult which fits TimeSize job """
149  #TODO: we do not check the step when assigning because of the different names, check if this is really OK. make a decision which step name to use later, long or short one
149  #and jobID["step"] in result['steps'].split(parserPerfsuiteMetadata._LINE_SEPARATOR)
150  if result['candle'] == candleLong and jobID["pileup_type"] == result['pileup_type'] and jobID["conditions"] == result['conditions'] and jobID["event_content"] == result['event_content']:
151  #print result
152  if "jobs" not in result:
153  result['jobs'] = []
154  result['jobs'].append(timeSizeReport)
155  found = True
156  break
157 
158  if not found:
159  print("============ (almost) ERROR: NOT FOUND THE ENTRY in cmsPerfSuite.log, exporting as separate entry ======== ")
160  print("JOB ID: %s " % str(jobID))
161  print(" ====================== ")
162  runinfo['unrecognized_jobs'].append(timeSizeReport)
163  #export_xml(xml_doc = xmldoc, **timeSizeReport)
164 
def replace(string, replacements)
def exportTimeSizeJob(path, timeSizeReport, runinfo)
S & print(S &os, JobReport::InputFile const &f)
Definition: JobReport.cc:66
#define str(s)
def cmsPerfSuiteHarvest.get_modules_sequences_relationships ( )

Definition at line 109 of file cmsPerfSuiteHarvest.py.

References ModuleToSequenceAssign.assignModulesToSeqs(), and join().

Referenced by exportSequences().

110  (sequenceWithModules, sequenceWithModulesString) =ModuleToSequenceAssign.assignModulesToSeqs()
111  return [{"name": seq, "modules": ",".join(modules)} for (seq, modules) in sequenceWithModulesString.items()]
112 
113 
static std::string join(char **cmd)
Definition: RemoteFile.cc:18
def cmsPerfSuiteHarvest.get_params (   argv)
Returns the version of CMSSW to be used which it is taken from:
* command line parameter or 
* environment variable 
in case of error returns None

    And also the directory to put the xml files to: if none --> returns ""
try to get the version for command line argument 

Definition at line 46 of file cmsPerfSuiteHarvest.py.

References edm.print().

46 def get_params(argv):
47  """
48  Returns the version of CMSSW to be used which it is taken from:
49  * command line parameter or
50  * environment variable
51  in case of error returns None
52 
53  And also the directory to put the xml files to: if none --> returns ""
54  """
55 
56  """ try to get the version for command line argument """
57  #print argv
58  #FIXME: this should be rewritten using getopt properly
59  version = None
60  #xml_dir = "cmsperfvm:/data/projects/conf/PerfSuiteDB/xml_dropbox" #Set this as default (assume change in write_xml to write to remote machines)
61  #NB write_xml is in Validation/Performance/python/cmssw_exportdb_xml.py
62  #Setting the default to write to a local directory:
63  xml_dir="PerfSuiteDBData"
64  try:
65  opts, args = getopt.getopt(argv[1:], "v:", ["version=", "outdir="])
66  except getopt.GetoptError as e:
67  print(e)
68  for opt, arg in opts:
69  if opt in ("-v", "--version"):
70  version = arg
71  if opt == "--outdir":
72  xml_dir = arg
73 
74  """ if not get it from environment string """
75  if not version:
76  try:
77  version = os.environ["CMSSW_VERSION"]
78  except KeyError:
79  pass
80 
81  return (version, xml_dir)
82 
S & print(S &os, JobReport::InputFile const &f)
Definition: JobReport.cc:66
def cmsPerfSuiteHarvest.getIgSummary (   path)

Definition at line 417 of file cmsPerfSuiteHarvest.py.

References getSummaryInfo(), and split.

Referenced by process_igprof_dir().

417 def getIgSummary(path):
418  igresult = []
419  globbed = glob.glob(os.path.join(path, "*.sql3"))
420 
421  for f in globbed:
422  #print f
423  profileInfo = getSummaryInfo(f)
424  if not profileInfo:
425  continue
426  cumCounts, cumCalls = profileInfo
427  dump, architecture, release, rest = f.rsplit("/", 3)
428  candle, sequence, pileup, conditions, process, counterType, events = rest.split("___")
429  events = events.replace(".sql3", "")
430  igresult.append({"counter_type": counterType, "event": events, "cumcounts": cumCounts, "cumcalls": cumCalls})
431 
432  #fail-safe(nasty) fix for the diff (even if it gets fixed in the sqls, won't screw this up again...)
433  for ig in igresult:
434  if 'diff' in ig['event']:
435  eventLast,eventOne = ig['event'].split('_diff_')
436  for part in igresult:
437  if part['counter_type'] == ig['counter_type'] and part['event'] == eventOne:
438  cumcountsOne = part['cumcounts']
439  cumcallsOne = part['cumcalls']
440  if part['counter_type'] == ig['counter_type'] and part['event'] == eventLast:
441  cumcountsLast = part['cumcounts']
442  cumcallsLast = part['cumcalls']
443  ig['cumcounts'] = cumcountsLast - cumcountsOne
444  ig['cumcalls'] = cumcallsLast - cumcallsOne
445 
446  return igresult
447 
def getSummaryInfo(database)
double split
Definition: MVATrainer.cc:139
def cmsPerfSuiteHarvest.getMemcheckError (   path)

Definition at line 336 of file cmsPerfSuiteHarvest.py.

References createfilelist.int.

Referenced by process_memcheck_dir().

337  globbed = glob.glob(os.path.join(path, "*memcheck_vlgd.xml"))
338 
339  errnum = 0
340 
341  for f in globbed:
342  #print f
343  cmd = "grep '<error>' "+f+ " | wc -l "
344  p = os.popen(cmd, 'r')
345  errnum += int(p.readlines()[0])
346 
347  return errnum
348 
349 
def cmsPerfSuiteHarvest.getSummaryInfo (   database)

Definition at line 448 of file cmsPerfSuiteHarvest.py.

References doQuery(), objects.autophobj.float, and createfilelist.int.

Referenced by getIgSummary().

448 def getSummaryInfo(database):
449  summary_query="""SELECT counter, total_count, total_freq, tick_period
450  FROM summary;"""
451  error, output = doQuery(summary_query, database)
452  if error or not output or output.count("\n") > 1:
453  return None
454  counter, total_count, total_freq, tick_period = output.split("@@@")
455  if counter == "PERF_TICKS":
456  return float(tick_period) * float(total_count), int(total_freq)
457  else:
458  return int(total_count), int(total_freq)
459 
def doQuery(query, database)
def getSummaryInfo(database)
def cmsPerfSuiteHarvest.process_igprof_dir (   path,
  runinfo 
)

Definition at line 350 of file cmsPerfSuiteHarvest.py.

References cmsPerfStripChart.dict, Exception, exportIgProfReport(), getIgSummary(), FileNamesHelper.getJobID_fromIgProfLogName(), list(), edm.print(), FileNamesHelper.read_SimulationCandles(), python.rootplot.root2matplotlib.replace(), str, and ComparisonHelper.zip().

Referenced by searchIgProfFiles().

350 def process_igprof_dir(path, runinfo):
351  global release,event_content,conditions
352  """ if the release is not provided explicitly we take it from the Simulation candles file """
353  if (not release):
354  release_fromlogfile = read_SimulationCandles(path)
355  release = release_fromlogfile
356  print("release from simulation candles: %s" % release)
357 
358  if (not release):
359  # TODO: raise exception!
360  raise Exception("the release was not found!")
361 
362  """ process the IgProf sql3 files """
363 
364  # get the file list
365  files = os.listdir(path)
366  igprof_files = [os.path.join(path, f) for f in files
367  if test_igprof_report_log.search(f)
368  and os.path.isfile(os.path.join(path, f)) ]
369 
370  if len(igprof_files) == 0: # No files...
371  print("No igprof files found!")
372  else:
373  for file in igprof_files:
374  jobID = getJobID_fromIgProfLogName(file)
375 
376  (candle, step, pileup_type, conditions, event_content) = jobID
377 
378  print("jobID: %s" % str(jobID))
379  jobID = dict(list(zip(("candle", "step", "pileup_type", "conditions", "event_content"), jobID)))
380 
381  print("Dictionary based jobID %s: " % str(jobID))
382 
383  igProfType = path.split("/")[-1].replace("TTbar_", "").replace("MinBias_", "").replace("PU_", "")
384 
385  #if any of jobID fields except (isPILEUP) is empty we discard the job as all those are the jobID keys and we must have them
386  discard = len([key for key, value in jobID.items() if key != "pileup_type" and not value])
387  if discard:
388  print(" ====================== The job HAS BEEN DISCARDED =============== ")
389  print(" NOT ALL DATA WAS AVAILABLE ")
390  print(" JOB ID = %s " % str(jobID))
391  print(" ======================= end ===================================== ")
392  continue
393 
394  # add to the list to generate the readable filename :)
395  steps[step] = 1
396  candles[candle.upper()] = 1
397  if pileup_type=="":
398  pileups["NoPileUp"]=1
399  else:
400  pileups[pileup_type] = 1
401 
402  igs = getIgSummary(path)
403  #print igs
404 
405  igProfReport = {
406  "jobID": jobID,
407  "release": release,
408  "igprof_result": igs,
409  "metadata": {"testname": igProfType},
410  }
411 
412  # print igProfReport
413  # export to xml: actually exporting gets suspended and put into runinfo
414  exportIgProfReport(path, igProfReport, igProfType, runinfo)
415 
416 #get IgProf summary information from the sql3 files
def process_igprof_dir(path, runinfo)
def read_SimulationCandles(path)
def exportIgProfReport(path, igProfReport, igProfType, runinfo)
def replace(string, replacements)
S & print(S &os, JobReport::InputFile const &f)
Definition: JobReport.cc:66
OutputIterator zip(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2, OutputIterator result, Compare comp)
def getJobID_fromIgProfLogName(logfile_name)
#define str(s)
How EventSelector::AcceptEvent() decides whether to accept an event for output otherwise it is excluding the probing of A single or multiple positive and the trigger will pass if any such matching triggers are PASS or EXCEPTION[A criterion thatmatches no triggers at all is detected and causes a throw.] A single negative with an expectation of appropriate bit checking in the decision and the trigger will pass if any such matching triggers are FAIL or EXCEPTION A wildcarded negative criterion that matches more than one trigger in the trigger list("!*","!HLTx*"if it matches 2 triggers or more) will accept the event if all the matching triggers are FAIL.It will reject the event if any of the triggers are PASS or EXCEPTION(this matches the behavior of"!*"before the partial wildcard feature was incorporated).Triggers which are in the READY state are completely ignored.(READY should never be returned since the trigger paths have been run
def cmsPerfSuiteHarvest.process_memcheck_dir (   path,
  runinfo 
)

Definition at line 274 of file cmsPerfSuiteHarvest.py.

References cmsPerfStripChart.dict, Exception, exportMemcheckReport(), FileNamesHelper.getJobID_fromMemcheckLogName(), getMemcheckError(), list(), edm.print(), FileNamesHelper.read_SimulationCandles(), str, and ComparisonHelper.zip().

Referenced by searchMemcheckFiles().

274 def process_memcheck_dir(path, runinfo):
275  global release,event_content,conditions
276  """ if the release is not provided explicitly we take it from the Simulation candles file """
277  if (not release):
278  release_fromlogfile = read_SimulationCandles(path)
279  release = release_fromlogfile
280  print("release from simulation candles: %s" % release)
281 
282  if (not release):
283  # TODO: raise exception!
284  raise Exception("the release was not found!")
285 
286  """ process the vlgd files """
287 
288  # get the file list
289  files = os.listdir(path)
290  memcheck_files = [os.path.join(path, f) for f in files
291  if test_memcheck_report_log.search(f)
292  and os.path.isfile(os.path.join(path, f)) ]
293 
294  if len(memcheck_files) == 0: # Fast protection for old runs, where the _vlgd files is not created...
295  print("No _vlgd files found!")
296  else:
297  for file in memcheck_files:
298  jobID = getJobID_fromMemcheckLogName(os.path.join(path, file))
299 
300  (candle, step, pileup_type, conditions, event_content) = jobID
301 
302  print("jobID: %s" % str(jobID))
303  jobID = dict(list(zip(("candle", "step", "pileup_type", "conditions", "event_content"), jobID)))
304 
305  print("Dictionary based jobID %s: " % str(jobID))
306 
307  #if any of jobID fields except (isPILEUP) is empty we discard the job as all those are the jobID keys and we must have them
308  discard = len([key for key, value in jobID.items() if key != "pileup_type" and not value])
309  if discard:
310  print(" ====================== The job HAS BEEN DISCARDED =============== ")
311  print(" NOT ALL DATA WAS AVAILABLE ")
312  print(" JOB ID = %s " % str(jobID))
313  print(" ======================= end ===================================== ")
314  continue
315 
316  # add to the list to generate the readable filename :)
317  steps[step] = 1
318  candles[candle.upper()] = 1
319  if pileup_type=="":
320  pileups["NoPileUp"]=1
321  else:
322  pileups[pileup_type] = 1
323 
324  memerror = getMemcheckError(path)
325 
326  MemcheckReport = {
327  "jobID": jobID,
328  "release": release,
329  "memcheck_errors": {"error_num": memerror},
330  "metadata": {"testname": "Memcheck"},
331  }
332 
333  # export to xml: actually exporting gets suspended and put into runinfo
334  exportMemcheckReport(path, MemcheckReport, runinfo)
335 
def read_SimulationCandles(path)
S & print(S &os, JobReport::InputFile const &f)
Definition: JobReport.cc:66
OutputIterator zip(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2, OutputIterator result, Compare comp)
def process_memcheck_dir(path, runinfo)
def getJobID_fromMemcheckLogName(logfile_name)
#define str(s)
How EventSelector::AcceptEvent() decides whether to accept an event for output otherwise it is excluding the probing of A single or multiple positive and the trigger will pass if any such matching triggers are PASS or EXCEPTION[A criterion thatmatches no triggers at all is detected and causes a throw.] A single negative with an expectation of appropriate bit checking in the decision and the trigger will pass if any such matching triggers are FAIL or EXCEPTION A wildcarded negative criterion that matches more than one trigger in the trigger list("!*","!HLTx*"if it matches 2 triggers or more) will accept the event if all the matching triggers are FAIL.It will reject the event if any of the triggers are PASS or EXCEPTION(this matches the behavior of"!*"before the partial wildcard feature was incorporated).Triggers which are in the READY state are completely ignored.(READY should never be returned since the trigger paths have been run
def exportMemcheckReport(path, MemcheckReport, runinfo)
def cmsPerfSuiteHarvest.process_timesize_dir (   path,
  runinfo 
)

Definition at line 192 of file cmsPerfSuiteHarvest.py.

References _eventContent_DEBUG(), cmsPerfStripChart.dict, Exception, exportTimeSizeJob(), parserEdmSize.getEdmReport(), FileNamesHelper.getJobID_fromTimeReportLogName(), FileNamesHelper.getRootFileSize(), list(), parserTimingReport.loadTimeLog(), genParticles_cff.map, edm.print(), parserTimingReport.processModuleTimeLogData(), FileNamesHelper.read_ConfigurationFromSimulationCandles(), FileNamesHelper.read_SimulationCandles(), str, and ComparisonHelper.zip().

Referenced by searchTimeSizeFiles().

192 def process_timesize_dir(path, runinfo):
193  global release,event_content,conditions
194  """ if the release is not provided explicitly we take it from the Simulation candles file """
195  if (not release):
196  release_fromlogfile = read_SimulationCandles(path)
197  release = release_fromlogfile
198  print("release from simulation candles: %s" % release)
199 
200  if (not release):
201  # TODO: raise exception!
202  raise Exception("the release was not found!")
203 
204 
205  """ process the TimingReport log files """
206 
207  # get the file list
208  files = os.listdir(path)
209  timing_report_files = [os.path.join(path, f) for f in files
210  if test_timing_report_log.search(f)
211  and os.path.isfile(os.path.join(path, f)) ]
212 
213  # print timing_report_files
214  for timelog_f in timing_report_files:
215  print("\nProcessing file: %s" % timelog_f)
216  print("------- ")
217 
218  jobID = getJobID_fromTimeReportLogName(os.path.join(path, timelog_f))
219  print("jobID: %s" % str(jobID))
220  (candle, step, pileup_type, conditions, event_content) = jobID
221  jobID = dict(list(zip(("candle", "step", "pileup_type", "conditions", "event_content"), jobID)))
222  print("Dictionary based jobID %s: " % str(jobID))
223 
224  #if any of jobID fields except (isPILEUP) is empty we discard the job as all those are the jobID keys and we must have them
225  discard = len([key for key, value in jobID.items() if key != "pileup_type" and not value])
226  if discard:
227  print(" ====================== The job HAS BEEN DISCARDED =============== ")
228  print(" NOT ALL DATA WAS AVAILABLE ")
229  print(" JOB ID = %s " % str(jobID))
230  print(" ======================= end ===================================== ")
231  continue
232 
233  # TODO: automaticaly detect type of report file!!!
234  (mod_timelog, evt_timelog, rss_data, vsize_data) =loadTimeLog(timelog_f)
235 
236  mod_timelog= processModuleTimeLogData(mod_timelog, groupBy = "module_name")
237  print("Number of modules grouped by (module_label+module_name): %s" % len(mod_timelog))
238 
239  # add to the list to generate the readable filename :)
240  steps[step] = 1
241  candles[candle] = 1
242  if pileup_type=="":
243  pileups["NoPileUp"]=1
244  else:
245  pileups[pileup_type] = 1
246 
247  # root file size (number)
248  root_file_size = getRootFileSize(path = path, candle = candle, step = step.replace(':', '='))
249  # number of events
250  num_events = read_ConfigurationFromSimulationCandles(path = path, step = step, is_pileup = pileup_type)["num_events"]
251 
252  #EdmSize
253  edm_report = parserEdmSize.getEdmReport(path = path, candle = candle, step = step)
254  if edm_report != False:
255  try:
256  # add event content data
257  edm_report = map(assign_event_content_for_product, edm_report)
258  # for testing / information
259  _eventContent_DEBUG(edm_report)
260  except Exception as e:
261  print(e)
262 
263  timeSizeReport = {
264  "jobID":jobID,
265  "release": release,
266  "timelog_result": (mod_timelog, evt_timelog, rss_data, vsize_data),
267  "metadata": {"testname": "TimeSize", "root_file_size": root_file_size, "num_events": num_events},
268  "edmSize_result": edm_report
269  }
270 
271  # export to xml: actually exporting gets suspended and put into runinfo
272  exportTimeSizeJob(path, timeSizeReport, runinfo)
273 
def read_SimulationCandles(path)
def exportTimeSizeJob(path, timeSizeReport, runinfo)
S & print(S &os, JobReport::InputFile const &f)
Definition: JobReport.cc:66
def getEdmReport(path, candle, step)
OutputIterator zip(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, InputIterator2 last2, OutputIterator result, Compare comp)
def loadTimeLog(log_filename, maxsize_rad=0)
def processModuleTimeLogData(modules_timelog, groupBy="module_name")
mod_data["stats"] =calc_MinMaxAvgRMS(f_time = lambda x: x["time"], f_evt_num = lambda x: x["event_num...
def getRootFileSize(path, candle, step)
def _eventContent_DEBUG(edm_report)
def read_ConfigurationFromSimulationCandles(path, step, is_pileup)
def process_timesize_dir(path, runinfo)
def getJobID_fromTimeReportLogName(logfile_name)
#define str(s)
How EventSelector::AcceptEvent() decides whether to accept an event for output otherwise it is excluding the probing of A single or multiple positive and the trigger will pass if any such matching triggers are PASS or EXCEPTION[A criterion thatmatches no triggers at all is detected and causes a throw.] A single negative with an expectation of appropriate bit checking in the decision and the trigger will pass if any such matching triggers are FAIL or EXCEPTION A wildcarded negative criterion that matches more than one trigger in the trigger list("!*","!HLTx*"if it matches 2 triggers or more) will accept the event if all the matching triggers are FAIL.It will reject the event if any of the triggers are PASS or EXCEPTION(this matches the behavior of"!*"before the partial wildcard feature was incorporated).Triggers which are in the READY state are completely ignored.(READY should never be returned since the trigger paths have been run
def cmsPerfSuiteHarvest.searchIgProfFiles (   runinfo)
so far we will use the current dir to search in 

Definition at line 500 of file cmsPerfSuiteHarvest.py.

References edm.print(), and process_igprof_dir().

500 def searchIgProfFiles(runinfo):
501  """ so far we will use the current dir to search in """
502  path = os.getcwd()
503  #print path
504  print('full path =', os.path.abspath(path))
505 
506  files = os.listdir(path)
507 
508  test_IgProfDirs = re.compile("_IgProf(.*)$", re.IGNORECASE)
509  igprof_dirs = [os.path.join(path, f) for f in files if test_IgProfDirs.search(f) and os.path.isdir(os.path.join(path, f))]
510 
511  for igprof_dir in igprof_dirs:
512  print(igprof_dir)
513  process_igprof_dir(igprof_dir, runinfo)
514 
def process_igprof_dir(path, runinfo)
S & print(S &os, JobReport::InputFile const &f)
Definition: JobReport.cc:66
def searchIgProfFiles(runinfo)
def cmsPerfSuiteHarvest.searchMemcheckFiles (   runinfo)
so far we will use the current dir to search in 

Definition at line 484 of file cmsPerfSuiteHarvest.py.

References edm.print(), and process_memcheck_dir().

484 def searchMemcheckFiles(runinfo):
485  """ so far we will use the current dir to search in """
486  path = os.getcwd()
487  #print path
488  print('full path =', os.path.abspath(path))
489 
490  files = os.listdir(path)
491 
492  test_MemcheckDirs = re.compile("_Memcheck(.*)$", re.IGNORECASE)
493  memcheck_dirs = [os.path.join(path, f) for f in files if test_MemcheckDirs.search(f) and os.path.isdir(os.path.join(path, f))]
494 
495  for memcheck_dir in memcheck_dirs:
496  print(memcheck_dir)
497  process_memcheck_dir(memcheck_dir, runinfo)
498 
499 #IgProf
def searchMemcheckFiles(runinfo)
S & print(S &os, JobReport::InputFile const &f)
Definition: JobReport.cc:66
def process_memcheck_dir(path, runinfo)
def cmsPerfSuiteHarvest.searchTimeSizeFiles (   runinfo)
so far we will use the current dir to search in 

Definition at line 468 of file cmsPerfSuiteHarvest.py.

References edm.print(), and process_timesize_dir().

468 def searchTimeSizeFiles(runinfo):
469  """ so far we will use the current dir to search in """
470  path = os.getcwd()
471  #print path
472  print('full path =', os.path.abspath(path))
473 
474  files = os.listdir(path)
475 
476  test_timeSizeDirs = re.compile("_TimeSize$", re.IGNORECASE)
477  timesize_dirs = [os.path.join(path, f) for f in files if test_timeSizeDirs.search(f) and os.path.isdir(os.path.join(path, f))]
478 
479  for timesize_dir in timesize_dirs:
480  # print timesize_dir
481  process_timesize_dir(timesize_dir, runinfo)
482 
483 #Memcheck
S & print(S &os, JobReport::InputFile const &f)
Definition: JobReport.cc:66
def searchTimeSizeFiles(runinfo)
def process_timesize_dir(path, runinfo)
def cmsPerfSuiteHarvest.usage (   argv)

Definition at line 32 of file cmsPerfSuiteHarvest.py.

32 def usage(argv):
33  script = argv[0]
34  return """
35  Usage: %(script)s [-v cmssw_version] [--version=cmssw_version]
36 
37  if the cmssw version is in the system's environment (after running cmsenv):
38  $ %(script)s
39 
40  otherwise one must specify the cmssw version:
41  $ %(script)s --version=CMSSW_3_2_0
42  $ %(script)s -v CMSSW_3_2_0
43 
44  """ % locals()
45 

Variable Documentation

cmsPerfSuiteHarvest._TEST_RUN
private

Definition at line 18 of file cmsPerfSuiteHarvest.py.

cmsPerfSuiteHarvest.candles

Definition at line 29 of file cmsPerfSuiteHarvest.py.

cmsPerfSuiteHarvest.eventContentRules

Definition at line 584 of file cmsPerfSuiteHarvest.py.

cmsPerfSuiteHarvest.EventContents_OK

Definition at line 550 of file cmsPerfSuiteHarvest.py.

cmsPerfSuiteHarvest.file_name

Definition at line 594 of file cmsPerfSuiteHarvest.py.

Referenced by HcalLutManager.create_lut_loader(), SiStripHistoPlotter.createStaticPlot(), DTTPGLutFile.open(), L1TriggerLutFile.open(), TEcnaRead.ReadAverageHighFrequencyNoise(), TEcnaRead.ReadAverageLowFrequencyNoise(), TEcnaRead.ReadAverageMeanCorrelationsBetweenSamples(), TEcnaRead.ReadAveragePedestals(), TEcnaRead.ReadAverageSigmaOfCorrelationsBetweenSamples(), TEcnaRead.ReadAverageTotalNoise(), TEcnaRead.ReadCorrelationsBetweenSamples(), TEcnaRead.ReadCovariancesBetweenSamples(), TEcnaRead.ReadHighFrequencyCorrelationsBetweenChannels(), TEcnaRead.ReadHighFrequencyCovariancesBetweenChannels(), TEcnaRead.ReadHighFrequencyMeanCorrelationsBetweenStins(), TEcnaRead.ReadHighFrequencyNoise(), TEcnaRead.ReadLowFrequencyCorrelationsBetweenChannels(), TEcnaRead.ReadLowFrequencyCovariancesBetweenChannels(), TEcnaRead.ReadLowFrequencyMeanCorrelationsBetweenStins(), TEcnaRead.ReadLowFrequencyNoise(), TEcnaRead.ReadMeanCorrelationsBetweenSamples(), TEcnaRead.ReadNumberOfEventsForSamples(), TEcnaRead.ReadPedestals(), TEcnaRead.ReadRelevantCorrelationsBetweenSamples(), TEcnaRead.ReadRootFileHeader(), TEcnaRead.ReadSampleAdcValues(), TEcnaRead.ReadSampleAdcValuesSameFile(), TEcnaRead.ReadSampleMeans(), TEcnaRead.ReadSampleSigmas(), TEcnaRead.ReadSigmaOfCorrelationsBetweenSamples(), TEcnaRead.ReadStinNumbers(), TEcnaRead.ReadTotalNoise(), SummaryOutputProducer.writeCSV_module(), SummaryOutputProducer.writeCSV_trigger(), SummaryOutputProducer.writeHistograms(), and TEcnaRun.WriteRootFile().

cmsPerfSuiteHarvest.now

Definition at line 591 of file cmsPerfSuiteHarvest.py.

Referenced by edm::CountAndLimit.add(), lumi::RevisionDML.addEntry(), lumi::RevisionDML.addRevision(), lumi::RevisionDML.addRunToCurrentHFDataTag(), dqmservices::DQMFileIterator.advanceToLumi(), HcalLuttoDB.beginJob(), RawEventOutputModuleForBU< Consumer >.beginLuminosityBlock(), edm::service::CondorStatusService.beginPost(), condbon.cdbon_write(), XrdAdaptor::RequestManager.checkSourcesImpl(), dqmservices::DQMFileIterator.collect(), edm::service::ConcurrentModuleTimer.ConcurrentModuleTimer(), dqmservices::DQMMonitoringService.DQMMonitoringService(), DTHVStatusHandler.dumpSnapshot(), XrdAdaptor::XrdReadStatistics.elapsedNS(), dqmservices::DQMMonitoringService.evLumi(), DaqFakeReader.fillRawData(), Fit.getFitQuality(), XrdAdaptor::RequestManager.getInstance(), DTHVStatusHandler.getNewObjects(), XrdAdaptor::RequestManager.handle(), XrdAdaptor::RequestManager::OpenHandler.HandleResponseWithHosts(), HtrXmlPatternWriter.HtrXmlPatternWriter(), cond::persistency.importPayload(), edm::InputSource.InputSource(), cond::persistency::IOVEditor.insert(), edm::service.isProcessWideService(), edm::service::CondorStatusService.lastUpdate(), LHCInfoPopConSourceHandler.LHCInfoPopConSourceHandler(), DQMNet.logme(), edm::InputSource.lumiLimitReached(), FastTimerService::Measurement.measure(), FastTimerService::Measurement.measure_and_accumulate(), FastTimerService::Measurement.measure_and_store(), Fit.multiplyMatrices(), operator<<(), dqmservices::DQMMonitoringService.outputLumiUpdate(), dqmservices::DQMMonitoringService.outputUpdate(), edm::service::StallMonitor.postBeginJob(), ThroughputService.postEvent(), edm::service::StallMonitor.postEvent(), edm::service::StallMonitor.postEventReadFromSource(), edm::service::StallMonitor.postModuleEvent(), edm::service::StallMonitor.postModuleEventAcquire(), edm::service::StallMonitor.postModuleEventPrefetching(), edm::service::StallMonitor.postModuleGlobalTransition(), edm::service::StallMonitor.postModuleStreamTransition(), 
edm::service::StallMonitor.postSourceEvent(), edm::service::StallMonitor.preEvent(), edm::service::StallMonitor.preEventReadFromSource(), edm::service::StallMonitor.preModuleEvent(), edm::service::StallMonitor.preModuleEventAcquire(), edm::service::StallMonitor.preModuleGlobalTransition(), edm::service::StallMonitor.preModuleStreamTransition(), ThroughputService.preSourceEvent(), edm::service::StallMonitor.preSourceEvent(), Fit.PropagateErrors(), XrdFile.readv(), FedRawDataInputSource.readWorker(), XrdAdaptor::RequestManager.requestFailure(), dqmservices::DQMFileIterator.reset(), lumi::CMSRunSummaryDummy2DB.retrieveData(), DQMNet.run(), ALIUtils.set_time_now(), XrdAdaptor::Source.setLastDowngrade(), edm::TimeoutPoolOutputModule.shouldWeCloseFile(), XrdAdaptor::Source.Source(), edm::service::ConcurrentModuleTimer.start(), edm::service::ConcurrentModuleTimer.stop(), StorageAccount::Stamp.tick(), dqmservices::DQMMonitoringService.tryUpdate(), edm::service::CondorStatusService.update(), dqmservices::DQMFileIterator.update_state(), SiStripPayloadHandler< SiStripPayload >.updateConfigMap(), and edm::service::CondorStatusService.updateImpl().

cmsPerfSuiteHarvest.output_dir

Definition at line 534 of file cmsPerfSuiteHarvest.py.

Referenced by L1TCSCTFClient.dqmEndJob(), and L1TCSCTFClient.initialize().

cmsPerfSuiteHarvest.p

Definition at line 544 of file cmsPerfSuiteHarvest.py.

cmsPerfSuiteHarvest.pileups

Definition at line 30 of file cmsPerfSuiteHarvest.py.

cmsPerfSuiteHarvest.release

Definition at line 27 of file cmsPerfSuiteHarvest.py.

cmsPerfSuiteHarvest.run_info

Definition at line 545 of file cmsPerfSuiteHarvest.py.

cmsPerfSuiteHarvest.Sequences_OK

Definition at line 549 of file cmsPerfSuiteHarvest.py.

cmsPerfSuiteHarvest.steps

Definition at line 28 of file cmsPerfSuiteHarvest.py.

cmsPerfSuiteHarvest.test_igprof_report_log

Definition at line 22 of file cmsPerfSuiteHarvest.py.

cmsPerfSuiteHarvest.test_memcheck_report_log

Definition at line 23 of file cmsPerfSuiteHarvest.py.

cmsPerfSuiteHarvest.test_timing_report_log

Definition at line 21 of file cmsPerfSuiteHarvest.py.

cmsPerfSuiteHarvest.xmldoc

Definition at line 26 of file cmsPerfSuiteHarvest.py.

Referenced by emtf::Tree.loadFromXML(), and emtf::Tree.saveToXML().