CMS 3D CMS Logo

 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Properties Friends Macros Pages
Functions | Variables
cmsPerfSuiteHarvest Namespace Reference

Functions

def _eventContent_DEBUG
 
def assign_event_content_for_product
 
def doQuery
 
def exportIgProfReport
 
def exportMemcheckReport
 
def exportSequences
 
def exportTimeSizeJob
 
def get_modules_sequences_relationships
 
def get_params
 
def getIgSummary
 
def getMemcheckError
 
def getSummaryInfo
 
def process_igprof_dir
 
def process_memcheck_dir
 
def process_timesize_dir
 
def searchIgProfFiles
 
def searchMemcheckFiles
 
def searchTimeSizeFiles
 
def usage
 

Variables

 _TEST_RUN = False
 
dictionary candles = {}
 
tuple eventContentRules = parseEventContent.getTxtEventContentRules()
 
 EventContents_OK = False
 
string file_name = "%s___%s___%s___%s___%s___%s___%s.xml"
 
tuple now = datetime.datetime.now()
 
tuple p = parserPerfsuiteMetadata(os.getcwd())
 
dictionary pileups = {}
 
 release = None
 
tuple run_info = p.parseAll()
 
 Sequences_OK = False
 
dictionary steps = {}
 
tuple test_igprof_report_log = re.compile("^(.*)(IgProfMem|IgProfPerf)\.gz", re.IGNORECASE)
 
tuple test_memcheck_report_log = re.compile("^(.*)memcheck_vlgd.xml", re.IGNORECASE)
 
tuple test_timing_report_log = re.compile("TimingReport.log$", re.IGNORECASE)
 
tuple xmldoc = minidom.Document()
 

Function Documentation

def cmsPerfSuiteHarvest._eventContent_DEBUG (   edm_report)
private

Definition at line 82 of file cmsPerfSuiteHarvest.py.

References python.multivaluedict.append(), join(), and parseEventContent.List_ECs_forProduct().

Referenced by process_timesize_dir().

82 
83 def _eventContent_DEBUG(edm_report):
84  # for testing / information
85  EC_count = {}
86  if not _TEST_RUN:
87  # count the products in event-content's
88  for prod in edm_report:
90  for ec in ecs:
91  if not EC_count.has_key(ec):
92  EC_count[ec] = []
93  EC_count[ec].append(prod)
94  #print out the statistics
95  for (ec, prods) in EC_count.items():
96  print "==== %s EVENT CONTENT: have %d items, the listing is: ===" % (ec, len(prods))
97  # list of products
98  print "\n *".join(["%(cpp_type)s_%(module_name)s_%(module_label)s" % prod for prod in prods])
99 
static std::string join(char **cmd)
Definition: RemoteFile.cc:18
def cmsPerfSuiteHarvest.assign_event_content_for_product (   product)
returns modified product by adding the event content relationship 

Definition at line 100 of file cmsPerfSuiteHarvest.py.

References join(), and parseEventContent.List_ECs_forProduct().

102  """ returns modified product by adding the event content relationship """
103 
104  if not _TEST_RUN:
105  product["event_content"] = ",".join(parseEventContent.List_ECs_forProduct(product))
106  return product
107 
static std::string join(char **cmd)
Definition: RemoteFile.cc:18
def cmsPerfSuiteHarvest.doQuery (   query,
  database 
)

Definition at line 459 of file cmsPerfSuiteHarvest.py.

Referenced by getSummaryInfo().

460 def doQuery(query, database):
461  if os.path.exists("/usr/bin/sqlite3"):
462  sqlite="/usr/bin/sqlite3"
463  else:
464  sqlite="/afs/cern.ch/user/e/eulisse/www/bin/sqlite"
465  return getstatusoutput("echo '%s' | %s -separator @@@ %s" % (query, sqlite, database))
466 
#TimeSize
def cmsPerfSuiteHarvest.exportIgProfReport (   path,
  igProfReport,
  igProfType,
  runinfo 
)

Definition at line 113 of file cmsPerfSuiteHarvest.py.

References python.multivaluedict.append(), python.rootplot.root2matplotlib.replace(), and pileupCalc.upper.

Referenced by process_igprof_dir().

114 def exportIgProfReport(path, igProfReport, igProfType, runinfo):
115  jobID = igProfReport["jobID"]
116  #print jobID
117  candleLong = os.path.split(path)[1].replace("_IgProf_Perf", "").replace("_IgProf_Mem", "").replace("_PU", "")
118  found = False
119  #print igProfType
120  if runinfo['TestResults'].has_key(igProfType):
121  for result in runinfo['TestResults'][igProfType]:
122  if candleLong == result["candle"] and jobID["pileup_type"] == result['pileup_type'] and jobID["conditions"] == result['conditions'] and jobID["event_content"] == result['event_content']:
123  jobID["candle"] = jobID["candle"].upper()
124  if not result.has_key("jobs"):
125  result['jobs'] = []
126  result['jobs'].append(igProfReport)
127  found = True
128  break
129 
130  if not found:
131  print "============ (almost) ERROR: NOT FOUND THE ENTRY in cmsPerfSuite.log, exporting as separate entry ======== "
132  print "JOB ID: %s " % str(jobID)
133  print " ====================== "
134  runinfo['unrecognized_jobs'].append(igProfReport)
135  #export_xml(xml_doc = xmldoc, **igProfReport)
136 
def cmsPerfSuiteHarvest.exportMemcheckReport (   path,
  MemcheckReport,
  runinfo 
)

Definition at line 164 of file cmsPerfSuiteHarvest.py.

References python.multivaluedict.append(), and python.rootplot.root2matplotlib.replace().

Referenced by process_memcheck_dir().

165 def exportMemcheckReport(path, MemcheckReport, runinfo):
166  candleLong = os.path.split(path)[1].replace("_Memcheck", "").replace("_PU", "")
167  jobID = MemcheckReport["jobID"]
168 
169  #search for a run Test to which our JOB could belong
170  found = False
171  if runinfo['TestResults'].has_key('Memcheck'):
172  for result in runinfo['TestResults']['Memcheck']:
173  #print result
174  #print jobID
175  """ If this is the testResult which fits Memcheck job """
176  #TODO: we do not check the step when assigning because of the different names, check if this is really OK. make a decision which step name to use later, long or short one
177  #and jobID["step"] in result['steps'].split(parserPerfsuiteMetadata._LINE_SEPARATOR)
178  if result['candle'] == candleLong and jobID["pileup_type"] == result['pileup_type'] and jobID["conditions"] == result['conditions'] and jobID["event_content"] == result['event_content']:
179  #print result
180  if not result.has_key("jobs"):
181  result['jobs'] = []
182  result['jobs'].append(MemcheckReport)
183  found = True
184  break
185 
186  if not found:
187  print "============ (almost) ERROR: NOT FOUND THE ENTRY in cmsPerfSuite.log, exporting as separate entry ======== "
188  print "JOB ID: %s " % str(jobID)
189  print " ====================== "
190  runinfo['unrecognized_jobs'].append(MemcheckReport)
def cmsPerfSuiteHarvest.exportSequences ( )
Exports the sequences to XML Doc 

Definition at line 514 of file cmsPerfSuiteHarvest.py.

References get_modules_sequences_relationships(), get_params(), and cmssw_exportdb_xml.xml_export_Sequences().

515 def exportSequences():
516  """ Exports the sequences to XML Doc """
517  try:
518  env_cmssw_version = os.environ["CMSSW_VERSION"]
519  except KeyError:
520  print "<<<<< ====== Error: cannot get CMSSW version [just integrity check for sequences]. \
521  Is the CMSSW environment initialized? (use cmsenv) ==== >>>>"
522  env_cmssw_version = None
523 
524  print " ==== exporting the sequences. loading files for currently loaded CMSSW version: %s, while the CMSSW we are currently harversting is %s ===" %(env_cmssw_version, release)
525  xml_export_Sequences(xml_doc = xmldoc, sequences = get_modules_sequences_relationships(), release=release)
526 
527 
def cmsPerfSuiteHarvest.exportTimeSizeJob (   path,
  timeSizeReport,
  runinfo 
)

Definition at line 137 of file cmsPerfSuiteHarvest.py.

References python.multivaluedict.append(), and python.rootplot.root2matplotlib.replace().

Referenced by process_timesize_dir().

138 def exportTimeSizeJob(path, timeSizeReport, runinfo):
139  candleLong = os.path.split(path)[1].replace("_TimeSize", "").replace("_PU", "")
140  jobID = timeSizeReport["jobID"]
141 
142  #search for a run Test to which our JOB could belong
143  found = False
144  if runinfo['TestResults'].has_key('TimeSize'):
145  for result in runinfo['TestResults']['TimeSize']:
146  #print result
147  """ If this is the testResult which fits TimeSize job """
148  #TODO: we do not check the step when assigning because of the different names, check if this is really OK. make a decision which step name to use later, long or short one
149  #and jobID["step"] in result['steps'].split(parserPerfsuiteMetadata._LINE_SEPARATOR)
150  if result['candle'] == candleLong and jobID["pileup_type"] == result['pileup_type'] and jobID["conditions"] == result['conditions'] and jobID["event_content"] == result['event_content']:
151  #print result
152  if not result.has_key("jobs"):
153  result['jobs'] = []
154  result['jobs'].append(timeSizeReport)
155  found = True
156  break
157 
158  if not found:
159  print "============ (almost) ERROR: NOT FOUND THE ENTRY in cmsPerfSuite.log, exporting as separate entry ======== "
160  print "JOB ID: %s " % str(jobID)
161  print " ====================== "
162  runinfo['unrecognized_jobs'].append(timeSizeReport)
163  #export_xml(xml_doc = xmldoc, **timeSizeReport)
def cmsPerfSuiteHarvest.get_modules_sequences_relationships ( )

Definition at line 108 of file cmsPerfSuiteHarvest.py.

References ModuleToSequenceAssign.assignModulesToSeqs(), and join().

Referenced by exportSequences().

110  (sequenceWithModules, sequenceWithModulesString) =ModuleToSequenceAssign.assignModulesToSeqs()
111  return [{"name": seq, "modules": ",".join(modules)} for (seq, modules) in sequenceWithModulesString.items()]
112 
static std::string join(char **cmd)
Definition: RemoteFile.cc:18
def cmsPerfSuiteHarvest.get_params (   argv)
Returns the version of CMSSW to be used which it is taken from:
* command line parameter or 
* environment variable 
in case of error returns None

And also the directory to put the xml files to: if none --> returns ""
try to get the version for command line argument 

Definition at line 45 of file cmsPerfSuiteHarvest.py.

Referenced by exportSequences().

45 
46 def get_params(argv):
47  """
48  Returns the version of CMSSW to be used which it is taken from:
49  * command line parameter or
50  * environment variable
51  in case of error returns None
52 
53  And also the directory to put the xml files to: if none --> returns ""
54  """
55 
56  """ try to get the version for command line argument """
57  #print argv
58  #FIXME: this should be rewritten using getopt properly
59  version = None
60  #xml_dir = "cmsperfvm:/data/projects/conf/PerfSuiteDB/xml_dropbox" #Set this as default (assume change in write_xml to write to remote machines)
61  #NB write_xml is in Validation/Performance/python/cmssw_exportdb_xml.py
62  #Setting the default to write to a local directory:
63  xml_dir="PerfSuiteDBData"
64  try:
65  opts, args = getopt.getopt(argv[1:], "v:", ["version=", "outdir="])
66  except getopt.GetoptError, e:
67  print e
68  for opt, arg in opts:
69  if opt in ("-v", "--version"):
70  version = arg
71  if opt == "--outdir":
72  xml_dir = arg
73 
74  """ if not get it from environment string """
75  if not version:
76  try:
77  version = os.environ["CMSSW_VERSION"]
78  except KeyError:
79  pass
80 
81  return (version, xml_dir)
def cmsPerfSuiteHarvest.getIgSummary (   path)

Definition at line 416 of file cmsPerfSuiteHarvest.py.

References getSummaryInfo(), and split.

Referenced by process_igprof_dir().

417 def getIgSummary(path):
418  igresult = []
419  globbed = glob.glob(os.path.join(path, "*.sql3"))
420 
421  for f in globbed:
422  #print f
423  profileInfo = getSummaryInfo(f)
424  if not profileInfo:
425  continue
426  cumCounts, cumCalls = profileInfo
427  dump, architecture, release, rest = f.rsplit("/", 3)
428  candle, sequence, pileup, conditions, process, counterType, events = rest.split("___")
429  events = events.replace(".sql3", "")
430  igresult.append({"counter_type": counterType, "event": events, "cumcounts": cumCounts, "cumcalls": cumCalls})
431 
432  #fail-safe(nasty) fix for the diff (even if it gets fixed in the sqls, won't screw this up again...)
433  for ig in igresult:
434  if 'diff' in ig['event']:
435  eventLast,eventOne = ig['event'].split('_diff_')
436  for part in igresult:
437  if part['counter_type'] == ig['counter_type'] and part['event'] == eventOne:
438  cumcountsOne = part['cumcounts']
439  cumcallsOne = part['cumcalls']
440  if part['counter_type'] == ig['counter_type'] and part['event'] == eventLast:
441  cumcountsLast = part['cumcounts']
442  cumcallsLast = part['cumcalls']
443  ig['cumcounts'] = cumcountsLast - cumcountsOne
444  ig['cumcalls'] = cumcallsLast - cumcallsOne
445 
446  return igresult
double split
Definition: MVATrainer.cc:139
def cmsPerfSuiteHarvest.getMemcheckError (   path)

Definition at line 335 of file cmsPerfSuiteHarvest.py.

Referenced by process_memcheck_dir().

336 def getMemcheckError(path):
337  globbed = glob.glob(os.path.join(path, "*memcheck_vlgd.xml"))
338 
339  errnum = 0
340 
341  for f in globbed:
342  #print f
343  cmd = "grep '<error>' "+f+ " | wc -l "
344  p = os.popen(cmd, 'r')
345  errnum += int(p.readlines()[0])
346 
347  return errnum
348 
def cmsPerfSuiteHarvest.getSummaryInfo (   database)

Definition at line 447 of file cmsPerfSuiteHarvest.py.

References doQuery().

Referenced by getIgSummary().

448 def getSummaryInfo(database):
449  summary_query="""SELECT counter, total_count, total_freq, tick_period
450  FROM summary;"""
451  error, output = doQuery(summary_query, database)
452  if error or not output or output.count("\n") > 1:
453  return None
454  counter, total_count, total_freq, tick_period = output.split("@@@")
455  if counter == "PERF_TICKS":
456  return float(tick_period) * float(total_count), int(total_freq)
457  else:
458  return int(total_count), int(total_freq)
def cmsPerfSuiteHarvest.process_igprof_dir (   path,
  runinfo 
)

Definition at line 349 of file cmsPerfSuiteHarvest.py.

References python.multivaluedict.dict, exportIgProfReport(), getIgSummary(), FileNamesHelper.getJobID_fromIgProfLogName(), FileNamesHelper.read_SimulationCandles(), and python.rootplot.root2matplotlib.replace().

Referenced by searchIgProfFiles().

350 def process_igprof_dir(path, runinfo):
351  global release,event_content,conditions
352  """ if the release is not provided explicitly we take it from the Simulation candles file """
353  if (not release):
354  release_fromlogfile = read_SimulationCandles(path)
355  release = release_fromlogfile
356  print "release from simulation candles: %s" % release
357 
358  if (not release):
359  # TODO: raise exception!
360  raise Exception("the release was not found!")
361 
362  """ process the IgProf sql3 files """
363 
364  # get the file list
365  files = os.listdir(path)
366  igprof_files = [os.path.join(path, f) for f in files
367  if test_igprof_report_log.search(f)
368  and os.path.isfile(os.path.join(path, f)) ]
369 
370  if len(igprof_files) == 0: # No files...
371  print "No igprof files found!"
372  else:
373  for file in igprof_files:
374  jobID = getJobID_fromIgProfLogName(file)
375 
376  (candle, step, pileup_type, conditions, event_content) = jobID
377 
378  print "jobID: %s" % str(jobID)
379  jobID = dict(zip(("candle", "step", "pileup_type", "conditions", "event_content"), jobID))
380 
381  print "Dictionary based jobID %s: " % str(jobID)
382 
383  igProfType = path.split("/")[-1].replace("TTbar_", "").replace("MinBias_", "").replace("PU_", "")
384 
385  #if any of jobID fields except (isPILEUP) is empty we discard the job as all those are the jobID keys and we must have them
386  discard = len([key for key, value in jobID.items() if key != "pileup_type" and not value])
387  if discard:
388  print " ====================== The job HAS BEEN DISCARDED =============== "
389  print " NOT ALL DATA WAS AVAILABLE "
390  print " JOB ID = %s " % str(jobID)
391  print " ======================= end ===================================== "
392  continue
393 
394  # add to the list to generate the readable filename :)
395  steps[step] = 1
396  candles[candle.upper()] = 1
397  if pileup_type=="":
398  pileups["NoPileUp"]=1
399  else:
400  pileups[pileup_type] = 1
401 
402  igs = getIgSummary(path)
403  #print igs
404 
405  igProfReport = {
406  "jobID": jobID,
407  "release": release,
408  "igprof_result": igs,
409  "metadata": {"testname": igProfType},
410  }
411 
412  # print igProfReport
413  # export to xml: actually exporting gets suspended and put into runinfo
414  exportIgProfReport(path, igProfReport, igProfType, runinfo)
415 
#get IgProf summary information from the sql3 files
def cmsPerfSuiteHarvest.process_memcheck_dir (   path,
  runinfo 
)

Definition at line 273 of file cmsPerfSuiteHarvest.py.

References python.multivaluedict.dict, exportMemcheckReport(), FileNamesHelper.getJobID_fromMemcheckLogName(), getMemcheckError(), and FileNamesHelper.read_SimulationCandles().

Referenced by searchMemcheckFiles().

274 def process_memcheck_dir(path, runinfo):
275  global release,event_content,conditions
276  """ if the release is not provided explicitly we take it from the Simulation candles file """
277  if (not release):
278  release_fromlogfile = read_SimulationCandles(path)
279  release = release_fromlogfile
280  print "release from simulation candles: %s" % release
281 
282  if (not release):
283  # TODO: raise exception!
284  raise Exception("the release was not found!")
285 
286  """ process the vlgd files """
287 
288  # get the file list
289  files = os.listdir(path)
290  memcheck_files = [os.path.join(path, f) for f in files
291  if test_memcheck_report_log.search(f)
292  and os.path.isfile(os.path.join(path, f)) ]
293 
294  if len(memcheck_files) == 0: # Fast protection for old runs, where the _vlgd files is not created...
295  print "No _vlgd files found!"
296  else:
297  for file in memcheck_files:
298  jobID = getJobID_fromMemcheckLogName(os.path.join(path, file))
299 
300  (candle, step, pileup_type, conditions, event_content) = jobID
301 
302  print "jobID: %s" % str(jobID)
303  jobID = dict(zip(("candle", "step", "pileup_type", "conditions", "event_content"), jobID))
304 
305  print "Dictionary based jobID %s: " % str(jobID)
306 
307  #if any of jobID fields except (isPILEUP) is empty we discard the job as all those are the jobID keys and we must have them
308  discard = len([key for key, value in jobID.items() if key != "pileup_type" and not value])
309  if discard:
310  print " ====================== The job HAS BEEN DISCARDED =============== "
311  print " NOT ALL DATA WAS AVAILABLE "
312  print " JOB ID = %s " % str(jobID)
313  print " ======================= end ===================================== "
314  continue
315 
316  # add to the list to generate the readable filename :)
317  steps[step] = 1
318  candles[candle.upper()] = 1
319  if pileup_type=="":
320  pileups["NoPileUp"]=1
321  else:
322  pileups[pileup_type] = 1
323 
324  memerror = getMemcheckError(path)
325 
326  MemcheckReport = {
327  "jobID": jobID,
328  "release": release,
329  "memcheck_errors": {"error_num": memerror},
330  "metadata": {"testname": "Memcheck"},
331  }
332 
333  # export to xml: actually exporting gets suspended and put into runinfo
334  exportMemcheckReport(path, MemcheckReport, runinfo)
def getJobID_fromMemcheckLogName
def cmsPerfSuiteHarvest.process_timesize_dir (   path,
  runinfo 
)

Definition at line 191 of file cmsPerfSuiteHarvest.py.

References _eventContent_DEBUG(), python.multivaluedict.dict, exportTimeSizeJob(), parserEdmSize.getEdmReport(), FileNamesHelper.getJobID_fromTimeReportLogName(), FileNamesHelper.getRootFileSize(), parserTimingReport.loadTimeLog(), python.multivaluedict.map(), parserTimingReport.processModuleTimeLogData(), FileNamesHelper.read_ConfigurationFromSimulationCandles(), and FileNamesHelper.read_SimulationCandles().

Referenced by searchTimeSizeFiles().

192 def process_timesize_dir(path, runinfo):
193  global release,event_content,conditions
194  """ if the release is not provided explicitly we take it from the Simulation candles file """
195  if (not release):
196  release_fromlogfile = read_SimulationCandles(path)
197  release = release_fromlogfile
198  print "release from simulation candles: %s" % release
199 
200  if (not release):
201  # TODO: raise exception!
202  raise Exception("the release was not found!")
203 
204 
205  """ process the TimingReport log files """
206 
207  # get the file list
208  files = os.listdir(path)
209  timing_report_files = [os.path.join(path, f) for f in files
210  if test_timing_report_log.search(f)
211  and os.path.isfile(os.path.join(path, f)) ]
212 
213  # print timing_report_files
214  for timelog_f in timing_report_files:
215  print "\nProcessing file: %s" % timelog_f
216  print "------- "
217 
218  jobID = getJobID_fromTimeReportLogName(os.path.join(path, timelog_f))
219  print "jobID: %s" % str(jobID)
220  (candle, step, pileup_type, conditions, event_content) = jobID
221  jobID = dict(zip(("candle", "step", "pileup_type", "conditions", "event_content"), jobID))
222  print "Dictionary based jobID %s: " % str(jobID)
223 
224  #if any of jobID fields except (isPILEUP) is empty we discard the job as all those are the jobID keys and we must have them
225  discard = len([key for key, value in jobID.items() if key != "pileup_type" and not value])
226  if discard:
227  print " ====================== The job HAS BEEN DISCARDED =============== "
228  print " NOT ALL DATA WAS AVAILABLE "
229  print " JOB ID = %s " % str(jobID)
230  print " ======================= end ===================================== "
231  continue
232 
233  # TODO: automatically detect type of report file!!!
234  (mod_timelog, evt_timelog, rss_data, vsize_data) =loadTimeLog(timelog_f)
235 
236  mod_timelog= processModuleTimeLogData(mod_timelog, groupBy = "module_name")
237  print "Number of modules grouped by (module_label+module_name): %s" % len(mod_timelog)
238 
239  # add to the list to generate the readable filename :)
240  steps[step] = 1
241  candles[candle] = 1
242  if pileup_type=="":
243  pileups["NoPileUp"]=1
244  else:
245  pileups[pileup_type] = 1
246 
247  # root file size (number)
248  root_file_size = getRootFileSize(path = path, candle = candle, step = step.replace(':', '='))
249  # number of events
250  num_events = read_ConfigurationFromSimulationCandles(path = path, step = step, is_pileup = pileup_type)["num_events"]
251 
252  #EdmSize
253  edm_report = parserEdmSize.getEdmReport(path = path, candle = candle, step = step)
254  if edm_report != False:
255  try:
256  # add event content data
257  edm_report = map(assign_event_content_for_product, edm_report)
258  # for testing / information
259  _eventContent_DEBUG(edm_report)
260  except Exception, e:
261  print e
262 
263  timeSizeReport = {
264  "jobID":jobID,
265  "release": release,
266  "timelog_result": (mod_timelog, evt_timelog, rss_data, vsize_data),
267  "metadata": {"testname": "TimeSize", "root_file_size": root_file_size, "num_events": num_events},
268  "edmSize_result": edm_report
269  }
270 
271  # export to xml: actually exporting gets suspended and put into runinfo
272  exportTimeSizeJob(path, timeSizeReport, runinfo)
def getJobID_fromTimeReportLogName
def processModuleTimeLogData
mod_data[&quot;stats&quot;] =calc_MinMaxAvgRMS(f_time = lambda x: x[&quot;time&quot;], f_evt_num = lambda x: x[&quot;event_num...
def read_ConfigurationFromSimulationCandles
def cmsPerfSuiteHarvest.searchIgProfFiles (   runinfo)
so far we will use the current dir to search in 

Definition at line 499 of file cmsPerfSuiteHarvest.py.

References process_igprof_dir().

500 def searchIgProfFiles(runinfo):
501  """ so far we will use the current dir to search in """
502  path = os.getcwd()
503  #print path
504  print 'full path =', os.path.abspath(path)
505 
506  files = os.listdir(path)
507 
508  test_IgProfDirs = re.compile("_IgProf(.*)$", re.IGNORECASE)
509  igprof_dirs = [os.path.join(path, f) for f in files if test_IgProfDirs.search(f) and os.path.isdir(os.path.join(path, f))]
510 
511  for igprof_dir in igprof_dirs:
512  print igprof_dir
513  process_igprof_dir(igprof_dir, runinfo)
def cmsPerfSuiteHarvest.searchMemcheckFiles (   runinfo)
so far we will use the current dir to search in 

Definition at line 483 of file cmsPerfSuiteHarvest.py.

References process_memcheck_dir().

484 def searchMemcheckFiles(runinfo):
485  """ so far we will use the current dir to search in """
486  path = os.getcwd()
487  #print path
488  print 'full path =', os.path.abspath(path)
489 
490  files = os.listdir(path)
491 
492  test_MemcheckDirs = re.compile("_Memcheck(.*)$", re.IGNORECASE)
493  memcheck_dirs = [os.path.join(path, f) for f in files if test_MemcheckDirs.search(f) and os.path.isdir(os.path.join(path, f))]
494 
495  for memcheck_dir in memcheck_dirs:
496  print memcheck_dir
497  process_memcheck_dir(memcheck_dir, runinfo)
498 
#IgProf
def cmsPerfSuiteHarvest.searchTimeSizeFiles (   runinfo)
so far we will use the current dir to search in 

Definition at line 467 of file cmsPerfSuiteHarvest.py.

References process_timesize_dir().

468 def searchTimeSizeFiles(runinfo):
469  """ so far we will use the current dir to search in """
470  path = os.getcwd()
471  #print path
472  print 'full path =', os.path.abspath(path)
473 
474  files = os.listdir(path)
475 
476  test_timeSizeDirs = re.compile("_TimeSize$", re.IGNORECASE)
477  timesize_dirs = [os.path.join(path, f) for f in files if test_timeSizeDirs.search(f) and os.path.isdir(os.path.join(path, f))]
478 
479  for timesize_dir in timesize_dirs:
480  # print timesize_dir
481  process_timesize_dir(timesize_dir, runinfo)
482 
#Memcheck
def cmsPerfSuiteHarvest.usage (   argv)

Definition at line 31 of file cmsPerfSuiteHarvest.py.

31 
32 def usage(argv):
33  script = argv[0]
34  return """
35  Usage: %(script)s [-v cmssw_version] [--version=cmssw_version]
36 
37  if the cmssw version is in the system's environment (after running cmsenv):
38  $ %(script)s
39 
40  otherwise one must specify the cmssw version:
41  $ %(script)s --version=CMSSW_3_2_0
42  $ %(script)s -v CMSSW_3_2_0
43 
44  """ % locals()

Variable Documentation

cmsPerfSuiteHarvest._TEST_RUN = False

Definition at line 17 of file cmsPerfSuiteHarvest.py.

dictionary cmsPerfSuiteHarvest.candles = {}

Definition at line 28 of file cmsPerfSuiteHarvest.py.

tuple cmsPerfSuiteHarvest.eventContentRules = parseEventContent.getTxtEventContentRules()

Definition at line 583 of file cmsPerfSuiteHarvest.py.

cmsPerfSuiteHarvest.EventContents_OK = False

Definition at line 549 of file cmsPerfSuiteHarvest.py.

string cmsPerfSuiteHarvest.file_name = "%s___%s___%s___%s___%s___%s___%s.xml"

Definition at line 593 of file cmsPerfSuiteHarvest.py.

Referenced by HcalLutManager.create_lut_loader(), SiPixelHistoPlotter.createStaticPlot(), SiStripHistoPlotter.createStaticPlot(), DTTPGLutFile.open(), L1TriggerLutFile.open(), TEcnaRead.ReadAverageHighFrequencyNoise(), TEcnaRead.ReadAverageLowFrequencyNoise(), TEcnaRead.ReadAverageMeanCorrelationsBetweenSamples(), TEcnaRead.ReadAveragePedestals(), TEcnaRead.ReadAverageSigmaOfCorrelationsBetweenSamples(), TEcnaRead.ReadAverageTotalNoise(), TEcnaRead.ReadCorrelationsBetweenSamples(), TEcnaRead.ReadCovariancesBetweenSamples(), TEcnaRead.ReadHighFrequencyCorrelationsBetweenChannels(), TEcnaRead.ReadHighFrequencyCovariancesBetweenChannels(), TEcnaRead.ReadHighFrequencyMeanCorrelationsBetweenStins(), TEcnaRead.ReadHighFrequencyNoise(), TEcnaRead.ReadLowFrequencyCorrelationsBetweenChannels(), TEcnaRead.ReadLowFrequencyCovariancesBetweenChannels(), TEcnaRead.ReadLowFrequencyMeanCorrelationsBetweenStins(), TEcnaRead.ReadLowFrequencyNoise(), TEcnaRead.ReadMeanCorrelationsBetweenSamples(), TEcnaRead.ReadNumberOfEventsForSamples(), TEcnaRead.ReadPedestals(), TEcnaRead.ReadRelevantCorrelationsBetweenSamples(), TEcnaRead.ReadRootFileHeader(), TEcnaRead.ReadSampleAdcValues(), TEcnaRead.ReadSampleAdcValuesSameFile(), TEcnaRead.ReadSampleMeans(), TEcnaRead.ReadSampleSigmas(), TEcnaRead.ReadSigmaOfCorrelationsBetweenSamples(), TEcnaRead.ReadStinNumbers(), TEcnaRead.ReadTotalNoise(), and TEcnaRun.WriteRootFile().

tuple cmsPerfSuiteHarvest.now = datetime.datetime.now()

Definition at line 590 of file cmsPerfSuiteHarvest.py.

Referenced by edm::CountAndLimit.add(), lumi::RevisionDML.addEntry(), lumi::RevisionDML.addRevision(), lumi::RevisionDML.addRunToCurrentHFDataTag(), evf::EvFRecordInserter.analyze(), HcalLuttoDB.beginJob(), RawEventOutputModuleForBU< Consumer >.beginLuminosityBlock(), condbon.cdbon_write(), DTHVStatusHandler.dumpSnapshot(), DaqFakeReader.fillFED1023(), Fit.getFitQuality(), DTHVStatusHandler.getNewObjects(), HtrXmlPatternWriter.HtrXmlPatternWriter(), cond::persistency::IOVEditor.insert(), cond::Logger.logFailedOperationNow(), DQMNet.logme(), cond::Logger.logOperationNow(), Fit.multiplyMatrices(), operator<<(), FastTimer.pause(), FastTimerService.postStreamBeginLumi(), FastTimerService.postStreamBeginRun(), FastTimerService.postStreamEndLumi(), FastTimerService.postStreamEndRun(), Fit.PropagateErrors(), FastTimer.resume(), lumi::CMSRunSummaryDummy2DB.retrieveData(), DQMNet.run(), ALIUtils.set_time_now(), edm::TimeoutPoolOutputModule.shouldWeCloseFile(), FastTimer.start(), edm::service::ConcurrentModuleTimer.start(), FastTimer.stop(), edm::service::ConcurrentModuleTimer.stop(), and FastTimer.untilNow().

tuple cmsPerfSuiteHarvest.p = parserPerfsuiteMetadata(os.getcwd())

Definition at line 543 of file cmsPerfSuiteHarvest.py.

dictionary cmsPerfSuiteHarvest.pileups = {}

Definition at line 29 of file cmsPerfSuiteHarvest.py.

cmsPerfSuiteHarvest.release = None

Definition at line 26 of file cmsPerfSuiteHarvest.py.

tuple cmsPerfSuiteHarvest.run_info = p.parseAll()

Definition at line 544 of file cmsPerfSuiteHarvest.py.

cmsPerfSuiteHarvest.Sequences_OK = False

Definition at line 548 of file cmsPerfSuiteHarvest.py.

dictionary cmsPerfSuiteHarvest.steps = {}

Definition at line 27 of file cmsPerfSuiteHarvest.py.

tuple cmsPerfSuiteHarvest.test_igprof_report_log = re.compile("^(.*)(IgProfMem|IgProfPerf)\.gz", re.IGNORECASE)

Definition at line 21 of file cmsPerfSuiteHarvest.py.

tuple cmsPerfSuiteHarvest.test_memcheck_report_log = re.compile("^(.*)memcheck_vlgd.xml", re.IGNORECASE)

Definition at line 22 of file cmsPerfSuiteHarvest.py.

tuple cmsPerfSuiteHarvest.test_timing_report_log = re.compile("TimingReport.log$", re.IGNORECASE)

Definition at line 20 of file cmsPerfSuiteHarvest.py.

tuple cmsPerfSuiteHarvest.xmldoc = minidom.Document()

Definition at line 25 of file cmsPerfSuiteHarvest.py.