def _eventContent_DEBUG(edm_report):
    """Print, per event content, how many products of edm_report belong to it.

    For testing / information only; does nothing when _TEST_RUN is set.
    edm_report: iterable of product dicts with at least the keys
    cpp_type, module_name and module_label (as used by the listing below).
    """
    # NOTE(review): reconstructed formatting from a flattened listing — verify
    # indentation of the statistics loop against the original file.
    EC_count = {}
    if not _TEST_RUN:
        # count the products in event-contents
        for prod in edm_report:
            ecs = parseEventContent.List_ECs_forProduct(prod)
            for ec in ecs:
                # `has_key` is deprecated (removed in Python 3); use `in`
                if ec not in EC_count:
                    EC_count[ec] = []
                EC_count[ec].append(prod)
        # print out the statistics
        for (ec, prods) in EC_count.items():
            print("==== %s EVENT CONTENT: have %d items, the listing is: ===" % (ec, len(prods)))
            # list of products
            print("\n *".join(["%(cpp_type)s_%(module_name)s_%(module_label)s" % prod for prod in prods]))
def cmsPerfSuiteHarvest::assign_event_content_for_product | ( | product | ) |
Returns the modified product, with the event-content relationship added.
Definition at line 100 of file cmsPerfSuiteHarvest.py.
def cmsPerfSuiteHarvest::doQuery | ( | query, | |
database | |||
) |
Definition at line 459 of file cmsPerfSuiteHarvest.py.
def exportIgProfReport(path, igProfReport, igProfType, runinfo):
    """Attach an IgProf job report to its matching test result in runinfo.

    path        -- directory the report came from; its basename (minus the
                   _IgProf_Perf/_IgProf_Mem/_PU suffixes) identifies the candle
    igProfReport-- dict with a "jobID" sub-dict (candle, pileup_type,
                   conditions, event_content, ...)
    igProfType  -- key into runinfo['TestResults'] (e.g. IgProfMem/IgProfPerf)
    runinfo     -- parsed cmsPerfSuite.log structure; mutated in place

    If no matching TestResults entry exists the report is appended to
    runinfo['unrecognized_jobs'] instead.
    """
    jobID = igProfReport["jobID"]
    candleLong = os.path.split(path)[1].replace("_IgProf_Perf", "").replace("_IgProf_Mem", "").replace("_PU", "")
    found = False
    # `has_key` is deprecated (removed in Python 3); use `in`
    if igProfType in runinfo['TestResults']:
        for result in runinfo['TestResults'][igProfType]:
            if candleLong == result["candle"] and jobID["pileup_type"] == result['pileup_type'] and jobID["conditions"] == result['conditions'] and jobID["event_content"] == result['event_content']:
                jobID["candle"] = jobID["candle"].upper()
                if "jobs" not in result:
                    result['jobs'] = []
                result['jobs'].append(igProfReport)
                found = True
                break

    if not found:
        print("============ (almost) ERROR: NOT FOUND THE ENTRY in cmsPerfSuite.log, exporting as separate entry ======== ")
        print("JOB ID: %s " % str(jobID))
        print(" ====================== ")
        runinfo['unrecognized_jobs'].append(igProfReport)
def exportMemcheckReport(path, MemcheckReport, runinfo):
    """Attach a Memcheck job report to its matching test result in runinfo.

    path           -- directory of the report; basename minus _Memcheck/_PU
                      suffixes is the candle name
    MemcheckReport -- dict with a "jobID" sub-dict
    runinfo        -- parsed cmsPerfSuite.log structure; mutated in place

    Falls back to runinfo['unrecognized_jobs'] when no entry matches.
    """
    candleLong = os.path.split(path)[1].replace("_Memcheck", "").replace("_PU", "")
    jobID = MemcheckReport["jobID"]

    # search for a run Test to which our JOB could belong
    found = False
    # `has_key` is deprecated (removed in Python 3); use `in`
    if 'Memcheck' in runinfo['TestResults']:
        for result in runinfo['TestResults']['Memcheck']:
            # If this is the testResult which fits the Memcheck job.
            # TODO: we do not check the step when assigning because of the
            # different names; check if this is really OK. Make a decision
            # which step name to use later, long or short one.
            if result['candle'] == candleLong and jobID["pileup_type"] == result['pileup_type'] and jobID["conditions"] == result['conditions'] and jobID["event_content"] == result['event_content']:
                if "jobs" not in result:
                    result['jobs'] = []
                result['jobs'].append(MemcheckReport)
                found = True
                break

    if not found:
        print("============ (almost) ERROR: NOT FOUND THE ENTRY in cmsPerfSuite.log, exporting as separate entry ======== ")
        print("JOB ID: %s " % str(jobID))
        print(" ====================== ")
        runinfo['unrecognized_jobs'].append(MemcheckReport)
def exportSequences():
    """Export the module/sequence relationships into the global XML document.

    Uses the module-level `xmldoc` and `release`; CMSSW_VERSION from the
    environment is read only as an integrity check against `release`.
    """
    try:
        env_cmssw_version = os.environ["CMSSW_VERSION"]
    except KeyError:
        # NOTE(review): original message text reconstructed from a flattened
        # listing; line-continuation whitespace may differ from the original.
        print("<<<<< ====== Error: cannot get CMSSW version [just integrity check for sequences]. Is the CMSSW environment initialized? (use cmsenv) ==== >>>>")
        env_cmssw_version = None

    # typo fix: "harversting" -> "harvesting"
    print(" ==== exporting the sequences. loading files for currently loaded CMSSW version: %s, while the CMSSW we are currently harvesting is %s ===" % (env_cmssw_version, release))
    xml_export_Sequences(xml_doc=xmldoc, sequences=get_modules_sequences_relationships(), release=release)
def exportTimeSizeJob(path, timeSizeReport, runinfo):
    """Attach a TimeSize job report to its matching test result in runinfo.

    path           -- directory of the report; basename minus _TimeSize/_PU
                      suffixes is the candle name
    timeSizeReport -- dict with a "jobID" sub-dict
    runinfo        -- parsed cmsPerfSuite.log structure; mutated in place

    Falls back to runinfo['unrecognized_jobs'] when no entry matches.
    """
    candleLong = os.path.split(path)[1].replace("_TimeSize", "").replace("_PU", "")
    jobID = timeSizeReport["jobID"]

    # search for a run Test to which our JOB could belong
    found = False
    # `has_key` is deprecated (removed in Python 3); use `in`
    if 'TimeSize' in runinfo['TestResults']:
        for result in runinfo['TestResults']['TimeSize']:
            # If this is the testResult which fits the TimeSize job.
            # TODO: we do not check the step when assigning because of the
            # different names; check if this is really OK. Make a decision
            # which step name to use later, long or short one.
            if result['candle'] == candleLong and jobID["pileup_type"] == result['pileup_type'] and jobID["conditions"] == result['conditions'] and jobID["event_content"] == result['event_content']:
                if "jobs" not in result:
                    result['jobs'] = []
                result['jobs'].append(timeSizeReport)
                found = True
                break

    if not found:
        print("============ (almost) ERROR: NOT FOUND THE ENTRY in cmsPerfSuite.log, exporting as separate entry ======== ")
        print("JOB ID: %s " % str(jobID))
        print(" ====================== ")
        runinfo['unrecognized_jobs'].append(timeSizeReport)
def cmsPerfSuiteHarvest::get_modules_sequences_relationships | ( | ) |
Definition at line 108 of file cmsPerfSuiteHarvest.py.
def get_params(argv):
    """
    Returns the version of CMSSW to be used which it is taken from:
    * command line parameter or
    * environment variable
    in case of error returns None

    And also the directory to put the xml files to: if none --> returns ""
    """
    # try to get the version from a command line argument
    #FIXME: this should be rewritten using getopt properly
    version = None
    #xml_dir = "cmsperfvm:/data/projects/conf/PerfSuiteDB/xml_dropbox" #Set this as default (assume change in write_xml to write to remote machines)
    #NB write_xml is in Validation/Performance/python/cmssw_exportdb_xml.py
    #Setting the default to write to a local directory:
    xml_dir = "PerfSuiteDBData"
    # BUG FIX: `opts` was previously unbound after a GetoptError, so the
    # loop below raised NameError; initialize it so we fall back gracefully.
    opts = []
    try:
        opts, args = getopt.getopt(argv[1:], "v:", ["version=", "outdir="])
    except getopt.GetoptError as e:
        print(e)
    for opt, arg in opts:
        if opt in ("-v", "--version"):
            version = arg
        if opt == "--outdir":
            xml_dir = arg

    # if not given on the command line, get it from the environment
    if not version:
        try:
            version = os.environ["CMSSW_VERSION"]
        except KeyError:
            pass

    return (version, xml_dir)
def getIgSummary(path):
    """Collect IgProf summary info from all *.sql3 files under path.

    Returns a list of dicts with keys counter_type, event, cumcounts,
    cumcalls. Entries whose event name contains '_diff_' are rewritten in
    place as the difference between the two referenced events.
    """
    igresult = []
    globbed = glob.glob(os.path.join(path, "*.sql3"))

    for f in globbed:
        profileInfo = getSummaryInfo(f)
        if not profileInfo:
            continue
        cumCounts, cumCalls = profileInfo
        # NOTE: `release` here is a local unpacking target, not the module global
        dump, architecture, release, rest = f.rsplit("/", 3)
        candle, sequence, pileup, conditions, process, counterType, events = rest.split("___")
        events = events.replace(".sql3", "")
        igresult.append({"counter_type": counterType, "event": events, "cumcounts": cumCounts, "cumcalls": cumCalls})

    # fail-safe(nasty) fix for the diff (even if it gets fixed in the sqls, won't screw this up again...)
    for ig in igresult:
        if 'diff' in ig['event']:
            eventLast, eventOne = ig['event'].split('_diff_')
            # BUG FIX: previously these were unbound (NameError) when one of
            # the two referenced events was missing from igresult.
            cumcountsOne = cumcallsOne = cumcountsLast = cumcallsLast = None
            for part in igresult:
                if part['counter_type'] == ig['counter_type'] and part['event'] == eventOne:
                    cumcountsOne = part['cumcounts']
                    cumcallsOne = part['cumcalls']
                if part['counter_type'] == ig['counter_type'] and part['event'] == eventLast:
                    cumcountsLast = part['cumcounts']
                    cumcallsLast = part['cumcalls']
            if cumcountsOne is not None and cumcountsLast is not None:
                ig['cumcounts'] = cumcountsLast - cumcountsOne
                ig['cumcalls'] = cumcallsLast - cumcallsOne
            else:
                print("WARNING: could not resolve both endpoints for diff entry %s" % ig['event'])

    return igresult
def cmsPerfSuiteHarvest::getMemcheckError | ( | path | ) |
Definition at line 335 of file cmsPerfSuiteHarvest.py.
def getSummaryInfo(database):
    """Read the single-row summary table of an IgProf sql3 database.

    Returns (counts, calls) — scaled by the tick period for PERF_TICKS
    counters — or None when the query fails or yields more than one row.
    """
    summary_query = """SELECT counter, total_count, total_freq, tick_period
                FROM summary;"""
    error, output = doQuery(summary_query, database)
    # guard clause: bail out on query error, empty output or multiple rows
    if error or not output or output.count("\n") > 1:
        return None
    counter, total_count, total_freq, tick_period = output.split("@@@")
    if counter != "PERF_TICKS":
        return int(total_count), int(total_freq)
    return float(tick_period) * float(total_count), int(total_freq)
def process_igprof_dir(path, runinfo):
    """Process one *_IgProf directory: build per-job IgProf reports and
    hand them to exportIgProfReport.

    Mutates the module-level release/steps/candles/pileups bookkeeping.
    """
    global release, event_content, conditions
    # if the release is not provided explicitly we take it from the Simulation candles file
    if not release:
        release_fromlogfile = read_SimulationCandles(path)
        release = release_fromlogfile
        print("release from simulation candles: %s" % release)

    if not release:
        raise Exception("the release was not found!")

    # process the IgProf sql3 files: get the file list
    files = os.listdir(path)
    igprof_files = [os.path.join(path, f) for f in files
                    if test_igprof_report_log.search(f)
                    and os.path.isfile(os.path.join(path, f))]

    if len(igprof_files) == 0:  # No files...
        print("No igprof files found!")
    else:
        # renamed loop var: `file` shadowed the builtin
        for log_file in igprof_files:
            jobID = getJobID_fromIgProfLogName(log_file)

            (candle, step, pileup_type, conditions, event_content) = jobID

            print("jobID: %s" % str(jobID))
            jobID = dict(zip(("candle", "step", "pileup_type", "conditions", "event_content"), jobID))

            print("Dictionary based jobID %s: " % str(jobID))

            igProfType = path.split("/")[-1].replace("TTbar_", "").replace("MinBias_", "").replace("PU_", "")

            # if any of jobID fields except pileup_type is empty we discard the
            # job, as all those are the jobID keys and we must have them
            discard = len([key for key, value in jobID.items() if key != "pileup_type" and not value])
            if discard:
                print(" ====================== The job HAS BEEN DISCARDED =============== ")
                print(" NOT ALL DATA WAS AVAILABLE ")
                print(" JOB ID = %s " % str(jobID))
                print(" ======================= end ===================================== ")
                continue

            # add to the list to generate the readable filename :)
            steps[step] = 1
            candles[candle.upper()] = 1
            if pileup_type == "":
                pileups["NoPileUp"] = 1
            else:
                pileups[pileup_type] = 1

            # get IgProf summary information from the sql3 files
            igs = getIgSummary(path)

            igProfReport = {
                "jobID": jobID,
                "release": release,
                "igprof_result": igs,
                "metadata": {"testname": igProfType},
            }

            # exporting gets suspended and put into runinfo
            exportIgProfReport(path, igProfReport, igProfType, runinfo)
def process_memcheck_dir(path, runinfo):
    """Process one *_Memcheck directory: build per-job Memcheck reports and
    hand them to exportMemcheckReport.

    Mutates the module-level release/steps/candles/pileups bookkeeping.
    """
    global release, event_content, conditions
    # if the release is not provided explicitly we take it from the Simulation candles file
    if not release:
        release_fromlogfile = read_SimulationCandles(path)
        release = release_fromlogfile
        print("release from simulation candles: %s" % release)

    if not release:
        raise Exception("the release was not found!")

    # process the vlgd files: get the file list
    files = os.listdir(path)
    memcheck_files = [os.path.join(path, f) for f in files
                      if test_memcheck_report_log.search(f)
                      and os.path.isfile(os.path.join(path, f))]

    if len(memcheck_files) == 0:  # Fast protection for old runs, where the _vlgd files were not created...
        print("No _vlgd files found!")
    else:
        # renamed loop var: `file` shadowed the builtin
        for log_file in memcheck_files:
            jobID = getJobID_fromMemcheckLogName(os.path.join(path, log_file))

            (candle, step, pileup_type, conditions, event_content) = jobID

            print("jobID: %s" % str(jobID))
            jobID = dict(zip(("candle", "step", "pileup_type", "conditions", "event_content"), jobID))

            print("Dictionary based jobID %s: " % str(jobID))

            # if any of jobID fields except pileup_type is empty we discard the
            # job, as all those are the jobID keys and we must have them
            discard = len([key for key, value in jobID.items() if key != "pileup_type" and not value])
            if discard:
                print(" ====================== The job HAS BEEN DISCARDED =============== ")
                print(" NOT ALL DATA WAS AVAILABLE ")
                print(" JOB ID = %s " % str(jobID))
                print(" ======================= end ===================================== ")
                continue

            # add to the list to generate the readable filename :)
            steps[step] = 1
            candles[candle.upper()] = 1
            if pileup_type == "":
                pileups["NoPileUp"] = 1
            else:
                pileups[pileup_type] = 1

            memerror = getMemcheckError(path)

            MemcheckReport = {
                "jobID": jobID,
                "release": release,
                "memcheck_errors": {"error_num": memerror},
                "metadata": {"testname": "Memcheck"},
            }

            # exporting gets suspended and put into runinfo
            exportMemcheckReport(path, MemcheckReport, runinfo)
def process_timesize_dir(path, runinfo):
    """Process one *_TimeSize directory: parse TimingReport logs, EdmSize
    and root-file metadata, and hand each job to exportTimeSizeJob.

    Mutates the module-level release/steps/candles/pileups bookkeeping.
    """
    global release, event_content, conditions
    # if the release is not provided explicitly we take it from the Simulation candles file
    if not release:
        release_fromlogfile = read_SimulationCandles(path)
        release = release_fromlogfile
        print("release from simulation candles: %s" % release)

    if not release:
        raise Exception("the release was not found!")

    # process the TimingReport log files: get the file list
    files = os.listdir(path)
    timing_report_files = [os.path.join(path, f) for f in files
                           if test_timing_report_log.search(f)
                           and os.path.isfile(os.path.join(path, f))]

    for timelog_f in timing_report_files:
        print("\nProcessing file: %s" % timelog_f)
        print("------- ")

        jobID = getJobID_fromTimeReportLogName(os.path.join(path, timelog_f))
        print("jobID: %s" % str(jobID))
        (candle, step, pileup_type, conditions, event_content) = jobID
        jobID = dict(zip(("candle", "step", "pileup_type", "conditions", "event_content"), jobID))
        print("Dictionary based jobID %s: " % str(jobID))

        # if any of jobID fields except pileup_type is empty we discard the
        # job, as all those are the jobID keys and we must have them
        discard = len([key for key, value in jobID.items() if key != "pileup_type" and not value])
        if discard:
            print(" ====================== The job HAS BEEN DISCARDED =============== ")
            print(" NOT ALL DATA WAS AVAILABLE ")
            print(" JOB ID = %s " % str(jobID))
            print(" ======================= end ===================================== ")
            continue

        # TODO: automatically detect type of report file!!!
        (mod_timelog, evt_timelog, rss_data, vsize_data) = loadTimeLog(timelog_f)

        mod_timelog = processModuleTimeLogData(mod_timelog, groupBy="module_name")
        print("Number of modules grouped by (module_label+module_name): %s" % len(mod_timelog))

        # add to the list to generate the readable filename :)
        steps[step] = 1
        candles[candle] = 1
        if pileup_type == "":
            pileups["NoPileUp"] = 1
        else:
            pileups[pileup_type] = 1

        # root file size (number)
        root_file_size = getRootFileSize(path=path, candle=candle, step=step.replace(':', '='))
        # number of events
        num_events = read_ConfigurationFromSimulationCandles(path=path, step=step, is_pileup=pileup_type)["num_events"]

        # EdmSize
        edm_report = parserEdmSize.getEdmReport(path=path, candle=candle, step=step)
        if edm_report != False:
            try:
                # add event content data; `list(...)` keeps a concrete list
                # (identical to Python 2 map) for storage in the report
                edm_report = list(map(assign_event_content_for_product, edm_report))
                # for testing / information
                _eventContent_DEBUG(edm_report)
            except Exception as e:
                print(e)

        timeSizeReport = {
            "jobID": jobID,
            "release": release,
            "timelog_result": (mod_timelog, evt_timelog, rss_data, vsize_data),
            "metadata": {"testname": "TimeSize", "root_file_size": root_file_size, "num_events": num_events},
            "edmSize_result": edm_report
        }

        # exporting gets suspended and put into runinfo
        exportTimeSizeJob(path, timeSizeReport, runinfo)
def searchIgProfFiles(runinfo):
    """Find *_IgProf* directories under the current directory and process each.

    So far we use the current working directory to search in.
    """
    path = os.getcwd()
    print('full path = %s' % os.path.abspath(path))

    files = os.listdir(path)

    test_IgProfDirs = re.compile("_IgProf(.*)$", re.IGNORECASE)
    igprof_dirs = [os.path.join(path, f) for f in files if test_IgProfDirs.search(f) and os.path.isdir(os.path.join(path, f))]

    for igprof_dir in igprof_dirs:
        print(igprof_dir)
        process_igprof_dir(igprof_dir, runinfo)
def searchMemcheckFiles(runinfo):
    """Find *_Memcheck* directories under the current directory and process each.

    So far we use the current working directory to search in.
    """
    path = os.getcwd()
    print('full path = %s' % os.path.abspath(path))

    files = os.listdir(path)

    test_MemcheckDirs = re.compile("_Memcheck(.*)$", re.IGNORECASE)
    memcheck_dirs = [os.path.join(path, f) for f in files if test_MemcheckDirs.search(f) and os.path.isdir(os.path.join(path, f))]

    for memcheck_dir in memcheck_dirs:
        print(memcheck_dir)
        process_memcheck_dir(memcheck_dir, runinfo)
def searchTimeSizeFiles(runinfo):
    """Find *_TimeSize directories under the current directory and process each.

    So far we use the current working directory to search in.
    """
    path = os.getcwd()
    print('full path = %s' % os.path.abspath(path))

    files = os.listdir(path)

    test_timeSizeDirs = re.compile("_TimeSize$", re.IGNORECASE)
    timesize_dirs = [os.path.join(path, f) for f in files if test_timeSizeDirs.search(f) and os.path.isdir(os.path.join(path, f))]

    for timesize_dir in timesize_dirs:
        process_timesize_dir(timesize_dir, runinfo)
def usage(argv):
    """Return the command-line usage text, with the script name filled in."""
    return """
Usage: %(script)s [-v cmssw_version] [--version=cmssw_version]

if the cmssw version is in the system's environment (after running cmsenv):
$ %(script)s

otherwise one must specify the cmssw version:
$ %(script)s --version=CMSSW_3_2_0
$ %(script)s -v CMSSW_3_2_0

""" % {"script": argv[0]}
cmsPerfSuiteHarvest::_TEST_RUN = False |
Definition at line 17 of file cmsPerfSuiteHarvest.py.
dictionary cmsPerfSuiteHarvest::candles = {} |
Definition at line 28 of file cmsPerfSuiteHarvest.py.
tuple cmsPerfSuiteHarvest::eventContentRules = parseEventContent.getTxtEventContentRules() |
Definition at line 583 of file cmsPerfSuiteHarvest.py.
Definition at line 549 of file cmsPerfSuiteHarvest.py.
string cmsPerfSuiteHarvest::file_name = "%s___%s___%s___%s___%s___%s___%s.xml" |
Definition at line 593 of file cmsPerfSuiteHarvest.py.
Referenced by HcalLutManager::create_lut_loader(), SiStripHistoPlotter::createStaticPlot(), DTTPGLutFile::open(), L1TriggerLutFile::open(), TEcnaRead::ReadAverageHighFrequencyNoise(), TEcnaRead::ReadAverageLowFrequencyNoise(), TEcnaRead::ReadAverageMeanCorrelationsBetweenSamples(), TEcnaRead::ReadAveragePedestals(), TEcnaRead::ReadAverageSigmaOfCorrelationsBetweenSamples(), TEcnaRead::ReadAverageTotalNoise(), TEcnaRead::ReadCorrelationsBetweenSamples(), TEcnaRead::ReadCovariancesBetweenSamples(), TEcnaRead::ReadHighFrequencyCorrelationsBetweenChannels(), TEcnaRead::ReadHighFrequencyCovariancesBetweenChannels(), TEcnaRead::ReadHighFrequencyMeanCorrelationsBetweenStins(), TEcnaRead::ReadHighFrequencyNoise(), TEcnaRead::ReadLowFrequencyCorrelationsBetweenChannels(), TEcnaRead::ReadLowFrequencyCovariancesBetweenChannels(), TEcnaRead::ReadLowFrequencyMeanCorrelationsBetweenStins(), TEcnaRead::ReadLowFrequencyNoise(), TEcnaRead::ReadMeanCorrelationsBetweenSamples(), TEcnaRead::ReadNumberOfEventsForSamples(), TEcnaRead::ReadPedestals(), TEcnaRead::ReadRelevantCorrelationsBetweenSamples(), TEcnaRead::ReadRootFileHeader(), TEcnaRead::ReadSampleAdcValues(), TEcnaRead::ReadSampleAdcValuesSameFile(), TEcnaRead::ReadSampleMeans(), TEcnaRead::ReadSampleSigmas(), TEcnaRead::ReadSigmaOfCorrelationsBetweenSamples(), TEcnaRead::ReadStinNumbers(), TEcnaRead::ReadTotalNoise(), and TEcnaRun::WriteRootFile().
tuple cmsPerfSuiteHarvest::now = datetime.datetime.now() |
Definition at line 590 of file cmsPerfSuiteHarvest.py.
Referenced by edm::CountAndLimit::add(), lumi::RevisionDML::addEntry(), lumi::RevisionDML::addRevision(), lumi::RevisionDML::addRunToCurrentHFDataTag(), evf::EvFRecordInserter::analyze(), HcalLuttoDB::beginJob(), evf::FUResourceQueue::buildResource(), evf::FUResourceTable::buildResource(), stor::StatisticsReporter::calculateStatistics(), stor::MonitorCollection::calculateStatistics(), smproxy::StatisticsReporter::calculateStatistics(), condbon::cdbon_write(), stor::DiskWriter::checkForFileTimeOuts(), smproxy::DataManager::checkForStaleConsumers(), stor::EventDistributor::checkForStaleConsumers(), evf::iDie::detailsTable(), evf::rb_statemachine::Stopping::do_stateAction(), DTHVStatusHandler::dumpSnapshot(), Fit::getFitQuality(), DTHVStatusHandler::getNewObjects(), stor::QueueCollection< T >::getQueue(), HtrXmlPatternWriter::HtrXmlPatternWriter(), cond::Logger::logFailedOperationNow(), DQMNet::logme(), cond::Logger::logOperationNow(), Fit::multiplyMatrices(), operator<<(), evf::iDie::postEntry(), Fit::PropagateErrors(), evf::Vulture::prowling(), stor::StatisticsReporter::reset(), smproxy::StatisticsReporter::reset(), lumi::CMSRunSummaryDummy2DB::retrieveData(), DQMNet::run(), ALIUtils::set_time_now(), edm::TimeoutPoolOutputModule::shouldWeCloseFile(), cond::UpdateStamp::stamp(), evf::Vulture::startProwling(), stor::EventDistributor::tagCompleteEventForQueues(), and evf::FUResourceBroker::waitForStateChange().
tuple cmsPerfSuiteHarvest::p = parserPerfsuiteMetadata(os.getcwd()) |
Definition at line 543 of file cmsPerfSuiteHarvest.py.
dictionary cmsPerfSuiteHarvest::pileups = {} |
Definition at line 29 of file cmsPerfSuiteHarvest.py.
cmsPerfSuiteHarvest::release = None |
Definition at line 26 of file cmsPerfSuiteHarvest.py.
tuple cmsPerfSuiteHarvest::run_info = p.parseAll() |
Definition at line 544 of file cmsPerfSuiteHarvest.py.
Definition at line 548 of file cmsPerfSuiteHarvest.py.
dictionary cmsPerfSuiteHarvest::steps = {} |
Definition at line 27 of file cmsPerfSuiteHarvest.py.
tuple cmsPerfSuiteHarvest::test_igprof_report_log = re.compile("^(.*)(IgProfMem|IgProfPerf)\.gz", re.IGNORECASE) |
Definition at line 21 of file cmsPerfSuiteHarvest.py.
tuple cmsPerfSuiteHarvest::test_memcheck_report_log = re.compile("^(.*)memcheck_vlgd.xml", re.IGNORECASE) |
Definition at line 22 of file cmsPerfSuiteHarvest.py.
tuple cmsPerfSuiteHarvest::test_timing_report_log = re.compile("TimingReport.log$", re.IGNORECASE) |
Definition at line 20 of file cmsPerfSuiteHarvest.py.
tuple cmsPerfSuiteHarvest::xmldoc = minidom.Document() |
Definition at line 25 of file cmsPerfSuiteHarvest.py.