Classes | |
class | CMSHarvester |
CMSHarvester class. More... | |
class | CMSHarvesterHelpFormatter |
Helper class: CMSHarvesterHelpFormatter. More... | |
class | DBSXMLHandler |
Helper class: DBSXMLHandler. More... | |
class | Error |
Helper class: Error exception. More... | |
class | Usage |
Helper class: Usage exception. More... | |
Variables | |
string | __author__ = "Jeroen Hegeman (jeroen.hegeman@cern.ch)," |
string | __version__ = "3.8.2p1" |
File : cmsHarvest.py Authors : Jeroen Hegeman (jeroen.hegeman@cern.ch), Niklas Pietsch (niklas.pietsch@desy.de), Francesco Costanza (francesco.costanza@desy.de) Last change: 20100308. More... | |
string | action = "callback" |
list | all_file_names = files_info[run_number] |
list | all_t1 |
caf_access | |
callback = self.option_handler_input_Jsonrunfile, | |
castor_base_dir | |
list | castor_dir = self.datasets_information[dataset_name] |
CRAB More... | |
tuple | castor_path_common = self.create_castor_path_name_common(dataset_name) |
DEBUG DEBUG DEBUG This is probably only useful to make sure we don't muck things up, right? Figure out across how many sites this sample has been spread. More... | |
tuple | castor_paths |
castor_prefix = self.castor_prefix | |
string | cmd = "rfstat %s" |
self.logger.debug("Path is now `%s'" % \ path) More... | |
list | cmssw_version = self.datasets_information[dataset_name] |
list | complete_sites |
site_names_ref = set(files_info[run_number].values()[0][1])
for site_names_tmp in files_info[run_number].values()[1:]:
    if set(site_names_tmp[1]) != site_names_ref:
        mirrored = False
        break
More... | |
tuple | config_builder = ConfigBuilder(config_options, with_input=True) |
config_contents = config_builder.pythonCfgCode | |
In case this file is the second step (the real harvesting step) of the two-step harvesting we have to tell it to use our local files. More... | |
tuple | config_file_name = self.create_me_summary_config_file_name(dataset_name) |
Only add the alarming piece to the file name if this is a spread-out dataset. More... | |
list | connect_name = self.frontier_connection_name["globaltag"] |
dictionary | country_codes |
string | crab_config = "\n" |
CRAB More... | |
crab_submission | |
list | customisations = [""] |
tuple | dataset_name_escaped = self.escape_dataset_name(dataset_name) |
tuple | dataset_names = self.datasets_to_use.keys() |
dataset_names_after_checks = dataset_names_after_checks_tmp | |
tuple | dataset_names_after_checks_tmp = copy.deepcopy(dataset_names_after_checks) |
datasets_information | |
datasets_to_ignore | |
datasets_to_use | |
list | datatype = self.datasets_information[dataset_name] |
dbs_api | |
tuple | empty_runs = dict(tmp) |
tuple | es_prefer_snippet = self.create_es_prefer_snippet(dataset_name) |
int | exit_code = 1 |
list | file_name = handler.results["file.name"] |
list | files_at_site |
dictionary | files_info = {} |
list | files_without_sites |
list | globaltag = self.datasets_information[dataset_name] |
harvesting_info | |
harvesting_mode | |
harvesting_type | |
string | help = "Jsonfile containing dictionary of run/lumisections pairs. " |
string | index = "site_%02d" |
Jsonfilename | |
Jsonlumi | |
int | loop = 0 |
CMSSW More... | |
string | marker = "\n" |
list | marker_lines = [] |
string | metavar = "JSONRUNFILE" |
mirrored = None | |
string | msg = "Could not create directory `%s'" |
class Handler(xml.sax.handler.ContentHandler):
    def startElement(self, name, attrs):
        if name == "result":
            site_name = str(attrs["STORAGEELEMENT_SENAME"])
TODO TODO TODO Ugly hack to get around cases like this: $ dbs search --query="find dataset, site, file.count where dataset=/RelValQCD_Pt_3000_3500/CMSSW_3_3_0_pre1-STARTUP31X_V4-v1/GEN-SIM-RECO" Using DBS instance at: http://cmsdbsprod.cern.ch/cms_dbs_prod_global/servlet/DBSServlet Processing ... More... | |
tuple | multicrab_block_name |
string | multicrab_config = "\n" |
list | multicrab_config_lines = [] |
tuple | nevents = int(handler.results["file.numevents"][index]) |
non_t1access | |
nr_max_sites | |
dictionary | num_events_catalog = {} |
tuple | num_events_dataset = sum(tmp) |
tuple | num_sites |
if self.datasets_information[dataset_name]["num_events"][run_number] != 0:
    pdb.set_trace()
# DEBUG DEBUG DEBUG end
More... | |
int | number_max_sites = self.nr_max_sites+1 |
option_parser | |
output_file_name = self.\ | |
tuple | path = os.path.join(path, piece) |
else:
Piece not in the list, fine. More... | |
tuple | permissions = extract_permissions(output) |
list | permissions_new = [] |
string | permissions_target = "775" |
preferred_site | |
ref_hist_mappings_file_name | |
tuple | run_number = int(handler.results["run.number"][index]) |
list | runs = self.datasets_to_use[dataset_name] |
runs_to_ignore | |
runs_to_use | |
saveByLumiSection | |
tuple | se_name = choice(t1_sites) |
string | sep = "#" |
site_name = None | |
tuple | site_names = list(set([j for i in files_info[run_number].values() for j in i[1]])) |
list | sites = [self.preferred_site] |
list | sites_forbidden = [] |
list | sites_with_complete_copies = [] |
skip_this_path_piece = True | |
self.logger.debug("Checking CASTOR path piece `%s'" % \ piece) More... | |
list | t1_sites = [] |
list | tmp |
TODO TODO TODO Need to think about where this should go, but somewhere we have to move over the fact that we want to process all runs for each dataset that we're considering. More... | |
tuple | traceback_string = traceback.format_exc() |
string | twiki_url = "https://twiki.cern.ch/twiki/bin/view/CMS/CmsHarvester" |
string | type = "string" |
tuple | use_es_prefer = (self.harvesting_type == "RelVal") |
use_refs = use_es_prefer or \
UserName = output | |
workflow_name = dataset_name | |
def cmsHarvester.build_dataset_ignore_list | ( | self | ) |
Build a list of datasets to ignore. NOTE: We should always have a list of datasets to process, but it may be that we don't have a list of datasets to ignore.
Definition at line 3444 of file cmsHarvester.py.
def cmsHarvester.build_dataset_list | ( | self, | |
input_method, | |||
input_name | |||
) |
def dbs_check_dataset_num_events(self, dataset_name):
    """Figure out the number of events in each run of this dataset.

    This is a more efficient way of doing this than calling
    dbs_resolve_number_of_events for each run.

    # BUG BUG BUG
    """
    # DEBUG DEBUG DEBUG
    assert not self.dbs_api is None

    api = self.dbs_api
    dbs_query = "find run.number, file.name, file.numevents where dataset = %s " \
                "and dataset.status = VALID" % \
                dataset_name
    try:
        api_result = api.executeQuery(dbs_query)
    except DbsApiException:
        msg = "ERROR: Could not execute DBS query"
        self.logger.fatal(msg)
        raise Error(msg)

    try:
        files_info = {}
        class Handler(xml.sax.handler.ContentHandler):
            def startElement(self, name, attrs):
                if name == "result":
                    run_number = int(attrs["RUNS_RUNNUMBER"])
                    file_name = str(attrs["FILES_LOGICALFILENAME"])
                    nevents = int(attrs["FILES_NUMBEROFEVENTS"])
                    try:
                        files_info[run_number][file_name] = nevents
                    except KeyError:
                        files_info[run_number] = {file_name: nevents}
        xml.sax.parseString(api_result, Handler())
    except SAXParseException:
        msg = "ERROR: Could not parse DBS server output"
        self.logger.fatal(msg)
        raise Error(msg)

    num_events_catalog = {}
    for run_number in files_info.keys():
        num_events_catalog[run_number] = sum(files_info[run_number].values())

    # End of dbs_check_dataset_num_events.
    return num_events_catalog

End of old version.
Build a list of all datasets to be processed.
Definition at line 3357 of file cmsHarvester.py.
References dbs_resolve_dataset_name(), and list().
def cmsHarvester.build_dataset_use_list | ( | self | ) |
Build a list of datasets to process.
Definition at line 3421 of file cmsHarvester.py.
def cmsHarvester.build_datasets_information | ( | self | ) |
Obtain all information on the datasets that we need to run. Use DBS to figure out all required information on our datasets, like the run numbers and the GlobalTag. All information is stored in the datasets_information member variable.
Definition at line 5322 of file cmsHarvester.py.
def cmsHarvester.build_runs_ignore_list | ( | self | ) |
Build a list of runs to ignore. NOTE: We should always have a list of runs to process, but it may be that we don't have a list of runs to ignore.
Definition at line 3542 of file cmsHarvester.py.
def cmsHarvester.build_runs_list | ( | self, | |
input_method, | |||
input_name | |||
) |
Definition at line 3470 of file cmsHarvester.py.
References list().
def cmsHarvester.build_runs_use_list | ( | self | ) |
Build a list of runs to process.
Definition at line 3521 of file cmsHarvester.py.
def cmsHarvester.check_cmssw | ( | self | ) |
def cmsHarvester.check_dataset_list | ( | self | ) |
Check list of dataset names for impossible ones.

Two kinds of checks are done:
- Checks for things that do not make sense. These lead to errors and skipped datasets.
- Sanity checks. For these warnings are issued but the user is considered to be the authoritative expert.

Checks performed:
- The CMSSW version encoded in the dataset name should match self.cmssw_version. This is critical.
- There should be some events in the dataset/run. This is critical in the sense that CRAB refuses to create jobs for zero events. And yes, this does happen in practice. E.g. the reprocessed CRAFT08 datasets contain runs with zero events.
- A cursory check is performed to see if the harvesting type makes sense for the data type. This should prevent the user from inadvertently running RelVal for data.
- It is not possible to run single-step harvesting jobs on samples that are not fully contained at a single site.
- Each dataset/run has to be available at at least one site.
Definition at line 3795 of file cmsHarvester.py.
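For illustration, a minimal sketch of the first check listed above, extracting the CMSSW version encoded in a dataset name so it can be compared to the version of the current environment. The helper name and the regular expression are assumptions for this sketch, not the script's actual code.

    import re

    def cmssw_version_from_dataset_name(dataset_name):
        # Dataset names embed the release in their second component, e.g.
        # /RelValQCD_Pt_3000_3500/CMSSW_3_3_0_pre1-STARTUP31X_V4-v1/GEN-SIM-RECO
        match = re.search(r"(CMSSW_\d+_\d+_\d+(?:_pre\d+)?)", dataset_name)
        return match.group(1) if match else None

    # The critical check then amounts to comparing the result against
    # self.cmssw_version and skipping the dataset on a mismatch.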
def cmsHarvester.check_dbs | ( | self | ) |
def cmsHarvester.check_globaltag | ( | self, | |
globaltag = None |
|||
) |
Check if globaltag exists. Check if globaltag exists as GlobalTag in the database given by self.frontier_connection_name['globaltag']. If globaltag is None, self.globaltag is used instead. If we're going to use reference histograms this method also checks for the existence of the required key in the GlobalTag.
Definition at line 4502 of file cmsHarvester.py.
def cmsHarvester.check_globaltag_contains_ref_hist_key | ( | self, | |
globaltag, | |||
connect_name | |||
) |
Check if globaltag contains the required RefHistos key.
Definition at line 4599 of file cmsHarvester.py.
def cmsHarvester.check_globaltag_exists | ( | self, | |
globaltag, | |||
connect_name | |||
) |
def cmsHarvester.check_input_status | ( | self | ) |
Check completeness and correctness of input information. Check that all required information has been specified and that, at least as far as can be easily checked, it makes sense. NOTE: This is also where any default values are applied.
Definition at line 2191 of file cmsHarvester.py.
References join().
def cmsHarvester.check_ref_hist_mappings | ( | self | ) |
Make sure all necessary reference histograms exist. Check that for each of the datasets to be processed a reference histogram is specified and that that histogram exists in the database. NOTE: There's a little complication here. Since this whole thing was designed to allow (in principle) harvesting of both data and MC datasets in one go, we need to be careful to check the availability of reference mappings only for those datasets that need it.
Definition at line 5282 of file cmsHarvester.py.
def cmsHarvester.check_ref_hist_tag | ( | self, | |
tag_name | |||
) |
Check the existence of tag_name in database connect_name. Check if tag_name exists as a reference histogram tag in the database given by self.frontier_connection_name['refhists'].
Definition at line 4644 of file cmsHarvester.py.
References join().
def cmsHarvester.create_and_check_castor_dir | ( | self, | |
castor_dir | |||
) |
Check existence of the given CASTOR dir, if necessary create it. Some special care has to be taken with several things like setting the correct permissions such that CRAB can store the output results. Of course this means that things like /castor/cern.ch/ and user/j/ have to be recognised and treated properly. NOTE: Only CERN CASTOR area (/castor/cern.ch/) supported for the moment. NOTE: This method uses some slightly tricky caching to make sure we don't keep over and over checking the same base paths.
Definition at line 1488 of file cmsHarvester.py.
References spr.find().
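A rough sketch of the piece-by-piece checking with caching that this description implies, written in the Python 2 idiom of the original script. The rfmkdir/rfchmod calls and the module-level cache are assumptions based on the `cmd = "rfstat %s"' and `permissions_target = "775"' variables documented on this page, not a verbatim copy of the method.

    import commands
    import os.path

    castor_paths_checked = []  # cache of path pieces already verified

    def check_castor_path_sketch(castor_dir):
        # Walk the path piece by piece, checking (and caching) each level.
        path = os.path.sep
        for piece in castor_dir.split(os.path.sep):
            if not piece:
                continue
            path = os.path.join(path, piece)
            if path in castor_paths_checked:
                continue
            (status, output) = commands.getstatusoutput("rfstat %s" % path)
            if status != 0:
                # Missing level: create it and open it up so CRAB can
                # copy back output (cf. permissions_target = "775").
                commands.getstatusoutput("rfmkdir %s" % path)
                commands.getstatusoutput("rfchmod 775 %s" % path)
            castor_paths_checked.append(path)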
def cmsHarvester.create_and_check_castor_dirs | ( | self | ) |
Make sure all required CASTOR output dirs exist. This checks the CASTOR base dir specified by the user as well as all the subdirs required by the current set of jobs.
Definition at line 1428 of file cmsHarvester.py.
References list(), and bookConverter.max.
def cmsHarvester.create_castor_path_name_common | ( | self, | |
dataset_name | |||
) |
Build the common part of the output path to be used on CASTOR. This consists of the CASTOR area base path specified by the user and a piece depending on the data type (data vs. MC), the harvesting type and the dataset name followed by a piece containing the run number and event count. (See comments in create_castor_path_name_special for details.) This method creates the common part, without run number and event count.
Definition at line 1324 of file cmsHarvester.py.
References create_castor_path_name_special(), and python.rootplot.root2matplotlib.replace().
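As a hedged illustration of the layout this description implies (the exact ordering of the pieces is an assumption):

    import os.path

    def castor_path_common_sketch(base_dir, datatype, harvesting_type, dataset_name):
        # Base area + data type (data vs. MC) + harvesting type + escaped
        # dataset name; run number and event count are appended later by
        # create_castor_path_name_special().
        escaped = dataset_name.lstrip("/").replace("/", "__")
        return os.path.join(base_dir, datatype.lower(), harvesting_type, escaped)

    # e.g. castor_path_common_sketch("/castor/cern.ch/user/j/jhegeman", "mc",
    #          "RelVal", "/RelValZMM/CMSSW_3_8_2-START38_V9-v1/GEN-SIM-RECO")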
def cmsHarvester.create_castor_path_name_special | ( | self, | |
dataset_name, | |||
run_number, | |||
castor_path_common | |||
) |
Create the specialised part of the CASTOR output dir name.

NOTE: To avoid clashes with `incremental harvesting' (re-harvesting when a dataset grows) we have to include the event count in the path name. The underlying `problem' is that CRAB does not overwrite existing output files so if the output file already exists CRAB will fail to copy back the output.

NOTE: It's not possible to create different kinds of harvesting jobs in a single call to this tool. However, in principle it could be possible to create both data and MC jobs in a single go.

NOTE: The number of events used in the path name is the _total_ number of events in the dataset/run at the time of harvesting. If we're doing partial harvesting the final results will reflect lower statistics. This is a) the easiest to code and b) the least likely to lead to confusion if someone ever decides to swap/copy around file blocks between sites.
Definition at line 1380 of file cmsHarvester.py.
Referenced by create_castor_path_name_common().
def cmsHarvester.create_config_file_name | ( | self, | |
dataset_name, | |||
run_number | |||
) |
Generate the name of the configuration file to be run by CRAB. Depending on the harvesting mode (single-step or two-step) this is the name of the real harvesting configuration or the name of the first-step ME summary extraction configuration.
Definition at line 4065 of file cmsHarvester.py.
def cmsHarvester.create_crab_config | ( | self | ) |
Create a CRAB configuration for a given job.

NOTE: This is _not_ a complete (as in: submittable) CRAB configuration. It is used to store the common settings for the multicrab configuration.

NOTE: Only CERN CASTOR area (/castor/cern.ch/) is supported.

NOTE: According to CRAB, you `Must define exactly two of total_number_of_events, events_per_job, or number_of_jobs.'. For single-step harvesting we force one job, for the rest we don't really care.

# BUG BUG BUG
# With the current version of CRAB (2.6.1), in which Daniele
# fixed the behaviour of no_block_boundary for me, one _has to
# specify_ the total_number_of_events and one single site in
# the se_white_list.
# BUG BUG BUG end
Definition at line 4233 of file cmsHarvester.py.
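A minimal sketch of how such a common configuration can be assembled as a list of lines and joined, mirroring the crab_config and multicrab_config_lines variables documented on this page. The section and option names follow CRAB 2 conventions but are illustrative assumptions here, not the method's actual output.

    def create_crab_config_sketch(castor_dir):
        # Build the common crab.cfg settings line by line.
        lines = []
        lines.append("[CRAB]")
        lines.append("jobtype = cmssw")
        lines.append("scheduler = glite")
        lines.append("")
        lines.append("[USER]")
        lines.append("copy_data = 1")
        lines.append("user_remote_dir = %s" % castor_dir)
        # Per-job settings (se_white_list, total_number_of_events, ...)
        # are filled in per block in the multicrab configuration.
        return "\n".join(lines)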
def cmsHarvester.create_es_prefer_snippet | ( | self, | |
dataset_name | |||
) |
Build the es_prefer snippet for the reference histograms. The building of the snippet is wrapped in some care-taking code that figures out the name of the reference histogram set and makes sure the corresponding tag exists.
Definition at line 4690 of file cmsHarvester.py.
References join().
def cmsHarvester.create_harvesting_config | ( | self, | |
dataset_name | |||
) |
Create the Python harvesting configuration for harvesting. The basic configuration is created by Configuration.PyReleaseValidation.ConfigBuilder. (This mimics what cmsDriver.py does.) After that we add some specials ourselves. NOTE: On one hand it may not be nice to circumvent cmsDriver.py, on the other hand cmsDriver.py does not really do anything itself. All the real work is done by the ConfigBuilder so there is not much risk that we miss out on essential developments of cmsDriver in the future.
Definition at line 4725 of file cmsHarvester.py.
def cmsHarvester.create_harvesting_config_file_name | ( | self, | |
dataset_name | |||
) |
Definition at line 4097 of file cmsHarvester.py.
Referenced by write_harvesting_config().
def cmsHarvester.create_harvesting_output_file_name | ( | self, | |
dataset_name, | |||
run_number | |||
) |
Generate the name to be used for the harvesting output file. This harvesting output file is the _final_ ROOT output file containing the harvesting results. In case of two-step harvesting there is an intermediate ME output file as well.
Definition at line 4169 of file cmsHarvester.py.
def cmsHarvester.create_me_extraction_config | ( | self, | |
dataset_name | |||
) |
def create_harvesting_config_two_step(self, dataset_name):
    """Create the Python harvesting configuration for two-step
    harvesting.
    """
    # BUG BUG BUG
    config_contents = self.create_harvesting_config_single_step(dataset_name)

    return config_contents
Definition at line 4951 of file cmsHarvester.py.
References create_output_file_name(), and join().
def cmsHarvester.create_me_summary_config_file_name | ( | self, | |
dataset_name | |||
) |
Definition at line 4111 of file cmsHarvester.py.
Referenced by write_me_extraction_config().
def cmsHarvester.create_me_summary_output_file_name | ( | self, | |
dataset_name | |||
) |
Generate the name of the intermediate ME file name to be used in two-step harvesting.
Definition at line 4201 of file cmsHarvester.py.
def cmsHarvester.create_multicrab_block_name | ( | self, | |
dataset_name, | |||
run_number, | |||
index | |||
) |
Create the block name to use for this dataset/run number. This is what appears in the brackets `[]' in multicrab.cfg. It is used as the name of the job and to create output directories.
Definition at line 4216 of file cmsHarvester.py.
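A plausible reconstruction, based on the `sep = "#"' and `index = "site_%02d"' variables documented above; treat the exact joining order as an assumption of this sketch.

    def multicrab_block_name_sketch(dataset_name, run_number, index):
        # e.g. "RelValZMM__CMSSW_3_8_2-START38_V9-v1__GEN-SIM-RECO#143827#site_01"
        dataset_name_escaped = dataset_name.lstrip("/").replace("/", "__")
        return "#".join([dataset_name_escaped, str(run_number), index])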
def cmsHarvester.create_multicrab_config | ( | self | ) |
Create a multicrab.cfg file for all samples.

This creates the contents for a multicrab.cfg file that uses the crab.cfg file (generated elsewhere) for the basic settings and contains blocks for each run of each dataset.

# BUG BUG BUG
# The fact that it's necessary to specify the se_white_list
# and the total_number_of_events is due to our use of CRAB
# version 2.6.1. This should no longer be necessary in the
# future.
# BUG BUG BUG end
Definition at line 4313 of file cmsHarvester.py.
def cmsHarvester.create_output_file_name | ( | self, | |
dataset_name, | |||
run_number = None |
|||
) |
Create the name of the output file name to be used. This is the name of the output file of the `first step'. In the case of single-step harvesting this is already the final harvesting output ROOT file. In the case of two-step harvesting it is the name of the intermediary ME summary file.
Definition at line 4125 of file cmsHarvester.py.
Referenced by create_me_extraction_config().
def cmsHarvester.dbs_check_dataset_spread | ( | self, | |
dataset_name | |||
) |
def dbs_resolve_dataset_number_of_sites(self, dataset_name):
    """Ask DBS across how many sites this dataset has been spread out.

    This is especially useful to check that we do not submit a job
    supposed to run on a complete sample that is not contained at a
    single site.
    """
    # DEBUG DEBUG DEBUG
    assert not self.dbs_api is None

    api = self.dbs_api
    dbs_query = "find count(site) where dataset = %s " \
                "and dataset.status = VALID" % \
                dataset_name
    try:
        api_result = api.executeQuery(dbs_query)
    except DbsApiException:
        raise Error("ERROR: Could not execute DBS query")

    try:
        num_sites = []
        class Handler(xml.sax.handler.ContentHandler):
            def startElement(self, name, attrs):
                if name == "result":
                    num_sites.append(str(attrs["COUNT_STORAGEELEMENT"]))
        xml.sax.parseString(api_result, Handler())
    except SAXParseException:
        raise Error("ERROR: Could not parse DBS server output")

    # DEBUG DEBUG DEBUG
    assert len(num_sites) == 1
    num_sites = int(num_sites[0])

    # End of dbs_resolve_dataset_number_of_sites.
    return num_sites

def dbs_check_dataset_spread(self, dataset_name):
    """Figure out across how many sites this dataset is spread.

    NOTE: This is something we need to figure out per run, since we
    want to submit harvesting jobs per run.

    Basically three things can happen with a given dataset:
    """
    # DEBUG DEBUG DEBUG
    assert not self.dbs_api is None

    api = self.dbs_api
    dbs_query = "find run, run.numevents, site, file.count " \
                "where dataset = %s " \
                "and dataset.status = VALID" % \
                dataset_name
    try:
        api_result = api.executeQuery(dbs_query)
    except DbsApiException:
        msg = "ERROR: Could not execute DBS query"
        self.logger.fatal(msg)
        raise Error(msg)

    # Index things by run number. No cross-check is done to make
    sample_info = {}
    try:
        class Handler(xml.sax.handler.ContentHandler):
            def startElement(self, name, attrs):
                if name == "result":
                    run_number = int(attrs["RUNS_RUNNUMBER"])
                    site_name = str(attrs["STORAGEELEMENT_SENAME"])
                    file_count = int(attrs["COUNT_FILES"])
                    event_count = int(attrs["RUNS_NUMBEROFEVENTS"])
                    info = (site_name, file_count, event_count)
                    try:
                        sample_info[run_number].append(info)
                    except KeyError:
                        sample_info[run_number] = [info]
        xml.sax.parseString(api_result, Handler())
    except SAXParseException:
        msg = "ERROR: Could not parse DBS server output"
        self.logger.fatal(msg)
        raise Error(msg)

    # Now translate this into a slightly more usable mapping.
    sites = {}
    for (run_number, site_info) in sample_info.iteritems():
        unique_file_counts = set([i[1] for i in site_info])
        if len(unique_file_counts) == 1:
            site_names = [self.pick_a_site([i[0] for i in site_info])]
            nevents = [site_info[0][2]]
        else:
            site_names = [i[0] for i in site_info]
            nevents = [i[2] for i in site_info]
        sites[run_number] = zip(site_names, nevents)

    self.logger.debug("Sample `%s' spread is:" % dataset_name)
    run_numbers = sites.keys()
    run_numbers.sort()
    for run_number in run_numbers:
        self.logger.debug("  run # %6d: %d sites (%s)" % \
                          (run_number,
                           len(sites[run_number]),
                           ", ".join([i[0] for i in sites[run_number]])))

    # End of dbs_check_dataset_spread.
    return sites

# DEBUG DEBUG DEBUG
def dbs_check_dataset_spread_old(self, dataset_name): older, near-verbatim copy of dbs_check_dataset_spread() above; only the function name and one debug-message string differ.
Figure out the number of events in each run of this dataset. This is a more efficient way of doing this than calling dbs_resolve_number_of_events for each run.
Definition at line 3076 of file cmsHarvester.py.
def cmsHarvester.dbs_resolve_cmssw_version | ( | self, | |
dataset_name | |||
) |
Ask DBS for the CMSSW version used to create this dataset.
Definition at line 2474 of file cmsHarvester.py.
def cmsHarvester.dbs_resolve_dataset_name | ( | self, | |
dataset_name | |||
) |
Use DBS to resolve a wildcarded dataset name.
Definition at line 2418 of file cmsHarvester.py.
Referenced by build_dataset_list().
def cmsHarvester.dbs_resolve_datatype | ( | self, | |
dataset_name | |||
) |
Ask DBS for the data type (data or mc) of a given dataset.
Definition at line 2682 of file cmsHarvester.py.
def cmsHarvester.dbs_resolve_globaltag | ( | self, | |
dataset_name | |||
) |
Ask DBS for the globaltag corresponding to a given dataset.

# BUG BUG BUG
# This does not seem to work for data datasets? E.g. for
# /Cosmics/Commissioning08_CRAFT0831X_V1_311_ReReco_FromSuperPointing_v1/RAW-RECO
# Probably due to the fact that the GlobalTag changed during
# data taking...
# BUG BUG BUG end
Definition at line 2626 of file cmsHarvester.py.
def cmsHarvester.dbs_resolve_number_of_events | ( | self, | |
dataset_name, | |||
run_number = None |
|||
) |
Determine the number of events in a given dataset (and run).

Ask DBS for the number of events in a dataset. If a run number is specified the number of events returned is that in that run of that dataset. If problems occur we throw an exception.

# BUG BUG BUG
# Since DBS does not return the number of events correctly,
# neither for runs nor for whole datasets, we have to work
# around that a bit...
# BUG BUG BUG end
Definition at line 2735 of file cmsHarvester.py.
def cmsHarvester.dbs_resolve_runs | ( | self, | |
dataset_name | |||
) |
def dbs_resolve_dataset_number_of_events(self, dataset_name):
    """Ask DBS across how many events this dataset has been spread out.

    This is especially useful to check that we do not submit a job
    supposed to run on a complete sample that is not contained at a
    single site.
    """
    # DEBUG DEBUG DEBUG
    assert not self.dbs_api is None

    api = self.dbs_api
    dbs_query = "find count(site) where dataset = %s " \
                "and dataset.status = VALID" % \
                dataset_name
    try:
        api_result = api.executeQuery(dbs_query)
    except DbsApiException:
        raise Error("ERROR: Could not execute DBS query")

    try:
        num_events = []
        class Handler(xml.sax.handler.ContentHandler):
            def startElement(self, name, attrs):
                if name == "result":
                    num_events.append(str(attrs["COUNT_STORAGEELEMENT"]))
        xml.sax.parseString(api_result, Handler())
    except SAXParseException:
        raise Error("ERROR: Could not parse DBS server output")

    # DEBUG DEBUG DEBUG
    assert len(num_events) == 1
    num_events = int(num_events[0])

    # End of dbs_resolve_dataset_number_of_events.
    return num_events
Ask DBS for the list of runs in a given dataset.

# NOTE: This does not (yet?) skip/remove empty runs. There is
# a bug in the DBS entry run.numevents (i.e. it always returns
# zero) which should be fixed in the `next DBS release'.
# See also:
#   https://savannah.cern.ch/bugs/?53452
#   https://savannah.cern.ch/bugs/?53711
Definition at line 2568 of file cmsHarvester.py.
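A hedged sketch of the query-and-parse pattern this method follows, reusing the DBS idioms shown in the old code fragments elsewhere on this page; the exact query string and attribute name are assumptions.

    import xml.sax

    def dbs_resolve_runs_sketch(api, dataset_name):
        # Query DBS for the runs of one dataset and collect the run
        # numbers from the XML reply.
        dbs_query = "find run where dataset = %s " \
                    "and dataset.status = VALID" % dataset_name
        api_result = api.executeQuery(dbs_query)
        runs = []
        class Handler(xml.sax.handler.ContentHandler):
            def startElement(self, name, attrs):
                if name == "result":
                    runs.append(int(attrs["RUNS_RUNNUMBER"]))
        xml.sax.parseString(api_result, Handler())
        return runs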
def cmsHarvester.escape_dataset_name | ( | self, | |
dataset_name | |||
) |
Escape a DBS dataset name. Escape a DBS dataset name such that it does not cause trouble with the file system. This means turning each `/' into `__', except for the first one which is just removed.
Definition at line 4046 of file cmsHarvester.py.
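The transformation described above is simple enough to show inline; a minimal sketch (the helper name is assumed):

    def escape_dataset_name_sketch(dataset_name):
        # "/A/B/C" -> "A__B__C": drop the leading slash, then turn each
        # remaining "/" into "__" so the name is file-system safe.
        return dataset_name.lstrip("/").replace("/", "__")

    # escape_dataset_name_sketch("/Cosmics/Run2010A-v1/RAW") == "Cosmics__Run2010A-v1__RAW"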
def cmsHarvester.load_ref_hist_mappings | ( | self | ) |
Load the reference histogram mappings from file. The dataset name to reference histogram name mappings are read from a text file specified in self.ref_hist_mappings_file_name.
Definition at line 5206 of file cmsHarvester.py.
References mergeVDriftHistosByStation.file, and bookConverter.max.
def cmsHarvester.option_handler_caf_access | ( | self, | |
option, | |||
opt_str, | |||
value, | |||
parser | |||
) |
Set the self.caf_access flag to try and create jobs that run on the CAF.
Definition at line 1100 of file cmsHarvester.py.
def cmsHarvester.option_handler_castor_dir | ( | self, | |
option, | |||
opt_str, | |||
value, | |||
parser | |||
) |
def option_handler_dataset_name(self, option, opt_str, value, parser):
    """Specify the name(s) of the dataset(s) to be processed.

    It is checked to make sure that no dataset name or listfile
    names are given yet. If all is well (i.e. we still have a
    clean slate) the dataset name is stored for later use,
    otherwise a Usage exception is raised.
    """
    if not self.input_method is None:
        if self.input_method == "dataset":
            raise Usage("Please only feed me one dataset specification")
        elif self.input_method == "listfile":
            raise Usage("Cannot specify both dataset and input list file")
        else:
            assert False, "Unknown input method `%s'" % self.input_method
    self.input_method = "dataset"
    self.input_name = value
    self.logger.info("Input method used: %s" % self.input_method)
    # End of option_handler_dataset_name.

##########

def option_handler_listfile_name(self, option, opt_str, value, parser):
    """Specify the input list file containing datasets to be processed.

    It is checked to make sure that no dataset name or listfile
    names are given yet. If all is well (i.e. we still have a
    clean slate) the listfile name is stored for later use,
    otherwise a Usage exception is raised.
    """
    if not self.input_method is None:
        if self.input_method == "listfile":
            raise Usage("Please only feed me one list file")
        elif self.input_method == "dataset":
            raise Usage("Cannot specify both dataset and input list file")
        else:
            assert False, "Unknown input method `%s'" % self.input_method
    self.input_method = "listfile"
    self.input_name = value
    self.logger.info("Input method used: %s" % self.input_method)
    # End of option_handler_listfile_name.

Specify where on CASTOR the output should go.
At the moment only output to CERN CASTOR is supported. Eventually the harvested results should go into the central place for DQM on CASTOR anyway.
Definition at line 1058 of file cmsHarvester.py.
def cmsHarvester.option_handler_crab_submission | ( | self, | |
option, | |||
opt_str, | |||
value, | |||
parser | |||
) |
CRAB jobs are not created and submitted automatically by default; this flag enables automatic submission.
Definition at line 1128 of file cmsHarvester.py.
def cmsHarvester.option_handler_list_types | ( | self, | |
option, | |||
opt_str, | |||
value, | |||
parser | |||
) |
List all harvesting types and their mappings. This lists all implemented harvesting types with their corresponding mappings to sequence names. This had to be separated out from the help since it depends on the CMSSW version and was making things a bit of a mess. NOTE: There is no way (at least not that I could come up with) to code this in a neat generic way that can be read both by this method and by setup_harvesting_info(). Please try hard to keep these two methods in sync!
Definition at line 1150 of file cmsHarvester.py.
def cmsHarvester.option_handler_no_t1access | ( | self, | |
option, | |||
opt_str, | |||
value, | |||
parser | |||
) |
Set the self.no_t1access flag to try and create jobs that run without special `t1access' role.
Definition at line 1083 of file cmsHarvester.py.
def cmsHarvester.option_handler_preferred_site | ( | self, | |
option, | |||
opt_str, | |||
value, | |||
parser | |||
) |
Definition at line 1144 of file cmsHarvester.py.
def cmsHarvester.option_handler_saveByLumiSection | ( | self, | |
option, | |||
opt_str, | |||
value, | |||
parser | |||
) |
Set process.dqmSaver.saveByLumiSection = 1 in the harvesting cfg file.
Definition at line 1116 of file cmsHarvester.py.
def cmsHarvester.option_handler_sites | ( | self, | |
option, | |||
opt_str, | |||
value, | |||
parser | |||
) |
Definition at line 1138 of file cmsHarvester.py.
def cmsHarvester.parse_cmd_line_options | ( | self | ) |
Definition at line 1869 of file cmsHarvester.py.
def cmsHarvester.pick_a_site | ( | self, | |
sites, | |||
cmssw_version | |||
) |
Definition at line 1705 of file cmsHarvester.py.
def cmsHarvester.process_dataset_ignore_list | ( | self | ) |
Update the list of datasets taking into account the ones to ignore.

Both lists have been generated before from DBS and both are assumed to be unique.

NOTE: The advantage of creating the ignore list from DBS (in case a regexp is given) and matching that instead of directly matching the ignore criterion against the list of datasets (to consider) built from DBS is that in the former case we're sure that all regexps are treated exactly as DBS would have done without the cmsHarvester.

NOTE: This only removes complete samples. Exclusion of single runs is done by the book keeping. So the assumption is that a user never wants to harvest just part (i.e. n out of N runs) of a sample.
Definition at line 3566 of file cmsHarvester.py.
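The removal itself reduces to a dictionary filter; a minimal sketch under the assumption that both lists map dataset names to run lists:

    def filter_ignored_datasets_sketch(datasets_to_use, datasets_to_ignore):
        # Drop complete samples whose names appear in the ignore list;
        # per-run exclusion is handled elsewhere by the book keeping.
        return dict((name, runs)
                    for (name, runs) in datasets_to_use.items()
                    if name not in datasets_to_ignore)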
def cmsHarvester.process_runs_use_and_ignore_lists | ( | self | ) |
Definition at line 3613 of file cmsHarvester.py.
def cmsHarvester.ref_hist_mappings_needed | ( | self, | |
dataset_name = None |
|||
) |
Check if we need to load and check the reference mappings. For data the reference histograms should be taken automatically from the GlobalTag, so we don't need any mappings. For RelVals we need to know a mapping to be used in the es_prefer code snippet (different references for each of the datasets.) WARNING: This implementation is a bit convoluted.
Definition at line 5172 of file cmsHarvester.py.
def cmsHarvester.run | ( | self | ) |
def cmsHarvester.setup_dbs | ( | self | ) |
If that works

cmd = "dbs search --query=\"find dataset where dataset = impossible\""
(status, output) = commands.getstatusoutput(cmd)
pdb.set_trace()
if status != 0 or \
   output.lower().find("unsupported api call") > -1:
    self.logger.fatal("It seems DBS is not setup...")
    self.logger.fatal("  %s returns crap:" % cmd)
    for line in output.split("\n"):
        self.logger.fatal("  %s" % line)
    raise Error("ERROR: DBS needs to be setup first!")
Setup the Python side of DBS. For more information see the DBS Python API documentation: https://twiki.cern.ch/twiki/bin/view/CMS/DBSApiDocumentation
Definition at line 2392 of file cmsHarvester.py.
def cmsHarvester.setup_harvesting_info | ( | self | ) |
Fill our dictionary with all info needed to understand harvesting. This depends on the CMSSW version since at some point the names and sequences were modified. NOTE: There is no way (at least not that I could come up with) to code this in a neat generic way that can be read both by this method and by option_handler_list_types(). Please try hard to keep these two methods in sync!
Definition at line 1205 of file cmsHarvester.py.
def cmsHarvester.show_exit_message | ( | self | ) |
Tell the user what to do now, after this part is done. This should provide the user with some (preferably copy-pasteable) instructions on what to do now with the setups and files that have been created.
Definition at line 5471 of file cmsHarvester.py.
def cmsHarvester.singlify_datasets | ( | self | ) |
Remove all but the largest part of all datasets. This allows us to harvest at least part of these datasets using single-step harvesting until the two-step approach works.
Definition at line 3742 of file cmsHarvester.py.
References bookConverter.max, and makeHLTPrescaleTable.values.
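A minimal sketch of the `keep the largest part' selection for a single run, assuming the per-run site information is a list of (site_name, nevents) pairs as built by dbs_check_dataset_spread():

    def largest_part_sketch(sites_for_run):
        # Keep only the site hosting the most events so the run can be
        # harvested in a single step at a single site.
        return max(sites_for_run, key=lambda site_events: site_events[1])

    # largest_part_sketch([("srm-cms.cern.ch", 1000), ("t2.desy.de", 250)])
    # -> ("srm-cms.cern.ch", 1000)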
def cmsHarvester.write_crab_config | ( | self | ) |
def create_harvesting_config(self, dataset_name):
    """Create the Python harvesting configuration for a given job.

    NOTE: The reason to have a single harvesting configuration per
    sample is to be able to specify the GlobalTag corresponding to
    each sample. Since it has been decided that (apart from the
    prompt reco) datasets cannot contain runs with different
    GlobalTags, we don't need a harvesting config per run.

    NOTE: This is the place where we distinguish between
    single-step and two-step harvesting modes (at least for the
    Python job configuration).
    """
    ###
    if self.harvesting_mode == "single-step":
        config_contents = self.create_harvesting_config_single_step(dataset_name)
    elif self.harvesting_mode == "two-step":
        config_contents = self.create_harvesting_config_two_step(dataset_name)
    else:
        assert False, "ERROR: unknown harvesting mode `%s'" % \
               self.harvesting_mode
    ###
    # End of create_harvesting_config.
    return config_contents
Write a CRAB job configuration Python file.
Definition at line 5048 of file cmsHarvester.py.
References mergeVDriftHistosByStation.file.
def cmsHarvester.write_harvesting_config | ( | self, | |
dataset_name | |||
) |
Write a harvesting job configuration Python file. NOTE: This knows nothing about single-step or two-step harvesting. That's all taken care of by create_harvesting_config.
Definition at line 5106 of file cmsHarvester.py.
References create_harvesting_config_file_name(), and mergeVDriftHistosByStation.file.
def cmsHarvester.write_me_extraction_config | ( | self, | |
dataset_name | |||
) |
Write an ME-extraction configuration Python file. This `ME-extraction' (ME = Monitoring Element) is the first step of the two-step harvesting.
Definition at line 5139 of file cmsHarvester.py.
References create_me_summary_config_file_name(), and mergeVDriftHistosByStation.file.
def cmsHarvester.write_multicrab_config | ( | self | ) |
Write a multi-CRAB job configuration Python file.
Definition at line 5077 of file cmsHarvester.py.
References mergeVDriftHistosByStation.file.
string cmsHarvester.__author__ = "Jeroen Hegeman (jeroen.hegeman@cern.ch)," |
Definition at line 38 of file cmsHarvester.py.
string cmsHarvester.__version__ = "3.8.2p1" |
File : cmsHarvest.py Authors : Jeroen Hegeman (jeroen.hegeman@cern.ch), Niklas Pietsch (niklas.pietsch@desy.de), Francesco Costanza (francesco.costanza@desy.de) Last change: 20100308.
Purpose : Main program to run all kinds of harvesting. For more information please refer to the CMS Twiki url mentioned just below here.
Definition at line 37 of file cmsHarvester.py.
string cmsHarvester.action = "callback" |
Definition at line 2055 of file cmsHarvester.py.
tuple cmsHarvester.all_file_names = files_info[run_number] |
Definition at line 3231 of file cmsHarvester.py.
list cmsHarvester.all_t1 |
Definition at line 1722 of file cmsHarvester.py.
cmsHarvester.caf_access |
Definition at line 1105 of file cmsHarvester.py.
cmsHarvester.callback = self.option_handler_input_Jsonrunfile, |
Definition at line 2056 of file cmsHarvester.py.
Referenced by CaloDualConeSelector< HBHERecHit >.selectCallback(), CaloConeSelector< T >.selectCallback(), and edm::ESProducer.setWhatProduced().
cmsHarvester.castor_base_dir |
Definition at line 1074 of file cmsHarvester.py.
tuple cmsHarvester.castor_dir = self.datasets_information[dataset_name] |
tuple cmsHarvester.castor_path_common = self.create_castor_path_name_common(dataset_name) |
if num_sites == 1:
    self.logger.info("    sample is contained at a single site")
else:
    self.logger.info("    sample is spread across %d sites" % \
                     num_sites)
if num_sites < 1:
    self.logger.warning("  --> skipping dataset which is not " \
                        "hosted anywhere")
Definition at line 5455 of file cmsHarvester.py.
tuple cmsHarvester.castor_paths |
Definition at line 5459 of file cmsHarvester.py.
cmsHarvester.castor_prefix = self.castor_prefix |
Definition at line 4351 of file cmsHarvester.py.
string cmsHarvester.cmd = "rfstat %s" |
self.logger.debug("Path is now `%s'" % \ path)
Definition at line 1631 of file cmsHarvester.py.
cmsHarvester.cmssw_version = self.datasets_information[dataset_name] |
Definition at line 4394 of file cmsHarvester.py.
list cmsHarvester.complete_sites |
site_names_ref = set(files_info[run_number].values()[0][1])
for site_names_tmp in files_info[run_number].values()[1:]:
    if set(site_names_tmp[1]) != site_names_ref:
        mirrored = False
        break
Definition at line 3276 of file cmsHarvester.py.
tuple cmsHarvester.config_builder = ConfigBuilder(config_options, with_input=True) |
Definition at line 4794 of file cmsHarvester.py.
string cmsHarvester.config_contents = config_builder.pythonCfgCode |
if self.harvesting_mode == "two-step":
    castor_dir = self.datasets_information[dataset_name] \
                 ["castor_path"][run]
    customisations.append("")
    customisations.append("# This is the second step (the real")
    customisations.append("# harvesting step) of a two-step")
    customisations.append("# harvesting procedure.")
    customisations.append("import pdb")
    customisations.append("import commands")
    customisations.append("import os")
    customisations.append("castor_dir = \"%s\"" % castor_dir)
    customisations.append("cmd = \"rfdir %s\" % castor_dir")
    customisations.append("(status, output) = commands.getstatusoutput(cmd)")
    customisations.append("if status != 0:")
    customisations.append("    print \"ERROR\"")
    customisations.append("    raise Exception, \"ERROR\"")
    customisations.append("file_names = [os.path.join(\"rfio:%s\" % path, i) for i in output.split() if i.startswith(\"EDM_summary\") and i.endswith(\".root\")]")
    #customisations.append("pdb.set_trace()")
    customisations.append("process.source.fileNames = cms.untracked.vstring(*file_names)")
    customisations.append("")
Definition at line 4799 of file cmsHarvester.py.
cmsHarvester.config_file_name = self.create_me_summary_config_file_name(dataset_name) |
pdb.set_trace()
if self.datasets_information[dataset_name] \
       ["mirrored"][run_number] == False:
    config_file_name = config_file_name.replace(".py", "_partial.py")
Definition at line 4086 of file cmsHarvester.py.
list cmsHarvester.connect_name = self.frontier_connection_name["globaltag"] |
Definition at line 4833 of file cmsHarvester.py.
dictionary cmsHarvester.country_codes |
Definition at line 1735 of file cmsHarvester.py.
string cmsHarvester.crab_config = "\n" |
cmsHarvester.crab_submission |
Definition at line 1132 of file cmsHarvester.py.
list cmsHarvester.customisations = [""] |
Definition at line 4829 of file cmsHarvester.py.
tuple cmsHarvester.dataset_name_escaped = self.escape_dataset_name(dataset_name) |
Definition at line 4350 of file cmsHarvester.py.
tuple cmsHarvester.dataset_names = self.datasets_to_use.keys() |
Definition at line 4345 of file cmsHarvester.py.
cmsHarvester.dataset_names_after_checks = dataset_names_after_checks_tmp |
Definition at line 4031 of file cmsHarvester.py.
tuple cmsHarvester.dataset_names_after_checks_tmp = copy.deepcopy(dataset_names_after_checks) |
Definition at line 4024 of file cmsHarvester.py.
cmsHarvester.datasets_information |
Definition at line 5342 of file cmsHarvester.py.
cmsHarvester.datasets_to_ignore |
Definition at line 3458 of file cmsHarvester.py.
cmsHarvester.datasets_to_use |
Definition at line 3432 of file cmsHarvester.py.
list cmsHarvester.datatype = self.datasets_information[dataset_name] |
Definition at line 4783 of file cmsHarvester.py.
cmsHarvester.dbs_api |
Definition at line 2405 of file cmsHarvester.py.
tuple cmsHarvester.empty_runs = dict(tmp) |
Definition at line 4008 of file cmsHarvester.py.
tuple cmsHarvester.es_prefer_snippet = self.create_es_prefer_snippet(dataset_name) |
Definition at line 4882 of file cmsHarvester.py.
int cmsHarvester.exit_code = 1 |
Definition at line 5693 of file cmsHarvester.py.
list cmsHarvester.file_name = handler.results["file.name"] |
Definition at line 3175 of file cmsHarvester.py.
Referenced by many functions across CMSSW, including HcalLutManager.create_lut_loader(), SiStripHistoPlotter.createStaticPlot(), DTTPGLutFile.open(), L1TriggerLutFile.open(), the TEcnaRead.Read*() family, and TEcnaRun.WriteRootFile() (long cross-reference list abbreviated).
tuple cmsHarvester.files_at_site |
Definition at line 3235 of file cmsHarvester.py.
dictionary cmsHarvester.files_info = {} |
Definition at line 3161 of file cmsHarvester.py.
list cmsHarvester.files_without_sites |
Definition at line 3201 of file cmsHarvester.py.
cmsHarvester.globaltag = self.datasets_information[dataset_name] |
Definition at line 4786 of file cmsHarvester.py.
cmsHarvester.harvesting_info |
Definition at line 1311 of file cmsHarvester.py.
cmsHarvester.harvesting_mode |
Definition at line 2215 of file cmsHarvester.py.
cmsHarvester.harvesting_type |
Definition at line 3859 of file cmsHarvester.py.
string cmsHarvester.help = "Jsonfile containing dictionary of run/lumisections pairs. " |
Definition at line 2053 of file cmsHarvester.py.
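The run/lumisection JSON format referred to here maps run numbers to lists of [first, last] lumisection ranges; a small illustrative file (the runs and ranges below are invented):

    {"163078": [[1, 30], [45, 99]],
     "163233": [[1, 160]]}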
string cmsHarvester.index = "site_%02d" |
Definition at line 4378 of file cmsHarvester.py.
Referenced by a very large number of functions throughout CMSSW; the cross-reference list (several hundred entries, truncated in the original) is omitted here.
FWCaloRecHitDigitSetProxyBuilder.scaleProduct(), FWPFEcalRecHitRPProxyBuilder.scaleProduct(), reco::TemplatedSecondaryVertexTagInfo< reco::CandIPTagInfo, reco::VertexCompositePtrCandidate >.secondaryVertex(), RPCSeedPattern.SegmentAlgorithmSpecial(), FWViewContextMenuHandlerGL.select(), HLTEventSelector.select(), FWModelExpressionSelector.select(), MuonAlignmentFromReference.selectResidualsPeaks(), CmsShowSearchFiles.sendToWebBrowser(), TkStripMeasurementDet.set128StripStatus(), TkPixelMeasurementDet.setActive(), TkPixelMeasurementDet.setActiveThisEvent(), TkStripMeasurementDet.setActiveThisEvent(), TkStripMeasurementDet.setActiveThisPeriod(), ElectronDqmAnalyzerBase.setBookIndex(), CalibrationInterface< CategoryT, CalibDataT >.setCalibData(), Vispa.Main.Application.Application.setCurrentTabController(), FWEventItem.setDefaultDisplayProperties(), FWEventItem.setDisplayProperties(), edm::Path.setEarlyDeleteHelpers(), TkPixelMeasurementDet.setEmpty(), TkStripMeasurementDet.setEmpty(), SimpleL1MuGMTCand.setEta(), FWPSetTableManager.setExpanded(), Particle.SetFirstDaughterIndex(), HBHEStatusBitSetter.SetFlagsFromDigi(), HBHEStatusBitSetter.SetFlagsFromRecHits(), RPCLogCone.setIdx(), Particle.SetLastDaughterIndex(), reco::PFBlock.setLink(), reco::PFDisplacedVertexCandidate.setLink(), PFElectronAlgo.SetLinks(), HcalQIECoder.setOffset(), CastorQIECoder.setOffset(), SiStripLorentzAngleCalibration.setParameter(), SiPixelLorentzAngleCalibration.setParameter(), SiStripBackplaneCalibration.setParameter(), SiStripLorentzAngleCalibration.setParameterError(), SiPixelLorentzAngleCalibration.setParameterError(), SiStripBackplaneCalibration.setParameterError(), fit::RootMinuit< Function >.setParameters(), DreamSD.setPbWO2MaterialProperties_(), SimpleL1MuGMTCand.setPhi(), reco::PFTrack.setPoint(), SimpleL1MuGMTCand.setPt(), edm::OwnArray< T, MAX_SIZE, P >.setPtr(), edm::OwnVector< T, P >.setPtr(), reco::SoftLeptonProperties.setQuality(), gen::Pythia6Service.setSLHAFromHeader(), HcalQIECoder.setSlope(), CastorQIECoder.setSlope(), CaloGeometry.setSubdetGeometry(), CaloTopology.setSubdetTopology(), FWCaloTowerDetailView.setTextInfo(), FWMuonDetailView.setTextInfo(), FWPhotonDetailView.setTextInfo(), FWPFCandidateDetailView.setTextInfo(), FWElectronDetailView.setTextInfo(), FWTrackHitsDetailView.setTextInfo(), FWConvTrackHitsDetailView.setTextInfo(), edm::IndexIntoFile::IndexIntoFileItrSorted.setToLastEventInRange(), DQMRootSource.setupFile(), SiPixelPerformanceSummary.setValue(), PFElToElAssMaker.SetVars(), VertexMaker.SetVars(), PFCandidateMaker.SetVars(), MonitorElement.ShiftFillLast(), FWGUIValidatingTextEntry.showOptions(), CmsShowSearchFiles.showPrefixes(), FWModelContextMenuHandler.showSelectedModelContext(), L1DummyProducer.SimpleDigi(), SiPixelDetSummary.SiPixelDetSummary(), HcalQIECoder.slope(), CastorQIECoder.slope(), l1t.SortEGammas(), CSCChamberSpecs.specsValue(), TrackingMaterialAnalyser.split(), DQMRootOutputModule.startEndFile(), HLTConfigData.streamIndex(), edm.stripNamespace(), TkStripMeasurementDet.subId(), edm::SubProcess.SubProcess(), VoronoiSubtractor.subtractPedestal(), ReflectedIterator.subtractPedestal(), MultipleAlgoIterator.subtractPedestal(), ParametrizedSubtractor.subtractPedestal(), PileUpSubtractor.subtractPedestal(), cond::PayLoadInspector< DataT >.summary(), edm::SystemTimeKeeper.SystemTimeKeeper(), reco::GsfTrackExtra.tangentDeltaP(), reco::GsfTrackExtra.tangentMomentum(), reco::GsfTrackExtra.tangentPosition(), TauDQMHistPlotter.TauDQMHistPlotter(), SiPixelTemplate.temperrors(), 
GBRTree2D.TerminalIndex(), GBRTreeD.TerminalIndex(), GBRTree.TerminalIndex(), SiStripFedCabling.terse(), TEveElementIter.TEveElementIter(), TkStripMeasurementDet.theSet(), EcalShapeBase.timeIndex(), HFTimingTrust.timerr_hf(), CSCCFEBTimeSlice.timeSample(), timeshift_ns_hbheho(), timeshift_ns_hf(), TkStripMeasurementDet.totalStrips(), reco::TemplatedSecondaryVertexTagInfo< IPTI, VTX >.track(), reco::TemplatedSecondaryVertexTagInfo< IPTI, VTX >.trackData(), reco::TemplatedSecondaryVertexTagInfo< IPTI, VTX >.trackIPData(), reco::PFTrack.trajectoryPoint(), HLTConfigData.triggerIndex(), edm::TriggerNames.TriggerNames(), l1t::Stage2Layer2EGammaAlgorithmFirmwareImp1.trimmingLutIndex(), CSCOfflineMonitor.typeIndex(), CSCValidation.typeIndex(), uniqueElectronFinder(), TrackCategories.unknownTrack(), VertexCategories.unknownVertex(), CSCTFEvent.unpack(), edm::LazyGetter< T >.unpacked(), DCCMemBlock.unpackMemTowerData(), TkPixelMeasurementDet.update(), HcaluLUTTPGCoder.update(), FWPSetTableManager.update(), FP420Test.update(), BscTest.update(), G4StepStatistics.update(), gen::EvtGenLHCInterface.update_candlist(), EcalRegionCabling.updateEcalRefGetterWithElementIndex(), EcalRegionCabling.updateEcalRefGetterWithEtaPhi(), SeedingLayerSetsBuilder.updateEventSetup(), HcalDetDiagNoiseMonitor.UpdateHistos(), FWTableView.updateItems(), edm::EDConsumerBase.updateLookup(), SiStripRegionCabling.updateSiStripRefGetter(), HcalTBTiming.V775(), HLTMuon.validChambers(), CSCDBGasGainCorrection.value(), CSCDBChipSpeedCorrection.value(), PrimaryVertexMonitor.vertexPlots(), sistrip::EnsembleCalibrationLA.write_ensembles_text(), GctFormatTranslateMCLegacy.writeRctEmCandBlocks(), MuonAlignmentFromReference.writeTmpFiles(), and L1GtVmeWriterCore.writeVME().
cmsHarvester.Jsonfilename |
Definition at line 3708 of file cmsHarvester.py.
cmsHarvester.Jsonlumi |
Definition at line 3682 of file cmsHarvester.py.
int cmsHarvester.loop = 0 |
Definition at line 4391 of file cmsHarvester.py.
Referenced by optutl::CommandLineParser._getSectionFiles(), addFiles(), RawDataConverter.ClearData(), CmsShowMainFrame.CmsShowMainFrame(), DDCheckMaterial(), SiStripTFile.dirContent(), MillePedeAlignmentAlgorithm.doIO(), LaserAlignment.DumpPosFileSet(), LaserAlignment.DumpStripFileSet(), SETSeedFinder.estimateMomentum(), PhysicsTools::MVAComputer.evalInternal(), LMFDefFabric.getColor(), LMFDefFabric.getColorFromID(), RawDataConverter.GetDigis(), LMFDefFabric.getRunTag(), LMFDefFabric.getRunTagFromID(), LMFDefFabric.getTrigType(), LMFDefFabric.getTrigTypeFromID(), pat::EventHypothesis.loop(), output(), optutl::CommandLineParser.parseArguments(), EcalDigiSelector.produce(), SiStripTFile.readDQMFormat(), TrajectoryManager.reconstruct(), stdcomb.recursive_combination(), LMFColoredTable.setColor(), LMFColoredTable.setSystem(), HcalTestAnalysis.update(), DDI::Polyhedra.volume(), and DDI::Polycone.volume().
string cmsHarvester.marker = "\n" |
Definition at line 4814 of file cmsHarvester.py.
Referenced by fireworks.addDashedArrow(), fireworks.addDashedLine(), FWPhotonLegoProxyBuilder.build(), FWMuonGlimpseProxyBuilder.build(), FWElectronLegoProxyBuilder.build(), FWElectronGlimpseProxyBuilder.build(), FWTauProxyBuilderBase.buildBaseTau(), and FWMETProxyBuilder.buildViewType().
list cmsHarvester.marker_lines = [] |
Definition at line 4806 of file cmsHarvester.py.
string cmsHarvester.metavar = "JSONRUNFILE" |
Definition at line 2058 of file cmsHarvester.py.
cmsHarvester.mirrored = None |
Definition at line 3222 of file cmsHarvester.py.
string cmsHarvester.msg = "Could not create directory `%s'" |
class Handler(xml.sax.handler.ContentHandler):
    def startElement(self, name, attrs):
        if name == "result":
            site_name = str(attrs["STORAGEELEMENT_SENAME"])
            if len(site_name) < 1:
                return
            run_number = int(attrs["RUNS_RUNNUMBER"])
            file_name = str(attrs["FILES_LOGICALFILENAME"])
            nevents = int(attrs["FILES_NUMBEROFEVENTS"])
            if not files_info.has_key(run_number):
                files_info[run_number] = {}
                files_info[run_number][file_name] = (nevents, [site_name])
            elif not files_info[run_number].has_key(file_name):
                files_info[run_number][file_name] = (nevents, [site_name])
            else:
                assert nevents == files_info[run_number][file_name][0]
                files_info[run_number][file_name][1].append(site_name)

OBSOLETE OBSOLETE OBSOLETE end
Definition at line 1639 of file cmsHarvester.py.
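The obsolete handler above builds a per-run catalogue mapping each logical file name to its event count and the list of sites hosting it. The following self-contained sketch shows how such a SAX handler can be driven over a DBS-style XML reply; the XML snippet and its attribute values are invented for illustration, and Python 3 dict idioms replace the Python 2 has_key() calls of the original.

    import xml.sax
    import xml.sax.handler

    files_info = {}

    class Handler(xml.sax.handler.ContentHandler):
        def startElement(self, name, attrs):
            if name != "result":
                return
            site_name = str(attrs["STORAGEELEMENT_SENAME"])
            if len(site_name) < 1:
                return
            run_number = int(attrs["RUNS_RUNNUMBER"])
            file_name = str(attrs["FILES_LOGICALFILENAME"])
            nevents = int(attrs["FILES_NUMBEROFEVENTS"])
            run_files = files_info.setdefault(run_number, {})
            if file_name not in run_files:
                # First time we see this file (for this run).
                run_files[file_name] = (nevents, [site_name])
            else:
                # Same file at another site: event counts must agree.
                assert nevents == run_files[file_name][0]
                run_files[file_name][1].append(site_name)

    # Hypothetical DBS-style reply; real server output is more involved.
    xml_reply = ('<results>'
                 '<result STORAGEELEMENT_SENAME="srm.cern.ch"'
                 ' RUNS_RUNNUMBER="1"'
                 ' FILES_LOGICALFILENAME="/store/a.root"'
                 ' FILES_NUMBEROFEVENTS="100"/>'
                 '</results>')
    xml.sax.parseString(xml_reply.encode("utf-8"), Handler())
    print(files_info)   # {1: {'/store/a.root': (100, ['srm.cern.ch'])}}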
tuple cmsHarvester.multicrab_block_name |
Definition at line 4414 of file cmsHarvester.py.
string cmsHarvester.multicrab_config = "\n" |
Definition at line 4495 of file cmsHarvester.py.
list cmsHarvester.multicrab_config_lines = [] |
Definition at line 4338 of file cmsHarvester.py.
int cmsHarvester.nevents = int(handler.results["file.numevents"][index]) |
Definition at line 3176 of file cmsHarvester.py.
cmsHarvester.non_t1access |
Definition at line 1089 of file cmsHarvester.py.
cmsHarvester.nr_max_sites |
Definition at line 1140 of file cmsHarvester.py.
dictionary cmsHarvester.num_events_catalog = {} |
Definition at line 3215 of file cmsHarvester.py.
int cmsHarvester.num_events_dataset = sum(tmp) |
Definition at line 3986 of file cmsHarvester.py.
tuple cmsHarvester.num_sites |
    if self.datasets_information[dataset_name]["num_events"][run_number] != 0:
        pdb.set_trace()

DEBUG DEBUG DEBUG end
Definition at line 3956 of file cmsHarvester.py.
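A minimal sketch, assuming a per-dataset "num_events" dictionary keyed by run number (as the fragments above and below suggest), of how the dataset-level event count follows from the per-run counts:

    # Hypothetical bookkeeping; in the harvester this comes from DBS.
    datasets_information = {
        "/RelValQCD/.../GEN-SIM-RECO": {
            "num_events": {123456: 1000, 123457: 2500, 123458: 0},
        },
    }

    dataset_name = "/RelValQCD/.../GEN-SIM-RECO"
    tmp = datasets_information[dataset_name]["num_events"]
    num_events_dataset = sum(tmp.values())
    print(num_events_dataset)   # 3500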
int cmsHarvester.number_max_sites = self.nr_max_sites+1 |
Definition at line 4336 of file cmsHarvester.py.
cmsHarvester.option_parser |
Definition at line 1878 of file cmsHarvester.py.
cmsHarvester.output_file_name = self.\ |
Definition at line 4382 of file cmsHarvester.py.
Referenced by HcalLutManager.writeLutXmlFiles().
tuple cmsHarvester.path = os.path.join(path, piece) |
    self.logger.debug("!!! Skip path piece `%s'? %s" % \
                      (piece, str(skip_this_path_piece)))
    else:
        self.logger.debug("  accepting")

Add piece to the path we're building.

    self.logger.debug("Adding piece to path...")
Definition at line 1591 of file cmsHarvester.py.
Referenced by RPCLinkSynchroStat.add(), addFiles(), HLTPerformanceInfo.addModuleToPath(), DeDxDiscriminatorLearner.algoAnalyzeTheTree(), SiStripGainFromCalibTree.algoAnalyzeTheTree(), HLTTauDQMPathSummaryPlotter.analyze(), PixelVTXMonitor.analyze(), MuonDTDigis.analyze(), TriggerRatesMonitor.analyze(), PrintLoadingPlugins.askedToLoad(), SiStripFedCablingBuilderFromDb.assignDcuAndDetIds(), JetCorrectorDBWriter.beginJob(), GenericBenchmarkAnalyzer.beginJob(), ProfilerService.beginPath(), MEtoEDMConverter.beginRun(), HLTHiggsSubAnalysis.beginRun(), HLTExoticaSubAnalysis.beginRun(), ecaldqm::MESetEcal.book(), DQMStore.book(), HLTHiggsSubAnalysis.bookHistograms(), CSCOfflineClient.bookMonitorObject(), CSCMonitorModule.bookMonitorObject(), edmplugin::standard.cachefileName(), DQMStoreStats.calcstats(), FWGeometryTableViewBase.cdTop(), FWPSetTableManager.cellRenderer(), LStoreStorageMaker.check(), edm::Schedule.checkForCorrectness(), cleanTrailingSlashes(), ecaldqm::MESetNonObject.clone(), ecaldqm::MESetProjection.clone(), ecaldqm::MESetDet2D.clone(), ecaldqm::MESetDet0D.clone(), ecaldqm::MESetDet1D.clone(), ecaldqm::MESetTrend.clone(), ecaldqm::MESetEcal.clone(), ecaldqm::MESetMulti.clone(), ecaldqm::MESet.clone(), CmsShowSearchFiles.CmsShowSearchFiles(), cond::persistency.compareTags(), SiStripGainFromData.ComputeChargeOverPath(), edmplugin::standard.config(), convertFile(), CommissioningHistograms.copyCustomInformation(), FWTextTreeCellRenderer.coreIcondir(), FWCheckBoxIcon.coreIcondir(), coreIcondir(), SiStripCommissioningSource.createCablingTasks(), SiStripHistoId.createHistoLayer(), ecaldqm.createMESet(), evf::EvFDaqDirector.createRunOpendirMaybe(), ASmirnovDeDxDiscriminator.dedx(), BTagLikeDeDxDiscriminator.dedx(), ProductDeDxDiscriminator.dedx(), SmirnovDeDxDiscriminator.dedx(), FedRawDataInputSource.deleteFile(), do_work(), PFClient.doEfficiency(), PFClient_JetRes.doEfficiency(), MuonTestSummary.doEnergyTests(), MuonTestSummary.doKinematicsTests(), MuonTestSummary.doMuonIDTests(), PFClient.doProfiles(), PFClient.doProjection(), MuonTestSummary.doResidualsTests(), PFClient.doSummaries(), PFClient_JetRes.doSummaries(), HLTMuonValidator.dqmBeginRun(), HLTMuonOfflineAnalyzer.dqmBeginRun(), TriggerRatesMonitor.dqmBeginRun(), MuonTrackResidualsTest.dqmEndJob(), MuonRecoTest.dqmEndJob(), DTMuonMillepede.DTMuonMillepede(), DTMuonSLToSL.DTMuonSLToSL(), EcalTrivialConditionRetriever.EcalTrivialConditionRetriever(), RawEventFileWriterForBU.endOfLS(), MuonAlignmentSummary.endRun(), ESTrivialConditionRetriever.ESTrivialConditionRetriever(), CommissioningHistograms.extractHistograms(), spf::SherpackFetcher.Fetch(), SummaryGeneratorControlView.fill(), SummaryGeneratorReadoutView.fill(), SiStripCommissioningSource.fillCablingHistos(), DQMFileSaver.fillJson(), SiPixelActionExecutor.fillOccupancy(), edm::SystemTimeKeeper.fillTriggerTimingReport(), PFDQMEventSelector.filter(), LocalFileSystem.findCachePath(), EmDQM.findEgammaPaths(), SiPixelFrameReverter.findFedId(), FWGeometry.findFile(), SiPixelFrameReverter.findLinkInFed(), SiPixelInformationExtractor.findNoisyPixels(), DQMImplNet< DQMNet::Object >.findObject(), SiPixelFrameReverter.findPixelInRoc(), SiPixelFrameReverter.findRocInDet(), SiPixelFrameReverter.findRocInLink(), RawEventFileWriterForBU.finishFileWrite(), spf::SherpackFetcher.FnFileGet(), TkHistoMap.folderDefinition(), ecaldqm::MESet.formPath(), ecaldqm::MESetEcal.generatePaths(), pos::PixelConfigFile.get(), SiPixelQuality.getBadRocPositions(), MonitorElement.getFullname(), 
cscdqm::HistoDef.getFullPath(), HFShowerParam.getHits(), DQMStore.getMatchingContents(), FedRawDataInputSource.grabNextJsonFile(), RawEventFileWriterForBU.handler(), fit::RootMinuitCommands< Function >.init(), TStorageFactoryFile.Initialize(), edm::FileInPath.initialize_(), metsig::SignAlgoResolutions.initializeJetResolutions(), edmplugin::PluginManager.load(), EcalDeadChannelRecoveryNN< DetIdT >.load_file(), edmplugin::PluginManager.loadableFor_(), FWGeometry.loadMap(), LumiCorrectionSource.LumiCorrectionSource(), MagFieldConfig.MagFieldConfig(), main(), dqmservices::DQMFileIterator.make_path(), pos::PixelConfigFile.makeNewVersion(), TrajectoryManager.makeSinglePSimHit(), SiPixelSCurveCalibrationAnalysis.makeThresholdSummary(), MergeRootfile(), SiPixelDigitizerAlgorithm.module_killing_DB(), MonitorElement.MonitorElement(), PhysicsTools::MVAComputerESSourceBase.MVAComputerESSourceBase(), edmplugin::PluginFactoryBase.newPlugin(), LStoreStorageMaker.open(), StormStorageMaker.open(), StormLcgGtStorageMaker.open(), dqmservices::DQMStreamerReader.openFile_(), ora::SharedLibraryName.operator()(), SiPixelFedCablingTree.pathToDetUnit(), SiPixelFedCablingMap.pathToDetUnit(), dqmservices::JsonWritingTimeoutPoolOutputModule.physicalAndLogicalNameForNewFile(), PlotPixelMultVtxPos(), edmplugin::PluginManager.PluginManager(), edmplugin::standard.poisonedCachefileName(), DQMRootOutputModule.postForkReacquireResources(), FastTimerService.postPathEvent(), evf::FastMonitoringService.preallocate(), evf::EvFDaqDirector.preGlobalEndLumi(), evf::FastMonitoringService.preGlobalEndLumi(), FastTimerService.prePathEvent(), FedCablingAnalysis.print(), cond::XMLAuthenticationService::XMLAuthenticationService.processFile(), ShallowGainCalibration.produce(), MuonSimHitProducer.produce(), RKPropagatorInS.propagateParametersOnCylinder(), RKPropagatorInS.propagateParametersOnPlane(), MEtoEDMConverter.putData(), edmplugin::CacheParser.read(), CSCGasCollisions.readCollisionTable(), DQMStore.readFilePB(), FedRawDataInputSource.readSupervisor(), DQMBasicNet.removeLocalExcept(), FedRawDataInputSource.renameToNextFree(), gen::ParameterCollector.resolve(), ecaldqm::MESetEcal.retrieve(), SiPixelDQMRocLevelAnalyzer.RocSummary(), CommissioningHistograms.runNumber(), CommissioningHistograms.runType(), CommissioningHistograms.save(), DQMStore.save(), lumi::DBConfig.setAuthentication(), cond::XMLAuthenticationService::XMLAuthenticationService.setAuthenticationPath(), SiStripCommissioningOfflineClient.setInputFiles(), SiStripHistoPlotter.setNewCondDBPlot(), SiStripHistoPlotter.setNewPlot(), CommonAnalyzer.setPath(), PhiSymmetryCalibration_step2_SM.setUp(), PhiSymmetryCalibration_step2.setUp(), PFJetBenchmark.setup(), PFMETBenchmark.setup(), PFTauElecRejectionBenchmark.setup(), cond::CredentialStore.setUpForService(), HelixArbitraryPlaneCrossing2Order.solutionByDirection(), splitPath(), RawEventFileWriterForBU.stop(), PedsOnlyAnalysis.summary(), VpspScanAnalysis.summary(), PedestalsAnalysis.summary(), NoiseAnalysis.summary(), OptoScanAnalysis.summary(), CommissioningAnalysis.summary(), PedsFullNoiseAnalysis.summary(), SiPixelFrameReverter.toCabling(), SiPixelFrameConverter.toRoc(), LumiCorrectionSource.translateFrontierConnect(), edmplugin::PluginManager.tryToLoad(), DQMStore.useQTestByMatch(), SiStripConfigDb.usingDatabase(), cond::persistency.validateTag(), cond::XMLAuthenticationService::XMLAuthenticationService.verifyFileName(), and PixelDataFormatter.word2digi().
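The fragments above belong to the CASTOR directory-creation logic, which assembles the target path piece by piece and decides per piece whether that directory level needs to be created and checked. A minimal sketch of the accumulation pattern, with an invented piece list and a placeholder skip rule standing in for the real checks:

    import os.path

    # Invented inputs: the harvester derives these by splitting a CASTOR path.
    castor_path_pieces = ["/", "castor", "cern.ch", "user", "j", "jdoe"]
    protected_pieces = ["/", "castor", "cern.ch"]   # placeholder skip rule

    path = ""
    for piece in castor_path_pieces:
        skip_this_path_piece = piece in protected_pieces
        # Add piece to the path we're building.
        path = os.path.join(path, piece)
        if skip_this_path_piece:
            continue   # leave existing top-level directories alone
        print("would create/check `%s'" % path)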
tuple cmsHarvester.permissions = extract_permissions(output) |
Definition at line 1648 of file cmsHarvester.py.
list cmsHarvester.permissions_new = [] |
Definition at line 1678 of file cmsHarvester.py.
string cmsHarvester.permissions_target = "775" |
Definition at line 1672 of file cmsHarvester.py.
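These variables belong to the check that harvest directories on CASTOR end up group-writeable. A hedged sketch, assuming the rfstat output yields an rwx-style mode string that is reduced to octal digits and compared against the "775" target; the parsing below is a guess, not the harvester's extract_permissions():

    def octal_from_rwx(rwx):
        """Turn e.g. 'rwxrwxr-x' into '775' (ignores setuid/sticky bits)."""
        assert len(rwx) == 9
        digits = []
        for i in range(0, 9, 3):
            triplet = rwx[i:i + 3]
            value = (4 if triplet[0] == "r" else 0) \
                  + (2 if triplet[1] == "w" else 0) \
                  + (1 if triplet[2] == "x" else 0)
            digits.append(str(value))
        return "".join(digits)

    permissions = octal_from_rwx("rwxrwxr-x")
    permissions_target = "775"
    if permissions != permissions_target:
        print("permissions %s do not match target %s"
              % (permissions, permissions_target))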
cmsHarvester.preferred_site |
Definition at line 1146 of file cmsHarvester.py.
cmsHarvester.ref_hist_mappings_file_name |
Definition at line 2257 of file cmsHarvester.py.
tuple cmsHarvester.run_number = int(handler.results["run.number"][index]) |
Definition at line 3174 of file cmsHarvester.py.
Referenced by BlockFormatter.DigiToRaw(), TEcnaRead.FileParameters(), HcalChannelQualityXml.HcalChannelQualityXml(), HcalL1TriggerObjectsXml.HcalL1TriggerObjectsXml(), TEcnaHeader.HeaderParameters(), HcalChannelDataXml.init_data(), HcalChannelDataXml.set_header_run_number(), SiStripCommissioningOfflineClient.setInputFiles(), and XMLHTRZeroSuppressionLoader.XMLHTRZeroSuppressionLoader().
list cmsHarvester.runs = self.datasets_to_use[dataset_name] |
Definition at line 4349 of file cmsHarvester.py.
cmsHarvester.runs_to_ignore |
Definition at line 3555 of file cmsHarvester.py.
cmsHarvester.runs_to_use |
Definition at line 3531 of file cmsHarvester.py.
cmsHarvester.saveByLumiSection |
Definition at line 1119 of file cmsHarvester.py.
string cmsHarvester.se_name = choice(t1_sites) |
Definition at line 1791 of file cmsHarvester.py.
string cmsHarvester.sep = "#" |
Definition at line 4807 of file cmsHarvester.py.
Referenced by HLTMonBitSummary.analyze(), PrimaryVertexAnalyzer4PU.analyzeVertexCollection(), PrimaryVertexAnalyzer4PU.analyzeVertexCollectionTP(), edm::Entry.Entry(), ExpressionVariable< Object, label >.ExpressionVariable(), LumiProducer.fillRunCache(), ElectronDqmAnalyzerBase.find(), fwlite::DataGetterHelper.getBranchDataFor(), HLTPixlMBForAlignmentFilter.hltFilter(), fit::RootMinuitCommands< Function >.init(), Tokenizer.join(), std.operator<<(), HcalTopologyRestrictionParser.parse(), lumi::NormDML.parseAfterglows(), ParticleReplacerZtautau.ParticleReplacerZtautau(), ClhepEvaluator.prepare(), ElectronIDValueMapProducer.produce(), ElectronEnergyRegressionEvaluate.regressionUncertaintyNoTrkVar(), ElectronEnergyRegressionEvaluate.regressionUncertaintyNoTrkVarV1(), ElectronEnergyRegressionEvaluate.regressionUncertaintyWithSubClusters(), ElectronEnergyRegressionEvaluate.regressionUncertaintyWithTrkVar(), ElectronEnergyRegressionEvaluate.regressionUncertaintyWithTrkVarV1(), ElectronEnergyRegressionEvaluate.regressionUncertaintyWithTrkVarV2(), ElectronEnergyRegressionEvaluate.regressionValueNoTrkVar(), ElectronEnergyRegressionEvaluate.regressionValueNoTrkVarV1(), ElectronEnergyRegressionEvaluate.regressionValueWithSubClusters(), ElectronEnergyRegressionEvaluate.regressionValueWithTrkVar(), ElectronEnergyRegressionEvaluate.regressionValueWithTrkVarV1(), ElectronEnergyRegressionEvaluate.regressionValueWithTrkVarV2(), cond::UpdateStamp.stamp(), StringBasedNTupler.StringBasedNTupler(), edm.tokenize(), and cond::IOVSequence.updateMetadata().
list cmsHarvester.site_name = None |
Definition at line 1772 of file cmsHarvester.py.
list cmsHarvester.site_names = list(set([j for i in files_info[run_number].values() for j in i[1]])) |
Definition at line 3217 of file cmsHarvester.py.
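The nested comprehension above flattens the per-file site lists of one run into a de-duplicated list of all sites hosting that run. Spelled out on a toy files_info dictionary following the (nevents, [site, ...]) convention used earlier:

    files_info = {
        123456: {
            "/store/a.root": (100, ["srm.cern.ch", "se.fnal.gov"]),
            "/store/b.root": (200, ["srm.cern.ch"]),
        },
    }

    run_number = 123456
    site_names = list(set([j for i in files_info[run_number].values()
                           for j in i[1]]))
    print(sorted(site_names))   # ['se.fnal.gov', 'srm.cern.ch']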
list cmsHarvester.sites = [self.preferred_site] |
Definition at line 1761 of file cmsHarvester.py.
Referenced by edm::service::SiteLocalConfigService.parse().
list cmsHarvester.sites_forbidden = [] |
Definition at line 1708 of file cmsHarvester.py.
list cmsHarvester.sites_with_complete_copies = [] |
Definition at line 3233 of file cmsHarvester.py.
cmsHarvester.skip_this_path_piece = True |
self.logger.debug("Checking CASTOR path piece `%s'" % \ piece)
self.logger.debug("Checking `%s' against `%s'" % \ (castor_path_pieces[piece_index + check_size], castor_paths_dont_touch[check_size])) self.logger.debug(" skipping")
Definition at line 1583 of file cmsHarvester.py.
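The debug lines above come from the guard that protects existing top-level CASTOR directories: path pieces are compared against a castor_paths_dont_touch lookup before any directory is created or chmod'ed. A sketch of that lookup with invented contents, keyed (as the indexing in the debug message suggests) by an offset relative to the current piece; here "skipping" simply means leaving that level untouched:

    castor_paths_dont_touch = {
        0: ["castor", "cern.ch", "cms", "store", "user"],
    }

    castor_path_pieces = ["castor", "cern.ch", "cms", "store", "user", "jdoe"]

    for piece_index, piece in enumerate(castor_path_pieces):
        skip_this_path_piece = False
        for check_size in castor_paths_dont_touch:
            if piece_index + check_size < len(castor_path_pieces):
                if castor_path_pieces[piece_index + check_size] in \
                       castor_paths_dont_touch[check_size]:
                    skip_this_path_piece = True
                    break
        print("%s -> %s" % (piece,
                            "skipping" if skip_this_path_piece
                            else "accepting"))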
list cmsHarvester.t1_sites = [] |
Definition at line 1778 of file cmsHarvester.py.
dictionary cmsHarvester.tmp |
This basically means copying over the

    for dataset_name in self.datasets_to_use.keys():
        self.datasets_to_use[dataset_name] = \
            self.datasets_information[dataset_name]["runs"]

OBSOLETE OBSOLETE OBSOLETE end

    tmp = self.datasets_information[dataset_name] \
          ["num_events"]
Definition at line 3983 of file cmsHarvester.py.
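The first fragment above shows the fallback when no explicit run selection is given: every selected dataset inherits all runs known for it. A minimal sketch with hypothetical bookkeeping dictionaries:

    # Hypothetical stand-ins for the harvester's instance attributes.
    datasets_information = {
        "/RelValQCD/.../GEN-SIM-RECO": {"runs": [123456, 123457]},
    }
    datasets_to_use = {"/RelValQCD/.../GEN-SIM-RECO": None}

    # Use all known runs for every selected dataset.
    for dataset_name in datasets_to_use.keys():
        datasets_to_use[dataset_name] = \
            datasets_information[dataset_name]["runs"]
    print(datasets_to_use)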
tuple cmsHarvester.traceback_string = traceback.format_exc() |
Definition at line 5718 of file cmsHarvester.py.
string cmsHarvester.twiki_url = "https://twiki.cern.ch/twiki/bin/view/CMS/CmsHarvester" |
Definition at line 41 of file cmsHarvester.py.
string cmsHarvester.type = "string" |
Definition at line 2057 of file cmsHarvester.py.
tuple cmsHarvester.use_es_prefer = (self.harvesting_type == "RelVal") |
Definition at line 4858 of file cmsHarvester.py.
cmsHarvester.use_refs = use_es_prefer or \ |
Definition at line 4859 of file cmsHarvester.py.
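use_refs combines the es_prefer decision with at least one more condition that is cut off after the line continuation in this dump. A loudly hedged sketch: the second operand below (a check on the reference-histogram mappings file option) is a guess, not the actual source line.

    harvesting_type = "RelVal"
    ref_hist_mappings_file_name = None   # hypothetical stand-in

    use_es_prefer = (harvesting_type == "RelVal")
    use_refs = use_es_prefer or \
               (ref_hist_mappings_file_name is not None)
    print(use_es_prefer, use_refs)   # True True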
cmsHarvester.UserName = output |
Definition at line 4331 of file cmsHarvester.py.
cmsHarvester.workflow_name = dataset_name |
Definition at line 4887 of file cmsHarvester.py.